author     Michaël Zasso <targos@protonmail.com>  2017-06-06 10:28:14 +0200
committer  Michaël Zasso <targos@protonmail.com>  2017-06-07 10:33:31 +0200
commit     3dc8c3bed4cf3a77607edbb0b015e33f8b60fc09 (patch)
tree       9dee56e142638b34f1eccbd0ad88c3bce5377c29 /deps/v8/src/ppc
parent     91a1bbe3055a660194ca4d403795aa0c03e9d056 (diff)
deps: update V8 to 5.9.211.32
PR-URL: https://github.com/nodejs/node/pull/13263
Reviewed-By: Gibson Fahnestock <gibfahn@gmail.com>
Reviewed-By: Ben Noordhuis <info@bnoordhuis.nl>
Reviewed-By: Franziska Hinkelmann <franziska.hinkelmann@gmail.com>
Reviewed-By: Myles Borins <myles.borins@gmail.com>
Diffstat (limited to 'deps/v8/src/ppc')
-rw-r--r--  deps/v8/src/ppc/assembler-ppc-inl.h            64
-rw-r--r--  deps/v8/src/ppc/assembler-ppc.cc              505
-rw-r--r--  deps/v8/src/ppc/assembler-ppc.h               362
-rw-r--r--  deps/v8/src/ppc/code-stubs-ppc.cc             393
-rw-r--r--  deps/v8/src/ppc/codegen-ppc.cc                  3
-rw-r--r--  deps/v8/src/ppc/constants-ppc.h               495
-rw-r--r--  deps/v8/src/ppc/deoptimizer-ppc.cc             25
-rw-r--r--  deps/v8/src/ppc/disasm-ppc.cc                  31
-rw-r--r--  deps/v8/src/ppc/interface-descriptors-ppc.cc   45
-rw-r--r--  deps/v8/src/ppc/macro-assembler-ppc.cc        153
-rw-r--r--  deps/v8/src/ppc/macro-assembler-ppc.h          31
-rw-r--r--  deps/v8/src/ppc/simulator-ppc.cc             1526
-rw-r--r--  deps/v8/src/ppc/simulator-ppc.h                87
13 files changed, 1655 insertions(+), 2065 deletions(-)
diff --git a/deps/v8/src/ppc/assembler-ppc-inl.h b/deps/v8/src/ppc/assembler-ppc-inl.h
index 216650c2f4..9ce247ad38 100644
--- a/deps/v8/src/ppc/assembler-ppc-inl.h
+++ b/deps/v8/src/ppc/assembler-ppc-inl.h
@@ -49,7 +49,7 @@ namespace internal {
bool CpuFeatures::SupportsCrankshaft() { return true; }
-bool CpuFeatures::SupportsSimd128() { return false; }
+bool CpuFeatures::SupportsWasmSimd128() { return false; }
void RelocInfo::apply(intptr_t delta) {
// absolute code pointer inside code object moves with the code object.
@@ -61,7 +61,7 @@ void RelocInfo::apply(intptr_t delta) {
// mov sequence
DCHECK(IsInternalReferenceEncoded(rmode_));
Address target = Assembler::target_address_at(pc_, host_);
- Assembler::set_target_address_at(isolate_, pc_, host_, target + delta,
+ Assembler::set_target_address_at(nullptr, pc_, host_, target + delta,
SKIP_ICACHE_FLUSH);
}
}
@@ -175,30 +175,28 @@ Address Assembler::return_address_from_call_start(Address pc) {
return pc + (len + 2) * kInstrSize;
}
-Object* RelocInfo::target_object() {
+HeapObject* RelocInfo::target_object() {
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- return reinterpret_cast<Object*>(Assembler::target_address_at(pc_, host_));
+ return HeapObject::cast(
+ reinterpret_cast<Object*>(Assembler::target_address_at(pc_, host_)));
}
-
-Handle<Object> RelocInfo::target_object_handle(Assembler* origin) {
+Handle<HeapObject> RelocInfo::target_object_handle(Assembler* origin) {
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- return Handle<Object>(
- reinterpret_cast<Object**>(Assembler::target_address_at(pc_, host_)));
+ return Handle<HeapObject>(
+ reinterpret_cast<HeapObject**>(Assembler::target_address_at(pc_, host_)));
}
-
-void RelocInfo::set_target_object(Object* target,
+void RelocInfo::set_target_object(HeapObject* target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- Assembler::set_target_address_at(isolate_, pc_, host_,
+ Assembler::set_target_address_at(target->GetIsolate(), pc_, host_,
reinterpret_cast<Address>(target),
icache_flush_mode);
- if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != NULL &&
- target->IsHeapObject()) {
- host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
- host(), this, HeapObject::cast(target));
+ if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != NULL) {
+ host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(host(), this,
+ target);
host()->GetHeap()->RecordWriteIntoCode(host(), this, target);
}
}
@@ -215,13 +213,12 @@ Address RelocInfo::target_runtime_entry(Assembler* origin) {
return target_address();
}
-
-void RelocInfo::set_target_runtime_entry(Address target,
+void RelocInfo::set_target_runtime_entry(Isolate* isolate, Address target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
DCHECK(IsRuntimeEntry(rmode_));
if (target_address() != target)
- set_target_address(target, write_barrier_mode, icache_flush_mode);
+ set_target_address(isolate, target, write_barrier_mode, icache_flush_mode);
}
@@ -266,10 +263,9 @@ static const int kCodeAgingTargetDelta = 1 * Assembler::kInstrSize;
static const int kNoCodeAgeSequenceLength =
(kNoCodeAgeSequenceInstructions * Assembler::kInstrSize);
-
-Handle<Object> RelocInfo::code_age_stub_handle(Assembler* origin) {
+Handle<Code> RelocInfo::code_age_stub_handle(Assembler* origin) {
UNREACHABLE(); // This should never be reached on PPC.
- return Handle<Object>();
+ return Handle<Code>();
}
@@ -283,9 +279,9 @@ Code* RelocInfo::code_age_stub() {
void RelocInfo::set_code_age_stub(Code* stub,
ICacheFlushMode icache_flush_mode) {
DCHECK(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
- Assembler::set_target_address_at(isolate_, pc_ + kCodeAgingTargetDelta, host_,
- stub->instruction_start(),
- icache_flush_mode);
+ Assembler::set_target_address_at(
+ stub->GetIsolate(), pc_ + kCodeAgingTargetDelta, host_,
+ stub->instruction_start(), icache_flush_mode);
}
@@ -294,19 +290,17 @@ Address RelocInfo::debug_call_address() {
return Assembler::target_address_at(pc_, host_);
}
-
-void RelocInfo::set_debug_call_address(Address target) {
+void RelocInfo::set_debug_call_address(Isolate* isolate, Address target) {
DCHECK(IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence());
- Assembler::set_target_address_at(isolate_, pc_, host_, target);
+ Assembler::set_target_address_at(isolate, pc_, host_, target);
if (host() != NULL) {
- Object* target_code = Code::GetCodeFromTargetAddress(target);
- host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
- host(), this, HeapObject::cast(target_code));
+ Code* target_code = Code::GetCodeFromTargetAddress(target);
+ host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(host(), this,
+ target_code);
}
}
-
-void RelocInfo::WipeOut() {
+void RelocInfo::WipeOut(Isolate* isolate) {
DCHECK(IsEmbeddedObject(rmode_) || IsCodeTarget(rmode_) ||
IsRuntimeEntry(rmode_) || IsExternalReference(rmode_) ||
IsInternalReference(rmode_) || IsInternalReferenceEncoded(rmode_));
@@ -316,10 +310,10 @@ void RelocInfo::WipeOut() {
} else if (IsInternalReferenceEncoded(rmode_)) {
// mov sequence
// Currently used only by deserializer, no need to flush.
- Assembler::set_target_address_at(isolate_, pc_, host_, NULL,
+ Assembler::set_target_address_at(isolate, pc_, host_, NULL,
SKIP_ICACHE_FLUSH);
} else {
- Assembler::set_target_address_at(isolate_, pc_, host_, NULL);
+ Assembler::set_target_address_at(isolate, pc_, host_, NULL);
}
}
@@ -622,6 +616,8 @@ void Assembler::deserialization_set_target_internal_reference_at(
void Assembler::set_target_address_at(Isolate* isolate, Address pc,
Address constant_pool, Address target,
ICacheFlushMode icache_flush_mode) {
+ DCHECK_IMPLIES(isolate == nullptr, icache_flush_mode == SKIP_ICACHE_FLUSH);
+
if (FLAG_enable_embedded_constant_pool && constant_pool) {
ConstantPoolEntry::Access access;
if (IsConstantPoolLoadStart(pc, &access)) {
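As a minimal illustration of the contract documented by the DCHECK_IMPLIES above (not part of the patch; pc, constant_pool and target are placeholders), the Isolate may only be omitted when the i-cache flush is skipped:

// Hypothetical call sites showing the two allowed shapes after this change.
Assembler::set_target_address_at(nullptr, pc, constant_pool, target,
                                 SKIP_ICACHE_FLUSH);  // no flush, isolate unused
Assembler::set_target_address_at(isolate, pc, constant_pool, target);
                                 // default flush mode may flush the i-cache,
                                 // so a real Isolate* is required here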
diff --git a/deps/v8/src/ppc/assembler-ppc.cc b/deps/v8/src/ppc/assembler-ppc.cc
index 645561dbdd..ec6330a351 100644
--- a/deps/v8/src/ppc/assembler-ppc.cc
+++ b/deps/v8/src/ppc/assembler-ppc.cc
@@ -184,13 +184,13 @@ uint32_t RelocInfo::wasm_function_table_size_reference() {
}
void RelocInfo::unchecked_update_wasm_memory_reference(
- Address address, ICacheFlushMode flush_mode) {
- Assembler::set_target_address_at(isolate_, pc_, host_, address, flush_mode);
+ Isolate* isolate, Address address, ICacheFlushMode flush_mode) {
+ Assembler::set_target_address_at(isolate, pc_, host_, address, flush_mode);
}
-void RelocInfo::unchecked_update_wasm_size(uint32_t size,
+void RelocInfo::unchecked_update_wasm_size(Isolate* isolate, uint32_t size,
ICacheFlushMode flush_mode) {
- Assembler::set_target_address_at(isolate_, pc_, host_,
+ Assembler::set_target_address_at(isolate, pc_, host_,
reinterpret_cast<Address>(size), flush_mode);
}
@@ -231,8 +231,8 @@ MemOperand::MemOperand(Register ra, Register rb) {
// -----------------------------------------------------------------------------
// Specific instructions, constants, and masks.
-Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
- : AssemblerBase(isolate, buffer, buffer_size),
+Assembler::Assembler(IsolateData isolate_data, void* buffer, int buffer_size)
+ : AssemblerBase(isolate_data, buffer, buffer_size),
recorded_ast_id_(TypeFeedbackId::None()),
constant_pool_builder_(kLoadPtrMaxReachBits, kLoadDoubleMaxReachBits) {
reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_);
@@ -493,9 +493,9 @@ void Assembler::target_at_put(int pos, int target_pos, bool* is_branch) {
// pointer in a register.
Register dst = Register::from_code(instr_at(pos + kInstrSize));
int32_t offset = target_pos + (Code::kHeaderSize - kHeapObjectTag);
- CodePatcher patcher(isolate(), reinterpret_cast<byte*>(buffer_ + pos), 2,
- CodePatcher::DONT_FLUSH);
- patcher.masm()->bitwise_mov32(dst, offset);
+ PatchingAssembler patcher(isolate_data(),
+ reinterpret_cast<byte*>(buffer_ + pos), 2);
+ patcher.bitwise_mov32(dst, offset);
break;
}
case kUnboundAddLabelOffsetOpcode: {
@@ -504,26 +504,27 @@ void Assembler::target_at_put(int pos, int target_pos, bool* is_branch) {
Register dst = Register::from_code((operands >> 21) & 0x1f);
Register base = Register::from_code((operands >> 16) & 0x1f);
int32_t offset = target_pos + SIGN_EXT_IMM16(operands & kImm16Mask);
- CodePatcher patcher(isolate(), reinterpret_cast<byte*>(buffer_ + pos), 2,
- CodePatcher::DONT_FLUSH);
- patcher.masm()->bitwise_add32(dst, base, offset);
+ PatchingAssembler patcher(isolate_data(),
+ reinterpret_cast<byte*>(buffer_ + pos), 2);
+ patcher.bitwise_add32(dst, base, offset);
break;
}
case kUnboundMovLabelAddrOpcode: {
// Load the address of the label in a register.
Register dst = Register::from_code(instr_at(pos + kInstrSize));
- CodePatcher patcher(isolate(), reinterpret_cast<byte*>(buffer_ + pos),
- kMovInstructionsNoConstantPool,
- CodePatcher::DONT_FLUSH);
+ PatchingAssembler patcher(isolate_data(),
+ reinterpret_cast<byte*>(buffer_ + pos),
+ kMovInstructionsNoConstantPool);
// Keep internal references relative until EmitRelocations.
- patcher.masm()->bitwise_mov(dst, target_pos);
+ patcher.bitwise_mov(dst, target_pos);
break;
}
case kUnboundJumpTableEntryOpcode: {
- CodePatcher patcher(isolate(), reinterpret_cast<byte*>(buffer_ + pos),
- kPointerSize / kInstrSize, CodePatcher::DONT_FLUSH);
+ PatchingAssembler patcher(isolate_data(),
+ reinterpret_cast<byte*>(buffer_ + pos),
+ kPointerSize / kInstrSize);
// Keep internal references relative until EmitRelocations.
- patcher.masm()->dp(target_pos);
+ patcher.dp(target_pos);
break;
}
default:
@@ -640,26 +641,11 @@ void Assembler::d_form(Instr instr, Register rt, Register ra,
emit(instr | rt.code() * B21 | ra.code() * B16 | (kImm16Mask & val));
}
-
-void Assembler::x_form(Instr instr, Register ra, Register rs, Register rb,
- RCBit r) {
- emit(instr | rs.code() * B21 | ra.code() * B16 | rb.code() * B11 | r);
-}
-
void Assembler::xo_form(Instr instr, Register rt, Register ra, Register rb,
OEBit o, RCBit r) {
emit(instr | rt.code() * B21 | ra.code() * B16 | rb.code() * B11 | o | r);
}
-void Assembler::xx3_form(Instr instr, DoubleRegister t, DoubleRegister a,
- DoubleRegister b) {
- int AX = ((a.code() & 0x20) >> 5) & 0x1;
- int BX = ((b.code() & 0x20) >> 5) & 0x1;
- int TX = ((t.code() & 0x20) >> 5) & 0x1;
- emit(instr | (t.code() & 0x1F) * B21 | (a.code() & 0x1F) * B16 | (b.code()
- & 0x1F) * B11 | AX * B2 | BX * B1 | TX);
-}
-
void Assembler::md_form(Instr instr, Register ra, Register rs, int shift,
int maskbit, RCBit r) {
int sh0_4 = shift & 0x1f;
@@ -766,26 +752,6 @@ void Assembler::xoris(Register ra, Register rs, const Operand& imm) {
}
-void Assembler::xor_(Register dst, Register src1, Register src2, RCBit rc) {
- x_form(EXT2 | XORX, dst, src1, src2, rc);
-}
-
-
-void Assembler::cntlzw_(Register ra, Register rs, RCBit rc) {
- x_form(EXT2 | CNTLZWX, ra, rs, r0, rc);
-}
-
-
-void Assembler::popcntw(Register ra, Register rs) {
- emit(EXT2 | POPCNTW | rs.code() * B21 | ra.code() * B16);
-}
-
-
-void Assembler::and_(Register ra, Register rs, Register rb, RCBit rc) {
- x_form(EXT2 | ANDX, ra, rs, rb, rc);
-}
-
-
void Assembler::rlwinm(Register ra, Register rs, int sh, int mb, int me,
RCBit rc) {
sh &= 0x1f;
@@ -841,26 +807,6 @@ void Assembler::clrlwi(Register dst, Register src, const Operand& val,
}
-void Assembler::srawi(Register ra, Register rs, int sh, RCBit r) {
- emit(EXT2 | SRAWIX | rs.code() * B21 | ra.code() * B16 | sh * B11 | r);
-}
-
-
-void Assembler::srw(Register dst, Register src1, Register src2, RCBit r) {
- x_form(EXT2 | SRWX, dst, src1, src2, r);
-}
-
-
-void Assembler::slw(Register dst, Register src1, Register src2, RCBit r) {
- x_form(EXT2 | SLWX, dst, src1, src2, r);
-}
-
-
-void Assembler::sraw(Register ra, Register rs, Register rb, RCBit r) {
- x_form(EXT2 | SRAW, ra, rs, rb, r);
-}
-
-
void Assembler::rotlw(Register ra, Register rs, Register rb, RCBit r) {
rlwnm(ra, rs, rb, 0, 31, r);
}
@@ -954,13 +900,6 @@ void Assembler::divwu(Register dst, Register src1, Register src2, OEBit o,
xo_form(EXT2 | DIVWU, dst, src1, src2, o, r);
}
-void Assembler::modsw(Register rt, Register ra, Register rb) {
- x_form(EXT2 | MODSW, ra, rt, rb, LeaveRC);
-}
-
-void Assembler::moduw(Register rt, Register ra, Register rb) {
- x_form(EXT2 | MODUW, ra, rt, rb, LeaveRC);
-}
void Assembler::addi(Register dst, Register src, const Operand& imm) {
DCHECK(!src.is(r0)); // use li instead to show intent
@@ -989,16 +928,6 @@ void Assembler::andis(Register ra, Register rs, const Operand& imm) {
}
-void Assembler::nor(Register dst, Register src1, Register src2, RCBit r) {
- x_form(EXT2 | NORX, dst, src1, src2, r);
-}
-
-
-void Assembler::notx(Register dst, Register src, RCBit r) {
- x_form(EXT2 | NORX, dst, src, src, r);
-}
-
-
void Assembler::ori(Register ra, Register rs, const Operand& imm) {
d_form(ORI, rs, ra, imm.imm_, false);
}
@@ -1009,16 +938,6 @@ void Assembler::oris(Register dst, Register src, const Operand& imm) {
}
-void Assembler::orx(Register dst, Register src1, Register src2, RCBit rc) {
- x_form(EXT2 | ORX, dst, src1, src2, rc);
-}
-
-
-void Assembler::orc(Register dst, Register src1, Register src2, RCBit rc) {
- x_form(EXT2 | ORC, dst, src1, src2, rc);
-}
-
-
void Assembler::cmpi(Register src1, const Operand& src2, CRegister cr) {
intptr_t imm16 = src2.imm_;
#if V8_TARGET_ARCH_PPC64
@@ -1047,30 +966,6 @@ void Assembler::cmpli(Register src1, const Operand& src2, CRegister cr) {
}
-void Assembler::cmp(Register src1, Register src2, CRegister cr) {
-#if V8_TARGET_ARCH_PPC64
- int L = 1;
-#else
- int L = 0;
-#endif
- DCHECK(cr.code() >= 0 && cr.code() <= 7);
- emit(EXT2 | CMP | cr.code() * B23 | L * B21 | src1.code() * B16 |
- src2.code() * B11);
-}
-
-
-void Assembler::cmpl(Register src1, Register src2, CRegister cr) {
-#if V8_TARGET_ARCH_PPC64
- int L = 1;
-#else
- int L = 0;
-#endif
- DCHECK(cr.code() >= 0 && cr.code() <= 7);
- emit(EXT2 | CMPL | cr.code() * B23 | L * B21 | src1.code() * B16 |
- src2.code() * B11);
-}
-
-
void Assembler::cmpwi(Register src1, const Operand& src2, CRegister cr) {
intptr_t imm16 = src2.imm_;
int L = 0;
@@ -1099,22 +994,6 @@ void Assembler::cmplwi(Register src1, const Operand& src2, CRegister cr) {
}
-void Assembler::cmpw(Register src1, Register src2, CRegister cr) {
- int L = 0;
- DCHECK(cr.code() >= 0 && cr.code() <= 7);
- emit(EXT2 | CMP | cr.code() * B23 | L * B21 | src1.code() * B16 |
- src2.code() * B11);
-}
-
-
-void Assembler::cmplw(Register src1, Register src2, CRegister cr) {
- int L = 0;
- DCHECK(cr.code() >= 0 && cr.code() <= 7);
- emit(EXT2 | CMPL | cr.code() * B23 | L * B21 | src1.code() * B16 |
- src2.code() * B11);
-}
-
-
void Assembler::isel(Register rt, Register ra, Register rb, int cb) {
emit(EXT2 | ISEL | rt.code() * B21 | ra.code() * B16 | rb.code() * B11 |
cb * B6);
@@ -1145,56 +1024,12 @@ void Assembler::lbz(Register dst, const MemOperand& src) {
}
-void Assembler::lbzx(Register rt, const MemOperand& src) {
- Register ra = src.ra();
- Register rb = src.rb();
- DCHECK(!ra.is(r0));
- emit(EXT2 | LBZX | rt.code() * B21 | ra.code() * B16 | rb.code() * B11 |
- LeaveRC);
-}
-
-
-void Assembler::lbzux(Register rt, const MemOperand& src) {
- Register ra = src.ra();
- Register rb = src.rb();
- DCHECK(!ra.is(r0));
- emit(EXT2 | LBZUX | rt.code() * B21 | ra.code() * B16 | rb.code() * B11 |
- LeaveRC);
-}
-
-
void Assembler::lhz(Register dst, const MemOperand& src) {
DCHECK(!src.ra_.is(r0));
d_form(LHZ, dst, src.ra(), src.offset(), true);
}
-void Assembler::lhzx(Register rt, const MemOperand& src) {
- Register ra = src.ra();
- Register rb = src.rb();
- DCHECK(!ra.is(r0));
- emit(EXT2 | LHZX | rt.code() * B21 | ra.code() * B16 | rb.code() * B11 |
- LeaveRC);
-}
-
-
-void Assembler::lhzux(Register rt, const MemOperand& src) {
- Register ra = src.ra();
- Register rb = src.rb();
- DCHECK(!ra.is(r0));
- emit(EXT2 | LHZUX | rt.code() * B21 | ra.code() * B16 | rb.code() * B11 |
- LeaveRC);
-}
-
-
-void Assembler::lhax(Register rt, const MemOperand& src) {
- Register ra = src.ra();
- Register rb = src.rb();
- DCHECK(!ra.is(r0));
- emit(EXT2 | LHAX | rt.code() * B21 | ra.code() * B16 | rb.code() * B11);
-}
-
-
void Assembler::lwz(Register dst, const MemOperand& src) {
DCHECK(!src.ra_.is(r0));
d_form(LWZ, dst, src.ra(), src.offset(), true);
@@ -1207,24 +1042,6 @@ void Assembler::lwzu(Register dst, const MemOperand& src) {
}
-void Assembler::lwzx(Register rt, const MemOperand& src) {
- Register ra = src.ra();
- Register rb = src.rb();
- DCHECK(!ra.is(r0));
- emit(EXT2 | LWZX | rt.code() * B21 | ra.code() * B16 | rb.code() * B11 |
- LeaveRC);
-}
-
-
-void Assembler::lwzux(Register rt, const MemOperand& src) {
- Register ra = src.ra();
- Register rb = src.rb();
- DCHECK(!ra.is(r0));
- emit(EXT2 | LWZUX | rt.code() * B21 | ra.code() * B16 | rb.code() * B11 |
- LeaveRC);
-}
-
-
void Assembler::lha(Register dst, const MemOperand& src) {
DCHECK(!src.ra_.is(r0));
d_form(LHA, dst, src.ra(), src.offset(), true);
@@ -1243,82 +1060,18 @@ void Assembler::lwa(Register dst, const MemOperand& src) {
#endif
}
-
-void Assembler::lwax(Register rt, const MemOperand& src) {
-#if V8_TARGET_ARCH_PPC64
- Register ra = src.ra();
- Register rb = src.rb();
- DCHECK(!ra.is(r0));
- emit(EXT2 | LWAX | rt.code() * B21 | ra.code() * B16 | rb.code() * B11);
-#else
- lwzx(rt, src);
-#endif
-}
-
-
-void Assembler::ldbrx(Register dst, const MemOperand& src) {
- x_form(EXT2 | LDBRX, src.ra(), dst, src.rb(), LeaveRC);
-}
-
-
-void Assembler::lwbrx(Register dst, const MemOperand& src) {
- x_form(EXT2 | LWBRX, src.ra(), dst, src.rb(), LeaveRC);
-}
-
-
-void Assembler::lhbrx(Register dst, const MemOperand& src) {
- x_form(EXT2 | LHBRX, src.ra(), dst, src.rb(), LeaveRC);
-}
-
-
void Assembler::stb(Register dst, const MemOperand& src) {
DCHECK(!src.ra_.is(r0));
d_form(STB, dst, src.ra(), src.offset(), true);
}
-void Assembler::stbx(Register rs, const MemOperand& src) {
- Register ra = src.ra();
- Register rb = src.rb();
- DCHECK(!ra.is(r0));
- emit(EXT2 | STBX | rs.code() * B21 | ra.code() * B16 | rb.code() * B11 |
- LeaveRC);
-}
-
-
-void Assembler::stbux(Register rs, const MemOperand& src) {
- Register ra = src.ra();
- Register rb = src.rb();
- DCHECK(!ra.is(r0));
- emit(EXT2 | STBUX | rs.code() * B21 | ra.code() * B16 | rb.code() * B11 |
- LeaveRC);
-}
-
-
void Assembler::sth(Register dst, const MemOperand& src) {
DCHECK(!src.ra_.is(r0));
d_form(STH, dst, src.ra(), src.offset(), true);
}
-void Assembler::sthx(Register rs, const MemOperand& src) {
- Register ra = src.ra();
- Register rb = src.rb();
- DCHECK(!ra.is(r0));
- emit(EXT2 | STHX | rs.code() * B21 | ra.code() * B16 | rb.code() * B11 |
- LeaveRC);
-}
-
-
-void Assembler::sthux(Register rs, const MemOperand& src) {
- Register ra = src.ra();
- Register rb = src.rb();
- DCHECK(!ra.is(r0));
- emit(EXT2 | STHUX | rs.code() * B21 | ra.code() * B16 | rb.code() * B11 |
- LeaveRC);
-}
-
-
void Assembler::stw(Register dst, const MemOperand& src) {
DCHECK(!src.ra_.is(r0));
d_form(STW, dst, src.ra(), src.offset(), true);
@@ -1331,54 +1084,11 @@ void Assembler::stwu(Register dst, const MemOperand& src) {
}
-void Assembler::stwx(Register rs, const MemOperand& src) {
- Register ra = src.ra();
- Register rb = src.rb();
- DCHECK(!ra.is(r0));
- emit(EXT2 | STWX | rs.code() * B21 | ra.code() * B16 | rb.code() * B11 |
- LeaveRC);
-}
-
-
-void Assembler::stwux(Register rs, const MemOperand& src) {
- Register ra = src.ra();
- Register rb = src.rb();
- DCHECK(!ra.is(r0));
- emit(EXT2 | STWUX | rs.code() * B21 | ra.code() * B16 | rb.code() * B11 |
- LeaveRC);
-}
-
-
-void Assembler::extsb(Register rs, Register ra, RCBit rc) {
- emit(EXT2 | EXTSB | ra.code() * B21 | rs.code() * B16 | rc);
-}
-
-
-void Assembler::extsh(Register rs, Register ra, RCBit rc) {
- emit(EXT2 | EXTSH | ra.code() * B21 | rs.code() * B16 | rc);
-}
-
-
-void Assembler::extsw(Register rs, Register ra, RCBit rc) {
-#if V8_TARGET_ARCH_PPC64
- emit(EXT2 | EXTSW | ra.code() * B21 | rs.code() * B16 | rc);
-#else
- // nop on 32-bit
- DCHECK(rs.is(ra) && rc == LeaveRC);
-#endif
-}
-
-
void Assembler::neg(Register rt, Register ra, OEBit o, RCBit r) {
emit(EXT2 | NEGX | rt.code() * B21 | ra.code() * B16 | o | r);
}
-void Assembler::andc(Register dst, Register src1, Register src2, RCBit rc) {
- x_form(EXT2 | ANDCX, dst, src1, src2, rc);
-}
-
-
#if V8_TARGET_ARCH_PPC64
// 64bit specific instructions
void Assembler::ld(Register rd, const MemOperand& src) {
@@ -1390,14 +1100,6 @@ void Assembler::ld(Register rd, const MemOperand& src) {
}
-void Assembler::ldx(Register rd, const MemOperand& src) {
- Register ra = src.ra();
- Register rb = src.rb();
- DCHECK(!ra.is(r0));
- emit(EXT2 | LDX | rd.code() * B21 | ra.code() * B16 | rb.code() * B11);
-}
-
-
void Assembler::ldu(Register rd, const MemOperand& src) {
int offset = src.offset();
DCHECK(!src.ra_.is(r0));
@@ -1407,14 +1109,6 @@ void Assembler::ldu(Register rd, const MemOperand& src) {
}
-void Assembler::ldux(Register rd, const MemOperand& src) {
- Register ra = src.ra();
- Register rb = src.rb();
- DCHECK(!ra.is(r0));
- emit(EXT2 | LDUX | rd.code() * B21 | ra.code() * B16 | rb.code() * B11);
-}
-
-
void Assembler::std(Register rs, const MemOperand& src) {
int offset = src.offset();
DCHECK(!src.ra_.is(r0));
@@ -1424,14 +1118,6 @@ void Assembler::std(Register rs, const MemOperand& src) {
}
-void Assembler::stdx(Register rs, const MemOperand& src) {
- Register ra = src.ra();
- Register rb = src.rb();
- DCHECK(!ra.is(r0));
- emit(EXT2 | STDX | rs.code() * B21 | ra.code() * B16 | rb.code() * B11);
-}
-
-
void Assembler::stdu(Register rs, const MemOperand& src) {
int offset = src.offset();
DCHECK(!src.ra_.is(r0));
@@ -1441,14 +1127,6 @@ void Assembler::stdu(Register rs, const MemOperand& src) {
}
-void Assembler::stdux(Register rs, const MemOperand& src) {
- Register ra = src.ra();
- Register rb = src.rb();
- DCHECK(!ra.is(r0));
- emit(EXT2 | STDUX | rs.code() * B21 | ra.code() * B16 | rb.code() * B11);
-}
-
-
void Assembler::rldic(Register ra, Register rs, int sh, int mb, RCBit r) {
md_form(EXT5 | RLDIC, ra, rs, sh, mb, r);
}
@@ -1509,21 +1187,6 @@ void Assembler::sradi(Register ra, Register rs, int sh, RCBit r) {
}
-void Assembler::srd(Register dst, Register src1, Register src2, RCBit r) {
- x_form(EXT2 | SRDX, dst, src1, src2, r);
-}
-
-
-void Assembler::sld(Register dst, Register src1, Register src2, RCBit r) {
- x_form(EXT2 | SLDX, dst, src1, src2, r);
-}
-
-
-void Assembler::srad(Register ra, Register rs, Register rb, RCBit r) {
- x_form(EXT2 | SRAD, ra, rs, rb, r);
-}
-
-
void Assembler::rotld(Register ra, Register rs, Register rb, RCBit r) {
rldcl(ra, rs, rb, 0, r);
}
@@ -1539,16 +1202,6 @@ void Assembler::rotrdi(Register ra, Register rs, int sh, RCBit r) {
}
-void Assembler::cntlzd_(Register ra, Register rs, RCBit rc) {
- x_form(EXT2 | CNTLZDX, ra, rs, r0, rc);
-}
-
-
-void Assembler::popcntd(Register ra, Register rs) {
- emit(EXT2 | POPCNTD | rs.code() * B21 | ra.code() * B16);
-}
-
-
void Assembler::mulld(Register dst, Register src1, Register src2, OEBit o,
RCBit r) {
xo_form(EXT2 | MULLD, dst, src1, src2, o, r);
@@ -1565,14 +1218,6 @@ void Assembler::divdu(Register dst, Register src1, Register src2, OEBit o,
RCBit r) {
xo_form(EXT2 | DIVDU, dst, src1, src2, o, r);
}
-
-void Assembler::modsd(Register rt, Register ra, Register rb) {
- x_form(EXT2 | MODSD, ra, rt, rb, LeaveRC);
-}
-
-void Assembler::modud(Register rt, Register ra, Register rb) {
- x_form(EXT2 | MODUD, ra, rt, rb, LeaveRC);
-}
#endif
@@ -2020,24 +1665,6 @@ void Assembler::lfdu(const DoubleRegister frt, const MemOperand& src) {
}
-void Assembler::lfdx(const DoubleRegister frt, const MemOperand& src) {
- Register ra = src.ra();
- Register rb = src.rb();
- DCHECK(!ra.is(r0));
- emit(EXT2 | LFDX | frt.code() * B21 | ra.code() * B16 | rb.code() * B11 |
- LeaveRC);
-}
-
-
-void Assembler::lfdux(const DoubleRegister frt, const MemOperand& src) {
- Register ra = src.ra();
- Register rb = src.rb();
- DCHECK(!ra.is(r0));
- emit(EXT2 | LFDUX | frt.code() * B21 | ra.code() * B16 | rb.code() * B11 |
- LeaveRC);
-}
-
-
void Assembler::lfs(const DoubleRegister frt, const MemOperand& src) {
int offset = src.offset();
Register ra = src.ra();
@@ -2060,24 +1687,6 @@ void Assembler::lfsu(const DoubleRegister frt, const MemOperand& src) {
}
-void Assembler::lfsx(const DoubleRegister frt, const MemOperand& src) {
- Register ra = src.ra();
- Register rb = src.rb();
- DCHECK(!ra.is(r0));
- emit(EXT2 | LFSX | frt.code() * B21 | ra.code() * B16 | rb.code() * B11 |
- LeaveRC);
-}
-
-
-void Assembler::lfsux(const DoubleRegister frt, const MemOperand& src) {
- Register ra = src.ra();
- Register rb = src.rb();
- DCHECK(!ra.is(r0));
- emit(EXT2 | LFSUX | frt.code() * B21 | ra.code() * B16 | rb.code() * B11 |
- LeaveRC);
-}
-
-
void Assembler::stfd(const DoubleRegister frs, const MemOperand& src) {
int offset = src.offset();
Register ra = src.ra();
@@ -2100,24 +1709,6 @@ void Assembler::stfdu(const DoubleRegister frs, const MemOperand& src) {
}
-void Assembler::stfdx(const DoubleRegister frs, const MemOperand& src) {
- Register ra = src.ra();
- Register rb = src.rb();
- DCHECK(!ra.is(r0));
- emit(EXT2 | STFDX | frs.code() * B21 | ra.code() * B16 | rb.code() * B11 |
- LeaveRC);
-}
-
-
-void Assembler::stfdux(const DoubleRegister frs, const MemOperand& src) {
- Register ra = src.ra();
- Register rb = src.rb();
- DCHECK(!ra.is(r0));
- emit(EXT2 | STFDUX | frs.code() * B21 | ra.code() * B16 | rb.code() * B11 |
- LeaveRC);
-}
-
-
void Assembler::stfs(const DoubleRegister frs, const MemOperand& src) {
int offset = src.offset();
Register ra = src.ra();
@@ -2140,24 +1731,6 @@ void Assembler::stfsu(const DoubleRegister frs, const MemOperand& src) {
}
-void Assembler::stfsx(const DoubleRegister frs, const MemOperand& src) {
- Register ra = src.ra();
- Register rb = src.rb();
- DCHECK(!ra.is(r0));
- emit(EXT2 | STFSX | frs.code() * B21 | ra.code() * B16 | rb.code() * B11 |
- LeaveRC);
-}
-
-
-void Assembler::stfsux(const DoubleRegister frs, const MemOperand& src) {
- Register ra = src.ra();
- Register rb = src.rb();
- DCHECK(!ra.is(r0));
- emit(EXT2 | STFSUX | frs.code() * B21 | ra.code() * B16 | rb.code() * B11 |
- LeaveRC);
-}
-
-
void Assembler::fsub(const DoubleRegister frt, const DoubleRegister fra,
const DoubleRegister frb, RCBit rc) {
a_form(EXT4 | FSUB, frt, fra, frb, rc);
@@ -2355,25 +1928,6 @@ void Assembler::fmsub(const DoubleRegister frt, const DoubleRegister fra,
frc.code() * B6 | rc);
}
-// Support for VSX instructions
-
-void Assembler::xsadddp(const DoubleRegister frt, const DoubleRegister fra,
- const DoubleRegister frb) {
- xx3_form(EXT6 | XSADDDP, frt, fra, frb);
-}
-void Assembler::xssubdp(const DoubleRegister frt, const DoubleRegister fra,
- const DoubleRegister frb) {
- xx3_form(EXT6 | XSSUBDP, frt, fra, frb);
-}
-void Assembler::xsdivdp(const DoubleRegister frt, const DoubleRegister fra,
- const DoubleRegister frb) {
- xx3_form(EXT6 | XSDIVDP, frt, fra, frb);
-}
-void Assembler::xsmuldp(const DoubleRegister frt, const DoubleRegister fra,
- const DoubleRegister frb) {
- xx3_form(EXT6 | XSMULDP, frt, fra, frb);
-}
-
// Pseudo instructions.
void Assembler::nop(int type) {
Register reg = r0;
@@ -2513,7 +2067,7 @@ void Assembler::EmitRelocations() {
RelocInfo::Mode rmode = it->rmode();
Address pc = buffer_ + it->position();
Code* code = NULL;
- RelocInfo rinfo(isolate(), pc, rmode, it->data(), code);
+ RelocInfo rinfo(pc, rmode, it->data(), code);
// Fix up internal references now that they are guaranteed to be bound.
if (RelocInfo::IsInternalReference(rmode)) {
@@ -2523,7 +2077,7 @@ void Assembler::EmitRelocations() {
} else if (RelocInfo::IsInternalReferenceEncoded(rmode)) {
// mov sequence
intptr_t pos = reinterpret_cast<intptr_t>(target_address_at(pc, code));
- set_target_address_at(isolate(), pc, code, buffer_ + pos,
+ set_target_address_at(nullptr, pc, code, buffer_ + pos,
SKIP_ICACHE_FLUSH);
}
@@ -2568,6 +2122,21 @@ void Assembler::CheckTrampolinePool() {
}
}
+PatchingAssembler::PatchingAssembler(IsolateData isolate_data, byte* address,
+ int instructions)
+ : Assembler(isolate_data, address, instructions * kInstrSize + kGap) {
+ DCHECK_EQ(reloc_info_writer.pos(), buffer_ + buffer_size_);
+}
+
+PatchingAssembler::~PatchingAssembler() {
+ // Check that the code was patched as expected.
+ DCHECK_EQ(pc_, buffer_ + buffer_size_ - kGap);
+ DCHECK_EQ(reloc_info_writer.pos(), buffer_ + buffer_size_);
+}
+
+void PatchingAssembler::FlushICache(Isolate* isolate) {
+ Assembler::FlushICache(isolate, buffer_, buffer_size_ - kGap);
+}
} // namespace internal
} // namespace v8
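A rough usage sketch of the new PatchingAssembler (not part of the patch; pos, dst and offset stand in for a concrete patch site, mirroring target_at_put() above):

// Patch two existing instructions in place. The constructor takes the exact
// instruction count and the destructor DCHECKs that exactly that much was emitted.
PatchingAssembler patcher(isolate_data(),
                          reinterpret_cast<byte*>(buffer_ + pos), 2);
patcher.bitwise_mov32(dst, offset);  // emits exactly 2 instructions
// Unlike the old CodePatcher, flushing is now an explicit, separate step:
// patcher.FlushICache(isolate);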
diff --git a/deps/v8/src/ppc/assembler-ppc.h b/deps/v8/src/ppc/assembler-ppc.h
index 810b42f900..04678e3393 100644
--- a/deps/v8/src/ppc/assembler-ppc.h
+++ b/deps/v8/src/ppc/assembler-ppc.h
@@ -149,16 +149,16 @@ struct Register {
kCode_no_reg = -1
};
- static const int kNumRegisters = Code::kAfterLast;
+ static constexpr int kNumRegisters = Code::kAfterLast;
#define REGISTER_COUNT(R) 1 +
- static const int kNumAllocatable =
- ALLOCATABLE_GENERAL_REGISTERS(REGISTER_COUNT)0;
+ static constexpr int kNumAllocatable =
+ ALLOCATABLE_GENERAL_REGISTERS(REGISTER_COUNT) 0;
#undef REGISTER_COUNT
#define REGISTER_BIT(R) 1 << kCode_##R |
- static const RegList kAllocatable =
- ALLOCATABLE_GENERAL_REGISTERS(REGISTER_BIT)0;
+ static constexpr RegList kAllocatable =
+ ALLOCATABLE_GENERAL_REGISTERS(REGISTER_BIT) 0;
#undef REGISTER_BIT
static Register from_code(int code) {
@@ -183,30 +183,30 @@ struct Register {
}
#if V8_TARGET_LITTLE_ENDIAN
- static const int kMantissaOffset = 0;
- static const int kExponentOffset = 4;
+ static constexpr int kMantissaOffset = 0;
+ static constexpr int kExponentOffset = 4;
#else
- static const int kMantissaOffset = 4;
- static const int kExponentOffset = 0;
+ static constexpr int kMantissaOffset = 4;
+ static constexpr int kExponentOffset = 0;
#endif
// Unfortunately we can't make this private in a struct.
int reg_code;
};
-#define DECLARE_REGISTER(R) const Register R = {Register::kCode_##R};
-GENERAL_REGISTERS(DECLARE_REGISTER)
-#undef DECLARE_REGISTER
-const Register no_reg = {Register::kCode_no_reg};
+#define DEFINE_REGISTER(R) constexpr Register R = {Register::kCode_##R};
+GENERAL_REGISTERS(DEFINE_REGISTER)
+#undef DEFINE_REGISTER
+constexpr Register no_reg = {Register::kCode_no_reg};
// Aliases
-const Register kLithiumScratch = r11; // lithium scratch.
-const Register kConstantPoolRegister = r28; // Constant pool.
-const Register kRootRegister = r29; // Roots array pointer.
-const Register cp = r30; // JavaScript context pointer.
+constexpr Register kLithiumScratch = r11; // lithium scratch.
+constexpr Register kConstantPoolRegister = r28; // Constant pool.
+constexpr Register kRootRegister = r29; // Roots array pointer.
+constexpr Register cp = r30; // JavaScript context pointer.
-static const bool kSimpleFPAliasing = true;
-static const bool kSimdMaskRegisters = false;
+constexpr bool kSimpleFPAliasing = true;
+constexpr bool kSimdMaskRegisters = false;
// Double word FP register.
struct DoubleRegister {
@@ -218,8 +218,8 @@ struct DoubleRegister {
kCode_no_reg = -1
};
- static const int kNumRegisters = Code::kAfterLast;
- static const int kMaxNumRegisters = kNumRegisters;
+ static constexpr int kNumRegisters = Code::kAfterLast;
+ static constexpr int kMaxNumRegisters = kNumRegisters;
bool is_valid() const { return 0 <= reg_code && reg_code < kNumRegisters; }
bool is(DoubleRegister reg) const { return reg_code == reg.reg_code; }
@@ -245,19 +245,16 @@ typedef DoubleRegister FloatRegister;
// TODO(ppc) Define SIMD registers.
typedef DoubleRegister Simd128Register;
-#define DECLARE_REGISTER(R) \
- const DoubleRegister R = {DoubleRegister::kCode_##R};
-DOUBLE_REGISTERS(DECLARE_REGISTER)
-#undef DECLARE_REGISTER
-const Register no_dreg = {Register::kCode_no_reg};
+#define DEFINE_REGISTER(R) \
+ constexpr DoubleRegister R = {DoubleRegister::kCode_##R};
+DOUBLE_REGISTERS(DEFINE_REGISTER)
+#undef DEFINE_REGISTER
+constexpr Register no_dreg = {Register::kCode_no_reg};
-// Aliases for double registers. Defined using #define instead of
-// "static const DoubleRegister&" because Clang complains otherwise when a
-// compilation unit that includes this header doesn't use the variables.
-#define kFirstCalleeSavedDoubleReg d14
-#define kLastCalleeSavedDoubleReg d31
-#define kDoubleRegZero d14
-#define kScratchDoubleReg d13
+constexpr DoubleRegister kFirstCalleeSavedDoubleReg = d14;
+constexpr DoubleRegister kLastCalleeSavedDoubleReg = d31;
+constexpr DoubleRegister kDoubleRegZero = d14;
+constexpr DoubleRegister kScratchDoubleReg = d13;
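A minimal illustration of what replacing the #define aliases with typed constexpr constants allows (an assumption for illustration: DoubleRegister keeps a public reg_code field like Register above):

// The aliases are now real compile-time constants, so facts such as
// kDoubleRegZero and kFirstCalleeSavedDoubleReg both naming d14 can be
// checked statically:
static_assert(kDoubleRegZero.reg_code == kFirstCalleeSavedDoubleReg.reg_code,
              "kDoubleRegZero aliases the first callee-saved double register");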
Register ToRegister(int num);
@@ -278,25 +275,24 @@ struct CRegister {
int reg_code;
};
+constexpr CRegister no_creg = {-1};
-const CRegister no_creg = {-1};
-
-const CRegister cr0 = {0};
-const CRegister cr1 = {1};
-const CRegister cr2 = {2};
-const CRegister cr3 = {3};
-const CRegister cr4 = {4};
-const CRegister cr5 = {5};
-const CRegister cr6 = {6};
-const CRegister cr7 = {7};
+constexpr CRegister cr0 = {0};
+constexpr CRegister cr1 = {1};
+constexpr CRegister cr2 = {2};
+constexpr CRegister cr3 = {3};
+constexpr CRegister cr4 = {4};
+constexpr CRegister cr5 = {5};
+constexpr CRegister cr6 = {6};
+constexpr CRegister cr7 = {7};
// -----------------------------------------------------------------------------
// Machine instruction Operands
#if V8_TARGET_ARCH_PPC64
-const RelocInfo::Mode kRelocInfo_NONEPTR = RelocInfo::NONE64;
+constexpr RelocInfo::Mode kRelocInfo_NONEPTR = RelocInfo::NONE64;
#else
-const RelocInfo::Mode kRelocInfo_NONEPTR = RelocInfo::NONE32;
+constexpr RelocInfo::Mode kRelocInfo_NONEPTR = RelocInfo::NONE32;
#endif
// Class Operand represents a shifter operand in data processing instructions
@@ -401,7 +397,9 @@ class Assembler : public AssemblerBase {
// for code generation and assumes its size to be buffer_size. If the buffer
// is too small, a fatal error occurs. No deallocation of the buffer is done
// upon destruction of the assembler.
- Assembler(Isolate* isolate, void* buffer, int buffer_size);
+ Assembler(Isolate* isolate, void* buffer, int buffer_size)
+ : Assembler(IsolateData(isolate), buffer, buffer_size) {}
+ Assembler(IsolateData isolate_data, void* buffer, int buffer_size);
virtual ~Assembler() {}
// GetCode emits any pending (non-emitted) code and fills the descriptor
@@ -466,6 +464,7 @@ class Assembler : public AssemblerBase {
ConstantPoolEntry::Type type));
// Read/Modify the code target address in the branch/call instruction at pc.
+ // The isolate argument is unused (and may be nullptr) when skipping flushing.
INLINE(static Address target_address_at(Address pc, Address constant_pool));
INLINE(static void set_target_address_at(
Isolate* isolate, Address pc, Address constant_pool, Address target,
@@ -495,7 +494,7 @@ class Assembler : public AssemblerBase {
RelocInfo::Mode mode = RelocInfo::INTERNAL_REFERENCE);
// Size of an instruction.
- static const int kInstrSize = sizeof(Instr);
+ static constexpr int kInstrSize = sizeof(Instr);
// Here we are patching the address in the LUI/ORI instruction pair.
// These values are used in the serialization process and must be zero for
@@ -503,25 +502,25 @@ class Assembler : public AssemblerBase {
// are split across two consecutive instructions and don't exist separately
// in the code, so the serializer should not step forwards in memory after
// a target is resolved and written.
- static const int kSpecialTargetSize = 0;
+ static constexpr int kSpecialTargetSize = 0;
// Number of instructions to load an address via a mov sequence.
#if V8_TARGET_ARCH_PPC64
- static const int kMovInstructionsConstantPool = 1;
- static const int kMovInstructionsNoConstantPool = 5;
+ static constexpr int kMovInstructionsConstantPool = 1;
+ static constexpr int kMovInstructionsNoConstantPool = 5;
#if defined(V8_PPC_TAGGING_OPT)
- static const int kTaggedLoadInstructions = 1;
+ static constexpr int kTaggedLoadInstructions = 1;
#else
- static const int kTaggedLoadInstructions = 2;
+ static constexpr int kTaggedLoadInstructions = 2;
#endif
#else
- static const int kMovInstructionsConstantPool = 1;
- static const int kMovInstructionsNoConstantPool = 2;
- static const int kTaggedLoadInstructions = 1;
+ static constexpr int kMovInstructionsConstantPool = 1;
+ static constexpr int kMovInstructionsNoConstantPool = 2;
+ static constexpr int kTaggedLoadInstructions = 1;
#endif
- static const int kMovInstructions = FLAG_enable_embedded_constant_pool
- ? kMovInstructionsConstantPool
- : kMovInstructionsNoConstantPool;
+ static constexpr int kMovInstructions = FLAG_enable_embedded_constant_pool
+ ? kMovInstructionsConstantPool
+ : kMovInstructionsNoConstantPool;
// Distance between the instruction referring to the address of the call
// target and the return address.
@@ -531,7 +530,7 @@ class Assembler : public AssemblerBase {
// mtlr r8
// blrl
// @ return address
- static const int kCallTargetAddressOffset =
+ static constexpr int kCallTargetAddressOffset =
(kMovInstructions + 2) * kInstrSize;
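  // Illustrative arithmetic (not part of the patch): with the embedded constant
  // pool disabled on PPC64, kMovInstructions = kMovInstructionsNoConstantPool = 5,
  // so this is (5 + 2) * kInstrSize = 28 bytes, assuming the usual 4-byte Instr.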
// Distance between start of patched debug break slot and the emitted address
@@ -540,19 +539,156 @@ class Assembler : public AssemblerBase {
// mov r0, <address>
// mtlr r0
// blrl
- static const int kPatchDebugBreakSlotAddressOffset = 0 * kInstrSize;
+ static constexpr int kPatchDebugBreakSlotAddressOffset = 0 * kInstrSize;
// This is the length of the code sequence from SetDebugBreakAtSlot()
// FIXED_SEQUENCE
- static const int kDebugBreakSlotInstructions =
+ static constexpr int kDebugBreakSlotInstructions =
kMovInstructionsNoConstantPool + 2;
- static const int kDebugBreakSlotLength =
+ static constexpr int kDebugBreakSlotLength =
kDebugBreakSlotInstructions * kInstrSize;
static inline int encode_crbit(const CRegister& cr, enum CRBit crbit) {
return ((cr.code() * CRWIDTH) + crbit);
}
+#define DECLARE_PPC_X_INSTRUCTIONS_A_FORM(name, instr_name, instr_value) \
+ inline void name(const Register rt, const Register ra, \
+ const Register rb, const RCBit rc = LeaveRC) { \
+ x_form(instr_name, rt, ra, rb, rc); \
+ }
+
+#define DECLARE_PPC_X_INSTRUCTIONS_B_FORM(name, instr_name, instr_value) \
+ inline void name(const Register ra, const Register rs, \
+ const Register rb, const RCBit rc = LeaveRC) { \
+ x_form(instr_name, rs, ra, rb, rc); \
+ }
+
+#define DECLARE_PPC_X_INSTRUCTIONS_C_FORM(name, instr_name, instr_value) \
+ inline void name(const Register dst, const Register src, \
+ const RCBit rc = LeaveRC) { \
+ x_form(instr_name, src, dst, r0, rc); \
+ }
+
+#define DECLARE_PPC_X_INSTRUCTIONS_D_FORM(name, instr_name, instr_value) \
+ template <class R> \
+ inline void name(const R rt, const Register ra, \
+ const Register rb, const RCBit rc = LeaveRC) { \
+ DCHECK(!ra.is(r0)); \
+ x_form(instr_name, rt.code(), ra.code(), rb.code(), rc); \
+ } \
+ template <class R> \
+ inline void name(const R dst, const MemOperand& src) { \
+ name(dst, src.ra(), src.rb()); \
+ }
+
+#define DECLARE_PPC_X_INSTRUCTIONS_E_FORM(name, instr_name, instr_value) \
+ inline void name(const Register dst, const Register src, \
+ const int sh, const RCBit rc = LeaveRC) { \
+ x_form(instr_name, src.code(), dst.code(), sh, rc); \
+ }
+
+#define DECLARE_PPC_X_INSTRUCTIONS_F_FORM(name, instr_name, instr_value) \
+ inline void name(const Register src1, const Register src2, \
+ const CRegister cr = cr7, const RCBit rc = LeaveRC) { \
+ x_form(instr_name, cr, src1, src2, rc); \
+ } \
+ inline void name##w(const Register src1, const Register src2, \
+ const CRegister cr = cr7, const RCBit rc = LeaveRC) { \
+ x_form(instr_name, cr.code() * B2, src1.code(), src2.code(), LeaveRC); \
+ }
+
+#define DECLARE_PPC_X_INSTRUCTIONS_EH_S_FORM(name, instr_name, instr_value) \
+ inline void name(const Register dst, const MemOperand& src) { \
+ x_form(instr_name, src.ra(), dst, src.rb(), SetEH); \
+ }
+#define DECLARE_PPC_X_INSTRUCTIONS_EH_L_FORM(name, instr_name, instr_value) \
+ inline void name(const Register dst, const MemOperand& src) { \
+ DCHECK(!src.ra_.is(r0)); \
+ x_form(instr_name, src.ra(), dst, src.rb(), SetEH); \
+ }
+
+ inline void x_form(Instr instr, int f1, int f2, int f3, int rc) {
+ emit(instr | f1 * B21 | f2 * B16 | f3 * B11 | rc);
+ }
+ inline void x_form(Instr instr, Register rs, Register ra, Register rb,
+ RCBit rc) {
+ emit(instr | rs.code() * B21 | ra.code() * B16 | rb.code() * B11 | rc);
+ }
+ inline void x_form(Instr instr, Register ra, Register rs, Register rb,
+ EHBit eh = SetEH) {
+ emit(instr | rs.code() * B21 | ra.code() * B16 | rb.code() * B11 | eh);
+ }
+ inline void x_form(Instr instr, CRegister cr, Register s1, Register s2,
+ RCBit rc) {
+#if V8_TARGET_ARCH_PPC64
+ int L = 1;
+#else
+ int L = 0;
+#endif
+ emit(instr | cr.code() * B23 | L * B21 | s1.code() * B16 |
+ s2.code() * B11 | rc);
+ }
+
+ PPC_X_OPCODE_A_FORM_LIST(DECLARE_PPC_X_INSTRUCTIONS_A_FORM)
+ PPC_X_OPCODE_B_FORM_LIST(DECLARE_PPC_X_INSTRUCTIONS_B_FORM)
+ PPC_X_OPCODE_C_FORM_LIST(DECLARE_PPC_X_INSTRUCTIONS_C_FORM)
+ PPC_X_OPCODE_D_FORM_LIST(DECLARE_PPC_X_INSTRUCTIONS_D_FORM)
+ PPC_X_OPCODE_E_FORM_LIST(DECLARE_PPC_X_INSTRUCTIONS_E_FORM)
+ PPC_X_OPCODE_F_FORM_LIST(DECLARE_PPC_X_INSTRUCTIONS_F_FORM)
+ PPC_X_OPCODE_EH_S_FORM_LIST(DECLARE_PPC_X_INSTRUCTIONS_EH_S_FORM)
+ PPC_X_OPCODE_EH_L_FORM_LIST(DECLARE_PPC_X_INSTRUCTIONS_EH_L_FORM)
+
+ inline void notx(Register dst, Register src, RCBit rc = LeaveRC) {
+ nor(dst, src, src, rc);
+ }
+ inline void lwax(Register rt, const MemOperand& src) {
+#if V8_TARGET_ARCH_PPC64
+ Register ra = src.ra();
+ Register rb = src.rb();
+ DCHECK(!ra.is(r0));
+ x_form(LWAX, rt, ra, rb, LeaveRC);
+#else
+ lwzx(rt, src);
+#endif
+ }
+ inline void extsw(Register rs, Register ra, RCBit rc = LeaveRC) {
+#if V8_TARGET_ARCH_PPC64
+ emit(EXT2 | EXTSW | ra.code() * B21 | rs.code() * B16 | rc);
+#else
+ // nop on 32-bit
+ DCHECK(rs.is(ra) && rc == LeaveRC);
+#endif
+ }
+
+#undef DECLARE_PPC_X_INSTRUCTIONS_A_FORM
+#undef DECLARE_PPC_X_INSTRUCTIONS_B_FORM
+#undef DECLARE_PPC_X_INSTRUCTIONS_C_FORM
+#undef DECLARE_PPC_X_INSTRUCTIONS_D_FORM
+#undef DECLARE_PPC_X_INSTRUCTIONS_E_FORM
+#undef DECLARE_PPC_X_INSTRUCTIONS_F_FORM
+#undef DECLARE_PPC_X_INSTRUCTIONS_EH_S_FORM
+#undef DECLARE_PPC_X_INSTRUCTIONS_EH_L_FORM
+
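To make the macro scheme concrete, here is a hypothetical expansion of one B-form entry. The opcode lists live in constants-ppc.h, which is not part of this diff; the and_/ANDX pairing and the opcode spelling are assumed from the hand-written emitter deleted above.

// Hypothetical result of PPC_X_OPCODE_B_FORM_LIST expanding an (and_, ANDX, ...) entry:
inline void and_(const Register ra, const Register rs,
                 const Register rb, const RCBit rc = LeaveRC) {
  x_form(ANDX, rs, ra, rb, rc);  // emit(ANDX | rs*B21 | ra*B16 | rb*B11 | rc)
}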
+#define DECLARE_PPC_XX3_INSTRUCTIONS(name, instr_name, instr_value) \
+ inline void name(const DoubleRegister rt, const DoubleRegister ra, \
+ const DoubleRegister rb) { \
+ xx3_form(instr_name, rt, ra, rb); \
+ }
+
+ inline void xx3_form(Instr instr, DoubleRegister t, DoubleRegister a,
+ DoubleRegister b) {
+ int AX = ((a.code() & 0x20) >> 5) & 0x1;
+ int BX = ((b.code() & 0x20) >> 5) & 0x1;
+ int TX = ((t.code() & 0x20) >> 5) & 0x1;
+
+ emit(instr | (t.code() & 0x1F) * B21 | (a.code() & 0x1F) * B16 |
+ (b.code() & 0x1F) * B11 | AX * B2 | BX * B1 | TX);
+ }
+
+ PPC_XX3_OPCODE_LIST(DECLARE_PPC_XX3_INSTRUCTIONS)
+#undef DECLARE_PPC_XX3_INSTRUCTIONS
+
// ---------------------------------------------------------------------------
// Code generation
@@ -831,26 +967,17 @@ class Assembler : public AssemblerBase {
RCBit r = LeaveRC);
void divwu(Register dst, Register src1, Register src2, OEBit o = LeaveOE,
RCBit r = LeaveRC);
- void modsw(Register rt, Register ra, Register rb);
- void moduw(Register rt, Register ra, Register rb);
void addi(Register dst, Register src, const Operand& imm);
void addis(Register dst, Register src, const Operand& imm);
void addic(Register dst, Register src, const Operand& imm);
- void and_(Register dst, Register src1, Register src2, RCBit rc = LeaveRC);
- void andc(Register dst, Register src1, Register src2, RCBit rc = LeaveRC);
void andi(Register ra, Register rs, const Operand& imm);
void andis(Register ra, Register rs, const Operand& imm);
- void nor(Register dst, Register src1, Register src2, RCBit r = LeaveRC);
- void notx(Register dst, Register src, RCBit r = LeaveRC);
void ori(Register dst, Register src, const Operand& imm);
void oris(Register dst, Register src, const Operand& imm);
- void orx(Register dst, Register src1, Register src2, RCBit rc = LeaveRC);
- void orc(Register dst, Register src1, Register src2, RCBit rc = LeaveRC);
void xori(Register dst, Register src, const Operand& imm);
void xoris(Register ra, Register rs, const Operand& imm);
- void xor_(Register dst, Register src1, Register src2, RCBit rc = LeaveRC);
void cmpi(Register src1, const Operand& src2, CRegister cr = cr7);
void cmpli(Register src1, const Operand& src2, CRegister cr = cr7);
void cmpwi(Register src1, const Operand& src2, CRegister cr = cr7);
@@ -860,48 +987,22 @@ class Assembler : public AssemblerBase {
void mr(Register dst, Register src);
void lbz(Register dst, const MemOperand& src);
- void lbzx(Register dst, const MemOperand& src);
- void lbzux(Register dst, const MemOperand& src);
void lhz(Register dst, const MemOperand& src);
- void lhzx(Register dst, const MemOperand& src);
- void lhzux(Register dst, const MemOperand& src);
void lha(Register dst, const MemOperand& src);
- void lhax(Register dst, const MemOperand& src);
void lwz(Register dst, const MemOperand& src);
void lwzu(Register dst, const MemOperand& src);
- void lwzx(Register dst, const MemOperand& src);
- void lwzux(Register dst, const MemOperand& src);
void lwa(Register dst, const MemOperand& src);
- void lwax(Register dst, const MemOperand& src);
- void ldbrx(Register dst, const MemOperand& src);
- void lwbrx(Register dst, const MemOperand& src);
- void lhbrx(Register dst, const MemOperand& src);
void stb(Register dst, const MemOperand& src);
- void stbx(Register dst, const MemOperand& src);
- void stbux(Register dst, const MemOperand& src);
void sth(Register dst, const MemOperand& src);
- void sthx(Register dst, const MemOperand& src);
- void sthux(Register dst, const MemOperand& src);
void stw(Register dst, const MemOperand& src);
void stwu(Register dst, const MemOperand& src);
- void stwx(Register rs, const MemOperand& src);
- void stwux(Register rs, const MemOperand& src);
-
- void extsb(Register rs, Register ra, RCBit r = LeaveRC);
- void extsh(Register rs, Register ra, RCBit r = LeaveRC);
- void extsw(Register rs, Register ra, RCBit r = LeaveRC);
-
void neg(Register rt, Register ra, OEBit o = LeaveOE, RCBit c = LeaveRC);
#if V8_TARGET_ARCH_PPC64
void ld(Register rd, const MemOperand& src);
- void ldx(Register rd, const MemOperand& src);
void ldu(Register rd, const MemOperand& src);
- void ldux(Register rd, const MemOperand& src);
void std(Register rs, const MemOperand& src);
- void stdx(Register rs, const MemOperand& src);
void stdu(Register rs, const MemOperand& src);
- void stdux(Register rs, const MemOperand& src);
void rldic(Register dst, Register src, int sh, int mb, RCBit r = LeaveRC);
void rldicl(Register dst, Register src, int sh, int mb, RCBit r = LeaveRC);
void rldcl(Register ra, Register rs, Register rb, int mb, RCBit r = LeaveRC);
@@ -914,22 +1015,15 @@ class Assembler : public AssemblerBase {
void clrldi(Register dst, Register src, const Operand& val,
RCBit rc = LeaveRC);
void sradi(Register ra, Register rs, int sh, RCBit r = LeaveRC);
- void srd(Register dst, Register src1, Register src2, RCBit r = LeaveRC);
- void sld(Register dst, Register src1, Register src2, RCBit r = LeaveRC);
- void srad(Register dst, Register src1, Register src2, RCBit r = LeaveRC);
void rotld(Register ra, Register rs, Register rb, RCBit r = LeaveRC);
void rotldi(Register ra, Register rs, int sh, RCBit r = LeaveRC);
void rotrdi(Register ra, Register rs, int sh, RCBit r = LeaveRC);
- void cntlzd_(Register dst, Register src, RCBit rc = LeaveRC);
- void popcntd(Register dst, Register src);
void mulld(Register dst, Register src1, Register src2, OEBit o = LeaveOE,
RCBit r = LeaveRC);
void divd(Register dst, Register src1, Register src2, OEBit o = LeaveOE,
RCBit r = LeaveRC);
void divdu(Register dst, Register src1, Register src2, OEBit o = LeaveOE,
RCBit r = LeaveRC);
- void modsd(Register rt, Register ra, Register rb);
- void modud(Register rt, Register ra, Register rb);
#endif
void rlwinm(Register ra, Register rs, int sh, int mb, int me,
@@ -944,24 +1038,12 @@ class Assembler : public AssemblerBase {
RCBit rc = LeaveRC);
void clrlwi(Register dst, Register src, const Operand& val,
RCBit rc = LeaveRC);
- void srawi(Register ra, Register rs, int sh, RCBit r = LeaveRC);
- void srw(Register dst, Register src1, Register src2, RCBit r = LeaveRC);
- void slw(Register dst, Register src1, Register src2, RCBit r = LeaveRC);
- void sraw(Register dst, Register src1, Register src2, RCBit r = LeaveRC);
void rotlw(Register ra, Register rs, Register rb, RCBit r = LeaveRC);
void rotlwi(Register ra, Register rs, int sh, RCBit r = LeaveRC);
void rotrwi(Register ra, Register rs, int sh, RCBit r = LeaveRC);
- void cntlzw_(Register dst, Register src, RCBit rc = LeaveRC);
- void popcntw(Register dst, Register src);
-
void subi(Register dst, Register src1, const Operand& src2);
- void cmp(Register src1, Register src2, CRegister cr = cr7);
- void cmpl(Register src1, Register src2, CRegister cr = cr7);
- void cmpw(Register src1, Register src2, CRegister cr = cr7);
- void cmplw(Register src1, Register src2, CRegister cr = cr7);
-
void mov(Register dst, const Operand& src);
void bitwise_mov(Register dst, intptr_t value);
void bitwise_mov32(Register dst, int32_t value);
@@ -1025,20 +1107,12 @@ class Assembler : public AssemblerBase {
// Support for floating point
void lfd(const DoubleRegister frt, const MemOperand& src);
void lfdu(const DoubleRegister frt, const MemOperand& src);
- void lfdx(const DoubleRegister frt, const MemOperand& src);
- void lfdux(const DoubleRegister frt, const MemOperand& src);
void lfs(const DoubleRegister frt, const MemOperand& src);
void lfsu(const DoubleRegister frt, const MemOperand& src);
- void lfsx(const DoubleRegister frt, const MemOperand& src);
- void lfsux(const DoubleRegister frt, const MemOperand& src);
void stfd(const DoubleRegister frs, const MemOperand& src);
void stfdu(const DoubleRegister frs, const MemOperand& src);
- void stfdx(const DoubleRegister frs, const MemOperand& src);
- void stfdux(const DoubleRegister frs, const MemOperand& src);
void stfs(const DoubleRegister frs, const MemOperand& src);
void stfsu(const DoubleRegister frs, const MemOperand& src);
- void stfsx(const DoubleRegister frs, const MemOperand& src);
- void stfsux(const DoubleRegister frs, const MemOperand& src);
void fadd(const DoubleRegister frt, const DoubleRegister fra,
const DoubleRegister frb, RCBit rc = LeaveRC);
@@ -1102,17 +1176,6 @@ class Assembler : public AssemblerBase {
const DoubleRegister frc, const DoubleRegister frb,
RCBit rc = LeaveRC);
- // Support for VSX instructions
-
- void xsadddp(const DoubleRegister frt, const DoubleRegister fra,
- const DoubleRegister frb);
- void xssubdp(const DoubleRegister frt, const DoubleRegister fra,
- const DoubleRegister frb);
- void xsdivdp(const DoubleRegister frt, const DoubleRegister fra,
- const DoubleRegister frb);
- void xsmuldp(const DoubleRegister frt, const DoubleRegister fra,
- const DoubleRegister frc);
-
// Pseudo instructions
// Different nop operations are used by the code generator to detect certain
@@ -1365,14 +1428,16 @@ class Assembler : public AssemblerBase {
bool is_trampoline_emitted() const { return trampoline_emitted_; }
- private:
// Code generation
// The relocation writer's position is at least kGap bytes below the end of
// the generated instructions. This is so that multi-instruction sequences do
// not have to check for overflow. The same is true for writes of large
// relocation info entries.
- static const int kGap = 32;
+ static constexpr int kGap = 32;
+
+ RelocInfoWriter reloc_info_writer;
+ private:
// Repeated checking whether the trampoline pool should be emitted is rather
// expensive. By default we only check again once a number of instructions
// has been generated.
@@ -1387,8 +1452,7 @@ class Assembler : public AssemblerBase {
// Relocation info generation
// Each relocation is encoded as a variable size value
- static const int kMaxRelocSize = RelocInfoWriter::kMaxSize;
- RelocInfoWriter reloc_info_writer;
+ static constexpr int kMaxRelocSize = RelocInfoWriter::kMaxSize;
std::vector<DeferredRelocInfo> relocations_;
// The bound position, before this we cannot do instruction elimination.
@@ -1412,11 +1476,8 @@ class Assembler : public AssemblerBase {
DoubleRegister frb, RCBit r);
void d_form(Instr instr, Register rt, Register ra, const intptr_t val,
bool signed_disp);
- void x_form(Instr instr, Register ra, Register rs, Register rb, RCBit r);
void xo_form(Instr instr, Register rt, Register ra, Register rb, OEBit o,
RCBit r);
- void xx3_form(Instr instr, DoubleRegister t, DoubleRegister a,
- DoubleRegister b);
void md_form(Instr instr, Register ra, Register rs, int shift, int maskbit,
RCBit r);
void mds_form(Instr instr, Register ra, Register rs, Register rb, int maskbit,
@@ -1467,10 +1528,10 @@ class Assembler : public AssemblerBase {
// trigger different mode of branch instruction generation, where we
// no longer use a single branch instruction.
bool trampoline_emitted_;
- static const int kTrampolineSlotsSize = kInstrSize;
- static const int kMaxCondBranchReach = (1 << (16 - 1)) - 1;
- static const int kMaxBlockTrampolineSectionSize = 64 * kInstrSize;
- static const int kInvalidSlotPos = -1;
+ static constexpr int kTrampolineSlotsSize = kInstrSize;
+ static constexpr int kMaxCondBranchReach = (1 << (16 - 1)) - 1;
+ static constexpr int kMaxBlockTrampolineSectionSize = 64 * kInstrSize;
+ static constexpr int kInvalidSlotPos = -1;
Trampoline trampoline_;
bool internal_trampoline_exception_;
@@ -1487,6 +1548,15 @@ class EnsureSpace BASE_EMBEDDED {
public:
explicit EnsureSpace(Assembler* assembler) { assembler->CheckBuffer(); }
};
+
+class PatchingAssembler : public Assembler {
+ public:
+ PatchingAssembler(IsolateData isolate_data, byte* address, int instructions);
+ ~PatchingAssembler();
+
+ void FlushICache(Isolate* isolate);
+};
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/ppc/code-stubs-ppc.cc b/deps/v8/src/ppc/code-stubs-ppc.cc
index 389cba2f17..8c1ea4647e 100644
--- a/deps/v8/src/ppc/code-stubs-ppc.cc
+++ b/deps/v8/src/ppc/code-stubs-ppc.cc
@@ -1215,186 +1215,10 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
}
void RegExpExecStub::Generate(MacroAssembler* masm) {
-// Just jump directly to runtime if native RegExp is not selected at compile
-// time or if regexp entry in generated code is turned off runtime switch or
-// at compilation.
#ifdef V8_INTERPRETED_REGEXP
- __ TailCallRuntime(Runtime::kRegExpExec);
+ // This case is handled prior to the RegExpExecStub call.
+ __ Abort(kUnexpectedRegExpExecCall);
#else // V8_INTERPRETED_REGEXP
-
- // Stack frame on entry.
- // sp[0]: last_match_info (expected JSArray)
- // sp[4]: previous index
- // sp[8]: subject string
- // sp[12]: JSRegExp object
-
- const int kLastMatchInfoOffset = 0 * kPointerSize;
- const int kPreviousIndexOffset = 1 * kPointerSize;
- const int kSubjectOffset = 2 * kPointerSize;
- const int kJSRegExpOffset = 3 * kPointerSize;
-
- Label runtime, br_over, encoding_type_UC16;
-
- // Allocation of registers for this function. These are in callee save
- // registers and will be preserved by the call to the native RegExp code, as
- // this code is called using the normal C calling convention. When calling
- // directly from generated code the native RegExp code will not do a GC and
- // therefore the content of these registers are safe to use after the call.
- Register subject = r14;
- Register regexp_data = r15;
- Register last_match_info_elements = r16;
- Register code = r17;
-
- // Ensure register assigments are consistent with callee save masks
- DCHECK(subject.bit() & kCalleeSaved);
- DCHECK(regexp_data.bit() & kCalleeSaved);
- DCHECK(last_match_info_elements.bit() & kCalleeSaved);
- DCHECK(code.bit() & kCalleeSaved);
-
- // Ensure that a RegExp stack is allocated.
- ExternalReference address_of_regexp_stack_memory_address =
- ExternalReference::address_of_regexp_stack_memory_address(isolate());
- ExternalReference address_of_regexp_stack_memory_size =
- ExternalReference::address_of_regexp_stack_memory_size(isolate());
- __ mov(r3, Operand(address_of_regexp_stack_memory_size));
- __ LoadP(r3, MemOperand(r3, 0));
- __ cmpi(r3, Operand::Zero());
- __ beq(&runtime);
-
- // Check that the first argument is a JSRegExp object.
- __ LoadP(r3, MemOperand(sp, kJSRegExpOffset));
- __ JumpIfSmi(r3, &runtime);
- __ CompareObjectType(r3, r4, r4, JS_REGEXP_TYPE);
- __ bne(&runtime);
-
- // Check that the RegExp has been compiled (data contains a fixed array).
- __ LoadP(regexp_data, FieldMemOperand(r3, JSRegExp::kDataOffset));
- if (FLAG_debug_code) {
- __ TestIfSmi(regexp_data, r0);
- __ Check(ne, kUnexpectedTypeForRegExpDataFixedArrayExpected, cr0);
- __ CompareObjectType(regexp_data, r3, r3, FIXED_ARRAY_TYPE);
- __ Check(eq, kUnexpectedTypeForRegExpDataFixedArrayExpected);
- }
-
- // regexp_data: RegExp data (FixedArray)
- // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
- __ LoadP(r3, FieldMemOperand(regexp_data, JSRegExp::kDataTagOffset));
- // DCHECK(Smi::FromInt(JSRegExp::IRREGEXP) < (char *)0xffffu);
- __ CmpSmiLiteral(r3, Smi::FromInt(JSRegExp::IRREGEXP), r0);
- __ bne(&runtime);
-
- // regexp_data: RegExp data (FixedArray)
- // Check that the number of captures fit in the static offsets vector buffer.
- __ LoadP(r5,
- FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
- // Check (number_of_captures + 1) * 2 <= offsets vector size
- // Or number_of_captures * 2 <= offsets vector size - 2
- // SmiToShortArrayOffset accomplishes the multiplication by 2 and
- // SmiUntag (which is a nop for 32-bit).
- __ SmiToShortArrayOffset(r5, r5);
- STATIC_ASSERT(Isolate::kJSRegexpStaticOffsetsVectorSize >= 2);
- __ cmpli(r5, Operand(Isolate::kJSRegexpStaticOffsetsVectorSize - 2));
- __ bgt(&runtime);
-
- // Reset offset for possibly sliced string.
- __ li(r11, Operand::Zero());
- __ LoadP(subject, MemOperand(sp, kSubjectOffset));
- __ JumpIfSmi(subject, &runtime);
- __ mr(r6, subject); // Make a copy of the original subject string.
- // subject: subject string
- // r6: subject string
- // regexp_data: RegExp data (FixedArray)
- // Handle subject string according to its encoding and representation:
- // (1) Sequential string? If yes, go to (4).
- // (2) Sequential or cons? If not, go to (5).
- // (3) Cons string. If the string is flat, replace subject with first string
- // and go to (1). Otherwise bail out to runtime.
- // (4) Sequential string. Load regexp code according to encoding.
- // (E) Carry on.
- /// [...]
-
- // Deferred code at the end of the stub:
- // (5) Long external string? If not, go to (7).
- // (6) External string. Make it, offset-wise, look like a sequential string.
- // Go to (4).
- // (7) Short external string or not a string? If yes, bail out to runtime.
- // (8) Sliced or thin string. Replace subject with parent. Go to (1).
-
- Label seq_string /* 4 */, external_string /* 6 */, check_underlying /* 1 */,
- not_seq_nor_cons /* 5 */, not_long_external /* 7 */;
-
- __ bind(&check_underlying);
- __ LoadP(r3, FieldMemOperand(subject, HeapObject::kMapOffset));
- __ lbz(r3, FieldMemOperand(r3, Map::kInstanceTypeOffset));
-
- // (1) Sequential string? If yes, go to (4).
-
- STATIC_ASSERT((kIsNotStringMask | kStringRepresentationMask |
- kShortExternalStringMask) == 0xa7);
- __ andi(r4, r3, Operand(kIsNotStringMask | kStringRepresentationMask |
- kShortExternalStringMask));
- STATIC_ASSERT((kStringTag | kSeqStringTag) == 0);
- __ beq(&seq_string, cr0); // Go to (4).
-
- // (2) Sequential or cons? If not, go to (5).
- STATIC_ASSERT(kConsStringTag < kExternalStringTag);
- STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
- STATIC_ASSERT(kThinStringTag > kExternalStringTag);
- STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
- STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
- STATIC_ASSERT(kExternalStringTag < 0xffffu);
- __ cmpi(r4, Operand(kExternalStringTag));
- __ bge(&not_seq_nor_cons); // Go to (5).
-
- // (3) Cons string. Check that it's flat.
- // Replace subject with first string and reload instance type.
- __ LoadP(r3, FieldMemOperand(subject, ConsString::kSecondOffset));
- __ CompareRoot(r3, Heap::kempty_stringRootIndex);
- __ bne(&runtime);
- __ LoadP(subject, FieldMemOperand(subject, ConsString::kFirstOffset));
- __ b(&check_underlying);
-
- // (4) Sequential string. Load regexp code according to encoding.
- __ bind(&seq_string);
- // subject: sequential subject string (or look-alike, external string)
- // r6: original subject string
- // Load previous index and check range before r6 is overwritten. We have to
- // use r6 instead of subject here because subject might have been only made
- // to look like a sequential string when it actually is an external string.
- __ LoadP(r4, MemOperand(sp, kPreviousIndexOffset));
- __ JumpIfNotSmi(r4, &runtime);
- __ LoadP(r6, FieldMemOperand(r6, String::kLengthOffset));
- __ cmpl(r6, r4);
- __ ble(&runtime);
- __ SmiUntag(r4);
-
- STATIC_ASSERT(8 == kOneByteStringTag);
- STATIC_ASSERT(kTwoByteStringTag == 0);
- STATIC_ASSERT(kStringEncodingMask == 8);
- __ ExtractBitMask(r6, r3, kStringEncodingMask, SetRC);
- __ beq(&encoding_type_UC16, cr0);
- __ LoadP(code,
- FieldMemOperand(regexp_data, JSRegExp::kDataOneByteCodeOffset));
- __ b(&br_over);
- __ bind(&encoding_type_UC16);
- __ LoadP(code, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset));
- __ bind(&br_over);
-
- // (E) Carry on. String handling is done.
- // code: irregexp code
- // Check that the irregexp code has been generated for the actual string
- // encoding. If it has, the field contains a code object otherwise it contains
- // a smi (code flushing support).
- __ JumpIfSmi(code, &runtime);
-
- // r4: previous index
- // r6: encoding of subject string (1 if one_byte, 0 if two_byte);
- // code: Address of generated regexp code
- // subject: Subject string
- // regexp_data: RegExp data (FixedArray)
- // All checks done. Now push arguments for native regexp code.
- __ IncrementCounter(isolate()->counters()->regexp_entry_native(), 1, r3, r5);
-
// Isolates: note we add an additional parameter here (isolate pointer).
const int kRegExpExecuteArguments = 10;
const int kParameterRegisters = 8;
@@ -1404,8 +1228,9 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Arguments are before that on the stack or in registers.
// Argument 10 (in stack parameter area): Pass current isolate address.
- __ mov(r3, Operand(ExternalReference::isolate_address(isolate())));
- __ StoreP(r3, MemOperand(sp, (kStackFrameExtraParamSlot + 1) * kPointerSize));
+ __ mov(r11, Operand(ExternalReference::isolate_address(isolate())));
+ __ StoreP(r11,
+ MemOperand(sp, (kStackFrameExtraParamSlot + 1) * kPointerSize));
// Argument 9 is a dummy that reserves the space used for
// the return address added by the ExitFrame in native calls.
@@ -1414,11 +1239,15 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ li(r10, Operand(1));
// Argument 7 (r9): Start (high end) of backtracking stack memory area.
- __ mov(r3, Operand(address_of_regexp_stack_memory_address));
- __ LoadP(r3, MemOperand(r3, 0));
- __ mov(r5, Operand(address_of_regexp_stack_memory_size));
- __ LoadP(r5, MemOperand(r5, 0));
- __ add(r9, r3, r5);
+ ExternalReference address_of_regexp_stack_memory_address =
+ ExternalReference::address_of_regexp_stack_memory_address(isolate());
+ ExternalReference address_of_regexp_stack_memory_size =
+ ExternalReference::address_of_regexp_stack_memory_size(isolate());
+ __ mov(r11, Operand(address_of_regexp_stack_memory_address));
+ __ LoadP(r11, MemOperand(r11, 0));
+ __ mov(ip, Operand(address_of_regexp_stack_memory_size));
+ __ LoadP(ip, MemOperand(ip, 0));
+ __ add(r9, r11, ip);
// Argument 6 (r8): Set the number of capture registers to zero to force
  // global regexps to behave as non-global. This does not affect non-global
@@ -1430,205 +1259,29 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
r7,
Operand(ExternalReference::address_of_static_offsets_vector(isolate())));
- // For arguments 4 (r6) and 3 (r5) get string length, calculate start of data
- // and calculate the shift of the index (0 for one-byte and 1 for two-byte).
- __ addi(r18, subject, Operand(SeqString::kHeaderSize - kHeapObjectTag));
- __ xori(r6, r6, Operand(1));
- // Load the length from the original subject string from the previous stack
- // frame. Therefore we have to use fp, which points exactly to two pointer
- // sizes below the previous sp. (Because creating a new stack frame pushes
- // the previous fp onto the stack and moves up sp by 2 * kPointerSize.)
- __ LoadP(subject, MemOperand(fp, kSubjectOffset + 2 * kPointerSize));
- // If slice offset is not 0, load the length from the original sliced string.
// Argument 4, r6: End of string data
// Argument 3, r5: Start of string data
- // Prepare start and end index of the input.
- __ ShiftLeft_(r11, r11, r6);
- __ add(r11, r18, r11);
- __ ShiftLeft_(r5, r4, r6);
- __ add(r5, r11, r5);
-
- __ LoadP(r18, FieldMemOperand(subject, String::kLengthOffset));
- __ SmiUntag(r18);
- __ ShiftLeft_(r6, r18, r6);
- __ add(r6, r11, r6);
+ CHECK(r6.is(RegExpExecDescriptor::StringEndRegister()));
+ CHECK(r5.is(RegExpExecDescriptor::StringStartRegister()));
// Argument 2 (r4): Previous index.
- // Already there
+ CHECK(r4.is(RegExpExecDescriptor::LastIndexRegister()));
// Argument 1 (r3): Subject string.
- __ mr(r3, subject);
+ CHECK(r3.is(RegExpExecDescriptor::StringRegister()));
// Locate the code entry and call it.
- __ addi(code, code, Operand(Code::kHeaderSize - kHeapObjectTag));
+ Register code_reg = RegExpExecDescriptor::CodeRegister();
+ __ addi(code_reg, code_reg, Operand(Code::kHeaderSize - kHeapObjectTag));
DirectCEntryStub stub(isolate());
- stub.GenerateCall(masm, code);
+ stub.GenerateCall(masm, code_reg);
__ LeaveExitFrame(false, no_reg, true);
- // r3: result (int32)
- // subject: subject string (callee saved)
- // regexp_data: RegExp data (callee saved)
- // last_match_info_elements: Last match info elements (callee saved)
- // Check the result.
- Label success;
- __ cmpwi(r3, Operand(1));
- // We expect exactly one result since we force the called regexp to behave
- // as non-global.
- __ beq(&success);
- Label failure;
- __ cmpwi(r3, Operand(NativeRegExpMacroAssembler::FAILURE));
- __ beq(&failure);
- __ cmpwi(r3, Operand(NativeRegExpMacroAssembler::EXCEPTION));
- // If not exception it can only be retry. Handle that in the runtime system.
- __ bne(&runtime);
- // Result must now be exception. If there is no pending exception already a
- // stack overflow (on the backtrack stack) was detected in RegExp code but
- // haven't created the exception yet. Handle that in the runtime system.
- // TODO(592): Rerunning the RegExp to get the stack overflow exception.
- __ mov(r4, Operand(isolate()->factory()->the_hole_value()));
- __ mov(r5, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
- isolate())));
- __ LoadP(r3, MemOperand(r5, 0));
- __ cmp(r3, r4);
- __ beq(&runtime);
-
- // For exception, throw the exception again.
- __ TailCallRuntime(Runtime::kRegExpExecReThrow);
-
- __ bind(&failure);
- // For failure and exception return null.
- __ mov(r3, Operand(isolate()->factory()->null_value()));
- __ addi(sp, sp, Operand(4 * kPointerSize));
- __ Ret();
-
- // Process the result from the native regexp code.
- __ bind(&success);
- __ LoadP(r4,
- FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
- // Calculate number of capture registers (number_of_captures + 1) * 2.
- // SmiToShortArrayOffset accomplishes the multiplication by 2 and
- // SmiUntag (which is a nop for 32-bit).
- __ SmiToShortArrayOffset(r4, r4);
- __ addi(r4, r4, Operand(2));
-
- // Check that the last match info is a FixedArray.
- __ LoadP(last_match_info_elements, MemOperand(sp, kLastMatchInfoOffset));
- __ JumpIfSmi(last_match_info_elements, &runtime);
- // Check that the object has fast elements.
- __ LoadP(r3,
- FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
- __ CompareRoot(r3, Heap::kFixedArrayMapRootIndex);
- __ bne(&runtime);
- // Check that the last match info has space for the capture registers and the
- // additional information.
- __ LoadP(
- r3, FieldMemOperand(last_match_info_elements, FixedArray::kLengthOffset));
- __ addi(r5, r4, Operand(RegExpMatchInfo::kLastMatchOverhead));
- __ SmiUntag(r0, r3);
- __ cmp(r5, r0);
- __ bgt(&runtime);
-
- // r4: number of capture registers
- // subject: subject string
- // Store the capture count.
- __ SmiTag(r5, r4);
- __ StoreP(r5, FieldMemOperand(last_match_info_elements,
- RegExpMatchInfo::kNumberOfCapturesOffset),
- r0);
- // Store last subject and last input.
- __ StoreP(subject, FieldMemOperand(last_match_info_elements,
- RegExpMatchInfo::kLastSubjectOffset),
- r0);
- __ mr(r5, subject);
- __ RecordWriteField(last_match_info_elements,
- RegExpMatchInfo::kLastSubjectOffset, subject, r10,
- kLRHasNotBeenSaved, kDontSaveFPRegs);
- __ mr(subject, r5);
- __ StoreP(subject, FieldMemOperand(last_match_info_elements,
- RegExpMatchInfo::kLastInputOffset),
- r0);
- __ RecordWriteField(last_match_info_elements,
- RegExpMatchInfo::kLastInputOffset, subject, r10,
- kLRHasNotBeenSaved, kDontSaveFPRegs);
-
- // Get the static offsets vector filled by the native regexp code.
- ExternalReference address_of_static_offsets_vector =
- ExternalReference::address_of_static_offsets_vector(isolate());
- __ mov(r5, Operand(address_of_static_offsets_vector));
-
- // r4: number of capture registers
- // r5: offsets vector
- Label next_capture;
- // Capture register counter starts from number of capture registers and
- // counts down until wrapping after zero.
- __ addi(r3, last_match_info_elements,
- Operand(RegExpMatchInfo::kFirstCaptureOffset - kHeapObjectTag -
- kPointerSize));
- __ addi(r5, r5, Operand(-kIntSize)); // bias down for lwzu
- __ mtctr(r4);
- __ bind(&next_capture);
- // Read the value from the static offsets vector buffer.
- __ lwzu(r6, MemOperand(r5, kIntSize));
- // Store the smi value in the last match info.
- __ SmiTag(r6);
- __ StorePU(r6, MemOperand(r3, kPointerSize));
- __ bdnz(&next_capture);
-
- // Return last match info.
- __ mr(r3, last_match_info_elements);
- __ addi(sp, sp, Operand(4 * kPointerSize));
+ // Return the smi-tagged result.
+ __ SmiTag(r3);
__ Ret();
-
- // Do the runtime call to execute the regexp.
- __ bind(&runtime);
- __ TailCallRuntime(Runtime::kRegExpExec);
-
- // Deferred code for string handling.
- // (5) Long external string? If not, go to (7).
- __ bind(&not_seq_nor_cons);
- // Compare flags are still set.
- __ bgt(&not_long_external); // Go to (7).
-
- // (6) External string. Make it, offset-wise, look like a sequential string.
- __ bind(&external_string);
- __ LoadP(r3, FieldMemOperand(subject, HeapObject::kMapOffset));
- __ lbz(r3, FieldMemOperand(r3, Map::kInstanceTypeOffset));
- if (FLAG_debug_code) {
- // Assert that we do not have a cons or slice (indirect strings) here.
- // Sequential strings have already been ruled out.
- STATIC_ASSERT(kIsIndirectStringMask == 1);
- __ andi(r0, r3, Operand(kIsIndirectStringMask));
- __ Assert(eq, kExternalStringExpectedButNotFound, cr0);
- }
- __ LoadP(subject,
- FieldMemOperand(subject, ExternalString::kResourceDataOffset));
- // Move the pointer so that offset-wise, it looks like a sequential string.
- STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
- __ subi(subject, subject,
- Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
- __ b(&seq_string); // Go to (4).
-
- // (7) Short external string or not a string? If yes, bail out to runtime.
- __ bind(&not_long_external);
- STATIC_ASSERT(kNotStringTag != 0 && kShortExternalStringTag != 0);
- __ andi(r0, r4, Operand(kIsNotStringMask | kShortExternalStringMask));
- __ bne(&runtime, cr0);
-
- // (8) Sliced or thin string. Replace subject with parent. Go to (4).
- Label thin_string;
- __ cmpi(r4, Operand(kThinStringTag));
- __ beq(&thin_string);
- // Load offset into r11 and replace subject string with parent.
- __ LoadP(r11, FieldMemOperand(subject, SlicedString::kOffsetOffset));
- __ SmiUntag(r11);
- __ LoadP(subject, FieldMemOperand(subject, SlicedString::kParentOffset));
- __ b(&check_underlying); // Go to (4).
-
- __ bind(&thin_string);
- __ LoadP(subject, FieldMemOperand(subject, ThinString::kActualOffset));
- __ b(&check_underlying); // Go to (4).
#endif // V8_INTERPRETED_REGEXP
}
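For orientation, the register contract the slimmed-down stub now relies on, collected from the CHECKs above and the RegExpExecDescriptor definitions later in this patch (interface-descriptors-ppc.cc):
// RegExpExecStub inputs on PPC after this change -- the caller resolves the
// subject string and the irregexp code object before entering the stub:
//   r3  : subject string        (RegExpExecDescriptor::StringRegister)
//   r4  : previous index        (RegExpExecDescriptor::LastIndexRegister)
//   r5  : start of string data  (RegExpExecDescriptor::StringStartRegister)
//   r6  : end of string data    (RegExpExecDescriptor::StringEndRegister)
//   r17 : irregexp code object  (RegExpExecDescriptor::CodeRegister)
// The stub returns the native engine's integer result in r3, smi-tagged.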
diff --git a/deps/v8/src/ppc/codegen-ppc.cc b/deps/v8/src/ppc/codegen-ppc.cc
index 212e6db11d..1d4cdd0fcb 100644
--- a/deps/v8/src/ppc/codegen-ppc.cc
+++ b/deps/v8/src/ppc/codegen-ppc.cc
@@ -40,7 +40,8 @@ UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
CodeDesc desc;
masm.GetCode(&desc);
- DCHECK(ABI_USES_FUNCTION_DESCRIPTORS || !RelocInfo::RequiresRelocation(desc));
+ DCHECK(ABI_USES_FUNCTION_DESCRIPTORS ||
+ !RelocInfo::RequiresRelocation(isolate, desc));
Assembler::FlushICache(isolate, buffer, actual_size);
base::OS::ProtectCode(buffer, actual_size);
diff --git a/deps/v8/src/ppc/constants-ppc.h b/deps/v8/src/ppc/constants-ppc.h
index d131438139..e1929dbf63 100644
--- a/deps/v8/src/ppc/constants-ppc.h
+++ b/deps/v8/src/ppc/constants-ppc.h
@@ -555,8 +555,6 @@ typedef uint32_t Instr;
V(evlhhousplatx, EVLHHOUSPLATX, 0x1000030C) \
/* Vector Load Word into Two Half Words Even */ \
V(evlwhe, EVLWHE, 0x10000311) \
- /* Vector Load Word into Two Half Words Even Indexed */ \
- V(evlwhex, EVLWHEX, 0x10000310) \
/* Vector Load Word into Two Half Words Odd Signed (with sign extension) */ \
V(evlwhos, EVLWHOS, 0x10000317) \
/* Vector Load Word into Two Half Words Odd Signed Indexed (with sign */ \
@@ -1079,63 +1077,165 @@ typedef uint32_t Instr;
/* Vector Compare Greater Than Unsigned Word */ \
V(vcmpgtuw, VCMPGTUW, 0x10000286)
-#define PPC_X_OPCODE_LIST(V) \
+#define PPC_X_OPCODE_A_FORM_LIST(V) \
+ /* Modulo Signed Dword */ \
+ V(modsd, MODSD, 0x7C000612) \
+ /* Modulo Unsigned Dword */ \
+ V(modud, MODUD, 0x7C000212) \
+ /* Modulo Signed Word */ \
+ V(modsw, MODSW, 0x7C000616) \
+ /* Modulo Unsigned Word */ \
+ V(moduw, MODUW, 0x7C000216)
+
+#define PPC_X_OPCODE_B_FORM_LIST(V) \
+ /* XOR */ \
+ V(xor_, XORX, 0x7C000278) \
+ /* AND */ \
+ V(and_, ANDX, 0x7C000038) \
+ /* AND with Complement */ \
+ V(andc, ANDCX, 0x7C000078) \
+ /* OR */ \
+ V(orx, ORX, 0x7C000378) \
+ /* OR with Complement */ \
+ V(orc, ORC, 0x7C000338) \
+ /* NOR */ \
+ V(nor, NORX, 0x7C0000F8) \
+ /* Shift Right Word */ \
+ V(srw, SRWX, 0x7C000430) \
+ /* Shift Left Word */ \
+ V(slw, SLWX, 0x7C000030) \
+ /* Shift Right Algebraic Word */ \
+ V(sraw, SRAW, 0x7C000630) \
+ /* Shift Left Doubleword */ \
+ V(sld, SLDX, 0x7C000036) \
+ /* Shift Right Algebraic Doubleword */ \
+ V(srad, SRAD, 0x7C000634) \
+ /* Shift Right Doubleword */ \
+ V(srd, SRDX, 0x7C000436)
+
+#define PPC_X_OPCODE_C_FORM_LIST(V) \
+ /* Count Leading Zeros Word */ \
+ V(cntlzw, CNTLZWX, 0x7C000034) \
+ /* Count Leading Zeros Doubleword */ \
+ V(cntlzd, CNTLZDX, 0x7C000074) \
+ /* Population Count Byte-wise */ \
+ V(popcntb, POPCNTB, 0x7C0000F4) \
+ /* Population Count Words */ \
+ V(popcntw, POPCNTW, 0x7C0002F4) \
+ /* Population Count Doubleword */ \
+ V(popcntd, POPCNTD, 0x7C0003F4) \
+ /* Extend Sign Byte */ \
+ V(extsb, EXTSB, 0x7C000774) \
+ /* Extend Sign Halfword */ \
+ V(extsh, EXTSH, 0x7C000734)
+
+#define PPC_X_OPCODE_D_FORM_LIST(V) \
+ /* Load Halfword Byte-Reverse Indexed */ \
+ V(lhbrx, LHBRX, 0x7C00062C) \
+ /* Load Word Byte-Reverse Indexed */ \
+ V(lwbrx, LWBRX, 0x7C00042C) \
+ /* Load Doubleword Byte-Reverse Indexed */ \
+ V(ldbrx, LDBRX, 0x7C000428) \
+ /* Load Byte and Zero Indexed */ \
+ V(lbzx, LBZX, 0x7C0000AE) \
+ /* Load Byte and Zero with Update Indexed */ \
+ V(lbzux, LBZUX, 0x7C0000EE) \
+ /* Load Halfword and Zero Indexed */ \
+ V(lhzx, LHZX, 0x7C00022E) \
+ /* Load Halfword and Zero with Update Indexed */ \
+ V(lhzux, LHZUX, 0x7C00026E) \
+ /* Load Halfword Algebraic Indexed */ \
+ V(lhax, LHAX, 0x7C0002AE) \
+ /* Load Word and Zero Indexed */ \
+ V(lwzx, LWZX, 0x7C00002E) \
+ /* Load Word and Zero with Update Indexed */ \
+ V(lwzux, LWZUX, 0x7C00006E) \
+ /* Load Doubleword Indexed */ \
+ V(ldx, LDX, 0x7C00002A) \
+ /* Load Doubleword with Update Indexed */ \
+ V(ldux, LDUX, 0x7C00006A) \
+ /* Load Floating-Point Double Indexed */ \
+ V(lfdx, LFDX, 0x7C0004AE) \
+ /* Load Floating-Point Single Indexed */ \
+ V(lfsx, LFSX, 0x7C00042E) \
+ /* Load Floating-Point Double with Update Indexed */ \
+ V(lfdux, LFDUX, 0x7C0004EE) \
+ /* Load Floating-Point Single with Update Indexed */ \
+ V(lfsux, LFSUX, 0x7C00046E) \
+ /* Store Byte with Update Indexed */ \
+ V(stbux, STBUX, 0x7C0001EE) \
+ /* Store Byte Indexed */ \
+ V(stbx, STBX, 0x7C0001AE) \
+ /* Store Halfword with Update Indexed */ \
+ V(sthux, STHUX, 0x7C00036E) \
+ /* Store Halfword Indexed */ \
+ V(sthx, STHX, 0x7C00032E) \
+ /* Store Word with Update Indexed */ \
+ V(stwux, STWUX, 0x7C00016E) \
+ /* Store Word Indexed */ \
+ V(stwx, STWX, 0x7C00012E) \
+ /* Store Doubleword with Update Indexed */ \
+ V(stdux, STDUX, 0x7C00016A) \
+ /* Store Doubleword Indexed */ \
+ V(stdx, STDX, 0x7C00012A) \
+ /* Store Floating-Point Double with Update Indexed */ \
+ V(stfdux, STFDUX, 0x7C0005EE) \
+ /* Store Floating-Point Double Indexed */ \
+ V(stfdx, STFDX, 0x7C0005AE) \
+ /* Store Floating-Point Single with Update Indexed */ \
+ V(stfsux, STFSUX, 0x7C00056E) \
+ /* Store Floating-Point Single Indexed */ \
+ V(stfsx, STFSX, 0x7C00052E)
+
+#define PPC_X_OPCODE_E_FORM_LIST(V) \
+ /* Shift Right Algebraic Word Immediate */ \
+ V(srawi, SRAWIX, 0x7C000670)
+
+#define PPC_X_OPCODE_F_FORM_LIST(V) \
+ /* Compare */ \
+ V(cmp, CMP, 0x7C000000) \
+ /* Compare Logical */ \
+ V(cmpl, CMPL, 0x7C000040)
+
+#define PPC_X_OPCODE_EH_S_FORM_LIST(V) \
+ /* Store Byte Conditional Indexed */ \
+ V(stbcx, STBCX, 0x7C00056D) \
+ /* Store Halfword Conditional Indexed Xform */ \
+ V(sthcx, STHCX, 0x7C0005AD) \
+ /* Store Word Conditional Indexed & record CR0 */ \
+ V(stwcx, STWCX, 0x7C00012D)
+
+#define PPC_X_OPCODE_EH_L_FORM_LIST(V) \
+ /* Load Byte And Reserve Indexed */ \
+ V(lbarx, LBARX, 0x7C000068) \
+ /* Load Halfword And Reserve Indexed Xform */ \
+ V(lharx, LHARX, 0x7C0000E8) \
+ /* Load Word and Reserve Indexed */ \
+ V(lwarx, LWARX, 0x7C000028)
+
+#define PPC_X_OPCODE_UNUSED_LIST(V) \
/* Bit Permute Doubleword */ \
V(bpermd, BPERMD, 0x7C0001F8) \
- /* Count Leading Zeros Doubleword */ \
- V(cntlzd, CNTLZDX, 0x7C000074) \
/* Extend Sign Word */ \
V(extsw, EXTSW, 0x7C0007B4) \
/* Load Doubleword And Reserve Indexed */ \
V(ldarx, LDARX, 0x7C0000A8) \
- /* Load Doubleword Byte-Reverse Indexed */ \
- V(ldbrx, LDBRX, 0x7C000428) \
- /* Load Doubleword with Update Indexed */ \
- V(ldux, LDUX, 0x7C00006A) \
- /* Load Doubleword Indexed */ \
- V(ldx, LDX, 0x7C00002A) \
/* Load Word Algebraic with Update Indexed */ \
V(lwaux, LWAUX, 0x7C0002EA) \
/* Load Word Algebraic Indexed */ \
V(lwax, LWAX, 0x7C0002AA) \
- /* Modulo Signed Dword */ \
- V(modsd, MODSD, 0x7C000612) \
- /* Modulo Unsigned Dword */ \
- V(modud, MODUD, 0x7C000212) \
- /* Population Count Doubleword */ \
- V(popcntd, POPCNTD, 0x7C0003F4) \
/* Parity Doubleword */ \
V(prtyd, PRTYD, 0x7C000174) \
- /* Shift Left Doubleword */ \
- V(sld, SLDX, 0x7C000036) \
- /* Shift Right Algebraic Doubleword */ \
- V(srad, SRAD, 0x7C000634) \
- /* Shift Right Doubleword */ \
- V(srd, SRDX, 0x7C000436) \
/* Store Doubleword Byte-Reverse Indexed */ \
V(stdbrx, STDBRX, 0x7C000528) \
/* Store Doubleword Conditional Indexed & record CR0 */ \
V(stdcx, STDCX, 0x7C0001AD) \
- /* Store Doubleword with Update Indexed */ \
- V(stdux, STDUX, 0x7C00016A) \
- /* Store Doubleword Indexed */ \
- V(stdx, STDX, 0x7C00012A) \
/* Trap Doubleword */ \
V(td, TD, 0x7C000088) \
- /* AND */ \
- V(andx, ANDX, 0x7C000038) \
- /* AND with Complement */ \
- V(andc, ANDCX, 0x7C000078) \
/* Branch Conditional to Branch Target Address Register */ \
V(bctar, BCTAR, 0x4C000460) \
- /* Compare */ \
- V(cmp, CMP, 0x7C000000) \
/* Compare Byte */ \
V(cmpb, CMPB, 0x7C0003F8) \
- /* Compare Logical */ \
- V(cmpl, CMPL, 0x7C000040) \
- /* Count Leading Zeros Word */ \
- V(cntlzw, CNTLZWX, 0x7C000034) \
/* Data Cache Block Flush */ \
V(dcbf, DCBF, 0x7C0000AC) \
/* Data Cache Block Store */ \
@@ -1148,94 +1248,22 @@ typedef uint32_t Instr;
V(dcbz, DCBZ, 0x7C0007EC) \
/* Equivalent */ \
V(eqv, EQV, 0x7C000238) \
- /* Extend Sign Byte */ \
- V(extsb, EXTSB, 0x7C000774) \
- /* Extend Sign Halfword */ \
- V(extsh, EXTSH, 0x7C000734) \
/* Instruction Cache Block Invalidate */ \
V(icbi, ICBI, 0x7C0007AC) \
- /* Load Byte And Reserve Indexed */ \
- V(lbarx, LBARX, 0x7C000068) \
- /* Load Byte and Zero with Update Indexed */ \
- V(lbzux, LBZUX, 0x7C0000EE) \
- /* Load Byte and Zero Indexed */ \
- V(lbzx, LBZX, 0x7C0000AE) \
- /* Load Halfword And Reserve Indexed Xform */ \
- V(lharx, LHARX, 0x7C0000E8) \
- /* Load Halfword Algebraic with Update Indexed */ \
- V(lhaux, LHAUX, 0x7C0002EE) \
- /* Load Halfword Algebraic Indexed */ \
- V(lhax, LHAX, 0x7C0002AE) \
- /* Load Halfword Byte-Reverse Indexed */ \
- V(lhbrx, LHBRX, 0x7C00062C) \
- /* Load Halfword and Zero with Update Indexed */ \
- V(lhzux, LHZUX, 0x7C00026E) \
- /* Load Halfword and Zero Indexed */ \
- V(lhzx, LHZX, 0x7C00022E) \
- /* Load Word and Reserve Indexed */ \
- V(lwarx, LWARX, 0x7C000028) \
- /* Load Word Byte-Reverse Indexed */ \
- V(lwbrx, LWBRX, 0x7C00042C) \
- /* Load Word and Zero with Update Indexed */ \
- V(lwzux, LWZUX, 0x7C00006E) \
- /* Load Word and Zero Indexed */ \
- V(lwzx, LWZX, 0x7C00002E) \
- /* Modulo Signed Word */ \
- V(mods, MODSW, 0x7C000616) \
- /* Modulo Unsigned Word */ \
- V(moduw, MODUW, 0x7C000216) \
/* NAND */ \
V(nand, NAND, 0x7C0003B8) \
- /* NOR */ \
- V(nor, NORX, 0x7C0000F8) \
- /* OR */ \
- V(orx, ORX, 0x7C000378) \
- /* OR with Complement */ \
- V(orc, ORC, 0x7C000338) \
- /* Population Count Byte-wise */ \
- V(popcntb, POPCNTB, 0x7C0000F4) \
- /* Population Count Words */ \
- V(popcntw, POPCNTW, 0x7C0002F4) \
/* Parity Word */ \
V(prtyw, PRTYW, 0x7C000134) \
- /* Shift Left Word */ \
- V(slw, SLWX, 0x7C000030) \
- /* Shift Right Algebraic Word */ \
- V(sraw, SRAW, 0x7C000630) \
- /* Shift Right Algebraic Word Immediate */ \
- V(srawi, SRAWIX, 0x7C000670) \
- /* Shift Right Word */ \
- V(srw, SRWX, 0x7C000430) \
- /* Store Byte Conditional Indexed */ \
- V(stbcx, STBCX, 0x7C00056D) \
- /* Store Byte with Update Indexed */ \
- V(stbux, STBUX, 0x7C0001EE) \
- /* Store Byte Indexed */ \
- V(stbx, STBX, 0x7C0001AE) \
/* Store Halfword Byte-Reverse Indexed */ \
V(sthbrx, STHBRX, 0x7C00072C) \
- /* Store Halfword Conditional Indexed Xform */ \
- V(sthcx, STHCX, 0x7C0005AD) \
- /* Store Halfword with Update Indexed */ \
- V(sthux, STHUX, 0x7C00036E) \
- /* Store Halfword Indexed */ \
- V(sthx, STHX, 0x7C00032E) \
/* Store Word Byte-Reverse Indexed */ \
V(stwbrx, STWBRX, 0x7C00052C) \
- /* Store Word Conditional Indexed & record CR0 */ \
- V(stwcx, STWCX, 0x7C00012D) \
- /* Store Word with Update Indexed */ \
- V(stwux, STWUX, 0x7C00016E) \
- /* Store Word Indexed */ \
- V(stwx, STWX, 0x7C00012E) \
/* Synchronize */ \
V(sync, SYNC, 0x7C0004AC) \
/* Trap Word */ \
V(tw, TW, 0x7C000008) \
  /* Executed No Operation */                                                 \
V(xnop, XNOP, 0x68000000) \
- /* XOR */ \
- V(xorx, XORX, 0x7C000278) \
/* Convert Binary Coded Decimal To Declets */ \
V(cbcdtd, CBCDTD, 0x7C000274) \
/* Convert Declets To Binary Coded Decimal */ \
@@ -1332,8 +1360,6 @@ typedef uint32_t Instr;
V(dcbi, DCBI, 0x7C0003AC) \
/* Instruction Cache Block Touch */ \
V(icbt, ICBT, 0x7C00002C) \
- /* Memory Barrier */ \
- V(mbar, MBAR, 0x7C0006AC) \
/* Move to Condition Register from XER */ \
V(mcrxr, MCRXR, 0x7C000400) \
/* TLB Invalidate Local Indexed */ \
@@ -1436,30 +1462,14 @@ typedef uint32_t Instr;
V(ftdiv, FTDIV, 0xFC000100) \
/* Floating Test for software Square Root */ \
V(ftsqrt, FTSQRT, 0xFC000140) \
- /* Load Floating-Point Double with Update Indexed */ \
- V(lfdux, LFDUX, 0x7C0004EE) \
- /* Load Floating-Point Double Indexed */ \
- V(lfdx, LFDX, 0x7C0004AE) \
/* Load Floating-Point as Integer Word Algebraic Indexed */ \
V(lfiwax, LFIWAX, 0x7C0006AE) \
/* Load Floating-Point as Integer Word and Zero Indexed */ \
V(lfiwzx, LFIWZX, 0x7C0006EE) \
- /* Load Floating-Point Single with Update Indexed */ \
- V(lfsux, LFSUX, 0x7C00046E) \
- /* Load Floating-Point Single Indexed */ \
- V(lfsx, LFSX, 0x7C00042E) \
/* Move To Condition Register from FPSCR */ \
V(mcrfs, MCRFS, 0xFC000080) \
- /* Store Floating-Point Double with Update Indexed */ \
- V(stfdux, STFDUX, 0x7C0005EE) \
- /* Store Floating-Point Double Indexed */ \
- V(stfdx, STFDX, 0x7C0005AE) \
/* Store Floating-Point as Integer Word Indexed */ \
V(stfiwx, STFIWX, 0x7C0007AE) \
- /* Store Floating-Point Single with Update Indexed */ \
- V(stfsux, STFSUX, 0x7C00056E) \
- /* Store Floating-Point Single Indexed */ \
- V(stfsx, STFSX, 0x7C00052E) \
/* Load Floating-Point Double Pair Indexed */ \
V(lfdpx, LFDPX, 0x7C00062E) \
/* Store Floating-Point Double Pair Indexed */ \
@@ -1662,6 +1672,16 @@ typedef uint32_t Instr;
/* Wait for Interrupt */ \
V(wait, WAIT, 0x7C00007C)
+#define PPC_X_OPCODE_LIST(V) \
+ PPC_X_OPCODE_A_FORM_LIST(V) \
+ PPC_X_OPCODE_B_FORM_LIST(V) \
+ PPC_X_OPCODE_C_FORM_LIST(V) \
+ PPC_X_OPCODE_D_FORM_LIST(V) \
+ PPC_X_OPCODE_E_FORM_LIST(V) \
+ PPC_X_OPCODE_F_FORM_LIST(V) \
+ PPC_X_OPCODE_EH_L_FORM_LIST(V) \
+ PPC_X_OPCODE_UNUSED_LIST(V)
+
#define PPC_EVS_OPCODE_LIST(V) \
/* Vector Select */ \
V(evsel, EVSEL, 0x10000278)
@@ -1684,6 +1704,9 @@ typedef uint32_t Instr;
/* Store Quadword */ \
V(stq, STQ, 0xF8000002)
+#define PPC_DQ_OPCODE_LIST(V) \
+ V(lsq, LSQ, 0xE0000000)
+
#define PPC_D_OPCODE_LIST(V) \
/* Trap Doubleword Immediate */ \
V(tdi, TDI, 0x08000000) \
@@ -2025,87 +2048,40 @@ typedef uint32_t Instr;
V(addg, ADDG, 0x7C000094) \
/* Multiply Accumulate Cross Halfword to Word Modulo Signed */ \
V(macchw, MACCHW, 0x10000158) \
- /* Multiply Accumulate Cross Halfword to Word Modulo Signed & record OV */ \
- V(macchwo, MACCHWO, 0x10000158) \
/* Multiply Accumulate Cross Halfword to Word Saturate Signed */ \
V(macchws, MACCHWS, 0x100001D8) \
- /* Multiply Accumulate Cross Halfword to Word Saturate Signed & record */ \
- /* OV */ \
- V(macchwso, MACCHWSO, 0x100001D8) \
/* Multiply Accumulate Cross Halfword to Word Saturate Unsigned */ \
V(macchwsu, MACCHWSU, 0x10000198) \
- /* Multiply Accumulate Cross Halfword to Word Saturate Unsigned & record */ \
- /* OV */ \
- V(macchwsuo, MACCHWSUO, 0x10000198) \
/* Multiply Accumulate Cross Halfword to Word Modulo Unsigned */ \
V(macchwu, MACCHWU, 0x10000118) \
- /* Multiply Accumulate Cross Halfword to Word Modulo Unsigned & record */ \
- /* OV */ \
- V(macchwuo, MACCHWUO, 0x10000118) \
/* Multiply Accumulate High Halfword to Word Modulo Signed */ \
V(machhw, MACHHW, 0x10000058) \
- /* Multiply Accumulate High Halfword to Word Modulo Signed & record OV */ \
- V(machhwo, MACHHWO, 0x10000058) \
/* Multiply Accumulate High Halfword to Word Saturate Signed */ \
V(machhws, MACHHWS, 0x100000D8) \
- /* Multiply Accumulate High Halfword to Word Saturate Signed & record OV */ \
- V(machhwso, MACHHWSO, 0x100000D8) \
/* Multiply Accumulate High Halfword to Word Saturate Unsigned */ \
V(machhwsu, MACHHWSU, 0x10000098) \
- /* Multiply Accumulate High Halfword to Word Saturate Unsigned & record */ \
- /* OV */ \
- V(machhwsuo, MACHHWSUO, 0x10000098) \
/* Multiply Accumulate High Halfword to Word Modulo Unsigned */ \
V(machhwu, MACHHWU, 0x10000018) \
- /* Multiply Accumulate High Halfword to Word Modulo Unsigned & record OV */ \
- V(machhwuo, MACHHWUO, 0x10000018) \
/* Multiply Accumulate Low Halfword to Word Modulo Signed */ \
V(maclhw, MACLHW, 0x10000358) \
- /* Multiply Accumulate Low Halfword to Word Modulo Signed & record OV */ \
- V(maclhwo, MACLHWO, 0x10000358) \
/* Multiply Accumulate Low Halfword to Word Saturate Signed */ \
V(maclhws, MACLHWS, 0x100003D8) \
- /* Multiply Accumulate Low Halfword to Word Saturate Signed & record OV */ \
- V(maclhwso, MACLHWSO, 0x100003D8) \
/* Multiply Accumulate Low Halfword to Word Saturate Unsigned */ \
V(maclhwsu, MACLHWSU, 0x10000398) \
- /* Multiply Accumulate Low Halfword to Word Saturate Unsigned & record */ \
- /* OV */ \
- V(maclhwsuo, MACLHWSUO, 0x10000398) \
/* Multiply Accumulate Low Halfword to Word Modulo Unsigned */ \
V(maclhwu, MACLHWU, 0x10000318) \
- /* Multiply Accumulate Low Halfword to Word Modulo Unsigned & record OV */ \
- V(maclhwuo, MACLHWUO, 0x10000318) \
/* Negative Multiply Accumulate Cross Halfword to Word Modulo Signed */ \
V(nmacchw, NMACCHW, 0x1000015C) \
- /* Negative Multiply Accumulate Cross Halfword to Word Modulo Signed & */ \
- /* record OV */ \
- V(nmacchwo, NMACCHWO, 0x1000015C) \
/* Negative Multiply Accumulate Cross Halfword to Word Saturate Signed */ \
V(nmacchws, NMACCHWS, 0x100001DC) \
- /* Negative Multiply Accumulate Cross Halfword to Word Saturate Signed & */ \
- /* record OV */ \
- V(nmacchwso, NMACCHWSO, 0x100001DC) \
/* Negative Multiply Accumulate High Halfword to Word Modulo Signed */ \
V(nmachhw, NMACHHW, 0x1000005C) \
- /* Negative Multiply Accumulate High Halfword to Word Modulo Signed & */ \
- /* record OV */ \
- V(nmachhwo, NMACHHWO, 0x1000005C) \
/* Negative Multiply Accumulate High Halfword to Word Saturate Signed */ \
V(nmachhws, NMACHHWS, 0x100000DC) \
- /* Negative Multiply Accumulate High Halfword to Word Saturate Signed & */ \
- /* record OV */ \
- V(nmachhwso, NMACHHWSO, 0x100000DC) \
/* Negative Multiply Accumulate Low Halfword to Word Modulo Signed */ \
V(nmaclhw, NMACLHW, 0x1000035C) \
- /* Negative Multiply Accumulate Low Halfword to Word Modulo Signed & */ \
- /* record OV */ \
- V(nmaclhwo, NMACLHWO, 0x1000035C) \
/* Negative Multiply Accumulate Low Halfword to Word Saturate Signed */ \
V(nmaclhws, NMACLHWS, 0x100003DC) \
- /* Negative Multiply Accumulate Low Halfword to Word Saturate Signed & */ \
- /* record OV */ \
- V(nmaclhwso, NMACLHWSO, 0x100003DC)
#define PPC_XL_OPCODE_LIST(V) \
/* Branch Conditional to Count Register */ \
@@ -2533,36 +2509,36 @@ typedef uint32_t Instr;
/* System Call */ \
V(sc, SC, 0x44000002)
-
-#define PPC_OPCODE_LIST(V) \
- PPC_X_OPCODE_LIST(V) \
- PPC_XO_OPCODE_LIST(V) \
- PPC_DS_OPCODE_LIST(V) \
- PPC_MDS_OPCODE_LIST(V) \
- PPC_MD_OPCODE_LIST(V) \
- PPC_XS_OPCODE_LIST(V) \
- PPC_D_OPCODE_LIST(V) \
- PPC_I_OPCODE_LIST(V) \
- PPC_B_OPCODE_LIST(V) \
- PPC_XL_OPCODE_LIST(V) \
- PPC_A_OPCODE_LIST(V) \
- PPC_XFX_OPCODE_LIST(V) \
- PPC_M_OPCODE_LIST(V) \
- PPC_SC_OPCODE_LIST(V) \
- PPC_Z23_OPCODE_LIST(V) \
- PPC_Z22_OPCODE_LIST(V) \
- PPC_EVX_OPCODE_LIST(V) \
- PPC_XFL_OPCODE_LIST(V) \
- PPC_EVS_OPCODE_LIST(V) \
- PPC_VX_OPCODE_LIST(V) \
- PPC_VA_OPCODE_LIST(V) \
- PPC_VC_OPCODE_LIST(V) \
- PPC_XX1_OPCODE_LIST(V) \
- PPC_XX2_OPCODE_LIST(V) \
- PPC_XX3_OPCODE_LIST(V) \
+#define PPC_OPCODE_LIST(V) \
+ PPC_X_OPCODE_LIST(V) \
+ PPC_X_OPCODE_EH_S_FORM_LIST(V) \
+ PPC_XO_OPCODE_LIST(V) \
+ PPC_DS_OPCODE_LIST(V) \
+ PPC_DQ_OPCODE_LIST(V) \
+ PPC_MDS_OPCODE_LIST(V) \
+ PPC_MD_OPCODE_LIST(V) \
+ PPC_XS_OPCODE_LIST(V) \
+ PPC_D_OPCODE_LIST(V) \
+ PPC_I_OPCODE_LIST(V) \
+ PPC_B_OPCODE_LIST(V) \
+ PPC_XL_OPCODE_LIST(V) \
+ PPC_A_OPCODE_LIST(V) \
+ PPC_XFX_OPCODE_LIST(V) \
+ PPC_M_OPCODE_LIST(V) \
+ PPC_SC_OPCODE_LIST(V) \
+ PPC_Z23_OPCODE_LIST(V) \
+ PPC_Z22_OPCODE_LIST(V) \
+ PPC_EVX_OPCODE_LIST(V) \
+ PPC_XFL_OPCODE_LIST(V) \
+ PPC_EVS_OPCODE_LIST(V) \
+ PPC_VX_OPCODE_LIST(V) \
+ PPC_VA_OPCODE_LIST(V) \
+ PPC_VC_OPCODE_LIST(V) \
+ PPC_XX1_OPCODE_LIST(V) \
+ PPC_XX2_OPCODE_LIST(V) \
+ PPC_XX3_OPCODE_LIST(V) \
PPC_XX4_OPCODE_LIST(V)
-
enum Opcode : uint32_t {
#define DECLARE_INSTRUCTION(name, opcode_name, opcode_value) \
opcode_name = opcode_value,
@@ -2641,6 +2617,11 @@ enum RCBit { // Bit 0
SetRC = 1, // LT,GT,EQ,SO
LeaveRC = 0 // None
};
+// Exclusive Access hint bit
+enum EHBit { // Bit 0
+ SetEH = 1, // Exclusive Access
+ LeaveEH = 0 // Atomic Update
+};
// Link bit
enum LKBit { // Bit 0
@@ -2817,10 +2798,118 @@ class Instruction {
DECLARE_STATIC_ACCESSOR(RCValue);
inline int OpcodeValue() const { return static_cast<Opcode>(Bits(31, 26)); }
- inline Opcode OpcodeField() const {
+ inline uint32_t OpcodeField() const {
return static_cast<Opcode>(BitField(31, 26));
}
+#define OPCODE_CASES(name, opcode_name, opcode_value) \
+ case opcode_name:
+
+ inline Opcode OpcodeBase() const {
+ uint32_t opcode = OpcodeField();
+ uint32_t extcode = OpcodeField();
+ switch (opcode) {
+ PPC_D_OPCODE_LIST(OPCODE_CASES)
+ PPC_I_OPCODE_LIST(OPCODE_CASES)
+ PPC_B_OPCODE_LIST(OPCODE_CASES)
+ PPC_M_OPCODE_LIST(OPCODE_CASES)
+ return static_cast<Opcode>(opcode);
+ }
+
+ opcode = extcode | BitField(10, 0);
+ switch (opcode) {
+ PPC_VX_OPCODE_LIST(OPCODE_CASES)
+ PPC_X_OPCODE_EH_S_FORM_LIST(OPCODE_CASES)
+ return static_cast<Opcode>(opcode);
+ }
+ opcode = extcode | BitField(9, 0);
+ switch (opcode) {
+ PPC_VC_OPCODE_LIST(OPCODE_CASES)
+ return static_cast<Opcode>(opcode);
+ }
+ opcode = extcode | BitField(10, 1) | BitField(20, 20);
+ switch (opcode) {
+ PPC_XFX_OPCODE_LIST(OPCODE_CASES)
+ return static_cast<Opcode>(opcode);
+ }
+ opcode = extcode | BitField(10, 1);
+ switch (opcode) {
+ PPC_X_OPCODE_LIST(OPCODE_CASES)
+ PPC_XL_OPCODE_LIST(OPCODE_CASES)
+ PPC_XFL_OPCODE_LIST(OPCODE_CASES)
+ PPC_XX1_OPCODE_LIST(OPCODE_CASES)
+ PPC_XX2_OPCODE_LIST(OPCODE_CASES)
+ PPC_EVX_OPCODE_LIST(OPCODE_CASES)
+ return static_cast<Opcode>(opcode);
+ }
+ opcode = extcode | BitField(9, 1);
+ switch (opcode) {
+ PPC_XO_OPCODE_LIST(OPCODE_CASES)
+ PPC_Z22_OPCODE_LIST(OPCODE_CASES)
+ return static_cast<Opcode>(opcode);
+ }
+ opcode = extcode | BitField(10, 2);
+ switch (opcode) {
+ PPC_XS_OPCODE_LIST(OPCODE_CASES)
+ return static_cast<Opcode>(opcode);
+ }
+ opcode = extcode | BitField(10, 3);
+ switch (opcode) {
+ PPC_EVS_OPCODE_LIST(OPCODE_CASES)
+ PPC_XX3_OPCODE_LIST(OPCODE_CASES)
+ return static_cast<Opcode>(opcode);
+ }
+ opcode = extcode | BitField(8, 1);
+ switch (opcode) {
+ PPC_Z23_OPCODE_LIST(OPCODE_CASES)
+ return static_cast<Opcode>(opcode);
+ }
+ opcode = extcode | BitField(5, 0);
+ switch (opcode) {
+ PPC_VA_OPCODE_LIST(OPCODE_CASES)
+ return static_cast<Opcode>(opcode);
+ }
+ opcode = extcode | BitField(5, 1);
+ switch (opcode) {
+ PPC_A_OPCODE_LIST(OPCODE_CASES)
+ return static_cast<Opcode>(opcode);
+ }
+ opcode = extcode | BitField(4, 1);
+ switch (opcode) {
+ PPC_MDS_OPCODE_LIST(OPCODE_CASES)
+ return static_cast<Opcode>(opcode);
+ }
+ opcode = extcode | BitField(4, 2);
+ switch (opcode) {
+ PPC_MD_OPCODE_LIST(OPCODE_CASES)
+ return static_cast<Opcode>(opcode);
+ }
+ opcode = extcode | BitField(5, 4);
+ switch (opcode) {
+ PPC_XX4_OPCODE_LIST(OPCODE_CASES)
+ return static_cast<Opcode>(opcode);
+ }
+ opcode = extcode | BitField(2, 0);
+ switch (opcode) {
+ PPC_DQ_OPCODE_LIST(OPCODE_CASES)
+ return static_cast<Opcode>(opcode);
+ }
+ opcode = extcode | BitField(1, 0);
+ switch (opcode) {
+ PPC_DS_OPCODE_LIST(OPCODE_CASES)
+ return static_cast<Opcode>(opcode);
+ }
+ opcode = extcode | BitField(1, 1);
+ switch (opcode) {
+ PPC_SC_OPCODE_LIST(OPCODE_CASES)
+ return static_cast<Opcode>(opcode);
+ }
+ UNIMPLEMENTED();
+ return static_cast<Opcode>(0);
+ }
+
+#undef OPCODE_CASES
+
// Fields used in Software interrupt instructions
inline SoftwareInterruptCodes SvcValue() const {
return static_cast<SoftwareInterruptCodes>(Bits(23, 0));
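A worked example of the new OpcodeBase() lookup, using values taken from the opcode lists above (illustrative, not part of the commit):
// For modsw (MODSW == 0x7C000616): OpcodeField() returns the in-place primary
// opcode bits, 0x7C000000 (EXT2), which matches none of the D/I/B/M
// primary-opcode lists. The pass that ORs in BitField(10, 1) then rebuilds
//   0x7C000000 | 0x616 == 0x7C000616 == MODSW
// via PPC_X_OPCODE_A_FORM_LIST (part of PPC_X_OPCODE_LIST), so OpcodeBase()
// returns MODSW.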
diff --git a/deps/v8/src/ppc/deoptimizer-ppc.cc b/deps/v8/src/ppc/deoptimizer-ppc.cc
index b96dc6fece..c8ad31cf2a 100644
--- a/deps/v8/src/ppc/deoptimizer-ppc.cc
+++ b/deps/v8/src/ppc/deoptimizer-ppc.cc
@@ -121,6 +121,7 @@ void Deoptimizer::TableEntryGenerator::Generate() {
RegList saved_regs = restored_regs | sp.bit();
const int kDoubleRegsSize = kDoubleSize * DoubleRegister::kNumRegisters;
+ const int kFloatRegsSize = kFloatSize * FloatRegister::kNumRegisters;
// Save all double registers before messing with them.
__ subi(sp, sp, Operand(kDoubleRegsSize));
@@ -131,6 +132,14 @@ void Deoptimizer::TableEntryGenerator::Generate() {
int offset = code * kDoubleSize;
__ stfd(dreg, MemOperand(sp, offset));
}
+ // Save all float registers before messing with them.
+ __ subi(sp, sp, Operand(kFloatRegsSize));
+ for (int i = 0; i < config->num_allocatable_float_registers(); ++i) {
+ int code = config->GetAllocatableFloatCode(i);
+ const FloatRegister freg = FloatRegister::from_code(code);
+ int offset = code * kFloatSize;
+ __ stfs(freg, MemOperand(sp, offset));
+ }
// Push saved_regs (needed to populate FrameDescription::registers_).
// Leave gaps for other registers.
@@ -145,7 +154,7 @@ void Deoptimizer::TableEntryGenerator::Generate() {
__ StoreP(fp, MemOperand(ip));
const int kSavedRegistersAreaSize =
- (kNumberOfRegisters * kPointerSize) + kDoubleRegsSize;
+ (kNumberOfRegisters * kPointerSize) + kDoubleRegsSize + kFloatRegsSize;
// Get the bailout id from the stack.
__ LoadP(r5, MemOperand(sp, kSavedRegistersAreaSize));
@@ -196,11 +205,21 @@ void Deoptimizer::TableEntryGenerator::Generate() {
for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
int code = config->GetAllocatableDoubleCode(i);
int dst_offset = code * kDoubleSize + double_regs_offset;
- int src_offset = code * kDoubleSize + kNumberOfRegisters * kPointerSize;
+ int src_offset =
+ code * kDoubleSize + kNumberOfRegisters * kPointerSize + kFloatRegsSize;
__ lfd(d0, MemOperand(sp, src_offset));
__ stfd(d0, MemOperand(r4, dst_offset));
}
-
+ int float_regs_offset = FrameDescription::float_registers_offset();
+ // Copy float registers to
+ // float_registers_[FloatRegister::kNumRegisters]
+ for (int i = 0; i < config->num_allocatable_float_registers(); ++i) {
+ int code = config->GetAllocatableFloatCode(i);
+ int dst_offset = code * kFloatSize + float_regs_offset;
+ int src_offset = code * kFloatSize + kNumberOfRegisters * kPointerSize;
+ __ lfs(d0, MemOperand(sp, src_offset));
+ __ stfs(d0, MemOperand(r4, dst_offset));
+ }
// Remove the bailout id and the saved registers from the stack.
__ addi(sp, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));
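The save-area layout that results from the two hunks above, reconstructed here for reference (floats are pushed after the doubles, which is why the double reads gain a kFloatRegsSize bias):
//   sp + kNumberOfRegisters * kPointerSize + kFloatRegsSize : saved doubles
//   sp + kNumberOfRegisters * kPointerSize                  : saved floats
//   sp                                                      : saved GP registers
// kSavedRegistersAreaSize grows by kFloatRegsSize accordingly, and the bailout
// id is still read from sp + kSavedRegistersAreaSize.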
diff --git a/deps/v8/src/ppc/disasm-ppc.cc b/deps/v8/src/ppc/disasm-ppc.cc
index 6baf3d0c7f..3651f4c7ef 100644
--- a/deps/v8/src/ppc/disasm-ppc.cc
+++ b/deps/v8/src/ppc/disasm-ppc.cc
@@ -562,6 +562,10 @@ void Decoder::DecodeExt2(Instruction* instr) {
return;
}
#endif
+ case SYNC: {
+ Format(instr, "sync");
+ return;
+ }
case MODSW: {
Format(instr, "modsw 'rt, 'ra, 'rb");
return;
@@ -649,6 +653,21 @@ void Decoder::DecodeExt2(Instruction* instr) {
}
}
+ switch (EXT2 | (instr->BitField(10, 0))) {
+ case STBCX: {
+ Format(instr, "stbcx 'rs, 'ra, 'rb");
+ return;
+ }
+ case STHCX: {
+ Format(instr, "sthcx 'rs, 'ra, 'rb");
+ return;
+ }
+ case STWCX: {
+ Format(instr, "stwcx 'rs, 'ra, 'rb");
+ return;
+ }
+ }
+
// ?? are all of these xo_form?
switch (EXT2 | (instr->BitField(9, 1))) {
case CMP: {
@@ -859,6 +878,18 @@ void Decoder::DecodeExt2(Instruction* instr) {
Format(instr, "lhax 'rt, 'ra, 'rb");
return;
}
+ case LBARX: {
+ Format(instr, "lbarx 'rt, 'ra, 'rb");
+ return;
+ }
+ case LHARX: {
+ Format(instr, "lharx 'rt, 'ra, 'rb");
+ return;
+ }
+ case LWARX: {
+ Format(instr, "lwarx 'rt, 'ra, 'rb");
+ return;
+ }
#if V8_TARGET_ARCH_PPC64
case LDX: {
Format(instr, "ldx 'rt, 'ra, 'rb");
diff --git a/deps/v8/src/ppc/interface-descriptors-ppc.cc b/deps/v8/src/ppc/interface-descriptors-ppc.cc
index ed03094dbd..734ed4af36 100644
--- a/deps/v8/src/ppc/interface-descriptors-ppc.cc
+++ b/deps/v8/src/ppc/interface-descriptors-ppc.cc
@@ -52,11 +52,15 @@ const Register ApiGetterDescriptor::CallbackRegister() { return r6; }
const Register MathPowTaggedDescriptor::exponent() { return r5; }
-
const Register MathPowIntegerDescriptor::exponent() {
return MathPowTaggedDescriptor::exponent();
}
+const Register RegExpExecDescriptor::StringRegister() { return r3; }
+const Register RegExpExecDescriptor::LastIndexRegister() { return r4; }
+const Register RegExpExecDescriptor::StringStartRegister() { return r5; }
+const Register RegExpExecDescriptor::StringEndRegister() { return r6; }
+const Register RegExpExecDescriptor::CodeRegister() { return r17; }
const Register GrowArrayElementsDescriptor::ObjectRegister() { return r3; }
const Register GrowArrayElementsDescriptor::KeyRegister() { return r6; }
@@ -281,34 +285,6 @@ void StringAddDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-
-void KeyedDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {
- r5, // key
- };
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
-void NamedDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {
- r5, // name
- };
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
-void CallHandlerDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {
- r3, // receiver
- };
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
void ArgumentAdaptorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
@@ -339,7 +315,7 @@ void InterpreterDispatchDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void InterpreterPushArgsAndCallDescriptor::InitializePlatformSpecific(
+void InterpreterPushArgsThenCallDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
r3, // argument count (not including receiver)
@@ -349,7 +325,7 @@ void InterpreterPushArgsAndCallDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void InterpreterPushArgsAndConstructDescriptor::InitializePlatformSpecific(
+void InterpreterPushArgsThenConstructDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
r3, // argument count (not including receiver)
@@ -361,8 +337,8 @@ void InterpreterPushArgsAndConstructDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void InterpreterPushArgsAndConstructArrayDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
+void InterpreterPushArgsThenConstructArrayDescriptor::
+ InitializePlatformSpecific(CallInterfaceDescriptorData* data) {
Register registers[] = {
r3, // argument count (not including receiver)
r4, // target to call checked to be Array function
@@ -387,7 +363,8 @@ void ResumeGeneratorDescriptor::InitializePlatformSpecific(
Register registers[] = {
r3, // the value to pass to the generator
r4, // the JSGeneratorObject to resume
- r5 // the resume mode (tagged)
+ r5, // the resume mode (tagged)
+ r6 // SuspendFlags (tagged)
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
diff --git a/deps/v8/src/ppc/macro-assembler-ppc.cc b/deps/v8/src/ppc/macro-assembler-ppc.cc
index f2aa2e06f4..4aa901c177 100644
--- a/deps/v8/src/ppc/macro-assembler-ppc.cc
+++ b/deps/v8/src/ppc/macro-assembler-ppc.cc
@@ -20,14 +20,15 @@
namespace v8 {
namespace internal {
-MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size,
+MacroAssembler::MacroAssembler(Isolate* isolate, void* buffer, int size,
CodeObjectRequired create_code_object)
- : Assembler(arg_isolate, buffer, size),
+ : Assembler(isolate, buffer, size),
generating_stub_(false),
- has_frame_(false) {
+ has_frame_(false),
+ isolate_(isolate) {
if (create_code_object == CodeObjectRequired::kYes) {
code_object_ =
- Handle<Object>::New(isolate()->heap()->undefined_value(), isolate());
+ Handle<Object>::New(isolate_->heap()->undefined_value(), isolate_);
}
}
@@ -1544,21 +1545,12 @@ void MacroAssembler::IsObjectJSStringType(Register object, Register scratch,
}
-void MacroAssembler::IsObjectNameType(Register object, Register scratch,
- Label* fail) {
- LoadP(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
- lbz(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
- cmpi(scratch, Operand(LAST_NAME_TYPE));
- bgt(fail);
-}
-
-
void MacroAssembler::MaybeDropFrames() {
// Check whether we need to drop frames to restart a function on the stack.
ExternalReference restart_fp =
ExternalReference::debug_restart_fp_address(isolate());
mov(r4, Operand(restart_fp));
- LoadWordArith(r4, MemOperand(r4));
+ LoadP(r4, MemOperand(r4));
cmpi(r4, Operand::Zero());
Jump(isolate()->builtins()->FrameDropperTrampoline(), RelocInfo::CODE_TARGET,
ne);
@@ -2089,29 +2081,6 @@ void MacroAssembler::CheckMap(Register obj, Register scratch,
}
-void MacroAssembler::DispatchWeakMap(Register obj, Register scratch1,
- Register scratch2, Handle<WeakCell> cell,
- Handle<Code> success,
- SmiCheckType smi_check_type) {
- Label fail;
- if (smi_check_type == DO_SMI_CHECK) {
- JumpIfSmi(obj, &fail);
- }
- LoadP(scratch1, FieldMemOperand(obj, HeapObject::kMapOffset));
- CmpWeakValue(scratch1, cell, scratch2);
- Jump(success, RelocInfo::CODE_TARGET, eq);
- bind(&fail);
-}
-
-
-void MacroAssembler::CmpWeakValue(Register value, Handle<WeakCell> cell,
- Register scratch, CRegister cr) {
- mov(scratch, Operand(cell));
- LoadP(scratch, FieldMemOperand(scratch, WeakCell::kValueOffset));
- cmp(value, scratch, cr);
-}
-
-
void MacroAssembler::GetWeakValue(Register value, Handle<WeakCell> cell) {
mov(value, Operand(cell));
LoadP(value, FieldMemOperand(value, WeakCell::kValueOffset));
@@ -2124,7 +2093,6 @@ void MacroAssembler::LoadWeakValue(Register value, Handle<WeakCell> cell,
JumpIfSmi(value, miss);
}
-
void MacroAssembler::GetMapConstructor(Register result, Register map,
Register temp, Register temp2) {
Label done, loop;
@@ -2464,27 +2432,6 @@ void MacroAssembler::Assert(Condition cond, BailoutReason reason,
}
-void MacroAssembler::AssertFastElements(Register elements) {
- if (emit_debug_code()) {
- DCHECK(!elements.is(r0));
- Label ok;
- push(elements);
- LoadP(elements, FieldMemOperand(elements, HeapObject::kMapOffset));
- LoadRoot(r0, Heap::kFixedArrayMapRootIndex);
- cmp(elements, r0);
- beq(&ok);
- LoadRoot(r0, Heap::kFixedDoubleArrayMapRootIndex);
- cmp(elements, r0);
- beq(&ok);
- LoadRoot(r0, Heap::kFixedCOWArrayMapRootIndex);
- cmp(elements, r0);
- beq(&ok);
- Abort(kJSObjectWithFastElementsMapHasSlowElements);
- bind(&ok);
- pop(elements);
- }
-}
-
void MacroAssembler::Check(Condition cond, BailoutReason reason, CRegister cr) {
Label L;
@@ -2635,18 +2582,6 @@ void MacroAssembler::JumpIfEitherSmi(Register reg1, Register reg2,
JumpIfSmi(reg2, on_either_smi);
}
-void MacroAssembler::AssertNotNumber(Register object) {
- if (emit_debug_code()) {
- STATIC_ASSERT(kSmiTag == 0);
- TestIfSmi(object, r0);
- Check(ne, kOperandIsANumber, cr0);
- push(object);
- CompareObjectType(object, object, object, HEAP_NUMBER_TYPE);
- pop(object);
- Check(ne, kOperandIsANumber);
- }
-}
-
void MacroAssembler::AssertNotSmi(Register object) {
if (emit_debug_code()) {
STATIC_ASSERT(kSmiTag == 0);
@@ -2665,34 +2600,6 @@ void MacroAssembler::AssertSmi(Register object) {
}
-void MacroAssembler::AssertString(Register object) {
- if (emit_debug_code()) {
- STATIC_ASSERT(kSmiTag == 0);
- TestIfSmi(object, r0);
- Check(ne, kOperandIsASmiAndNotAString, cr0);
- push(object);
- LoadP(object, FieldMemOperand(object, HeapObject::kMapOffset));
- CompareInstanceType(object, object, FIRST_NONSTRING_TYPE);
- pop(object);
- Check(lt, kOperandIsNotAString);
- }
-}
-
-
-void MacroAssembler::AssertName(Register object) {
- if (emit_debug_code()) {
- STATIC_ASSERT(kSmiTag == 0);
- TestIfSmi(object, r0);
- Check(ne, kOperandIsASmiAndNotAName, cr0);
- push(object);
- LoadP(object, FieldMemOperand(object, HeapObject::kMapOffset));
- CompareInstanceType(object, object, LAST_NAME_TYPE);
- pop(object);
- Check(le, kOperandIsNotAName);
- }
-}
-
-
void MacroAssembler::AssertFunction(Register object) {
if (emit_debug_code()) {
STATIC_ASSERT(kSmiTag == 0);
@@ -2718,29 +2625,33 @@ void MacroAssembler::AssertBoundFunction(Register object) {
}
}
-void MacroAssembler::AssertGeneratorObject(Register object) {
- if (emit_debug_code()) {
- STATIC_ASSERT(kSmiTag == 0);
- TestIfSmi(object, r0);
- Check(ne, kOperandIsASmiAndNotAGeneratorObject, cr0);
- push(object);
- CompareObjectType(object, object, object, JS_GENERATOR_OBJECT_TYPE);
- pop(object);
- Check(eq, kOperandIsNotAGeneratorObject);
- }
-}
+void MacroAssembler::AssertGeneratorObject(Register object, Register flags) {
+ // `flags` should be an untagged integer. See `SuspendFlags` in src/globals.h
+ if (!emit_debug_code()) return;
+ TestIfSmi(object, r0);
+ Check(ne, kOperandIsASmiAndNotAGeneratorObject, cr0);
-void MacroAssembler::AssertReceiver(Register object) {
- if (emit_debug_code()) {
- STATIC_ASSERT(kSmiTag == 0);
- TestIfSmi(object, r0);
- Check(ne, kOperandIsASmiAndNotAReceiver, cr0);
- push(object);
- STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
- CompareObjectType(object, object, object, FIRST_JS_RECEIVER_TYPE);
- pop(object);
- Check(ge, kOperandIsNotAReceiver);
- }
+ // Load map
+ Register map = object;
+ push(object);
+ LoadP(map, FieldMemOperand(object, HeapObject::kMapOffset));
+
+ Label async, do_check;
+ TestBitMask(flags, static_cast<int>(SuspendFlags::kGeneratorTypeMask), r0);
+ bne(&async, cr0);
+
+ // Check if JSGeneratorObject
+ CompareInstanceType(map, object, JS_GENERATOR_OBJECT_TYPE);
+ b(&do_check);
+
+ bind(&async);
+ // Check if JSAsyncGeneratorObject
+ CompareInstanceType(map, object, JS_ASYNC_GENERATOR_OBJECT_TYPE);
+
+ bind(&do_check);
+ // Restore generator object to register and perform assertion
+ pop(object);
+ Check(eq, kOperandIsNotAGeneratorObject);
}
void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
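AssertGeneratorObject now takes the untagged SuspendFlags value so it can pick the expected instance type; an illustrative call site (the register choices are assumptions, not taken from this patch):
//   // r4: object under test, r6: untagged SuspendFlags
//   __ AssertGeneratorObject(r4, r6);
//   // If the SuspendFlags generator-type bits are set, the check expects
//   // JS_ASYNC_GENERATOR_OBJECT_TYPE, otherwise JS_GENERATOR_OBJECT_TYPE.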
diff --git a/deps/v8/src/ppc/macro-assembler-ppc.h b/deps/v8/src/ppc/macro-assembler-ppc.h
index a1d2932f43..5f4c8ac9be 100644
--- a/deps/v8/src/ppc/macro-assembler-ppc.h
+++ b/deps/v8/src/ppc/macro-assembler-ppc.h
@@ -111,6 +111,7 @@ class MacroAssembler : public Assembler {
MacroAssembler(Isolate* isolate, void* buffer, int size,
CodeObjectRequired create_code_object);
+ Isolate* isolate() const { return isolate_; }
// Returns the size of a call in instructions. Note, the value returned is
// only valid as long as no entries are added to the constant pool between
@@ -633,8 +634,6 @@ class MacroAssembler : public Assembler {
void IsObjectJSStringType(Register object, Register scratch, Label* fail);
- void IsObjectNameType(Register object, Register scratch, Label* fail);
-
void DebugBreak();
// Frame restart support
void MaybeDropFrames();
@@ -788,17 +787,6 @@ class MacroAssembler : public Assembler {
Label* fail, SmiCheckType smi_check_type);
- // Check if the map of an object is equal to a specified weak map and branch
- // to a specified target if equal. Skip the smi check if not required
- // (object is known to be a heap object)
- void DispatchWeakMap(Register obj, Register scratch1, Register scratch2,
- Handle<WeakCell> cell, Handle<Code> success,
- SmiCheckType smi_check_type);
-
- // Compare the given value and the value of weak cell.
- void CmpWeakValue(Register value, Handle<WeakCell> cell, Register scratch,
- CRegister cr = cr7);
-
void GetWeakValue(Register value, Handle<WeakCell> cell);
// Load the value of the weak cell in the value register. Branch to the given
@@ -1033,7 +1021,6 @@ class MacroAssembler : public Assembler {
// Calls Abort(msg) if the condition cond is not satisfied.
// Use --debug_code to enable.
void Assert(Condition cond, BailoutReason reason, CRegister cr = cr7);
- void AssertFastElements(Register elements);
// Like Assert(), but always enabled.
void Check(Condition cond, BailoutReason reason, CRegister cr = cr7);
@@ -1283,9 +1270,6 @@ class MacroAssembler : public Assembler {
// Jump if either of the registers contain a smi.
void JumpIfEitherSmi(Register reg1, Register reg2, Label* on_either_smi);
- // Abort execution if argument is a number, enabled via --debug-code.
- void AssertNotNumber(Register object);
-
// Abort execution if argument is a smi, enabled via --debug-code.
void AssertNotSmi(Register object);
void AssertSmi(Register object);
@@ -1319,12 +1303,6 @@ class MacroAssembler : public Assembler {
#define SmiWordOffset(offset) offset
#endif
- // Abort execution if argument is not a string, enabled via --debug-code.
- void AssertString(Register object);
-
- // Abort execution if argument is not a name, enabled via --debug-code.
- void AssertName(Register object);
-
void AssertFunction(Register object);
// Abort execution if argument is not a JSBoundFunction,
@@ -1333,10 +1311,7 @@ class MacroAssembler : public Assembler {
// Abort execution if argument is not a JSGeneratorObject,
// enabled via --debug-code.
- void AssertGeneratorObject(Register object);
-
- // Abort execution if argument is not a JSReceiver, enabled via --debug-code.
- void AssertReceiver(Register object);
+ void AssertGeneratorObject(Register object, Register suspend_flags);
// Abort execution if argument is not undefined or an AllocationSite, enabled
// via --debug-code.
@@ -1514,6 +1489,7 @@ class MacroAssembler : public Assembler {
bool generating_stub_;
bool has_frame_;
+ Isolate* isolate_;
// This handle will be patched with the code object on installation.
Handle<Object> code_object_;
@@ -1522,7 +1498,6 @@ class MacroAssembler : public Assembler {
friend class StandardFrame;
};
-
// The code patcher is used to patch (typically) small parts of code e.g. for
// debugging and other types of instrumentation. When using the code patcher
// the exact number of bytes specified must be emitted. It is not legal to emit
diff --git a/deps/v8/src/ppc/simulator-ppc.cc b/deps/v8/src/ppc/simulator-ppc.cc
index 058632847d..ed471dce5b 100644
--- a/deps/v8/src/ppc/simulator-ppc.cc
+++ b/deps/v8/src/ppc/simulator-ppc.cc
@@ -25,6 +25,10 @@ namespace internal {
const auto GetRegConfig = RegisterConfiguration::Crankshaft;
+// static
+base::LazyInstance<Simulator::GlobalMonitor>::type Simulator::global_monitor_ =
+ LAZY_INSTANCE_INITIALIZER;
+
// This macro provides a platform independent use of sscanf. The reason for
// SScanF not being implemented in a platform independent way through
// ::v8::internal::OS in the same way as SNPrintF is that the
@@ -781,9 +785,10 @@ Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
last_debugger_input_ = NULL;
}
-
-Simulator::~Simulator() { free(stack_); }
-
+Simulator::~Simulator() {
+ global_monitor_.Pointer()->RemoveProcessor(&global_monitor_processor_);
+ free(stack_);
+}
// When the generated code calls an external reference we need to catch that in
// the simulator. The external reference will be a function compiled for the
@@ -987,44 +992,105 @@ void Simulator::TrashCallerSaveRegisters() {
uint32_t Simulator::ReadWU(intptr_t addr, Instruction* instr) {
+ // All supported PPC targets allow unaligned accesses, so we don't need to
+ // check the alignment here.
+ base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
+ local_monitor_.NotifyLoad(addr);
uint32_t* ptr = reinterpret_cast<uint32_t*>(addr);
return *ptr;
}
+uint32_t Simulator::ReadExWU(intptr_t addr, Instruction* instr) {
+ base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
+ local_monitor_.NotifyLoadExcl(addr, TransactionSize::Word);
+ global_monitor_.Pointer()->NotifyLoadExcl_Locked(addr,
+ &global_monitor_processor_);
+ uint32_t* ptr = reinterpret_cast<uint32_t*>(addr);
+ return *ptr;
+}
int32_t Simulator::ReadW(intptr_t addr, Instruction* instr) {
+ // All supported PPC targets allow unaligned accesses, so we don't need to
+ // check the alignment here.
+ base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
+ local_monitor_.NotifyLoad(addr);
int32_t* ptr = reinterpret_cast<int32_t*>(addr);
return *ptr;
}
void Simulator::WriteW(intptr_t addr, uint32_t value, Instruction* instr) {
+ // All supported PPC targets allow unaligned accesses, so we don't need to
+ // check the alignment here.
+ base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
+ local_monitor_.NotifyStore(addr);
+ global_monitor_.Pointer()->NotifyStore_Locked(addr,
+ &global_monitor_processor_);
uint32_t* ptr = reinterpret_cast<uint32_t*>(addr);
*ptr = value;
return;
}
+int Simulator::WriteExW(intptr_t addr, uint32_t value, Instruction* instr) {
+ base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
+ if (local_monitor_.NotifyStoreExcl(addr, TransactionSize::Word) &&
+ global_monitor_.Pointer()->NotifyStoreExcl_Locked(
+ addr, &global_monitor_processor_)) {
+ uint32_t* ptr = reinterpret_cast<uint32_t*>(addr);
+ *ptr = value;
+ return 0;
+ } else {
+ return 1;
+ }
+}
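The Ex accessors model PPC's load-reserve/store-conditional pair: ReadEx* tags a reservation in the local and global monitors, and WriteEx* returns 0 when the reservation still holds (the store is performed) and 1 when it was lost. A minimal stand-alone analogue of the retry loop the simulated code is expected to run, using std::atomic purely as a stand-in for the simulator's monitors (none of these names are part of the patch):

#include <atomic>
#include <cstdint>

// 0 == success, 1 == failure, matching WriteExW's return convention.
int StoreConditional(std::atomic<uint32_t>* word, uint32_t expected,
                     uint32_t desired) {
  return word->compare_exchange_strong(expected, desired) ? 0 : 1;
}

uint32_t AtomicIncrement(std::atomic<uint32_t>* word) {
  uint32_t old_val;
  do {
    old_val = word->load();  // plays the role of lwarx (ReadExWU)
  } while (StoreConditional(word, old_val, old_val + 1) != 0);  // stwcx.
  return old_val + 1;
}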
void Simulator::WriteW(intptr_t addr, int32_t value, Instruction* instr) {
+ // All supported PPC targets allow unaligned accesses, so we don't need to
+ // check the alignment here.
+ base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
+ local_monitor_.NotifyStore(addr);
+ global_monitor_.Pointer()->NotifyStore_Locked(addr,
+ &global_monitor_processor_);
int32_t* ptr = reinterpret_cast<int32_t*>(addr);
*ptr = value;
return;
}
-
uint16_t Simulator::ReadHU(intptr_t addr, Instruction* instr) {
+ // All supported PPC targets allow unaligned accesses, so we don't need to
+ // check the alignment here.
+ base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
+ local_monitor_.NotifyLoad(addr);
uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
return *ptr;
}
+uint16_t Simulator::ReadExHU(intptr_t addr, Instruction* instr) {
+ base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
+ local_monitor_.NotifyLoadExcl(addr, TransactionSize::HalfWord);
+ global_monitor_.Pointer()->NotifyLoadExcl_Locked(addr,
+ &global_monitor_processor_);
+ uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
+ return *ptr;
+}
int16_t Simulator::ReadH(intptr_t addr, Instruction* instr) {
+ // All supported PPC targets allow unaligned accesses, so we don't need to
+ // check the alignment here.
+ base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
+ local_monitor_.NotifyLoad(addr);
int16_t* ptr = reinterpret_cast<int16_t*>(addr);
return *ptr;
}
void Simulator::WriteH(intptr_t addr, uint16_t value, Instruction* instr) {
+ // All supported PPC targets allow unaligned accesses, so we don't need to
+ // check the alignment here.
+ base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
+ local_monitor_.NotifyStore(addr);
+ global_monitor_.Pointer()->NotifyStore_Locked(addr,
+ &global_monitor_processor_);
uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
*ptr = value;
return;
@@ -1032,43 +1098,111 @@ void Simulator::WriteH(intptr_t addr, uint16_t value, Instruction* instr) {
void Simulator::WriteH(intptr_t addr, int16_t value, Instruction* instr) {
+ // All supported PPC targets allow unaligned accesses, so we don't need to
+ // check the alignment here.
+ base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
+ local_monitor_.NotifyStore(addr);
+ global_monitor_.Pointer()->NotifyStore_Locked(addr,
+ &global_monitor_processor_);
int16_t* ptr = reinterpret_cast<int16_t*>(addr);
*ptr = value;
return;
}
+int Simulator::WriteExH(intptr_t addr, uint16_t value, Instruction* instr) {
+ base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
+ if (local_monitor_.NotifyStoreExcl(addr, TransactionSize::HalfWord) &&
+ global_monitor_.Pointer()->NotifyStoreExcl_Locked(
+ addr, &global_monitor_processor_)) {
+ uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
+ *ptr = value;
+ return 0;
+ } else {
+ return 1;
+ }
+}
uint8_t Simulator::ReadBU(intptr_t addr) {
+ // All supported PPC targets allow unaligned accesses, so we don't need to
+ // check the alignment here.
+ base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
+ local_monitor_.NotifyLoad(addr);
uint8_t* ptr = reinterpret_cast<uint8_t*>(addr);
return *ptr;
}
int8_t Simulator::ReadB(intptr_t addr) {
+ // All supported PPC targets allow unaligned accesses, so we don't need to
+ // check the alignment here.
+ base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
+ local_monitor_.NotifyLoad(addr);
int8_t* ptr = reinterpret_cast<int8_t*>(addr);
return *ptr;
}
+uint8_t Simulator::ReadExBU(intptr_t addr) {
+ base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
+ local_monitor_.NotifyLoadExcl(addr, TransactionSize::Byte);
+ global_monitor_.Pointer()->NotifyLoadExcl_Locked(addr,
+ &global_monitor_processor_);
+ uint8_t* ptr = reinterpret_cast<uint8_t*>(addr);
+ return *ptr;
+}
void Simulator::WriteB(intptr_t addr, uint8_t value) {
+ // All supported PPC targets allow unaligned accesses, so we don't need to
+ // check the alignment here.
+ base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
+ local_monitor_.NotifyStore(addr);
+ global_monitor_.Pointer()->NotifyStore_Locked(addr,
+ &global_monitor_processor_);
uint8_t* ptr = reinterpret_cast<uint8_t*>(addr);
*ptr = value;
}
void Simulator::WriteB(intptr_t addr, int8_t value) {
+ // All supported PPC targets allow unaligned accesses, so we don't need to
+ // check the alignment here.
+ base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
+ local_monitor_.NotifyStore(addr);
+ global_monitor_.Pointer()->NotifyStore_Locked(addr,
+ &global_monitor_processor_);
int8_t* ptr = reinterpret_cast<int8_t*>(addr);
*ptr = value;
}
+int Simulator::WriteExB(intptr_t addr, uint8_t value) {
+ base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
+ if (local_monitor_.NotifyStoreExcl(addr, TransactionSize::Byte) &&
+ global_monitor_.Pointer()->NotifyStoreExcl_Locked(
+ addr, &global_monitor_processor_)) {
+ uint8_t* ptr = reinterpret_cast<uint8_t*>(addr);
+ *ptr = value;
+ return 0;
+ } else {
+ return 1;
+ }
+}
intptr_t* Simulator::ReadDW(intptr_t addr) {
+ // All supported PPC targets allow unaligned accesses, so we don't need to
+ // check the alignment here.
+ base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
+ local_monitor_.NotifyLoad(addr);
intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
return ptr;
}
void Simulator::WriteDW(intptr_t addr, int64_t value) {
+ // All supported PPC targets allow unaligned accesses, so we don't need to
+ // check the alignment here.
+ base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
+ local_monitor_.NotifyStore(addr);
+ global_monitor_.Pointer()->NotifyStore_Locked(addr,
+ &global_monitor_processor_);
int64_t* ptr = reinterpret_cast<int64_t*>(addr);
*ptr = value;
return;
@@ -1620,11 +1754,154 @@ void Simulator::ExecuteBranchConditional(Instruction* instr, BCType type) {
}
}
-
-// Handle execution based on instruction types.
-void Simulator::ExecuteExt1(Instruction* instr) {
- uint32_t opcode = EXT1 | instr->BitField(10, 1);
+void Simulator::ExecuteGeneric(Instruction* instr) {
+ uint32_t opcode = instr->OpcodeBase();
switch (opcode) {
+ case SUBFIC: {
+ int rt = instr->RTValue();
+ int ra = instr->RAValue();
+ intptr_t ra_val = get_register(ra);
+ int32_t im_val = instr->Bits(15, 0);
+ im_val = SIGN_EXT_IMM16(im_val);
+ intptr_t alu_out = im_val - ra_val;
+ set_register(rt, alu_out);
+ // todo - handle RC bit
+ break;
+ }
+ case CMPLI: {
+ int ra = instr->RAValue();
+ uint32_t im_val = instr->Bits(15, 0);
+ int cr = instr->Bits(25, 23);
+ uint32_t bf = 0;
+#if V8_TARGET_ARCH_PPC64
+ int L = instr->Bit(21);
+ if (L) {
+#endif
+ uintptr_t ra_val = get_register(ra);
+ if (ra_val < im_val) {
+ bf |= 0x80000000;
+ }
+ if (ra_val > im_val) {
+ bf |= 0x40000000;
+ }
+ if (ra_val == im_val) {
+ bf |= 0x20000000;
+ }
+#if V8_TARGET_ARCH_PPC64
+ } else {
+ uint32_t ra_val = get_register(ra);
+ if (ra_val < im_val) {
+ bf |= 0x80000000;
+ }
+ if (ra_val > im_val) {
+ bf |= 0x40000000;
+ }
+ if (ra_val == im_val) {
+ bf |= 0x20000000;
+ }
+ }
+#endif
+ uint32_t condition_mask = 0xF0000000U >> (cr * 4);
+ uint32_t condition = bf >> (cr * 4);
+ condition_reg_ = (condition_reg_ & ~condition_mask) | condition;
+ break;
+ }
+ case CMPI: {
+ int ra = instr->RAValue();
+ int32_t im_val = instr->Bits(15, 0);
+ im_val = SIGN_EXT_IMM16(im_val);
+ int cr = instr->Bits(25, 23);
+ uint32_t bf = 0;
+#if V8_TARGET_ARCH_PPC64
+ int L = instr->Bit(21);
+ if (L) {
+#endif
+ intptr_t ra_val = get_register(ra);
+ if (ra_val < im_val) {
+ bf |= 0x80000000;
+ }
+ if (ra_val > im_val) {
+ bf |= 0x40000000;
+ }
+ if (ra_val == im_val) {
+ bf |= 0x20000000;
+ }
+#if V8_TARGET_ARCH_PPC64
+ } else {
+ int32_t ra_val = get_register(ra);
+ if (ra_val < im_val) {
+ bf |= 0x80000000;
+ }
+ if (ra_val > im_val) {
+ bf |= 0x40000000;
+ }
+ if (ra_val == im_val) {
+ bf |= 0x20000000;
+ }
+ }
+#endif
+ uint32_t condition_mask = 0xF0000000U >> (cr * 4);
+ uint32_t condition = bf >> (cr * 4);
+ condition_reg_ = (condition_reg_ & ~condition_mask) | condition;
+ break;
+ }
+ case ADDIC: {
+ int rt = instr->RTValue();
+ int ra = instr->RAValue();
+ uintptr_t ra_val = get_register(ra);
+ uintptr_t im_val = SIGN_EXT_IMM16(instr->Bits(15, 0));
+ uintptr_t alu_out = ra_val + im_val;
+ // Check overflow
+ if (~ra_val < im_val) {
+ special_reg_xer_ = (special_reg_xer_ & ~0xF0000000) | 0x20000000;
+ } else {
+ special_reg_xer_ &= ~0xF0000000;
+ }
+ set_register(rt, alu_out);
+ break;
+ }
+ case ADDI: {
+ int rt = instr->RTValue();
+ int ra = instr->RAValue();
+ int32_t im_val = SIGN_EXT_IMM16(instr->Bits(15, 0));
+ intptr_t alu_out;
+ if (ra == 0) {
+ alu_out = im_val;
+ } else {
+ intptr_t ra_val = get_register(ra);
+ alu_out = ra_val + im_val;
+ }
+ set_register(rt, alu_out);
+ // todo - handle RC bit
+ break;
+ }
+ case ADDIS: {
+ int rt = instr->RTValue();
+ int ra = instr->RAValue();
+ int32_t im_val = (instr->Bits(15, 0) << 16);
+ intptr_t alu_out;
+ if (ra == 0) { // treat r0 as zero
+ alu_out = im_val;
+ } else {
+ intptr_t ra_val = get_register(ra);
+ alu_out = ra_val + im_val;
+ }
+ set_register(rt, alu_out);
+ break;
+ }
+ case BCX: {
+ ExecuteBranchConditional(instr, BC_OFFSET);
+ break;
+ }
+ case BX: {
+ int offset = (instr->Bits(25, 2) << 8) >> 6;
+ if (instr->Bit(0) == 1) { // LK flag set
+ special_reg_lr_ = get_pc() + 4;
+ }
+ set_pc(get_pc() + offset);
+ // todo - AA flag
+ break;
+ }
case MCRF:
UNIMPLEMENTED(); // Not used by V8.
case BCLRX:
@@ -1668,19 +1945,142 @@ void Simulator::ExecuteExt1(Instruction* instr) {
case CRNAND:
case CRAND:
case CRORC:
- case CROR:
- default: {
+ case CROR: {
UNIMPLEMENTED(); // Not used by V8.
+ break;
+ }
+ case RLWIMIX: {
+ int ra = instr->RAValue();
+ int rs = instr->RSValue();
+ uint32_t rs_val = get_register(rs);
+ int32_t ra_val = get_register(ra);
+ int sh = instr->Bits(15, 11);
+ int mb = instr->Bits(10, 6);
+ int me = instr->Bits(5, 1);
+ uint32_t result = base::bits::RotateLeft32(rs_val, sh);
+ int mask = 0;
+ if (mb < me + 1) {
+ int bit = 0x80000000 >> mb;
+ for (; mb <= me; mb++) {
+ mask |= bit;
+ bit >>= 1;
+ }
+ } else if (mb == me + 1) {
+ mask = 0xffffffff;
+ } else { // mb > me+1
+ int bit = 0x80000000 >> (me + 1); // needs to be tested
+ mask = 0xffffffff;
+ for (; me < mb; me++) {
+ mask ^= bit;
+ bit >>= 1;
+ }
+ }
+ result &= mask;
+ ra_val &= ~mask;
+ result |= ra_val;
+ set_register(ra, result);
+ if (instr->Bit(0)) { // RC bit set
+ SetCR0(result);
+ }
+ break;
+ }
+ case RLWINMX:
+ case RLWNMX: {
+ int ra = instr->RAValue();
+ int rs = instr->RSValue();
+ uint32_t rs_val = get_register(rs);
+ int sh = 0;
+ if (opcode == RLWINMX) {
+ sh = instr->Bits(15, 11);
+ } else {
+ int rb = instr->RBValue();
+ uint32_t rb_val = get_register(rb);
+ sh = (rb_val & 0x1f);
+ }
+ int mb = instr->Bits(10, 6);
+ int me = instr->Bits(5, 1);
+ uint32_t result = base::bits::RotateLeft32(rs_val, sh);
+ int mask = 0;
+ if (mb < me + 1) {
+ int bit = 0x80000000 >> mb;
+ for (; mb <= me; mb++) {
+ mask |= bit;
+ bit >>= 1;
+ }
+ } else if (mb == me + 1) {
+ mask = 0xffffffff;
+ } else { // mb > me+1
+ int bit = 0x80000000 >> (me + 1); // needs to be tested
+ mask = 0xffffffff;
+ for (; me < mb; me++) {
+ mask ^= bit;
+ bit >>= 1;
+ }
+ }
+ result &= mask;
+ set_register(ra, result);
+ if (instr->Bit(0)) { // RC bit set
+ SetCR0(result);
+ }
+ break;
+ }
+ case ORI: {
+ int rs = instr->RSValue();
+ int ra = instr->RAValue();
+ intptr_t rs_val = get_register(rs);
+ uint32_t im_val = instr->Bits(15, 0);
+ intptr_t alu_out = rs_val | im_val;
+ set_register(ra, alu_out);
+ break;
+ }
+ case ORIS: {
+ int rs = instr->RSValue();
+ int ra = instr->RAValue();
+ intptr_t rs_val = get_register(rs);
+ uint32_t im_val = instr->Bits(15, 0);
+ intptr_t alu_out = rs_val | (im_val << 16);
+ set_register(ra, alu_out);
+ break;
+ }
+ case XORI: {
+ int rs = instr->RSValue();
+ int ra = instr->RAValue();
+ intptr_t rs_val = get_register(rs);
+ uint32_t im_val = instr->Bits(15, 0);
+ intptr_t alu_out = rs_val ^ im_val;
+ set_register(ra, alu_out);
+ // todo - set condition based on SO bit
+ break;
+ }
+ case XORIS: {
+ int rs = instr->RSValue();
+ int ra = instr->RAValue();
+ intptr_t rs_val = get_register(rs);
+ uint32_t im_val = instr->Bits(15, 0);
+ intptr_t alu_out = rs_val ^ (im_val << 16);
+ set_register(ra, alu_out);
+ break;
+ }
+ case ANDIx: {
+ int rs = instr->RSValue();
+ int ra = instr->RAValue();
+ intptr_t rs_val = get_register(rs);
+ uint32_t im_val = instr->Bits(15, 0);
+ intptr_t alu_out = rs_val & im_val;
+ set_register(ra, alu_out);
+ SetCR0(alu_out);
+ break;
+ }
+ case ANDISx: {
+ int rs = instr->RSValue();
+ int ra = instr->RAValue();
+ intptr_t rs_val = get_register(rs);
+ uint32_t im_val = instr->Bits(15, 0);
+ intptr_t alu_out = rs_val & (im_val << 16);
+ set_register(ra, alu_out);
+ SetCR0(alu_out);
+ break;
}
- }
-}
-
-
-bool Simulator::ExecuteExt2_10bit(Instruction* instr) {
- bool found = true;
-
- uint32_t opcode = EXT2 | instr->BitField(10, 1);
- switch (opcode) {
case SRWX: {
int rs = instr->RSValue();
int ra = instr->RAValue();
@@ -1941,17 +2341,49 @@ bool Simulator::ExecuteExt2_10bit(Instruction* instr) {
// todo - simulate icbi
break;
}
- default: {
- found = false;
+
+ case LWZU:
+ case LWZ: {
+ int ra = instr->RAValue();
+ int rt = instr->RTValue();
+ intptr_t ra_val = ra == 0 ? 0 : get_register(ra);
+ int offset = SIGN_EXT_IMM16(instr->Bits(15, 0));
+ set_register(rt, ReadWU(ra_val + offset, instr));
+ if (opcode == LWZU) {
+ DCHECK(ra != 0);
+ set_register(ra, ra_val + offset);
+ }
break;
}
- }
- if (found) return found;
+ case LBZU:
+ case LBZ: {
+ int ra = instr->RAValue();
+ int rt = instr->RTValue();
+ intptr_t ra_val = ra == 0 ? 0 : get_register(ra);
+ int offset = SIGN_EXT_IMM16(instr->Bits(15, 0));
+ set_register(rt, ReadB(ra_val + offset) & 0xFF);
+ if (opcode == LBZU) {
+ DCHECK(ra != 0);
+ set_register(ra, ra_val + offset);
+ }
+ break;
+ }
- found = true;
- opcode = EXT2 | instr->BitField(10, 2);
- switch (opcode) {
+ case STWU:
+ case STW: {
+ int ra = instr->RAValue();
+ int rs = instr->RSValue();
+ intptr_t ra_val = ra == 0 ? 0 : get_register(ra);
+ int32_t rs_val = get_register(rs);
+ int offset = SIGN_EXT_IMM16(instr->Bits(15, 0));
+ WriteW(ra_val + offset, rs_val, instr);
+ if (opcode == STWU) {
+ DCHECK(ra != 0);
+ set_register(ra, ra_val + offset);
+ }
+ break;
+ }
case SRADIX: {
int ra = instr->RAValue();
int rs = instr->RSValue();
@@ -1964,21 +2396,36 @@ bool Simulator::ExecuteExt2_10bit(Instruction* instr) {
}
break;
}
- default: {
- found = false;
+ case STBCX: {
+ int rs = instr->RSValue();
+ int ra = instr->RAValue();
+ int rb = instr->RBValue();
+ intptr_t ra_val = ra == 0 ? 0 : get_register(ra);
+ int8_t rs_val = get_register(rs);
+ intptr_t rb_val = get_register(rb);
+ SetCR0(WriteExB(ra_val + rb_val, rs_val));
+ break;
+ }
+ case STHCX: {
+ int rs = instr->RSValue();
+ int ra = instr->RAValue();
+ int rb = instr->RBValue();
+ intptr_t ra_val = ra == 0 ? 0 : get_register(ra);
+ int16_t rs_val = get_register(rs);
+ intptr_t rb_val = get_register(rb);
+ SetCR0(WriteExH(ra_val + rb_val, rs_val, instr));
+ break;
+ }
+ case STWCX: {
+ int rs = instr->RSValue();
+ int ra = instr->RAValue();
+ int rb = instr->RBValue();
+ intptr_t ra_val = ra == 0 ? 0 : get_register(ra);
+ int32_t rs_val = get_register(rs);
+ intptr_t rb_val = get_register(rb);
+ SetCR0(WriteExW(ra_val + rb_val, rs_val, instr));
break;
}
- }
-
- return found;
-}
-
-
-bool Simulator::ExecuteExt2_9bit_part1(Instruction* instr) {
- bool found = true;
-
- uint32_t opcode = EXT2 | instr->BitField(9, 1);
- switch (opcode) {
case TW: {
// used for call redirection in simulation mode
SoftwareInterrupt(instr);
@@ -2223,20 +2670,6 @@ bool Simulator::ExecuteExt2_9bit_part1(Instruction* instr) {
break;
}
#endif
- default: {
- found = false;
- break;
- }
- }
-
- return found;
-}
-
-
-bool Simulator::ExecuteExt2_9bit_part2(Instruction* instr) {
- bool found = true;
- uint32_t opcode = EXT2 | instr->BitField(9, 1);
- switch (opcode) {
case CNTLZWX: {
int rs = instr->RSValue();
int ra = instr->RAValue();
@@ -2724,37 +3157,46 @@ bool Simulator::ExecuteExt2_9bit_part2(Instruction* instr) {
}
break;
}
- case LHAX:
- case LHAUX: {
+ case LHAX: {
int rt = instr->RTValue();
int ra = instr->RAValue();
int rb = instr->RBValue();
intptr_t ra_val = ra == 0 ? 0 : get_register(ra);
intptr_t rb_val = get_register(rb);
set_register(rt, ReadH(ra_val + rb_val, instr));
- if (opcode == LHAUX) {
- DCHECK(ra != 0 && ra != rt);
- set_register(ra, ra_val + rb_val);
- }
break;
}
- case DCBF: {
- // todo - simulate dcbf
+ case LBARX: {
+ int rt = instr->RTValue();
+ int ra = instr->RAValue();
+ int rb = instr->RBValue();
+ intptr_t ra_val = ra == 0 ? 0 : get_register(ra);
+ intptr_t rb_val = get_register(rb);
+ set_register(rt, ReadExBU(ra_val + rb_val) & 0xFF);
break;
}
- default: {
- found = false;
+ case LHARX: {
+ int rt = instr->RTValue();
+ int ra = instr->RAValue();
+ int rb = instr->RBValue();
+ intptr_t ra_val = ra == 0 ? 0 : get_register(ra);
+ intptr_t rb_val = get_register(rb);
+ set_register(rt, ReadExHU(ra_val + rb_val, instr));
+ break;
+ }
+ case LWARX: {
+ int rt = instr->RTValue();
+ int ra = instr->RAValue();
+ int rb = instr->RBValue();
+ intptr_t ra_val = ra == 0 ? 0 : get_register(ra);
+ intptr_t rb_val = get_register(rb);
+ set_register(rt, ReadExWU(ra_val + rb_val, instr));
+ break;
+ }
+ case DCBF: {
+ // todo - simulate dcbf
break;
}
- }
-
- return found;
-}
-
-
-void Simulator::ExecuteExt2_5bit(Instruction* instr) {
- uint32_t opcode = EXT2 | instr->BitField(5, 1);
- switch (opcode) {
case ISEL: {
int rt = instr->RTValue();
int ra = instr->RAValue();
@@ -2767,27 +3209,159 @@ void Simulator::ExecuteExt2_5bit(Instruction* instr) {
set_register(rt, value);
break;
}
- default: {
- PrintF("Unimplemented: %08x\n", instr->InstructionBits());
- UNIMPLEMENTED(); // Not used by V8.
+
+ case STBU:
+ case STB: {
+ int ra = instr->RAValue();
+ int rs = instr->RSValue();
+ intptr_t ra_val = ra == 0 ? 0 : get_register(ra);
+ int8_t rs_val = get_register(rs);
+ int offset = SIGN_EXT_IMM16(instr->Bits(15, 0));
+ WriteB(ra_val + offset, rs_val);
+ if (opcode == STBU) {
+ DCHECK(ra != 0);
+ set_register(ra, ra_val + offset);
+ }
+ break;
}
- }
-}
+ case LHZU:
+ case LHZ: {
+ int ra = instr->RAValue();
+ int rt = instr->RTValue();
+ intptr_t ra_val = ra == 0 ? 0 : get_register(ra);
+ int offset = SIGN_EXT_IMM16(instr->Bits(15, 0));
+ uintptr_t result = ReadHU(ra_val + offset, instr) & 0xffff;
+ set_register(rt, result);
+ if (opcode == LHZU) {
+ set_register(ra, ra_val + offset);
+ }
+ break;
+ }
-void Simulator::ExecuteExt2(Instruction* instr) {
- // Check first the 10-1 bit versions
- if (ExecuteExt2_10bit(instr)) return;
- // Now look at the lesser encodings
- if (ExecuteExt2_9bit_part1(instr)) return;
- if (ExecuteExt2_9bit_part2(instr)) return;
- ExecuteExt2_5bit(instr);
-}
+ case LHA:
+ case LHAU: {
+ int ra = instr->RAValue();
+ int rt = instr->RTValue();
+ intptr_t ra_val = ra == 0 ? 0 : get_register(ra);
+ int offset = SIGN_EXT_IMM16(instr->Bits(15, 0));
+ intptr_t result = ReadH(ra_val + offset, instr);
+ set_register(rt, result);
+ if (opcode == LHAU) {
+ set_register(ra, ra_val + offset);
+ }
+ break;
+ }
+ case STHU:
+ case STH: {
+ int ra = instr->RAValue();
+ int rs = instr->RSValue();
+ intptr_t ra_val = ra == 0 ? 0 : get_register(ra);
+ int16_t rs_val = get_register(rs);
+ int offset = SIGN_EXT_IMM16(instr->Bits(15, 0));
+ WriteH(ra_val + offset, rs_val, instr);
+ if (opcode == STHU) {
+ DCHECK(ra != 0);
+ set_register(ra, ra_val + offset);
+ }
+ break;
+ }
+
+ case LMW:
+ case STMW: {
+ UNIMPLEMENTED();
+ break;
+ }
+
+ case LFSU:
+ case LFS: {
+ int frt = instr->RTValue();
+ int ra = instr->RAValue();
+ int32_t offset = SIGN_EXT_IMM16(instr->Bits(15, 0));
+ intptr_t ra_val = ra == 0 ? 0 : get_register(ra);
+ int32_t val = ReadW(ra_val + offset, instr);
+ float* fptr = reinterpret_cast<float*>(&val);
+#if V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64
+ // Conversion using double changes sNan to qNan on ia32/x64
+ if ((val & 0x7f800000) == 0x7f800000) {
+ int64_t dval = static_cast<int64_t>(val);
+ dval = ((dval & 0xc0000000) << 32) | ((dval & 0x40000000) << 31) |
+ ((dval & 0x40000000) << 30) | ((dval & 0x7fffffff) << 29) | 0x0;
+ set_d_register(frt, dval);
+ } else {
+ set_d_register_from_double(frt, static_cast<double>(*fptr));
+ }
+#else
+ set_d_register_from_double(frt, static_cast<double>(*fptr));
+#endif
+ if (opcode == LFSU) {
+ DCHECK(ra != 0);
+ set_register(ra, ra_val + offset);
+ }
+ break;
+ }
+
+ case LFDU:
+ case LFD: {
+ int frt = instr->RTValue();
+ int ra = instr->RAValue();
+ int32_t offset = SIGN_EXT_IMM16(instr->Bits(15, 0));
+ intptr_t ra_val = ra == 0 ? 0 : get_register(ra);
+ int64_t* dptr = reinterpret_cast<int64_t*>(ReadDW(ra_val + offset));
+ set_d_register(frt, *dptr);
+ if (opcode == LFDU) {
+ DCHECK(ra != 0);
+ set_register(ra, ra_val + offset);
+ }
+ break;
+ }
+
+ case STFSU: {
+ case STFS:
+ int frs = instr->RSValue();
+ int ra = instr->RAValue();
+ int32_t offset = SIGN_EXT_IMM16(instr->Bits(15, 0));
+ intptr_t ra_val = ra == 0 ? 0 : get_register(ra);
+ float frs_val = static_cast<float>(get_double_from_d_register(frs));
+ int32_t* p;
+#if V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64
+ // Conversion using double changes sNan to qNan on ia32/x64
+ int32_t sval = 0;
+ int64_t dval = get_d_register(frs);
+ if ((dval & 0x7ff0000000000000) == 0x7ff0000000000000) {
+ sval = ((dval & 0xc000000000000000) >> 32) |
+ ((dval & 0x07ffffffe0000000) >> 29);
+ p = &sval;
+ } else {
+ p = reinterpret_cast<int32_t*>(&frs_val);
+ }
+#else
+ p = reinterpret_cast<int32_t*>(&frs_val);
+#endif
+ WriteW(ra_val + offset, *p, instr);
+ if (opcode == STFSU) {
+ DCHECK(ra != 0);
+ set_register(ra, ra_val + offset);
+ }
+ break;
+ }
+
+ case STFDU:
+ case STFD: {
+ int frs = instr->RSValue();
+ int ra = instr->RAValue();
+ int32_t offset = SIGN_EXT_IMM16(instr->Bits(15, 0));
+ intptr_t ra_val = ra == 0 ? 0 : get_register(ra);
+ int64_t frs_val = get_d_register(frs);
+ WriteDW(ra_val + offset, frs_val);
+ if (opcode == STFDU) {
+ DCHECK(ra != 0);
+ set_register(ra, ra_val + offset);
+ }
+ break;
+ }
-void Simulator::ExecuteExt3(Instruction* instr) {
- uint32_t opcode = EXT3 | instr->BitField(10, 1);
- switch (opcode) {
case FCFIDS: {
// fcfids
int frt = instr->RTValue();
@@ -2806,14 +3380,7 @@ void Simulator::ExecuteExt3(Instruction* instr) {
set_d_register_from_double(frt, frt_val);
return;
}
- }
- UNIMPLEMENTED(); // Not used by V8.
-}
-
-void Simulator::ExecuteExt4(Instruction* instr) {
- uint32_t opcode = EXT4 | instr->BitField(5, 1);
- switch (opcode) {
case FDIV: {
int frt = instr->RTValue();
int fra = instr->RAValue();
@@ -2899,9 +3466,6 @@ void Simulator::ExecuteExt4(Instruction* instr) {
set_d_register_from_double(frt, frt_val);
return;
}
- }
- opcode = EXT4 | instr->BitField(10, 1);
- switch (opcode) {
case FCMPU: {
int fra = instr->RAValue();
int frb = instr->RBValue();
@@ -3232,14 +3796,9 @@ void Simulator::ExecuteExt4(Instruction* instr) {
set_d_register_from_double(frt, frt_val);
return;
}
- }
- UNIMPLEMENTED(); // Not used by V8.
-}
+
#if V8_TARGET_ARCH_PPC64
-void Simulator::ExecuteExt5(Instruction* instr) {
- uint32_t opcode = EXT5 | instr->BitField(4, 2);
- switch (opcode) {
case RLDICL: {
int ra = instr->RAValue();
int rs = instr->RSValue();
@@ -3326,9 +3885,6 @@ void Simulator::ExecuteExt5(Instruction* instr) {
}
return;
}
- }
- opcode = EXT5 | instr->BitField(4, 1);
- switch (opcode) {
case RLDCL: {
int ra = instr->RAValue();
int rs = instr->RSValue();
@@ -3348,14 +3904,52 @@ void Simulator::ExecuteExt5(Instruction* instr) {
}
return;
}
- }
- UNIMPLEMENTED(); // Not used by V8.
-}
+
+ case LD:
+ case LDU:
+ case LWA: {
+ int ra = instr->RAValue();
+ int rt = instr->RTValue();
+ int64_t ra_val = ra == 0 ? 0 : get_register(ra);
+ int offset = SIGN_EXT_IMM16(instr->Bits(15, 0) & ~3);
+ switch (instr->Bits(1, 0)) {
+ case 0: { // ld
+ intptr_t* result = ReadDW(ra_val + offset);
+ set_register(rt, *result);
+ break;
+ }
+ case 1: { // ldu
+ intptr_t* result = ReadDW(ra_val + offset);
+ set_register(rt, *result);
+ DCHECK(ra != 0);
+ set_register(ra, ra_val + offset);
+ break;
+ }
+ case 2: { // lwa
+ intptr_t result = ReadW(ra_val + offset, instr);
+ set_register(rt, result);
+ break;
+ }
+ }
+ break;
+ }
+
+ case STD:
+ case STDU: {
+ int ra = instr->RAValue();
+ int rs = instr->RSValue();
+ int64_t ra_val = ra == 0 ? 0 : get_register(ra);
+ int64_t rs_val = get_register(rs);
+ int offset = SIGN_EXT_IMM16(instr->Bits(15, 0) & ~3);
+ WriteDW(ra_val + offset, rs_val);
+ if (opcode == STDU) {
+ DCHECK(ra != 0);
+ set_register(ra, ra_val + offset);
+ }
+ break;
+ }
#endif
-void Simulator::ExecuteExt6(Instruction* instr) {
- uint32_t opcode = EXT6 | instr->BitField(10, 3);
- switch (opcode) {
case XSADDDP: {
int frt = instr->RTValue();
int fra = instr->RAValue();
@@ -3396,549 +3990,6 @@ void Simulator::ExecuteExt6(Instruction* instr) {
set_d_register_from_double(frt, frt_val);
return;
}
- }
- UNIMPLEMENTED(); // Not used by V8.
-}
-
-void Simulator::ExecuteGeneric(Instruction* instr) {
- uint32_t opcode = instr->OpcodeField();
- switch (opcode) {
- case SUBFIC: {
- int rt = instr->RTValue();
- int ra = instr->RAValue();
- intptr_t ra_val = get_register(ra);
- int32_t im_val = instr->Bits(15, 0);
- im_val = SIGN_EXT_IMM16(im_val);
- intptr_t alu_out = im_val - ra_val;
- set_register(rt, alu_out);
- // todo - handle RC bit
- break;
- }
- case CMPLI: {
- int ra = instr->RAValue();
- uint32_t im_val = instr->Bits(15, 0);
- int cr = instr->Bits(25, 23);
- uint32_t bf = 0;
-#if V8_TARGET_ARCH_PPC64
- int L = instr->Bit(21);
- if (L) {
-#endif
- uintptr_t ra_val = get_register(ra);
- if (ra_val < im_val) {
- bf |= 0x80000000;
- }
- if (ra_val > im_val) {
- bf |= 0x40000000;
- }
- if (ra_val == im_val) {
- bf |= 0x20000000;
- }
-#if V8_TARGET_ARCH_PPC64
- } else {
- uint32_t ra_val = get_register(ra);
- if (ra_val < im_val) {
- bf |= 0x80000000;
- }
- if (ra_val > im_val) {
- bf |= 0x40000000;
- }
- if (ra_val == im_val) {
- bf |= 0x20000000;
- }
- }
-#endif
- uint32_t condition_mask = 0xF0000000U >> (cr * 4);
- uint32_t condition = bf >> (cr * 4);
- condition_reg_ = (condition_reg_ & ~condition_mask) | condition;
- break;
- }
- case CMPI: {
- int ra = instr->RAValue();
- int32_t im_val = instr->Bits(15, 0);
- im_val = SIGN_EXT_IMM16(im_val);
- int cr = instr->Bits(25, 23);
- uint32_t bf = 0;
-#if V8_TARGET_ARCH_PPC64
- int L = instr->Bit(21);
- if (L) {
-#endif
- intptr_t ra_val = get_register(ra);
- if (ra_val < im_val) {
- bf |= 0x80000000;
- }
- if (ra_val > im_val) {
- bf |= 0x40000000;
- }
- if (ra_val == im_val) {
- bf |= 0x20000000;
- }
-#if V8_TARGET_ARCH_PPC64
- } else {
- int32_t ra_val = get_register(ra);
- if (ra_val < im_val) {
- bf |= 0x80000000;
- }
- if (ra_val > im_val) {
- bf |= 0x40000000;
- }
- if (ra_val == im_val) {
- bf |= 0x20000000;
- }
- }
-#endif
- uint32_t condition_mask = 0xF0000000U >> (cr * 4);
- uint32_t condition = bf >> (cr * 4);
- condition_reg_ = (condition_reg_ & ~condition_mask) | condition;
- break;
- }
- case ADDIC: {
- int rt = instr->RTValue();
- int ra = instr->RAValue();
- uintptr_t ra_val = get_register(ra);
- uintptr_t im_val = SIGN_EXT_IMM16(instr->Bits(15, 0));
- uintptr_t alu_out = ra_val + im_val;
- // Check overflow
- if (~ra_val < im_val) {
- special_reg_xer_ = (special_reg_xer_ & ~0xF0000000) | 0x20000000;
- } else {
- special_reg_xer_ &= ~0xF0000000;
- }
- set_register(rt, alu_out);
- break;
- }
- case ADDI: {
- int rt = instr->RTValue();
- int ra = instr->RAValue();
- int32_t im_val = SIGN_EXT_IMM16(instr->Bits(15, 0));
- intptr_t alu_out;
- if (ra == 0) {
- alu_out = im_val;
- } else {
- intptr_t ra_val = get_register(ra);
- alu_out = ra_val + im_val;
- }
- set_register(rt, alu_out);
- // todo - handle RC bit
- break;
- }
- case ADDIS: {
- int rt = instr->RTValue();
- int ra = instr->RAValue();
- int32_t im_val = (instr->Bits(15, 0) << 16);
- intptr_t alu_out;
- if (ra == 0) { // treat r0 as zero
- alu_out = im_val;
- } else {
- intptr_t ra_val = get_register(ra);
- alu_out = ra_val + im_val;
- }
- set_register(rt, alu_out);
- break;
- }
- case BCX: {
- ExecuteBranchConditional(instr, BC_OFFSET);
- break;
- }
- case BX: {
- int offset = (instr->Bits(25, 2) << 8) >> 6;
- if (instr->Bit(0) == 1) { // LK flag set
- special_reg_lr_ = get_pc() + 4;
- }
- set_pc(get_pc() + offset);
- // todo - AA flag
- break;
- }
- case EXT1: {
- ExecuteExt1(instr);
- break;
- }
- case RLWIMIX: {
- int ra = instr->RAValue();
- int rs = instr->RSValue();
- uint32_t rs_val = get_register(rs);
- int32_t ra_val = get_register(ra);
- int sh = instr->Bits(15, 11);
- int mb = instr->Bits(10, 6);
- int me = instr->Bits(5, 1);
- uint32_t result = base::bits::RotateLeft32(rs_val, sh);
- int mask = 0;
- if (mb < me + 1) {
- int bit = 0x80000000 >> mb;
- for (; mb <= me; mb++) {
- mask |= bit;
- bit >>= 1;
- }
- } else if (mb == me + 1) {
- mask = 0xffffffff;
- } else { // mb > me+1
- int bit = 0x80000000 >> (me + 1); // needs to be tested
- mask = 0xffffffff;
- for (; me < mb; me++) {
- mask ^= bit;
- bit >>= 1;
- }
- }
- result &= mask;
- ra_val &= ~mask;
- result |= ra_val;
- set_register(ra, result);
- if (instr->Bit(0)) { // RC bit set
- SetCR0(result);
- }
- break;
- }
- case RLWINMX:
- case RLWNMX: {
- int ra = instr->RAValue();
- int rs = instr->RSValue();
- uint32_t rs_val = get_register(rs);
- int sh = 0;
- if (opcode == RLWINMX) {
- sh = instr->Bits(15, 11);
- } else {
- int rb = instr->RBValue();
- uint32_t rb_val = get_register(rb);
- sh = (rb_val & 0x1f);
- }
- int mb = instr->Bits(10, 6);
- int me = instr->Bits(5, 1);
- uint32_t result = base::bits::RotateLeft32(rs_val, sh);
- int mask = 0;
- if (mb < me + 1) {
- int bit = 0x80000000 >> mb;
- for (; mb <= me; mb++) {
- mask |= bit;
- bit >>= 1;
- }
- } else if (mb == me + 1) {
- mask = 0xffffffff;
- } else { // mb > me+1
- int bit = 0x80000000 >> (me + 1); // needs to be tested
- mask = 0xffffffff;
- for (; me < mb; me++) {
- mask ^= bit;
- bit >>= 1;
- }
- }
- result &= mask;
- set_register(ra, result);
- if (instr->Bit(0)) { // RC bit set
- SetCR0(result);
- }
- break;
- }
- case ORI: {
- int rs = instr->RSValue();
- int ra = instr->RAValue();
- intptr_t rs_val = get_register(rs);
- uint32_t im_val = instr->Bits(15, 0);
- intptr_t alu_out = rs_val | im_val;
- set_register(ra, alu_out);
- break;
- }
- case ORIS: {
- int rs = instr->RSValue();
- int ra = instr->RAValue();
- intptr_t rs_val = get_register(rs);
- uint32_t im_val = instr->Bits(15, 0);
- intptr_t alu_out = rs_val | (im_val << 16);
- set_register(ra, alu_out);
- break;
- }
- case XORI: {
- int rs = instr->RSValue();
- int ra = instr->RAValue();
- intptr_t rs_val = get_register(rs);
- uint32_t im_val = instr->Bits(15, 0);
- intptr_t alu_out = rs_val ^ im_val;
- set_register(ra, alu_out);
- // todo - set condition based SO bit
- break;
- }
- case XORIS: {
- int rs = instr->RSValue();
- int ra = instr->RAValue();
- intptr_t rs_val = get_register(rs);
- uint32_t im_val = instr->Bits(15, 0);
- intptr_t alu_out = rs_val ^ (im_val << 16);
- set_register(ra, alu_out);
- break;
- }
- case ANDIx: {
- int rs = instr->RSValue();
- int ra = instr->RAValue();
- intptr_t rs_val = get_register(rs);
- uint32_t im_val = instr->Bits(15, 0);
- intptr_t alu_out = rs_val & im_val;
- set_register(ra, alu_out);
- SetCR0(alu_out);
- break;
- }
- case ANDISx: {
- int rs = instr->RSValue();
- int ra = instr->RAValue();
- intptr_t rs_val = get_register(rs);
- uint32_t im_val = instr->Bits(15, 0);
- intptr_t alu_out = rs_val & (im_val << 16);
- set_register(ra, alu_out);
- SetCR0(alu_out);
- break;
- }
- case EXT2: {
- ExecuteExt2(instr);
- break;
- }
-
- case LWZU:
- case LWZ: {
- int ra = instr->RAValue();
- int rt = instr->RTValue();
- intptr_t ra_val = ra == 0 ? 0 : get_register(ra);
- int offset = SIGN_EXT_IMM16(instr->Bits(15, 0));
- set_register(rt, ReadWU(ra_val + offset, instr));
- if (opcode == LWZU) {
- DCHECK(ra != 0);
- set_register(ra, ra_val + offset);
- }
- break;
- }
-
- case LBZU:
- case LBZ: {
- int ra = instr->RAValue();
- int rt = instr->RTValue();
- intptr_t ra_val = ra == 0 ? 0 : get_register(ra);
- int offset = SIGN_EXT_IMM16(instr->Bits(15, 0));
- set_register(rt, ReadB(ra_val + offset) & 0xFF);
- if (opcode == LBZU) {
- DCHECK(ra != 0);
- set_register(ra, ra_val + offset);
- }
- break;
- }
-
- case STWU:
- case STW: {
- int ra = instr->RAValue();
- int rs = instr->RSValue();
- intptr_t ra_val = ra == 0 ? 0 : get_register(ra);
- int32_t rs_val = get_register(rs);
- int offset = SIGN_EXT_IMM16(instr->Bits(15, 0));
- WriteW(ra_val + offset, rs_val, instr);
- if (opcode == STWU) {
- DCHECK(ra != 0);
- set_register(ra, ra_val + offset);
- }
- // printf("r%d %08x -> %08x\n", rs, rs_val, offset); // 0xdead
- break;
- }
-
- case STBU:
- case STB: {
- int ra = instr->RAValue();
- int rs = instr->RSValue();
- intptr_t ra_val = ra == 0 ? 0 : get_register(ra);
- int8_t rs_val = get_register(rs);
- int offset = SIGN_EXT_IMM16(instr->Bits(15, 0));
- WriteB(ra_val + offset, rs_val);
- if (opcode == STBU) {
- DCHECK(ra != 0);
- set_register(ra, ra_val + offset);
- }
- break;
- }
-
- case LHZU:
- case LHZ: {
- int ra = instr->RAValue();
- int rt = instr->RTValue();
- intptr_t ra_val = ra == 0 ? 0 : get_register(ra);
- int offset = SIGN_EXT_IMM16(instr->Bits(15, 0));
- uintptr_t result = ReadHU(ra_val + offset, instr) & 0xffff;
- set_register(rt, result);
- if (opcode == LHZU) {
- set_register(ra, ra_val + offset);
- }
- break;
- }
-
- case LHA:
- case LHAU: {
- int ra = instr->RAValue();
- int rt = instr->RTValue();
- intptr_t ra_val = ra == 0 ? 0 : get_register(ra);
- int offset = SIGN_EXT_IMM16(instr->Bits(15, 0));
- intptr_t result = ReadH(ra_val + offset, instr);
- set_register(rt, result);
- if (opcode == LHAU) {
- set_register(ra, ra_val + offset);
- }
- break;
- }
-
- case STHU:
- case STH: {
- int ra = instr->RAValue();
- int rs = instr->RSValue();
- intptr_t ra_val = ra == 0 ? 0 : get_register(ra);
- int16_t rs_val = get_register(rs);
- int offset = SIGN_EXT_IMM16(instr->Bits(15, 0));
- WriteH(ra_val + offset, rs_val, instr);
- if (opcode == STHU) {
- DCHECK(ra != 0);
- set_register(ra, ra_val + offset);
- }
- break;
- }
-
- case LMW:
- case STMW: {
- UNIMPLEMENTED();
- break;
- }
-
- case LFSU:
- case LFS: {
- int frt = instr->RTValue();
- int ra = instr->RAValue();
- int32_t offset = SIGN_EXT_IMM16(instr->Bits(15, 0));
- intptr_t ra_val = ra == 0 ? 0 : get_register(ra);
- int32_t val = ReadW(ra_val + offset, instr);
- float* fptr = reinterpret_cast<float*>(&val);
-// Conversion using double changes sNan to qNan on ia32/x64
-#if V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64
- if (val == 0x7fa00000) {
- set_d_register(frt, 0x7ff4000000000000);
- } else {
-#endif
- set_d_register_from_double(frt, static_cast<double>(*fptr));
-#if V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64
- }
-#endif
- if (opcode == LFSU) {
- DCHECK(ra != 0);
- set_register(ra, ra_val + offset);
- }
- break;
- }
-
- case LFDU:
- case LFD: {
- int frt = instr->RTValue();
- int ra = instr->RAValue();
- int32_t offset = SIGN_EXT_IMM16(instr->Bits(15, 0));
- intptr_t ra_val = ra == 0 ? 0 : get_register(ra);
- int64_t* dptr = reinterpret_cast<int64_t*>(ReadDW(ra_val + offset));
- set_d_register(frt, *dptr);
- if (opcode == LFDU) {
- DCHECK(ra != 0);
- set_register(ra, ra_val + offset);
- }
- break;
- }
-
- case STFSU: {
- case STFS:
- int frs = instr->RSValue();
- int ra = instr->RAValue();
- int32_t offset = SIGN_EXT_IMM16(instr->Bits(15, 0));
- intptr_t ra_val = ra == 0 ? 0 : get_register(ra);
- float frs_val = static_cast<float>(get_double_from_d_register(frs));
- int32_t* p;
-// Conversion using double changes sNan to qNan on ia32/x64
-#if V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64
- int64_t frs_isnan = get_d_register(frs);
- int32_t frs_nan_single = 0x7fa00000;
- if (frs_isnan == 0x7ff4000000000000) {
- p = &frs_nan_single;
- } else {
-#endif
- p = reinterpret_cast<int32_t*>(&frs_val);
-#if V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64
- }
-#endif
- WriteW(ra_val + offset, *p, instr);
- if (opcode == STFSU) {
- DCHECK(ra != 0);
- set_register(ra, ra_val + offset);
- }
- break;
- }
-
- case STFDU:
- case STFD: {
- int frs = instr->RSValue();
- int ra = instr->RAValue();
- int32_t offset = SIGN_EXT_IMM16(instr->Bits(15, 0));
- intptr_t ra_val = ra == 0 ? 0 : get_register(ra);
- int64_t frs_val = get_d_register(frs);
- WriteDW(ra_val + offset, frs_val);
- if (opcode == STFDU) {
- DCHECK(ra != 0);
- set_register(ra, ra_val + offset);
- }
- break;
- }
-
- case EXT3: {
- ExecuteExt3(instr);
- break;
- }
- case EXT4: {
- ExecuteExt4(instr);
- break;
- }
-
-#if V8_TARGET_ARCH_PPC64
- case EXT5: {
- ExecuteExt5(instr);
- break;
- }
- case LD: {
- int ra = instr->RAValue();
- int rt = instr->RTValue();
- int64_t ra_val = ra == 0 ? 0 : get_register(ra);
- int offset = SIGN_EXT_IMM16(instr->Bits(15, 0) & ~3);
- switch (instr->Bits(1, 0)) {
- case 0: { // ld
- intptr_t* result = ReadDW(ra_val + offset);
- set_register(rt, *result);
- break;
- }
- case 1: { // ldu
- intptr_t* result = ReadDW(ra_val + offset);
- set_register(rt, *result);
- DCHECK(ra != 0);
- set_register(ra, ra_val + offset);
- break;
- }
- case 2: { // lwa
- intptr_t result = ReadW(ra_val + offset, instr);
- set_register(rt, result);
- break;
- }
- }
- break;
- }
-
- case STD: {
- int ra = instr->RAValue();
- int rs = instr->RSValue();
- int64_t ra_val = ra == 0 ? 0 : get_register(ra);
- int64_t rs_val = get_register(rs);
- int offset = SIGN_EXT_IMM16(instr->Bits(15, 0) & ~3);
- WriteDW(ra_val + offset, rs_val);
- if (instr->Bit(0) == 1) { // This is the STDU form
- DCHECK(ra != 0);
- set_register(ra, ra_val + offset);
- }
- break;
- }
-#endif
- case EXT6: {
- ExecuteExt6(instr);
- break;
- }
default: {
UNIMPLEMENTED();
@@ -4212,6 +4263,169 @@ uintptr_t Simulator::PopAddress() {
set_register(sp, current_sp + sizeof(uintptr_t));
return address;
}
+
+Simulator::LocalMonitor::LocalMonitor()
+ : access_state_(MonitorAccess::Open),
+ tagged_addr_(0),
+ size_(TransactionSize::None) {}
+
+void Simulator::LocalMonitor::Clear() {
+ access_state_ = MonitorAccess::Open;
+ tagged_addr_ = 0;
+ size_ = TransactionSize::None;
+}
+
+void Simulator::LocalMonitor::NotifyLoad(int32_t addr) {
+ if (access_state_ == MonitorAccess::Exclusive) {
+ // A load could cause a cache eviction which will affect the monitor, so
+ // the most conservative behaviour is to unconditionally clear the local
+ // monitor on a load.
+ Clear();
+ }
+}
+
+void Simulator::LocalMonitor::NotifyLoadExcl(int32_t addr,
+ TransactionSize size) {
+ access_state_ = MonitorAccess::Exclusive;
+ tagged_addr_ = addr;
+ size_ = size;
+}
+
+void Simulator::LocalMonitor::NotifyStore(int32_t addr) {
+ if (access_state_ == MonitorAccess::Exclusive) {
+ // A store could cause a cache eviction which will affect the monitor, so
+ // the most conservative behaviour is to unconditionally clear the local
+ // monitor on a store.
+ Clear();
+ }
+}
+
+bool Simulator::LocalMonitor::NotifyStoreExcl(int32_t addr,
+ TransactionSize size) {
+ if (access_state_ == MonitorAccess::Exclusive) {
+ if (addr == tagged_addr_ && size_ == size) {
+ Clear();
+ return true;
+ } else {
+ Clear();
+ return false;
+ }
+ } else {
+ DCHECK(access_state_ == MonitorAccess::Open);
+ return false;
+ }
+}
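A self-contained walk-through of the rules this state machine enforces. ToyLocalMonitor is a hypothetical, stripped-down stand-in (the real class is private to Simulator, and size checking is omitted here); it only mirrors the rule that a store-conditional succeeds when it matches the tagged address of the last load-reserve and nothing cleared the reservation in between.

#include <cassert>
#include <cstdint>

struct ToyLocalMonitor {
  bool exclusive = false;
  int32_t tagged = 0;
  void NotifyLoadExcl(int32_t addr) { exclusive = true; tagged = addr; }
  void NotifyStore(int32_t) { exclusive = false; }  // any store drops it
  bool NotifyStoreExcl(int32_t addr) {
    bool ok = exclusive && addr == tagged;
    exclusive = false;  // the reservation is consumed either way
    return ok;
  }
};

int main() {
  ToyLocalMonitor m;
  m.NotifyLoadExcl(0x100);
  assert(m.NotifyStoreExcl(0x100));    // matching stwcx. succeeds
  m.NotifyLoadExcl(0x100);
  m.NotifyStore(0x200);                // an intervening store clears it
  assert(!m.NotifyStoreExcl(0x100));   // so the stwcx. now fails
}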
+
+Simulator::GlobalMonitor::Processor::Processor()
+ : access_state_(MonitorAccess::Open),
+ tagged_addr_(0),
+ next_(nullptr),
+ prev_(nullptr) {}
+
+void Simulator::GlobalMonitor::Processor::Clear_Locked() {
+ access_state_ = MonitorAccess::Open;
+ tagged_addr_ = 0;
+}
+void Simulator::GlobalMonitor::Processor::NotifyLoadExcl_Locked(int32_t addr) {
+ access_state_ = MonitorAccess::Exclusive;
+ tagged_addr_ = addr;
+}
+
+void Simulator::GlobalMonitor::Processor::NotifyStore_Locked(
+ int32_t addr, bool is_requesting_processor) {
+ if (access_state_ == MonitorAccess::Exclusive) {
+ // It is possible that a store caused a cache eviction,
+ // which can affect the monitor, so conservatively
+ // we always clear the monitor.
+ Clear_Locked();
+ }
+}
+
+bool Simulator::GlobalMonitor::Processor::NotifyStoreExcl_Locked(
+ int32_t addr, bool is_requesting_processor) {
+ if (access_state_ == MonitorAccess::Exclusive) {
+ if (is_requesting_processor) {
+ if (addr == tagged_addr_) {
+ Clear_Locked();
+ return true;
+ }
+ } else if (addr == tagged_addr_) {
+ Clear_Locked();
+ return false;
+ }
+ }
+ return false;
+}
+
+Simulator::GlobalMonitor::GlobalMonitor() : head_(nullptr) {}
+
+void Simulator::GlobalMonitor::NotifyLoadExcl_Locked(int32_t addr,
+ Processor* processor) {
+ processor->NotifyLoadExcl_Locked(addr);
+ PrependProcessor_Locked(processor);
+}
+
+void Simulator::GlobalMonitor::NotifyStore_Locked(int32_t addr,
+ Processor* processor) {
+ // Notify each processor of the store operation.
+ for (Processor* iter = head_; iter; iter = iter->next_) {
+ bool is_requesting_processor = iter == processor;
+ iter->NotifyStore_Locked(addr, is_requesting_processor);
+ }
+}
+
+bool Simulator::GlobalMonitor::NotifyStoreExcl_Locked(int32_t addr,
+ Processor* processor) {
+ DCHECK(IsProcessorInLinkedList_Locked(processor));
+ if (processor->NotifyStoreExcl_Locked(addr, true)) {
+ // Notify the other processors that this StoreExcl succeeded.
+ for (Processor* iter = head_; iter; iter = iter->next_) {
+ if (iter != processor) {
+ iter->NotifyStoreExcl_Locked(addr, false);
+ }
+ }
+ return true;
+ } else {
+ return false;
+ }
+}
+
+bool Simulator::GlobalMonitor::IsProcessorInLinkedList_Locked(
+ Processor* processor) const {
+ return head_ == processor || processor->next_ || processor->prev_;
+}
+
+void Simulator::GlobalMonitor::PrependProcessor_Locked(Processor* processor) {
+ if (IsProcessorInLinkedList_Locked(processor)) {
+ return;
+ }
+
+ if (head_) {
+ head_->prev_ = processor;
+ }
+ processor->prev_ = nullptr;
+ processor->next_ = head_;
+ head_ = processor;
+}
+
+void Simulator::GlobalMonitor::RemoveProcessor(Processor* processor) {
+ base::LockGuard<base::Mutex> lock_guard(&mutex);
+ if (!IsProcessorInLinkedList_Locked(processor)) {
+ return;
+ }
+
+ if (processor->prev_) {
+ processor->prev_->next_ = processor->next_;
+ } else {
+ head_ = processor->next_;
+ }
+ if (processor->next_) {
+ processor->next_->prev_ = processor->prev_;
+ }
+ processor->prev_ = nullptr;
+ processor->next_ = nullptr;
+}
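The global monitor ties the per-processor reservations together: a successful store-conditional on one simulated processor clears any other processor's reservation on the same address, and ordinary stores clear matching reservations everywhere. A compact, hypothetical stand-in (again not the real class, which is nested in Simulator and keeps its processors in an intrusive list) showing that cross-processor rule:

#include <cassert>
#include <cstdint>
#include <vector>

struct ToyProcessor {
  bool exclusive = false;
  int32_t tagged = 0;
};

struct ToyGlobalMonitor {
  std::vector<ToyProcessor*> processors;

  void NotifyLoadExcl(int32_t addr, ToyProcessor* p) {
    p->exclusive = true;
    p->tagged = addr;
  }

  bool NotifyStoreExcl(int32_t addr, ToyProcessor* requester) {
    if (!(requester->exclusive && requester->tagged == addr)) return false;
    requester->exclusive = false;
    for (ToyProcessor* other : processors) {
      // A successful store-conditional invalidates competing reservations.
      if (other != requester && other->exclusive && other->tagged == addr) {
        other->exclusive = false;
      }
    }
    return true;
  }
};

int main() {
  ToyProcessor a, b;
  ToyGlobalMonitor g;
  g.processors = {&a, &b};
  g.NotifyLoadExcl(0x100, &a);
  g.NotifyLoadExcl(0x100, &b);
  assert(g.NotifyStoreExcl(0x100, &a));   // a wins the race...
  assert(!g.NotifyStoreExcl(0x100, &b));  // ...so b's reservation is gone
}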
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/ppc/simulator-ppc.h b/deps/v8/src/ppc/simulator-ppc.h
index 91e7f05ea5..0c23b04a37 100644
--- a/deps/v8/src/ppc/simulator-ppc.h
+++ b/deps/v8/src/ppc/simulator-ppc.h
@@ -289,19 +289,25 @@ class Simulator {
// Read and write memory.
inline uint8_t ReadBU(intptr_t addr);
+ inline uint8_t ReadExBU(intptr_t addr);
inline int8_t ReadB(intptr_t addr);
inline void WriteB(intptr_t addr, uint8_t value);
+ inline int WriteExB(intptr_t addr, uint8_t value);
inline void WriteB(intptr_t addr, int8_t value);
inline uint16_t ReadHU(intptr_t addr, Instruction* instr);
+ inline uint16_t ReadExHU(intptr_t addr, Instruction* instr);
inline int16_t ReadH(intptr_t addr, Instruction* instr);
// Note: Overloaded on the sign of the value.
inline void WriteH(intptr_t addr, uint16_t value, Instruction* instr);
+ inline int WriteExH(intptr_t addr, uint16_t value, Instruction* instr);
inline void WriteH(intptr_t addr, int16_t value, Instruction* instr);
inline uint32_t ReadWU(intptr_t addr, Instruction* instr);
+ inline uint32_t ReadExWU(intptr_t addr, Instruction* instr);
inline int32_t ReadW(intptr_t addr, Instruction* instr);
inline void WriteW(intptr_t addr, uint32_t value, Instruction* instr);
+ inline int WriteExW(intptr_t addr, uint32_t value, Instruction* instr);
inline void WriteW(intptr_t addr, int32_t value, Instruction* instr);
intptr_t* ReadDW(intptr_t addr);
@@ -311,7 +317,8 @@ class Simulator {
void SetCR0(intptr_t result, bool setSO = false);
void ExecuteBranchConditional(Instruction* instr, BCType type);
void ExecuteExt1(Instruction* instr);
- bool ExecuteExt2_10bit(Instruction* instr);
+ bool ExecuteExt2_10bit_part1(Instruction* instr);
+ bool ExecuteExt2_10bit_part2(Instruction* instr);
bool ExecuteExt2_9bit_part1(Instruction* instr);
bool ExecuteExt2_9bit_part2(Instruction* instr);
void ExecuteExt2_5bit(Instruction* instr);
@@ -398,6 +405,84 @@ class Simulator {
char* desc;
};
StopCountAndDesc watched_stops_[kNumOfWatchedStops];
+
+ // Synchronization primitives. See ARM DDI 0406C.b, A2.9.
+ enum class MonitorAccess {
+ Open,
+ Exclusive,
+ };
+
+ enum class TransactionSize {
+ None = 0,
+ Byte = 1,
+ HalfWord = 2,
+ Word = 4,
+ };
+
+ class LocalMonitor {
+ public:
+ LocalMonitor();
+
+ // These functions manage the state machine for the local monitor, but do
+ // not actually perform loads and stores. NotifyStoreExcl only returns
+ // true if the exclusive store is allowed; the global monitor will still
+ // have to be checked to see whether the memory should be updated.
+ void NotifyLoad(int32_t addr);
+ void NotifyLoadExcl(int32_t addr, TransactionSize size);
+ void NotifyStore(int32_t addr);
+ bool NotifyStoreExcl(int32_t addr, TransactionSize size);
+
+ private:
+ void Clear();
+
+ MonitorAccess access_state_;
+ int32_t tagged_addr_;
+ TransactionSize size_;
+ };
+
+ class GlobalMonitor {
+ public:
+ GlobalMonitor();
+
+ class Processor {
+ public:
+ Processor();
+
+ private:
+ friend class GlobalMonitor;
+ // These functions manage the state machine for the global monitor, but do
+ // not actually perform loads and stores.
+ void Clear_Locked();
+ void NotifyLoadExcl_Locked(int32_t addr);
+ void NotifyStore_Locked(int32_t addr, bool is_requesting_processor);
+ bool NotifyStoreExcl_Locked(int32_t addr, bool is_requesting_processor);
+
+ MonitorAccess access_state_;
+ int32_t tagged_addr_;
+ Processor* next_;
+ Processor* prev_;
+ };
+
+ // Exposed so it can be accessed by Simulator::{Read,Write}Ex*.
+ base::Mutex mutex;
+
+ void NotifyLoadExcl_Locked(int32_t addr, Processor* processor);
+ void NotifyStore_Locked(int32_t addr, Processor* processor);
+ bool NotifyStoreExcl_Locked(int32_t addr, Processor* processor);
+
+ // Called when the simulator is destroyed.
+ void RemoveProcessor(Processor* processor);
+
+ private:
+ bool IsProcessorInLinkedList_Locked(Processor* processor) const;
+ void PrependProcessor_Locked(Processor* processor);
+
+ Processor* head_;
+ };
+
+ LocalMonitor local_monitor_;
+ GlobalMonitor::Processor global_monitor_processor_;
+ static base::LazyInstance<GlobalMonitor>::type global_monitor_;
};