summaryrefslogtreecommitdiff
path: root/deps/v8/src/ppc
diff options
context:
space:
mode:
authorMichaël Zasso <targos@protonmail.com>2018-01-24 20:16:06 +0100
committerMyles Borins <mylesborins@google.com>2018-01-24 15:02:20 -0800
commit4c4af643e5042d615a60c6bbc05aee9d81b903e5 (patch)
tree3fb0a97988fe4439ae3ae06f26915d1dcf8cab92 /deps/v8/src/ppc
parentfa9f31a4fda5a3782c652e56e394465805ebb50f (diff)
downloadandroid-node-v8-4c4af643e5042d615a60c6bbc05aee9d81b903e5.tar.gz
android-node-v8-4c4af643e5042d615a60c6bbc05aee9d81b903e5.tar.bz2
android-node-v8-4c4af643e5042d615a60c6bbc05aee9d81b903e5.zip
deps: update V8 to 6.4.388.40
PR-URL: https://github.com/nodejs/node/pull/17489 Reviewed-By: Colin Ihrig <cjihrig@gmail.com> Reviewed-By: Matteo Collina <matteo.collina@gmail.com> Reviewed-By: Myles Borins <myles.borins@gmail.com> Reviewed-By: Ali Ijaz Sheikh <ofrobots@google.com>
Diffstat (limited to 'deps/v8/src/ppc')
-rw-r--r--deps/v8/src/ppc/OWNERS3
-rw-r--r--deps/v8/src/ppc/assembler-ppc-inl.h20
-rw-r--r--deps/v8/src/ppc/assembler-ppc.cc37
-rw-r--r--deps/v8/src/ppc/assembler-ppc.h20
-rw-r--r--deps/v8/src/ppc/code-stubs-ppc.cc710
-rw-r--r--deps/v8/src/ppc/code-stubs-ppc.h242
-rw-r--r--deps/v8/src/ppc/codegen-ppc.cc117
-rw-r--r--deps/v8/src/ppc/codegen-ppc.h28
-rw-r--r--deps/v8/src/ppc/deoptimizer-ppc.cc3
-rw-r--r--deps/v8/src/ppc/disasm-ppc.cc14
-rw-r--r--deps/v8/src/ppc/frame-constants-ppc.cc5
-rw-r--r--deps/v8/src/ppc/interface-descriptors-ppc.cc18
-rw-r--r--deps/v8/src/ppc/macro-assembler-ppc.cc401
-rw-r--r--deps/v8/src/ppc/macro-assembler-ppc.h96
-rw-r--r--deps/v8/src/ppc/simulator-ppc.cc130
15 files changed, 335 insertions, 1509 deletions
diff --git a/deps/v8/src/ppc/OWNERS b/deps/v8/src/ppc/OWNERS
index 752e8e3d81..cf60da5cc7 100644
--- a/deps/v8/src/ppc/OWNERS
+++ b/deps/v8/src/ppc/OWNERS
@@ -3,4 +3,5 @@ dstence@us.ibm.com
joransiu@ca.ibm.com
mbrandy@us.ibm.com
michael_dawson@ca.ibm.com
-bjaideep@ca.ibm.com
+jbarboza@ca.ibm.com
+mmallick@ca.ibm.com
diff --git a/deps/v8/src/ppc/assembler-ppc-inl.h b/deps/v8/src/ppc/assembler-ppc-inl.h
index e458364027..d9b12ac8db 100644
--- a/deps/v8/src/ppc/assembler-ppc-inl.h
+++ b/deps/v8/src/ppc/assembler-ppc-inl.h
@@ -86,12 +86,12 @@ Address RelocInfo::target_internal_reference_address() {
Address RelocInfo::target_address() {
- DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
+ DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_) || IsWasmCall(rmode_));
return Assembler::target_address_at(pc_, host_);
}
Address RelocInfo::target_address_address() {
- DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_) ||
+ DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_) || IsWasmCall(rmode_) ||
rmode_ == EMBEDDED_OBJECT || rmode_ == EXTERNAL_REFERENCE);
if (FLAG_enable_embedded_constant_pool &&
@@ -131,14 +131,14 @@ Address RelocInfo::constant_pool_entry_address() {
int RelocInfo::target_address_size() { return Assembler::kSpecialTargetSize; }
Address Assembler::target_address_at(Address pc, Code* code) {
- Address constant_pool = code ? code->constant_pool() : NULL;
+ Address constant_pool = code ? code->constant_pool() : nullptr;
return target_address_at(pc, constant_pool);
}
void Assembler::set_target_address_at(Isolate* isolate, Address pc, Code* code,
Address target,
ICacheFlushMode icache_flush_mode) {
- Address constant_pool = code ? code->constant_pool() : NULL;
+ Address constant_pool = code ? code->constant_pool() : nullptr;
set_target_address_at(isolate, pc, constant_pool, target, icache_flush_mode);
}
@@ -193,7 +193,7 @@ void RelocInfo::set_target_object(HeapObject* target,
Assembler::set_target_address_at(target->GetIsolate(), pc_, host_,
reinterpret_cast<Address>(target),
icache_flush_mode);
- if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != NULL) {
+ if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != nullptr) {
host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(host(), this,
target);
host()->GetHeap()->RecordWriteIntoCode(host(), this, target);
@@ -226,14 +226,14 @@ void RelocInfo::WipeOut(Isolate* isolate) {
IsInternalReference(rmode_) || IsInternalReferenceEncoded(rmode_));
if (IsInternalReference(rmode_)) {
// Jump table entry
- Memory::Address_at(pc_) = NULL;
+ Memory::Address_at(pc_) = nullptr;
} else if (IsInternalReferenceEncoded(rmode_)) {
// mov sequence
// Currently used only by deserializer, no need to flush.
- Assembler::set_target_address_at(isolate, pc_, host_, NULL,
+ Assembler::set_target_address_at(isolate, pc_, host_, nullptr,
SKIP_ICACHE_FLUSH);
} else {
- Assembler::set_target_address_at(isolate, pc_, host_, NULL);
+ Assembler::set_target_address_at(isolate, pc_, host_, nullptr);
}
}
@@ -258,7 +258,7 @@ Operand::Operand(Register rm) : rm_(rm), rmode_(kRelocInfo_NONEPTR) {}
void Assembler::UntrackBranch() {
DCHECK(!trampoline_emitted_);
- DCHECK(tracked_branch_count_ > 0);
+ DCHECK_GT(tracked_branch_count_, 0);
int count = --tracked_branch_count_;
if (count == 0) {
// Reset
@@ -435,7 +435,7 @@ void Assembler::deserialization_set_special_target_at(
void Assembler::deserialization_set_target_internal_reference_at(
Isolate* isolate, Address pc, Address target, RelocInfo::Mode mode) {
if (RelocInfo::IsInternalReferenceEncoded(mode)) {
- Code* code = NULL;
+ Code* code = nullptr;
set_target_address_at(isolate, pc, code, target, SKIP_ICACHE_FLUSH);
} else {
Memory::Address_at(pc) = target;
diff --git a/deps/v8/src/ppc/assembler-ppc.cc b/deps/v8/src/ppc/assembler-ppc.cc
index 32758092c4..0c4a518772 100644
--- a/deps/v8/src/ppc/assembler-ppc.cc
+++ b/deps/v8/src/ppc/assembler-ppc.cc
@@ -111,7 +111,7 @@ void CpuFeatures::ProbeImpl(bool cross_compile) {
void CpuFeatures::PrintTarget() {
- const char* ppc_arch = NULL;
+ const char* ppc_arch = nullptr;
#if V8_TARGET_ARCH_PPC64
ppc_arch = "ppc64";
@@ -155,7 +155,7 @@ bool RelocInfo::IsCodedSpecially() {
bool RelocInfo::IsInConstantPool() {
- if (FLAG_enable_embedded_constant_pool && host_ != NULL) {
+ if (FLAG_enable_embedded_constant_pool && host_ != nullptr) {
Address constant_pool = host_->constant_pool();
return (constant_pool && Assembler::IsConstantPoolLoadStart(pc_));
}
@@ -182,6 +182,17 @@ void RelocInfo::set_embedded_size(Isolate* isolate, uint32_t size,
reinterpret_cast<Address>(size), flush_mode);
}
+void RelocInfo::set_js_to_wasm_address(Isolate* isolate, Address address,
+ ICacheFlushMode icache_flush_mode) {
+ DCHECK_EQ(rmode_, JS_TO_WASM_CALL);
+ set_embedded_address(isolate, address, icache_flush_mode);
+}
+
+Address RelocInfo::js_to_wasm_address() const {
+ DCHECK_EQ(rmode_, JS_TO_WASM_CALL);
+ return embedded_address();
+}
+
// -----------------------------------------------------------------------------
// Implementation of Operand and MemOperand
// See assembler-ppc-inl.h for inlined constructors
@@ -228,7 +239,7 @@ void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
break;
}
Address pc = buffer_ + request.offset();
- Address constant_pool = NULL;
+ Address constant_pool = nullptr;
set_target_address_at(nullptr, pc, constant_pool,
reinterpret_cast<Address>(object.location()),
SKIP_ICACHE_FLUSH);
@@ -277,7 +288,7 @@ void Assembler::GetCode(Isolate* isolate, CodeDesc* desc) {
void Assembler::Align(int m) {
DCHECK(m >= 4 && base::bits::IsPowerOfTwo(m));
- DCHECK((pc_offset() & (kInstrSize - 1)) == 0);
+ DCHECK_EQ(pc_offset() & (kInstrSize - 1), 0);
while ((pc_offset() & (m - 1)) != 0) {
nop();
}
@@ -569,7 +580,7 @@ void Assembler::bind_to(Label* L, int pos) {
if (maxReach && is_intn(offset, maxReach) == false) {
if (trampoline_pos == kInvalidSlotPos) {
trampoline_pos = get_trampoline_entry();
- CHECK(trampoline_pos != kInvalidSlotPos);
+ CHECK_NE(trampoline_pos, kInvalidSlotPos);
target_at_put(trampoline_pos, pos);
}
target_at_put(fixup_pos, trampoline_pos);
@@ -601,7 +612,7 @@ void Assembler::next(Label* L) {
if (link == kEndOfChain) {
L->Unuse();
} else {
- DCHECK(link >= 0);
+ DCHECK_GE(link, 0);
L->link_to(link);
}
}
@@ -1228,7 +1239,7 @@ void Assembler::divdu(Register dst, Register src1, Register src2, OEBit o,
void Assembler::function_descriptor() {
if (ABI_USES_FUNCTION_DESCRIPTORS) {
Label instructions;
- DCHECK(pc_offset() == 0);
+ DCHECK_EQ(pc_offset(), 0);
emit_label_addr(&instructions);
dp(0);
dp(0);
@@ -1288,7 +1299,7 @@ void Assembler::EnsureSpaceFor(int space_needed) {
bool Operand::must_output_reloc_info(const Assembler* assembler) const {
if (rmode_ == RelocInfo::EXTERNAL_REFERENCE) {
- if (assembler != NULL && assembler->predictable_code_size()) return true;
+ if (assembler != nullptr && assembler->predictable_code_size()) return true;
return assembler->serializer_enabled();
} else if (RelocInfo::IsNone(rmode_)) {
return false;
@@ -1507,7 +1518,7 @@ void Assembler::mov_label_addr(Register dst, Label* label) {
BlockTrampolinePoolScope block_trampoline_pool(this);
emit(kUnboundMovLabelAddrOpcode | (link & kImm26Mask));
emit(dst.code());
- DCHECK(kMovInstructionsNoConstantPool >= 2);
+ DCHECK_GE(kMovInstructionsNoConstantPool, 2);
for (int i = 0; i < kMovInstructionsNoConstantPool - 2; i++) nop();
}
}
@@ -1573,7 +1584,7 @@ void Assembler::mtxer(Register src) {
void Assembler::mcrfs(CRegister cr, FPSCRBit bit) {
- DCHECK(static_cast<int>(bit) < 32);
+ DCHECK_LT(static_cast<int>(bit), 32);
int bf = cr.code();
int bfa = bit / CRWIDTH;
emit(EXT4 | MCRFS | bf * B23 | bfa * B18);
@@ -1879,14 +1890,14 @@ void Assembler::fneg(const DoubleRegister frt, const DoubleRegister frb,
void Assembler::mtfsb0(FPSCRBit bit, RCBit rc) {
- DCHECK(static_cast<int>(bit) < 32);
+ DCHECK_LT(static_cast<int>(bit), 32);
int bt = bit;
emit(EXT4 | MTFSB0 | bt * B21 | rc);
}
void Assembler::mtfsb1(FPSCRBit bit, RCBit rc) {
- DCHECK(static_cast<int>(bit) < 32);
+ DCHECK_LT(static_cast<int>(bit), 32);
int bt = bit;
emit(EXT4 | MTFSB1 | bt * B21 | rc);
}
@@ -2074,7 +2085,7 @@ void Assembler::EmitRelocations() {
it != relocations_.end(); it++) {
RelocInfo::Mode rmode = it->rmode();
Address pc = buffer_ + it->position();
- Code* code = NULL;
+ Code* code = nullptr;
RelocInfo rinfo(pc, rmode, it->data(), code);
// Fix up internal references now that they are guaranteed to be bound.
diff --git a/deps/v8/src/ppc/assembler-ppc.h b/deps/v8/src/ppc/assembler-ppc.h
index d1411c142d..77c1422424 100644
--- a/deps/v8/src/ppc/assembler-ppc.h
+++ b/deps/v8/src/ppc/assembler-ppc.h
@@ -341,7 +341,7 @@ typedef DoubleRegister Simd128Register;
constexpr DoubleRegister R = DoubleRegister::from_code<kDoubleCode_##R>();
DOUBLE_REGISTERS(DEFINE_REGISTER)
#undef DEFINE_REGISTER
-constexpr Register no_dreg = Register::no_reg();
+constexpr DoubleRegister no_dreg = DoubleRegister::no_reg();
constexpr DoubleRegister kFirstCalleeSavedDoubleReg = d14;
constexpr DoubleRegister kLastCalleeSavedDoubleReg = d31;
@@ -461,12 +461,10 @@ class MemOperand BASE_EMBEDDED {
// PowerPC - base register
Register ra() const {
- DCHECK(ra_ != no_reg);
return ra_;
}
Register rb() const {
- DCHECK(offset_ == 0 && rb_ != no_reg);
return rb_;
}
@@ -503,14 +501,15 @@ class Assembler : public AssemblerBase {
// relocation information starting from the end of the buffer. See CodeDesc
// for a detailed comment on the layout (globals.h).
//
- // If the provided buffer is NULL, the assembler allocates and grows its own
- // buffer, and buffer_size determines the initial buffer size. The buffer is
- // owned by the assembler and deallocated upon destruction of the assembler.
+ // If the provided buffer is nullptr, the assembler allocates and grows its
+ // own buffer, and buffer_size determines the initial buffer size. The buffer
+ // is owned by the assembler and deallocated upon destruction of the
+ // assembler.
//
- // If the provided buffer is not NULL, the assembler uses the provided buffer
- // for code generation and assumes its size to be buffer_size. If the buffer
- // is too small, a fatal error occurs. No deallocation of the buffer is done
- // upon destruction of the assembler.
+ // If the provided buffer is not nullptr, the assembler uses the provided
+ // buffer for code generation and assumes its size to be buffer_size. If the
+ // buffer is too small, a fatal error occurs. No deallocation of the buffer is
+ // done upon destruction of the assembler.
Assembler(Isolate* isolate, void* buffer, int buffer_size)
: Assembler(IsolateData(isolate), buffer, buffer_size) {}
Assembler(IsolateData isolate_data, void* buffer, int buffer_size);
@@ -1641,7 +1640,6 @@ class Assembler : public AssemblerBase {
friend class RegExpMacroAssemblerPPC;
friend class RelocInfo;
- friend class CodePatcher;
friend class BlockTrampolinePoolScope;
friend class EnsureSpace;
diff --git a/deps/v8/src/ppc/code-stubs-ppc.cc b/deps/v8/src/ppc/code-stubs-ppc.cc
index 7dcc543b87..d5af6bfec0 100644
--- a/deps/v8/src/ppc/code-stubs-ppc.cc
+++ b/deps/v8/src/ppc/code-stubs-ppc.cc
@@ -9,11 +9,9 @@
#include "src/base/bits.h"
#include "src/bootstrapper.h"
#include "src/code-stubs.h"
-#include "src/codegen.h"
#include "src/double.h"
#include "src/frame-constants.h"
#include "src/frames.h"
-#include "src/ic/handler-compiler.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
#include "src/isolate.h"
@@ -40,52 +38,45 @@ void ArrayNArgumentsConstructorStub::Generate(MacroAssembler* masm) {
void DoubleToIStub::Generate(MacroAssembler* masm) {
Label out_of_range, only_low, negate, done, fastpath_done;
- Register input_reg = source();
Register result_reg = destination();
- DCHECK(is_truncating());
-
- int double_offset = offset();
// Immediate values for this stub fit in instructions, so it's safe to use ip.
- Register scratch = GetRegisterThatIsNotOneOf(input_reg, result_reg);
- Register scratch_low =
- GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch);
+ Register scratch = GetRegisterThatIsNotOneOf(result_reg);
+ Register scratch_low = GetRegisterThatIsNotOneOf(result_reg, scratch);
Register scratch_high =
- GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch, scratch_low);
+ GetRegisterThatIsNotOneOf(result_reg, scratch, scratch_low);
DoubleRegister double_scratch = kScratchDoubleReg;
__ push(scratch);
- // Account for saved regs if input is sp.
- if (input_reg == sp) double_offset += kPointerSize;
+ // Account for saved regs.
+ int argument_offset = 1 * kPointerSize;
- if (!skip_fastpath()) {
- // Load double input.
- __ lfd(double_scratch, MemOperand(input_reg, double_offset));
+ // Load double input.
+ __ lfd(double_scratch, MemOperand(sp, argument_offset));
- // Do fast-path convert from double to int.
- __ ConvertDoubleToInt64(double_scratch,
+ // Do fast-path convert from double to int.
+ __ ConvertDoubleToInt64(double_scratch,
#if !V8_TARGET_ARCH_PPC64
- scratch,
+ scratch,
#endif
- result_reg, d0);
+ result_reg, d0);
// Test for overflow
#if V8_TARGET_ARCH_PPC64
- __ TestIfInt32(result_reg, r0);
+ __ TestIfInt32(result_reg, r0);
#else
- __ TestIfInt32(scratch, result_reg, r0);
+ __ TestIfInt32(scratch, result_reg, r0);
#endif
- __ beq(&fastpath_done);
- }
+ __ beq(&fastpath_done);
__ Push(scratch_high, scratch_low);
- // Account for saved regs if input is sp.
- if (input_reg == sp) double_offset += 2 * kPointerSize;
+ // Account for saved regs.
+ argument_offset += 2 * kPointerSize;
__ lwz(scratch_high,
- MemOperand(input_reg, double_offset + Register::kExponentOffset));
+ MemOperand(sp, argument_offset + Register::kExponentOffset));
__ lwz(scratch_low,
- MemOperand(input_reg, double_offset + Register::kMantissaOffset));
+ MemOperand(sp, argument_offset + Register::kMantissaOffset));
__ ExtractBitMask(scratch, scratch_high, HeapNumber::kExponentMask);
// Load scratch with exponent - 1. This is faster than loading
@@ -157,46 +148,6 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
__ Ret();
}
-
-void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
- // We don't allow a GC during a store buffer overflow so there is no need to
- // store the registers in any particular way, but we do have to store and
- // restore them.
- __ mflr(r0);
- __ MultiPush(kJSCallerSaved | r0.bit());
- if (save_doubles()) {
- __ MultiPushDoubles(kCallerSavedDoubles);
- }
- const int argument_count = 1;
- const int fp_argument_count = 0;
- const Register scratch = r4;
-
- AllowExternalCallThatCantCauseGC scope(masm);
- __ PrepareCallCFunction(argument_count, fp_argument_count, scratch);
- __ mov(r3, Operand(ExternalReference::isolate_address(isolate())));
- __ CallCFunction(ExternalReference::store_buffer_overflow_function(isolate()),
- argument_count);
- if (save_doubles()) {
- __ MultiPopDoubles(kCallerSavedDoubles);
- }
- __ MultiPop(kJSCallerSaved | r0.bit());
- __ mtlr(r0);
- __ Ret();
-}
-
-
-void StoreRegistersStateStub::Generate(MacroAssembler* masm) {
- __ PushSafepointRegisters();
- __ blr();
-}
-
-
-void RestoreRegistersStateStub::Generate(MacroAssembler* masm) {
- __ PopSafepointRegisters();
- __ blr();
-}
-
-
void MathPowStub::Generate(MacroAssembler* masm) {
const Register exponent = MathPowTaggedDescriptor::exponent();
DCHECK(exponent == r5);
@@ -307,37 +258,19 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ Ret();
}
-
-bool CEntryStub::NeedsImmovableCode() { return true; }
-
+Movability CEntryStub::NeedsImmovableCode() { return kImmovable; }
void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
CEntryStub::GenerateAheadOfTime(isolate);
- StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
CommonArrayConstructorStub::GenerateStubsAheadOfTime(isolate);
- StoreRegistersStateStub::GenerateAheadOfTime(isolate);
- RestoreRegistersStateStub::GenerateAheadOfTime(isolate);
StoreFastElementStub::GenerateAheadOfTime(isolate);
}
-void StoreRegistersStateStub::GenerateAheadOfTime(Isolate* isolate) {
- StoreRegistersStateStub stub(isolate);
- stub.GetCode();
-}
-
-
-void RestoreRegistersStateStub::GenerateAheadOfTime(Isolate* isolate) {
- RestoreRegistersStateStub stub(isolate);
- stub.GetCode();
-}
-
-
void CodeStub::GenerateFPStubs(Isolate* isolate) {
// Generate if not already in cache.
SaveFPRegsMode mode = kSaveFPRegs;
CEntryStub(isolate, 1, mode).GetCode();
- StoreBufferOverflowStub(isolate, mode).GetCode();
}
@@ -470,7 +403,7 @@ void CEntryStub::Generate(MacroAssembler* masm) {
? no_reg
// r14: still holds argc (callee-saved).
: r14;
- __ LeaveExitFrame(save_doubles(), argc, true);
+ __ LeaveExitFrame(save_doubles(), argc);
__ blr();
// Handling of exception.
@@ -478,10 +411,10 @@ void CEntryStub::Generate(MacroAssembler* masm) {
ExternalReference pending_handler_context_address(
IsolateAddressId::kPendingHandlerContextAddress, isolate());
- ExternalReference pending_handler_code_address(
- IsolateAddressId::kPendingHandlerCodeAddress, isolate());
- ExternalReference pending_handler_offset_address(
- IsolateAddressId::kPendingHandlerOffsetAddress, isolate());
+ ExternalReference pending_handler_entrypoint_address(
+ IsolateAddressId::kPendingHandlerEntrypointAddress, isolate());
+ ExternalReference pending_handler_constant_pool_address(
+ IsolateAddressId::kPendingHandlerConstantPoolAddress, isolate());
ExternalReference pending_handler_fp_address(
IsolateAddressId::kPendingHandlerFPAddress, isolate());
ExternalReference pending_handler_sp_address(
@@ -518,15 +451,13 @@ void CEntryStub::Generate(MacroAssembler* masm) {
// Compute the handler entry address and jump to it.
ConstantPoolUnavailableScope constant_pool_unavailable(masm);
- __ mov(r4, Operand(pending_handler_code_address));
- __ LoadP(r4, MemOperand(r4));
- __ mov(r5, Operand(pending_handler_offset_address));
- __ LoadP(r5, MemOperand(r5));
- __ addi(r4, r4, Operand(Code::kHeaderSize - kHeapObjectTag)); // Code start
+ __ mov(ip, Operand(pending_handler_entrypoint_address));
+ __ LoadP(ip, MemOperand(ip));
if (FLAG_enable_embedded_constant_pool) {
- __ LoadConstantPoolPointerRegisterFromCodeTargetAddress(r4);
+ __ mov(kConstantPoolRegister,
+ Operand(pending_handler_constant_pool_address));
+ __ LoadP(kConstantPoolRegister, MemOperand(kConstantPoolRegister));
}
- __ add(ip, r4, r5);
__ Jump(ip);
}
@@ -677,125 +608,6 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
__ blr();
}
-void StringHelper::GenerateFlatOneByteStringEquals(MacroAssembler* masm,
- Register left,
- Register right,
- Register scratch1,
- Register scratch2) {
- Register length = scratch1;
-
- // Compare lengths.
- Label strings_not_equal, check_zero_length;
- __ LoadP(length, FieldMemOperand(left, String::kLengthOffset));
- __ LoadP(scratch2, FieldMemOperand(right, String::kLengthOffset));
- __ cmp(length, scratch2);
- __ beq(&check_zero_length);
- __ bind(&strings_not_equal);
- __ LoadSmiLiteral(r3, Smi::FromInt(NOT_EQUAL));
- __ Ret();
-
- // Check if the length is zero.
- Label compare_chars;
- __ bind(&check_zero_length);
- STATIC_ASSERT(kSmiTag == 0);
- __ cmpi(length, Operand::Zero());
- __ bne(&compare_chars);
- __ LoadSmiLiteral(r3, Smi::FromInt(EQUAL));
- __ Ret();
-
- // Compare characters.
- __ bind(&compare_chars);
- GenerateOneByteCharsCompareLoop(masm, left, right, length, scratch2,
- &strings_not_equal);
-
- // Characters are equal.
- __ LoadSmiLiteral(r3, Smi::FromInt(EQUAL));
- __ Ret();
-}
-
-
-void StringHelper::GenerateCompareFlatOneByteStrings(
- MacroAssembler* masm, Register left, Register right, Register scratch1,
- Register scratch2, Register scratch3) {
- Label result_not_equal, compare_lengths;
- // Find minimum length and length difference.
- __ LoadP(scratch1, FieldMemOperand(left, String::kLengthOffset));
- __ LoadP(scratch2, FieldMemOperand(right, String::kLengthOffset));
- __ sub(scratch3, scratch1, scratch2, LeaveOE, SetRC);
- Register length_delta = scratch3;
- if (CpuFeatures::IsSupported(ISELECT)) {
- __ isel(gt, scratch1, scratch2, scratch1, cr0);
- } else {
- Label skip;
- __ ble(&skip, cr0);
- __ mr(scratch1, scratch2);
- __ bind(&skip);
- }
- Register min_length = scratch1;
- STATIC_ASSERT(kSmiTag == 0);
- __ cmpi(min_length, Operand::Zero());
- __ beq(&compare_lengths);
-
- // Compare loop.
- GenerateOneByteCharsCompareLoop(masm, left, right, min_length, scratch2,
- &result_not_equal);
-
- // Compare lengths - strings up to min-length are equal.
- __ bind(&compare_lengths);
- DCHECK(Smi::FromInt(EQUAL) == static_cast<Smi*>(0));
- // Use length_delta as result if it's zero.
- __ mr(r3, length_delta);
- __ cmpi(r3, Operand::Zero());
- __ bind(&result_not_equal);
- // Conditionally update the result based either on length_delta or
- // the last comparion performed in the loop above.
- if (CpuFeatures::IsSupported(ISELECT)) {
- __ LoadSmiLiteral(r4, Smi::FromInt(GREATER));
- __ LoadSmiLiteral(r5, Smi::FromInt(LESS));
- __ isel(eq, r3, r0, r4);
- __ isel(lt, r3, r5, r3);
- __ Ret();
- } else {
- Label less_equal, equal;
- __ ble(&less_equal);
- __ LoadSmiLiteral(r3, Smi::FromInt(GREATER));
- __ Ret();
- __ bind(&less_equal);
- __ beq(&equal);
- __ LoadSmiLiteral(r3, Smi::FromInt(LESS));
- __ bind(&equal);
- __ Ret();
- }
-}
-
-
-void StringHelper::GenerateOneByteCharsCompareLoop(
- MacroAssembler* masm, Register left, Register right, Register length,
- Register scratch1, Label* chars_not_equal) {
- // Change index to run from -length to -1 by adding length to string
- // start. This means that loop ends when index reaches zero, which
- // doesn't need an additional compare.
- __ SmiUntag(length);
- __ addi(scratch1, length,
- Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
- __ add(left, left, scratch1);
- __ add(right, right, scratch1);
- __ subfic(length, length, Operand::Zero());
- Register index = length; // index = -length;
-
- // Compare loop.
- Label loop;
- __ bind(&loop);
- __ lbzx(scratch1, MemOperand(left, index));
- __ lbzx(r0, MemOperand(right, index));
- __ cmp(scratch1, r0);
- __ bne(chars_not_equal);
- __ addi(index, index, Operand(1));
- __ cmpi(index, Operand::Zero());
- __ bne(&loop);
-}
-
-
// This stub is paired with DirectCEntryStub::GenerateCall
void DirectCEntryStub::Generate(MacroAssembler* masm) {
// Place the return address on the stack, making the call
@@ -826,390 +638,9 @@ void DirectCEntryStub::GenerateCall(MacroAssembler* masm, Register target) {
}
-void NameDictionaryLookupStub::GenerateNegativeLookup(
- MacroAssembler* masm, Label* miss, Label* done, Register receiver,
- Register properties, Handle<Name> name, Register scratch0) {
- DCHECK(name->IsUniqueName());
- // If names of slots in range from 1 to kProbes - 1 for the hash value are
- // not equal to the name and kProbes-th slot is not used (its name is the
- // undefined value), it guarantees the hash table doesn't contain the
- // property. It's true even if some slots represent deleted properties
- // (their names are the hole value).
- for (int i = 0; i < kInlinedProbes; i++) {
- // scratch0 points to properties hash.
- // Compute the masked index: (hash + i + i * i) & mask.
- Register index = scratch0;
- // Capacity is smi 2^n.
- __ LoadP(index, FieldMemOperand(properties, kCapacityOffset));
- __ subi(index, index, Operand(1));
- __ LoadSmiLiteral(
- ip, Smi::FromInt(name->Hash() + NameDictionary::GetProbeOffset(i)));
- __ and_(index, index, ip);
-
- // Scale the index by multiplying by the entry size.
- STATIC_ASSERT(NameDictionary::kEntrySize == 3);
- __ ShiftLeftImm(ip, index, Operand(1));
- __ add(index, index, ip); // index *= 3.
-
- Register entity_name = scratch0;
- // Having undefined at this place means the name is not contained.
- Register tmp = properties;
- __ SmiToPtrArrayOffset(ip, index);
- __ add(tmp, properties, ip);
- __ LoadP(entity_name, FieldMemOperand(tmp, kElementsStartOffset));
-
- DCHECK(tmp != entity_name);
- __ LoadRoot(tmp, Heap::kUndefinedValueRootIndex);
- __ cmp(entity_name, tmp);
- __ beq(done);
-
- // Load the hole ready for use below:
- __ LoadRoot(tmp, Heap::kTheHoleValueRootIndex);
-
- // Stop if found the property.
- __ Cmpi(entity_name, Operand(Handle<Name>(name)), r0);
- __ beq(miss);
-
- Label good;
- __ cmp(entity_name, tmp);
- __ beq(&good);
-
- // Check if the entry name is not a unique name.
- __ LoadP(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
- __ lbz(entity_name, FieldMemOperand(entity_name, Map::kInstanceTypeOffset));
- __ JumpIfNotUniqueNameInstanceType(entity_name, miss);
- __ bind(&good);
-
- // Restore the properties.
- __ LoadP(properties,
- FieldMemOperand(receiver, JSObject::kPropertiesOrHashOffset));
- }
-
- const int spill_mask = (r0.bit() | r9.bit() | r8.bit() | r7.bit() | r6.bit() |
- r5.bit() | r4.bit() | r3.bit());
-
- __ mflr(r0);
- __ MultiPush(spill_mask);
-
- __ LoadP(r3, FieldMemOperand(receiver, JSObject::kPropertiesOrHashOffset));
- __ mov(r4, Operand(Handle<Name>(name)));
- NameDictionaryLookupStub stub(masm->isolate(), NEGATIVE_LOOKUP);
- __ CallStub(&stub);
- __ cmpi(r3, Operand::Zero());
-
- __ MultiPop(spill_mask); // MultiPop does not touch condition flags
- __ mtlr(r0);
-
- __ beq(done);
- __ bne(miss);
-}
-
-void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
- // This stub overrides SometimesSetsUpAFrame() to return false. That means
- // we cannot call anything that could cause a GC from this stub.
- // Registers:
- // result: NameDictionary to probe
- // r4: key
- // dictionary: NameDictionary to probe.
- // index: will hold an index of entry if lookup is successful.
- // might alias with result_.
- // Returns:
- // result_ is zero if lookup failed, non zero otherwise.
-
- Register result = r3;
- Register dictionary = r3;
- Register key = r4;
- Register index = r5;
- Register mask = r6;
- Register hash = r7;
- Register undefined = r8;
- Register entry_key = r9;
- Register scratch = r9;
-
- Label in_dictionary, maybe_in_dictionary, not_in_dictionary;
-
- __ LoadP(mask, FieldMemOperand(dictionary, kCapacityOffset));
- __ SmiUntag(mask);
- __ subi(mask, mask, Operand(1));
-
- __ lwz(hash, FieldMemOperand(key, Name::kHashFieldOffset));
-
- __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
-
- for (int i = kInlinedProbes; i < kTotalProbes; i++) {
- // Compute the masked index: (hash + i + i * i) & mask.
- // Capacity is smi 2^n.
- if (i > 0) {
- // Add the probe offset (i + i * i) left shifted to avoid right shifting
- // the hash in a separate instruction. The value hash + i + i * i is right
- // shifted in the following and instruction.
- DCHECK(NameDictionary::GetProbeOffset(i) <
- 1 << (32 - Name::kHashFieldOffset));
- __ addi(index, hash,
- Operand(NameDictionary::GetProbeOffset(i) << Name::kHashShift));
- } else {
- __ mr(index, hash);
- }
- __ srwi(r0, index, Operand(Name::kHashShift));
- __ and_(index, mask, r0);
-
- // Scale the index by multiplying by the entry size.
- STATIC_ASSERT(NameDictionary::kEntrySize == 3);
- __ ShiftLeftImm(scratch, index, Operand(1));
- __ add(index, index, scratch); // index *= 3.
-
- __ ShiftLeftImm(scratch, index, Operand(kPointerSizeLog2));
- __ add(index, dictionary, scratch);
- __ LoadP(entry_key, FieldMemOperand(index, kElementsStartOffset));
-
- // Having undefined at this place means the name is not contained.
- __ cmp(entry_key, undefined);
- __ beq(&not_in_dictionary);
-
- // Stop if found the property.
- __ cmp(entry_key, key);
- __ beq(&in_dictionary);
-
- if (i != kTotalProbes - 1 && mode() == NEGATIVE_LOOKUP) {
- // Check if the entry name is not a unique name.
- __ LoadP(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset));
- __ lbz(entry_key, FieldMemOperand(entry_key, Map::kInstanceTypeOffset));
- __ JumpIfNotUniqueNameInstanceType(entry_key, &maybe_in_dictionary);
- }
- }
-
- __ bind(&maybe_in_dictionary);
- // If we are doing negative lookup then probing failure should be
- // treated as a lookup success. For positive lookup probing failure
- // should be treated as lookup failure.
- if (mode() == POSITIVE_LOOKUP) {
- __ li(result, Operand::Zero());
- __ Ret();
- }
-
- __ bind(&in_dictionary);
- __ li(result, Operand(1));
- __ Ret();
-
- __ bind(&not_in_dictionary);
- __ li(result, Operand::Zero());
- __ Ret();
-}
-
-
-void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(
- Isolate* isolate) {
- StoreBufferOverflowStub stub1(isolate, kDontSaveFPRegs);
- stub1.GetCode();
- // Hydrogen code stubs need stub2 at snapshot time.
- StoreBufferOverflowStub stub2(isolate, kSaveFPRegs);
- stub2.GetCode();
-}
-
-RecordWriteStub::Mode RecordWriteStub::GetMode(Code* stub) {
- Instr first_instruction =
- Assembler::instr_at(stub->instruction_start() + Assembler::kInstrSize);
- Instr second_instruction = Assembler::instr_at(stub->instruction_start() +
- (Assembler::kInstrSize * 2));
-
- // Consider adding DCHECK here to catch unexpected instruction sequence
- if (BF == (first_instruction & kBOfieldMask)) {
- return INCREMENTAL;
- }
-
- if (BF == (second_instruction & kBOfieldMask)) {
- return INCREMENTAL_COMPACTION;
- }
-
- return STORE_BUFFER_ONLY;
-}
-
-void RecordWriteStub::Patch(Code* stub, Mode mode) {
- MacroAssembler masm(stub->GetIsolate(), stub->instruction_start(),
- stub->instruction_size(), CodeObjectRequired::kNo);
- switch (mode) {
- case STORE_BUFFER_ONLY:
- DCHECK(GetMode(stub) == INCREMENTAL ||
- GetMode(stub) == INCREMENTAL_COMPACTION);
-
- PatchBranchIntoNop(&masm, Assembler::kInstrSize);
- PatchBranchIntoNop(&masm, Assembler::kInstrSize * 2);
- break;
- case INCREMENTAL:
- DCHECK(GetMode(stub) == STORE_BUFFER_ONLY);
- PatchNopIntoBranch(&masm, Assembler::kInstrSize);
- break;
- case INCREMENTAL_COMPACTION:
- DCHECK(GetMode(stub) == STORE_BUFFER_ONLY);
- PatchNopIntoBranch(&masm, Assembler::kInstrSize * 2);
- break;
- }
- DCHECK(GetMode(stub) == mode);
- Assembler::FlushICache(stub->GetIsolate(),
- stub->instruction_start() + Assembler::kInstrSize,
- 2 * Assembler::kInstrSize);
-}
-
-// Takes the input in 3 registers: address_ value_ and object_. A pointer to
-// the value has just been written into the object, now this stub makes sure
-// we keep the GC informed. The word in the object where the value has been
-// written is in the address register.
-void RecordWriteStub::Generate(MacroAssembler* masm) {
- Label skip_to_incremental_noncompacting;
- Label skip_to_incremental_compacting;
-
- // The first two branch instructions are generated with labels so as to
- // get the offset fixed up correctly by the bind(Label*) call. We patch
- // it back and forth between branch condition True and False
- // when we start and stop incremental heap marking.
- // See RecordWriteStub::Patch for details.
-
- // Clear the bit, branch on True for NOP action initially
- __ crclr(Assembler::encode_crbit(cr2, CR_LT));
- __ blt(&skip_to_incremental_noncompacting, cr2);
- __ blt(&skip_to_incremental_compacting, cr2);
-
- if (remembered_set_action() == EMIT_REMEMBERED_SET) {
- __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode());
- }
- __ Ret();
-
- __ bind(&skip_to_incremental_noncompacting);
- GenerateIncremental(masm, INCREMENTAL);
-
- __ bind(&skip_to_incremental_compacting);
- GenerateIncremental(masm, INCREMENTAL_COMPACTION);
-
- // Initial mode of the stub is expected to be STORE_BUFFER_ONLY.
- // Will be checked in IncrementalMarking::ActivateGeneratedStub.
- // patching not required on PPC as the initial path is effectively NOP
-}
-
-
-void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
- regs_.Save(masm);
-
- if (remembered_set_action() == EMIT_REMEMBERED_SET) {
- Label dont_need_remembered_set;
-
- __ LoadP(regs_.scratch0(), MemOperand(regs_.address(), 0));
- __ JumpIfNotInNewSpace(regs_.scratch0(), // Value.
- regs_.scratch0(), &dont_need_remembered_set);
-
- __ JumpIfInNewSpace(regs_.object(), regs_.scratch0(),
- &dont_need_remembered_set);
-
- // First notify the incremental marker if necessary, then update the
- // remembered set.
- CheckNeedsToInformIncrementalMarker(
- masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
- InformIncrementalMarker(masm);
- regs_.Restore(masm);
- __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode());
-
- __ bind(&dont_need_remembered_set);
- }
-
- CheckNeedsToInformIncrementalMarker(
- masm, kReturnOnNoNeedToInformIncrementalMarker, mode);
- InformIncrementalMarker(masm);
- regs_.Restore(masm);
- __ Ret();
-}
-
-
-void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) {
- regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode());
- int argument_count = 3;
- __ PrepareCallCFunction(argument_count, regs_.scratch0());
- Register address = r3 == regs_.address() ? regs_.scratch0() : regs_.address();
- DCHECK(address != regs_.object());
- DCHECK(address != r3);
- __ mr(address, regs_.address());
- __ mr(r3, regs_.object());
- __ mr(r4, address);
- __ mov(r5, Operand(ExternalReference::isolate_address(isolate())));
-
- AllowExternalCallThatCantCauseGC scope(masm);
- __ CallCFunction(
- ExternalReference::incremental_marking_record_write_function(isolate()),
- argument_count);
- regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode());
-}
-
-void RecordWriteStub::Activate(Code* code) {
- code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
-}
-
-void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
- MacroAssembler* masm, OnNoNeedToInformIncrementalMarker on_no_need,
- Mode mode) {
- Label need_incremental;
- Label need_incremental_pop_scratch;
-#ifndef V8_CONCURRENT_MARKING
- Label on_black;
-
- // Let's look at the color of the object: If it is not black we don't have
- // to inform the incremental marker.
- __ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black);
-
- regs_.Restore(masm);
- if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
- __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode());
- } else {
- __ Ret();
- }
-
- __ bind(&on_black);
-#endif
-
- // Get the value from the slot.
- __ LoadP(regs_.scratch0(), MemOperand(regs_.address(), 0));
-
- if (mode == INCREMENTAL_COMPACTION) {
- Label ensure_not_white;
-
- __ CheckPageFlag(regs_.scratch0(), // Contains value.
- regs_.scratch1(), // Scratch.
- MemoryChunk::kEvacuationCandidateMask, eq,
- &ensure_not_white);
-
- __ CheckPageFlag(regs_.object(),
- regs_.scratch1(), // Scratch.
- MemoryChunk::kSkipEvacuationSlotsRecordingMask, eq,
- &need_incremental);
-
- __ bind(&ensure_not_white);
- }
-
- // We need extra registers for this, so we push the object and the address
- // register temporarily.
- __ Push(regs_.object(), regs_.address());
- __ JumpIfWhite(regs_.scratch0(), // The value.
- regs_.scratch1(), // Scratch.
- regs_.object(), // Scratch.
- regs_.address(), // Scratch.
- &need_incremental_pop_scratch);
- __ Pop(regs_.object(), regs_.address());
-
- regs_.Restore(masm);
- if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
- __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode());
- } else {
- __ Ret();
- }
-
- __ bind(&need_incremental_pop_scratch);
- __ Pop(regs_.object(), regs_.address());
-
- __ bind(&need_incremental);
-
- // Fall through when we need to inform the incremental marker.
-}
-
void ProfileEntryHookStub::MaybeCallEntryHookDelayed(TurboAssembler* tasm,
Zone* zone) {
- if (tasm->isolate()->function_entry_hook() != NULL) {
+ if (tasm->isolate()->function_entry_hook() != nullptr) {
PredictableCodeSizeScope predictable(tasm,
#if V8_TARGET_ARCH_PPC64
14 * Assembler::kInstrSize);
@@ -1225,7 +656,7 @@ void ProfileEntryHookStub::MaybeCallEntryHookDelayed(TurboAssembler* tasm,
}
void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
- if (masm->isolate()->function_entry_hook() != NULL) {
+ if (masm->isolate()->function_entry_hook() != nullptr) {
PredictableCodeSizeScope predictable(masm,
#if V8_TARGET_ARCH_PPC64
14 * Assembler::kInstrSize);
@@ -1473,7 +904,7 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
// Initial map for the builtin Array function should be a map.
__ LoadP(r7, FieldMemOperand(r4, JSFunction::kPrototypeOrInitialMapOffset));
- // Will both indicate a NULL and a Smi.
+ // Will both indicate a nullptr and a Smi.
__ TestIfSmi(r7, r0);
__ Assert(ne, kUnexpectedInitialMapForArrayFunction, cr0);
__ CompareObjectType(r7, r7, r8, MAP_TYPE);
@@ -1554,7 +985,7 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
// Initial map for the builtin Array function should be a map.
__ LoadP(r6, FieldMemOperand(r4, JSFunction::kPrototypeOrInitialMapOffset));
- // Will both indicate a NULL and a Smi.
+ // Will both indicate a nullptr and a Smi.
__ TestIfSmi(r6, r0);
__ Assert(ne, kUnexpectedInitialMapForArrayFunction, cr0);
__ CompareObjectType(r6, r6, r7, MAP_TYPE);
@@ -1600,8 +1031,7 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
ExternalReference thunk_ref,
int stack_space,
MemOperand* stack_space_operand,
- MemOperand return_value_operand,
- MemOperand* context_restore_operand) {
+ MemOperand return_value_operand) {
Isolate* isolate = masm->isolate();
ExternalReference next_address =
ExternalReference::handle_scope_next_address(isolate);
@@ -1695,17 +1125,13 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
// Leave the API exit frame.
__ bind(&leave_exit_frame);
- bool restore_context = context_restore_operand != NULL;
- if (restore_context) {
- __ LoadP(cp, *context_restore_operand);
- }
// LeaveExitFrame expects unwind space to be in a register.
- if (stack_space_operand != NULL) {
+ if (stack_space_operand != nullptr) {
__ lwz(r14, *stack_space_operand);
} else {
__ mov(r14, Operand(stack_space));
}
- __ LeaveExitFrame(false, r14, !restore_context, stack_space_operand != NULL);
+ __ LeaveExitFrame(false, r14, stack_space_operand != nullptr);
// Check if the function scheduled an exception.
__ LoadRoot(r14, Heap::kTheHoleValueRootIndex);
@@ -1734,7 +1160,6 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
void CallApiCallbackStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
- // -- r3 : callee
// -- r7 : call_data
// -- r5 : holder
// -- r4 : api_function_address
@@ -1744,21 +1169,16 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) {
// -- ...
// -- sp[(argc - 1)* 4] : first argument
// -- sp[argc * 4] : receiver
- // -- sp[(argc + 1)* 4] : accessor_holder
// -----------------------------------
- Register callee = r3;
Register call_data = r7;
Register holder = r5;
Register api_function_address = r4;
- Register context = cp;
typedef FunctionCallbackArguments FCA;
- STATIC_ASSERT(FCA::kArgsLength == 8);
- STATIC_ASSERT(FCA::kNewTargetIndex == 7);
- STATIC_ASSERT(FCA::kContextSaveIndex == 6);
- STATIC_ASSERT(FCA::kCalleeIndex == 5);
+ STATIC_ASSERT(FCA::kArgsLength == 6);
+ STATIC_ASSERT(FCA::kNewTargetIndex == 5);
STATIC_ASSERT(FCA::kDataIndex == 4);
STATIC_ASSERT(FCA::kReturnValueOffset == 3);
STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
@@ -1768,12 +1188,6 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) {
// new target
__ PushRoot(Heap::kUndefinedValueRootIndex);
- // context save
- __ push(context);
-
- // callee
- __ push(callee);
-
// call data
__ push(call_data);
@@ -1789,38 +1203,6 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) {
// holder
__ push(holder);
- // Enter a new context
- if (is_lazy()) {
- // ----------- S t a t e -------------------------------------
- // -- sp[0] : holder
- // -- ...
- // -- sp[(FCA::kArgsLength - 1) * 4] : new_target
- // -- sp[FCA::kArgsLength * 4] : last argument
- // -- ...
- // -- sp[(FCA::kArgsLength + argc - 1) * 4] : first argument
- // -- sp[(FCA::kArgsLength + argc) * 4] : receiver
- // -- sp[(FCA::kArgsLength + argc + 1) * 4] : accessor_holder
- // -----------------------------------------------------------
-
- // Load context from accessor_holder
- Register accessor_holder = context;
- Register scratch2 = callee;
- __ LoadP(accessor_holder,
- MemOperand(sp, (FCA::kArgsLength + 1 + argc()) * kPointerSize));
- // Look for the constructor if |accessor_holder| is not a function.
- Label skip_looking_for_constructor;
- __ LoadP(scratch, FieldMemOperand(accessor_holder, HeapObject::kMapOffset));
- __ lbz(scratch2, FieldMemOperand(scratch, Map::kBitFieldOffset));
- __ andi(r0, scratch2, Operand(1 << Map::kIsConstructor));
- __ bne(&skip_looking_for_constructor, cr0);
- __ GetMapConstructor(context, scratch, scratch, scratch2);
- __ bind(&skip_looking_for_constructor);
- __ LoadP(context, FieldMemOperand(context, JSFunction::kContextOffset));
- } else {
- // Load context from callee
- __ LoadP(context, FieldMemOperand(callee, JSFunction::kContextOffset));
- }
-
// Prepare arguments.
__ mr(scratch, sp);
@@ -1855,21 +1237,13 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) {
ExternalReference::invoke_function_callback(masm->isolate());
AllowExternalCallThatCantCauseGC scope(masm);
- MemOperand context_restore_operand(
- fp, (2 + FCA::kContextSaveIndex) * kPointerSize);
// Stores return the first js argument
- int return_value_offset = 0;
- if (is_store()) {
- return_value_offset = 2 + FCA::kArgsLength;
- } else {
- return_value_offset = 2 + FCA::kReturnValueOffset;
- }
+ int return_value_offset = 2 + FCA::kReturnValueOffset;
MemOperand return_value_operand(fp, return_value_offset * kPointerSize);
- const int stack_space = argc() + FCA::kArgsLength + 2;
+ const int stack_space = argc() + FCA::kArgsLength + 1;
MemOperand* stack_space_operand = nullptr;
CallApiFunctionAndReturn(masm, api_function_address, thunk_ref, stack_space,
- stack_space_operand, return_value_operand,
- &context_restore_operand);
+ stack_space_operand, return_value_operand);
}
@@ -1962,7 +1336,7 @@ void CallApiGetterStub::Generate(MacroAssembler* masm) {
MemOperand return_value_operand(
fp, (PropertyCallbackArguments::kReturnValueOffset + 3) * kPointerSize);
CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
- kStackUnwindSpace, NULL, return_value_operand, NULL);
+ kStackUnwindSpace, nullptr, return_value_operand);
}
#undef __
diff --git a/deps/v8/src/ppc/code-stubs-ppc.h b/deps/v8/src/ppc/code-stubs-ppc.h
index 70da70831c..80284587db 100644
--- a/deps/v8/src/ppc/code-stubs-ppc.h
+++ b/deps/v8/src/ppc/code-stubs-ppc.h
@@ -8,210 +8,6 @@
namespace v8 {
namespace internal {
-
-class StringHelper : public AllStatic {
- public:
- // Compares two flat one-byte strings and returns result in r0.
- static void GenerateCompareFlatOneByteStrings(MacroAssembler* masm,
- Register left, Register right,
- Register scratch1,
- Register scratch2,
- Register scratch3);
-
- // Compares two flat one-byte strings for equality and returns result in r0.
- static void GenerateFlatOneByteStringEquals(MacroAssembler* masm,
- Register left, Register right,
- Register scratch1,
- Register scratch2);
-
- private:
- static void GenerateOneByteCharsCompareLoop(MacroAssembler* masm,
- Register left, Register right,
- Register length,
- Register scratch1,
- Label* chars_not_equal);
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper);
-};
-
-
-class StoreRegistersStateStub : public PlatformCodeStub {
- public:
- explicit StoreRegistersStateStub(Isolate* isolate)
- : PlatformCodeStub(isolate) {}
-
- static void GenerateAheadOfTime(Isolate* isolate);
-
- private:
- DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
- DEFINE_PLATFORM_CODE_STUB(StoreRegistersState, PlatformCodeStub);
-};
-
-
-class RestoreRegistersStateStub : public PlatformCodeStub {
- public:
- explicit RestoreRegistersStateStub(Isolate* isolate)
- : PlatformCodeStub(isolate) {}
-
- static void GenerateAheadOfTime(Isolate* isolate);
-
- private:
- DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
- DEFINE_PLATFORM_CODE_STUB(RestoreRegistersState, PlatformCodeStub);
-};
-
-
-class RecordWriteStub : public PlatformCodeStub {
- public:
- RecordWriteStub(Isolate* isolate, Register object, Register value,
- Register address, RememberedSetAction remembered_set_action,
- SaveFPRegsMode fp_mode)
- : PlatformCodeStub(isolate),
- regs_(object, // An input reg.
- address, // An input reg.
- value) { // One scratch reg.
- minor_key_ = ObjectBits::encode(object.code()) |
- ValueBits::encode(value.code()) |
- AddressBits::encode(address.code()) |
- RememberedSetActionBits::encode(remembered_set_action) |
- SaveFPRegsModeBits::encode(fp_mode);
- }
-
- RecordWriteStub(uint32_t key, Isolate* isolate)
- : PlatformCodeStub(key, isolate), regs_(object(), address(), value()) {}
-
- enum Mode { STORE_BUFFER_ONLY, INCREMENTAL, INCREMENTAL_COMPACTION };
-
- bool SometimesSetsUpAFrame() override { return false; }
-
- static void PatchBranchIntoNop(MacroAssembler* masm, int pos) {
- // Consider adding DCHECK here to catch bad patching
- masm->instr_at_put(pos, (masm->instr_at(pos) & ~kBOfieldMask) | BT);
- }
-
- static void PatchNopIntoBranch(MacroAssembler* masm, int pos) {
- // Consider adding DCHECK here to catch bad patching
- masm->instr_at_put(pos, (masm->instr_at(pos) & ~kBOfieldMask) | BF);
- }
-
- static Mode GetMode(Code* stub);
-
- static void Patch(Code* stub, Mode mode);
-
- DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
-
- private:
- // This is a helper class for freeing up 3 scratch registers. The input is
- // two registers that must be preserved and one scratch register provided by
- // the caller.
- class RegisterAllocation {
- public:
- RegisterAllocation(Register object, Register address, Register scratch0)
- : object_(object),
- address_(address),
- scratch0_(scratch0),
- scratch1_(no_reg) {
- DCHECK(!AreAliased(scratch0, object, address, no_reg));
- scratch1_ = GetRegisterThatIsNotOneOf(object_, address_, scratch0_);
- }
-
- void Save(MacroAssembler* masm) {
- DCHECK(!AreAliased(object_, address_, scratch1_, scratch0_));
- // We don't have to save scratch0_ because it was given to us as
- // a scratch register.
- masm->push(scratch1_);
- }
-
- void Restore(MacroAssembler* masm) { masm->pop(scratch1_); }
-
- // If we have to call into C then we need to save and restore all caller-
- // saved registers that were not already preserved. The scratch registers
- // will be restored by other means so we don't bother pushing them here.
- void SaveCallerSaveRegisters(MacroAssembler* masm, SaveFPRegsMode mode) {
- masm->mflr(r0);
- masm->push(r0);
- masm->MultiPush(kJSCallerSaved & ~scratch1_.bit());
- if (mode == kSaveFPRegs) {
- // Save all volatile FP registers except d0.
- masm->MultiPushDoubles(kCallerSavedDoubles & ~d0.bit());
- }
- }
-
- inline void RestoreCallerSaveRegisters(MacroAssembler* masm,
- SaveFPRegsMode mode) {
- if (mode == kSaveFPRegs) {
- // Restore all volatile FP registers except d0.
- masm->MultiPopDoubles(kCallerSavedDoubles & ~d0.bit());
- }
- masm->MultiPop(kJSCallerSaved & ~scratch1_.bit());
- masm->pop(r0);
- masm->mtlr(r0);
- }
-
- inline Register object() { return object_; }
- inline Register address() { return address_; }
- inline Register scratch0() { return scratch0_; }
- inline Register scratch1() { return scratch1_; }
-
- private:
- Register object_;
- Register address_;
- Register scratch0_;
- Register scratch1_;
-
- friend class RecordWriteStub;
- };
-
- enum OnNoNeedToInformIncrementalMarker {
- kReturnOnNoNeedToInformIncrementalMarker,
- kUpdateRememberedSetOnNoNeedToInformIncrementalMarker
- };
-
- inline Major MajorKey() const final { return RecordWrite; }
-
- void Generate(MacroAssembler* masm) override;
- void GenerateIncremental(MacroAssembler* masm, Mode mode);
- void CheckNeedsToInformIncrementalMarker(
- MacroAssembler* masm, OnNoNeedToInformIncrementalMarker on_no_need,
- Mode mode);
- void InformIncrementalMarker(MacroAssembler* masm);
-
- void Activate(Code* code) override;
-
- Register object() const {
- return Register::from_code(ObjectBits::decode(minor_key_));
- }
-
- Register value() const {
- return Register::from_code(ValueBits::decode(minor_key_));
- }
-
- Register address() const {
- return Register::from_code(AddressBits::decode(minor_key_));
- }
-
- RememberedSetAction remembered_set_action() const {
- return RememberedSetActionBits::decode(minor_key_);
- }
-
- SaveFPRegsMode save_fp_regs_mode() const {
- return SaveFPRegsModeBits::decode(minor_key_);
- }
-
- class ObjectBits : public BitField<int, 0, 5> {};
- class ValueBits : public BitField<int, 5, 5> {};
- class AddressBits : public BitField<int, 10, 5> {};
- class RememberedSetActionBits : public BitField<RememberedSetAction, 15, 1> {
- };
- class SaveFPRegsModeBits : public BitField<SaveFPRegsMode, 16, 1> {};
-
- Label slow_;
- RegisterAllocation regs_;
-
- DISALLOW_COPY_AND_ASSIGN(RecordWriteStub);
-};
-
-
// Trampoline stub to call into native code. To call safely into native code
// in the presence of compacting GC (which can move code objects) we need to
// keep the code which called into native pinned in the memory. Currently the
@@ -223,48 +19,12 @@ class DirectCEntryStub : public PlatformCodeStub {
void GenerateCall(MacroAssembler* masm, Register target);
private:
- bool NeedsImmovableCode() override { return true; }
+ Movability NeedsImmovableCode() override { return kImmovable; }
DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
DEFINE_PLATFORM_CODE_STUB(DirectCEntry, PlatformCodeStub);
};
-
-class NameDictionaryLookupStub : public PlatformCodeStub {
- public:
- enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP };
-
- NameDictionaryLookupStub(Isolate* isolate, LookupMode mode)
- : PlatformCodeStub(isolate) {
- minor_key_ = LookupModeBits::encode(mode);
- }
-
- static void GenerateNegativeLookup(MacroAssembler* masm, Label* miss,
- Label* done, Register receiver,
- Register properties, Handle<Name> name,
- Register scratch0);
-
- bool SometimesSetsUpAFrame() override { return false; }
-
- private:
- static const int kInlinedProbes = 4;
- static const int kTotalProbes = 20;
-
- static const int kCapacityOffset =
- NameDictionary::kHeaderSize +
- NameDictionary::kCapacityIndex * kPointerSize;
-
- static const int kElementsStartOffset =
- NameDictionary::kHeaderSize +
- NameDictionary::kElementsStartIndex * kPointerSize;
-
- LookupMode mode() const { return LookupModeBits::decode(minor_key_); }
-
- class LookupModeBits : public BitField<LookupMode, 0, 1> {};
-
- DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
- DEFINE_PLATFORM_CODE_STUB(NameDictionaryLookup, PlatformCodeStub);
-};
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/ppc/codegen-ppc.cc b/deps/v8/src/ppc/codegen-ppc.cc
index 695ae6beb6..13c9af7e22 100644
--- a/deps/v8/src/ppc/codegen-ppc.cc
+++ b/deps/v8/src/ppc/codegen-ppc.cc
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/ppc/codegen-ppc.h"
-
#if V8_TARGET_ARCH_PPC
#include <memory>
@@ -15,22 +13,21 @@
namespace v8 {
namespace internal {
-
#define __ masm.
UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
#if defined(USE_SIMULATOR)
return nullptr;
#else
- size_t actual_size;
+ size_t allocated = 0;
byte* buffer =
- static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
+ AllocateSystemPage(isolate->heap()->GetRandomMmapAddr(), &allocated);
if (buffer == nullptr) return nullptr;
- MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
+ MacroAssembler masm(isolate, buffer, static_cast<int>(allocated),
CodeObjectRequired::kNo);
-// Called from C
+ // Called from C
__ function_descriptor();
__ MovFromFloatParameter(d1);
@@ -43,115 +40,15 @@ UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
DCHECK(ABI_USES_FUNCTION_DESCRIPTORS ||
!RelocInfo::RequiresRelocation(isolate, desc));
- Assembler::FlushICache(isolate, buffer, actual_size);
- base::OS::ProtectCode(buffer, actual_size);
+ Assembler::FlushICache(isolate, buffer, allocated);
+ CHECK(base::OS::SetPermissions(buffer, allocated,
+ base::OS::MemoryPermission::kReadExecute));
return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
#endif
}
#undef __
-// -------------------------------------------------------------------------
-// Code generators
-
-#define __ ACCESS_MASM(masm)
-
-// assume ip can be used as a scratch register below
-void StringCharLoadGenerator::Generate(MacroAssembler* masm, Register string,
- Register index, Register result,
- Label* call_runtime) {
- Label indirect_string_loaded;
- __ bind(&indirect_string_loaded);
-
- // Fetch the instance type of the receiver into result register.
- __ LoadP(result, FieldMemOperand(string, HeapObject::kMapOffset));
- __ lbz(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
-
- // We need special handling for indirect strings.
- Label check_sequential;
- __ andi(r0, result, Operand(kIsIndirectStringMask));
- __ beq(&check_sequential, cr0);
-
- // Dispatch on the indirect string shape: slice or cons or thin.
- Label cons_string, thin_string;
- __ andi(ip, result, Operand(kStringRepresentationMask));
- __ cmpi(ip, Operand(kConsStringTag));
- __ beq(&cons_string);
- __ cmpi(ip, Operand(kThinStringTag));
- __ beq(&thin_string);
-
- // Handle slices.
- __ LoadP(result, FieldMemOperand(string, SlicedString::kOffsetOffset));
- __ LoadP(string, FieldMemOperand(string, SlicedString::kParentOffset));
- __ SmiUntag(ip, result);
- __ add(index, index, ip);
- __ b(&indirect_string_loaded);
-
- // Handle thin strings.
- __ bind(&thin_string);
- __ LoadP(string, FieldMemOperand(string, ThinString::kActualOffset));
- __ b(&indirect_string_loaded);
-
- // Handle cons strings.
- // Check whether the right hand side is the empty string (i.e. if
- // this is really a flat string in a cons string). If that is not
- // the case we would rather go to the runtime system now to flatten
- // the string.
- __ bind(&cons_string);
- __ LoadP(result, FieldMemOperand(string, ConsString::kSecondOffset));
- __ CompareRoot(result, Heap::kempty_stringRootIndex);
- __ bne(call_runtime);
- // Get the first of the two strings and load its instance type.
- __ LoadP(string, FieldMemOperand(string, ConsString::kFirstOffset));
- __ b(&indirect_string_loaded);
-
- // Distinguish sequential and external strings. Only these two string
- // representations can reach here (slices and flat cons strings have been
- // reduced to the underlying sequential or external string).
- Label external_string, check_encoding;
- __ bind(&check_sequential);
- STATIC_ASSERT(kSeqStringTag == 0);
- __ andi(r0, result, Operand(kStringRepresentationMask));
- __ bne(&external_string, cr0);
-
- // Prepare sequential strings
- STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
- __ addi(string, string,
- Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
- __ b(&check_encoding);
-
- // Handle external strings.
- __ bind(&external_string);
- if (FLAG_debug_code) {
- // Assert that we do not have a cons or slice (indirect strings) here.
- // Sequential strings have already been ruled out.
- __ andi(r0, result, Operand(kIsIndirectStringMask));
- __ Assert(eq, kExternalStringExpectedButNotFound, cr0);
- }
- // Rule out short external strings.
- STATIC_ASSERT(kShortExternalStringTag != 0);
- __ andi(r0, result, Operand(kShortExternalStringMask));
- __ bne(call_runtime, cr0);
- __ LoadP(string,
- FieldMemOperand(string, ExternalString::kResourceDataOffset));
-
- Label one_byte, done;
- __ bind(&check_encoding);
- STATIC_ASSERT(kTwoByteStringTag == 0);
- __ andi(r0, result, Operand(kStringEncodingMask));
- __ bne(&one_byte, cr0);
- // Two-byte string.
- __ ShiftLeftImm(result, index, Operand(1));
- __ lhzx(result, MemOperand(string, result));
- __ b(&done);
- __ bind(&one_byte);
- // One-byte string.
- __ lbzx(result, MemOperand(string, index));
- __ bind(&done);
-}
-
-#undef __
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/ppc/codegen-ppc.h b/deps/v8/src/ppc/codegen-ppc.h
deleted file mode 100644
index b0d344a013..0000000000
--- a/deps/v8/src/ppc/codegen-ppc.h
+++ /dev/null
@@ -1,28 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_PPC_CODEGEN_PPC_H_
-#define V8_PPC_CODEGEN_PPC_H_
-
-#include "src/macro-assembler.h"
-
-namespace v8 {
-namespace internal {
-
-
-class StringCharLoadGenerator : public AllStatic {
- public:
- // Generates the code for handling different string types and loading the
- // indexed character into |result|. We expect |index| as untagged input and
- // |result| as untagged output.
- static void Generate(MacroAssembler* masm, Register string, Register index,
- Register result, Label* call_runtime);
-
- private:
- DISALLOW_COPY_AND_ASSIGN(StringCharLoadGenerator);
-};
-} // namespace internal
-} // namespace v8
-
-#endif // V8_PPC_CODEGEN_PPC_H_
diff --git a/deps/v8/src/ppc/deoptimizer-ppc.cc b/deps/v8/src/ppc/deoptimizer-ppc.cc
index 7bc47d4644..caa1a24354 100644
--- a/deps/v8/src/ppc/deoptimizer-ppc.cc
+++ b/deps/v8/src/ppc/deoptimizer-ppc.cc
@@ -3,7 +3,6 @@
// found in the LICENSE file.
#include "src/assembler-inl.h"
-#include "src/codegen.h"
#include "src/deoptimizer.h"
#include "src/register-configuration.h"
#include "src/safepoint-table.h"
@@ -100,7 +99,7 @@ void Deoptimizer::TableEntryGenerator::Generate() {
__ LoadP(r4, MemOperand(r3, Deoptimizer::input_offset()));
// Copy core registers into FrameDescription::registers_[kNumRegisters].
- DCHECK(Register::kNumRegisters == kNumberOfRegisters);
+ DCHECK_EQ(Register::kNumRegisters, kNumberOfRegisters);
for (int i = 0; i < kNumberOfRegisters; i++) {
int offset = (i * kPointerSize) + FrameDescription::registers_offset();
__ LoadP(r5, MemOperand(sp, i * kPointerSize));
diff --git a/deps/v8/src/ppc/disasm-ppc.cc b/deps/v8/src/ppc/disasm-ppc.cc
index 2a1044f7ad..7e962e7849 100644
--- a/deps/v8/src/ppc/disasm-ppc.cc
+++ b/deps/v8/src/ppc/disasm-ppc.cc
@@ -149,7 +149,7 @@ void Decoder::PrintSoftwareInterrupt(SoftwareInterruptCodes svc) {
// Handle all register based formatting in this function to reduce the
// complexity of FormatOption.
int Decoder::FormatRegister(Instruction* instr, const char* format) {
- DCHECK(format[0] == 'r');
+ DCHECK_EQ(format[0], 'r');
if ((format[1] == 't') || (format[1] == 's')) { // 'rt & 'rs register
int reg = instr->RTValue();
@@ -172,7 +172,7 @@ int Decoder::FormatRegister(Instruction* instr, const char* format) {
// Handle all FP register based formatting in this function to reduce the
// complexity of FormatOption.
int Decoder::FormatFPRegister(Instruction* instr, const char* format) {
- DCHECK(format[0] == 'D');
+ DCHECK_EQ(format[0], 'D');
int retval = 2;
int reg = -1;
@@ -270,7 +270,7 @@ int Decoder::FormatOption(Instruction* instr, const char* format) {
return 8;
}
case 's': {
- DCHECK(format[1] == 'h');
+ DCHECK_EQ(format[1], 'h');
int32_t value = 0;
int32_t opcode = instr->OpcodeValue() << 26;
int32_t sh = instr->Bits(15, 11);
@@ -601,19 +601,19 @@ void Decoder::DecodeExt2(Instruction* instr) {
return;
}
case LFSX: {
- Format(instr, "lfsx 'rt, 'ra, 'rb");
+ Format(instr, "lfsx 'Dt, 'ra, 'rb");
return;
}
case LFSUX: {
- Format(instr, "lfsux 'rt, 'ra, 'rb");
+ Format(instr, "lfsux 'Dt, 'ra, 'rb");
return;
}
case LFDX: {
- Format(instr, "lfdx 'rt, 'ra, 'rb");
+ Format(instr, "lfdx 'Dt, 'ra, 'rb");
return;
}
case LFDUX: {
- Format(instr, "lfdux 'rt, 'ra, 'rb");
+ Format(instr, "lfdux 'Dt, 'ra, 'rb");
return;
}
case STFSX: {
diff --git a/deps/v8/src/ppc/frame-constants-ppc.cc b/deps/v8/src/ppc/frame-constants-ppc.cc
index 6497ad440d..f49296292a 100644
--- a/deps/v8/src/ppc/frame-constants-ppc.cc
+++ b/deps/v8/src/ppc/frame-constants-ppc.cc
@@ -27,6 +27,11 @@ int InterpreterFrameConstants::RegisterStackSlotCount(int register_count) {
return register_count;
}
+int BuiltinContinuationFrameConstants::PaddingSlotCount(int register_count) {
+ USE(register_count);
+ return 0;
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/ppc/interface-descriptors-ppc.cc b/deps/v8/src/ppc/interface-descriptors-ppc.cc
index 7f0b8a5961..9c4fe5fd6a 100644
--- a/deps/v8/src/ppc/interface-descriptors-ppc.cc
+++ b/deps/v8/src/ppc/interface-descriptors-ppc.cc
@@ -56,9 +56,6 @@ const Register StoreTransitionDescriptor::SlotRegister() { return r7; }
const Register StoreTransitionDescriptor::VectorRegister() { return r6; }
const Register StoreTransitionDescriptor::MapRegister() { return r8; }
-const Register StringCompareDescriptor::LeftRegister() { return r4; }
-const Register StringCompareDescriptor::RightRegister() { return r3; }
-
const Register ApiGetterDescriptor::HolderRegister() { return r3; }
const Register ApiGetterDescriptor::CallbackRegister() { return r6; }
@@ -215,7 +212,7 @@ void ArrayConstructorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// kTarget, kNewTarget, kActualArgumentsCount, kAllocationSite
Register registers[] = {r4, r6, r3, r5};
- data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
+ data->InitializePlatformSpecific(arraysize(registers), registers, nullptr);
}
void ArrayNoArgumentConstructorDescriptor::InitializePlatformSpecific(
@@ -235,7 +232,7 @@ void ArraySingleArgumentConstructorDescriptor::InitializePlatformSpecific(
// r4 -- function
// r5 -- allocation site with elements kind
Register registers[] = {r4, r5, r3};
- data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
+ data->InitializePlatformSpecific(arraysize(registers), registers, nullptr);
}
@@ -279,10 +276,10 @@ void ArgumentAdaptorDescriptor::InitializePlatformSpecific(
void ApiCallbackDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
- r3, // callee
- r7, // call_data
- r5, // holder
- r4, // api_function_address
+ JavaScriptFrame::context_register(), // callee context
+ r7, // call_data
+ r5, // holder
+ r4, // api_function_address
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
@@ -331,8 +328,7 @@ void ResumeGeneratorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
r3, // the value to pass to the generator
- r4, // the JSGeneratorObject to resume
- r5 // the resume mode (tagged)
+ r4 // the JSGeneratorObject to resume
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
diff --git a/deps/v8/src/ppc/macro-assembler-ppc.cc b/deps/v8/src/ppc/macro-assembler-ppc.cc
index efb6c2bab9..75e176c09c 100644
--- a/deps/v8/src/ppc/macro-assembler-ppc.cc
+++ b/deps/v8/src/ppc/macro-assembler-ppc.cc
@@ -11,7 +11,7 @@
#include "src/base/division-by-constant.h"
#include "src/bootstrapper.h"
#include "src/callable.h"
-#include "src/codegen.h"
+#include "src/code-stubs.h"
#include "src/debug/debug.h"
#include "src/external-reference-table.h"
#include "src/frames-inl.h"
@@ -329,12 +329,6 @@ void TurboAssembler::LoadRoot(Register destination, Heap::RootListIndex index,
LoadP(destination, MemOperand(kRootRegister, index << kPointerSizeLog2), r0);
}
-void MacroAssembler::InNewSpace(Register object, Register scratch,
- Condition cond, Label* branch) {
- DCHECK(cond == eq || cond == ne);
- CheckPageFlag(object, scratch, MemoryChunk::kIsInNewSpaceMask, cond, branch);
-}
-
void MacroAssembler::RecordWriteField(Register object, int offset,
Register value, Register dst,
LinkRegisterStatus lr_status,
@@ -377,7 +371,7 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
}
void TurboAssembler::SaveRegisters(RegList registers) {
- DCHECK(NumRegs(registers) > 0);
+ DCHECK_GT(NumRegs(registers), 0);
RegList regs = 0;
for (int i = 0; i < Register::kNumRegisters; ++i) {
if ((registers >> i) & 1u) {
@@ -389,7 +383,7 @@ void TurboAssembler::SaveRegisters(RegList registers) {
}
void TurboAssembler::RestoreRegisters(RegList registers) {
- DCHECK(NumRegs(registers) > 0);
+ DCHECK_GT(NumRegs(registers), 0);
RegList regs = 0;
for (int i = 0; i < Register::kNumRegisters; ++i) {
if ((registers >> i) & 1u) {
@@ -479,13 +473,7 @@ void MacroAssembler::RecordWrite(Register object, Register address,
mflr(r0);
push(r0);
}
-#ifdef V8_CSA_WRITE_BARRIER
CallRecordWriteStub(object, address, remembered_set_action, fp_mode);
-#else
- RecordWriteStub stub(isolate(), object, value, address, remembered_set_action,
- fp_mode);
- CallStub(&stub);
-#endif
if (lr_status == kLRHasNotBeenSaved) {
pop(r0);
mtlr(r0);
@@ -506,41 +494,6 @@ void MacroAssembler::RecordWrite(Register object, Register address,
}
}
-void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
- Register address, Register scratch,
- SaveFPRegsMode fp_mode) {
- Label done;
- if (emit_debug_code()) {
- Label ok;
- JumpIfNotInNewSpace(object, scratch, &ok);
- stop("Remembered set pointer is in new space");
- bind(&ok);
- }
- // Load store buffer top.
- ExternalReference store_buffer =
- ExternalReference::store_buffer_top(isolate());
- mov(ip, Operand(store_buffer));
- LoadP(scratch, MemOperand(ip));
- // Store pointer to buffer and increment buffer top.
- StoreP(address, MemOperand(scratch));
- addi(scratch, scratch, Operand(kPointerSize));
- // Write back new top of buffer.
- StoreP(scratch, MemOperand(ip));
- // Call stub on end of buffer.
- // Check for end of buffer.
- TestBitMask(scratch, StoreBuffer::kStoreBufferMask, r0);
-
- Ret(ne, cr0);
- mflr(r0);
- push(r0);
- StoreBufferOverflowStub store_buffer_overflow(isolate(), fp_mode);
- CallStub(&store_buffer_overflow);
- pop(r0);
- mtlr(r0);
- bind(&done);
- Ret();
-}
-
void TurboAssembler::PushCommonFrame(Register marker_reg) {
int fp_delta = 0;
mflr(r0);
@@ -603,7 +556,7 @@ void MacroAssembler::PushSafepointRegisters() {
// Safepoints expect a block of kNumSafepointRegisters values on the
// stack, so adjust the stack for unsaved registers.
const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
- DCHECK(num_unsaved >= 0);
+ DCHECK_GE(num_unsaved, 0);
if (num_unsaved > 0) {
subi(sp, sp, Operand(num_unsaved * kPointerSize));
}
@@ -958,50 +911,6 @@ int TurboAssembler::LeaveFrame(StackFrame::Type type, int stack_adjustment) {
return frame_ends;
}
-void MacroAssembler::EnterBuiltinFrame(Register context, Register target,
- Register argc) {
- int fp_delta = 0;
- mflr(r0);
- if (FLAG_enable_embedded_constant_pool) {
- if (target.is_valid()) {
- Push(r0, fp, kConstantPoolRegister, context, target);
- fp_delta = 3;
- } else {
- Push(r0, fp, kConstantPoolRegister, context);
- fp_delta = 2;
- }
- } else {
- if (target.is_valid()) {
- Push(r0, fp, context, target);
- fp_delta = 2;
- } else {
- Push(r0, fp, context);
- fp_delta = 1;
- }
- }
- addi(fp, sp, Operand(fp_delta * kPointerSize));
- Push(argc);
-}
-
-void MacroAssembler::LeaveBuiltinFrame(Register context, Register target,
- Register argc) {
- Pop(argc);
- if (FLAG_enable_embedded_constant_pool) {
- if (target.is_valid()) {
- Pop(r0, fp, kConstantPoolRegister, context, target);
- } else {
- Pop(r0, fp, kConstantPoolRegister, context);
- }
- } else {
- if (target.is_valid()) {
- Pop(r0, fp, context, target);
- } else {
- Pop(r0, fp, context);
- }
- }
- mtlr(r0);
-}
-
// ExitFrame layout (probably wrongish.. needs updating)
//
// SP -> previousSP
@@ -1026,7 +935,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
DCHECK_EQ(2 * kPointerSize, ExitFrameConstants::kCallerSPDisplacement);
DCHECK_EQ(1 * kPointerSize, ExitFrameConstants::kCallerPCOffset);
DCHECK_EQ(0 * kPointerSize, ExitFrameConstants::kCallerFPOffset);
- DCHECK(stack_space > 0);
+ DCHECK_GT(stack_space, 0);
// This is an opportunity to build a frame to wrap
// all of the pushes that have happened inside of V8
@@ -1101,7 +1010,6 @@ int TurboAssembler::ActivationFrameAlignment() {
void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
- bool restore_context,
bool argument_count_is_length) {
ConstantPoolUnavailableScope constant_pool_unavailable(this);
// Optionally restore all double registers.
@@ -1121,11 +1029,10 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
StoreP(r6, MemOperand(ip));
// Restore current context from top and clear it in debug mode.
- if (restore_context) {
- mov(ip, Operand(ExternalReference(IsolateAddressId::kContextAddress,
- isolate())));
- LoadP(cp, MemOperand(ip));
- }
+ mov(ip,
+ Operand(ExternalReference(IsolateAddressId::kContextAddress, isolate())));
+ LoadP(cp, MemOperand(ip));
+
#ifdef DEBUG
mov(ip,
Operand(ExternalReference(IsolateAddressId::kContextAddress, isolate())));
@@ -1458,8 +1365,8 @@ void MacroAssembler::CompareObjectType(Register object, Register map,
void MacroAssembler::CompareInstanceType(Register map, Register type_reg,
InstanceType type) {
STATIC_ASSERT(Map::kInstanceTypeOffset < 4096);
- STATIC_ASSERT(LAST_TYPE < 256);
- lbz(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
+ STATIC_ASSERT(LAST_TYPE <= 0xffff);
+ lhz(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
cmpi(type_reg, Operand(type));
}
@@ -1559,31 +1466,6 @@ void TurboAssembler::SubAndCheckForOverflow(Register dst, Register left,
}
-void MacroAssembler::GetWeakValue(Register value, Handle<WeakCell> cell) {
- mov(value, Operand(cell));
- LoadP(value, FieldMemOperand(value, WeakCell::kValueOffset));
-}
-
-
-void MacroAssembler::LoadWeakValue(Register value, Handle<WeakCell> cell,
- Label* miss) {
- GetWeakValue(value, cell);
- JumpIfSmi(value, miss);
-}
-
-void MacroAssembler::GetMapConstructor(Register result, Register map,
- Register temp, Register temp2) {
- Label done, loop;
- LoadP(result, FieldMemOperand(map, Map::kConstructorOrBackPointerOffset));
- bind(&loop);
- JumpIfSmi(result, &done);
- CompareObjectType(result, temp, temp2, MAP_TYPE);
- bne(&done);
- LoadP(result, FieldMemOperand(result, Map::kConstructorOrBackPointerOffset));
- b(&loop);
- bind(&done);
-}
-
void MacroAssembler::CallStub(CodeStub* stub, Condition cond) {
DCHECK(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
Call(stub->GetCode(), RelocInfo::CODE_TARGET, cond);
@@ -1608,11 +1490,6 @@ bool TurboAssembler::AllowThisStubCall(CodeStub* stub) {
return has_frame_ || !stub->SometimesSetsUpAFrame();
}
-void MacroAssembler::SmiToDouble(DoubleRegister value, Register smi) {
- SmiUntag(ip, smi);
- ConvertIntToDouble(ip, value);
-}
-
void MacroAssembler::TryDoubleToInt32Exact(Register result,
DoubleRegister double_input,
Register scratch,
@@ -1651,7 +1528,7 @@ void TurboAssembler::TruncateDoubleToIDelayed(Zone* zone, Register result,
// Put input on stack.
stfdu(double_input, MemOperand(sp, -kDoubleSize));
- CallStubDelayed(new (zone) DoubleToIStub(nullptr, sp, result, 0, true, true));
+ CallStubDelayed(new (zone) DoubleToIStub(nullptr, result));
addi(sp, sp, Operand(kDoubleSize));
pop(r0);
@@ -1747,7 +1624,7 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
Register scratch1, Register scratch2) {
- DCHECK(value > 0);
+ DCHECK_GT(value, 0);
if (FLAG_native_code_counters && counter->Enabled()) {
mov(scratch2, Operand(ExternalReference(counter)));
lwz(scratch1, MemOperand(scratch2));
@@ -1759,7 +1636,7 @@ void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
Register scratch1, Register scratch2) {
- DCHECK(value > 0);
+ DCHECK_GT(value, 0);
if (FLAG_native_code_counters && counter->Enabled()) {
mov(scratch2, Operand(ExternalReference(counter)));
lwz(scratch1, MemOperand(scratch2));
@@ -1786,7 +1663,7 @@ void TurboAssembler::Abort(BailoutReason reason) {
bind(&abort_start);
#ifdef DEBUG
const char* msg = GetBailoutReason(reason);
- if (msg != NULL) {
+ if (msg != nullptr) {
RecordComment("Abort message: ");
RecordComment(msg);
}
@@ -1926,18 +1803,6 @@ void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
}
-void MacroAssembler::JumpIfNotUniqueNameInstanceType(Register reg,
- Label* not_unique_name) {
- STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
- Label succeed;
- andi(r0, reg, Operand(kIsNotStringMask | kIsNotInternalizedMask));
- beq(&succeed, cr0);
- cmpi(reg, Operand(SYMBOL_TYPE));
- bne(not_unique_name);
-
- bind(&succeed);
-}
-
static const int kRegisterPassedArguments = 8;
int TurboAssembler::CalculateStackPassedWords(int num_reg_arguments,
@@ -2076,78 +1941,6 @@ void TurboAssembler::CheckPageFlag(
}
}
-
-void MacroAssembler::JumpIfBlack(Register object, Register scratch0,
- Register scratch1, Label* on_black) {
- HasColor(object, scratch0, scratch1, on_black, 1, 1); // kBlackBitPattern.
- DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
-}
-
-
-void MacroAssembler::HasColor(Register object, Register bitmap_scratch,
- Register mask_scratch, Label* has_color,
- int first_bit, int second_bit) {
- DCHECK(!AreAliased(object, bitmap_scratch, mask_scratch, no_reg));
-
- GetMarkBits(object, bitmap_scratch, mask_scratch);
-
- Label other_color, word_boundary;
- lwz(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
- // Test the first bit
- and_(r0, ip, mask_scratch, SetRC);
- b(first_bit == 1 ? eq : ne, &other_color, cr0);
- // Shift left 1
- // May need to load the next cell
- slwi(mask_scratch, mask_scratch, Operand(1), SetRC);
- beq(&word_boundary, cr0);
- // Test the second bit
- and_(r0, ip, mask_scratch, SetRC);
- b(second_bit == 1 ? ne : eq, has_color, cr0);
- b(&other_color);
-
- bind(&word_boundary);
- lwz(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize + kIntSize));
- andi(r0, ip, Operand(1));
- b(second_bit == 1 ? ne : eq, has_color, cr0);
- bind(&other_color);
-}
-
-
-void MacroAssembler::GetMarkBits(Register addr_reg, Register bitmap_reg,
- Register mask_reg) {
- DCHECK(!AreAliased(addr_reg, bitmap_reg, mask_reg, no_reg));
- DCHECK((~Page::kPageAlignmentMask & 0xffff) == 0);
- lis(r0, Operand((~Page::kPageAlignmentMask >> 16)));
- and_(bitmap_reg, addr_reg, r0);
- const int kLowBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2;
- ExtractBitRange(mask_reg, addr_reg, kLowBits - 1, kPointerSizeLog2);
- ExtractBitRange(ip, addr_reg, kPageSizeBits - 1, kLowBits);
- ShiftLeftImm(ip, ip, Operand(Bitmap::kBytesPerCellLog2));
- add(bitmap_reg, bitmap_reg, ip);
- li(ip, Operand(1));
- slw(mask_reg, ip, mask_reg);
-}
-
-
-void MacroAssembler::JumpIfWhite(Register value, Register bitmap_scratch,
- Register mask_scratch, Register load_scratch,
- Label* value_is_white) {
- DCHECK(!AreAliased(value, bitmap_scratch, mask_scratch, ip));
- GetMarkBits(value, bitmap_scratch, mask_scratch);
-
- // If the value is black or grey we don't need to do anything.
- DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
- DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
- DCHECK(strcmp(Marking::kGreyBitPattern, "10") == 0);
- DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
-
- // Since both black and grey have a 1 in the first position and white does
- // not have a 1 there we only need to check one bit.
- lwz(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
- and_(r0, mask_scratch, load_scratch, SetRC);
- beq(value_is_white, cr0);
-}
-
void TurboAssembler::SetRoundingMode(FPRoundingMode RN) { mtfsfi(7, RN); }
void TurboAssembler::ResetRoundingMode() {
@@ -2155,24 +1948,6 @@ void TurboAssembler::ResetRoundingMode() {
}
-void MacroAssembler::LoadInstanceDescriptors(Register map,
- Register descriptors) {
- LoadP(descriptors, FieldMemOperand(map, Map::kDescriptorsOffset));
-}
-
-void MacroAssembler::LoadAccessor(Register dst, Register holder,
- int accessor_index,
- AccessorComponent accessor) {
- LoadP(dst, FieldMemOperand(holder, HeapObject::kMapOffset));
- LoadInstanceDescriptors(dst, dst);
- LoadP(dst,
- FieldMemOperand(dst, DescriptorArray::GetValueOffset(accessor_index)));
- const int getterOffset = AccessorPair::kGetterOffset;
- const int setterOffset = AccessorPair::kSetterOffset;
- int offset = ((accessor == ACCESSOR_GETTER) ? getterOffset : setterOffset);
- LoadP(dst, FieldMemOperand(dst, offset));
-}
-
////////////////////////////////////////////////////////////////////////////////
//
// New MacroAssembler Interfaces added for PPC
@@ -3007,6 +2782,90 @@ Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2, Register reg3,
UNREACHABLE();
}
+void TurboAssembler::SwapP(Register src, Register dst, Register scratch) {
+ if (src == dst) return;
+ DCHECK(!AreAliased(src, dst, scratch));
+ mr(scratch, src);
+ mr(src, dst);
+ mr(dst, scratch);
+}
+
+void TurboAssembler::SwapP(Register src, MemOperand dst, Register scratch) {
+ if (dst.ra() != r0) DCHECK(!AreAliased(src, dst.ra(), scratch));
+ if (dst.rb() != r0) DCHECK(!AreAliased(src, dst.rb(), scratch));
+ DCHECK(!AreAliased(src, scratch));
+ mr(scratch, src);
+ LoadP(src, dst);
+ StoreP(scratch, dst);
+}
+
+void TurboAssembler::SwapP(MemOperand src, MemOperand dst, Register scratch_0,
+ Register scratch_1) {
+ if (src.ra() != r0) DCHECK(!AreAliased(src.ra(), scratch_0, scratch_1));
+ if (src.rb() != r0) DCHECK(!AreAliased(src.rb(), scratch_0, scratch_1));
+ if (dst.ra() != r0) DCHECK(!AreAliased(dst.ra(), scratch_0, scratch_1));
+ if (dst.rb() != r0) DCHECK(!AreAliased(dst.rb(), scratch_0, scratch_1));
+ DCHECK(!AreAliased(scratch_0, scratch_1));
+ LoadP(scratch_0, src);
+ LoadP(scratch_1, dst);
+ StoreP(scratch_0, dst);
+ StoreP(scratch_1, src);
+}
+
+void TurboAssembler::SwapFloat32(DoubleRegister src, DoubleRegister dst,
+ DoubleRegister scratch) {
+ if (src == dst) return;
+ DCHECK(!AreAliased(src, dst, scratch));
+ fmr(scratch, src);
+ fmr(src, dst);
+ fmr(dst, scratch);
+}
+
+void TurboAssembler::SwapFloat32(DoubleRegister src, MemOperand dst,
+ DoubleRegister scratch) {
+ DCHECK(!AreAliased(src, scratch));
+ fmr(scratch, src);
+ LoadSingle(src, dst);
+ StoreSingle(scratch, dst);
+}
+
+void TurboAssembler::SwapFloat32(MemOperand src, MemOperand dst,
+ DoubleRegister scratch_0,
+ DoubleRegister scratch_1) {
+ DCHECK(!AreAliased(scratch_0, scratch_1));
+ LoadSingle(scratch_0, src);
+ LoadSingle(scratch_1, dst);
+ StoreSingle(scratch_0, dst);
+ StoreSingle(scratch_1, src);
+}
+
+void TurboAssembler::SwapDouble(DoubleRegister src, DoubleRegister dst,
+ DoubleRegister scratch) {
+ if (src == dst) return;
+ DCHECK(!AreAliased(src, dst, scratch));
+ fmr(scratch, src);
+ fmr(src, dst);
+ fmr(dst, scratch);
+}
+
+void TurboAssembler::SwapDouble(DoubleRegister src, MemOperand dst,
+ DoubleRegister scratch) {
+ DCHECK(!AreAliased(src, scratch));
+ fmr(scratch, src);
+ LoadDouble(src, dst);
+ StoreDouble(scratch, dst);
+}
+
+void TurboAssembler::SwapDouble(MemOperand src, MemOperand dst,
+ DoubleRegister scratch_0,
+ DoubleRegister scratch_1) {
+ DCHECK(!AreAliased(scratch_0, scratch_1));
+ LoadDouble(scratch_0, src);
+ LoadDouble(scratch_1, dst);
+ StoreDouble(scratch_0, dst);
+ StoreDouble(scratch_1, src);
+}
+
#ifdef DEBUG
bool AreAliased(Register reg1, Register reg2, Register reg3, Register reg4,
Register reg5, Register reg6, Register reg7, Register reg8,
@@ -3031,52 +2890,34 @@ bool AreAliased(Register reg1, Register reg2, Register reg3, Register reg4,
return n_of_valid_regs != n_of_non_aliasing_regs;
}
-#endif
-
-
-CodePatcher::CodePatcher(Isolate* isolate, byte* address, int instructions,
- FlushICache flush_cache)
- : address_(address),
- size_(instructions * Assembler::kInstrSize),
- masm_(isolate, address, size_ + Assembler::kGap, CodeObjectRequired::kNo),
- flush_cache_(flush_cache) {
- // Create a new macro assembler pointing to the address of the code to patch.
- // The size is adjusted with kGap on order for the assembler to generate size
- // bytes of instructions without failing with buffer size constraints.
- DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
-}
+bool AreAliased(DoubleRegister reg1, DoubleRegister reg2, DoubleRegister reg3,
+ DoubleRegister reg4, DoubleRegister reg5, DoubleRegister reg6,
+ DoubleRegister reg7, DoubleRegister reg8, DoubleRegister reg9,
+ DoubleRegister reg10) {
+ int n_of_valid_regs = reg1.is_valid() + reg2.is_valid() + reg3.is_valid() +
+ reg4.is_valid() + reg5.is_valid() + reg6.is_valid() +
+ reg7.is_valid() + reg8.is_valid() + reg9.is_valid() +
+ reg10.is_valid();
-CodePatcher::~CodePatcher() {
- // Indicate that code has changed.
- if (flush_cache_ == FLUSH) {
- Assembler::FlushICache(masm_.isolate(), address_, size_);
- }
+ RegList regs = 0;
+ if (reg1.is_valid()) regs |= reg1.bit();
+ if (reg2.is_valid()) regs |= reg2.bit();
+ if (reg3.is_valid()) regs |= reg3.bit();
+ if (reg4.is_valid()) regs |= reg4.bit();
+ if (reg5.is_valid()) regs |= reg5.bit();
+ if (reg6.is_valid()) regs |= reg6.bit();
+ if (reg7.is_valid()) regs |= reg7.bit();
+ if (reg8.is_valid()) regs |= reg8.bit();
+ if (reg9.is_valid()) regs |= reg9.bit();
+ if (reg10.is_valid()) regs |= reg10.bit();
+ int n_of_non_aliasing_regs = NumRegs(regs);
- // Check that the code was patched as expected.
- DCHECK(masm_.pc_ == address_ + size_);
- DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
+ return n_of_valid_regs != n_of_non_aliasing_regs;
}
+#endif
-void CodePatcher::Emit(Instr instr) { masm()->emit(instr); }
-
-
-void CodePatcher::EmitCondition(Condition cond) {
- Instr instr = Assembler::instr_at(masm_.pc_);
- switch (cond) {
- case eq:
- instr = (instr & ~kCondMask) | BT;
- break;
- case ne:
- instr = (instr & ~kCondMask) | BF;
- break;
- default:
- UNIMPLEMENTED();
- }
- masm_.emit(instr);
-}
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/ppc/macro-assembler-ppc.h b/deps/v8/src/ppc/macro-assembler-ppc.h
index cc1d7a151e..c508ae128a 100644
--- a/deps/v8/src/ppc/macro-assembler-ppc.h
+++ b/deps/v8/src/ppc/macro-assembler-ppc.h
@@ -66,6 +66,11 @@ bool AreAliased(Register reg1, Register reg2, Register reg3 = no_reg,
Register reg6 = no_reg, Register reg7 = no_reg,
Register reg8 = no_reg, Register reg9 = no_reg,
Register reg10 = no_reg);
+bool AreAliased(DoubleRegister reg1, DoubleRegister reg2,
+ DoubleRegister reg3 = no_dreg, DoubleRegister reg4 = no_dreg,
+ DoubleRegister reg5 = no_dreg, DoubleRegister reg6 = no_dreg,
+ DoubleRegister reg7 = no_dreg, DoubleRegister reg8 = no_dreg,
+ DoubleRegister reg9 = no_dreg, DoubleRegister reg10 = no_dreg);
#endif
// These exist to provide portability between 32 and 64bit
@@ -339,6 +344,21 @@ class TurboAssembler : public Assembler {
void LoadRoot(Register destination, Heap::RootListIndex index,
Condition cond = al);
+ void SwapP(Register src, Register dst, Register scratch);
+ void SwapP(Register src, MemOperand dst, Register scratch);
+ void SwapP(MemOperand src, MemOperand dst, Register scratch_0,
+ Register scratch_1);
+ void SwapFloat32(DoubleRegister src, DoubleRegister dst,
+ DoubleRegister scratch);
+ void SwapFloat32(DoubleRegister src, MemOperand dst, DoubleRegister scratch);
+ void SwapFloat32(MemOperand src, MemOperand dst, DoubleRegister scratch_0,
+ DoubleRegister scratch_1);
+ void SwapDouble(DoubleRegister src, DoubleRegister dst,
+ DoubleRegister scratch);
+ void SwapDouble(DoubleRegister src, MemOperand dst, DoubleRegister scratch);
+ void SwapDouble(MemOperand src, MemOperand dst, DoubleRegister scratch_0,
+ DoubleRegister scratch_1);
+
// Before calling a C-function from generated code, align arguments on stack.
// After aligning the frame, non-register arguments must be stored in
// sp[0], sp[4], etc., not pushed. The argument count assumes all arguments
@@ -649,18 +669,6 @@ class MacroAssembler : public TurboAssembler {
MacroAssembler(Isolate* isolate, void* buffer, int size,
CodeObjectRequired create_code_object);
- // Emit code that loads |parameter_index|'th parameter from the stack to
- // the register according to the CallInterfaceDescriptor definition.
- // |sp_to_caller_sp_offset_in_words| specifies the number of words pushed
- // below the caller's sp.
- template <class Descriptor>
- void LoadParameterFromStack(
- Register reg, typename Descriptor::ParameterIndices parameter_index,
- int sp_to_ra_offset_in_words = 0) {
- DCHECK(Descriptor::kPassLastArgsOnStack);
- UNIMPLEMENTED();
- }
-
// ---------------------------------------------------------------------------
// GC Support
@@ -743,7 +751,6 @@ class MacroAssembler : public TurboAssembler {
// Expect the number of values, pushed prior to the exit frame, to
// remove in a register (or no_reg, if there is nothing to remove).
void LeaveExitFrame(bool save_doubles, Register argument_count,
- bool restore_context,
bool argument_count_is_length = false);
// Load the global proxy from the current context.
@@ -842,11 +849,6 @@ class MacroAssembler : public TurboAssembler {
// ---------------------------------------------------------------------------
// Support functions.
- // Machine code version of Map::GetConstructor().
- // |temp| holds |result|'s map when done, and |temp2| its instance type.
- void GetMapConstructor(Register result, Register map, Register temp,
- Register temp2);
-
// Compare object type for heap object. heap_object contains a non-Smi
// whose object type should be compared with the given type. This both
// sets the flags and leaves the object type in the type_reg register.
@@ -863,12 +865,6 @@ class MacroAssembler : public TurboAssembler {
// sets the flags and leaves the object type in the type_reg register.
void CompareInstanceType(Register map, Register type_reg, InstanceType type);
- void GetWeakValue(Register value, Handle<WeakCell> cell);
-
- // Load the value of the weak cell in the value register. Branch to the given
- // miss label if the weak cell was cleared.
- void LoadWeakValue(Register value, Handle<WeakCell> cell, Label* miss);
-
// Compare the object in a register to a value from the root list.
// Uses the ip register as scratch.
void CompareRoot(Register obj, Heap::RootListIndex index);
@@ -890,9 +886,6 @@ class MacroAssembler : public TurboAssembler {
bne(if_not_equal);
}
- // Load the value of a smi object into a double register.
- void SmiToDouble(DoubleRegister value, Register smi);
-
// Try to convert a double to a signed 32-bit integer.
// CR_EQ in cr7 is set and result assigned if the conversion is exact.
void TryDoubleToInt32Exact(Register result, DoubleRegister double_input,
@@ -1015,17 +1008,8 @@ class MacroAssembler : public TurboAssembler {
void AssertUndefinedOrAllocationSite(Register object, Register scratch);
// ---------------------------------------------------------------------------
- // String utilities
-
- void JumpIfNotUniqueNameInstanceType(Register reg, Label* not_unique_name);
-
- // ---------------------------------------------------------------------------
// Patching helpers.
- void LoadInstanceDescriptors(Register map, Register descriptors);
- void LoadAccessor(Register dst, Register holder, int accessor_index,
- AccessorComponent accessor);
-
template <typename Field>
void DecodeField(Register dst, Register src, RCBit rc = LeaveRC) {
ExtractBitRange(dst, src, Field::kShift + Field::kSize - 1, Field::kShift,
@@ -1037,9 +1021,6 @@ class MacroAssembler : public TurboAssembler {
DecodeField<Field>(reg, reg, rc);
}
- void EnterBuiltinFrame(Register context, Register target, Register argc);
- void LeaveBuiltinFrame(Register context, Register target, Register argc);
-
private:
static const int kSmiShift = kSmiTagSize + kSmiShiftSize;
@@ -1053,12 +1034,6 @@ class MacroAssembler : public TurboAssembler {
Condition cond, // eq for new space, ne otherwise.
Label* branch);
- // Helper for finding the mark bits for an address. Afterwards, the
- // bitmap register points at the word with the mark bits and the mask
- // the position of the first bit. Leaves addr_reg unchanged.
- inline void GetMarkBits(Register addr_reg, Register bitmap_reg,
- Register mask_reg);
-
// Compute memory operands for safepoint stack slots.
static int SafepointRegisterStackIndex(int reg_code);
@@ -1067,37 +1042,6 @@ class MacroAssembler : public TurboAssembler {
friend class StandardFrame;
};
-// The code patcher is used to patch (typically) small parts of code e.g. for
-// debugging and other types of instrumentation. When using the code patcher
-// the exact number of bytes specified must be emitted. It is not legal to emit
-// relocation information. If any of these constraints are violated it causes
-// an assertion to fail.
-class CodePatcher {
- public:
- enum FlushICache { FLUSH, DONT_FLUSH };
-
- CodePatcher(Isolate* isolate, byte* address, int instructions,
- FlushICache flush_cache = FLUSH);
- ~CodePatcher();
-
- // Macro assembler to emit code.
- MacroAssembler* masm() { return &masm_; }
-
- // Emit an instruction directly.
- void Emit(Instr instr);
-
- // Emit the condition part of an instruction leaving the rest of the current
- // instruction unchanged.
- void EmitCondition(Condition cond);
-
- private:
- byte* address_; // The address of the code being patched.
- int size_; // Number of bytes of the expected patch size.
- MacroAssembler masm_; // Macro assembler used to generate the code.
- FlushICache flush_cache_; // Whether to flush the I cache after patching.
-};
-
-
// -----------------------------------------------------------------------------
// Static helper functions.
diff --git a/deps/v8/src/ppc/simulator-ppc.cc b/deps/v8/src/ppc/simulator-ppc.cc
index 0f90700c81..ff62c4a56e 100644
--- a/deps/v8/src/ppc/simulator-ppc.cc
+++ b/deps/v8/src/ppc/simulator-ppc.cc
@@ -12,6 +12,8 @@
#include "src/base/bits.h"
#include "src/codegen.h"
#include "src/disasm.h"
+#include "src/macro-assembler.h"
+#include "src/ostreams.h"
#include "src/ppc/constants-ppc.h"
#include "src/ppc/frame-constants-ppc.h"
#include "src/ppc/simulator-ppc.h"
@@ -132,7 +134,7 @@ bool PPCDebugger::GetFPDoubleValue(const char* desc, double* value) {
bool PPCDebugger::SetBreakpoint(Instruction* break_pc) {
// Check if a breakpoint can be set. If not return without any side-effects.
- if (sim_->break_pc_ != NULL) {
+ if (sim_->break_pc_ != nullptr) {
return false;
}
@@ -146,25 +148,25 @@ bool PPCDebugger::SetBreakpoint(Instruction* break_pc) {
bool PPCDebugger::DeleteBreakpoint(Instruction* break_pc) {
- if (sim_->break_pc_ != NULL) {
+ if (sim_->break_pc_ != nullptr) {
sim_->break_pc_->SetInstructionBits(sim_->break_instr_);
}
- sim_->break_pc_ = NULL;
+ sim_->break_pc_ = nullptr;
sim_->break_instr_ = 0;
return true;
}
void PPCDebugger::UndoBreakpoints() {
- if (sim_->break_pc_ != NULL) {
+ if (sim_->break_pc_ != nullptr) {
sim_->break_pc_->SetInstructionBits(sim_->break_instr_);
}
}
void PPCDebugger::RedoBreakpoints() {
- if (sim_->break_pc_ != NULL) {
+ if (sim_->break_pc_ != nullptr) {
sim_->break_pc_->SetInstructionBits(kBreakpointInstr);
}
}
@@ -208,11 +210,11 @@ void PPCDebugger::Debug() {
last_pc = sim_->get_pc();
}
char* line = ReadLine("sim> ");
- if (line == NULL) {
+ if (line == nullptr) {
break;
} else {
char* last_input = sim_->last_debugger_input();
- if (strcmp(line, "\n") == 0 && last_input != NULL) {
+ if (strcmp(line, "\n") == 0 && last_input != nullptr) {
line = last_input;
} else {
// Ownership is transferred to sim_;
@@ -371,8 +373,8 @@ void PPCDebugger::Debug() {
}
sim_->set_pc(value);
} else if (strcmp(cmd, "stack") == 0 || strcmp(cmd, "mem") == 0) {
- intptr_t* cur = NULL;
- intptr_t* end = NULL;
+ intptr_t* cur = nullptr;
+ intptr_t* end = nullptr;
int next_arg = 1;
if (strcmp(cmd, "stack") == 0) {
@@ -422,9 +424,9 @@ void PPCDebugger::Debug() {
// use a reasonably large buffer
v8::internal::EmbeddedVector<char, 256> buffer;
- byte* prev = NULL;
- byte* cur = NULL;
- byte* end = NULL;
+ byte* prev = nullptr;
+ byte* cur = nullptr;
+ byte* end = nullptr;
if (argc == 1) {
cur = reinterpret_cast<byte*>(sim_->get_pc());
@@ -481,7 +483,7 @@ void PPCDebugger::Debug() {
PrintF("break <address>\n");
}
} else if (strcmp(cmd, "del") == 0) {
- if (!DeleteBreakpoint(NULL)) {
+ if (!DeleteBreakpoint(nullptr)) {
PrintF("deleting breakpoint failed\n");
}
} else if (strcmp(cmd, "cr") == 0) {
@@ -639,8 +641,8 @@ void PPCDebugger::Debug() {
static bool ICacheMatch(void* one, void* two) {
- DCHECK((reinterpret_cast<intptr_t>(one) & CachePage::kPageMask) == 0);
- DCHECK((reinterpret_cast<intptr_t>(two) & CachePage::kPageMask) == 0);
+ DCHECK_EQ(reinterpret_cast<intptr_t>(one) & CachePage::kPageMask, 0);
+ DCHECK_EQ(reinterpret_cast<intptr_t>(two) & CachePage::kPageMask, 0);
return one == two;
}
@@ -686,7 +688,7 @@ void Simulator::FlushICache(base::CustomMatcherHashMap* i_cache,
CachePage* Simulator::GetCachePage(base::CustomMatcherHashMap* i_cache,
void* page) {
base::HashMap::Entry* entry = i_cache->LookupOrInsert(page, ICacheHash(page));
- if (entry->value == NULL) {
+ if (entry->value == nullptr) {
CachePage* new_page = new CachePage();
entry->value = new_page;
}
@@ -697,10 +699,10 @@ CachePage* Simulator::GetCachePage(base::CustomMatcherHashMap* i_cache,
// Flush from start up to and not including start + size.
void Simulator::FlushOnePage(base::CustomMatcherHashMap* i_cache,
intptr_t start, int size) {
- DCHECK(size <= CachePage::kPageSize);
+ DCHECK_LE(size, CachePage::kPageSize);
DCHECK(AllOnOnePage(start, size - 1));
- DCHECK((start & CachePage::kLineMask) == 0);
- DCHECK((size & CachePage::kLineMask) == 0);
+ DCHECK_EQ(start & CachePage::kLineMask, 0);
+ DCHECK_EQ(size & CachePage::kLineMask, 0);
void* page = reinterpret_cast<void*>(start & (~CachePage::kPageMask));
int offset = (start & CachePage::kPageMask);
CachePage* cache_page = GetCachePage(i_cache, page);
@@ -741,7 +743,7 @@ void Simulator::Initialize(Isolate* isolate) {
Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
i_cache_ = isolate_->simulator_i_cache();
- if (i_cache_ == NULL) {
+ if (i_cache_ == nullptr) {
i_cache_ = new base::CustomMatcherHashMap(&ICacheMatch);
isolate_->set_simulator_i_cache(i_cache_);
}
@@ -757,7 +759,7 @@ Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
stack_ = reinterpret_cast<char*>(malloc(stack_size));
pc_modified_ = false;
icount_ = 0;
- break_pc_ = NULL;
+ break_pc_ = nullptr;
break_instr_ = 0;
// Set up architecture state.
@@ -782,7 +784,7 @@ Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
registers_[sp] =
reinterpret_cast<intptr_t>(stack_) + stack_size - stack_protection_size_;
- last_debugger_input_ = NULL;
+ last_debugger_input_ = nullptr;
}
Simulator::~Simulator() {
@@ -804,7 +806,7 @@ class Redirection {
: external_function_(external_function),
swi_instruction_(rtCallRedirInstr | kCallRtRedirected),
type_(type),
- next_(NULL) {
+ next_(nullptr) {
next_ = isolate->simulator_redirection();
Simulator::current(isolate)->FlushICache(
isolate->simulator_i_cache(),
@@ -831,9 +833,9 @@ class Redirection {
static Redirection* Get(Isolate* isolate, void* external_function,
ExternalReference::Type type) {
Redirection* current = isolate->simulator_redirection();
- for (; current != NULL; current = current->next_) {
- if (current->external_function_ == external_function) {
- DCHECK_EQ(current->type(), type);
+ for (; current != nullptr; current = current->next_) {
+ if (current->external_function_ == external_function &&
+ current->type_ == type) {
return current;
}
}
@@ -905,10 +907,10 @@ void* Simulator::RedirectExternalReference(Isolate* isolate,
Simulator* Simulator::current(Isolate* isolate) {
v8::internal::Isolate::PerIsolateThreadData* isolate_data =
isolate->FindOrAllocatePerThreadDataForThisThread();
- DCHECK(isolate_data != NULL);
+ DCHECK_NOT_NULL(isolate_data);
Simulator* sim = isolate_data->simulator();
- if (sim == NULL) {
+ if (sim == nullptr) {
// TODO(146): delete the simulator object when a thread/isolate goes away.
sim = new Simulator(isolate);
isolate_data->set_simulator(sim);
@@ -1607,13 +1609,13 @@ bool Simulator::isStopInstruction(Instruction* instr) {
bool Simulator::isWatchedStop(uint32_t code) {
- DCHECK(code <= kMaxStopCode);
+ DCHECK_LE(code, kMaxStopCode);
return code < kNumOfWatchedStops;
}
bool Simulator::isEnabledStop(uint32_t code) {
- DCHECK(code <= kMaxStopCode);
+ DCHECK_LE(code, kMaxStopCode);
// Unwatched stops are always enabled.
return !isWatchedStop(code) ||
!(watched_stops_[code].count & kStopDisabledBit);
@@ -1637,7 +1639,7 @@ void Simulator::DisableStop(uint32_t code) {
void Simulator::IncreaseStopCounter(uint32_t code) {
- DCHECK(code <= kMaxStopCode);
+ DCHECK_LE(code, kMaxStopCode);
DCHECK(isWatchedStop(code));
if ((watched_stops_[code].count & ~(1 << 31)) == 0x7fffffff) {
PrintF(
@@ -1654,7 +1656,7 @@ void Simulator::IncreaseStopCounter(uint32_t code) {
// Print a stop status.
void Simulator::PrintStopInfo(uint32_t code) {
- DCHECK(code <= kMaxStopCode);
+ DCHECK_LE(code, kMaxStopCode);
if (!isWatchedStop(code)) {
PrintF("Stop not watched.");
} else {
@@ -2240,9 +2242,21 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
intptr_t rb_val = get_register(rb);
int32_t val = ReadW(ra_val + rb_val, instr);
float* fptr = reinterpret_cast<float*>(&val);
+#if V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64
+ // Conversion using double changes sNan to qNan on ia32/x64
+ if ((val & 0x7f800000) == 0x7f800000) {
+ int64_t dval = static_cast<int64_t>(val);
+ dval = ((dval & 0xc0000000) << 32) | ((dval & 0x40000000) << 31) |
+ ((dval & 0x40000000) << 30) | ((dval & 0x7fffffff) << 29) | 0x0;
+ set_d_register(frt, dval);
+ } else {
+ set_d_register_from_double(frt, static_cast<double>(*fptr));
+ }
+#else
set_d_register_from_double(frt, static_cast<double>(*fptr));
+#endif
if (opcode == LFSUX) {
- DCHECK(ra != 0);
+ DCHECK_NE(ra, 0);
set_register(ra, ra_val + rb_val);
}
break;
@@ -2257,7 +2271,7 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
int64_t* dptr = reinterpret_cast<int64_t*>(ReadDW(ra_val + rb_val));
set_d_register(frt, *dptr);
if (opcode == LFDUX) {
- DCHECK(ra != 0);
+ DCHECK_NE(ra, 0);
set_register(ra, ra_val + rb_val);
}
break;
@@ -2271,9 +2285,23 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
intptr_t rb_val = get_register(rb);
float frs_val = static_cast<float>(get_double_from_d_register(frs));
int32_t* p = reinterpret_cast<int32_t*>(&frs_val);
+#if V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64
+ // Conversion using double changes sNan to qNan on ia32/x64
+ int32_t sval = 0;
+ int64_t dval = get_d_register(frs);
+ if ((dval & 0x7ff0000000000000) == 0x7ff0000000000000) {
+ sval = ((dval & 0xc000000000000000) >> 32) |
+ ((dval & 0x07ffffffe0000000) >> 29);
+ p = &sval;
+ } else {
+ p = reinterpret_cast<int32_t*>(&frs_val);
+ }
+#else
+ p = reinterpret_cast<int32_t*>(&frs_val);
+#endif
WriteW(ra_val + rb_val, *p, instr);
if (opcode == STFSUX) {
- DCHECK(ra != 0);
+ DCHECK_NE(ra, 0);
set_register(ra, ra_val + rb_val);
}
break;
@@ -2288,7 +2316,7 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
int64_t frs_val = get_d_register(frs);
WriteDW(ra_val + rb_val, frs_val);
if (opcode == STFDUX) {
- DCHECK(ra != 0);
+ DCHECK_NE(ra, 0);
set_register(ra, ra_val + rb_val);
}
break;
@@ -2340,7 +2368,7 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
int offset = SIGN_EXT_IMM16(instr->Bits(15, 0));
set_register(rt, ReadWU(ra_val + offset, instr));
if (opcode == LWZU) {
- DCHECK(ra != 0);
+ DCHECK_NE(ra, 0);
set_register(ra, ra_val + offset);
}
break;
@@ -2354,7 +2382,7 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
int offset = SIGN_EXT_IMM16(instr->Bits(15, 0));
set_register(rt, ReadB(ra_val + offset) & 0xFF);
if (opcode == LBZU) {
- DCHECK(ra != 0);
+ DCHECK_NE(ra, 0);
set_register(ra, ra_val + offset);
}
break;
@@ -2369,7 +2397,7 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
int offset = SIGN_EXT_IMM16(instr->Bits(15, 0));
WriteW(ra_val + offset, rs_val, instr);
if (opcode == STWU) {
- DCHECK(ra != 0);
+ DCHECK_NE(ra, 0);
set_register(ra, ra_val + offset);
}
break;
@@ -3029,7 +3057,7 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
intptr_t rb_val = get_register(rb);
WriteW(ra_val + rb_val, rs_val, instr);
if (opcode == STWUX) {
- DCHECK(ra != 0);
+ DCHECK_NE(ra, 0);
set_register(ra, ra_val + rb_val);
}
break;
@@ -3044,7 +3072,7 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
intptr_t rb_val = get_register(rb);
WriteB(ra_val + rb_val, rs_val);
if (opcode == STBUX) {
- DCHECK(ra != 0);
+ DCHECK_NE(ra, 0);
set_register(ra, ra_val + rb_val);
}
break;
@@ -3059,7 +3087,7 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
intptr_t rb_val = get_register(rb);
WriteH(ra_val + rb_val, rs_val, instr);
if (opcode == STHUX) {
- DCHECK(ra != 0);
+ DCHECK_NE(ra, 0);
set_register(ra, ra_val + rb_val);
}
break;
@@ -3113,7 +3141,7 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
intptr_t rb_val = get_register(rb);
WriteDW(ra_val + rb_val, rs_val);
if (opcode == STDUX) {
- DCHECK(ra != 0);
+ DCHECK_NE(ra, 0);
set_register(ra, ra_val + rb_val);
}
break;
@@ -3209,7 +3237,7 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
int offset = SIGN_EXT_IMM16(instr->Bits(15, 0));
WriteB(ra_val + offset, rs_val);
if (opcode == STBU) {
- DCHECK(ra != 0);
+ DCHECK_NE(ra, 0);
set_register(ra, ra_val + offset);
}
break;
@@ -3252,7 +3280,7 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
int offset = SIGN_EXT_IMM16(instr->Bits(15, 0));
WriteH(ra_val + offset, rs_val, instr);
if (opcode == STHU) {
- DCHECK(ra != 0);
+ DCHECK_NE(ra, 0);
set_register(ra, ra_val + offset);
}
break;
@@ -3286,7 +3314,7 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
set_d_register_from_double(frt, static_cast<double>(*fptr));
#endif
if (opcode == LFSU) {
- DCHECK(ra != 0);
+ DCHECK_NE(ra, 0);
set_register(ra, ra_val + offset);
}
break;
@@ -3301,7 +3329,7 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
int64_t* dptr = reinterpret_cast<int64_t*>(ReadDW(ra_val + offset));
set_d_register(frt, *dptr);
if (opcode == LFDU) {
- DCHECK(ra != 0);
+ DCHECK_NE(ra, 0);
set_register(ra, ra_val + offset);
}
break;
@@ -3331,7 +3359,7 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
#endif
WriteW(ra_val + offset, *p, instr);
if (opcode == STFSU) {
- DCHECK(ra != 0);
+ DCHECK_NE(ra, 0);
set_register(ra, ra_val + offset);
}
break;
@@ -3346,7 +3374,7 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
int64_t frs_val = get_d_register(frs);
WriteDW(ra_val + offset, frs_val);
if (opcode == STFDU) {
- DCHECK(ra != 0);
+ DCHECK_NE(ra, 0);
set_register(ra, ra_val + offset);
}
break;
@@ -3911,7 +3939,7 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
case 1: { // ldu
intptr_t* result = ReadDW(ra_val + offset);
set_register(rt, *result);
- DCHECK(ra != 0);
+ DCHECK_NE(ra, 0);
set_register(ra, ra_val + offset);
break;
}
@@ -3933,7 +3961,7 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
int offset = SIGN_EXT_IMM16(instr->Bits(15, 0) & ~3);
WriteDW(ra_val + offset, rs_val);
if (opcode == STDU) {
- DCHECK(ra != 0);
+ DCHECK_NE(ra, 0);
set_register(ra, ra_val + offset);
}
break;