path: root/deps/v8/src/ppc
author     Michaël Zasso <targos@protonmail.com>  2017-10-18 15:03:02 -0700
committer  Michaël Zasso <targos@protonmail.com>  2017-10-18 17:01:41 -0700
commit     3d1b3df9486c0e7708065257f7311902f6b7b366 (patch)
tree       cb051bdeaead11e06dcd97725783e0f113afb1bf /deps/v8/src/ppc
parent     e2cddbb8ccdb7b3c4a40c8acc630f68703bc77b5 (diff)
deps: update V8 to 6.2.414.32
PR-URL: https://github.com/nodejs/node/pull/15362
Reviewed-By: Myles Borins <myles.borins@gmail.com>
Reviewed-By: Colin Ihrig <cjihrig@gmail.com>
Reviewed-By: Matteo Collina <matteo.collina@gmail.com>
Reviewed-By: Ben Noordhuis <info@bnoordhuis.nl>
Diffstat (limited to 'deps/v8/src/ppc')
-rw-r--r--  deps/v8/src/ppc/assembler-ppc-inl.h           |  111
-rw-r--r--  deps/v8/src/ppc/assembler-ppc.cc              |   25
-rw-r--r--  deps/v8/src/ppc/assembler-ppc.h               |  157
-rw-r--r--  deps/v8/src/ppc/code-stubs-ppc.cc             | 1212
-rw-r--r--  deps/v8/src/ppc/code-stubs-ppc.h              |    5
-rw-r--r--  deps/v8/src/ppc/codegen-ppc.cc                |   83
-rw-r--r--  deps/v8/src/ppc/deoptimizer-ppc.cc            |   75
-rw-r--r--  deps/v8/src/ppc/disasm-ppc.cc                 |    4
-rw-r--r--  deps/v8/src/ppc/frame-constants-ppc.cc (renamed from deps/v8/src/ppc/frames-ppc.cc) | 10
-rw-r--r--  deps/v8/src/ppc/frame-constants-ppc.h         |   50
-rw-r--r--  deps/v8/src/ppc/frames-ppc.h                  |  188
-rw-r--r--  deps/v8/src/ppc/interface-descriptors-ppc.cc  |   94
-rw-r--r--  deps/v8/src/ppc/macro-assembler-ppc.cc        | 1019
-rw-r--r--  deps/v8/src/ppc/macro-assembler-ppc.h         |  401
-rw-r--r--  deps/v8/src/ppc/simulator-ppc.cc              |    6
-rw-r--r--  deps/v8/src/ppc/simulator-ppc.h               |    2
16 files changed, 336 insertions, 3106 deletions
diff --git a/deps/v8/src/ppc/assembler-ppc-inl.h b/deps/v8/src/ppc/assembler-ppc-inl.h
index c187c2517d..08336cb310 100644
--- a/deps/v8/src/ppc/assembler-ppc-inl.h
+++ b/deps/v8/src/ppc/assembler-ppc-inl.h
@@ -220,85 +220,6 @@ void RelocInfo::set_target_runtime_entry(Isolate* isolate, Address target,
set_target_address(isolate, target, write_barrier_mode, icache_flush_mode);
}
-
-Handle<Cell> RelocInfo::target_cell_handle() {
- DCHECK(rmode_ == RelocInfo::CELL);
- Address address = Memory::Address_at(pc_);
- return Handle<Cell>(reinterpret_cast<Cell**>(address));
-}
-
-
-Cell* RelocInfo::target_cell() {
- DCHECK(rmode_ == RelocInfo::CELL);
- return Cell::FromValueAddress(Memory::Address_at(pc_));
-}
-
-
-void RelocInfo::set_target_cell(Cell* cell, WriteBarrierMode write_barrier_mode,
- ICacheFlushMode icache_flush_mode) {
- DCHECK(rmode_ == RelocInfo::CELL);
- Address address = cell->address() + Cell::kValueOffset;
- Memory::Address_at(pc_) = address;
- if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != NULL) {
- host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(host(), this,
- cell);
- }
-}
-
-
-static const int kNoCodeAgeInstructions =
- FLAG_enable_embedded_constant_pool ? 7 : 6;
-static const int kCodeAgingInstructions =
- Assembler::kMovInstructionsNoConstantPool + 3;
-static const int kNoCodeAgeSequenceInstructions =
- ((kNoCodeAgeInstructions >= kCodeAgingInstructions)
- ? kNoCodeAgeInstructions
- : kCodeAgingInstructions);
-static const int kNoCodeAgeSequenceNops =
- (kNoCodeAgeSequenceInstructions - kNoCodeAgeInstructions);
-static const int kCodeAgingSequenceNops =
- (kNoCodeAgeSequenceInstructions - kCodeAgingInstructions);
-static const int kCodeAgingTargetDelta = 1 * Assembler::kInstrSize;
-static const int kNoCodeAgeSequenceLength =
- (kNoCodeAgeSequenceInstructions * Assembler::kInstrSize);
-
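The constants removed above size the patchable prologue as the longer of the young and aged code sequences, padding the shorter one with nops so both occupy the same number of instructions. A minimal standalone sketch of that arithmetic, with an assumed value for kMovInstructionsNoConstantPool (it is target-configuration dependent):

#include <algorithm>
#include <cstdio>

int main() {
  const int kInstrSize = 4;                      // PPC instructions are 4 bytes
  const int kMovInstructionsNoConstantPool = 5;  // assumed sample value
  const bool kEmbeddedConstantPool = false;      // assumed flag value

  const int no_age = kEmbeddedConstantPool ? 7 : 6;
  const int aging = kMovInstructionsNoConstantPool + 3;
  const int sequence = std::max(no_age, aging);  // both padded to this length

  std::printf("sequence = %d instrs (%d bytes); young pads %d nops, aged %d\n",
              sequence, sequence * kInstrSize, sequence - no_age,
              sequence - aging);
  return 0;
}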
-Handle<Code> RelocInfo::code_age_stub_handle(Assembler* origin) {
- UNREACHABLE(); // This should never be reached on PPC.
- return Handle<Code>();
-}
-
-
-Code* RelocInfo::code_age_stub() {
- DCHECK(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
- return Code::GetCodeFromTargetAddress(
- Assembler::target_address_at(pc_ + kCodeAgingTargetDelta, host_));
-}
-
-
-void RelocInfo::set_code_age_stub(Code* stub,
- ICacheFlushMode icache_flush_mode) {
- DCHECK(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
- Assembler::set_target_address_at(
- stub->GetIsolate(), pc_ + kCodeAgingTargetDelta, host_,
- stub->instruction_start(), icache_flush_mode);
-}
-
-
-Address RelocInfo::debug_call_address() {
- DCHECK(IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence());
- return Assembler::target_address_at(pc_, host_);
-}
-
-void RelocInfo::set_debug_call_address(Isolate* isolate, Address target) {
- DCHECK(IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence());
- Assembler::set_target_address_at(isolate, pc_, host_, target);
- if (host() != NULL) {
- Code* target_code = Code::GetCodeFromTargetAddress(target);
- host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(host(), this,
- target_code);
- }
-}
-
void RelocInfo::WipeOut(Isolate* isolate) {
DCHECK(IsEmbeddedObject(rmode_) || IsCodeTarget(rmode_) ||
IsRuntimeEntry(rmode_) || IsExternalReference(rmode_) ||
@@ -323,48 +244,16 @@ void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
visitor->VisitEmbeddedPointer(host(), this);
} else if (RelocInfo::IsCodeTarget(mode)) {
visitor->VisitCodeTarget(host(), this);
- } else if (mode == RelocInfo::CELL) {
- visitor->VisitCellPointer(host(), this);
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
visitor->VisitExternalReference(host(), this);
} else if (mode == RelocInfo::INTERNAL_REFERENCE ||
mode == RelocInfo::INTERNAL_REFERENCE_ENCODED) {
visitor->VisitInternalReference(host(), this);
- } else if (RelocInfo::IsCodeAgeSequence(mode)) {
- visitor->VisitCodeAgeSequence(host(), this);
- } else if (RelocInfo::IsDebugBreakSlot(mode) &&
- IsPatchedDebugBreakSlotSequence()) {
- visitor->VisitDebugTarget(host(), this);
} else if (IsRuntimeEntry(mode)) {
visitor->VisitRuntimeEntry(host(), this);
}
}
-
-template <typename StaticVisitor>
-void RelocInfo::Visit(Heap* heap) {
- RelocInfo::Mode mode = rmode();
- if (mode == RelocInfo::EMBEDDED_OBJECT) {
- StaticVisitor::VisitEmbeddedPointer(heap, this);
- } else if (RelocInfo::IsCodeTarget(mode)) {
- StaticVisitor::VisitCodeTarget(heap, this);
- } else if (mode == RelocInfo::CELL) {
- StaticVisitor::VisitCell(heap, this);
- } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
- StaticVisitor::VisitExternalReference(this);
- } else if (mode == RelocInfo::INTERNAL_REFERENCE ||
- mode == RelocInfo::INTERNAL_REFERENCE_ENCODED) {
- StaticVisitor::VisitInternalReference(this);
- } else if (RelocInfo::IsCodeAgeSequence(mode)) {
- StaticVisitor::VisitCodeAgeSequence(heap, this);
- } else if (RelocInfo::IsDebugBreakSlot(mode) &&
- IsPatchedDebugBreakSlotSequence()) {
- StaticVisitor::VisitDebugTarget(heap, this);
- } else if (IsRuntimeEntry(mode)) {
- StaticVisitor::VisitRuntimeEntry(this);
- }
-}
-
Operand::Operand(intptr_t immediate, RelocInfo::Mode rmode) {
rm_ = no_reg;
value_.immediate = immediate;
diff --git a/deps/v8/src/ppc/assembler-ppc.cc b/deps/v8/src/ppc/assembler-ppc.cc
index 3a58578524..98f5451cdb 100644
--- a/deps/v8/src/ppc/assembler-ppc.cc
+++ b/deps/v8/src/ppc/assembler-ppc.cc
@@ -162,35 +162,22 @@ bool RelocInfo::IsInConstantPool() {
return false;
}
-Address RelocInfo::wasm_memory_reference() {
- DCHECK(IsWasmMemoryReference(rmode_));
+Address RelocInfo::embedded_address() const {
return Assembler::target_address_at(pc_, host_);
}
-uint32_t RelocInfo::wasm_memory_size_reference() {
- DCHECK(IsWasmMemorySizeReference(rmode_));
+uint32_t RelocInfo::embedded_size() const {
return static_cast<uint32_t>(
reinterpret_cast<intptr_t>(Assembler::target_address_at(pc_, host_)));
}
-Address RelocInfo::wasm_global_reference() {
- DCHECK(IsWasmGlobalReference(rmode_));
- return Assembler::target_address_at(pc_, host_);
-}
-
-uint32_t RelocInfo::wasm_function_table_size_reference() {
- DCHECK(IsWasmFunctionTableSizeReference(rmode_));
- return static_cast<uint32_t>(
- reinterpret_cast<intptr_t>(Assembler::target_address_at(pc_, host_)));
-}
-
-void RelocInfo::unchecked_update_wasm_memory_reference(
- Isolate* isolate, Address address, ICacheFlushMode flush_mode) {
+void RelocInfo::set_embedded_address(Isolate* isolate, Address address,
+ ICacheFlushMode flush_mode) {
Assembler::set_target_address_at(isolate, pc_, host_, address, flush_mode);
}
-void RelocInfo::unchecked_update_wasm_size(Isolate* isolate, uint32_t size,
- ICacheFlushMode flush_mode) {
+void RelocInfo::set_embedded_size(Isolate* isolate, uint32_t size,
+ ICacheFlushMode flush_mode) {
Assembler::set_target_address_at(isolate, pc_, host_,
reinterpret_cast<Address>(size), flush_mode);
}
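The renamed accessors store either a pointer or a 32-bit size in the same code slot, round-tripping the size through the Address type via reinterpret_cast. A hedged sketch of that encoding trick, using a stand-in Address typedef rather than V8's own:

#include <cstdint>
#include <cstdio>

using Address = uint8_t*;  // stand-in for V8's Address

// Encode a 32-bit size in an address-sized slot, as set_embedded_size does.
Address EncodeSize(uint32_t size) {
  return reinterpret_cast<Address>(static_cast<intptr_t>(size));
}

// Decode it back, as embedded_size does.
uint32_t DecodeSize(Address a) {
  return static_cast<uint32_t>(reinterpret_cast<intptr_t>(a));
}

int main() {
  uint32_t size = 0x10000;  // example wasm memory size
  std::printf("round trip: %u\n", DecodeSize(EncodeSize(size)));
  return 0;
}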
diff --git a/deps/v8/src/ppc/assembler-ppc.h b/deps/v8/src/ppc/assembler-ppc.h
index 38e6f2bb46..abd306c33f 100644
--- a/deps/v8/src/ppc/assembler-ppc.h
+++ b/deps/v8/src/ppc/assembler-ppc.h
@@ -120,6 +120,136 @@ namespace internal {
V(d24) V(d25) V(d26) V(d27) V(d28) V(d29) V(d30) V(d31)
// clang-format on
+// Register list in load/store instructions
+// Note that the bit values must match those used in actual instruction encoding
+const int kNumRegs = 32;
+
+// Caller-saved/arguments registers
+const RegList kJSCallerSaved = 1 << 3 | // r3 a1
+ 1 << 4 | // r4 a2
+ 1 << 5 | // r5 a3
+ 1 << 6 | // r6 a4
+ 1 << 7 | // r7 a5
+ 1 << 8 | // r8 a6
+ 1 << 9 | // r9 a7
+ 1 << 10 | // r10 a8
+ 1 << 11;
+
+const int kNumJSCallerSaved = 9;
+
+// Return the code of the n-th caller-saved register available to JavaScript,
+// e.g. JSCallerSavedCode(0) returns r3.code() == 3
+int JSCallerSavedCode(int n);
+
+// Callee-saved registers preserved when switching from C to JavaScript
+const RegList kCalleeSaved = 1 << 14 | // r14
+ 1 << 15 | // r15
+ 1 << 16 | // r16
+ 1 << 17 | // r17
+ 1 << 18 | // r18
+ 1 << 19 | // r19
+ 1 << 20 | // r20
+ 1 << 21 | // r21
+ 1 << 22 | // r22
+ 1 << 23 | // r23
+ 1 << 24 | // r24
+ 1 << 25 | // r25
+ 1 << 26 | // r26
+ 1 << 27 | // r27
+ 1 << 28 | // r28
+ 1 << 29 | // r29
+ 1 << 30 | // r30
+ 1 << 31; // r31
+
+const int kNumCalleeSaved = 18;
+
+const RegList kCallerSavedDoubles = 1 << 0 | // d0
+ 1 << 1 | // d1
+ 1 << 2 | // d2
+ 1 << 3 | // d3
+ 1 << 4 | // d4
+ 1 << 5 | // d5
+ 1 << 6 | // d6
+ 1 << 7 | // d7
+ 1 << 8 | // d8
+ 1 << 9 | // d9
+ 1 << 10 | // d10
+ 1 << 11 | // d11
+ 1 << 12 | // d12
+ 1 << 13; // d13
+
+const int kNumCallerSavedDoubles = 14;
+
+const RegList kCalleeSavedDoubles = 1 << 14 | // d14
+ 1 << 15 | // d15
+ 1 << 16 | // d16
+ 1 << 17 | // d17
+ 1 << 18 | // d18
+ 1 << 19 | // d19
+ 1 << 20 | // d20
+ 1 << 21 | // d21
+ 1 << 22 | // d22
+ 1 << 23 | // d23
+ 1 << 24 | // d24
+ 1 << 25 | // d25
+ 1 << 26 | // d26
+ 1 << 27 | // d27
+ 1 << 28 | // d28
+ 1 << 29 | // d29
+ 1 << 30 | // d30
+ 1 << 31; // d31
+
+const int kNumCalleeSavedDoubles = 18;
+
+// Number of registers for which space is reserved in safepoints. Must be a
+// multiple of 8.
+const int kNumSafepointRegisters = 32;
+
+// The following constants describe the stack frame linkage area as
+// defined by the ABI. Note that kNumRequiredStackFrameSlots must
+// satisfy alignment requirements (rounding up if required).
+#if V8_TARGET_ARCH_PPC64 && V8_TARGET_LITTLE_ENDIAN
+// [0] back chain
+// [1] condition register save area
+// [2] link register save area
+// [3] TOC save area
+// [4] Parameter1 save area
+// ...
+// [11] Parameter8 save area
+// [12] Parameter9 slot (if necessary)
+// ...
+const int kNumRequiredStackFrameSlots = 12;
+const int kStackFrameLRSlot = 2;
+const int kStackFrameExtraParamSlot = 12;
+#elif V8_OS_AIX || V8_TARGET_ARCH_PPC64
+// [0] back chain
+// [1] condition register save area
+// [2] link register save area
+// [3] reserved for compiler
+// [4] reserved by binder
+// [5] TOC save area
+// [6] Parameter1 save area
+// ...
+// [13] Parameter8 save area
+// [14] Parameter9 slot (if necessary)
+// ...
+#if V8_TARGET_ARCH_PPC64
+const int kNumRequiredStackFrameSlots = 14;
+#else
+const int kNumRequiredStackFrameSlots = 16;
+#endif
+const int kStackFrameLRSlot = 2;
+const int kStackFrameExtraParamSlot = 14;
+#else
+// [0] back chain
+// [1] link register save area
+// [2] Parameter9 slot (if necessary)
+// ...
+const int kNumRequiredStackFrameSlots = 4;
+const int kStackFrameLRSlot = 1;
+const int kStackFrameExtraParamSlot = 2;
+#endif
+
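The RegList constants added above are plain bitmasks keyed by register code, so membership tests and helpers like JSCallerSavedCode reduce to bit arithmetic. A self-contained sketch under that assumption (RegList modeled as uint32_t; these are not V8's actual definitions):

#include <cstdint>
#include <cstdio>

using RegList = uint32_t;

// Mirrors kJSCallerSaved above: r3..r11.
const RegList kJSCallerSaved =
    1 << 3 | 1 << 4 | 1 << 5 | 1 << 6 | 1 << 7 | 1 << 8 | 1 << 9 | 1 << 10 |
    1 << 11;

bool IsCallerSaved(int code) { return (kJSCallerSaved >> code) & 1; }

// Code of the n-th JS caller-saved register (a sketch of JSCallerSavedCode).
int JSCallerSavedCode(int n) {
  for (int code = 0; code < 32; code++) {
    if (IsCallerSaved(code) && n-- == 0) return code;
  }
  return -1;
}

int main() {
  // JSCallerSavedCode(0) yields 3 (r3), matching the list above.
  std::printf("first JS caller-saved: r%d\n", JSCallerSavedCode(0));
  std::printf("r14 caller-saved? %d\n", IsCallerSaved(14));  // 0: callee-saved
  return 0;
}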
// CPU Registers.
//
// 1) We would prefer to use an enum, but enum values are assignment-
@@ -558,21 +688,6 @@ class Assembler : public AssemblerBase {
static constexpr int kCallTargetAddressOffset =
(kMovInstructions + 2) * kInstrSize;
- // Distance between start of patched debug break slot and the emitted address
- // to jump to.
- // Patched debug break slot code is a FIXED_SEQUENCE:
- // mov r0, <address>
- // mtlr r0
- // blrl
- static constexpr int kPatchDebugBreakSlotAddressOffset = 0 * kInstrSize;
-
- // This is the length of the code sequence from SetDebugBreakAtSlot()
- // FIXED_SEQUENCE
- static constexpr int kDebugBreakSlotInstructions =
- kMovInstructionsNoConstantPool + 2;
- static constexpr int kDebugBreakSlotLength =
- kDebugBreakSlotInstructions * kInstrSize;
-
static inline int encode_crbit(const CRegister& cr, enum CRBit crbit) {
return ((cr.code() * CRWIDTH) + crbit);
}
@@ -741,8 +856,8 @@ class Assembler : public AssemblerBase {
void b(Label* L, LKBit lk = LeaveLK) { b(branch_offset(L), lk); }
inline CRegister cmpi_optimization(CRegister cr) {
- // Check whether the branch is preceeded by an optimizable cmpi against 0.
- // The cmpi can be deleted if it is also preceeded by an instruction that
+ // Check whether the branch is preceded by an optimizable cmpi against 0.
+ // The cmpi can be deleted if it is also preceded by an instruction that
// sets the register used by the compare and supports a dot form.
unsigned int sradi_mask = kOpcodeMask | kExt2OpcodeVariant2Mask;
unsigned int srawi_mask = kOpcodeMask | kExt2OpcodeMask;
@@ -1283,11 +1398,6 @@ class Assembler : public AssemblerBase {
DISALLOW_IMPLICIT_CONSTRUCTORS(BlockConstantPoolEntrySharingScope);
};
- // Debugging
-
- // Mark address of a debug break slot.
- void RecordDebugBreakSlot(RelocInfo::Mode mode);
-
// Record a comment relocation entry that can be used by a disassembler.
// Use --code-comments to enable.
void RecordComment(const char* msg);
@@ -1395,7 +1505,8 @@ class Assembler : public AssemblerBase {
ConstantPoolEntry::Access ConstantPoolAddEntry(RelocInfo::Mode rmode,
intptr_t value) {
bool sharing_ok = RelocInfo::IsNone(rmode) ||
- !(serializer_enabled() || rmode < RelocInfo::CELL ||
+ !(serializer_enabled() ||
+ rmode < RelocInfo::FIRST_SHAREABLE_RELOC_MODE ||
is_constant_pool_entry_sharing_blocked());
return constant_pool_builder_.AddEntry(pc_offset(), value, sharing_ok);
}
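The updated sharing predicate allows a constant-pool entry to be shared only when it carries no reloc mode at all, or when its mode is at or past FIRST_SHAREABLE_RELOC_MODE with the serializer off and sharing not blocked. A standalone restatement of that boolean, with assumed mode values (the real RelocInfo::Mode ordering differs):

#include <cstdio>

// Stand-ins for the reloc-mode ordering the predicate relies on.
enum Mode { NONE = -1, EMBEDDED_OBJECT = 0, FIRST_SHAREABLE_RELOC_MODE = 5,
            EXTERNAL_REFERENCE = 6 };

bool SharingOk(Mode rmode, bool serializer_enabled, bool sharing_blocked) {
  bool is_none = (rmode == NONE);
  return is_none ||
         !(serializer_enabled || rmode < FIRST_SHAREABLE_RELOC_MODE ||
           sharing_blocked);
}

int main() {
  std::printf("%d\n", SharingOk(NONE, true, true));                  // 1
  std::printf("%d\n", SharingOk(EXTERNAL_REFERENCE, false, false));  // 1
  std::printf("%d\n", SharingOk(EMBEDDED_OBJECT, false, false));     // 0
  std::printf("%d\n", SharingOk(EXTERNAL_REFERENCE, true, false));   // 0
  return 0;
}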
diff --git a/deps/v8/src/ppc/code-stubs-ppc.cc b/deps/v8/src/ppc/code-stubs-ppc.cc
index f259a393ae..8523ade0cb 100644
--- a/deps/v8/src/ppc/code-stubs-ppc.cc
+++ b/deps/v8/src/ppc/code-stubs-ppc.cc
@@ -4,21 +4,24 @@
#if V8_TARGET_ARCH_PPC
-#include "src/code-stubs.h"
#include "src/api-arguments.h"
#include "src/base/bits.h"
#include "src/bootstrapper.h"
+#include "src/code-stubs.h"
#include "src/codegen.h"
#include "src/double.h"
+#include "src/frame-constants.h"
+#include "src/frames.h"
#include "src/ic/handler-compiler.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
#include "src/isolate.h"
-#include "src/ppc/code-stubs-ppc.h"
#include "src/regexp/jsregexp.h"
#include "src/regexp/regexp-macro-assembler.h"
#include "src/runtime/runtime.h"
+#include "src/ppc/code-stubs-ppc.h" // Cannot be the first include.
+
namespace v8 {
namespace internal {
@@ -33,14 +36,6 @@ void ArrayNArgumentsConstructorStub::Generate(MacroAssembler* masm) {
__ TailCallRuntime(Runtime::kNewArray);
}
-static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
- Condition cond);
-static void EmitSmiNonsmiComparison(MacroAssembler* masm, Register lhs,
- Register rhs, Label* lhs_not_nan,
- Label* slow, bool strict);
-static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm, Register lhs,
- Register rhs);
-
void DoubleToIStub::Generate(MacroAssembler* masm) {
Label out_of_range, only_low, negate, done, fastpath_done;
@@ -162,502 +157,6 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
}
-// Handle the case where the lhs and rhs are the same object.
-// Equality is almost reflexive (everything but NaN), so this is a test
-// for "identity and not NaN".
-static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
- Condition cond) {
- Label not_identical;
- Label heap_number, return_equal;
- __ cmp(r3, r4);
- __ bne(&not_identical);
-
- // Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
- // so we do the second best thing - test it ourselves.
- // They are both equal and they are not both Smis so both of them are not
- // Smis. If it's not a heap number, then return equal.
- if (cond == lt || cond == gt) {
- // Call runtime on identical JSObjects.
- __ CompareObjectType(r3, r7, r7, FIRST_JS_RECEIVER_TYPE);
- __ bge(slow);
- // Call runtime on identical symbols since we need to throw a TypeError.
- __ cmpi(r7, Operand(SYMBOL_TYPE));
- __ beq(slow);
- } else {
- __ CompareObjectType(r3, r7, r7, HEAP_NUMBER_TYPE);
- __ beq(&heap_number);
- // Comparing JS objects with <=, >= is complicated.
- if (cond != eq) {
- __ cmpi(r7, Operand(FIRST_JS_RECEIVER_TYPE));
- __ bge(slow);
- // Call runtime on identical symbols since we need to throw a TypeError.
- __ cmpi(r7, Operand(SYMBOL_TYPE));
- __ beq(slow);
- // Normally here we fall through to return_equal, but undefined is
- // special: (undefined == undefined) == true, but
- // (undefined <= undefined) == false! See ECMAScript 11.8.5.
- if (cond == le || cond == ge) {
- __ cmpi(r7, Operand(ODDBALL_TYPE));
- __ bne(&return_equal);
- __ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
- __ cmp(r3, r5);
- __ bne(&return_equal);
- if (cond == le) {
- // undefined <= undefined should fail.
- __ li(r3, Operand(GREATER));
- } else {
- // undefined >= undefined should fail.
- __ li(r3, Operand(LESS));
- }
- __ Ret();
- }
- }
- }
-
- __ bind(&return_equal);
- if (cond == lt) {
- __ li(r3, Operand(GREATER)); // Things aren't less than themselves.
- } else if (cond == gt) {
- __ li(r3, Operand(LESS)); // Things aren't greater than themselves.
- } else {
- __ li(r3, Operand(EQUAL)); // Things are <=, >=, ==, === themselves.
- }
- __ Ret();
-
- // For less and greater we don't have to check for NaN since the result of
- // x < x is false regardless. For the others here is some code to check
- // for NaN.
- if (cond != lt && cond != gt) {
- __ bind(&heap_number);
- // It is a heap number, so return non-equal if it's NaN and equal if it's
- // not NaN.
-
- // The representation of NaN values has all exponent bits (52..62) set,
- // and not all mantissa bits (0..51) clear.
- // Read top bits of double representation (second word of value).
- __ lwz(r5, FieldMemOperand(r3, HeapNumber::kExponentOffset));
- // Test that exponent bits are all set.
- STATIC_ASSERT(HeapNumber::kExponentMask == 0x7ff00000u);
- __ ExtractBitMask(r6, r5, HeapNumber::kExponentMask);
- __ cmpli(r6, Operand(0x7ff));
- __ bne(&return_equal);
-
- // Shift out flag and all exponent bits, retaining only mantissa.
- __ slwi(r5, r5, Operand(HeapNumber::kNonMantissaBitsInTopWord));
- // Or with all low-bits of mantissa.
- __ lwz(r6, FieldMemOperand(r3, HeapNumber::kMantissaOffset));
- __ orx(r3, r6, r5);
- __ cmpi(r3, Operand::Zero());
- // For equal we already have the right value in r3: Return zero (equal)
- // if all bits in mantissa are zero (it's an Infinity) and non-zero if
- // not (it's a NaN). For <= and >= we need to load r0 with the failing
- // value if it's a NaN.
- if (cond != eq) {
- if (CpuFeatures::IsSupported(ISELECT)) {
- __ li(r4, Operand((cond == le) ? GREATER : LESS));
- __ isel(eq, r3, r3, r4);
- } else {
- // All-zero means Infinity means equal.
- __ Ret(eq);
- if (cond == le) {
- __ li(r3, Operand(GREATER)); // NaN <= NaN should fail.
- } else {
- __ li(r3, Operand(LESS)); // NaN >= NaN should fail.
- }
- }
- }
- __ Ret();
- }
- // No fall through here.
-
- __ bind(&not_identical);
-}
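The deleted sequence tests for NaN directly on the double's bit pattern: NaN has all eleven exponent bits (52..62) set and a non-zero mantissa, while an all-zero mantissa with a set exponent is Infinity. The same check in portable C++ (a sketch mirroring the mask logic, not the stub itself):

#include <cstdint>
#include <cstdio>
#include <cstring>
#include <limits>

// NaN iff all exponent bits (52..62) are set and the mantissa (bits 0..51)
// is not all zero; all-zero mantissa with a set exponent is +/-Infinity.
bool IsNaNBits(double d) {
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof bits);
  uint64_t exponent = (bits >> 52) & 0x7ff;  // cf. the cmpli against 0x7ff
  uint64_t mantissa = bits & ((uint64_t{1} << 52) - 1);
  return exponent == 0x7ff && mantissa != 0;
}

int main() {
  double nan = std::numeric_limits<double>::quiet_NaN();
  double inf = std::numeric_limits<double>::infinity();
  std::printf("NaN: %d, Inf: %d, 1.0: %d\n", IsNaNBits(nan), IsNaNBits(inf),
              IsNaNBits(1.0));
  return 0;
}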
-
-
-// See comment at call site.
-static void EmitSmiNonsmiComparison(MacroAssembler* masm, Register lhs,
- Register rhs, Label* lhs_not_nan,
- Label* slow, bool strict) {
- DCHECK((lhs.is(r3) && rhs.is(r4)) || (lhs.is(r4) && rhs.is(r3)));
-
- Label rhs_is_smi;
- __ JumpIfSmi(rhs, &rhs_is_smi);
-
- // Lhs is a Smi. Check whether the rhs is a heap number.
- __ CompareObjectType(rhs, r6, r7, HEAP_NUMBER_TYPE);
- if (strict) {
- // If rhs is not a number and lhs is a Smi then strict equality cannot
- // succeed. Return non-equal
- // If rhs is r3 then there is already a non zero value in it.
- if (!rhs.is(r3)) {
- Label skip;
- __ beq(&skip);
- __ mov(r3, Operand(NOT_EQUAL));
- __ Ret();
- __ bind(&skip);
- } else {
- __ Ret(ne);
- }
- } else {
- // Smi compared non-strictly with a non-Smi non-heap-number. Call
- // the runtime.
- __ bne(slow);
- }
-
- // Lhs is a smi, rhs is a number.
- // Convert lhs to a double in d7.
- __ SmiToDouble(d7, lhs);
- // Load the double from rhs, tagged HeapNumber r3, to d6.
- __ lfd(d6, FieldMemOperand(rhs, HeapNumber::kValueOffset));
-
- // We now have both loaded as doubles but we can skip the lhs nan check
- // since it's a smi.
- __ b(lhs_not_nan);
-
- __ bind(&rhs_is_smi);
- // Rhs is a smi. Check whether the non-smi lhs is a heap number.
- __ CompareObjectType(lhs, r7, r7, HEAP_NUMBER_TYPE);
- if (strict) {
- // If lhs is not a number and rhs is a smi then strict equality cannot
- // succeed. Return non-equal.
- // If lhs is r3 then there is already a non zero value in it.
- if (!lhs.is(r3)) {
- Label skip;
- __ beq(&skip);
- __ mov(r3, Operand(NOT_EQUAL));
- __ Ret();
- __ bind(&skip);
- } else {
- __ Ret(ne);
- }
- } else {
- // Smi compared non-strictly with a non-smi non-heap-number. Call
- // the runtime.
- __ bne(slow);
- }
-
- // Rhs is a smi, lhs is a heap number.
- // Load the double from lhs, tagged HeapNumber r4, to d7.
- __ lfd(d7, FieldMemOperand(lhs, HeapNumber::kValueOffset));
- // Convert rhs to a double in d6.
- __ SmiToDouble(d6, rhs);
- // Fall through to both_loaded_as_doubles.
-}
-
-
-// See comment at call site.
-static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm, Register lhs,
- Register rhs) {
- DCHECK((lhs.is(r3) && rhs.is(r4)) || (lhs.is(r4) && rhs.is(r3)));
-
- // If either operand is a JS object or an oddball value, then they are
- // not equal since their pointers are different.
- // There is no test for undetectability in strict equality.
- STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
- Label first_non_object;
- // Get the type of the first operand into r5 and compare it with
- // FIRST_JS_RECEIVER_TYPE.
- __ CompareObjectType(rhs, r5, r5, FIRST_JS_RECEIVER_TYPE);
- __ blt(&first_non_object);
-
- // Return non-zero (r3 is not zero)
- Label return_not_equal;
- __ bind(&return_not_equal);
- __ Ret();
-
- __ bind(&first_non_object);
- // Check for oddballs: true, false, null, undefined.
- __ cmpi(r5, Operand(ODDBALL_TYPE));
- __ beq(&return_not_equal);
-
- __ CompareObjectType(lhs, r6, r6, FIRST_JS_RECEIVER_TYPE);
- __ bge(&return_not_equal);
-
- // Check for oddballs: true, false, null, undefined.
- __ cmpi(r6, Operand(ODDBALL_TYPE));
- __ beq(&return_not_equal);
-
- // Now that we have the types we might as well check for
- // internalized-internalized.
- STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
- __ orx(r5, r5, r6);
- __ andi(r0, r5, Operand(kIsNotStringMask | kIsNotInternalizedMask));
- __ beq(&return_not_equal, cr0);
-}
-
-
-// See comment at call site.
-static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm, Register lhs,
- Register rhs,
- Label* both_loaded_as_doubles,
- Label* not_heap_numbers, Label* slow) {
- DCHECK((lhs.is(r3) && rhs.is(r4)) || (lhs.is(r4) && rhs.is(r3)));
-
- __ CompareObjectType(rhs, r6, r5, HEAP_NUMBER_TYPE);
- __ bne(not_heap_numbers);
- __ LoadP(r5, FieldMemOperand(lhs, HeapObject::kMapOffset));
- __ cmp(r5, r6);
- __ bne(slow); // First was a heap number, second wasn't. Go slow case.
-
- // Both are heap numbers. Load them up then jump to the code we have
- // for that.
- __ lfd(d6, FieldMemOperand(rhs, HeapNumber::kValueOffset));
- __ lfd(d7, FieldMemOperand(lhs, HeapNumber::kValueOffset));
-
- __ b(both_loaded_as_doubles);
-}
-
-// Fast negative check for internalized-to-internalized equality or receiver
-// equality. Also handles the undetectable receiver to null/undefined
-// comparison.
-static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm,
- Register lhs, Register rhs,
- Label* possible_strings,
- Label* runtime_call) {
- DCHECK((lhs.is(r3) && rhs.is(r4)) || (lhs.is(r4) && rhs.is(r3)));
-
- // r5 is object type of rhs.
- Label object_test, return_equal, return_unequal, undetectable;
- STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
- __ andi(r0, r5, Operand(kIsNotStringMask));
- __ bne(&object_test, cr0);
- __ andi(r0, r5, Operand(kIsNotInternalizedMask));
- __ bne(possible_strings, cr0);
- __ CompareObjectType(lhs, r6, r6, FIRST_NONSTRING_TYPE);
- __ bge(runtime_call);
- __ andi(r0, r6, Operand(kIsNotInternalizedMask));
- __ bne(possible_strings, cr0);
-
- // Both are internalized. We already checked they weren't the same pointer so
- // they are not equal. Return non-equal by returning the non-zero object
- // pointer in r3.
- __ Ret();
-
- __ bind(&object_test);
- __ LoadP(r5, FieldMemOperand(lhs, HeapObject::kMapOffset));
- __ LoadP(r6, FieldMemOperand(rhs, HeapObject::kMapOffset));
- __ lbz(r7, FieldMemOperand(r5, Map::kBitFieldOffset));
- __ lbz(r8, FieldMemOperand(r6, Map::kBitFieldOffset));
- __ andi(r0, r7, Operand(1 << Map::kIsUndetectable));
- __ bne(&undetectable, cr0);
- __ andi(r0, r8, Operand(1 << Map::kIsUndetectable));
- __ bne(&return_unequal, cr0);
-
- __ CompareInstanceType(r5, r5, FIRST_JS_RECEIVER_TYPE);
- __ blt(runtime_call);
- __ CompareInstanceType(r6, r6, FIRST_JS_RECEIVER_TYPE);
- __ blt(runtime_call);
-
- __ bind(&return_unequal);
- // Return non-equal by returning the non-zero object pointer in r3.
- __ Ret();
-
- __ bind(&undetectable);
- __ andi(r0, r8, Operand(1 << Map::kIsUndetectable));
- __ beq(&return_unequal, cr0);
-
- // If both sides are JSReceivers, then the result is false according to
- // the HTML specification, which says that only comparisons with null or
- // undefined are affected by special casing for document.all.
- __ CompareInstanceType(r5, r5, ODDBALL_TYPE);
- __ beq(&return_equal);
- __ CompareInstanceType(r6, r6, ODDBALL_TYPE);
- __ bne(&return_unequal);
-
- __ bind(&return_equal);
- __ li(r3, Operand(EQUAL));
- __ Ret();
-}
-
-
-static void CompareICStub_CheckInputType(MacroAssembler* masm, Register input,
- Register scratch,
- CompareICState::State expected,
- Label* fail) {
- Label ok;
- if (expected == CompareICState::SMI) {
- __ JumpIfNotSmi(input, fail);
- } else if (expected == CompareICState::NUMBER) {
- __ JumpIfSmi(input, &ok);
- __ CheckMap(input, scratch, Heap::kHeapNumberMapRootIndex, fail,
- DONT_DO_SMI_CHECK);
- }
- // We could be strict about internalized/non-internalized here, but as long as
- // hydrogen doesn't care, the stub doesn't have to care either.
- __ bind(&ok);
-}
-
-
-// On entry r4 and r5 are the values to be compared.
-// On exit r3 is 0, positive or negative to indicate the result of
-// the comparison.
-void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
- Register lhs = r4;
- Register rhs = r3;
- Condition cc = GetCondition();
-
- Label miss;
- CompareICStub_CheckInputType(masm, lhs, r5, left(), &miss);
- CompareICStub_CheckInputType(masm, rhs, r6, right(), &miss);
-
- Label slow; // Call builtin.
- Label not_smis, both_loaded_as_doubles, lhs_not_nan;
-
- Label not_two_smis, smi_done;
- __ orx(r5, r4, r3);
- __ JumpIfNotSmi(r5, &not_two_smis);
- __ SmiUntag(r4);
- __ SmiUntag(r3);
- __ sub(r3, r4, r3);
- __ Ret();
- __ bind(&not_two_smis);
-
- // NOTICE! This code is only reached after a smi-fast-case check, so
- // it is certain that at least one operand isn't a smi.
-
- // Handle the case where the objects are identical. Either returns the answer
- // or goes to slow. Only falls through if the objects were not identical.
- EmitIdenticalObjectComparison(masm, &slow, cc);
-
- // If either is a Smi (we know that not both are), then they can only
- // be strictly equal if the other is a HeapNumber.
- STATIC_ASSERT(kSmiTag == 0);
- DCHECK_EQ(static_cast<Smi*>(0), Smi::kZero);
- __ and_(r5, lhs, rhs);
- __ JumpIfNotSmi(r5, &not_smis);
- // One operand is a smi. EmitSmiNonsmiComparison generates code that can:
- // 1) Return the answer.
- // 2) Go to slow.
- // 3) Fall through to both_loaded_as_doubles.
- // 4) Jump to lhs_not_nan.
- // In cases 3 and 4 we have found out we were dealing with a number-number
- // comparison. The double values of the numbers have been loaded
- // into d7 and d6.
- EmitSmiNonsmiComparison(masm, lhs, rhs, &lhs_not_nan, &slow, strict());
-
- __ bind(&both_loaded_as_doubles);
- // The arguments have been converted to doubles and stored in d6 and d7
- __ bind(&lhs_not_nan);
- Label no_nan;
- __ fcmpu(d7, d6);
-
- Label nan, equal, less_than;
- __ bunordered(&nan);
- if (CpuFeatures::IsSupported(ISELECT)) {
- DCHECK(EQUAL == 0);
- __ li(r4, Operand(GREATER));
- __ li(r5, Operand(LESS));
- __ isel(eq, r3, r0, r4);
- __ isel(lt, r3, r5, r3);
- __ Ret();
- } else {
- __ beq(&equal);
- __ blt(&less_than);
- __ li(r3, Operand(GREATER));
- __ Ret();
- __ bind(&equal);
- __ li(r3, Operand(EQUAL));
- __ Ret();
- __ bind(&less_than);
- __ li(r3, Operand(LESS));
- __ Ret();
- }
-
- __ bind(&nan);
- // If one of the sides was a NaN then the v flag is set. Load r3 with
- // whatever it takes to make the comparison fail, since comparisons with NaN
- // always fail.
- if (cc == lt || cc == le) {
- __ li(r3, Operand(GREATER));
- } else {
- __ li(r3, Operand(LESS));
- }
- __ Ret();
-
- __ bind(&not_smis);
- // At this point we know we are dealing with two different objects,
- // and neither of them is a Smi. The objects are in rhs_ and lhs_.
- if (strict()) {
- // This returns non-equal for some object types, or falls through if it
- // was not lucky.
- EmitStrictTwoHeapObjectCompare(masm, lhs, rhs);
- }
-
- Label check_for_internalized_strings;
- Label flat_string_check;
- // Check for heap-number-heap-number comparison. Can jump to slow case,
- // or load both doubles into r3, r4, r5, r6 and jump to the code that handles
- // that case. If the inputs are not doubles then jumps to
- // check_for_internalized_strings.
- // In this case r5 will contain the type of rhs_. Never falls through.
- EmitCheckForTwoHeapNumbers(masm, lhs, rhs, &both_loaded_as_doubles,
- &check_for_internalized_strings,
- &flat_string_check);
-
- __ bind(&check_for_internalized_strings);
- // In the strict case the EmitStrictTwoHeapObjectCompare already took care of
- // internalized strings.
- if (cc == eq && !strict()) {
- // Returns an answer for two internalized strings or two detectable objects.
- // Otherwise jumps to string case or not both strings case.
- // Assumes that r5 is the type of rhs_ on entry.
- EmitCheckForInternalizedStringsOrObjects(masm, lhs, rhs, &flat_string_check,
- &slow);
- }
-
- // Check for both being sequential one-byte strings,
- // and inline if that is the case.
- __ bind(&flat_string_check);
-
- __ JumpIfNonSmisNotBothSequentialOneByteStrings(lhs, rhs, r5, r6, &slow);
-
- __ IncrementCounter(isolate()->counters()->string_compare_native(), 1, r5,
- r6);
- if (cc == eq) {
- StringHelper::GenerateFlatOneByteStringEquals(masm, lhs, rhs, r5, r6);
- } else {
- StringHelper::GenerateCompareFlatOneByteStrings(masm, lhs, rhs, r5, r6, r7);
- }
- // Never falls through to here.
-
- __ bind(&slow);
-
- if (cc == eq) {
- {
- FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- __ Push(cp);
- __ Call(strict() ? isolate()->builtins()->StrictEqual()
- : isolate()->builtins()->Equal(),
- RelocInfo::CODE_TARGET);
- __ Pop(cp);
- }
- // Turn true into 0 and false into some non-zero value.
- STATIC_ASSERT(EQUAL == 0);
- __ LoadRoot(r4, Heap::kTrueValueRootIndex);
- __ sub(r3, r3, r4);
- __ Ret();
- } else {
- __ Push(lhs, rhs);
- int ncr; // NaN compare result
- if (cc == lt || cc == le) {
- ncr = GREATER;
- } else {
- DCHECK(cc == gt || cc == ge); // remaining cases
- ncr = LESS;
- }
- __ LoadSmiLiteral(r3, Smi::FromInt(ncr));
- __ push(r3);
-
- // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
- // tagged as a small integer.
- __ TailCallRuntime(Runtime::kCompare);
- }
-
- __ bind(&miss);
- GenerateMiss(masm);
-}
-
-
void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
// We don't allow a GC during a store buffer overflow so there is no need to
// store the registers in any particular way, but we do have to store and
@@ -815,8 +314,6 @@ void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
CEntryStub::GenerateAheadOfTime(isolate);
StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
CommonArrayConstructorStub::GenerateStubsAheadOfTime(isolate);
- CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
- CreateWeakCellStub::GenerateAheadOfTime(isolate);
StoreRegistersStateStub::GenerateAheadOfTime(isolate);
RestoreRegistersStateStub::GenerateAheadOfTime(isolate);
StoreFastElementStub::GenerateAheadOfTime(isolate);
@@ -1142,21 +639,12 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
// r5: receiver
// r6: argc
// r7: argv
- if (type() == StackFrame::ENTRY_CONSTRUCT) {
- ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline,
- isolate());
- __ mov(ip, Operand(construct_entry));
+ if (type() == StackFrame::CONSTRUCT_ENTRY) {
+ __ Call(BUILTIN_CODE(isolate(), JSConstructEntryTrampoline),
+ RelocInfo::CODE_TARGET);
} else {
- ExternalReference entry(Builtins::kJSEntryTrampoline, isolate());
- __ mov(ip, Operand(entry));
+ __ Call(BUILTIN_CODE(isolate(), JSEntryTrampoline), RelocInfo::CODE_TARGET);
}
- __ LoadP(ip, MemOperand(ip)); // deref address
-
- // Branch and link to JSEntryTrampoline.
- // the address points to the start of the code object, skip the header
- __ addi(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ mtctr(ip);
- __ bctrl(); // make the call
// Unlink this frame from the handler chain.
__ PopStackHandler();
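The dispatch code deleted here had to derive the entry point by hand: Code pointers are heap-tagged, and the first instruction sits after the object header, hence the Code::kHeaderSize - kHeapObjectTag adjustment before mtctr/bctrl. A worked sketch of that address arithmetic, with an assumed header size:

#include <cstdint>
#include <cstdio>

int main() {
  const intptr_t kHeapObjectTag = 1;  // V8 tags heap pointers with low bit 1
  const intptr_t kHeaderSize = 96;    // assumed sample Code header size

  intptr_t code_start = 0x10000;                  // untagged object address
  intptr_t tagged = code_start + kHeapObjectTag;  // what a register holds
  // The removed `addi ip, ip, Code::kHeaderSize - kHeapObjectTag`:
  intptr_t entry = tagged + (kHeaderSize - kHeapObjectTag);

  std::printf("entry = %#lx (start + header = %#lx)\n",
              static_cast<unsigned long>(entry),
              static_cast<unsigned long>(code_start + kHeaderSize));
  return 0;
}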
@@ -1193,264 +681,6 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
__ blr();
}
-
-static void CallStubInRecordCallTarget(MacroAssembler* masm, CodeStub* stub) {
- // r3 : number of arguments to the construct function
- // r4 : the function to call
- // r5 : feedback vector
- // r6 : slot in feedback vector (Smi)
- FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
-
- // Number-of-arguments register must be smi-tagged to call out.
- __ SmiTag(r3);
- __ Push(r6, r5, r4, r3);
- __ Push(cp);
-
- __ CallStub(stub);
-
- __ Pop(cp);
- __ Pop(r6, r5, r4, r3);
- __ SmiUntag(r3);
-}
-
-
-static void GenerateRecordCallTarget(MacroAssembler* masm) {
- // Cache the called function in a feedback vector slot. Cache states
- // are uninitialized, monomorphic (indicated by a JSFunction), and
- // megamorphic.
- // r3 : number of arguments to the construct function
- // r4 : the function to call
- // r5 : feedback vector
- // r6 : slot in feedback vector (Smi)
- Label initialize, done, miss, megamorphic, not_array_function;
-
- DCHECK_EQ(*FeedbackVector::MegamorphicSentinel(masm->isolate()),
- masm->isolate()->heap()->megamorphic_symbol());
- DCHECK_EQ(*FeedbackVector::UninitializedSentinel(masm->isolate()),
- masm->isolate()->heap()->uninitialized_symbol());
-
- const int count_offset = FixedArray::kHeaderSize + kPointerSize;
-
- // Load the cache state into r8.
- __ SmiToPtrArrayOffset(r8, r6);
- __ add(r8, r5, r8);
- __ LoadP(r8, FieldMemOperand(r8, FixedArray::kHeaderSize));
-
- // A monomorphic cache hit or an already megamorphic state: invoke the
- // function without changing the state.
- // We don't know if r8 is a WeakCell or a Symbol, but it's harmless to read at
- // this position in a symbol (see static asserts in feedback-vector.h).
- Label check_allocation_site;
- Register feedback_map = r9;
- Register weak_value = r10;
- __ LoadP(weak_value, FieldMemOperand(r8, WeakCell::kValueOffset));
- __ cmp(r4, weak_value);
- __ beq(&done);
- __ CompareRoot(r8, Heap::kmegamorphic_symbolRootIndex);
- __ beq(&done);
- __ LoadP(feedback_map, FieldMemOperand(r8, HeapObject::kMapOffset));
- __ CompareRoot(feedback_map, Heap::kWeakCellMapRootIndex);
- __ bne(&check_allocation_site);
-
- // If the weak cell is cleared, we have a new chance to become monomorphic.
- __ JumpIfSmi(weak_value, &initialize);
- __ b(&megamorphic);
-
- __ bind(&check_allocation_site);
- // If we came here, we need to see if we are the array function.
- // If we didn't have a matching function, and we didn't find the megamorph
- // sentinel, then we have in the slot either some other function or an
- // AllocationSite.
- __ CompareRoot(feedback_map, Heap::kAllocationSiteMapRootIndex);
- __ bne(&miss);
-
- // Make sure the function is the Array() function
- __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, r8);
- __ cmp(r4, r8);
- __ bne(&megamorphic);
- __ b(&done);
-
- __ bind(&miss);
-
- // A monomorphic miss (i.e, here the cache is not uninitialized) goes
- // megamorphic.
- __ CompareRoot(r8, Heap::kuninitialized_symbolRootIndex);
- __ beq(&initialize);
- // MegamorphicSentinel is an immortal immovable object (undefined) so no
- // write-barrier is needed.
- __ bind(&megamorphic);
- __ SmiToPtrArrayOffset(r8, r6);
- __ add(r8, r5, r8);
- __ LoadRoot(ip, Heap::kmegamorphic_symbolRootIndex);
- __ StoreP(ip, FieldMemOperand(r8, FixedArray::kHeaderSize), r0);
- __ jmp(&done);
-
- // An uninitialized cache is patched with the function
- __ bind(&initialize);
-
- // Make sure the function is the Array() function.
- __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, r8);
- __ cmp(r4, r8);
- __ bne(&not_array_function);
-
- // The target function is the Array constructor,
- // Create an AllocationSite if we don't already have it, store it in the
- // slot.
- CreateAllocationSiteStub create_stub(masm->isolate());
- CallStubInRecordCallTarget(masm, &create_stub);
- __ b(&done);
-
- __ bind(&not_array_function);
-
- CreateWeakCellStub weak_cell_stub(masm->isolate());
- CallStubInRecordCallTarget(masm, &weak_cell_stub);
-
- __ bind(&done);
-
- // Increment the call count for all function calls.
- __ SmiToPtrArrayOffset(r8, r6);
- __ add(r8, r5, r8);
-
- __ LoadP(r7, FieldMemOperand(r8, count_offset));
- __ AddSmiLiteral(r7, r7, Smi::FromInt(1), r0);
- __ StoreP(r7, FieldMemOperand(r8, count_offset), r0);
-}
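GenerateRecordCallTarget, removed above, implements a one-way inline-cache state machine: uninitialized to monomorphic (a WeakCell for the callee, or an AllocationSite for the Array function) to megamorphic. A hedged C++ sketch of those transitions, abstracted away from the assembly and omitting the cleared-WeakCell reinitialization path:

#include <cstdio>

enum class CacheState { kUninitialized, kMonomorphic, kMegamorphic };

struct Slot {
  CacheState state = CacheState::kUninitialized;
  const void* target = nullptr;  // stands in for the WeakCell/AllocationSite
};

// A hit keeps the state; a miss from monomorphic goes megamorphic;
// megamorphic is terminal.
void RecordCallTarget(Slot& slot, const void* fn) {
  switch (slot.state) {
    case CacheState::kUninitialized:
      slot.state = CacheState::kMonomorphic;
      slot.target = fn;
      break;
    case CacheState::kMonomorphic:
      if (slot.target != fn) slot.state = CacheState::kMegamorphic;
      break;
    case CacheState::kMegamorphic:
      break;  // stays megamorphic
  }
}

int main() {
  int f, g;
  Slot slot;
  RecordCallTarget(slot, &f);  // uninitialized -> monomorphic
  RecordCallTarget(slot, &f);  // stays monomorphic
  RecordCallTarget(slot, &g);  // different target -> megamorphic
  std::printf("state = %d\n", static_cast<int>(slot.state));  // 2
  return 0;
}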
-
-
-void CallConstructStub::Generate(MacroAssembler* masm) {
- // r3 : number of arguments
- // r4 : the function to call
- // r5 : feedback vector
- // r6 : slot in feedback vector (Smi, for RecordCallTarget)
-
- Label non_function;
- // Check that the function is not a smi.
- __ JumpIfSmi(r4, &non_function);
- // Check that the function is a JSFunction.
- __ CompareObjectType(r4, r8, r8, JS_FUNCTION_TYPE);
- __ bne(&non_function);
-
- GenerateRecordCallTarget(masm);
-
- __ SmiToPtrArrayOffset(r8, r6);
- __ add(r8, r5, r8);
- // Put the AllocationSite from the feedback vector into r5, or undefined.
- __ LoadP(r5, FieldMemOperand(r8, FixedArray::kHeaderSize));
- __ LoadP(r8, FieldMemOperand(r5, AllocationSite::kMapOffset));
- __ CompareRoot(r8, Heap::kAllocationSiteMapRootIndex);
- if (CpuFeatures::IsSupported(ISELECT)) {
- __ LoadRoot(r8, Heap::kUndefinedValueRootIndex);
- __ isel(eq, r5, r5, r8);
- } else {
- Label feedback_register_initialized;
- __ beq(&feedback_register_initialized);
- __ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
- __ bind(&feedback_register_initialized);
- }
-
- __ AssertUndefinedOrAllocationSite(r5, r8);
-
- // Pass function as new target.
- __ mr(r6, r4);
-
- // Tail call to the function-specific construct stub (still in the caller
- // context at this point).
- __ LoadP(r7, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
- __ LoadP(r7, FieldMemOperand(r7, SharedFunctionInfo::kConstructStubOffset));
- __ addi(ip, r7, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ JumpToJSEntry(ip);
-
- __ bind(&non_function);
- __ mr(r6, r4);
- __ Jump(isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
-}
-
-
-// StringCharCodeAtGenerator
-void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
- // If the receiver is a smi trigger the non-string case.
- if (check_mode_ == RECEIVER_IS_UNKNOWN) {
- __ JumpIfSmi(object_, receiver_not_string_);
-
- // Fetch the instance type of the receiver into result register.
- __ LoadP(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
- __ lbz(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
- // If the receiver is not a string trigger the non-string case.
- __ andi(r0, result_, Operand(kIsNotStringMask));
- __ bne(receiver_not_string_, cr0);
- }
-
- // If the index is non-smi trigger the non-smi case.
- __ JumpIfNotSmi(index_, &index_not_smi_);
- __ bind(&got_smi_index_);
-
- // Check for index out of range.
- __ LoadP(ip, FieldMemOperand(object_, String::kLengthOffset));
- __ cmpl(ip, index_);
- __ ble(index_out_of_range_);
-
- __ SmiUntag(index_);
-
- StringCharLoadGenerator::Generate(masm, object_, index_, result_,
- &call_runtime_);
-
- __ SmiTag(result_);
- __ bind(&exit_);
-}
-
-
-void StringCharCodeAtGenerator::GenerateSlow(
- MacroAssembler* masm, EmbedMode embed_mode,
- const RuntimeCallHelper& call_helper) {
- __ Abort(kUnexpectedFallthroughToCharCodeAtSlowCase);
-
- // Index is not a smi.
- __ bind(&index_not_smi_);
- // If index is a heap number, try converting it to an integer.
- __ CheckMap(index_, result_, Heap::kHeapNumberMapRootIndex, index_not_number_,
- DONT_DO_SMI_CHECK);
- call_helper.BeforeCall(masm);
- if (embed_mode == PART_OF_IC_HANDLER) {
- __ Push(LoadWithVectorDescriptor::VectorRegister(),
- LoadWithVectorDescriptor::SlotRegister(), object_, index_);
- } else {
- // index_ is consumed by runtime conversion function.
- __ Push(object_, index_);
- }
- __ CallRuntime(Runtime::kNumberToSmi);
- // Save the conversion result before the pop instructions below
- // have a chance to overwrite it.
- __ Move(index_, r3);
- if (embed_mode == PART_OF_IC_HANDLER) {
- __ Pop(LoadWithVectorDescriptor::VectorRegister(),
- LoadWithVectorDescriptor::SlotRegister(), object_);
- } else {
- __ pop(object_);
- }
- // Reload the instance type.
- __ LoadP(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
- __ lbz(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
- call_helper.AfterCall(masm);
- // If index is still not a smi, it must be out of range.
- __ JumpIfNotSmi(index_, index_out_of_range_);
- // Otherwise, return to the fast path.
- __ b(&got_smi_index_);
-
- // Call runtime. We get here when the receiver is a string and the
- // index is a number, but the code of getting the actual character
- // is too complex (e.g., when the string needs to be flattened).
- __ bind(&call_runtime_);
- call_helper.BeforeCall(masm);
- __ SmiTag(index_);
- __ Push(object_, index_);
- __ CallRuntime(Runtime::kStringCharCodeAtRT);
- __ Move(result_, r3);
- call_helper.AfterCall(masm);
- __ b(&exit_);
-
- __ Abort(kUnexpectedFallthroughFromCharCodeAtSlowCase);
-}
-
void StringHelper::GenerateFlatOneByteStringEquals(MacroAssembler* masm,
Register left,
Register right,
@@ -1570,381 +800,6 @@ void StringHelper::GenerateOneByteCharsCompareLoop(
}
-void CompareICStub::GenerateBooleans(MacroAssembler* masm) {
- DCHECK_EQ(CompareICState::BOOLEAN, state());
- Label miss;
-
- __ CheckMap(r4, r5, Heap::kBooleanMapRootIndex, &miss, DO_SMI_CHECK);
- __ CheckMap(r3, r6, Heap::kBooleanMapRootIndex, &miss, DO_SMI_CHECK);
- if (!Token::IsEqualityOp(op())) {
- __ LoadP(r4, FieldMemOperand(r4, Oddball::kToNumberOffset));
- __ AssertSmi(r4);
- __ LoadP(r3, FieldMemOperand(r3, Oddball::kToNumberOffset));
- __ AssertSmi(r3);
- }
- __ sub(r3, r4, r3);
- __ Ret();
-
- __ bind(&miss);
- GenerateMiss(masm);
-}
-
-
-void CompareICStub::GenerateSmis(MacroAssembler* masm) {
- DCHECK(state() == CompareICState::SMI);
- Label miss;
- __ orx(r5, r4, r3);
- __ JumpIfNotSmi(r5, &miss);
-
- if (GetCondition() == eq) {
- // For equality we do not care about the sign of the result.
- // __ sub(r3, r3, r4, SetCC);
- __ sub(r3, r3, r4);
- } else {
- // Untag before subtracting to avoid handling overflow.
- __ SmiUntag(r4);
- __ SmiUntag(r3);
- __ sub(r3, r4, r3);
- }
- __ Ret();
-
- __ bind(&miss);
- GenerateMiss(masm);
-}
-
-
-void CompareICStub::GenerateNumbers(MacroAssembler* masm) {
- DCHECK(state() == CompareICState::NUMBER);
-
- Label generic_stub;
- Label unordered, maybe_undefined1, maybe_undefined2;
- Label miss;
- Label equal, less_than;
-
- if (left() == CompareICState::SMI) {
- __ JumpIfNotSmi(r4, &miss);
- }
- if (right() == CompareICState::SMI) {
- __ JumpIfNotSmi(r3, &miss);
- }
-
- // Inlining the double comparison and falling back to the general compare
- // stub if NaN is involved.
- // Load left and right operand.
- Label done, left, left_smi, right_smi;
- __ JumpIfSmi(r3, &right_smi);
- __ CheckMap(r3, r5, Heap::kHeapNumberMapRootIndex, &maybe_undefined1,
- DONT_DO_SMI_CHECK);
- __ lfd(d1, FieldMemOperand(r3, HeapNumber::kValueOffset));
- __ b(&left);
- __ bind(&right_smi);
- __ SmiToDouble(d1, r3);
-
- __ bind(&left);
- __ JumpIfSmi(r4, &left_smi);
- __ CheckMap(r4, r5, Heap::kHeapNumberMapRootIndex, &maybe_undefined2,
- DONT_DO_SMI_CHECK);
- __ lfd(d0, FieldMemOperand(r4, HeapNumber::kValueOffset));
- __ b(&done);
- __ bind(&left_smi);
- __ SmiToDouble(d0, r4);
-
- __ bind(&done);
-
- // Compare operands
- __ fcmpu(d0, d1);
-
- // Don't base result on status bits when a NaN is involved.
- __ bunordered(&unordered);
-
- // Return a result of -1, 0, or 1, based on status bits.
- if (CpuFeatures::IsSupported(ISELECT)) {
- DCHECK(EQUAL == 0);
- __ li(r4, Operand(GREATER));
- __ li(r5, Operand(LESS));
- __ isel(eq, r3, r0, r4);
- __ isel(lt, r3, r5, r3);
- __ Ret();
- } else {
- __ beq(&equal);
- __ blt(&less_than);
- // assume greater than
- __ li(r3, Operand(GREATER));
- __ Ret();
- __ bind(&equal);
- __ li(r3, Operand(EQUAL));
- __ Ret();
- __ bind(&less_than);
- __ li(r3, Operand(LESS));
- __ Ret();
- }
-
- __ bind(&unordered);
- __ bind(&generic_stub);
- CompareICStub stub(isolate(), op(), CompareICState::GENERIC,
- CompareICState::GENERIC, CompareICState::GENERIC);
- __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
-
- __ bind(&maybe_undefined1);
- if (Token::IsOrderedRelationalCompareOp(op())) {
- __ CompareRoot(r3, Heap::kUndefinedValueRootIndex);
- __ bne(&miss);
- __ JumpIfSmi(r4, &unordered);
- __ CompareObjectType(r4, r5, r5, HEAP_NUMBER_TYPE);
- __ bne(&maybe_undefined2);
- __ b(&unordered);
- }
-
- __ bind(&maybe_undefined2);
- if (Token::IsOrderedRelationalCompareOp(op())) {
- __ CompareRoot(r4, Heap::kUndefinedValueRootIndex);
- __ beq(&unordered);
- }
-
- __ bind(&miss);
- GenerateMiss(masm);
-}
-
-
-void CompareICStub::GenerateInternalizedStrings(MacroAssembler* masm) {
- DCHECK(state() == CompareICState::INTERNALIZED_STRING);
- Label miss, not_equal;
-
- // Registers containing left and right operands respectively.
- Register left = r4;
- Register right = r3;
- Register tmp1 = r5;
- Register tmp2 = r6;
-
- // Check that both operands are heap objects.
- __ JumpIfEitherSmi(left, right, &miss);
-
- // Check that both operands are symbols.
- __ LoadP(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
- __ LoadP(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
- __ lbz(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
- __ lbz(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
- STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
- __ orx(tmp1, tmp1, tmp2);
- __ andi(r0, tmp1, Operand(kIsNotStringMask | kIsNotInternalizedMask));
- __ bne(&miss, cr0);
-
- // Internalized strings are compared by identity.
- __ cmp(left, right);
- __ bne(&not_equal);
- // Make sure r3 is non-zero. At this point input operands are
- // guaranteed to be non-zero.
- DCHECK(right.is(r3));
- STATIC_ASSERT(EQUAL == 0);
- STATIC_ASSERT(kSmiTag == 0);
- __ LoadSmiLiteral(r3, Smi::FromInt(EQUAL));
- __ bind(&not_equal);
- __ Ret();
-
- __ bind(&miss);
- GenerateMiss(masm);
-}
-
-
-void CompareICStub::GenerateUniqueNames(MacroAssembler* masm) {
- DCHECK(state() == CompareICState::UNIQUE_NAME);
- DCHECK(GetCondition() == eq);
- Label miss;
-
- // Registers containing left and right operands respectively.
- Register left = r4;
- Register right = r3;
- Register tmp1 = r5;
- Register tmp2 = r6;
-
- // Check that both operands are heap objects.
- __ JumpIfEitherSmi(left, right, &miss);
-
- // Check that both operands are unique names. This leaves the instance
- // types loaded in tmp1 and tmp2.
- __ LoadP(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
- __ LoadP(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
- __ lbz(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
- __ lbz(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
-
- __ JumpIfNotUniqueNameInstanceType(tmp1, &miss);
- __ JumpIfNotUniqueNameInstanceType(tmp2, &miss);
-
- // Unique names are compared by identity.
- __ cmp(left, right);
- __ bne(&miss);
- // Make sure r3 is non-zero. At this point input operands are
- // guaranteed to be non-zero.
- DCHECK(right.is(r3));
- STATIC_ASSERT(EQUAL == 0);
- STATIC_ASSERT(kSmiTag == 0);
- __ LoadSmiLiteral(r3, Smi::FromInt(EQUAL));
- __ Ret();
-
- __ bind(&miss);
- GenerateMiss(masm);
-}
-
-
-void CompareICStub::GenerateStrings(MacroAssembler* masm) {
- DCHECK(state() == CompareICState::STRING);
- Label miss, not_identical, is_symbol;
-
- bool equality = Token::IsEqualityOp(op());
-
- // Registers containing left and right operands respectively.
- Register left = r4;
- Register right = r3;
- Register tmp1 = r5;
- Register tmp2 = r6;
- Register tmp3 = r7;
- Register tmp4 = r8;
-
- // Check that both operands are heap objects.
- __ JumpIfEitherSmi(left, right, &miss);
-
- // Check that both operands are strings. This leaves the instance
- // types loaded in tmp1 and tmp2.
- __ LoadP(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
- __ LoadP(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
- __ lbz(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
- __ lbz(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
- STATIC_ASSERT(kNotStringTag != 0);
- __ orx(tmp3, tmp1, tmp2);
- __ andi(r0, tmp3, Operand(kIsNotStringMask));
- __ bne(&miss, cr0);
-
- // Fast check for identical strings.
- __ cmp(left, right);
- STATIC_ASSERT(EQUAL == 0);
- STATIC_ASSERT(kSmiTag == 0);
- __ bne(&not_identical);
- __ LoadSmiLiteral(r3, Smi::FromInt(EQUAL));
- __ Ret();
- __ bind(&not_identical);
-
- // Handle not identical strings.
-
- // Check that both strings are internalized strings. If they are, we're done
- // because we already know they are not identical. We know they are both
- // strings.
- if (equality) {
- DCHECK(GetCondition() == eq);
- STATIC_ASSERT(kInternalizedTag == 0);
- __ orx(tmp3, tmp1, tmp2);
- __ andi(r0, tmp3, Operand(kIsNotInternalizedMask));
- // Make sure r3 is non-zero. At this point input operands are
- // guaranteed to be non-zero.
- DCHECK(right.is(r3));
- __ Ret(eq, cr0);
- }
-
- // Check that both strings are sequential one-byte.
- Label runtime;
- __ JumpIfBothInstanceTypesAreNotSequentialOneByte(tmp1, tmp2, tmp3, tmp4,
- &runtime);
-
- // Compare flat one-byte strings. Returns when done.
- if (equality) {
- StringHelper::GenerateFlatOneByteStringEquals(masm, left, right, tmp1,
- tmp2);
- } else {
- StringHelper::GenerateCompareFlatOneByteStrings(masm, left, right, tmp1,
- tmp2, tmp3);
- }
-
- // Handle more complex cases in runtime.
- __ bind(&runtime);
- if (equality) {
- {
- FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- __ Push(left, right);
- __ CallRuntime(Runtime::kStringEqual);
- }
- __ LoadRoot(r4, Heap::kTrueValueRootIndex);
- __ sub(r3, r3, r4);
- __ Ret();
- } else {
- __ Push(left, right);
- __ TailCallRuntime(Runtime::kStringCompare);
- }
-
- __ bind(&miss);
- GenerateMiss(masm);
-}
-
-
-void CompareICStub::GenerateReceivers(MacroAssembler* masm) {
- DCHECK_EQ(CompareICState::RECEIVER, state());
- Label miss;
- __ and_(r5, r4, r3);
- __ JumpIfSmi(r5, &miss);
-
- STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
- __ CompareObjectType(r3, r5, r5, FIRST_JS_RECEIVER_TYPE);
- __ blt(&miss);
- __ CompareObjectType(r4, r5, r5, FIRST_JS_RECEIVER_TYPE);
- __ blt(&miss);
-
- DCHECK(GetCondition() == eq);
- __ sub(r3, r3, r4);
- __ Ret();
-
- __ bind(&miss);
- GenerateMiss(masm);
-}
-
-
-void CompareICStub::GenerateKnownReceivers(MacroAssembler* masm) {
- Label miss;
- Handle<WeakCell> cell = Map::WeakCellForMap(known_map_);
- __ and_(r5, r4, r3);
- __ JumpIfSmi(r5, &miss);
- __ GetWeakValue(r7, cell);
- __ LoadP(r5, FieldMemOperand(r3, HeapObject::kMapOffset));
- __ LoadP(r6, FieldMemOperand(r4, HeapObject::kMapOffset));
- __ cmp(r5, r7);
- __ bne(&miss);
- __ cmp(r6, r7);
- __ bne(&miss);
-
- if (Token::IsEqualityOp(op())) {
- __ sub(r3, r3, r4);
- __ Ret();
- } else {
- if (op() == Token::LT || op() == Token::LTE) {
- __ LoadSmiLiteral(r5, Smi::FromInt(GREATER));
- } else {
- __ LoadSmiLiteral(r5, Smi::FromInt(LESS));
- }
- __ Push(r4, r3, r5);
- __ TailCallRuntime(Runtime::kCompare);
- }
-
- __ bind(&miss);
- GenerateMiss(masm);
-}
-
-
-void CompareICStub::GenerateMiss(MacroAssembler* masm) {
- {
- // Call the runtime system in a fresh internal frame.
- FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- __ Push(r4, r3);
- __ Push(r4, r3);
- __ LoadSmiLiteral(r0, Smi::FromInt(op()));
- __ push(r0);
- __ CallRuntime(Runtime::kCompareIC_Miss);
- // Compute the entry point of the rewritten stub.
- __ addi(r5, r3, Operand(Code::kHeaderSize - kHeapObjectTag));
- // Restore registers.
- __ Pop(r4, r3);
- }
-
- __ JumpToJSEntry(r5);
-}
-
-
// This stub is paired with DirectCEntryStub::GenerateCall
void DirectCEntryStub::Generate(MacroAssembler* masm) {
// Place the return address on the stack, making the call
@@ -2852,6 +1707,7 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) {
// -- ...
// -- sp[(argc - 1)* 4] : first argument
// -- sp[argc * 4] : receiver
+ // -- sp[(argc + 1)* 4] : accessor_holder
// -----------------------------------
Register callee = r3;
@@ -2862,6 +1718,8 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) {
typedef FunctionCallbackArguments FCA;
+ STATIC_ASSERT(FCA::kArgsLength == 8);
+ STATIC_ASSERT(FCA::kNewTargetIndex == 7);
STATIC_ASSERT(FCA::kContextSaveIndex == 6);
STATIC_ASSERT(FCA::kCalleeIndex == 5);
STATIC_ASSERT(FCA::kDataIndex == 4);
@@ -2869,18 +1727,12 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) {
STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
STATIC_ASSERT(FCA::kIsolateIndex == 1);
STATIC_ASSERT(FCA::kHolderIndex == 0);
- STATIC_ASSERT(FCA::kNewTargetIndex == 7);
- STATIC_ASSERT(FCA::kArgsLength == 8);
// new target
__ PushRoot(Heap::kUndefinedValueRootIndex);
// context save
__ push(context);
- if (!is_lazy()) {
- // load context from callee
- __ LoadP(context, FieldMemOperand(callee, JSFunction::kContextOffset));
- }
// callee
__ push(callee);
@@ -2900,6 +1752,38 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) {
// holder
__ push(holder);
+ // Enter a new context
+ if (is_lazy()) {
+ // ----------- S t a t e -------------------------------------
+ // -- sp[0] : holder
+ // -- ...
+ // -- sp[(FCA::kArgsLength - 1) * 4] : new_target
+ // -- sp[FCA::kArgsLength * 4] : last argument
+ // -- ...
+ // -- sp[(FCA::kArgsLength + argc - 1) * 4] : first argument
+ // -- sp[(FCA::kArgsLength + argc) * 4] : receiver
+ // -- sp[(FCA::kArgsLength + argc + 1) * 4] : accessor_holder
+ // -----------------------------------------------------------
+
+ // Load context from accessor_holder
+ Register accessor_holder = context;
+ Register scratch2 = callee;
+ __ LoadP(accessor_holder,
+ MemOperand(sp, (FCA::kArgsLength + 1 + argc()) * kPointerSize));
+ // Look for the constructor if |accessor_holder| is not a function.
+ Label skip_looking_for_constructor;
+ __ LoadP(scratch, FieldMemOperand(accessor_holder, HeapObject::kMapOffset));
+ __ lbz(scratch2, FieldMemOperand(scratch, Map::kBitFieldOffset));
+ __ andi(r0, scratch2, Operand(1 << Map::kIsConstructor));
+ __ bne(&skip_looking_for_constructor, cr0);
+ __ GetMapConstructor(context, scratch, scratch, scratch2);
+ __ bind(&skip_looking_for_constructor);
+ __ LoadP(context, FieldMemOperand(context, JSFunction::kContextOffset));
+ } else {
+ // Load context from callee
+ __ LoadP(context, FieldMemOperand(callee, JSFunction::kContextOffset));
+ }
+
// Prepare arguments.
__ mr(scratch, sp);
@@ -2944,12 +1828,8 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) {
return_value_offset = 2 + FCA::kReturnValueOffset;
}
MemOperand return_value_operand(fp, return_value_offset * kPointerSize);
- int stack_space = 0;
- MemOperand length_operand =
- MemOperand(sp, kFunctionCallbackInfoOffset + 2 * kPointerSize);
- MemOperand* stack_space_operand = &length_operand;
- stack_space = argc() + FCA::kArgsLength + 1;
- stack_space_operand = NULL;
+ const int stack_space = argc() + FCA::kArgsLength + 2;
+ MemOperand* stack_space_operand = nullptr;
CallApiFunctionAndReturn(masm, api_function_address, thunk_ref, stack_space,
stack_space_operand, return_value_operand,
&context_restore_operand);
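
The STATIC_ASSERTs above pin down the FunctionCallbackArguments (FCA) slot order, and the receiver/accessor_holder state comments explain why the stub now drops FCA::kArgsLength + argc + 2 slots instead of + 1. A minimal standalone sketch of that layout and arithmetic (index names mirror the asserts; the program itself is only illustrative):

#include <cstdio>

// Implicit FCA frame built below the explicit JS arguments; slot indices
// mirror the STATIC_ASSERTs in CallApiCallbackStub::Generate.
enum FcaIndex {
  kHolderIndex = 0,
  kIsolateIndex = 1,
  kReturnValueDefaultValueIndex = 2,
  kReturnValueOffset = 3,
  kDataIndex = 4,
  kCalleeIndex = 5,
  kContextSaveIndex = 6,
  kNewTargetIndex = 7,
  kArgsLength = 8
};

int main() {
  const int argc = 3;  // example JS argument count
  // Slots dropped on return: 8 FCA slots, argc arguments, the receiver,
  // and (new in this patch) the accessor_holder -- hence "+ 2" vs "+ 1".
  const int stack_space = argc + kArgsLength + 2;
  std::printf("stack slots to drop: %d\n", stack_space);  // prints 13
  return 0;
}
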
diff --git a/deps/v8/src/ppc/code-stubs-ppc.h b/deps/v8/src/ppc/code-stubs-ppc.h
index f873f93679..967e97303a 100644
--- a/deps/v8/src/ppc/code-stubs-ppc.h
+++ b/deps/v8/src/ppc/code-stubs-ppc.h
@@ -5,15 +5,10 @@
#ifndef V8_PPC_CODE_STUBS_PPC_H_
#define V8_PPC_CODE_STUBS_PPC_H_
-#include "src/ppc/frames-ppc.h"
-
namespace v8 {
namespace internal {
-void ArrayNativeCode(MacroAssembler* masm, Label* call_generic_code);
-
-
class StringHelper : public AllStatic {
public:
// Compares two flat one-byte strings and returns result in r0.
diff --git a/deps/v8/src/ppc/codegen-ppc.cc b/deps/v8/src/ppc/codegen-ppc.cc
index 6c8ffe6898..695ae6beb6 100644
--- a/deps/v8/src/ppc/codegen-ppc.cc
+++ b/deps/v8/src/ppc/codegen-ppc.cc
@@ -51,24 +51,6 @@ UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
#undef __
-
-// -------------------------------------------------------------------------
-// Platform-specific RuntimeCallHelper functions.
-
-void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
- masm->EnterFrame(StackFrame::INTERNAL);
- DCHECK(!masm->has_frame());
- masm->set_has_frame(true);
-}
-
-
-void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
- masm->LeaveFrame(StackFrame::INTERNAL);
- DCHECK(masm->has_frame());
- masm->set_has_frame(false);
-}
-
-
// -------------------------------------------------------------------------
// Code generators
@@ -170,71 +152,6 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm, Register string,
#undef __
-CodeAgingHelper::CodeAgingHelper(Isolate* isolate) {
- USE(isolate);
- DCHECK(young_sequence_.length() == kNoCodeAgeSequenceLength);
- // Since patcher is a large object, allocate it dynamically when needed,
- // to avoid overloading the stack in stress conditions.
- // DONT_FLUSH is used because the CodeAgingHelper is initialized early in
- // the process, before the ARM simulator ICache is set up.
- std::unique_ptr<CodePatcher> patcher(
- new CodePatcher(isolate, young_sequence_.start(),
- young_sequence_.length() / Assembler::kInstrSize,
- CodePatcher::DONT_FLUSH));
- PredictableCodeSizeScope scope(patcher->masm(), young_sequence_.length());
- patcher->masm()->PushStandardFrame(r4);
- for (int i = 0; i < kNoCodeAgeSequenceNops; i++) {
- patcher->masm()->nop();
- }
-}
-
-
-#ifdef DEBUG
-bool CodeAgingHelper::IsOld(byte* candidate) const {
- return Assembler::IsNop(Assembler::instr_at(candidate));
-}
-#endif
-
-
-bool Code::IsYoungSequence(Isolate* isolate, byte* sequence) {
- bool result = isolate->code_aging_helper()->IsYoung(sequence);
- DCHECK(result || isolate->code_aging_helper()->IsOld(sequence));
- return result;
-}
-
-Code::Age Code::GetCodeAge(Isolate* isolate, byte* sequence) {
- if (IsYoungSequence(isolate, sequence)) return kNoAgeCodeAge;
-
- Code* code = NULL;
- Address target_address =
- Assembler::target_address_at(sequence + kCodeAgingTargetDelta, code);
- Code* stub = GetCodeFromTargetAddress(target_address);
- return GetAgeOfCodeAgeStub(stub);
-}
-
-void Code::PatchPlatformCodeAge(Isolate* isolate, byte* sequence,
- Code::Age age) {
- uint32_t young_length = isolate->code_aging_helper()->young_sequence_length();
- if (age == kNoAgeCodeAge) {
- isolate->code_aging_helper()->CopyYoungSequenceTo(sequence);
- Assembler::FlushICache(isolate, sequence, young_length);
- } else {
- // FIXED_SEQUENCE
- Code* stub = GetCodeAgeStub(isolate, age);
- CodePatcher patcher(isolate, sequence,
- young_length / Assembler::kInstrSize);
- Assembler::BlockTrampolinePoolScope block_trampoline_pool(patcher.masm());
- intptr_t target = reinterpret_cast<intptr_t>(stub->instruction_start());
- // Don't use Call -- we need to preserve ip and lr.
- // GenerateMakeCodeYoungAgainCommon for the stub code.
- patcher.masm()->nop(); // marker to detect sequence (see IsOld)
- patcher.masm()->mov(r3, Operand(target));
- patcher.masm()->Jump(r3);
- for (int i = 0; i < kCodeAgingSequenceNops; i++) {
- patcher.masm()->nop();
- }
- }
-}
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/ppc/deoptimizer-ppc.cc b/deps/v8/src/ppc/deoptimizer-ppc.cc
index f8cfe70c8f..75206ee4a4 100644
--- a/deps/v8/src/ppc/deoptimizer-ppc.cc
+++ b/deps/v8/src/ppc/deoptimizer-ppc.cc
@@ -4,7 +4,6 @@
#include "src/codegen.h"
#include "src/deoptimizer.h"
-#include "src/full-codegen/full-codegen.h"
#include "src/register-configuration.h"
#include "src/safepoint-table.h"
@@ -13,78 +12,6 @@ namespace internal {
const int Deoptimizer::table_entry_size_ = 8;
-
-int Deoptimizer::patch_size() {
-#if V8_TARGET_ARCH_PPC64
- const int kCallInstructionSizeInWords = 7;
-#else
- const int kCallInstructionSizeInWords = 4;
-#endif
- return kCallInstructionSizeInWords * Assembler::kInstrSize;
-}
-
-
-void Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(Handle<Code> code) {
- // Empty because there is no need for relocation information for the code
- // patching in Deoptimizer::PatchCodeForDeoptimization below.
-}
-
-
-void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
- Address code_start_address = code->instruction_start();
-
- // Invalidate the relocation information, as it will become invalid by the
- // code patching below, and is not needed any more.
- code->InvalidateRelocation();
-
- // Fail hard and early if we enter this code object again.
- byte* pointer = code->FindCodeAgeSequence();
- if (pointer != NULL) {
- pointer += kNoCodeAgeSequenceLength;
- } else {
- pointer = code->instruction_start();
- }
- CodePatcher patcher(isolate, pointer, 1);
- patcher.masm()->bkpt(0);
-
- DeoptimizationInputData* data =
- DeoptimizationInputData::cast(code->deoptimization_data());
- int osr_offset = data->OsrPcOffset()->value();
- if (osr_offset > 0) {
- CodePatcher osr_patcher(isolate, code_start_address + osr_offset, 1);
- osr_patcher.masm()->bkpt(0);
- }
-
- DeoptimizationInputData* deopt_data =
- DeoptimizationInputData::cast(code->deoptimization_data());
-#ifdef DEBUG
- Address prev_call_address = NULL;
-#endif
- // For each LLazyBailout instruction insert a call to the corresponding
- // deoptimization entry.
- for (int i = 0; i < deopt_data->DeoptCount(); i++) {
- if (deopt_data->Pc(i)->value() == -1) continue;
- Address call_address = code_start_address + deopt_data->Pc(i)->value();
- Address deopt_entry = GetDeoptimizationEntry(isolate, i, LAZY);
- // We need calls to have a predictable size in the unoptimized code, but
- // this is optimized code, so we don't have to have a predictable size.
- int call_size_in_bytes = MacroAssembler::CallSizeNotPredictableCodeSize(
- deopt_entry, kRelocInfo_NONEPTR);
- int call_size_in_words = call_size_in_bytes / Assembler::kInstrSize;
- DCHECK(call_size_in_bytes % Assembler::kInstrSize == 0);
- DCHECK(call_size_in_bytes <= patch_size());
- CodePatcher patcher(isolate, call_address, call_size_in_words);
- patcher.masm()->Call(deopt_entry, kRelocInfo_NONEPTR);
- DCHECK(prev_call_address == NULL ||
- call_address >= prev_call_address + patch_size());
- DCHECK(call_address + patch_size() <= code->instruction_end());
-#ifdef DEBUG
- prev_call_address = call_address;
-#endif
- }
-}
-
-
#define __ masm()->
// This code tries to be close to ia32 code so that any changes can be
@@ -104,7 +31,7 @@ void Deoptimizer::TableEntryGenerator::Generate() {
// Save all double registers before messing with them.
__ subi(sp, sp, Operand(kDoubleRegsSize));
- const RegisterConfiguration* config = RegisterConfiguration::Crankshaft();
+ const RegisterConfiguration* config = RegisterConfiguration::Default();
for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
int code = config->GetAllocatableDoubleCode(i);
const DoubleRegister dreg = DoubleRegister::from_code(code);
diff --git a/deps/v8/src/ppc/disasm-ppc.cc b/deps/v8/src/ppc/disasm-ppc.cc
index 6933e302a4..2a1044f7ad 100644
--- a/deps/v8/src/ppc/disasm-ppc.cc
+++ b/deps/v8/src/ppc/disasm-ppc.cc
@@ -39,7 +39,7 @@
namespace v8 {
namespace internal {
-const auto GetRegConfig = RegisterConfiguration::Crankshaft;
+const auto GetRegConfig = RegisterConfiguration::Default;
//------------------------------------------------------------------------------
@@ -345,7 +345,7 @@ void Decoder::Format(Instruction* instr, const char* format) {
// The disassembler may end up decoding data inlined in the code. We do not want
-// it to crash if the data does not ressemble any known instruction.
+// it to crash if the data does not resemble any known instruction.
#define VERIFY(condition) \
if (!(condition)) { \
Unknown(instr); \
diff --git a/deps/v8/src/ppc/frames-ppc.cc b/deps/v8/src/ppc/frame-constants-ppc.cc
index 228ef1998f..bc6a649f9b 100644
--- a/deps/v8/src/ppc/frames-ppc.cc
+++ b/deps/v8/src/ppc/frame-constants-ppc.cc
@@ -5,18 +5,17 @@
#if V8_TARGET_ARCH_PPC
#include "src/assembler.h"
-#include "src/frames.h"
+#include "src/frame-constants.h"
#include "src/macro-assembler.h"
-
-#include "src/ppc/assembler-ppc.h"
#include "src/ppc/assembler-ppc-inl.h"
-#include "src/ppc/frames-ppc.h"
+#include "src/ppc/assembler-ppc.h"
#include "src/ppc/macro-assembler-ppc.h"
+#include "src/ppc/frame-constants-ppc.h"
+
namespace v8 {
namespace internal {
-
Register JavaScriptFrame::fp_register() { return v8::internal::fp; }
Register JavaScriptFrame::context_register() { return cp; }
Register JavaScriptFrame::constant_pool_pointer_register() {
@@ -24,7 +23,6 @@ Register JavaScriptFrame::constant_pool_pointer_register() {
return kConstantPoolRegister;
}
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/ppc/frame-constants-ppc.h b/deps/v8/src/ppc/frame-constants-ppc.h
new file mode 100644
index 0000000000..ee7f29937b
--- /dev/null
+++ b/deps/v8/src/ppc/frame-constants-ppc.h
@@ -0,0 +1,50 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_PPC_FRAMES_PPC_H_
+#define V8_PPC_FRAMES_PPC_H_
+
+#include "src/frame-constants.h"
+
+namespace v8 {
+namespace internal {
+
+class EntryFrameConstants : public AllStatic {
+ public:
+ static const int kCallerFPOffset =
+ -(StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize);
+};
+
+class ExitFrameConstants : public TypedFrameConstants {
+ public:
+ static const int kSPOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(0);
+ static const int kCodeOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(1);
+ DEFINE_TYPED_FRAME_SIZES(2);
+
+ // The caller fields are below the frame pointer on the stack.
+ static const int kCallerFPOffset = 0 * kPointerSize;
+ // The calling JS function is below FP.
+ static const int kCallerPCOffset = 1 * kPointerSize;
+
+ // FP-relative displacement of the caller's SP. It points just
+ // below the saved PC.
+ static const int kCallerSPDisplacement = 2 * kPointerSize;
+};
+
+class JavaScriptFrameConstants : public AllStatic {
+ public:
+ // FP-relative.
+ static const int kLocal0Offset = StandardFrameConstants::kExpressionsOffset;
+ static const int kLastParameterOffset = +2 * kPointerSize;
+ static const int kFunctionOffset = StandardFrameConstants::kFunctionOffset;
+
+ // Caller SP-relative.
+ static const int kParam0Offset = -2 * kPointerSize;
+ static const int kReceiverOffset = -1 * kPointerSize;
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_PPC_FRAMES_PPC_H_
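
The caller fields in the new header are fixed multiples of kPointerSize above the frame pointer; a tiny sketch of the same arithmetic with kPointerSize treated as a parameter (4 on PPC32, 8 on PPC64; the standalone program is only illustrative):

#include <cstdio>

void PrintExitFrameCallerFields(int pointer_size) {
  const int caller_fp_offset = 0 * pointer_size;        // saved fp at fp
  const int caller_pc_offset = 1 * pointer_size;        // saved pc above it
  const int caller_sp_displacement = 2 * pointer_size;  // caller sp above pc
  std::printf("fp=%d pc=%d sp=%d\n", caller_fp_offset, caller_pc_offset,
              caller_sp_displacement);
}

int main() {
  PrintExitFrameCallerFields(4);  // PPC32: fp=0 pc=4 sp=8
  PrintExitFrameCallerFields(8);  // PPC64: fp=0 pc=8 sp=16
  return 0;
}
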
diff --git a/deps/v8/src/ppc/frames-ppc.h b/deps/v8/src/ppc/frames-ppc.h
deleted file mode 100644
index fd4abe2e4d..0000000000
--- a/deps/v8/src/ppc/frames-ppc.h
+++ /dev/null
@@ -1,188 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_PPC_FRAMES_PPC_H_
-#define V8_PPC_FRAMES_PPC_H_
-
-namespace v8 {
-namespace internal {
-
-
-// Register list in load/store instructions
-// Note that the bit values must match those used in actual instruction encoding
-const int kNumRegs = 32;
-
-
-// Caller-saved/arguments registers
-const RegList kJSCallerSaved = 1 << 3 | // r3 a1
- 1 << 4 | // r4 a2
- 1 << 5 | // r5 a3
- 1 << 6 | // r6 a4
- 1 << 7 | // r7 a5
- 1 << 8 | // r8 a6
- 1 << 9 | // r9 a7
- 1 << 10 | // r10 a8
- 1 << 11; // r11
-
-const int kNumJSCallerSaved = 9;
-
-// Return the code of the n-th caller-saved register available to JavaScript
-// e.g. JSCallerSavedCode(0) returns r3.code() == 3
-int JSCallerSavedCode(int n);
-
-
-// Callee-saved registers preserved when switching from C to JavaScript
-const RegList kCalleeSaved = 1 << 14 | // r14
- 1 << 15 | // r15
- 1 << 16 | // r16
- 1 << 17 | // r17
- 1 << 18 | // r18
- 1 << 19 | // r19
- 1 << 20 | // r20
- 1 << 21 | // r21
- 1 << 22 | // r22
- 1 << 23 | // r23
- 1 << 24 | // r24
- 1 << 25 | // r25
- 1 << 26 | // r26
- 1 << 27 | // r27
- 1 << 28 | // r28
- 1 << 29 | // r29
- 1 << 30 | // r30
- 1 << 31; // r31
-
-
-const int kNumCalleeSaved = 18;
-
-const RegList kCallerSavedDoubles = 1 << 0 | // d0
- 1 << 1 | // d1
- 1 << 2 | // d2
- 1 << 3 | // d3
- 1 << 4 | // d4
- 1 << 5 | // d5
- 1 << 6 | // d6
- 1 << 7 | // d7
- 1 << 8 | // d8
- 1 << 9 | // d9
- 1 << 10 | // d10
- 1 << 11 | // d11
- 1 << 12 | // d12
- 1 << 13; // d13
-
-const int kNumCallerSavedDoubles = 14;
-
-const RegList kCalleeSavedDoubles = 1 << 14 | // d14
- 1 << 15 | // d15
- 1 << 16 | // d16
- 1 << 17 | // d17
- 1 << 18 | // d18
- 1 << 19 | // d19
- 1 << 20 | // d20
- 1 << 21 | // d21
- 1 << 22 | // d22
- 1 << 23 | // d23
- 1 << 24 | // d24
- 1 << 25 | // d25
- 1 << 26 | // d26
- 1 << 27 | // d27
- 1 << 28 | // d28
- 1 << 29 | // d29
- 1 << 30 | // d30
- 1 << 31; // d31
-
-const int kNumCalleeSavedDoubles = 18;
-
-
-// Number of registers for which space is reserved in safepoints. Must be a
-// multiple of 8.
-const int kNumSafepointRegisters = 32;
-
-// The following constants describe the stack frame linkage area as
-// defined by the ABI. Note that kNumRequiredStackFrameSlots must
-// satisfy alignment requirements (rounding up if required).
-#if V8_TARGET_ARCH_PPC64 && V8_TARGET_LITTLE_ENDIAN
-// [0] back chain
-// [1] condition register save area
-// [2] link register save area
-// [3] TOC save area
-// [4] Parameter1 save area
-// ...
-// [11] Parameter8 save area
-// [12] Parameter9 slot (if necessary)
-// ...
-const int kNumRequiredStackFrameSlots = 12;
-const int kStackFrameLRSlot = 2;
-const int kStackFrameExtraParamSlot = 12;
-#elif V8_OS_AIX || V8_TARGET_ARCH_PPC64
-// [0] back chain
-// [1] condition register save area
-// [2] link register save area
-// [3] reserved for compiler
-// [4] reserved by binder
-// [5] TOC save area
-// [6] Parameter1 save area
-// ...
-// [13] Parameter8 save area
-// [14] Parameter9 slot (if necessary)
-// ...
-#if V8_TARGET_ARCH_PPC64
-const int kNumRequiredStackFrameSlots = 14;
-#else
-const int kNumRequiredStackFrameSlots = 16;
-#endif
-const int kStackFrameLRSlot = 2;
-const int kStackFrameExtraParamSlot = 14;
-#else
-// [0] back chain
-// [1] link register save area
-// [2] Parameter9 slot (if necessary)
-// ...
-const int kNumRequiredStackFrameSlots = 4;
-const int kStackFrameLRSlot = 1;
-const int kStackFrameExtraParamSlot = 2;
-#endif
-
-// ----------------------------------------------------
-
-
-class EntryFrameConstants : public AllStatic {
- public:
- static const int kCallerFPOffset =
- -(StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize);
-};
-
-class ExitFrameConstants : public TypedFrameConstants {
- public:
- static const int kSPOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(0);
- static const int kCodeOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(1);
- DEFINE_TYPED_FRAME_SIZES(2);
-
- // The caller fields are below the frame pointer on the stack.
- static const int kCallerFPOffset = 0 * kPointerSize;
- // The calling JS function is below FP.
- static const int kCallerPCOffset = 1 * kPointerSize;
-
- // FP-relative displacement of the caller's SP. It points just
- // below the saved PC.
- static const int kCallerSPDisplacement = 2 * kPointerSize;
-};
-
-
-class JavaScriptFrameConstants : public AllStatic {
- public:
- // FP-relative.
- static const int kLocal0Offset = StandardFrameConstants::kExpressionsOffset;
- static const int kLastParameterOffset = +2 * kPointerSize;
- static const int kFunctionOffset = StandardFrameConstants::kFunctionOffset;
-
- // Caller SP-relative.
- static const int kParam0Offset = -2 * kPointerSize;
- static const int kReceiverOffset = -1 * kPointerSize;
-};
-
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_PPC_FRAMES_PPC_H_
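
The RegLists in the deleted header are plain 32-bit masks, and each kNum* constant is the popcount of its mask; a quick standalone check of that invariant (mask values transcribed from the file, std::bitset standing in for V8's popcount helper):

#include <bitset>
#include <cstdint>
#include <cstdio>

int main() {
  const uint32_t kJSCallerSaved = 0x00000FF8;  // r3..r11 (bits 3-11)
  const uint32_t kCalleeSaved   = 0xFFFFC000;  // r14..r31 (bits 14-31)
  std::printf("kNumJSCallerSaved = %zu\n",
              std::bitset<32>(kJSCallerSaved).count());  // prints 9
  std::printf("kNumCalleeSaved   = %zu\n",
              std::bitset<32>(kCalleeSaved).count());    // prints 18
  return 0;
}
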
diff --git a/deps/v8/src/ppc/interface-descriptors-ppc.cc b/deps/v8/src/ppc/interface-descriptors-ppc.cc
index bb14f091b4..90ed9670fd 100644
--- a/deps/v8/src/ppc/interface-descriptors-ppc.cc
+++ b/deps/v8/src/ppc/interface-descriptors-ppc.cc
@@ -20,6 +20,13 @@ void CallInterfaceDescriptor::DefaultInitializePlatformSpecific(
default_stub_registers);
}
+void RecordWriteDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // TODO(albertnetymk): Use default for now; should call
+ // RestrictAllocatableRegisters like src/x64/interface-descriptors-x64.cc
+ DefaultInitializePlatformSpecific(data, kParameterCount);
+}
+
const Register FastNewFunctionContextDescriptor::FunctionRegister() {
return r4;
}
@@ -47,8 +54,6 @@ const Register StoreTransitionDescriptor::MapRegister() { return r8; }
const Register StringCompareDescriptor::LeftRegister() { return r4; }
const Register StringCompareDescriptor::RightRegister() { return r3; }
-const Register StringConcatDescriptor::ArgumentsCountRegister() { return r3; }
-
const Register ApiGetterDescriptor::HolderRegister() { return r3; }
const Register ApiGetterDescriptor::CallbackRegister() { return r6; }
@@ -99,54 +104,12 @@ void FastCloneShallowObjectDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-
-void CreateAllocationSiteDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {r5, r6};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
-void CreateWeakCellDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {r5, r6, r4};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
void CallFunctionDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r4};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void CallICTrampolineDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {r4, r3, r6};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void CallICDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {r4, r3, r6, r5};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
-void CallConstructDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // r3 : number of arguments
- // r4 : the function to call
- // r5 : feedback vector
- // r6 : slot in feedback vector (Smi, for RecordCallTarget)
- // r7 : new target (for IsSuperConstructorCall)
- // TODO(turbofan): So far we don't gather type feedback and hence skip the
- // slot parameter, but ArrayConstructStub needs the vector to be undefined.
- Register registers[] = {r3, r4, r7, r5};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
void CallTrampolineDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// r3 : number of arguments
@@ -299,14 +262,6 @@ void ArrayNArgumentsConstructorDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-
-void VarArgFunctionDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // stack param count needs (arg count)
- Register registers[] = {r3};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
void CompareDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r4, r3};
@@ -320,30 +275,6 @@ void BinaryOpDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-
-void BinaryOpWithAllocationSiteDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {r5, r4, r3};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void BinaryOpWithVectorDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // register state
- // r4 -- lhs
- // r3 -- rhs
- // r7 -- slot id
- // r6 -- vector
- Register registers[] = {r4, r3, r7, r6};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void CountOpDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {r4};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
void StringAddDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r4, r3};
@@ -402,17 +333,6 @@ void InterpreterPushArgsThenConstructDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void InterpreterPushArgsThenConstructArrayDescriptor::
- InitializePlatformSpecific(CallInterfaceDescriptorData* data) {
- Register registers[] = {
- r3, // argument count (not including receiver)
- r4, // target to call checked to be Array function
- r5, // allocation site feedback if available, undefined otherwise
- r6 // address of the first argument
- };
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
void InterpreterCEntryDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
diff --git a/deps/v8/src/ppc/macro-assembler-ppc.cc b/deps/v8/src/ppc/macro-assembler-ppc.cc
index ed925001a0..2bd14f09fd 100644
--- a/deps/v8/src/ppc/macro-assembler-ppc.cc
+++ b/deps/v8/src/ppc/macro-assembler-ppc.cc
@@ -13,6 +13,7 @@
#include "src/codegen.h"
#include "src/debug/debug.h"
#include "src/external-reference-table.h"
+#include "src/frames-inl.h"
#include "src/register-configuration.h"
#include "src/runtime/runtime.h"
@@ -25,6 +26,46 @@ MacroAssembler::MacroAssembler(Isolate* isolate, void* buffer, int size,
CodeObjectRequired create_code_object)
: TurboAssembler(isolate, buffer, size, create_code_object) {}
+void TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode,
+ Register exclusion1, Register exclusion2,
+ Register exclusion3) {
+ RegList exclusions = 0;
+ if (!exclusion1.is(no_reg)) {
+ exclusions |= exclusion1.bit();
+ if (!exclusion2.is(no_reg)) {
+ exclusions |= exclusion2.bit();
+ if (!exclusion3.is(no_reg)) {
+ exclusions |= exclusion3.bit();
+ }
+ }
+ }
+
+ MultiPush(kJSCallerSaved & ~exclusions);
+
+ if (fp_mode == kSaveFPRegs) {
+ MultiPushDoubles(kCallerSavedDoubles);
+ }
+}
+
+void TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
+ Register exclusion2, Register exclusion3) {
+ if (fp_mode == kSaveFPRegs) {
+ MultiPopDoubles(kCallerSavedDoubles);
+ }
+
+ RegList exclusions = 0;
+ if (!exclusion1.is(no_reg)) {
+ exclusions |= exclusion1.bit();
+ if (!exclusion2.is(no_reg)) {
+ exclusions |= exclusion2.bit();
+ if (!exclusion3.is(no_reg)) {
+ exclusions |= exclusion3.bit();
+ }
+ }
+ }
+
+ MultiPop(kJSCallerSaved & ~exclusions);
+}
void TurboAssembler::Jump(Register target) {
mtctr(target);
bctr();
@@ -165,14 +206,6 @@ void TurboAssembler::Push(Smi* smi) {
push(r0);
}
-void MacroAssembler::PushObject(Handle<Object> handle) {
- if (handle->IsHeapObject()) {
- Push(Handle<HeapObject>::cast(handle));
- } else {
- Push(Smi::cast(*handle));
- }
-}
-
void TurboAssembler::Move(Register dst, Handle<HeapObject> value) {
mov(dst, Operand(value));
}
@@ -191,7 +224,7 @@ void TurboAssembler::Move(DoubleRegister dst, DoubleRegister src) {
}
void TurboAssembler::MultiPush(RegList regs, Register location) {
- int16_t num_to_push = NumberOfBitsSet(regs);
+ int16_t num_to_push = base::bits::CountPopulation(regs);
int16_t stack_offset = num_to_push * kPointerSize;
subi(location, location, Operand(stack_offset));
@@ -216,7 +249,7 @@ void TurboAssembler::MultiPop(RegList regs, Register location) {
}
void TurboAssembler::MultiPushDoubles(RegList dregs, Register location) {
- int16_t num_to_push = NumberOfBitsSet(dregs);
+ int16_t num_to_push = base::bits::CountPopulation(dregs);
int16_t stack_offset = num_to_push * kDoubleSize;
subi(location, location, Operand(stack_offset));
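
MultiPush above derives one stack adjustment from the popcount of the register mask, then stores each set register with the highest code furthest from the new sp; a sketch of the same bookkeeping (std::bitset stands in for base::bits::CountPopulation in this standalone version):

#include <bitset>
#include <cstdint>
#include <cstdio>

int main() {
  const int kPointerSize = 8;                               // PPC64
  const uint32_t regs = (1u << 3) | (1u << 4) | (1u << 5);  // r3, r4, r5
  int num_to_push = static_cast<int>(std::bitset<32>(regs).count());
  int stack_offset = num_to_push * kPointerSize;  // single subi from sp
  for (int i = 31; i >= 0; i--) {                 // highest code first
    if ((regs & (1u << i)) != 0) {
      stack_offset -= kPointerSize;
      std::printf("StoreP r%d -> sp+%d\n", i, stack_offset);
    }
  }
  return 0;
}
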
@@ -430,69 +463,6 @@ void MacroAssembler::RecordWrite(
}
}
-void MacroAssembler::RecordWriteCodeEntryField(Register js_function,
- Register code_entry,
- Register scratch) {
- const int offset = JSFunction::kCodeEntryOffset;
-
- // Since a code entry (value) is always in old space, we don't need to
- // update the remembered set. If incremental marking is off, there is
- // nothing for us to do.
- if (!FLAG_incremental_marking) return;
-
- DCHECK(js_function.is(r4));
- DCHECK(code_entry.is(r7));
- DCHECK(scratch.is(r8));
- AssertNotSmi(js_function);
-
- if (emit_debug_code()) {
- addi(scratch, js_function, Operand(offset - kHeapObjectTag));
- LoadP(ip, MemOperand(scratch));
- cmp(ip, code_entry);
- Check(eq, kWrongAddressOrValuePassedToRecordWrite);
- }
-
- // First, check if a write barrier is even needed. The tests below
- // catch stores of Smis and stores into young gen.
- Label done;
-
- CheckPageFlag(code_entry, scratch,
- MemoryChunk::kPointersToHereAreInterestingMask, eq, &done);
- CheckPageFlag(js_function, scratch,
- MemoryChunk::kPointersFromHereAreInterestingMask, eq, &done);
-
- const Register dst = scratch;
- addi(dst, js_function, Operand(offset - kHeapObjectTag));
-
- // Save caller-saved registers. js_function and code_entry are in the
- // caller-saved register list.
- DCHECK(kJSCallerSaved & js_function.bit());
- DCHECK(kJSCallerSaved & code_entry.bit());
- mflr(r0);
- MultiPush(kJSCallerSaved | r0.bit());
-
- int argument_count = 3;
- PrepareCallCFunction(argument_count, code_entry);
-
- mr(r3, js_function);
- mr(r4, dst);
- mov(r5, Operand(ExternalReference::isolate_address(isolate())));
-
- {
- AllowExternalCallThatCantCauseGC scope(this);
- CallCFunction(
- ExternalReference::incremental_marking_record_write_code_entry_function(
- isolate()),
- argument_count);
- }
-
- // Restore caller-saved registers (including js_function and code_entry).
- MultiPop(kJSCallerSaved | r0.bit());
- mtlr(r0);
-
- bind(&done);
-}
-
void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
Register address, Register scratch,
SaveFPRegsMode fp_mode,
@@ -559,23 +529,6 @@ void TurboAssembler::PushCommonFrame(Register marker_reg) {
addi(fp, sp, Operand(fp_delta * kPointerSize));
}
-void MacroAssembler::PopCommonFrame(Register marker_reg) {
- if (FLAG_enable_embedded_constant_pool) {
- if (marker_reg.is_valid()) {
- Pop(r0, fp, kConstantPoolRegister, marker_reg);
- } else {
- Pop(r0, fp, kConstantPoolRegister);
- }
- } else {
- if (marker_reg.is_valid()) {
- Pop(r0, fp, marker_reg);
- } else {
- Pop(r0, fp);
- }
- }
- mtlr(r0);
-}
-
void TurboAssembler::PushStandardFrame(Register function_reg) {
int fp_delta = 0;
mflr(r0);
@@ -635,17 +588,6 @@ void MacroAssembler::PopSafepointRegisters() {
}
}
-
-void MacroAssembler::StoreToSafepointRegisterSlot(Register src, Register dst) {
- StoreP(src, SafepointRegisterSlot(dst));
-}
-
-
-void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
- LoadP(dst, SafepointRegisterSlot(src));
-}
-
-
int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
// The registers are pushed starting with the highest encoding,
// which means that lowest encodings are closest to the stack pointer.
@@ -664,19 +606,6 @@ int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
}
-MemOperand MacroAssembler::SafepointRegisterSlot(Register reg) {
- return MemOperand(sp, SafepointRegisterStackIndex(reg.code()) * kPointerSize);
-}
-
-
-MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {
- // General purpose registers are pushed last on the stack.
- const RegisterConfiguration* config = RegisterConfiguration::Crankshaft();
- int doubles_size = config->num_allocatable_double_registers() * kDoubleSize;
- int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize;
- return MemOperand(sp, doubles_size + register_offset);
-}
-
void TurboAssembler::CanonicalizeNaN(const DoubleRegister dst,
const DoubleRegister src) {
// Turn potential sNaN into qNaN.
@@ -947,35 +876,9 @@ void TurboAssembler::StubPrologue(StackFrame::Type type, Register base,
}
}
-void TurboAssembler::Prologue(bool code_pre_aging, Register base,
- int prologue_offset) {
+void TurboAssembler::Prologue(Register base, int prologue_offset) {
DCHECK(!base.is(no_reg));
- {
- PredictableCodeSizeScope predictible_code_size_scope(
- this, kNoCodeAgeSequenceLength);
- Assembler::BlockTrampolinePoolScope block_trampoline_pool(this);
- // The following instructions must remain together and unmodified
- // for code aging to work properly.
- if (code_pre_aging) {
- // Pre-age the code.
- // This matches the code found in PatchPlatformCodeAge()
- Code* stub = Code::GetPreAgedCodeAgeStub(isolate());
- intptr_t target = reinterpret_cast<intptr_t>(stub->instruction_start());
- // Don't use Call -- we need to preserve ip and lr
- nop(); // marker to detect sequence (see IsOld)
- mov(r3, Operand(target));
- Jump(r3);
- for (int i = 0; i < kCodeAgingSequenceNops; i++) {
- nop();
- }
- } else {
- // This matches the code found in GetNoCodeAgeSequence()
- PushStandardFrame(r4);
- for (int i = 0; i < kNoCodeAgeSequenceNops; i++) {
- nop();
- }
- }
- }
+ PushStandardFrame(r4);
if (FLAG_enable_embedded_constant_pool) {
// base contains prologue address
LoadConstantPoolPointerRegister(base, -prologue_offset);
@@ -983,12 +886,6 @@ void TurboAssembler::Prologue(bool code_pre_aging, Register base,
}
}
-void MacroAssembler::EmitLoadFeedbackVector(Register vector) {
- LoadP(vector, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- LoadP(vector, FieldMemOperand(vector, JSFunction::kFeedbackVectorOffset));
- LoadP(vector, FieldMemOperand(vector, Cell::kValueOffset));
-}
-
void TurboAssembler::EnterFrame(StackFrame::Type type,
bool load_constant_pool_pointer_reg) {
if (FLAG_enable_embedded_constant_pool && load_constant_pool_pointer_reg) {
@@ -1288,8 +1185,7 @@ void TurboAssembler::PrepareForTailCall(const ParameterCount& callee_args_count,
void MacroAssembler::InvokePrologue(const ParameterCount& expected,
const ParameterCount& actual, Label* done,
bool* definitely_mismatches,
- InvokeFlag flag,
- const CallWrapper& call_wrapper) {
+ InvokeFlag flag) {
bool definitely_matches = false;
*definitely_mismatches = false;
Label regular_invoke;
@@ -1338,11 +1234,9 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
}
if (!definitely_matches) {
- Handle<Code> adaptor = isolate()->builtins()->ArgumentsAdaptorTrampoline();
+ Handle<Code> adaptor = BUILTIN_CODE(isolate(), ArgumentsAdaptorTrampoline);
if (flag == CALL_FUNCTION) {
- call_wrapper.BeforeCall(CallSize(adaptor));
Call(adaptor);
- call_wrapper.AfterCall();
if (!*definitely_mismatches) {
b(done);
}
@@ -1396,20 +1290,17 @@ void MacroAssembler::CheckDebugHook(Register fun, Register new_target,
bind(&skip_hook);
}
-
void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
const ParameterCount& expected,
const ParameterCount& actual,
- InvokeFlag flag,
- const CallWrapper& call_wrapper) {
+ InvokeFlag flag) {
// You can't call a function without a valid frame.
DCHECK(flag == JUMP_FUNCTION || has_frame());
DCHECK(function.is(r4));
DCHECK_IMPLIES(new_target.is_valid(), new_target.is(r6));
- if (call_wrapper.NeedsDebugHookCheck()) {
- CheckDebugHook(function, new_target, expected, actual);
- }
+ // On function call, call into the debugger if necessary.
+ CheckDebugHook(function, new_target, expected, actual);
// Clear the new.target register if not given.
if (!new_target.is_valid()) {
@@ -1418,18 +1309,16 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
Label done;
bool definitely_mismatches = false;
- InvokePrologue(expected, actual, &done, &definitely_mismatches, flag,
- call_wrapper);
+ InvokePrologue(expected, actual, &done, &definitely_mismatches, flag);
if (!definitely_mismatches) {
// We call indirectly through the code field in the function to
// allow recompilation to take effect without changing any of the
// call sites.
Register code = ip;
- LoadP(code, FieldMemOperand(function, JSFunction::kCodeEntryOffset));
+ LoadP(code, FieldMemOperand(function, JSFunction::kCodeOffset));
+ addi(code, code, Operand(Code::kHeaderSize - kHeapObjectTag));
if (flag == CALL_FUNCTION) {
- call_wrapper.BeforeCall(CallSize(code));
CallJSEntry(code);
- call_wrapper.AfterCall();
} else {
DCHECK(flag == JUMP_FUNCTION);
JumpToJSEntry(code);
@@ -1441,11 +1330,9 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
}
}
-
void MacroAssembler::InvokeFunction(Register fun, Register new_target,
const ParameterCount& actual,
- InvokeFlag flag,
- const CallWrapper& call_wrapper) {
+ InvokeFlag flag) {
// You can't call a function without a valid frame.
DCHECK(flag == JUMP_FUNCTION || has_frame());
@@ -1462,15 +1349,13 @@ void MacroAssembler::InvokeFunction(Register fun, Register new_target,
temp_reg, SharedFunctionInfo::kFormalParameterCountOffset));
ParameterCount expected(expected_reg);
- InvokeFunctionCode(fun, new_target, expected, actual, flag, call_wrapper);
+ InvokeFunctionCode(fun, new_target, expected, actual, flag);
}
-
void MacroAssembler::InvokeFunction(Register function,
const ParameterCount& expected,
const ParameterCount& actual,
- InvokeFlag flag,
- const CallWrapper& call_wrapper) {
+ InvokeFlag flag) {
// You can't call a function without a valid frame.
DCHECK(flag == JUMP_FUNCTION || has_frame());
@@ -1480,28 +1365,15 @@ void MacroAssembler::InvokeFunction(Register function,
// Get the function and setup the context.
LoadP(cp, FieldMemOperand(r4, JSFunction::kContextOffset));
- InvokeFunctionCode(r4, no_reg, expected, actual, flag, call_wrapper);
+ InvokeFunctionCode(r4, no_reg, expected, actual, flag);
}
-
void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
const ParameterCount& expected,
const ParameterCount& actual,
- InvokeFlag flag,
- const CallWrapper& call_wrapper) {
+ InvokeFlag flag) {
Move(r4, function);
- InvokeFunction(r4, expected, actual, flag, call_wrapper);
-}
-
-
-void MacroAssembler::IsObjectJSStringType(Register object, Register scratch,
- Label* fail) {
- DCHECK(kNotStringTag != 0);
-
- LoadP(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
- lbz(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
- andi(r0, scratch, Operand(kIsNotStringMask));
- bne(fail, cr0);
+ InvokeFunction(r4, expected, actual, flag);
}
void MacroAssembler::MaybeDropFrames() {
@@ -1511,7 +1383,7 @@ void MacroAssembler::MaybeDropFrames() {
mov(r4, Operand(restart_fp));
LoadP(r4, MemOperand(r4));
cmpi(r4, Operand::Zero());
- Jump(isolate()->builtins()->FrameDropperTrampoline(), RelocInfo::CODE_TARGET,
+ Jump(BUILTIN_CODE(isolate(), FrameDropperTrampoline), RelocInfo::CODE_TARGET,
ne);
}
@@ -1543,46 +1415,6 @@ void MacroAssembler::PopStackHandler() {
}
-// Compute the hash code from the untagged key. This must be kept in sync with
-// ComputeIntegerHash in utils.h and KeyedLoadGenericStub in
-// code-stub-hydrogen.cc
-void MacroAssembler::GetNumberHash(Register t0, Register scratch) {
- // First of all we assign the hash seed to scratch.
- LoadRoot(scratch, Heap::kHashSeedRootIndex);
- SmiUntag(scratch);
-
- // Xor original key with a seed.
- xor_(t0, t0, scratch);
-
- // Compute the hash code from the untagged key. This must be kept in sync
- // with ComputeIntegerHash in utils.h.
- //
- // hash = ~hash + (hash << 15);
- notx(scratch, t0);
- slwi(t0, t0, Operand(15));
- add(t0, scratch, t0);
- // hash = hash ^ (hash >> 12);
- srwi(scratch, t0, Operand(12));
- xor_(t0, t0, scratch);
- // hash = hash + (hash << 2);
- slwi(scratch, t0, Operand(2));
- add(t0, t0, scratch);
- // hash = hash ^ (hash >> 4);
- srwi(scratch, t0, Operand(4));
- xor_(t0, t0, scratch);
- // hash = hash * 2057;
- mr(r0, t0);
- slwi(scratch, t0, Operand(3));
- add(t0, t0, scratch);
- slwi(scratch, r0, Operand(11));
- add(t0, t0, scratch);
- // hash = hash ^ (hash >> 16);
- srwi(scratch, t0, Operand(16));
- xor_(t0, t0, scratch);
- // hash & 0x3fffffff
- ExtractBitRange(t0, t0, 29, 0);
-}
-
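
The removed GetNumberHash is a line-for-line encoding of ComputeIntegerHash from src/utils.h, with the multiply by 2057 strength-reduced to shifts and adds (2057 == 1 + 8 + 2048); the same sequence in C++:

#include <cstdint>
#include <cstdio>

uint32_t ComputeIntegerHash(uint32_t key, uint32_t seed) {
  uint32_t hash = key ^ seed;  // xor original key with a seed
  hash = ~hash + (hash << 15);
  hash = hash ^ (hash >> 12);
  hash = hash + (hash << 2);
  hash = hash ^ (hash >> 4);
  hash = hash * 2057;          // asm: hash + (hash << 3) + (hash << 11)
  hash = hash ^ (hash >> 16);
  return hash & 0x3fffffff;    // asm: ExtractBitRange(t0, t0, 29, 0)
}

int main() {
  std::printf("%u\n", ComputeIntegerHash(42, 0));
  return 0;
}
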
void MacroAssembler::Allocate(int object_size, Register result,
Register scratch1, Register scratch2,
Label* gc_required, AllocationFlags flags) {
@@ -1679,105 +1511,6 @@ void MacroAssembler::Allocate(int object_size, Register result,
addi(result, result, Operand(kHeapObjectTag));
}
-
-void MacroAssembler::Allocate(Register object_size, Register result,
- Register result_end, Register scratch,
- Label* gc_required, AllocationFlags flags) {
- if (!FLAG_inline_new) {
- if (emit_debug_code()) {
- // Trash the registers to simulate an allocation failure.
- li(result, Operand(0x7091));
- li(scratch, Operand(0x7191));
- li(result_end, Operand(0x7291));
- }
- b(gc_required);
- return;
- }
-
- // |object_size| and |result_end| may overlap if the DOUBLE_ALIGNMENT flag
- // is not specified. Other registers must not overlap.
- DCHECK(!AreAliased(object_size, result, scratch, ip));
- DCHECK(!AreAliased(result_end, result, scratch, ip));
- DCHECK((flags & DOUBLE_ALIGNMENT) == 0 || !object_size.is(result_end));
-
- // Check relative positions of allocation top and limit addresses.
- ExternalReference allocation_top =
- AllocationUtils::GetAllocationTopReference(isolate(), flags);
- ExternalReference allocation_limit =
- AllocationUtils::GetAllocationLimitReference(isolate(), flags);
- intptr_t top = reinterpret_cast<intptr_t>(allocation_top.address());
- intptr_t limit = reinterpret_cast<intptr_t>(allocation_limit.address());
- DCHECK((limit - top) == kPointerSize);
-
- // Set up allocation top address and allocation limit registers.
- Register top_address = scratch;
- // This code stores a temporary value in ip. This is OK, as the code below
- // does not need ip for implicit literal generation.
- Register alloc_limit = ip;
- mov(top_address, Operand(allocation_top));
-
- if ((flags & RESULT_CONTAINS_TOP) == 0) {
- // Load allocation top into result and allocation limit into alloc_limit.
- LoadP(result, MemOperand(top_address));
- LoadP(alloc_limit, MemOperand(top_address, kPointerSize));
- } else {
- if (emit_debug_code()) {
- // Assert that result actually contains top on entry.
- LoadP(alloc_limit, MemOperand(top_address));
- cmp(result, alloc_limit);
- Check(eq, kUnexpectedAllocationTop);
- }
- // Load allocation limit. Result already contains allocation top.
- LoadP(alloc_limit, MemOperand(top_address, limit - top));
- }
-
- if ((flags & DOUBLE_ALIGNMENT) != 0) {
- // Align the next allocation. Storing the filler map without checking top is
- // safe in new-space because the limit of the heap is aligned there.
-#if V8_TARGET_ARCH_PPC64
- STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
-#else
- STATIC_ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
- andi(result_end, result, Operand(kDoubleAlignmentMask));
- Label aligned;
- beq(&aligned, cr0);
- if ((flags & PRETENURE) != 0) {
- cmpl(result, alloc_limit);
- bge(gc_required);
- }
- mov(result_end, Operand(isolate()->factory()->one_pointer_filler_map()));
- stw(result_end, MemOperand(result));
- addi(result, result, Operand(kDoubleSize / 2));
- bind(&aligned);
-#endif
- }
-
- // Calculate new top and bail out if new space is exhausted. Use result
- // to calculate the new top. Object size may be in words so a shift is
- // required to get the number of bytes.
- sub(r0, alloc_limit, result);
- if ((flags & SIZE_IN_WORDS) != 0) {
- ShiftLeftImm(result_end, object_size, Operand(kPointerSizeLog2));
- cmp(r0, result_end);
- blt(gc_required);
- add(result_end, result, result_end);
- } else {
- cmp(r0, object_size);
- blt(gc_required);
- add(result_end, result, object_size);
- }
-
- // Update allocation top. result temporarily holds the new top.
- if (emit_debug_code()) {
- andi(r0, result_end, Operand(kObjectAlignmentMask));
- Check(eq, kUnalignedAllocationInNewSpace, cr0);
- }
- StoreP(result_end, MemOperand(top_address));
-
- // Tag object.
- addi(result, result, Operand(kHeapObjectTag));
-}
-
void MacroAssembler::CompareObjectType(Register object, Register map,
Register type_reg, InstanceType type) {
const Register temp = type_reg.is(no_reg) ? r0 : type_reg;
@@ -1985,49 +1718,6 @@ void MacroAssembler::SmiToDouble(DoubleRegister value, Register smi) {
ConvertIntToDouble(ip, value);
}
-
-void MacroAssembler::TestDoubleIsInt32(DoubleRegister double_input,
- Register scratch1, Register scratch2,
- DoubleRegister double_scratch) {
- TryDoubleToInt32Exact(scratch1, double_input, scratch2, double_scratch);
-}
-
-void MacroAssembler::TestDoubleIsMinusZero(DoubleRegister input,
- Register scratch1,
- Register scratch2) {
-#if V8_TARGET_ARCH_PPC64
- MovDoubleToInt64(scratch1, input);
- rotldi(scratch1, scratch1, 1);
- cmpi(scratch1, Operand(1));
-#else
- MovDoubleToInt64(scratch1, scratch2, input);
- Label done;
- cmpi(scratch2, Operand::Zero());
- bne(&done);
- lis(scratch2, Operand(SIGN_EXT_IMM16(0x8000)));
- cmp(scratch1, scratch2);
- bind(&done);
-#endif
-}
-
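
The 64-bit branch of the removed TestDoubleIsMinusZero works on the raw bit pattern: rotating -0.0 (0x8000000000000000) left by one yields exactly 1, and no other double does; the same test as standalone C++:

#include <cstdint>
#include <cstdio>
#include <cstring>

bool IsMinusZero(double d) {
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof(bits));           // asm: MovDoubleToInt64
  uint64_t rotated = (bits << 1) | (bits >> 63);  // asm: rotldi(..., 1)
  return rotated == 1;                            // asm: cmpi(..., 1)
}

int main() {
  std::printf("%d %d %d\n", IsMinusZero(-0.0), IsMinusZero(0.0),
              IsMinusZero(-1.0));  // prints: 1 0 0
  return 0;
}
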
-void MacroAssembler::TestDoubleSign(DoubleRegister input, Register scratch) {
-#if V8_TARGET_ARCH_PPC64
- MovDoubleToInt64(scratch, input);
-#else
- MovDoubleHighToInt(scratch, input);
-#endif
- cmpi(scratch, Operand::Zero());
-}
-
-void MacroAssembler::TestHeapNumberSign(Register input, Register scratch) {
-#if V8_TARGET_ARCH_PPC64
- LoadP(scratch, FieldMemOperand(input, HeapNumber::kValueOffset));
-#else
- lwz(scratch, FieldMemOperand(input, HeapNumber::kExponentOffset));
-#endif
- cmpi(scratch, Operand::Zero());
-}
-
void MacroAssembler::TryDoubleToInt32Exact(Register result,
DoubleRegister double_input,
Register scratch,
@@ -2053,6 +1743,7 @@ void MacroAssembler::TryDoubleToInt32Exact(Register result,
fcmpu(double_scratch, double_input);
bind(&done);
}
+
void TurboAssembler::TruncateDoubleToIDelayed(Zone* zone, Register result,
DoubleRegister double_input) {
Label done;
@@ -2097,145 +1788,6 @@ void TurboAssembler::TryInlineTruncateDoubleToI(Register result,
beq(done);
}
-void MacroAssembler::TryInt32Floor(Register result, DoubleRegister double_input,
- Register input_high, Register scratch,
- DoubleRegister double_scratch, Label* done,
- Label* exact) {
- DCHECK(!result.is(input_high));
- DCHECK(!double_input.is(double_scratch));
- Label exception;
-
- MovDoubleHighToInt(input_high, double_input);
-
- // Test for NaN/Inf
- ExtractBitMask(result, input_high, HeapNumber::kExponentMask);
- cmpli(result, Operand(0x7ff));
- beq(&exception);
-
- // Convert (rounding to -Inf)
- ConvertDoubleToInt64(double_input,
-#if !V8_TARGET_ARCH_PPC64
- scratch,
-#endif
- result, double_scratch, kRoundToMinusInf);
-
-// Test for overflow
-#if V8_TARGET_ARCH_PPC64
- TestIfInt32(result, r0);
-#else
- TestIfInt32(scratch, result, r0);
-#endif
- bne(&exception);
-
- // Test for exactness
- fcfid(double_scratch, double_scratch);
- fcmpu(double_scratch, double_input);
- beq(exact);
- b(done);
-
- bind(&exception);
-}
-
-
-void MacroAssembler::TryInlineTruncateDoubleToI(Register result,
- DoubleRegister double_input,
- Label* done) {
- DoubleRegister double_scratch = kScratchDoubleReg;
-#if !V8_TARGET_ARCH_PPC64
- Register scratch = ip;
-#endif
-
- ConvertDoubleToInt64(double_input,
-#if !V8_TARGET_ARCH_PPC64
- scratch,
-#endif
- result, double_scratch);
-
-// Test for overflow
-#if V8_TARGET_ARCH_PPC64
- TestIfInt32(result, r0);
-#else
- TestIfInt32(scratch, result, r0);
-#endif
- beq(done);
-}
-
-
-void MacroAssembler::TruncateDoubleToI(Register result,
- DoubleRegister double_input) {
- Label done;
-
- TryInlineTruncateDoubleToI(result, double_input, &done);
-
- // If we fell through then inline version didn't succeed - call stub instead.
- mflr(r0);
- push(r0);
- // Put input on stack.
- stfdu(double_input, MemOperand(sp, -kDoubleSize));
-
- DoubleToIStub stub(isolate(), sp, result, 0, true, true);
- CallStub(&stub);
-
- addi(sp, sp, Operand(kDoubleSize));
- pop(r0);
- mtlr(r0);
-
- bind(&done);
-}
-
-
-void MacroAssembler::TruncateHeapNumberToI(Register result, Register object) {
- Label done;
- DoubleRegister double_scratch = kScratchDoubleReg;
- DCHECK(!result.is(object));
-
- lfd(double_scratch, FieldMemOperand(object, HeapNumber::kValueOffset));
- TryInlineTruncateDoubleToI(result, double_scratch, &done);
-
- // If we fell through then inline version didn't succeed - call stub instead.
- mflr(r0);
- push(r0);
- DoubleToIStub stub(isolate(), object, result,
- HeapNumber::kValueOffset - kHeapObjectTag, true, true);
- CallStub(&stub);
- pop(r0);
- mtlr(r0);
-
- bind(&done);
-}
-
-
-void MacroAssembler::TruncateNumberToI(Register object, Register result,
- Register heap_number_map,
- Register scratch1, Label* not_number) {
- Label done;
- DCHECK(!result.is(object));
-
- UntagAndJumpIfSmi(result, object, &done);
- JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number);
- TruncateHeapNumberToI(result, object);
-
- bind(&done);
-}
-
-
-void MacroAssembler::GetLeastBitsFromSmi(Register dst, Register src,
- int num_least_bits) {
-#if V8_TARGET_ARCH_PPC64
- rldicl(dst, src, kBitsPerPointer - kSmiShift,
- kBitsPerPointer - num_least_bits);
-#else
- rlwinm(dst, src, kBitsPerPointer - kSmiShift,
- kBitsPerPointer - num_least_bits, 31);
-#endif
-}
-
-
-void MacroAssembler::GetLeastBitsFromInt32(Register dst, Register src,
- int num_least_bits) {
- rlwinm(dst, src, 0, 32 - num_least_bits, 31);
-}
-
void TurboAssembler::CallRuntimeDelayed(Zone* zone, Runtime::FunctionId fid,
SaveFPRegsMode save_doubles) {
const Runtime::Function* f = Runtime::FunctionForId(fid);
@@ -2279,17 +1831,6 @@ void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments,
CallStub(&stub);
}
-
-void MacroAssembler::CallExternalReference(const ExternalReference& ext,
- int num_arguments) {
- mov(r3, Operand(num_arguments));
- mov(r4, Operand(ext));
-
- CEntryStub stub(isolate(), 1);
- CallStub(&stub);
-}
-
-
void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
const Runtime::Function* function = Runtime::FunctionForId(fid);
DCHECK_EQ(1, function->result_size);
@@ -2378,29 +1919,13 @@ void TurboAssembler::Abort(BailoutReason reason) {
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
FrameScope scope(this, StackFrame::NONE);
- Call(isolate()->builtins()->Abort(), RelocInfo::CODE_TARGET);
+ Call(BUILTIN_CODE(isolate(), Abort), RelocInfo::CODE_TARGET);
} else {
- Call(isolate()->builtins()->Abort(), RelocInfo::CODE_TARGET);
+ Call(BUILTIN_CODE(isolate(), Abort), RelocInfo::CODE_TARGET);
}
// will not return here
}
-
-void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
- if (context_chain_length > 0) {
- // Move up the chain of contexts to the context containing the slot.
- LoadP(dst, MemOperand(cp, Context::SlotOffset(Context::PREVIOUS_INDEX)));
- for (int i = 1; i < context_chain_length; i++) {
- LoadP(dst, MemOperand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
- }
- } else {
- // Slot is in the current function context. Move it into the
- // destination register in case we store into it (the write barrier
- // cannot be allowed to destroy the context in esi).
- mr(dst, cp);
- }
-}
-
void MacroAssembler::LoadNativeContextSlot(int index, Register dst) {
LoadP(dst, NativeContextMemOperand());
LoadP(dst, ContextMemOperand(dst, index));
@@ -2423,60 +1948,6 @@ void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
}
}
-
-void MacroAssembler::JumpIfNotPowerOfTwoOrZero(
- Register reg, Register scratch, Label* not_power_of_two_or_zero) {
- subi(scratch, reg, Operand(1));
- cmpi(scratch, Operand::Zero());
- blt(not_power_of_two_or_zero);
- and_(r0, scratch, reg, SetRC);
- bne(not_power_of_two_or_zero, cr0);
-}
-
-
-void MacroAssembler::JumpIfNotPowerOfTwoOrZeroAndNeg(Register reg,
- Register scratch,
- Label* zero_and_neg,
- Label* not_power_of_two) {
- subi(scratch, reg, Operand(1));
- cmpi(scratch, Operand::Zero());
- blt(zero_and_neg);
- and_(r0, scratch, reg, SetRC);
- bne(not_power_of_two, cr0);
-}
-
-#if !V8_TARGET_ARCH_PPC64
-void MacroAssembler::SmiTagCheckOverflow(Register reg, Register overflow) {
- DCHECK(!reg.is(overflow));
- mr(overflow, reg); // Save original value.
- SmiTag(reg);
- xor_(overflow, overflow, reg, SetRC); // Overflow if (value ^ 2 * value) < 0.
-}
-
-
-void MacroAssembler::SmiTagCheckOverflow(Register dst, Register src,
- Register overflow) {
- if (dst.is(src)) {
- // Fall back to slower case.
- SmiTagCheckOverflow(dst, overflow);
- } else {
- DCHECK(!dst.is(src));
- DCHECK(!dst.is(overflow));
- DCHECK(!src.is(overflow));
- SmiTag(dst, src);
- xor_(overflow, dst, src, SetRC); // Overflow if (value ^ 2 * value) < 0.
- }
-}
-#endif
-
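
On 32-bit targets SmiTag is a left shift by one, so the removed overflow check relies on (value ^ 2 * value) having its sign bit set exactly when the shift changed the sign, i.e. when the value does not fit in 31 bits; a standalone sketch:

#include <cstdint>
#include <cstdio>

bool SmiTagOverflows(int32_t value) {
  int32_t tagged = static_cast<int32_t>(static_cast<uint32_t>(value) << 1);
  return (value ^ tagged) < 0;  // asm: xor_(overflow, overflow, reg, SetRC)
}

int main() {
  std::printf("%d\n", SmiTagOverflows(0x3FFFFFFF));   // 0: largest 32-bit smi
  std::printf("%d\n", SmiTagOverflows(0x40000000));   // 1: overflows
  std::printf("%d\n", SmiTagOverflows(-0x40000000));  // 0: smallest 32-bit smi
  return 0;
}
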
-void MacroAssembler::JumpIfNotBothSmi(Register reg1, Register reg2,
- Label* on_not_both_smi) {
- STATIC_ASSERT(kSmiTag == 0);
- orx(r0, reg1, reg2, LeaveRC);
- JumpIfNotSmi(r0, on_not_both_smi);
-}
-
-
void MacroAssembler::UntagAndJumpIfSmi(Register dst, Register src,
Label* smi_case) {
STATIC_ASSERT(kSmiTag == 0);
@@ -2586,25 +2057,6 @@ void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
}
-void MacroAssembler::AssertIsRoot(Register reg, Heap::RootListIndex index) {
- if (emit_debug_code()) {
- CompareRoot(reg, index);
- Check(eq, kHeapNumberMapRegisterClobbered);
- }
-}
-
-
-void MacroAssembler::JumpIfNotHeapNumber(Register object,
- Register heap_number_map,
- Register scratch,
- Label* on_not_heap_number) {
- LoadP(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
- AssertIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
- cmp(scratch, heap_number_map);
- bne(on_not_heap_number);
-}
-
-
void MacroAssembler::JumpIfNonSmisNotBothSequentialOneByteStrings(
Register first, Register second, Register scratch1, Register scratch2,
Label* failure) {
@@ -2619,19 +2071,6 @@ void MacroAssembler::JumpIfNonSmisNotBothSequentialOneByteStrings(
scratch2, failure);
}
-void MacroAssembler::JumpIfNotBothSequentialOneByteStrings(Register first,
- Register second,
- Register scratch1,
- Register scratch2,
- Label* failure) {
- // Check that neither is a smi.
- and_(scratch1, first, second);
- JumpIfSmi(scratch1, failure);
- JumpIfNonSmisNotBothSequentialOneByteStrings(first, second, scratch1,
- scratch2, failure);
-}
-
-
void MacroAssembler::JumpIfNotUniqueNameInstanceType(Register reg,
Label* not_unique_name) {
STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
@@ -2644,38 +2083,6 @@ void MacroAssembler::JumpIfNotUniqueNameInstanceType(Register reg,
bind(&succeed);
}
-
-// Allocates a heap number or jumps to the need_gc label if the young space
-// is full and a scavenge is needed.
-void MacroAssembler::AllocateHeapNumber(Register result, Register scratch1,
- Register scratch2,
- Register heap_number_map,
- Label* gc_required,
- MutableMode mode) {
- // Allocate an object in the heap for the heap number and tag it as a heap
- // object.
- Allocate(HeapNumber::kSize, result, scratch1, scratch2, gc_required,
- NO_ALLOCATION_FLAGS);
-
- Heap::RootListIndex map_index = mode == MUTABLE
- ? Heap::kMutableHeapNumberMapRootIndex
- : Heap::kHeapNumberMapRootIndex;
- AssertIsRoot(heap_number_map, map_index);
-
- // Store heap number map in the allocated object.
- StoreP(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset),
- r0);
-}
-
-
-void MacroAssembler::AllocateHeapNumberWithValue(
- Register result, DoubleRegister value, Register scratch1, Register scratch2,
- Register heap_number_map, Label* gc_required) {
- AllocateHeapNumber(result, scratch1, scratch2, heap_number_map, gc_required);
- stfd(value, FieldMemOperand(result, HeapNumber::kValueOffset));
-}
-
-
void MacroAssembler::AllocateJSValue(Register result, Register constructor,
Register value, Register scratch1,
Register scratch2, Label* gc_required) {
@@ -2699,29 +2106,6 @@ void MacroAssembler::AllocateJSValue(Register result, Register constructor,
STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
}
-void MacroAssembler::InitializeNFieldsWithFiller(Register current_address,
- Register count,
- Register filler) {
- Label loop;
- mtctr(count);
- bind(&loop);
- StoreP(filler, MemOperand(current_address));
- addi(current_address, current_address, Operand(kPointerSize));
- bdnz(&loop);
-}
-
-void MacroAssembler::InitializeFieldsWithFiller(Register current_address,
- Register end_address,
- Register filler) {
- Label done;
- sub(r0, end_address, current_address, LeaveOE, SetRC);
- beq(&done, cr0);
- ShiftRightImm(r0, r0, Operand(kPointerSizeLog2));
- InitializeNFieldsWithFiller(current_address, r0, filler);
- bind(&done);
-}
-
-
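
The two removed filler helpers are a counted store loop (mtctr/bdnz in the assembly) plus a byte-range variant that derives the count from the pointer distance; the same logic in C++ (names kept, the standalone program is only illustrative):

#include <cstddef>
#include <cstdint>

void InitializeNFieldsWithFiller(intptr_t* current, size_t count,
                                 intptr_t filler) {
  for (size_t i = 0; i < count; i++) *current++ = filler;  // bdnz loop
}

void InitializeFieldsWithFiller(intptr_t* current, intptr_t* end,
                                intptr_t filler) {
  if (current == end) return;  // asm: beq(&done, cr0)
  // The asm shifts the byte distance right by kPointerSizeLog2; pointer
  // subtraction gives the element count directly here.
  InitializeNFieldsWithFiller(current, static_cast<size_t>(end - current),
                              filler);
}

int main() {
  intptr_t fields[4];
  InitializeFieldsWithFiller(fields, fields + 4, 0);
  return 0;
}
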
void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialOneByte(
Register first, Register second, Register scratch1, Register scratch2,
Label* failure) {
@@ -2753,47 +2137,6 @@ int TurboAssembler::CalculateStackPassedWords(int num_reg_arguments,
return stack_passed_words;
}
-
-void MacroAssembler::EmitSeqStringSetCharCheck(Register string, Register index,
- Register value,
- uint32_t encoding_mask) {
- Label is_object;
- TestIfSmi(string, r0);
- Check(ne, kNonObject, cr0);
-
- LoadP(ip, FieldMemOperand(string, HeapObject::kMapOffset));
- lbz(ip, FieldMemOperand(ip, Map::kInstanceTypeOffset));
-
- andi(ip, ip, Operand(kStringRepresentationMask | kStringEncodingMask));
- cmpi(ip, Operand(encoding_mask));
- Check(eq, kUnexpectedStringType);
-
-// The index is assumed to be untagged coming in; tag it to compare with the
-// string length without using a temp register. It is restored at the end of
-// this function.
-#if !V8_TARGET_ARCH_PPC64
- Label index_tag_ok, index_tag_bad;
- JumpIfNotSmiCandidate(index, r0, &index_tag_bad);
-#endif
- SmiTag(index, index);
-#if !V8_TARGET_ARCH_PPC64
- b(&index_tag_ok);
- bind(&index_tag_bad);
- Abort(kIndexIsTooLarge);
- bind(&index_tag_ok);
-#endif
-
- LoadP(ip, FieldMemOperand(string, String::kLengthOffset));
- cmp(index, ip);
- Check(lt, kIndexIsTooLarge);
-
- DCHECK(Smi::kZero == 0);
- cmpi(index, Operand::Zero());
- Check(ge, kIndexIsNegative);
-
- SmiUntag(index, index);
-}
-
void TurboAssembler::PrepareCallCFunction(int num_reg_arguments,
int num_double_arguments,
Register scratch) {
@@ -2897,34 +2240,6 @@ void TurboAssembler::CallCFunctionHelper(Register function,
}
-void MacroAssembler::DecodeConstantPoolOffset(Register result,
- Register location) {
- Label overflow_access, done;
- DCHECK(!AreAliased(result, location, r0));
-
- // Determine constant pool access type
- // Caller has already placed the instruction word at location in result.
- ExtractBitRange(r0, result, 31, 26);
- cmpi(r0, Operand(ADDIS >> 26));
- beq(&overflow_access);
-
- // Regular constant pool access
- // extract the load offset
- andi(result, result, Operand(kImm16Mask));
- b(&done);
-
- bind(&overflow_access);
- // Overflow constant pool access
- // shift addis immediate
- slwi(r0, result, Operand(16));
- // sign-extend and add the load offset
- lwz(result, MemOperand(location, kInstrSize));
- extsh(result, result);
- add(result, r0, result);
-
- bind(&done);
-}
-
void TurboAssembler::CheckPageFlag(
Register object,
Register scratch, // scratch may be same register as object
@@ -3016,46 +2331,6 @@ void MacroAssembler::JumpIfWhite(Register value, Register bitmap_scratch,
beq(value_is_white, cr0);
}
-
-// Saturate a value into 8-bit unsigned integer
-// if input_value < 0, output_value is 0
-// if input_value > 255, output_value is 255
-// otherwise output_value is the input_value
-void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) {
- int satval = (1 << 8) - 1;
-
- if (CpuFeatures::IsSupported(ISELECT)) {
- // set to 0 if negative
- cmpi(input_reg, Operand::Zero());
- isel(lt, output_reg, r0, input_reg);
-
- // set to satval if > satval
- li(r0, Operand(satval));
- cmpi(output_reg, Operand(satval));
- isel(lt, output_reg, output_reg, r0);
- } else {
- Label done, negative_label, overflow_label;
- cmpi(input_reg, Operand::Zero());
- blt(&negative_label);
-
- cmpi(input_reg, Operand(satval));
- bgt(&overflow_label);
- if (!output_reg.is(input_reg)) {
- mr(output_reg, input_reg);
- }
- b(&done);
-
- bind(&negative_label);
- li(output_reg, Operand::Zero()); // set to 0 if negative
- b(&done);
-
- bind(&overflow_label); // set to satval if > satval
- li(output_reg, Operand(satval));
-
- bind(&done);
- }
-}
-
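// Editor's note: the saturation above in plain C++; a sketch, not V8 code.
#include <cstdint>

uint8_t ClampUint8(int32_t v) {
  if (v < 0) return 0;      // set to 0 if negative
  if (v > 255) return 255;  // set to satval if > satval
  return static_cast<uint8_t>(v);
}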
void TurboAssembler::SetRoundingMode(FPRoundingMode RN) { mtfsfi(7, RN); }
void TurboAssembler::ResetRoundingMode() {
@@ -3063,59 +2338,11 @@ void TurboAssembler::ResetRoundingMode() {
}
-void MacroAssembler::ClampDoubleToUint8(Register result_reg,
- DoubleRegister input_reg,
- DoubleRegister double_scratch) {
- Label above_zero;
- Label done;
- Label in_bounds;
-
- LoadDoubleLiteral(double_scratch, Double(0.0), result_reg);
- fcmpu(input_reg, double_scratch);
- bgt(&above_zero);
-
- // Double value is <= 0 or NaN; return 0.
- LoadIntLiteral(result_reg, 0);
- b(&done);
-
- // Double value is > 255; return 255.
- bind(&above_zero);
- LoadDoubleLiteral(double_scratch, Double(255.0), result_reg);
- fcmpu(input_reg, double_scratch);
- ble(&in_bounds);
- LoadIntLiteral(result_reg, 255);
- b(&done);
-
- // In 0-255 range, round and truncate.
- bind(&in_bounds);
-
- // round to nearest (default rounding mode)
- fctiw(double_scratch, input_reg);
- MovDoubleLowToInt(result_reg, double_scratch);
- bind(&done);
-}
-
-
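// Editor's note: a C++ sketch of the clamp above; std::nearbyint rounds to
// nearest under the default FP environment, mirroring fctiw's default mode.
#include <cmath>
#include <cstdint>

uint8_t ClampDoubleToUint8(double v) {
  if (!(v > 0.0)) return 0;  // <= 0 or NaN: return 0
  if (v > 255.0) return 255;
  return static_cast<uint8_t>(std::nearbyint(v));  // round to nearest
}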
void MacroAssembler::LoadInstanceDescriptors(Register map,
Register descriptors) {
LoadP(descriptors, FieldMemOperand(map, Map::kDescriptorsOffset));
}
-
-void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
- lwz(dst, FieldMemOperand(map, Map::kBitField3Offset));
- DecodeField<Map::NumberOfOwnDescriptorsBits>(dst);
-}
-
-
-void MacroAssembler::EnumLength(Register dst, Register map) {
- STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
- lwz(dst, FieldMemOperand(map, Map::kBitField3Offset));
- ExtractBitMask(dst, dst, Map::EnumLengthBits::kMask);
- SmiTag(dst);
-}
-
-
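// Editor's note: both removed helpers decode a BitField out of the map's
// bit_field3 word (EnumLength additionally smi-tags the result); a generic
// sketch, with shift/mask taken as assumed parameters.
#include <cstdint>

uint32_t DecodeBitField(uint32_t bit_field3, uint32_t mask, int shift) {
  return (bit_field3 & mask) >> shift;  // isolate the field, drop its offset
}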
void MacroAssembler::LoadAccessor(Register dst, Register holder,
int accessor_index,
AccessorComponent accessor) {
@@ -3129,53 +2356,6 @@ void MacroAssembler::LoadAccessor(Register dst, Register holder,
LoadP(dst, FieldMemOperand(dst, offset));
}
-
-void MacroAssembler::CheckEnumCache(Label* call_runtime) {
- Register null_value = r8;
- Register empty_fixed_array_value = r9;
- LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
- Label next, start;
- mr(r5, r3);
-
- // Check if the enum length field is properly initialized, indicating that
- // there is an enum cache.
- LoadP(r4, FieldMemOperand(r5, HeapObject::kMapOffset));
-
- EnumLength(r6, r4);
- CmpSmiLiteral(r6, Smi::FromInt(kInvalidEnumCacheSentinel), r0);
- beq(call_runtime);
-
- LoadRoot(null_value, Heap::kNullValueRootIndex);
- b(&start);
-
- bind(&next);
- LoadP(r4, FieldMemOperand(r5, HeapObject::kMapOffset));
-
- // For all objects but the receiver, check that the cache is empty.
- EnumLength(r6, r4);
- CmpSmiLiteral(r6, Smi::kZero, r0);
- bne(call_runtime);
-
- bind(&start);
-
- // Check that there are no elements. Register r5 contains the current JS
- // object we've reached through the prototype chain.
- Label no_elements;
- LoadP(r5, FieldMemOperand(r5, JSObject::kElementsOffset));
- cmp(r5, empty_fixed_array_value);
- beq(&no_elements);
-
- // Second chance, the object may be using the empty slow element dictionary.
- CompareRoot(r5, Heap::kEmptySlowElementDictionaryRootIndex);
- bne(call_runtime);
-
- bind(&no_elements);
- LoadP(r5, FieldMemOperand(r4, Map::kPrototypeOffset));
- cmp(r5, null_value);
- bne(&next);
-}
-
-
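// Editor's note: a self-contained C++ sketch of the prototype walk above;
// the Obj struct and the sentinel value are illustrative stand-ins for V8's
// heap objects, not real API. Returning false corresponds to branching to
// the runtime.
constexpr int kInvalidEnumCacheSentinel = -1;  // assumed stand-in value

struct Obj {
  int enum_length;       // decoded from the object's map
  bool elements_empty;   // empty fixed array or empty slow dictionary
  const Obj* prototype;  // nullptr plays the role of the null value
};

bool CheckEnumCache(const Obj* receiver) {
  if (receiver->enum_length == kInvalidEnumCacheSentinel) return false;
  for (const Obj* o = receiver; o != nullptr; o = o->prototype) {
    // For all objects but the receiver, the cache must be empty.
    if (o != receiver && o->enum_length != 0) return false;
    if (!o->elements_empty) return false;  // object must have no elements
  }
  return true;
}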
////////////////////////////////////////////////////////////////////////////////
//
// New MacroAssembler Interfaces added for PPC
@@ -3989,55 +3169,6 @@ void TurboAssembler::StoreSingleU(DoubleRegister src, const MemOperand& mem,
}
}
-void MacroAssembler::TestJSArrayForAllocationMemento(Register receiver_reg,
- Register scratch_reg,
- Register scratch2_reg,
- Label* no_memento_found) {
- Label map_check;
- Label top_check;
- ExternalReference new_space_allocation_top_adr =
- ExternalReference::new_space_allocation_top_address(isolate());
- const int kMementoMapOffset = JSArray::kSize - kHeapObjectTag;
- const int kMementoLastWordOffset =
- kMementoMapOffset + AllocationMemento::kSize - kPointerSize;
- Register mask = scratch2_reg;
-
- DCHECK(!AreAliased(receiver_reg, scratch_reg, mask));
-
- // Bail out if the object is not in new space.
- JumpIfNotInNewSpace(receiver_reg, scratch_reg, no_memento_found);
-
- DCHECK((~Page::kPageAlignmentMask & 0xffff) == 0);
- lis(mask, Operand((~Page::kPageAlignmentMask >> 16)));
- addi(scratch_reg, receiver_reg, Operand(kMementoLastWordOffset));
-
- // If the object is in new space, we need to check whether it is on the same
- // page as the current top.
- mov(ip, Operand(new_space_allocation_top_adr));
- LoadP(ip, MemOperand(ip));
- Xor(r0, scratch_reg, Operand(ip));
- and_(r0, r0, mask, SetRC);
- beq(&top_check, cr0);
- // The object is on a different page than allocation top. Bail out if the
- // object sits on the page boundary as no memento can follow and we cannot
- // touch the memory following it.
- xor_(r0, scratch_reg, receiver_reg);
- and_(r0, r0, mask, SetRC);
- bne(no_memento_found, cr0);
- // Continue with the actual map check.
- b(&map_check);
- // If top is on the same page as the current object, we need to check whether
- // we are below top.
- bind(&top_check);
- cmp(scratch_reg, ip);
- bge(no_memento_found);
- // Memento map check.
- bind(&map_check);
- LoadP(scratch_reg, MemOperand(receiver_reg, kMementoMapOffset));
- Cmpi(scratch_reg, Operand(isolate()->factory()->allocation_memento_map()),
- r0);
-}
-
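// Editor's note: the page arithmetic above as a C++ sketch; addresses and
// the page alignment mask are taken as plain integers, purely illustrative.
#include <cstdint>

bool MementoMapCheckNeeded(uintptr_t object, uintptr_t memento_last_word,
                           uintptr_t new_space_top,
                           uintptr_t page_alignment_mask) {
  const uintptr_t page_mask = ~page_alignment_mask;
  if (((memento_last_word ^ new_space_top) & page_mask) == 0) {
    // Same page as the allocation top: a memento is possible only below top.
    return memento_last_word < new_space_top;
  }
  // Different page: bail out if the object straddles the page boundary, as
  // no memento can follow it there.
  return ((memento_last_word ^ object) & page_mask) == 0;
}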
Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2, Register reg3,
Register reg4, Register reg5,
Register reg6) {
@@ -4049,7 +3180,7 @@ Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2, Register reg3,
if (reg5.is_valid()) regs |= reg5.bit();
if (reg6.is_valid()) regs |= reg6.bit();
- const RegisterConfiguration* config = RegisterConfiguration::Crankshaft();
+ const RegisterConfiguration* config = RegisterConfiguration::Default();
for (int i = 0; i < config->num_allocatable_general_registers(); ++i) {
int code = config->GetAllocatableGeneralCode(i);
Register candidate = Register::from_code(code);
@@ -4129,28 +3260,6 @@ void CodePatcher::EmitCondition(Condition cond) {
masm_.emit(instr);
}
-
-void MacroAssembler::TruncatingDiv(Register result, Register dividend,
- int32_t divisor) {
- DCHECK(!dividend.is(result));
- DCHECK(!dividend.is(r0));
- DCHECK(!result.is(r0));
- base::MagicNumbersForDivision<uint32_t> mag =
- base::SignedDivisionByConstant(static_cast<uint32_t>(divisor));
- mov(r0, Operand(mag.multiplier));
- mulhw(result, dividend, r0);
- bool neg = (mag.multiplier & (static_cast<uint32_t>(1) << 31)) != 0;
- if (divisor > 0 && neg) {
- add(result, result, dividend);
- }
- if (divisor < 0 && !neg && mag.multiplier > 0) {
- sub(result, result, dividend);
- }
- if (mag.shift > 0) srawi(result, result, mag.shift);
- ExtractBit(r0, dividend, 31);
- add(result, result, r0);
-}
-
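// Editor's note: the magic-number division above in C++; multiplier and
// shift come from base::SignedDivisionByConstant and are taken as given.
#include <cstdint>

int32_t TruncatingDiv(int32_t dividend, int32_t divisor, uint32_t multiplier,
                      int shift) {
  // High 32 bits of the signed 64-bit product (mulhw).
  int32_t result = static_cast<int32_t>(
      (static_cast<int64_t>(dividend) * static_cast<int32_t>(multiplier)) >>
      32);
  const bool neg = (multiplier & 0x80000000u) != 0;
  if (divisor > 0 && neg) result += dividend;
  if (divisor < 0 && !neg && multiplier > 0) result -= dividend;
  if (shift > 0) result >>= shift;                  // srawi: arithmetic shift
  result += static_cast<uint32_t>(dividend) >> 31;  // add the sign bit
  return result;
}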
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/ppc/macro-assembler-ppc.h b/deps/v8/src/ppc/macro-assembler-ppc.h
index 7577c762e8..ded1ec63ca 100644
--- a/deps/v8/src/ppc/macro-assembler-ppc.h
+++ b/deps/v8/src/ppc/macro-assembler-ppc.h
@@ -8,7 +8,6 @@
#include "src/assembler.h"
#include "src/bailout-reason.h"
#include "src/double.h"
-#include "src/frames.h"
#include "src/globals.h"
namespace v8 {
@@ -178,7 +177,7 @@ class TurboAssembler : public Assembler {
// Generates function and stub prologue code.
void StubPrologue(StackFrame::Type type, Register base = no_reg,
int prologue_offset = 0);
- void Prologue(bool code_pre_aging, Register base, int prologue_offset = 0);
+ void Prologue(Register base, int prologue_offset = 0);
// Push a standard frame, consisting of lr, fp, constant pool,
// context and JS function
@@ -316,6 +315,13 @@ class TurboAssembler : public Assembler {
void MultiPushDoubles(RegList dregs, Register location = sp);
void MultiPopDoubles(RegList dregs, Register location = sp);
+ void PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1 = no_reg,
+ Register exclusion2 = no_reg,
+ Register exclusion3 = no_reg);
+ void PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1 = no_reg,
+ Register exclusion2 = no_reg,
+ Register exclusion3 = no_reg);
+
// Load an object from the root table.
void LoadRoot(Register destination, Heap::RootListIndex index,
Condition cond = al);
@@ -408,6 +414,10 @@ class TurboAssembler : public Assembler {
Condition cond = al);
void Call(Label* target);
+ void CallForDeoptimization(Address target, RelocInfo::Mode rmode) {
+ Call(target, rmode);
+ }
+
// Emit code to discard a non-negative number of pointer-sized elements
// from the stack, clobbering only the sp register.
void Drop(int count);
@@ -705,11 +715,6 @@ class MacroAssembler : public TurboAssembler {
pointers_to_here_check_for_value);
}
- // Notify the garbage collector that we wrote a code entry into a
- // JSFunction. Only scratch is clobbered by the operation.
- void RecordWriteCodeEntryField(Register js_function, Register code_entry,
- Register scratch);
-
void RecordWriteForMap(Register object, Register map, Register dst,
LinkRegisterStatus lr_status, SaveFPRegsMode save_fp);
@@ -724,19 +729,10 @@ class MacroAssembler : public TurboAssembler {
PointersToHereCheck pointers_to_here_check_for_value =
kPointersToHereMaybeInteresting);
- void PopCommonFrame(Register marker_reg = no_reg);
-
- void PushObject(Handle<Object> handle);
// Push and pop the registers that can hold pointers, as defined by the
// RegList constant kSafepointSavedRegisters.
void PushSafepointRegisters();
void PopSafepointRegisters();
- // Store value in register src in the safepoint stack slot for
- // register dst.
- void StoreToSafepointRegisterSlot(Register src, Register dst);
- // Load the value of the src register from its safepoint stack slot
- // into register dst.
- void LoadFromSafepointRegisterSlot(Register dst, Register src);
// Loads the constant pool pointer (kConstantPoolRegister).
void LoadConstantPoolPointerRegisterFromCodeTargetAddress(
@@ -747,10 +743,6 @@ class MacroAssembler : public TurboAssembler {
// Does not handle errors.
void FlushICache(Register address, size_t size, Register scratch);
-
-
-
-
// Enter exit frame.
// stack_space - extra stack space, used for parameters before call to C.
// At least one slot (for the return address) should be provided.
@@ -764,8 +756,6 @@ class MacroAssembler : public TurboAssembler {
bool restore_context,
bool argument_count_is_length = false);
- void LoadContext(Register dst, int context_chain_length);
-
// Load the global object from the current context.
void LoadGlobalObject(Register dst) {
LoadNativeContextSlot(Context::EXTENSION_INDEX, dst);
@@ -838,8 +828,7 @@ class MacroAssembler : public TurboAssembler {
// Invoke the JavaScript function code by either calling or jumping.
void InvokeFunctionCode(Register function, Register new_target,
const ParameterCount& expected,
- const ParameterCount& actual, InvokeFlag flag,
- const CallWrapper& call_wrapper);
+ const ParameterCount& actual, InvokeFlag flag);
// On function call, call into the debugger if necessary.
void CheckDebugHook(Register fun, Register new_target,
@@ -849,19 +838,14 @@ class MacroAssembler : public TurboAssembler {
// Invoke the JavaScript function in the given register. Changes the
// current context to the context in the function before invoking.
void InvokeFunction(Register function, Register new_target,
- const ParameterCount& actual, InvokeFlag flag,
- const CallWrapper& call_wrapper);
+ const ParameterCount& actual, InvokeFlag flag);
void InvokeFunction(Register function, const ParameterCount& expected,
- const ParameterCount& actual, InvokeFlag flag,
- const CallWrapper& call_wrapper);
+ const ParameterCount& actual, InvokeFlag flag);
void InvokeFunction(Handle<JSFunction> function,
const ParameterCount& expected,
- const ParameterCount& actual, InvokeFlag flag,
- const CallWrapper& call_wrapper);
-
- void IsObjectJSStringType(Register object, Register scratch, Label* fail);
+ const ParameterCount& actual, InvokeFlag flag);
void DebugBreak();
// Frame restart support
@@ -877,44 +861,6 @@ class MacroAssembler : public TurboAssembler {
void PopStackHandler();
// ---------------------------------------------------------------------------
- // Inline caching support
-
- void GetNumberHash(Register t0, Register scratch);
-
- inline void MarkCode(NopMarkerTypes type) { nop(type); }
-
- // Check if the given instruction is a 'type' marker.
- // i.e. check if it is a mov r<type>, r<type> (referenced as nop(type))
- // These instructions are generated to mark special location in the code,
- // like some special IC code.
- static inline bool IsMarkedCode(Instr instr, int type) {
- DCHECK((FIRST_IC_MARKER <= type) && (type < LAST_CODE_MARKER));
- return IsNop(instr, type);
- }
-
-
- static inline int GetCodeMarker(Instr instr) {
- int dst_reg_offset = 12;
- int dst_mask = 0xf << dst_reg_offset;
- int src_mask = 0xf;
- int dst_reg = (instr & dst_mask) >> dst_reg_offset;
- int src_reg = instr & src_mask;
- uint32_t non_register_mask = ~(dst_mask | src_mask);
- uint32_t mov_mask = al | 13 << 21;
-
- // Return <n> if we have a mov rn, rn; else return -1.
- int type = ((instr & non_register_mask) == mov_mask) &&
- (dst_reg == src_reg) && (FIRST_IC_MARKER <= dst_reg) &&
- (dst_reg < LAST_CODE_MARKER)
- ? src_reg
- : -1;
- DCHECK((type == -1) ||
- ((FIRST_IC_MARKER <= type) && (type < LAST_CODE_MARKER)));
- return type;
- }
-
-
- // ---------------------------------------------------------------------------
// Allocation support
// Allocate an object in new space or old space. The object_size is
@@ -927,39 +873,12 @@ class MacroAssembler : public TurboAssembler {
void Allocate(int object_size, Register result, Register scratch1,
Register scratch2, Label* gc_required, AllocationFlags flags);
- void Allocate(Register object_size, Register result, Register result_end,
- Register scratch, Label* gc_required, AllocationFlags flags);
-
- // Allocates a heap number or jumps to the gc_required label if the young
- // space is full and a scavenge is needed. All registers are clobbered also
- // when control continues at the gc_required label.
- void AllocateHeapNumber(Register result, Register scratch1, Register scratch2,
- Register heap_number_map, Label* gc_required,
- MutableMode mode = IMMUTABLE);
- void AllocateHeapNumberWithValue(Register result, DoubleRegister value,
- Register scratch1, Register scratch2,
- Register heap_number_map,
- Label* gc_required);
-
// Allocate and initialize a JSValue wrapper with the specified {constructor}
// and {value}.
void AllocateJSValue(Register result, Register constructor, Register value,
Register scratch1, Register scratch2,
Label* gc_required);
- // Initialize fields with filler values. |count| fields starting at
- // |current_address| are overwritten with the value in |filler|. At the end
- // of the loop, |current_address| points at the next uninitialized field.
- // |count| is assumed to be non-zero.
- void InitializeNFieldsWithFiller(Register current_address, Register count,
- Register filler);
-
- // Initialize fields with filler values. Fields starting at |current_address|
- // not including |end_address| are overwritten with the value in |filler|. At
- // the end of the loop, |current_address| takes the value of |end_address|.
- void InitializeFieldsWithFiller(Register current_address,
- Register end_address, Register filler);
-
// ---------------------------------------------------------------------------
// Support functions.
@@ -1006,7 +925,6 @@ class MacroAssembler : public TurboAssembler {
void CheckMap(Register obj, Register scratch, Heap::RootListIndex index,
Label* fail, SmiCheckType smi_check_type);
-
void GetWeakValue(Register value, Handle<WeakCell> cell);
// Load the value of the weak cell in the value register. Branch to the given
@@ -1034,91 +952,14 @@ class MacroAssembler : public TurboAssembler {
bne(if_not_equal);
}
- // Load and check the instance type of an object for being a string.
- // Loads the type into the second argument register.
- // Returns a condition that will be enabled if the object was a string.
- Condition IsObjectStringType(Register obj, Register type) {
- LoadP(type, FieldMemOperand(obj, HeapObject::kMapOffset));
- lbz(type, FieldMemOperand(type, Map::kInstanceTypeOffset));
- andi(r0, type, Operand(kIsNotStringMask));
- DCHECK_EQ(0u, kStringTag);
- return eq;
- }
-
- // Get the number of least significant bits from a register
- void GetLeastBitsFromSmi(Register dst, Register src, int num_least_bits);
- void GetLeastBitsFromInt32(Register dst, Register src, int num_least_bits);
-
// Load the value of a smi object into a double register.
void SmiToDouble(DoubleRegister value, Register smi);
- // Check if a double can be exactly represented as a signed 32-bit integer.
- // CR_EQ in cr7 is set if true.
- void TestDoubleIsInt32(DoubleRegister double_input, Register scratch1,
- Register scratch2, DoubleRegister double_scratch);
-
- // Check if a double is equal to -0.0.
- // CR_EQ in cr7 holds the result.
- void TestDoubleIsMinusZero(DoubleRegister input, Register scratch1,
- Register scratch2);
-
- // Check the sign of a double.
- // CR_LT in cr7 holds the result.
- void TestDoubleSign(DoubleRegister input, Register scratch);
- void TestHeapNumberSign(Register input, Register scratch);
-
// Try to convert a double to a signed 32-bit integer.
// CR_EQ in cr7 is set and result assigned if the conversion is exact.
void TryDoubleToInt32Exact(Register result, DoubleRegister double_input,
Register scratch, DoubleRegister double_scratch);
- // Floor a double and writes the value to the result register.
- // Go to exact if the conversion is exact (to be able to test -0),
- // fall through calling code if an overflow occurred, else go to done.
- // In return, input_high is loaded with high bits of input.
- void TryInt32Floor(Register result, DoubleRegister double_input,
- Register input_high, Register scratch,
- DoubleRegister double_scratch, Label* done, Label* exact);
-
- // Performs a truncating conversion of a floating point number as used by
- // the JS bitwise operations. See ECMA-262 9.5: ToInt32. Goes to 'done' if it
- // succeeds, otherwise falls through if result is saturated. On return
- // 'result' either holds answer, or is clobbered on fall through.
- //
- // Only public for the test code in test-code-stubs-arm.cc.
- void TryInlineTruncateDoubleToI(Register result, DoubleRegister input,
- Label* done);
-
- // Performs a truncating conversion of a floating point number as used by
- // the JS bitwise operations. See ECMA-262 9.5: ToInt32.
- // Exits with 'result' holding the answer.
- void TruncateDoubleToI(Register result, DoubleRegister double_input);
-
- // Performs a truncating conversion of a heap number as used by
- // the JS bitwise operations. See ECMA-262 9.5: ToInt32. 'result' and 'input'
- // must be different registers. Exits with 'result' holding the answer.
- void TruncateHeapNumberToI(Register result, Register object);
-
- // Converts the smi or heap number in object to an int32 using the rules
- // for ToInt32 as described in ECMA-262 9.5: the value is truncated
- // and brought into the range -2^31 .. +2^31 - 1. 'result' and 'input' must be
- // different registers.
- void TruncateNumberToI(Register object, Register result,
- Register heap_number_map, Register scratch1,
- Label* not_int32);
-
- // Overflow handling functions.
- // Usage: call the appropriate arithmetic function and then call one of the
- // flow control functions with the corresponding label.
-
- void BranchOnOverflow(Label* label) { blt(label, cr0); }
-
- void BranchOnNoOverflow(Label* label) { bge(label, cr0); }
-
- void RetOnOverflow(void) { Ret(lt, cr0); }
-
- void RetOnNoOverflow(void) { Ret(ge, cr0); }
-
// ---------------------------------------------------------------------------
// Runtime calls
@@ -1152,9 +993,6 @@ class MacroAssembler : public TurboAssembler {
CallRuntime(Runtime::FunctionForId(fid), num_arguments, save_doubles);
}
- // Convenience function: call an external reference.
- void CallExternalReference(const ExternalReference& ext, int num_arguments);
-
// Convenience function: tail call a runtime routine (jump).
void TailCallRuntime(Runtime::FunctionId fid);
@@ -1164,10 +1002,6 @@ class MacroAssembler : public TurboAssembler {
void JumpToExternalReference(const ExternalReference& builtin,
bool builtin_exit_frame = false);
- // Emit code for a truncating division by a constant. The dividend register is
- // unchanged and ip gets clobbered. Dividend and result must be different.
- void TruncatingDiv(Register result, Register dividend, int32_t divisor);
-
// ---------------------------------------------------------------------------
// StatsCounter support
@@ -1179,28 +1013,6 @@ class MacroAssembler : public TurboAssembler {
Register scratch2);
// ---------------------------------------------------------------------------
- // Number utilities
-
- // Check whether the value of reg is a power of two and not zero. If not
- // control continues at the label not_power_of_two. If reg is a power of two
- // the register scratch contains the value of (reg - 1) when control falls
- // through.
- void JumpIfNotPowerOfTwoOrZero(Register reg, Register scratch,
- Label* not_power_of_two_or_zero);
- // Check whether the value of reg is a power of two and not zero.
- // Control falls through if it is, with scratch containing the mask
- // value (reg - 1).
- // Otherwise control jumps to the 'zero_and_neg' label if the value of reg is
- // zero or negative, or jumps to the 'not_power_of_two' label if the value is
- // strictly positive but not a power of two.
- void JumpIfNotPowerOfTwoOrZeroAndNeg(Register reg, Register scratch,
- Label* zero_and_neg,
- Label* not_power_of_two);
-
-
-
-
- // ---------------------------------------------------------------------------
// Smi utilities
// Shift left by kSmiShift
@@ -1209,34 +1021,6 @@ class MacroAssembler : public TurboAssembler {
ShiftLeftImm(dst, src, Operand(kSmiShift), rc);
}
-#if !V8_TARGET_ARCH_PPC64
- // Test for overflow < 0: use BranchOnOverflow() or BranchOnNoOverflow().
- void SmiTagCheckOverflow(Register reg, Register overflow);
- void SmiTagCheckOverflow(Register dst, Register src, Register overflow);
-
- inline void JumpIfNotSmiCandidate(Register value, Register scratch,
- Label* not_smi_label) {
- // High bits must be identical to fit into an Smi
- STATIC_ASSERT(kSmiShift == 1);
- addis(scratch, value, Operand(0x40000000u >> 16));
- cmpi(scratch, Operand::Zero());
- blt(not_smi_label);
- }
-#endif
- inline void TestUnsignedSmiCandidate(Register value, Register scratch) {
- // The test is different for unsigned int values. Since we need
- // the value to be in the range of a positive smi, we can't
- // handle any of the high bits being set in the value.
- TestBitRange(value, kBitsPerPointer - 1, kBitsPerPointer - 1 - kSmiShift,
- scratch);
- }
- inline void JumpIfNotUnsignedSmiCandidate(Register value, Register scratch,
- Label* not_smi_label) {
- TestUnsignedSmiCandidate(value, scratch);
- bne(not_smi_label, cr0);
- }
-
-
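// Editor's note: the removed 32-bit smi-candidate test in plain C++. With
// kSmiShift == 1 a value fits a smi exactly when its top two bits agree,
// which adding 0x40000000 (the addis above) exposes in the sign bit.
#include <cstdint>

bool IsSmiCandidate(int32_t value) {
  return static_cast<int32_t>(static_cast<uint32_t>(value) + 0x40000000u) >= 0;
}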
void SmiToPtrArrayOffset(Register dst, Register src) {
#if V8_TARGET_ARCH_PPC64
STATIC_ASSERT(kSmiTag == 0 && kSmiShift > kPointerSizeLog2);
@@ -1247,82 +1031,15 @@ class MacroAssembler : public TurboAssembler {
#endif
}
- void SmiToByteArrayOffset(Register dst, Register src) { SmiUntag(dst, src); }
-
- void SmiToShortArrayOffset(Register dst, Register src) {
-#if V8_TARGET_ARCH_PPC64
- STATIC_ASSERT(kSmiTag == 0 && kSmiShift > 1);
- ShiftRightArithImm(dst, src, kSmiShift - 1);
-#else
- STATIC_ASSERT(kSmiTag == 0 && kSmiShift == 1);
- if (!dst.is(src)) {
- mr(dst, src);
- }
-#endif
- }
-
- void SmiToIntArrayOffset(Register dst, Register src) {
-#if V8_TARGET_ARCH_PPC64
- STATIC_ASSERT(kSmiTag == 0 && kSmiShift > 2);
- ShiftRightArithImm(dst, src, kSmiShift - 2);
-#else
- STATIC_ASSERT(kSmiTag == 0 && kSmiShift < 2);
- ShiftLeftImm(dst, src, Operand(2 - kSmiShift));
-#endif
- }
-
-#define SmiToFloatArrayOffset SmiToIntArrayOffset
-
- void SmiToDoubleArrayOffset(Register dst, Register src) {
-#if V8_TARGET_ARCH_PPC64
- STATIC_ASSERT(kSmiTag == 0 && kSmiShift > kDoubleSizeLog2);
- ShiftRightArithImm(dst, src, kSmiShift - kDoubleSizeLog2);
-#else
- STATIC_ASSERT(kSmiTag == 0 && kSmiShift < kDoubleSizeLog2);
- ShiftLeftImm(dst, src, Operand(kDoubleSizeLog2 - kSmiShift));
-#endif
- }
-
- void SmiToArrayOffset(Register dst, Register src, int elementSizeLog2) {
- if (kSmiShift < elementSizeLog2) {
- ShiftLeftImm(dst, src, Operand(elementSizeLog2 - kSmiShift));
- } else if (kSmiShift > elementSizeLog2) {
- ShiftRightArithImm(dst, src, kSmiShift - elementSizeLog2);
- } else if (!dst.is(src)) {
- mr(dst, src);
- }
- }
-
- void IndexToArrayOffset(Register dst, Register src, int elementSizeLog2,
- bool isSmi) {
- if (isSmi) {
- SmiToArrayOffset(dst, src, elementSizeLog2);
- } else {
- ShiftLeftImm(dst, src, Operand(elementSizeLog2));
- }
- }
-
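// Editor's note: every removed SmiTo*ArrayOffset helper reduces to the same
// shift computation; a sketch with kSmiShift passed in as an assumption.
#include <cstdint>

intptr_t SmiToArrayOffset(intptr_t smi, int element_size_log2, int smi_shift) {
  // A smi stores value << smi_shift, so rescale the tag shift into the
  // element-size shift.
  if (element_size_log2 >= smi_shift)
    return smi << (element_size_log2 - smi_shift);
  return smi >> (smi_shift - element_size_log2);  // arithmetic shift
}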
// Untag the source value into destination and jump if source is a smi.
// Source and destination can be the same register.
void UntagAndJumpIfSmi(Register dst, Register src, Label* smi_case);
-
- inline void TestIfPositiveSmi(Register value, Register scratch) {
-#if V8_TARGET_ARCH_PPC64
- rldicl(scratch, value, 1, kBitsPerPointer - (1 + kSmiTagSize), SetRC);
-#else
- rlwinm(scratch, value, 1, kBitsPerPointer - (1 + kSmiTagSize),
- kBitsPerPointer - 1, SetRC);
-#endif
- }
-
// Jump if either of the registers contain a non-smi.
inline void JumpIfNotSmi(Register value, Label* not_smi_label) {
TestIfSmi(value, r0);
bne(not_smi_label, cr0);
}
- // Jump if either of the registers contain a non-smi.
- void JumpIfNotBothSmi(Register reg1, Register reg2, Label* on_not_both_smi);
// Jump if either of the registers contain a smi.
void JumpIfEitherSmi(Register reg1, Register reg2, Label* on_either_smi);
@@ -1333,7 +1050,7 @@ class MacroAssembler : public TurboAssembler {
#if V8_TARGET_ARCH_PPC64
- // Ensure it is permissable to read/write int value directly from
+ // Ensure it is permissible to read/write int value directly from
// upper half of the smi.
STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32);
@@ -1361,16 +1078,6 @@ class MacroAssembler : public TurboAssembler {
// via --debug-code.
void AssertUndefinedOrAllocationSite(Register object, Register scratch);
- // Abort execution if reg is not the root value with the given index,
- // enabled via --debug-code.
- void AssertIsRoot(Register reg, Heap::RootListIndex index);
-
- // ---------------------------------------------------------------------------
- // HeapNumber utilities
-
- void JumpIfNotHeapNumber(Register object, Register heap_number_map,
- Register scratch, Label* on_not_heap_number);
-
// ---------------------------------------------------------------------------
// String utilities
@@ -1382,13 +1089,6 @@ class MacroAssembler : public TurboAssembler {
Register scratch2,
Label* failure);
- // Checks if both objects are sequential one-byte strings and jumps to label
- // if either is not.
- void JumpIfNotBothSequentialOneByteStrings(Register first, Register second,
- Register scratch1,
- Register scratch2,
- Label* not_flat_one_byte_strings);
-
// Checks if both instance types are sequential one-byte strings and jumps to
// label if either is not.
void JumpIfBothInstanceTypesAreNotSequentialOneByte(
@@ -1397,29 +1097,10 @@ class MacroAssembler : public TurboAssembler {
void JumpIfNotUniqueNameInstanceType(Register reg, Label* not_unique_name);
- void EmitSeqStringSetCharCheck(Register string, Register index,
- Register value, uint32_t encoding_mask);
-
// ---------------------------------------------------------------------------
// Patching helpers.
- // Decode offset from constant pool load instruction(s).
- // Caller must place the instruction word at <location> in <result>.
- void DecodeConstantPoolOffset(Register result, Register location);
-
- void ClampUint8(Register output_reg, Register input_reg);
-
- // Saturate a value into an 8-bit unsigned integer:
- // if input_value < 0, output_value is 0
- // if input_value > 255, output_value is 255
- // otherwise output_value is the (int)input_value (round to nearest)
- void ClampDoubleToUint8(Register result_reg, DoubleRegister input_reg,
- DoubleRegister temp_double_reg);
-
-
void LoadInstanceDescriptors(Register map, Register descriptors);
- void EnumLength(Register dst, Register map);
- void NumberOfOwnDescriptors(Register dst, Register map);
void LoadAccessor(Register dst, Register holder, int accessor_index,
AccessorComponent accessor);
@@ -1434,59 +1115,16 @@ class MacroAssembler : public TurboAssembler {
DecodeField<Field>(reg, reg, rc);
}
- template <typename Field>
- void DecodeFieldToSmi(Register dst, Register src) {
-#if V8_TARGET_ARCH_PPC64
- DecodeField<Field>(dst, src);
- SmiTag(dst);
-#else
- // 32-bit can do this in one instruction:
- int start = Field::kSize + kSmiShift - 1;
- int end = kSmiShift;
- int rotate = kSmiShift - Field::kShift;
- if (rotate < 0) {
- rotate += kBitsPerPointer;
- }
- rlwinm(dst, src, rotate, kBitsPerPointer - start - 1,
- kBitsPerPointer - end - 1);
-#endif
- }
-
- template <typename Field>
- void DecodeFieldToSmi(Register reg) {
- DecodeFieldToSmi<Field>(reg, reg);
- }
-
- // Load the type feedback vector from a JavaScript frame.
- void EmitLoadFeedbackVector(Register vector);
-
-
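// Editor's note: what the one-instruction 32-bit rlwinm in DecodeFieldToSmi
// computes, spelled out in C++; the field parameters are assumptions here.
#include <cstdint>

uint32_t DecodeFieldToSmi(uint32_t word, int field_shift, int field_size,
                          int smi_shift) {
  const uint32_t field = (word >> field_shift) & ((1u << field_size) - 1);
  return field << smi_shift;  // result is already a tagged smi
}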
void EnterBuiltinFrame(Register context, Register target, Register argc);
void LeaveBuiltinFrame(Register context, Register target, Register argc);
- // Expects object in r3 and returns map with validated enum cache
- // in r3. Assumes that any other register can be used as a scratch.
- void CheckEnumCache(Label* call_runtime);
-
- // AllocationMemento support. Arrays may have an associated
- // AllocationMemento object that can be checked for in order to pretransition
- // to another type.
- // On entry, receiver_reg should point to the array object.
- // scratch_reg gets clobbered.
- // If allocation info is present, condition flags are set to eq.
- void TestJSArrayForAllocationMemento(Register receiver_reg,
- Register scratch_reg,
- Register scratch2_reg,
- Label* no_memento_found);
-
private:
static const int kSmiShift = kSmiTagSize + kSmiShiftSize;
// Helper functions for generating invokes.
void InvokePrologue(const ParameterCount& expected,
const ParameterCount& actual, Label* done,
- bool* definitely_mismatches, InvokeFlag flag,
- const CallWrapper& call_wrapper);
+ bool* definitely_mismatches, InvokeFlag flag);
// Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
void InNewSpace(Register object, Register scratch,
@@ -1504,9 +1142,6 @@ class MacroAssembler : public TurboAssembler {
// Compute memory operands for safepoint stack slots.
static int SafepointRegisterStackIndex(int reg_code);
- MemOperand SafepointRegisterSlot(Register reg);
- MemOperand SafepointRegistersAndDoublesSlot(Register reg);
-
// Needs access to SafepointRegisterStackIndex for compiled frame
// traversal.
diff --git a/deps/v8/src/ppc/simulator-ppc.cc b/deps/v8/src/ppc/simulator-ppc.cc
index 0face8c562..b643004aa3 100644
--- a/deps/v8/src/ppc/simulator-ppc.cc
+++ b/deps/v8/src/ppc/simulator-ppc.cc
@@ -13,7 +13,7 @@
#include "src/codegen.h"
#include "src/disasm.h"
#include "src/ppc/constants-ppc.h"
-#include "src/ppc/frames-ppc.h"
+#include "src/ppc/frame-constants-ppc.h"
#include "src/ppc/simulator-ppc.h"
#include "src/runtime/runtime-utils.h"
@@ -23,7 +23,7 @@
namespace v8 {
namespace internal {
-const auto GetRegConfig = RegisterConfiguration::Crankshaft;
+const auto GetRegConfig = RegisterConfiguration::Default;
// static
base::LazyInstance<Simulator::GlobalMonitor>::type Simulator::global_monitor_ =
@@ -4059,7 +4059,7 @@ void Simulator::Execute() {
}
} else {
// FLAG_stop_sim_at is at the non-default value. Stop in the debugger when
- // we reach the particular instuction count.
+ // we reach the particular instruction count.
while (program_counter != end_sim_pc) {
Instruction* instr = reinterpret_cast<Instruction*>(program_counter);
icount_++;
diff --git a/deps/v8/src/ppc/simulator-ppc.h b/deps/v8/src/ppc/simulator-ppc.h
index 92da0d5811..aba6c3671b 100644
--- a/deps/v8/src/ppc/simulator-ppc.h
+++ b/deps/v8/src/ppc/simulator-ppc.h
@@ -402,7 +402,7 @@ class Simulator {
};
StopCountAndDesc watched_stops_[kNumOfWatchedStops];
- // Syncronization primitives. See ARM DDI 0406C.b, A2.9.
+ // Synchronization primitives. See ARM DDI 0406C.b, A2.9.
enum class MonitorAccess {
Open,
Exclusive,