Diffstat (limited to 'deps/v8/src/arm64/code-stubs-arm64.cc')
-rw-r--r--  deps/v8/src/arm64/code-stubs-arm64.cc | 738
1 file changed, 46 insertions, 692 deletions
diff --git a/deps/v8/src/arm64/code-stubs-arm64.cc b/deps/v8/src/arm64/code-stubs-arm64.cc
index 2a994fca01..1ad50e5112 100644
--- a/deps/v8/src/arm64/code-stubs-arm64.cc
+++ b/deps/v8/src/arm64/code-stubs-arm64.cc
@@ -9,12 +9,10 @@
#include "src/arm64/macro-assembler-arm64-inl.h"
#include "src/bootstrapper.h"
#include "src/code-stubs.h"
-#include "src/codegen.h"
#include "src/counters.h"
#include "src/frame-constants.h"
#include "src/frames.h"
#include "src/heap/heap-inl.h"
-#include "src/ic/handler-compiler.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
#include "src/isolate.h"
@@ -41,35 +39,21 @@ void ArrayNArgumentsConstructorStub::Generate(MacroAssembler* masm) {
void DoubleToIStub::Generate(MacroAssembler* masm) {
Label done;
- Register input = source();
Register result = destination();
- DCHECK(is_truncating());
DCHECK(result.Is64Bits());
DCHECK(jssp.Is(masm->StackPointer()));
- int double_offset = offset();
-
- DoubleRegister double_scratch = d0; // only used if !skip_fastpath()
- Register scratch1 = GetAllocatableRegisterThatIsNotOneOf(input, result);
- Register scratch2 =
- GetAllocatableRegisterThatIsNotOneOf(input, result, scratch1);
-
- __ Push(scratch1, scratch2);
- // Account for saved regs if input is jssp.
- if (input.is(jssp)) double_offset += 2 * kPointerSize;
-
- if (!skip_fastpath()) {
- __ Push(double_scratch);
- if (input.is(jssp)) double_offset += 1 * kDoubleSize;
- __ Ldr(double_scratch, MemOperand(input, double_offset));
- // Try to convert with a FPU convert instruction. This handles all
- // non-saturating cases.
- __ TryConvertDoubleToInt64(result, double_scratch, &done);
- __ Fmov(result, double_scratch);
- } else {
- __ Ldr(result, MemOperand(input, double_offset));
- }
+ UseScratchRegisterScope temps(masm);
+ Register scratch1 = temps.AcquireX();
+ Register scratch2 = temps.AcquireX();
+ DoubleRegister double_scratch = temps.AcquireD();
+
+ __ Peek(double_scratch, 0);
+ // Try to convert with a FPU convert instruction. This handles all
+ // non-saturating cases.
+ __ TryConvertDoubleToInt64(result, double_scratch, &done);
+ __ Fmov(result, double_scratch);
// If we reach here we need to manually convert the input to an int32.
@@ -110,55 +94,10 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
__ Lsl(result, mantissa, exponent);
__ Bind(&done);
- if (!skip_fastpath()) {
- __ Pop(double_scratch);
- }
- __ Pop(scratch2, scratch1);
- __ Ret();
-}
-
-
-void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
- CPURegList saved_regs = kCallerSaved;
- CPURegList saved_fp_regs = kCallerSavedV;
-
- // We don't allow a GC during a store buffer overflow so there is no need to
- // store the registers in any particular way, but we do have to store and
- // restore them.
-
- // We don't care if MacroAssembler scratch registers are corrupted.
- saved_regs.Remove(*(masm->TmpList()));
- saved_fp_regs.Remove(*(masm->FPTmpList()));
- DCHECK_EQ(saved_regs.Count() % 2, 0);
- DCHECK_EQ(saved_fp_regs.Count() % 2, 0);
-
- __ PushCPURegList(saved_regs);
- if (save_doubles()) {
- __ PushCPURegList(saved_fp_regs);
- }
-
- AllowExternalCallThatCantCauseGC scope(masm);
- __ Mov(x0, ExternalReference::isolate_address(isolate()));
- __ CallCFunction(
- ExternalReference::store_buffer_overflow_function(isolate()), 1, 0);
-
- if (save_doubles()) {
- __ PopCPURegList(saved_fp_regs);
- }
- __ PopCPURegList(saved_regs);
__ Ret();
}
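
The slow path above ("manually convert the input to an int32") extracts the
double's exponent and mantissa and shifts the mantissa into place, keeping only
the low 32 bits. A minimal standalone C++ sketch of those semantics, assuming
only the IEEE-754 double layout (illustrative, not the V8 code):

#include <cstdint>
#include <cstring>

int32_t ToInt32SlowPath(double input) {
  uint64_t bits;
  std::memcpy(&bits, &input, sizeof(bits));
  int exponent = static_cast<int>((bits >> 52) & 0x7FF) - 1075;
  uint64_t mantissa = (bits & ((uint64_t{1} << 52) - 1)) | (uint64_t{1} << 52);
  // exponent >= 32: the value is a multiple of 2^32; exponent <= -53: |x| < 1.
  // Either way the truncated low 32 bits are zero (this also covers NaN/Inf).
  if (exponent >= 32 || exponent <= -53) return 0;
  uint64_t shifted =
      exponent < 0 ? (mantissa >> -exponent) : (mantissa << exponent);
  uint32_t low32 = static_cast<uint32_t>(shifted);
  if (bits >> 63) low32 = 0u - low32;  // negate modulo 2^32 for negative input
  return static_cast<int32_t>(low32);
}
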
-void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(
- Isolate* isolate) {
- StoreBufferOverflowStub stub1(isolate, kDontSaveFPRegs);
- stub1.GetCode();
- StoreBufferOverflowStub stub2(isolate, kSaveFPRegs);
- stub2.GetCode();
-}
-
-
void MathPowStub::Generate(MacroAssembler* masm) {
// Stack on entry:
// jssp[0]: Exponent (as a tagged value).
@@ -284,10 +223,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
// It is important that the following stubs are generated in this order
// because pregenerated stubs can only call other pregenerated stubs.
- // RecordWriteStub uses StoreBufferOverflowStub, which in turn uses
- // CEntryStub.
CEntryStub::GenerateAheadOfTime(isolate);
- StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
CommonArrayConstructorStub::GenerateStubsAheadOfTime(isolate);
StoreFastElementStub::GenerateAheadOfTime(isolate);
}
@@ -299,8 +235,7 @@ void CodeStub::GenerateFPStubs(Isolate* isolate) {
USE(isolate);
}
-
-bool CEntryStub::NeedsImmovableCode() {
+Movability CEntryStub::NeedsImmovableCode() {
// CEntryStub stores the return address on the stack before calling into
// C++ code. In some cases, the VM accesses this address, but it is not used
// when the C++ code returns to the stub because LR holds the return address
@@ -309,7 +244,7 @@ bool CEntryStub::NeedsImmovableCode() {
// TODO(jbramley): Whilst this is the only analysis that makes sense, I can't
// find any comment to confirm this, and I don't hit any crashes whatever
// this function returns. The analysis should be properly confirmed.
- return true;
+ return kImmovable;
}
@@ -369,7 +304,7 @@ void CEntryStub::Generate(MacroAssembler* masm) {
// - Adjust for the arg[] array.
Register temp_argv = x11;
if (!argv_in_register()) {
- __ Add(temp_argv, jssp, Operand(x0, LSL, kPointerSizeLog2));
+ __ SlotAddress(temp_argv, x0);
// - Adjust for the receiver.
__ Sub(temp_argv, temp_argv, 1 * kPointerSize);
}
@@ -480,11 +415,11 @@ void CEntryStub::Generate(MacroAssembler* masm) {
__ Peek(argc, 2 * kPointerSize);
__ Peek(target, 3 * kPointerSize);
- __ LeaveExitFrame(save_doubles(), x10, true);
+ __ LeaveExitFrame(save_doubles(), x10);
DCHECK(jssp.Is(__ StackPointer()));
if (!argv_in_register()) {
// Drop the remaining stack slots and return from the stub.
- __ Drop(x11);
+ __ DropArguments(x11);
}
__ AssertFPCRState();
__ Ret();
@@ -498,10 +433,8 @@ void CEntryStub::Generate(MacroAssembler* masm) {
ExternalReference pending_handler_context_address(
IsolateAddressId::kPendingHandlerContextAddress, isolate());
- ExternalReference pending_handler_code_address(
- IsolateAddressId::kPendingHandlerCodeAddress, isolate());
- ExternalReference pending_handler_offset_address(
- IsolateAddressId::kPendingHandlerOffsetAddress, isolate());
+ ExternalReference pending_handler_entrypoint_address(
+ IsolateAddressId::kPendingHandlerEntrypointAddress, isolate());
ExternalReference pending_handler_fp_address(
IsolateAddressId::kPendingHandlerFPAddress, isolate());
ExternalReference pending_handler_sp_address(
@@ -543,12 +476,8 @@ void CEntryStub::Generate(MacroAssembler* masm) {
__ Bind(&not_js_frame);
// Compute the handler entry address and jump to it.
- __ Mov(x10, Operand(pending_handler_code_address));
+ __ Mov(x10, Operand(pending_handler_entrypoint_address));
__ Ldr(x10, MemOperand(x10));
- __ Mov(x11, Operand(pending_handler_offset_address));
- __ Ldr(x11, MemOperand(x11));
- __ Add(x10, x10, Code::kHeaderSize - kHeapObjectTag);
- __ Add(x10, x10, x11);
__ Br(x10);
}
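
The removed pair of loads plus arithmetic rebuilt the handler entrypoint from a
tagged Code object and a separate offset; the replacement loads a single
precomputed entrypoint. Roughly, in standalone C++ (kHeapObjectTag is V8's
usual 1; the header size here is only an assumed placeholder):

#include <cstdint>

uintptr_t OldPendingHandlerEntrypoint(uintptr_t tagged_code_object,
                                      uintptr_t handler_offset) {
  const uintptr_t kHeapObjectTag = 1;    // heap pointers carry a 1-bit tag
  const uintptr_t kCodeHeaderSize = 96;  // assumed stand-in for Code::kHeaderSize
  // Instruction start = untagged object address + header size; then add the
  // offset of the handler within the instruction stream.
  return tagged_code_object - kHeapObjectTag + kCodeHeaderSize + handler_offset;
}
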
@@ -610,7 +539,7 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
// Select between the inner and outermost frame marker, based on the JS entry
// sp. We assert that the inner marker is zero, so we can use xzr to save a
// move instruction.
- DCHECK(StackFrame::INNER_JSENTRY_FRAME == 0);
+ DCHECK_EQ(StackFrame::INNER_JSENTRY_FRAME, 0);
__ Cmp(x11, 0); // If x11 is zero, this is the outermost frame.
__ Csel(x12, xzr, StackFrame::OUTERMOST_JSENTRY_FRAME, ne);
__ B(ne, &done);
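
In plain C++ the marker selection above amounts to the following (the
OUTERMOST_JSENTRY_FRAME value is an assumed placeholder; INNER_JSENTRY_FRAME is
zero, which is what lets the code use xzr):

#include <cstdint>

uint64_t SelectEntryFrameMarker(uint64_t js_entry_sp) {
  const uint64_t kInnerJSEntryFrame = 0;      // asserted above, hence xzr
  const uint64_t kOutermostJSEntryFrame = 1;  // assumed placeholder value
  return (js_entry_sp == 0) ? kOutermostJSEntryFrame : kInnerJSEntryFrame;
}
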
@@ -738,371 +667,6 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
__ Ret();
}
-
-void StringHelper::GenerateFlatOneByteStringEquals(
- MacroAssembler* masm, Register left, Register right, Register scratch1,
- Register scratch2, Register scratch3) {
- DCHECK(!AreAliased(left, right, scratch1, scratch2, scratch3));
- Register result = x0;
- Register left_length = scratch1;
- Register right_length = scratch2;
-
- // Compare lengths. If lengths differ, strings can't be equal. Lengths are
- // smis, and don't need to be untagged.
- Label strings_not_equal, check_zero_length;
- __ Ldr(left_length, FieldMemOperand(left, String::kLengthOffset));
- __ Ldr(right_length, FieldMemOperand(right, String::kLengthOffset));
- __ Cmp(left_length, right_length);
- __ B(eq, &check_zero_length);
-
- __ Bind(&strings_not_equal);
- __ Mov(result, Smi::FromInt(NOT_EQUAL));
- __ Ret();
-
- // Check if the length is zero. If so, the strings must be equal (and empty.)
- Label compare_chars;
- __ Bind(&check_zero_length);
- STATIC_ASSERT(kSmiTag == 0);
- __ Cbnz(left_length, &compare_chars);
- __ Mov(result, Smi::FromInt(EQUAL));
- __ Ret();
-
- // Compare characters. Falls through if all characters are equal.
- __ Bind(&compare_chars);
- GenerateOneByteCharsCompareLoop(masm, left, right, left_length, scratch2,
- scratch3, &strings_not_equal);
-
- // Characters in strings are equal.
- __ Mov(result, Smi::FromInt(EQUAL));
- __ Ret();
-}
-
-
-void StringHelper::GenerateCompareFlatOneByteStrings(
- MacroAssembler* masm, Register left, Register right, Register scratch1,
- Register scratch2, Register scratch3, Register scratch4) {
- DCHECK(!AreAliased(left, right, scratch1, scratch2, scratch3, scratch4));
- Label result_not_equal, compare_lengths;
-
- // Find minimum length and length difference.
- Register length_delta = scratch3;
- __ Ldr(scratch1, FieldMemOperand(left, String::kLengthOffset));
- __ Ldr(scratch2, FieldMemOperand(right, String::kLengthOffset));
- __ Subs(length_delta, scratch1, scratch2);
-
- Register min_length = scratch1;
- __ Csel(min_length, scratch2, scratch1, gt);
- __ Cbz(min_length, &compare_lengths);
-
- // Compare loop.
- GenerateOneByteCharsCompareLoop(masm, left, right, min_length, scratch2,
- scratch4, &result_not_equal);
-
- // Compare lengths - strings up to min-length are equal.
- __ Bind(&compare_lengths);
-
- DCHECK(Smi::FromInt(EQUAL) == static_cast<Smi*>(0));
-
- // Use length_delta as result if it's zero.
- Register result = x0;
- __ Subs(result, length_delta, 0);
-
- __ Bind(&result_not_equal);
- Register greater = x10;
- Register less = x11;
- __ Mov(greater, Smi::FromInt(GREATER));
- __ Mov(less, Smi::FromInt(LESS));
- __ CmovX(result, greater, gt);
- __ CmovX(result, less, lt);
- __ Ret();
-}
-
-
-void StringHelper::GenerateOneByteCharsCompareLoop(
- MacroAssembler* masm, Register left, Register right, Register length,
- Register scratch1, Register scratch2, Label* chars_not_equal) {
- DCHECK(!AreAliased(left, right, length, scratch1, scratch2));
-
- // Change index to run from -length to -1 by adding length to string
- // start. This means that loop ends when index reaches zero, which
- // doesn't need an additional compare.
- __ SmiUntag(length);
- __ Add(scratch1, length, SeqOneByteString::kHeaderSize - kHeapObjectTag);
- __ Add(left, left, scratch1);
- __ Add(right, right, scratch1);
-
- Register index = length;
- __ Neg(index, length); // index = -length;
-
- // Compare loop
- Label loop;
- __ Bind(&loop);
- __ Ldrb(scratch1, MemOperand(left, index));
- __ Ldrb(scratch2, MemOperand(right, index));
- __ Cmp(scratch1, scratch2);
- __ B(ne, chars_not_equal);
- __ Add(index, index, 1);
- __ Cbnz(index, &loop);
-}
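
The negative-index trick described in the comment above (bias both base
pointers by the length so the loop terminates exactly when the index reaches
zero) looks like this in portable C++; a sketch of the technique, not the stub
itself:

#include <cstddef>
#include <cstdint>

bool OneByteCharsEqual(const uint8_t* left, const uint8_t* right,
                       size_t length) {
  // Point both bases one-past-the-end, then index from -length up to 0 so the
  // only exit test is "index == 0"; no separate length counter is needed.
  left += length;
  right += length;
  for (intptr_t index = -static_cast<intptr_t>(length); index != 0; ++index) {
    if (left[index] != right[index]) return false;
  }
  return true;
}
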
-
-
-RecordWriteStub::RegisterAllocation::RegisterAllocation(Register object,
- Register address,
- Register scratch)
- : object_(object),
- address_(address),
- scratch0_(scratch),
- saved_regs_(kCallerSaved),
- saved_fp_regs_(kCallerSavedV) {
- DCHECK(!AreAliased(scratch, object, address));
-
- // The SaveCallerSaveRegisters method needs to save caller-saved
- // registers, but we don't bother saving MacroAssembler scratch registers.
- saved_regs_.Remove(MacroAssembler::DefaultTmpList());
- saved_fp_regs_.Remove(MacroAssembler::DefaultFPTmpList());
-
- // We would like to require more scratch registers for this stub,
- // but the number of registers comes down to the ones used in
- // FullCodeGen::SetVar(), which is architecture independent.
- // We allocate 2 extra scratch registers that we'll save on the stack.
- CPURegList pool_available = GetValidRegistersForAllocation();
- CPURegList used_regs(object, address, scratch);
- pool_available.Remove(used_regs);
- scratch1_ = pool_available.PopLowestIndex().Reg();
- scratch2_ = pool_available.PopLowestIndex().Reg();
-
- // The scratch registers will be restored by other means so we don't need
- // to save them with the other caller saved registers.
- saved_regs_.Remove(scratch0_);
- saved_regs_.Remove(scratch1_);
- saved_regs_.Remove(scratch2_);
-}
-
-RecordWriteStub::Mode RecordWriteStub::GetMode(Code* stub) {
- // Find the mode depending on the first two instructions.
- Instruction* instr1 =
- reinterpret_cast<Instruction*>(stub->instruction_start());
- Instruction* instr2 = instr1->following();
-
- if (instr1->IsUncondBranchImm()) {
- DCHECK(instr2->IsPCRelAddressing() && (instr2->Rd() == xzr.code()));
- return INCREMENTAL;
- }
-
- DCHECK(instr1->IsPCRelAddressing() && (instr1->Rd() == xzr.code()));
-
- if (instr2->IsUncondBranchImm()) {
- return INCREMENTAL_COMPACTION;
- }
-
- DCHECK(instr2->IsPCRelAddressing());
-
- return STORE_BUFFER_ONLY;
-}
-
-// We patch the first two instructions of the stub back and forth between an
-// adr and branch when we start and stop incremental heap marking.
-// The branch is
-// b label
-// The adr is
-// adr xzr label
-// so effectively a nop.
-void RecordWriteStub::Patch(Code* stub, Mode mode) {
- // We are going to patch the first two instructions of the stub.
- PatchingAssembler patcher(stub->GetIsolate(), stub->instruction_start(), 2);
- Instruction* instr1 = patcher.InstructionAt(0);
- Instruction* instr2 = patcher.InstructionAt(kInstructionSize);
- // Instructions must be either 'adr' or 'b'.
- DCHECK(instr1->IsPCRelAddressing() || instr1->IsUncondBranchImm());
- DCHECK(instr2->IsPCRelAddressing() || instr2->IsUncondBranchImm());
- // Retrieve the offsets to the labels.
- auto offset_to_incremental_noncompacting =
- static_cast<int32_t>(instr1->ImmPCOffset());
- auto offset_to_incremental_compacting =
- static_cast<int32_t>(instr2->ImmPCOffset());
-
- switch (mode) {
- case STORE_BUFFER_ONLY:
- DCHECK(GetMode(stub) == INCREMENTAL ||
- GetMode(stub) == INCREMENTAL_COMPACTION);
- patcher.adr(xzr, offset_to_incremental_noncompacting);
- patcher.adr(xzr, offset_to_incremental_compacting);
- break;
- case INCREMENTAL:
- DCHECK(GetMode(stub) == STORE_BUFFER_ONLY);
- patcher.b(offset_to_incremental_noncompacting >> kInstructionSizeLog2);
- patcher.adr(xzr, offset_to_incremental_compacting);
- break;
- case INCREMENTAL_COMPACTION:
- DCHECK(GetMode(stub) == STORE_BUFFER_ONLY);
- patcher.adr(xzr, offset_to_incremental_noncompacting);
- patcher.b(offset_to_incremental_compacting >> kInstructionSizeLog2);
- break;
- }
- DCHECK(GetMode(stub) == mode);
-}
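
Patch() and GetMode() together form a tiny protocol: the mode is encoded
entirely in whether each of the first two instructions is an adr (a nop in
effect) or a branch. A simplified standalone restatement (stand-in types, not
V8's Instruction class):

enum class Prologue { kAdr, kBranch };  // adr xzr, label  vs.  b label
enum Mode { STORE_BUFFER_ONLY, INCREMENTAL, INCREMENTAL_COMPACTION };

Mode DecodeRecordWriteMode(Prologue first, Prologue second) {
  if (first == Prologue::kBranch) return INCREMENTAL;              // b,   adr
  if (second == Prologue::kBranch) return INCREMENTAL_COMPACTION;  // adr, b
  return STORE_BUFFER_ONLY;                                        // adr, adr
}
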
-
-void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
- // We need some extra registers for this stub, they have been allocated
- // but we need to save them before using them.
- regs_.Save(masm);
-
- if (remembered_set_action() == EMIT_REMEMBERED_SET) {
- Label dont_need_remembered_set;
-
- Register val = regs_.scratch0();
- __ Ldr(val, MemOperand(regs_.address()));
- __ JumpIfNotInNewSpace(val, &dont_need_remembered_set);
-
- __ JumpIfInNewSpace(regs_.object(), &dont_need_remembered_set);
-
- // First notify the incremental marker if necessary, then update the
- // remembered set.
- CheckNeedsToInformIncrementalMarker(
- masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
- InformIncrementalMarker(masm);
- regs_.Restore(masm); // Restore the extra scratch registers we used.
-
- __ RememberedSetHelper(object(), address(),
- value(), // scratch1
- save_fp_regs_mode());
-
- __ Bind(&dont_need_remembered_set);
- }
-
- CheckNeedsToInformIncrementalMarker(
- masm, kReturnOnNoNeedToInformIncrementalMarker, mode);
- InformIncrementalMarker(masm);
- regs_.Restore(masm); // Restore the extra scratch registers we used.
- __ Ret();
-}
-
-
-void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) {
- regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode());
- Register address =
- x0.Is(regs_.address()) ? regs_.scratch0() : regs_.address();
- DCHECK(!address.Is(regs_.object()));
- DCHECK(!address.Is(x0));
- __ Mov(address, regs_.address());
- __ Mov(x0, regs_.object());
- __ Mov(x1, address);
- __ Mov(x2, ExternalReference::isolate_address(isolate()));
-
- AllowExternalCallThatCantCauseGC scope(masm);
- ExternalReference function =
- ExternalReference::incremental_marking_record_write_function(
- isolate());
- __ CallCFunction(function, 3, 0);
-
- regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode());
-}
-
-void RecordWriteStub::Activate(Code* code) {
- code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
-}
-
-void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
- MacroAssembler* masm,
- OnNoNeedToInformIncrementalMarker on_no_need,
- Mode mode) {
- Label need_incremental;
- Label need_incremental_pop_scratch;
-
-#ifndef V8_CONCURRENT_MARKING
- Label on_black;
- // If the object is not black we don't have to inform the incremental marker.
- __ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black);
-
- regs_.Restore(masm); // Restore the extra scratch registers we used.
- if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
- __ RememberedSetHelper(object(), address(),
- value(), // scratch1
- save_fp_regs_mode());
- } else {
- __ Ret();
- }
-
- __ Bind(&on_black);
-#endif
-
- // Get the value from the slot.
- Register val = regs_.scratch0();
- __ Ldr(val, MemOperand(regs_.address()));
-
- if (mode == INCREMENTAL_COMPACTION) {
- Label ensure_not_white;
-
- __ CheckPageFlagClear(val, regs_.scratch1(),
- MemoryChunk::kEvacuationCandidateMask,
- &ensure_not_white);
-
- __ CheckPageFlagClear(regs_.object(),
- regs_.scratch1(),
- MemoryChunk::kSkipEvacuationSlotsRecordingMask,
- &need_incremental);
-
- __ Bind(&ensure_not_white);
- }
-
- // We need extra registers for this, so we push the object and the address
- // register temporarily.
- __ Push(regs_.address(), regs_.object());
- __ JumpIfWhite(val,
- regs_.scratch1(), // Scratch.
- regs_.object(), // Scratch.
- regs_.address(), // Scratch.
- regs_.scratch2(), // Scratch.
- &need_incremental_pop_scratch);
- __ Pop(regs_.object(), regs_.address());
-
- regs_.Restore(masm); // Restore the extra scratch registers we used.
- if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
- __ RememberedSetHelper(object(), address(),
- value(), // scratch1
- save_fp_regs_mode());
- } else {
- __ Ret();
- }
-
- __ Bind(&need_incremental_pop_scratch);
- __ Pop(regs_.object(), regs_.address());
-
- __ Bind(&need_incremental);
- // Fall through when we need to inform the incremental marker.
-}
-
-
-void RecordWriteStub::Generate(MacroAssembler* masm) {
- Label skip_to_incremental_noncompacting;
- Label skip_to_incremental_compacting;
-
- // We patch these first two instructions back and forth between a nop and a
- // real branch when we start and stop incremental heap marking.
- // Initially the stub is expected to be in STORE_BUFFER_ONLY mode, so 2 nops
- // are generated.
- // See RecordWriteStub::Patch for details.
- {
- InstructionAccurateScope scope(masm, 2);
- __ adr(xzr, &skip_to_incremental_noncompacting);
- __ adr(xzr, &skip_to_incremental_compacting);
- }
-
- if (remembered_set_action() == EMIT_REMEMBERED_SET) {
- __ RememberedSetHelper(object(), address(),
- value(), // scratch1
- save_fp_regs_mode());
- }
- __ Ret();
-
- __ Bind(&skip_to_incremental_noncompacting);
- GenerateIncremental(masm, INCREMENTAL);
-
- __ Bind(&skip_to_incremental_compacting);
- GenerateIncremental(masm, INCREMENTAL_COMPACTION);
-}
-
-
// The entry hook is a "BumpSystemStackPointer" instruction (sub), followed by
// a "Push lr" instruction, followed by a call.
static const unsigned int kProfileEntryHookCallSize =
@@ -1110,21 +674,21 @@ static const unsigned int kProfileEntryHookCallSize =
void ProfileEntryHookStub::MaybeCallEntryHookDelayed(TurboAssembler* tasm,
Zone* zone) {
- if (tasm->isolate()->function_entry_hook() != NULL) {
+ if (tasm->isolate()->function_entry_hook() != nullptr) {
Assembler::BlockConstPoolScope no_const_pools(tasm);
DontEmitDebugCodeScope no_debug_code(tasm);
Label entry_hook_call_start;
tasm->Bind(&entry_hook_call_start);
tasm->Push(padreg, lr);
tasm->CallStubDelayed(new (zone) ProfileEntryHookStub(nullptr));
- DCHECK(tasm->SizeOfCodeGeneratedSince(&entry_hook_call_start) ==
- kProfileEntryHookCallSize);
+ DCHECK_EQ(tasm->SizeOfCodeGeneratedSince(&entry_hook_call_start),
+ kProfileEntryHookCallSize);
tasm->Pop(lr, padreg);
}
}
void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
- if (masm->isolate()->function_entry_hook() != NULL) {
+ if (masm->isolate()->function_entry_hook() != nullptr) {
ProfileEntryHookStub stub(masm->isolate());
Assembler::BlockConstPoolScope no_const_pools(masm);
DontEmitDebugCodeScope no_debug_code(masm);
@@ -1132,8 +696,8 @@ void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
__ Bind(&entry_hook_call_start);
__ Push(padreg, lr);
__ CallStub(&stub);
- DCHECK(masm->SizeOfCodeGeneratedSince(&entry_hook_call_start) ==
- kProfileEntryHookCallSize);
+ DCHECK_EQ(masm->SizeOfCodeGeneratedSince(&entry_hook_call_start),
+ kProfileEntryHookCallSize);
__ Pop(lr, padreg);
}
}
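
Both helpers guard on the isolate's function_entry_hook, which is the
embedder-supplied callback the generated call eventually reaches. Its
signature, assumed here from the v8.h of this era, takes the instrumented
function's address and the location of its return address:

#include <cstdint>

// Assumed embedder API:
//   typedef void (*FunctionEntryHook)(uintptr_t function,
//                                     uintptr_t return_addr_location);
void ExampleEntryHook(uintptr_t function, uintptr_t return_addr_location) {
  // A real hook might sample `function` into a profiling buffer; the stub
  // above is responsible for saving and restoring registers around the call.
  (void)function;
  (void)return_addr_location;
}
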
@@ -1170,7 +734,7 @@ void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
// The caller's return address is above the saved temporaries.
// Grab its location for the second argument to the hook.
- __ Add(x1, __ StackPointer(), kNumSavedRegs * kPointerSize);
+ __ SlotAddress(x1, kNumSavedRegs);
{
// Create a dummy frame, as CallCFunction requires this.
@@ -1218,164 +782,6 @@ void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
__ Blr(lr);
}
-void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
- Label* miss,
- Label* done,
- Register receiver,
- Register properties,
- Handle<Name> name,
- Register scratch0) {
- DCHECK(!AreAliased(receiver, properties, scratch0));
- DCHECK(name->IsUniqueName());
- // If names of slots in range from 1 to kProbes - 1 for the hash value are
- // not equal to the name and kProbes-th slot is not used (its name is the
- // undefined value), it guarantees the hash table doesn't contain the
- // property. It's true even if some slots represent deleted properties
- // (their names are the hole value).
- for (int i = 0; i < kInlinedProbes; i++) {
- // scratch0 points to properties hash.
- // Compute the masked index: (hash + i + i * i) & mask.
- Register index = scratch0;
- // Capacity is smi 2^n.
- __ Ldrsw(index, UntagSmiFieldMemOperand(properties, kCapacityOffset));
- __ Sub(index, index, 1);
- __ And(index, index, name->Hash() + NameDictionary::GetProbeOffset(i));
-
- // Scale the index by multiplying by the entry size.
- STATIC_ASSERT(NameDictionary::kEntrySize == 3);
- __ Add(index, index, Operand(index, LSL, 1)); // index *= 3.
-
- Register entity_name = scratch0;
- // Having undefined at this place means the name is not contained.
- Register tmp = index;
- __ Add(tmp, properties, Operand(index, LSL, kPointerSizeLog2));
- __ Ldr(entity_name, FieldMemOperand(tmp, kElementsStartOffset));
-
- __ JumpIfRoot(entity_name, Heap::kUndefinedValueRootIndex, done);
-
- // Stop if found the property.
- __ Cmp(entity_name, Operand(name));
- __ B(eq, miss);
-
- Label good;
- __ JumpIfRoot(entity_name, Heap::kTheHoleValueRootIndex, &good);
-
- // Check if the entry name is not a unique name.
- __ Ldr(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
- __ Ldrb(entity_name,
- FieldMemOperand(entity_name, Map::kInstanceTypeOffset));
- __ JumpIfNotUniqueNameInstanceType(entity_name, miss);
- __ Bind(&good);
- }
-
- CPURegList spill_list(CPURegister::kRegister, kXRegSizeInBits, 0, 6);
- spill_list.Remove(scratch0); // Scratch registers don't need to be preserved.
- spill_list.Combine(lr);
- spill_list.Combine(padreg); // Add padreg to make the list of even length.
- DCHECK_EQ(spill_list.Count() % 2, 0);
-
- __ PushCPURegList(spill_list);
-
- __ Ldr(x0, FieldMemOperand(receiver, JSObject::kPropertiesOrHashOffset));
- __ Mov(x1, Operand(name));
- NameDictionaryLookupStub stub(masm->isolate(), NEGATIVE_LOOKUP);
- __ CallStub(&stub);
- // Move stub return value to scratch0. Note that scratch0 is not included in
- // spill_list and won't be clobbered by PopCPURegList.
- __ Mov(scratch0, x0);
- __ PopCPURegList(spill_list);
-
- __ Cbz(scratch0, done);
- __ B(miss);
-}
-
-
-void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
- // This stub overrides SometimesSetsUpAFrame() to return false. That means
- // we cannot call anything that could cause a GC from this stub.
- //
- // Arguments are in x0 and x1:
- // x0: property dictionary.
- // x1: the name of the property we are looking for.
- //
- // Return value is in x0 and is zero if lookup failed, non-zero otherwise.
- // If the lookup is successful, x2 will contain the index of the entry.
-
- Register result = x0;
- Register dictionary = x0;
- Register key = x1;
- Register index = x2;
- Register mask = x3;
- Register hash = x4;
- Register undefined = x5;
- Register entry_key = x6;
-
- Label in_dictionary, maybe_in_dictionary, not_in_dictionary;
-
- __ Ldrsw(mask, UntagSmiFieldMemOperand(dictionary, kCapacityOffset));
- __ Sub(mask, mask, 1);
-
- __ Ldr(hash, FieldMemOperand(key, Name::kHashFieldOffset));
- __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
-
- for (int i = kInlinedProbes; i < kTotalProbes; i++) {
- // Compute the masked index: (hash + i + i * i) & mask.
- // Capacity is smi 2^n.
- if (i > 0) {
- // Add the probe offset (i + i * i) left shifted to avoid right shifting
- // the hash in a separate instruction. The value hash + i + i * i is right
- // shifted in the following and instruction.
- DCHECK(NameDictionary::GetProbeOffset(i) <
- 1 << (32 - Name::kHashFieldOffset));
- __ Add(index, hash,
- NameDictionary::GetProbeOffset(i) << Name::kHashShift);
- } else {
- __ Mov(index, hash);
- }
- __ And(index, mask, Operand(index, LSR, Name::kHashShift));
-
- // Scale the index by multiplying by the entry size.
- STATIC_ASSERT(NameDictionary::kEntrySize == 3);
- __ Add(index, index, Operand(index, LSL, 1)); // index *= 3.
-
- __ Add(index, dictionary, Operand(index, LSL, kPointerSizeLog2));
- __ Ldr(entry_key, FieldMemOperand(index, kElementsStartOffset));
-
- // Having undefined at this place means the name is not contained.
- __ Cmp(entry_key, undefined);
- __ B(eq, &not_in_dictionary);
-
- // Stop if found the property.
- __ Cmp(entry_key, key);
- __ B(eq, &in_dictionary);
-
- if (i != kTotalProbes - 1 && mode() == NEGATIVE_LOOKUP) {
- // Check if the entry name is not a unique name.
- __ Ldr(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset));
- __ Ldrb(entry_key, FieldMemOperand(entry_key, Map::kInstanceTypeOffset));
- __ JumpIfNotUniqueNameInstanceType(entry_key, &maybe_in_dictionary);
- }
- }
-
- __ Bind(&maybe_in_dictionary);
- // If we are doing negative lookup then probing failure should be
- // treated as a lookup success. For positive lookup, probing failure
- // should be treated as lookup failure.
- if (mode() == POSITIVE_LOOKUP) {
- __ Mov(result, 0);
- __ Ret();
- }
-
- __ Bind(&in_dictionary);
- __ Mov(result, 1);
- __ Ret();
-
- __ Bind(&not_in_dictionary);
- __ Mov(result, 0);
- __ Ret();
-}
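
Both removed lookup helpers follow the probing scheme their comments describe:
mask the hash-derived probe into the capacity, scale by the 3-slot entry size,
and stop on either the key or undefined. A simplified standalone sketch of that
scheme (stand-in types and the literal probe offset from the comments, not
V8's exact helpers):

#include <cstdint>
#include <vector>

constexpr int kEntrySize = 3;  // each entry is {key, value, details}

// `table` holds capacity * kEntrySize slots; capacity is a power of two and
// the table is never completely full, so the probe loop terminates.
int FindEntry(const std::vector<const void*>& table, uint32_t capacity,
              const void* key, uint32_t hash) {
  const void* const kUndefined = nullptr;  // stand-in for the undefined root
  const uint32_t mask = capacity - 1;
  for (uint32_t i = 0;; ++i) {
    uint32_t index = (hash + i + i * i) & mask;         // masked probe index
    const void* entry_key = table[index * kEntrySize];  // scale by entry size
    if (entry_key == kUndefined) return -1;             // free slot: absent
    if (entry_key == key) return static_cast<int>(index);
  }
}
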
-
-
template<class T>
static void CreateArrayDispatch(MacroAssembler* masm,
AllocationSiteOverrideMode mode) {
@@ -1562,7 +968,7 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
// Initial map for the builtin Array function should be a map.
__ Ldr(x10, FieldMemOperand(constructor,
JSFunction::kPrototypeOrInitialMapOffset));
- // Will both indicate a NULL and a Smi.
+ // Will both indicate a nullptr and a Smi.
__ JumpIfSmi(x10, &unexpected_map);
__ JumpIfObjectType(x10, x10, x11, MAP_TYPE, &map_ok);
__ Bind(&unexpected_map);
@@ -1571,7 +977,7 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
// We should either have undefined in the allocation_site register or a
// valid AllocationSite.
- __ AssertUndefinedOrAllocationSite(allocation_site, x10);
+ __ AssertUndefinedOrAllocationSite(allocation_site);
}
// Enter the context of the Array function.
@@ -1659,7 +1065,7 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
// Initial map for the builtin Array function should be a map.
__ Ldr(x10, FieldMemOperand(constructor,
JSFunction::kPrototypeOrInitialMapOffset));
- // Will both indicate a NULL and a Smi.
+ // Will both indicate a nullptr and a Smi.
__ JumpIfSmi(x10, &unexpected_map);
__ JumpIfObjectType(x10, x10, x11, MAP_TYPE, &map_ok);
__ Bind(&unexpected_map);
@@ -1710,8 +1116,7 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
Register function_address,
ExternalReference thunk_ref,
int stack_space, int spill_offset,
- MemOperand return_value_operand,
- MemOperand* context_restore_operand) {
+ MemOperand return_value_operand) {
ASM_LOCATION("CallApiFunctionAndReturn");
Isolate* isolate = masm->isolate();
ExternalReference next_address =
@@ -1813,12 +1218,7 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
__ Peek(x21, (spill_offset + 2) * kXRegSize);
__ Peek(x22, (spill_offset + 3) * kXRegSize);
- bool restore_context = context_restore_operand != NULL;
- if (restore_context) {
- __ Ldr(cp, *context_restore_operand);
- }
-
- __ LeaveExitFrame(false, x1, !restore_context);
+ __ LeaveExitFrame(false, x1);
// Check if the function scheduled an exception.
__ Mov(x5, ExternalReference::scheduled_exception_address(isolate));
@@ -1848,7 +1248,6 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
void CallApiCallbackStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
- // -- x0 : callee
// -- x4 : call_data
// -- x2 : holder
// -- x1 : api_function_address
@@ -1858,21 +1257,16 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) {
// -- ...
// -- sp[(argc - 1) * 8] : first argument
// -- sp[argc * 8] : receiver
- // -- sp[(argc + 1) * 8] : accessor_holder
// -----------------------------------
- Register callee = x0;
Register call_data = x4;
Register holder = x2;
Register api_function_address = x1;
- Register context = cp;
typedef FunctionCallbackArguments FCA;
- STATIC_ASSERT(FCA::kArgsLength == 8);
- STATIC_ASSERT(FCA::kNewTargetIndex == 7);
- STATIC_ASSERT(FCA::kContextSaveIndex == 6);
- STATIC_ASSERT(FCA::kCalleeIndex == 5);
+ STATIC_ASSERT(FCA::kArgsLength == 6);
+ STATIC_ASSERT(FCA::kNewTargetIndex == 5);
STATIC_ASSERT(FCA::kDataIndex == 4);
STATIC_ASSERT(FCA::kReturnValueOffset == 3);
STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
@@ -1882,8 +1276,8 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) {
Register undef = x7;
__ LoadRoot(undef, Heap::kUndefinedValueRootIndex);
- // Push new target, context, callee and call data.
- __ Push(undef, context, callee, call_data);
+ // Push new target, call data.
+ __ Push(undef, call_data);
Register isolate_reg = x5;
__ Mov(isolate_reg, ExternalReference::isolate_address(masm->isolate()));
@@ -1892,40 +1286,6 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) {
// return value, return value default, isolate, holder.
__ Push(undef, undef, isolate_reg, holder);
- // Enter a new context.
- if (is_lazy()) {
- // ----------- S t a t e -------------------------------------
- // -- sp[0] : holder
- // -- ...
- // -- sp[(FCA::kArgsLength - 1) * 8] : new_target
- // -- sp[FCA::kArgsLength * 8] : last argument
- // -- ...
- // -- sp[(FCA::kArgsLength + argc - 1) * 8] : first argument
- // -- sp[(FCA::kArgsLength + argc) * 8] : receiver
- // -- sp[(FCA::kArgsLength + argc + 1) * 8] : accessor_holder
- // -----------------------------------------------------------
-
- // Load context from accessor_holder.
- Register accessor_holder = context;
- Register scratch = undef;
- Register scratch2 = callee;
- __ Ldr(accessor_holder,
- MemOperand(__ StackPointer(),
- (FCA::kArgsLength + 1 + argc()) * kPointerSize));
- // Look for the constructor if |accessor_holder| is not a function.
- Label skip_looking_for_constructor;
- __ Ldr(scratch, FieldMemOperand(accessor_holder, HeapObject::kMapOffset));
- __ Ldrb(scratch2, FieldMemOperand(scratch, Map::kBitFieldOffset));
- __ Tst(scratch2, Operand(1 << Map::kIsConstructor));
- __ B(ne, &skip_looking_for_constructor);
- __ GetMapConstructor(context, scratch, scratch, scratch2);
- __ Bind(&skip_looking_for_constructor);
- __ Ldr(context, FieldMemOperand(context, JSFunction::kContextOffset));
- } else {
- // Load context from callee.
- __ Ldr(context, FieldMemOperand(callee, JSFunction::kContextOffset));
- }
-
// Prepare arguments.
Register args = x6;
__ Mov(args, masm->StackPointer());
@@ -1944,7 +1304,7 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) {
DCHECK(!AreAliased(x0, api_function_address));
// x0 = FunctionCallbackInfo&
// Arguments is after the return address.
- __ Add(x0, masm->StackPointer(), 1 * kPointerSize);
+ __ SlotAddress(x0, 1);
// FunctionCallbackInfo::implicit_args_ and FunctionCallbackInfo::values_
__ Add(x10, args, Operand((FCA::kArgsLength - 1 + argc()) * kPointerSize));
__ Stp(args, x10, MemOperand(x0, 0 * kPointerSize));
@@ -1956,25 +1316,19 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) {
ExternalReference::invoke_function_callback(masm->isolate());
AllowExternalCallThatCantCauseGC scope(masm);
- MemOperand context_restore_operand(
- fp, (2 + FCA::kContextSaveIndex) * kPointerSize);
// Stores return the first js argument
- int return_value_offset = 0;
- if (is_store()) {
- return_value_offset = 2 + FCA::kArgsLength;
- } else {
- return_value_offset = 2 + FCA::kReturnValueOffset;
- }
+ int return_value_offset = 2 + FCA::kReturnValueOffset;
MemOperand return_value_operand(fp, return_value_offset * kPointerSize);
// The number of arguments might be odd, but will be padded when calling the
- // stub. We do not round up stack_space here, this will be done in
- // CallApiFunctionAndReturn.
- const int stack_space = argc() + FCA::kArgsLength + 2;
- DCHECK_EQ((stack_space - argc()) % 2, 0);
+ // stub. We do not round up stack_space to account for odd argc here, this
+ // will be done in CallApiFunctionAndReturn.
+ const int stack_space = (argc() + 1) + FCA::kArgsLength;
+
+ // The current frame needs to be aligned.
+ DCHECK_EQ((stack_space - (argc() + 1)) % 2, 0);
const int spill_offset = 1 + kApiStackSpace;
CallApiFunctionAndReturn(masm, api_function_address, thunk_ref, stack_space,
- spill_offset, return_value_operand,
- &context_restore_operand);
+ spill_offset, return_value_operand);
}
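
The implicit-argument block asserted at the top of this stub now has six slots;
restated as a plain enum (the two lowest indices are inferred from the final
push of return value, default, isolate and holder, so treat them as an
assumption), together with the slot count handed to CallApiFunctionAndReturn:

enum FunctionCallbackImplicitArgs {
  kHolderIndex = 0,                    // inferred from the push order above
  kIsolateIndex = 1,                   // inferred from the push order above
  kReturnValueDefaultValueIndex = 2,
  kReturnValueOffset = 3,
  kDataIndex = 4,
  kNewTargetIndex = 5,
  kArgsLength = 6,
};

// Slots dropped on return: argc JS arguments, the receiver, and the six
// implicit arguments; padding for an odd total is applied later.
constexpr int ApiCallbackStackSpace(int argc) { return (argc + 1) + kArgsLength; }
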
@@ -2033,7 +1387,7 @@ void CallApiGetterStub::Generate(MacroAssembler* masm) {
// Create v8::PropertyCallbackInfo object on the stack and initialize
// it's args_ field.
__ Poke(x1, 1 * kPointerSize);
- __ Add(x1, masm->StackPointer(), 1 * kPointerSize);
+ __ SlotAddress(x1, 1);
// x1 = v8::PropertyCallbackInfo&
ExternalReference thunk_ref =
@@ -2051,7 +1405,7 @@ void CallApiGetterStub::Generate(MacroAssembler* masm) {
fp, (PropertyCallbackArguments::kReturnValueOffset + 3) * kPointerSize);
CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
kStackUnwindSpace, spill_offset,
- return_value_operand, NULL);
+ return_value_operand);
}
#undef __