Diffstat (limited to 'deps/v8/src/builtins/arm64/builtins-arm64.cc')
-rw-r--r--  deps/v8/src/builtins/arm64/builtins-arm64.cc  858
1 file changed, 438 insertions(+), 420 deletions(-)
diff --git a/deps/v8/src/builtins/arm64/builtins-arm64.cc b/deps/v8/src/builtins/arm64/builtins-arm64.cc
index 8fadff4768..9c7397897a 100644
--- a/deps/v8/src/builtins/arm64/builtins-arm64.cc
+++ b/deps/v8/src/builtins/arm64/builtins-arm64.cc
@@ -11,6 +11,8 @@
#include "src/deoptimizer.h"
#include "src/frame-constants.h"
#include "src/frames.h"
+// For interpreter_entry_return_pc_offset. TODO(jkummerow): Drop.
+#include "src/heap/heap-inl.h"
#include "src/macro-assembler-inl.h"
#include "src/objects-inl.h"
#include "src/objects/cell.h"
@@ -50,7 +52,8 @@ void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) {
if (FLAG_debug_code) {
// Initial map for the builtin InternalArray functions should be maps.
- __ Ldr(x10, FieldMemOperand(x1, JSFunction::kPrototypeOrInitialMapOffset));
+ __ LoadTaggedPointerField(
+ x10, FieldMemOperand(x1, JSFunction::kPrototypeOrInitialMapOffset));
__ Tst(x10, kSmiTagMask);
__ Assert(ne, AbortReason::kUnexpectedInitialMapForInternalArrayFunction);
__ CompareObjectType(x10, x11, x12, MAP_TYPE);
@@ -140,7 +143,7 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
__ Tbnz(slot_count_without_rounding, 0, &already_aligned);
// Store padding, if needed.
- __ Str(padreg, MemOperand(x2, 1 * kPointerSize));
+ __ Str(padreg, MemOperand(x2, 1 * kSystemPointerSize));
__ Bind(&already_aligned);
// Copy arguments to the expression stack.
@@ -155,26 +158,26 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
}
// ----------- S t a t e -------------
- // -- x0: number of arguments (untagged)
- // -- x1: constructor function
- // -- x3: new target
+ // -- x0: number of arguments (untagged)
+ // -- x1: constructor function
+ // -- x3: new target
// If argc is odd:
- // -- sp[0*kPointerSize]: argument n - 1
+ // -- sp[0*kSystemPointerSize]: argument n - 1
// -- ...
- // -- sp[(n-1)*kPointerSize]: argument 0
- // -- sp[(n+0)*kPointerSize]: the hole (receiver)
- // -- sp[(n+1)*kPointerSize]: padding
- // -- sp[(n+2)*kPointerSize]: padding
- // -- sp[(n+3)*kPointerSize]: number of arguments (tagged)
- // -- sp[(n+4)*kPointerSize]: context (pushed by FrameScope)
+ // -- sp[(n-1)*kSystemPointerSize]: argument 0
+ // -- sp[(n+0)*kSystemPointerSize]: the hole (receiver)
+ // -- sp[(n+1)*kSystemPointerSize]: padding
+ // -- sp[(n+2)*kSystemPointerSize]: padding
+ // -- sp[(n+3)*kSystemPointerSize]: number of arguments (tagged)
+ // -- sp[(n+4)*kSystemPointerSize]: context (pushed by FrameScope)
// If argc is even:
- // -- sp[0*kPointerSize]: argument n - 1
+ // -- sp[0*kSystemPointerSize]: argument n - 1
// -- ...
- // -- sp[(n-1)*kPointerSize]: argument 0
- // -- sp[(n+0)*kPointerSize]: the hole (receiver)
- // -- sp[(n+1)*kPointerSize]: padding
- // -- sp[(n+2)*kPointerSize]: number of arguments (tagged)
- // -- sp[(n+3)*kPointerSize]: context (pushed by FrameScope)
+ // -- sp[(n-1)*kSystemPointerSize]: argument 0
+ // -- sp[(n+0)*kSystemPointerSize]: the hole (receiver)
+ // -- sp[(n+1)*kSystemPointerSize]: padding
+ // -- sp[(n+2)*kSystemPointerSize]: number of arguments (tagged)
+ // -- sp[(n+3)*kSystemPointerSize]: context (pushed by FrameScope)
// -----------------------------------
// Call the function.
@@ -203,13 +206,13 @@ void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
// Check the stack for overflow.
// We are not trying to catch interruptions (e.g. debug break and
// preemption) here, so the "real stack limit" is checked.
- Label enough_stack_space;
+
__ LoadRoot(scratch, RootIndex::kRealStackLimit);
// Make scratch the space we have left. The stack might already be overflowed
// here which will cause scratch to become negative.
__ Sub(scratch, sp, scratch);
// Check if the arguments will overflow the stack.
- __ Cmp(scratch, Operand(num_args, LSL, kPointerSizeLog2));
+ __ Cmp(scratch, Operand(num_args, LSL, kSystemPointerSizeLog2));
__ B(le, stack_overflow);
#if defined(V8_OS_WIN)
@@ -218,7 +221,7 @@ void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
Label chkstk, chkstk_done;
Register probe = temps.AcquireX();
- __ Sub(scratch, sp, Operand(num_args, LSL, kPointerSizeLog2));
+ __ Sub(scratch, sp, Operand(num_args, LSL, kSystemPointerSizeLog2));
__ Mov(probe, sp);
// Loop start of stack probe.
@@ -265,18 +268,19 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
__ Push(x0, x1, padreg, x3);
// ----------- S t a t e -------------
- // -- sp[0*kPointerSize]: new target
- // -- sp[1*kPointerSize]: padding
- // -- x1 and sp[2*kPointerSize]: constructor function
- // -- sp[3*kPointerSize]: number of arguments (tagged)
- // -- sp[4*kPointerSize]: context (pushed by FrameScope)
+ // -- sp[0*kSystemPointerSize]: new target
+ // -- sp[1*kSystemPointerSize]: padding
+ // -- x1 and sp[2*kSystemPointerSize]: constructor function
+ // -- sp[3*kSystemPointerSize]: number of arguments (tagged)
+ // -- sp[4*kSystemPointerSize]: context (pushed by FrameScope)
// -----------------------------------
- __ Ldr(x4, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadTaggedPointerField(
+ x4, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
__ Ldr(w4, FieldMemOperand(x4, SharedFunctionInfo::kFlagsOffset));
- __ TestAndBranchIfAnySet(w4,
- SharedFunctionInfo::IsDerivedConstructorBit::kMask,
- &not_create_implicit_receiver);
+ __ DecodeField<SharedFunctionInfo::FunctionKindBits>(w4);
+ __ JumpIfIsInRange(w4, kDefaultDerivedConstructor, kDerivedConstructor,
+ &not_create_implicit_receiver);
// If not derived class constructor: Allocate the new receiver object.
__ IncrementCounter(masm->isolate()->counters()->constructed_objects(), 1,
@@ -290,12 +294,12 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
__ LoadRoot(x0, RootIndex::kTheHoleValue);
// ----------- S t a t e -------------
- // -- x0: receiver
- // -- Slot 4 / sp[0*kPointerSize]: new target
- // -- Slot 3 / sp[1*kPointerSize]: padding
- // -- Slot 2 / sp[2*kPointerSize]: constructor function
- // -- Slot 1 / sp[3*kPointerSize]: number of arguments (tagged)
- // -- Slot 0 / sp[4*kPointerSize]: context
+ // -- x0: receiver
+ // -- Slot 4 / sp[0*kSystemPointerSize]: new target
+ // -- Slot 3 / sp[1*kSystemPointerSize]: padding
+ // -- Slot 2 / sp[2*kSystemPointerSize]: constructor function
+ // -- Slot 1 / sp[3*kSystemPointerSize]: number of arguments (tagged)
+ // -- Slot 0 / sp[4*kSystemPointerSize]: context
// -----------------------------------
// Deoptimizer enters here.
masm->isolate()->heap()->SetConstructStubCreateDeoptPCOffset(
@@ -304,7 +308,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
__ Bind(&post_instantiation_deopt_entry);
// Restore new target from the top of the stack.
- __ Peek(x3, 0 * kPointerSize);
+ __ Peek(x3, 0 * kSystemPointerSize);
// Restore constructor function and argument count.
__ Ldr(x1, MemOperand(fp, ConstructFrameConstants::kConstructorOffset));
@@ -326,15 +330,16 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
__ Push(x0, x0);
// ----------- S t a t e -------------
- // -- x3: new target
- // -- x12: number of arguments (untagged)
- // -- sp[0*kPointerSize]: implicit receiver (overwrite if argc odd)
- // -- sp[1*kPointerSize]: implicit receiver
- // -- sp[2*kPointerSize]: implicit receiver
- // -- sp[3*kPointerSize]: padding
- // -- x1 and sp[4*kPointerSize]: constructor function
- // -- sp[5*kPointerSize]: number of arguments (tagged)
- // -- sp[6*kPointerSize]: context
+ // -- x3: new target
+ // -- x12: number of arguments (untagged)
+ // -- sp[0*kSystemPointerSize]: implicit receiver (overwrite if argc
+ // odd)
+ // -- sp[1*kSystemPointerSize]: implicit receiver
+ // -- sp[2*kSystemPointerSize]: implicit receiver
+ // -- sp[3*kSystemPointerSize]: padding
+ // -- x1 and sp[4*kSystemPointerSize]: constructor function
+ // -- sp[5*kSystemPointerSize]: number of arguments (tagged)
+ // -- sp[6*kSystemPointerSize]: context
// -----------------------------------
// Round the number of arguments down to the next even number, and claim
@@ -373,11 +378,11 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
__ InvokeFunction(x1, x3, actual, CALL_FUNCTION);
// ----------- S t a t e -------------
- // -- sp[0*kPointerSize]: implicit receiver
- // -- sp[1*kPointerSize]: padding
- // -- sp[2*kPointerSize]: constructor function
- // -- sp[3*kPointerSize]: number of arguments
- // -- sp[4*kPointerSize]: context
+ // -- sp[0*kSystemPointerSize]: implicit receiver
+ // -- sp[1*kSystemPointerSize]: padding
+ // -- sp[2*kSystemPointerSize]: constructor function
+ // -- sp[3*kSystemPointerSize]: number of arguments
+ // -- sp[4*kSystemPointerSize]: context
// -----------------------------------
// Store offset of return address for deoptimizer.
@@ -414,7 +419,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// Throw away the result of the constructor invocation and use the
// on-stack receiver as the result.
__ Bind(&use_receiver);
- __ Peek(x0, 0 * kPointerSize);
+ __ Peek(x0, 0 * kSystemPointerSize);
__ CompareRoot(x0, RootIndex::kTheHoleValue);
__ B(eq, &do_throw);
@@ -443,8 +448,9 @@ static void GetSharedFunctionInfoBytecode(MacroAssembler* masm,
Label done;
__ CompareObjectType(sfi_data, scratch1, scratch1, INTERPRETER_DATA_TYPE);
__ B(ne, &done);
- __ Ldr(sfi_data,
- FieldMemOperand(sfi_data, InterpreterData::kBytecodeArrayOffset));
+ __ LoadTaggedPointerField(
+ sfi_data,
+ FieldMemOperand(sfi_data, InterpreterData::kBytecodeArrayOffset));
__ Bind(&done);
}
@@ -458,13 +464,16 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ AssertGeneratorObject(x1);
// Store input value into generator object.
- __ Str(x0, FieldMemOperand(x1, JSGeneratorObject::kInputOrDebugPosOffset));
+ __ StoreTaggedField(
+ x0, FieldMemOperand(x1, JSGeneratorObject::kInputOrDebugPosOffset));
__ RecordWriteField(x1, JSGeneratorObject::kInputOrDebugPosOffset, x0, x3,
kLRHasNotBeenSaved, kDontSaveFPRegs);
// Load suspended function and context.
- __ Ldr(x4, FieldMemOperand(x1, JSGeneratorObject::kFunctionOffset));
- __ Ldr(cp, FieldMemOperand(x4, JSFunction::kContextOffset));
+ __ LoadTaggedPointerField(
+ x4, FieldMemOperand(x1, JSGeneratorObject::kFunctionOffset));
+ __ LoadTaggedPointerField(cp,
+ FieldMemOperand(x4, JSFunction::kContextOffset));
// Flood function if we are stepping.
Label prepare_step_in_if_stepping, prepare_step_in_suspended_generator;
@@ -491,7 +500,8 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ B(lo, &stack_overflow);
// Get number of arguments for generator function.
- __ Ldr(x10, FieldMemOperand(x4, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadTaggedPointerField(
+ x10, FieldMemOperand(x4, JSFunction::kSharedFunctionInfoOffset));
__ Ldrh(w10, FieldMemOperand(
x10, SharedFunctionInfo::kFormalParameterCountOffset));
@@ -502,11 +512,12 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// Store padding (which might be replaced by the receiver).
__ Sub(x11, x11, 1);
- __ Poke(padreg, Operand(x11, LSL, kPointerSizeLog2));
+ __ Poke(padreg, Operand(x11, LSL, kSystemPointerSizeLog2));
// Poke receiver into highest claimed slot.
- __ Ldr(x5, FieldMemOperand(x1, JSGeneratorObject::kReceiverOffset));
- __ Poke(x5, Operand(x10, LSL, kPointerSizeLog2));
+ __ LoadTaggedPointerField(
+ x5, FieldMemOperand(x1, JSGeneratorObject::kReceiverOffset));
+ __ Poke(x5, Operand(x10, LSL, kSystemPointerSizeLog2));
// ----------- S t a t e -------------
// -- x1 : the JSGeneratorObject to resume
@@ -520,8 +531,9 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// Copy the function arguments from the generator object's register file.
- __ Ldr(x5,
- FieldMemOperand(x1, JSGeneratorObject::kParametersAndRegistersOffset));
+ __ LoadTaggedPointerField(
+ x5,
+ FieldMemOperand(x1, JSGeneratorObject::kParametersAndRegistersOffset));
{
Label loop, done;
__ Cbz(x10, &done);
@@ -529,9 +541,9 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ Bind(&loop);
__ Sub(x10, x10, 1);
- __ Add(x11, x5, Operand(x12, LSL, kPointerSizeLog2));
- __ Ldr(x11, FieldMemOperand(x11, FixedArray::kHeaderSize));
- __ Poke(x11, Operand(x10, LSL, kPointerSizeLog2));
+ __ Add(x11, x5, Operand(x12, LSL, kTaggedSizeLog2));
+ __ LoadAnyTaggedField(x11, FieldMemOperand(x11, FixedArray::kHeaderSize));
+ __ Poke(x11, Operand(x10, LSL, kSystemPointerSizeLog2));
__ Add(x12, x12, 1);
__ Cbnz(x10, &loop);
__ Bind(&done);
@@ -539,8 +551,10 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// Underlying function needs to have bytecode available.
if (FLAG_debug_code) {
- __ Ldr(x3, FieldMemOperand(x4, JSFunction::kSharedFunctionInfoOffset));
- __ Ldr(x3, FieldMemOperand(x3, SharedFunctionInfo::kFunctionDataOffset));
+ __ LoadTaggedPointerField(
+ x3, FieldMemOperand(x4, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadTaggedPointerField(
+ x3, FieldMemOperand(x3, SharedFunctionInfo::kFunctionDataOffset));
GetSharedFunctionInfoBytecode(masm, x3, x0);
__ CompareObjectType(x3, x3, x3, BYTECODE_ARRAY_TYPE);
__ Assert(eq, AbortReason::kMissingBytecodeArray);
@@ -548,7 +562,8 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// Resume (Ignition/TurboFan) generator object.
{
- __ Ldr(x0, FieldMemOperand(x4, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadTaggedPointerField(
+ x0, FieldMemOperand(x4, JSFunction::kSharedFunctionInfoOffset));
__ Ldrh(w0, FieldMemOperand(
x0, SharedFunctionInfo::kFormalParameterCountOffset));
// We abuse new.target both to indicate that this is a resume call and to
@@ -557,7 +572,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ Mov(x3, x1);
__ Mov(x1, x4);
static_assert(kJavaScriptCallCodeStartRegister == x2, "ABI mismatch");
- __ Ldr(x2, FieldMemOperand(x1, JSFunction::kCodeOffset));
+ __ LoadTaggedPointerField(x2, FieldMemOperand(x1, JSFunction::kCodeOffset));
__ JumpCodeObject(x2);
}
@@ -569,7 +584,8 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ Push(x1, padreg, x4, x5);
__ CallRuntime(Runtime::kDebugOnFunctionCall);
__ Pop(padreg, x1);
- __ Ldr(x4, FieldMemOperand(x1, JSGeneratorObject::kFunctionOffset));
+ __ LoadTaggedPointerField(
+ x4, FieldMemOperand(x1, JSGeneratorObject::kFunctionOffset));
}
__ B(&stepping_prepared);
@@ -579,7 +595,8 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ Push(x1, padreg);
__ CallRuntime(Runtime::kDebugPrepareStepInSuspendedGenerator);
__ Pop(padreg, x1);
- __ Ldr(x4, FieldMemOperand(x1, JSGeneratorObject::kFunctionOffset));
+ __ LoadTaggedPointerField(
+ x4, FieldMemOperand(x1, JSGeneratorObject::kFunctionOffset));
}
__ B(&stepping_prepared);
@@ -650,7 +667,7 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
// Push the JS entry frame marker. Also set js_entry_sp if this is the
// outermost JS call.
- Label non_outermost_js, done;
+ Label done;
ExternalReference js_entry_sp = ExternalReference::Create(
IsolateAddressId::kJSEntrySPAddress, masm->isolate());
__ Mov(x10, js_entry_sp);
@@ -709,9 +726,9 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
__ Bind(&invoke);
// Push new stack handler.
- static_assert(StackHandlerConstants::kSize == 2 * kPointerSize,
+ static_assert(StackHandlerConstants::kSize == 2 * kSystemPointerSize,
"Unexpected offset for StackHandlerConstants::kSize");
- static_assert(StackHandlerConstants::kNextOffset == 0 * kPointerSize,
+ static_assert(StackHandlerConstants::kNextOffset == 0 * kSystemPointerSize,
"Unexpected offset for StackHandlerConstants::kNextOffset");
// Link the current handler as the next handler.
@@ -740,7 +757,7 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
__ Call(trampoline_code, RelocInfo::CODE_TARGET);
// Pop the stack handler and unlink this frame from the handler chain.
- static_assert(StackHandlerConstants::kNextOffset == 0 * kPointerSize,
+ static_assert(StackHandlerConstants::kNextOffset == 0 * kSystemPointerSize,
"Unexpected offset for StackHandlerConstants::kNextOffset");
__ Pop(x10, padreg);
__ Mov(x11, ExternalReference::Create(IsolateAddressId::kHandlerAddress,
@@ -763,7 +780,7 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
Label non_outermost_js_2;
{
Register c_entry_fp = x11;
- __ PeekPair(x10, c_entry_fp, 1 * kPointerSize);
+ __ PeekPair(x10, c_entry_fp, 1 * kSystemPointerSize);
__ Cmp(x10, StackFrame::OUTERMOST_JSENTRY_FRAME);
__ B(ne, &non_outermost_js_2);
__ Mov(x12, js_entry_sp);
@@ -777,9 +794,10 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
}
// Reset the stack to the callee saved registers.
- static_assert(EntryFrameConstants::kFixedFrameSize % (2 * kPointerSize) == 0,
- "Size of entry frame is not a multiple of 16 bytes");
- __ Drop(EntryFrameConstants::kFixedFrameSize / kPointerSize);
+ static_assert(
+ EntryFrameConstants::kFixedFrameSize % (2 * kSystemPointerSize) == 0,
+ "Size of entry frame is not a multiple of 16 bytes");
+ __ Drop(EntryFrameConstants::kFixedFrameSize / kSystemPointerSize);
// Restore the callee-saved registers and return.
__ PopCalleeSavedRegisters();
__ Ret();
@@ -848,7 +866,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// Store padding (which might be overwritten).
__ SlotAddress(scratch, slots_to_claim);
- __ Str(padreg, MemOperand(scratch, -kPointerSize));
+ __ Str(padreg, MemOperand(scratch, -kSystemPointerSize));
// Store receiver and function on the stack.
__ SlotAddress(scratch, argc);
@@ -867,11 +885,11 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
__ Bind(&loop);
// Load the handle.
- __ Ldr(x11, MemOperand(argv, kPointerSize, PostIndex));
+ __ Ldr(x11, MemOperand(argv, kSystemPointerSize, PostIndex));
// Dereference the handle.
__ Ldr(x11, MemOperand(x11));
// Poke the result into the stack.
- __ Str(x11, MemOperand(scratch, -kPointerSize, PreIndex));
+ __ Str(x11, MemOperand(scratch, -kSystemPointerSize, PreIndex));
// Loop if we've not reached the end of copy marker.
__ Cmp(sp, scratch);
__ B(lt, &loop);
@@ -936,7 +954,8 @@ static void ReplaceClosureCodeWithOptimizedCode(
MacroAssembler* masm, Register optimized_code, Register closure,
Register scratch1, Register scratch2, Register scratch3) {
// Store code entry in the closure.
- __ Str(optimized_code, FieldMemOperand(closure, JSFunction::kCodeOffset));
+ __ StoreTaggedField(optimized_code,
+ FieldMemOperand(closure, JSFunction::kCodeOffset));
__ Mov(scratch1, optimized_code); // Write barrier clobbers scratch1 below.
__ RecordWriteField(closure, JSFunction::kCodeOffset, scratch1, scratch2,
kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
@@ -957,10 +976,10 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch) {
// Drop receiver + arguments.
if (__ emit_debug_code()) {
- __ Tst(args_size, kPointerSize - 1);
+ __ Tst(args_size, kSystemPointerSize - 1);
__ Check(eq, AbortReason::kUnexpectedValue);
}
- __ Lsr(args_size, args_size, kPointerSizeLog2);
+ __ Lsr(args_size, args_size, kSystemPointerSizeLog2);
__ DropArguments(args_size);
}
@@ -993,7 +1012,7 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
Register closure = x1;
Register optimized_code_entry = scratch1;
- __ Ldr(
+ __ LoadAnyTaggedField(
optimized_code_entry,
FieldMemOperand(feedback_vector, FeedbackVector::kOptimizedCodeOffset));
@@ -1046,8 +1065,9 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
// Check if the optimized code is marked for deopt. If it is, call the
// runtime to clear it.
Label found_deoptimized_code;
- __ Ldr(scratch2, FieldMemOperand(optimized_code_entry,
- Code::kCodeDataContainerOffset));
+ __ LoadTaggedPointerField(
+ scratch2,
+ FieldMemOperand(optimized_code_entry, Code::kCodeDataContainerOffset));
__ Ldr(
scratch2,
FieldMemOperand(scratch2, CodeDataContainer::kKindSpecificFlagsOffset));
@@ -1149,9 +1169,11 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Get the bytecode array from the function object and load it into
// kInterpreterBytecodeArrayRegister.
- __ Ldr(x0, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
- __ Ldr(kInterpreterBytecodeArrayRegister,
- FieldMemOperand(x0, SharedFunctionInfo::kFunctionDataOffset));
+ __ LoadTaggedPointerField(
+ x0, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadTaggedPointerField(
+ kInterpreterBytecodeArrayRegister,
+ FieldMemOperand(x0, SharedFunctionInfo::kFunctionDataOffset));
GetSharedFunctionInfoBytecode(masm, kInterpreterBytecodeArrayRegister, x11);
// The bytecode array could have been flushed from the shared function info,
@@ -1162,9 +1184,11 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ B(ne, &compile_lazy);
// Load the feedback vector from the closure.
- __ Ldr(feedback_vector,
- FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
- __ Ldr(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
+ __ LoadTaggedPointerField(
+ feedback_vector,
+ FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
+ __ LoadTaggedPointerField(
+ feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
Label push_stack_frame;
// Check if feedback vector is valid. If valid, check for optimized code
@@ -1224,7 +1248,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// register in the register file.
Label loop_header;
__ LoadRoot(x10, RootIndex::kUndefinedValue);
- __ Lsr(x11, x11, kPointerSizeLog2);
+ __ Lsr(x11, x11, kSystemPointerSizeLog2);
// Round up the number of registers to a multiple of 2, to align the stack
// to 16 bytes.
__ Add(x11, x11, 1);
@@ -1241,7 +1265,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
kInterpreterBytecodeArrayRegister,
BytecodeArray::kIncomingNewTargetOrGeneratorRegisterOffset));
__ Cbz(x10, &no_incoming_new_target_or_generator_register);
- __ Str(x3, MemOperand(fp, x10, LSL, kPointerSizeLog2));
+ __ Str(x3, MemOperand(fp, x10, LSL, kSystemPointerSizeLog2));
__ Bind(&no_incoming_new_target_or_generator_register);
// Load accumulator with undefined.
@@ -1257,11 +1281,11 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
#if defined(V8_OS_WIN)
__ Ldrb(x23, MemOperand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister));
- __ Mov(x1, Operand(x23, LSL, kPointerSizeLog2));
+ __ Mov(x1, Operand(x23, LSL, kSystemPointerSizeLog2));
#else
__ Ldrb(x18, MemOperand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister));
- __ Mov(x1, Operand(x18, LSL, kPointerSizeLog2));
+ __ Mov(x1, Operand(x18, LSL, kSystemPointerSizeLog2));
#endif
__ Ldr(kJavaScriptCallCodeStartRegister,
MemOperand(kInterpreterDispatchTableRegister, x1));
@@ -1342,7 +1366,7 @@ static void Generate_InterpreterPushArgs(MacroAssembler* masm,
UseScratchRegisterScope temps(masm);
Register scratch = temps.AcquireX();
__ Sub(scratch, slots_to_claim, 1);
- __ Poke(padreg, Operand(scratch, LSL, kPointerSizeLog2));
+ __ Poke(padreg, Operand(scratch, LSL, kSystemPointerSizeLog2));
}
if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
@@ -1359,12 +1383,12 @@ static void Generate_InterpreterPushArgs(MacroAssembler* masm,
}
__ Sub(last_arg_addr, first_arg_index,
- Operand(slots_to_copy, LSL, kPointerSizeLog2));
- __ Add(last_arg_addr, last_arg_addr, kPointerSize);
+ Operand(slots_to_copy, LSL, kSystemPointerSizeLog2));
+ __ Add(last_arg_addr, last_arg_addr, kSystemPointerSize);
// Load the final spread argument into spread_arg_out, if necessary.
if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
- __ Ldr(spread_arg_out, MemOperand(last_arg_addr, -kPointerSize));
+ __ Ldr(spread_arg_out, MemOperand(last_arg_addr, -kSystemPointerSize));
}
// Copy the rest of the arguments.
@@ -1458,8 +1482,10 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
// get the custom trampoline, otherwise grab the entry address of the global
// trampoline.
__ Ldr(x1, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
- __ Ldr(x1, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
- __ Ldr(x1, FieldMemOperand(x1, SharedFunctionInfo::kFunctionDataOffset));
+ __ LoadTaggedPointerField(
+ x1, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadTaggedPointerField(
+ x1, FieldMemOperand(x1, SharedFunctionInfo::kFunctionDataOffset));
__ CompareObjectType(x1, kInterpreterDispatchTableRegister,
kInterpreterDispatchTableRegister,
INTERPRETER_DATA_TYPE);
@@ -1508,11 +1534,11 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
#if defined(V8_OS_WIN)
__ Ldrb(x23, MemOperand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister));
- __ Mov(x1, Operand(x23, LSL, kPointerSizeLog2));
+ __ Mov(x1, Operand(x23, LSL, kSystemPointerSizeLog2));
#else
__ Ldrb(x18, MemOperand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister));
- __ Mov(x1, Operand(x18, LSL, kPointerSizeLog2));
+ __ Mov(x1, Operand(x18, LSL, kSystemPointerSizeLog2));
#endif
__ Ldr(kJavaScriptCallCodeStartRegister,
MemOperand(kInterpreterDispatchTableRegister, x1));
@@ -1610,7 +1636,7 @@ void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
// Three arguments.
__ Bind(&three_args);
__ Ldr(scratch3, MemOperand(fp, StandardFrameConstants::kCallerSPOffset +
- 2 * kPointerSize));
+ 2 * kSystemPointerSize));
__ Push(new_target, scratch3, scratch1, scratch2);
__ Bind(&args_done);
@@ -1622,7 +1648,7 @@ void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
__ JumpIfSmi(x0, &failed);
// Peek the argument count from the stack, untagging at the same time.
- __ SmiUntag(x4, MemOperand(sp, 3 * kPointerSize));
+ __ SmiUntag(x4, MemOperand(sp, 3 * kSystemPointerSize));
__ Drop(4);
scope.GenerateLeaveFrame();
@@ -1637,7 +1663,8 @@ void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
}
// On failure, tail call back to regular js by re-calling the function
// which has be reset to the compile lazy builtin.
- __ Ldr(x4, FieldMemOperand(new_target, JSFunction::kCodeOffset));
+ __ LoadTaggedPointerField(
+ x4, FieldMemOperand(new_target, JSFunction::kCodeOffset));
__ JumpCodeObject(x4);
}
@@ -1651,7 +1678,7 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
(allocatable_register_count +
BuiltinContinuationFrameConstants::PaddingSlotCount(
allocatable_register_count)) *
- kPointerSize;
+ kSystemPointerSize;
// Set up frame pointer.
__ Add(fp, sp, frame_size);
@@ -1665,14 +1692,14 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
// Restore registers in pairs.
int offset = -BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp -
- allocatable_register_count * kPointerSize;
+ allocatable_register_count * kSystemPointerSize;
for (int i = allocatable_register_count - 1; i > 0; i -= 2) {
int code1 = config->GetAllocatableGeneralCode(i);
int code2 = config->GetAllocatableGeneralCode(i - 1);
Register reg1 = Register::from_code(code1);
Register reg2 = Register::from_code(code2);
__ Ldp(reg1, reg2, MemOperand(fp, offset));
- offset += 2 * kPointerSize;
+ offset += 2 * kSystemPointerSize;
}
// Restore first register separately, if number of registers is odd.
@@ -1753,13 +1780,14 @@ void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
// Load deoptimization data from the code object.
// <deopt_data> = <code>[#deoptimization_data_offset]
- __ Ldr(x1, MemOperand(x0, Code::kDeoptimizationDataOffset - kHeapObjectTag));
+ __ LoadTaggedPointerField(
+ x1, FieldMemOperand(x0, Code::kDeoptimizationDataOffset));
// Load the OSR entrypoint offset from the deoptimization data.
// <osr_offset> = <deopt_data>[#header_size + #osr_pc_offset]
- __ SmiUntag(x1,
- FieldMemOperand(x1, FixedArray::OffsetOfElementAt(
- DeoptimizationData::kOsrPcOffsetIndex)));
+ __ SmiUntagField(
+ x1, FieldMemOperand(x1, FixedArray::OffsetOfElementAt(
+ DeoptimizationData::kOsrPcOffsetIndex)));
// Compute the target address = code_obj + header_size + osr_offset
// <entry_addr> = <code_obj> + #header_size + <osr_offset>
@@ -1817,7 +1845,7 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
__ Mov(saved_argc, argc);
__ Ldp(arg_array, this_arg, MemOperand(scratch)); // Overwrites argc.
- __ Ldr(receiver, MemOperand(scratch, 2 * kPointerSize));
+ __ Ldr(receiver, MemOperand(scratch, 2 * kSystemPointerSize));
__ Drop(2); // Drop the undefined values we pushed above.
__ DropArguments(saved_argc, TurboAssembler::kCountExcludesReceiver);
@@ -1889,7 +1917,6 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
// Copy arguments two slots higher in memory, overwriting the original
// receiver and padding.
{
- Label loop;
Register copy_from = x10;
Register copy_to = x11;
Register count = x12;
@@ -1897,7 +1924,7 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
__ Mov(count, argc);
__ Sub(last_arg_slot, argc, 1);
__ SlotAddress(copy_from, last_arg_slot);
- __ Add(copy_to, copy_from, 2 * kPointerSize);
+ __ Add(copy_to, copy_from, 2 * kSystemPointerSize);
__ CopyDoubleWords(copy_to, copy_from, count,
TurboAssembler::kSrcLessThanDst);
// Drop two slots. These are copies of the last two arguments.
@@ -1964,8 +1991,8 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
Register scratch = x10;
__ SlotAddress(scratch, argc);
__ Ldp(arguments_list, this_argument,
- MemOperand(scratch, 1 * kPointerSize));
- __ Ldr(target, MemOperand(scratch, 3 * kPointerSize));
+ MemOperand(scratch, 1 * kSystemPointerSize));
+ __ Ldr(target, MemOperand(scratch, 3 * kSystemPointerSize));
__ Drop(4); // Drop the undefined values we pushed above.
__ DropArguments(argc, TurboAssembler::kCountExcludesReceiver);
@@ -2041,8 +2068,9 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
// slot (argc + 1).
Register scratch = x10;
__ SlotAddress(scratch, argc);
- __ Ldp(new_target, arguments_list, MemOperand(scratch, 1 * kPointerSize));
- __ Ldr(target, MemOperand(scratch, 3 * kPointerSize));
+ __ Ldp(new_target, arguments_list,
+ MemOperand(scratch, 1 * kSystemPointerSize));
+ __ Ldr(target, MemOperand(scratch, 3 * kSystemPointerSize));
__ Cmp(argc, 2);
__ CmovX(new_target, target, ls); // target if argc <= 2.
@@ -2153,7 +2181,8 @@ void Generate_PrepareForCopyingVarargs(MacroAssembler* masm, Register argc,
__ Mov(dst, len); // CopySlots will corrupt dst.
__ CopySlots(dst, src, slots_to_copy);
__ Add(scratch, argc, 1);
- __ Poke(padreg, Operand(scratch, LSL, kPointerSizeLog2)); // Store padding.
+ __ Poke(padreg,
+ Operand(scratch, LSL, kSystemPointerSizeLog2)); // Store padding.
}
__ Bind(&exit);
@@ -2175,7 +2204,7 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
// Allow x2 to be a FixedArray, or a FixedDoubleArray if x4 == 0.
Label ok, fail;
__ AssertNotSmi(x2, AbortReason::kOperandIsNotAFixedArray);
- __ Ldr(x10, FieldMemOperand(x2, HeapObject::kMapOffset));
+ __ LoadTaggedPointerField(x10, FieldMemOperand(x2, HeapObject::kMapOffset));
__ Ldrh(x13, FieldMemOperand(x10, Map::kInstanceTypeOffset));
__ Cmp(x13, FIXED_ARRAY_TYPE);
__ B(eq, &ok);
@@ -2218,10 +2247,10 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
// TODO(all): Consider using Ldp and Stp.
__ Bind(&loop);
__ Sub(len, len, 1);
- __ Ldr(scratch, MemOperand(src, kPointerSize, PostIndex));
+ __ LoadAnyTaggedField(scratch, MemOperand(src, kTaggedSize, PostIndex));
__ Cmp(scratch, the_hole_value);
__ Csel(scratch, scratch, undefined_value, ne);
- __ Poke(scratch, Operand(len, LSL, kPointerSizeLog2));
+ __ Poke(scratch, Operand(len, LSL, kSystemPointerSizeLog2));
__ Cbnz(len, &loop);
}
__ Bind(&done);
@@ -2251,7 +2280,7 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
if (mode == CallOrConstructMode::kConstruct) {
Label new_target_constructor, new_target_not_constructor;
__ JumpIfSmi(x3, &new_target_not_constructor);
- __ Ldr(x5, FieldMemOperand(x3, HeapObject::kMapOffset));
+ __ LoadTaggedPointerField(x5, FieldMemOperand(x3, HeapObject::kMapOffset));
__ Ldrb(x5, FieldMemOperand(x5, Map::kBitFieldOffset));
__ TestAndBranchIfAnySet(x5, Map::IsConstructorBit::kMask,
&new_target_constructor);
@@ -2282,8 +2311,9 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
{
__ Ldr(scratch,
MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ Ldr(scratch,
- FieldMemOperand(scratch, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadTaggedPointerField(
+ scratch,
+ FieldMemOperand(scratch, JSFunction::kSharedFunctionInfoOffset));
__ Ldrh(len,
FieldMemOperand(scratch,
SharedFunctionInfo::kFormalParameterCountOffset));
@@ -2311,7 +2341,7 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
// Push varargs.
{
Register dst = x13;
- __ Add(args_fp, args_fp, 2 * kPointerSize);
+ __ Add(args_fp, args_fp, 2 * kSystemPointerSize);
__ SlotAddress(dst, 0);
__ CopyDoubleWords(dst, args_fp, len);
}
@@ -2337,7 +2367,8 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
// See ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
// Check that function is not a "classConstructor".
Label class_constructor;
- __ Ldr(x2, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadTaggedPointerField(
+ x2, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
__ Ldr(w3, FieldMemOperand(x2, SharedFunctionInfo::kFlagsOffset));
__ TestAndBranchIfAnySet(w3, SharedFunctionInfo::IsClassConstructorBit::kMask,
&class_constructor);
@@ -2345,7 +2376,8 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
// Enter the context of the function; ToObject has to run in the function
// context, and we also need to take the global proxy from the function
// context in case of conversion.
- __ Ldr(cp, FieldMemOperand(x1, JSFunction::kContextOffset));
+ __ LoadTaggedPointerField(cp,
+ FieldMemOperand(x1, JSFunction::kContextOffset));
// We need to convert the receiver for non-native sloppy mode functions.
Label done_convert;
__ TestAndBranchIfAnySet(w3,
@@ -2396,7 +2428,8 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ Pop(cp, x1, x0, padreg);
__ SmiUntag(x0);
}
- __ Ldr(x2, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadTaggedPointerField(
+ x2, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
__ Bind(&convert_receiver);
}
__ Poke(x3, Operand(x0, LSL, kXRegSizeLog2));
@@ -2439,10 +2472,10 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
// Load [[BoundArguments]] into x2 and length of that into x4.
Label no_bound_arguments;
- __ Ldr(bound_argv,
- FieldMemOperand(x1, JSBoundFunction::kBoundArgumentsOffset));
- __ SmiUntag(bound_argc,
- FieldMemOperand(bound_argv, FixedArray::kLengthOffset));
+ __ LoadTaggedPointerField(
+ bound_argv, FieldMemOperand(x1, JSBoundFunction::kBoundArgumentsOffset));
+ __ SmiUntagField(bound_argc,
+ FieldMemOperand(bound_argv, FixedArray::kLengthOffset));
__ Cbz(bound_argc, &no_bound_arguments);
{
// ----------- S t a t e -------------
@@ -2466,7 +2499,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
// here which will cause x10 to become negative.
__ Sub(x10, sp, x10);
// Check if the arguments will overflow the stack.
- __ Cmp(x10, Operand(bound_argc, LSL, kPointerSizeLog2));
+ __ Cmp(x10, Operand(bound_argc, LSL, kSystemPointerSizeLog2));
__ B(hs, &done);
__ TailCallRuntime(Runtime::kThrowStackOverflow);
__ Bind(&done);
@@ -2483,7 +2516,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
// Load receiver before we start moving the arguments. We will only
// need this in this path because the bound arguments are odd.
Register receiver = x14;
- __ Peek(receiver, Operand(argc, LSL, kPointerSizeLog2));
+ __ Peek(receiver, Operand(argc, LSL, kSystemPointerSizeLog2));
// Claim space we need. If argc is even, slots_to_claim = bound_argc + 1,
// as we need one extra padding slot. If argc is odd, we know that the
@@ -2522,12 +2555,12 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
__ SlotAddress(copy_to, argc);
__ Add(argc, argc,
bound_argc); // Update argc to include bound arguments.
- __ Lsl(counter, bound_argc, kPointerSizeLog2);
+ __ Lsl(counter, bound_argc, kSystemPointerSizeLog2);
__ Bind(&loop);
- __ Sub(counter, counter, kPointerSize);
- __ Ldr(scratch, MemOperand(bound_argv, counter));
+ __ Sub(counter, counter, kTaggedSize);
+ __ LoadAnyTaggedField(scratch, MemOperand(bound_argv, counter));
// Poke into claimed area of stack.
- __ Str(scratch, MemOperand(copy_to, kPointerSize, PostIndex));
+ __ Str(scratch, MemOperand(copy_to, kSystemPointerSize, PostIndex));
__ Cbnz(counter, &loop);
}
@@ -2536,8 +2569,8 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
Register scratch = x10;
__ Tbz(bound_argc, 0, &done);
// Store receiver.
- __ Add(scratch, sp, Operand(total_argc, LSL, kPointerSizeLog2));
- __ Str(receiver, MemOperand(scratch, kPointerSize, PostIndex));
+ __ Add(scratch, sp, Operand(total_argc, LSL, kSystemPointerSizeLog2));
+ __ Str(receiver, MemOperand(scratch, kSystemPointerSize, PostIndex));
__ Tbnz(total_argc, 0, &done);
// Store padding.
__ Str(padreg, MemOperand(scratch));
@@ -2559,14 +2592,16 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
__ AssertBoundFunction(x1);
// Patch the receiver to [[BoundThis]].
- __ Ldr(x10, FieldMemOperand(x1, JSBoundFunction::kBoundThisOffset));
- __ Poke(x10, Operand(x0, LSL, kPointerSizeLog2));
+ __ LoadAnyTaggedField(x10,
+ FieldMemOperand(x1, JSBoundFunction::kBoundThisOffset));
+ __ Poke(x10, Operand(x0, LSL, kSystemPointerSizeLog2));
// Push the [[BoundArguments]] onto the stack.
Generate_PushBoundArguments(masm);
// Call the [[BoundTargetFunction]] via the Call builtin.
- __ Ldr(x1, FieldMemOperand(x1, JSBoundFunction::kBoundTargetFunctionOffset));
+ __ LoadTaggedPointerField(
+ x1, FieldMemOperand(x1, JSBoundFunction::kBoundTargetFunctionOffset));
__ Jump(BUILTIN_CODE(masm->isolate(), Call_ReceiverIsAny),
RelocInfo::CODE_TARGET);
}
@@ -2634,7 +2669,8 @@ void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
Label call_generic_stub;
// Jump to JSBuiltinsConstructStub or JSConstructStubGeneric.
- __ Ldr(x4, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadTaggedPointerField(
+ x4, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
__ Ldr(w4, FieldMemOperand(x4, SharedFunctionInfo::kFlagsOffset));
__ TestAndBranchIfAllClear(
w4, SharedFunctionInfo::ConstructAsBuiltinBit::kMask, &call_generic_stub);
@@ -2665,13 +2701,14 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
Label done;
__ Cmp(x1, x3);
__ B(ne, &done);
- __ Ldr(x3,
- FieldMemOperand(x1, JSBoundFunction::kBoundTargetFunctionOffset));
+ __ LoadTaggedPointerField(
+ x3, FieldMemOperand(x1, JSBoundFunction::kBoundTargetFunctionOffset));
__ Bind(&done);
}
// Construct the [[BoundTargetFunction]] via the Construct builtin.
- __ Ldr(x1, FieldMemOperand(x1, JSBoundFunction::kBoundTargetFunctionOffset));
+ __ LoadTaggedPointerField(
+ x1, FieldMemOperand(x1, JSBoundFunction::kBoundTargetFunctionOffset));
__ Jump(BUILTIN_CODE(masm->isolate(), Construct), RelocInfo::CODE_TARGET);
}
@@ -2689,7 +2726,7 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
__ JumpIfSmi(x1, &non_constructor);
// Check if target has a [[Construct]] internal method.
- __ Ldr(x4, FieldMemOperand(x1, HeapObject::kMapOffset));
+ __ LoadTaggedPointerField(x4, FieldMemOperand(x1, HeapObject::kMapOffset));
__ Ldrb(x2, FieldMemOperand(x4, Map::kBitFieldOffset));
__ TestAndBranchIfAllClear(x2, Map::IsConstructorBit::kMask,
&non_constructor);
@@ -2792,123 +2829,198 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
Register argc_actual = x0; // Excluding the receiver.
Register argc_expected = x2; // Excluding the receiver.
Register function = x1;
+ Register argc_actual_minus_expected = x5;
- Label dont_adapt_arguments, stack_overflow;
+ Label create_adaptor_frame, dont_adapt_arguments, stack_overflow,
+ adapt_arguments_in_place;
- Label enough_arguments;
__ Cmp(argc_expected, SharedFunctionInfo::kDontAdaptArgumentsSentinel);
__ B(eq, &dont_adapt_arguments);
- EnterArgumentsAdaptorFrame(masm);
-
- Register copy_from = x10;
- Register copy_end = x11;
- Register copy_to = x12;
- Register argc_to_copy = x13;
- Register argc_unused_actual = x14;
- Register scratch1 = x15, scratch2 = x16;
-
- // We need slots for the expected arguments, with one extra slot for the
- // receiver.
- __ RecordComment("-- Stack check --");
- __ Add(scratch1, argc_expected, 1);
- Generate_StackOverflowCheck(masm, scratch1, &stack_overflow);
-
- // Round up number of slots to be even, to maintain stack alignment.
- __ RecordComment("-- Allocate callee frame slots --");
- __ Add(scratch1, scratch1, 1);
- __ Bic(scratch1, scratch1, 1);
- __ Claim(scratch1, kPointerSize);
-
- __ Mov(copy_to, sp);
-
- // Preparing the expected arguments is done in four steps, the order of
- // which is chosen so we can use LDP/STP and avoid conditional branches as
- // much as possible.
-
- // (1) If we don't have enough arguments, fill the remaining expected
- // arguments with undefined, otherwise skip this step.
- __ Subs(scratch1, argc_actual, argc_expected);
- __ Csel(argc_unused_actual, xzr, scratch1, lt);
- __ Csel(argc_to_copy, argc_expected, argc_actual, ge);
- __ B(ge, &enough_arguments);
-
- // Fill the remaining expected arguments with undefined.
- __ RecordComment("-- Fill slots with undefined --");
- __ Sub(copy_end, copy_to, Operand(scratch1, LSL, kPointerSizeLog2));
- __ LoadRoot(scratch1, RootIndex::kUndefinedValue);
-
- Label fill;
- __ Bind(&fill);
- __ Stp(scratch1, scratch1, MemOperand(copy_to, 2 * kPointerSize, PostIndex));
- // We might write one slot extra, but that is ok because we'll overwrite it
- // below.
- __ Cmp(copy_end, copy_to);
- __ B(hi, &fill);
-
- // Correct copy_to, for the case where we wrote one additional slot.
- __ Mov(copy_to, copy_end);
-
- __ Bind(&enough_arguments);
- // (2) Copy all of the actual arguments, or as many as we need.
- Label skip_copy;
- __ RecordComment("-- Copy actual arguments --");
- __ Cbz(argc_to_copy, &skip_copy);
- __ Add(copy_end, copy_to, Operand(argc_to_copy, LSL, kPointerSizeLog2));
- __ Add(copy_from, fp, 2 * kPointerSize);
- // Adjust for difference between actual and expected arguments.
- __ Add(copy_from, copy_from,
- Operand(argc_unused_actual, LSL, kPointerSizeLog2));
-
- // Copy arguments. We use load/store pair instructions, so we might overshoot
- // by one slot, but since we copy the arguments starting from the last one, if
- // we do overshoot, the extra slot will be overwritten later by the receiver.
- Label copy_2_by_2;
- __ Bind(&copy_2_by_2);
- __ Ldp(scratch1, scratch2,
- MemOperand(copy_from, 2 * kPointerSize, PostIndex));
- __ Stp(scratch1, scratch2, MemOperand(copy_to, 2 * kPointerSize, PostIndex));
- __ Cmp(copy_end, copy_to);
- __ B(hi, &copy_2_by_2);
- __ Bind(&skip_copy);
-
- // (3) Store padding, which might be overwritten by the receiver, if it is not
- // necessary.
- __ RecordComment("-- Store padding --");
- __ Str(padreg, MemOperand(fp, -5 * kPointerSize));
-
- // (4) Store receiver. Calculate target address from the sp to avoid checking
- // for padding. Storing the receiver will overwrite either the extra slot
- // we copied with the actual arguments, if we did copy one, or the padding we
- // stored above.
- __ RecordComment("-- Store receiver --");
- __ Add(copy_from, fp, 2 * kPointerSize);
- __ Ldr(scratch1, MemOperand(copy_from, argc_actual, LSL, kPointerSizeLog2));
- __ Str(scratch1, MemOperand(sp, argc_expected, LSL, kPointerSizeLog2));
-
- // Arguments have been adapted. Now call the entry point.
- __ RecordComment("-- Call entry point --");
- __ Mov(argc_actual, argc_expected);
- // x0 : expected number of arguments
- // x1 : function (passed through to callee)
- // x3 : new target (passed through to callee)
- static_assert(kJavaScriptCallCodeStartRegister == x2, "ABI mismatch");
- __ Ldr(x2, FieldMemOperand(function, JSFunction::kCodeOffset));
- __ CallCodeObject(x2);
+ // When the difference between argc_actual and argc_expected is odd, we
+ // create an arguments adaptor frame.
+ __ Sub(argc_actual_minus_expected, argc_actual, argc_expected);
+ __ Tbnz(argc_actual_minus_expected, 0, &create_adaptor_frame);
- // Store offset of return address for deoptimizer.
- masm->isolate()->heap()->SetArgumentsAdaptorDeoptPCOffset(masm->pc_offset());
+ // When the difference is even, check if we are allowed to adjust the
+ // existing frame instead.
+ __ LoadTaggedPointerField(
+ x4, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
+ __ Ldr(w4, FieldMemOperand(x4, SharedFunctionInfo::kFlagsOffset));
+ __ TestAndBranchIfAnySet(
+ w4, SharedFunctionInfo::IsSafeToSkipArgumentsAdaptorBit::kMask,
+ &adapt_arguments_in_place);
+
+ // -------------------------------------------
+ // Create an arguments adaptor frame.
+ // -------------------------------------------
+ __ Bind(&create_adaptor_frame);
+ {
+ __ RecordComment("-- Adapt arguments --");
+ EnterArgumentsAdaptorFrame(masm);
- // Exit frame and return.
- LeaveArgumentsAdaptorFrame(masm);
- __ Ret();
+ Register copy_from = x10;
+ Register copy_end = x11;
+ Register copy_to = x12;
+ Register argc_to_copy = x13;
+ Register argc_unused_actual = x14;
+ Register scratch1 = x15, scratch2 = x16;
+
+ // We need slots for the expected arguments, with one extra slot for the
+ // receiver.
+ __ RecordComment("-- Stack check --");
+ __ Add(scratch1, argc_expected, 1);
+ Generate_StackOverflowCheck(masm, scratch1, &stack_overflow);
+
+ // Round up number of slots to be even, to maintain stack alignment.
+ __ RecordComment("-- Allocate callee frame slots --");
+ __ Add(scratch1, scratch1, 1);
+ __ Bic(scratch1, scratch1, 1);
+ __ Claim(scratch1, kSystemPointerSize);
+
+ __ Mov(copy_to, sp);
+
+ // Preparing the expected arguments is done in four steps, the order of
+ // which is chosen so we can use LDP/STP and avoid conditional branches as
+ // much as possible.
+
+ // (1) If we don't have enough arguments, fill the remaining expected
+ // arguments with undefined, otherwise skip this step.
+ Label enough_arguments;
+ __ Subs(scratch1, argc_actual, argc_expected);
+ __ Csel(argc_unused_actual, xzr, scratch1, lt);
+ __ Csel(argc_to_copy, argc_expected, argc_actual, ge);
+ __ B(ge, &enough_arguments);
+
+ // Fill the remaining expected arguments with undefined.
+ __ RecordComment("-- Fill slots with undefined --");
+ __ Sub(copy_end, copy_to, Operand(scratch1, LSL, kSystemPointerSizeLog2));
+ __ LoadRoot(scratch1, RootIndex::kUndefinedValue);
+
+ Label fill;
+ __ Bind(&fill);
+ __ Stp(scratch1, scratch1,
+ MemOperand(copy_to, 2 * kSystemPointerSize, PostIndex));
+ // We might write one slot extra, but that is ok because we'll overwrite it
+ // below.
+ __ Cmp(copy_end, copy_to);
+ __ B(hi, &fill);
+
+ // Correct copy_to, for the case where we wrote one additional slot.
+ __ Mov(copy_to, copy_end);
+
+ __ Bind(&enough_arguments);
+ // (2) Copy all of the actual arguments, or as many as we need.
+ Label skip_copy;
+ __ RecordComment("-- Copy actual arguments --");
+ __ Cbz(argc_to_copy, &skip_copy);
+ __ Add(copy_end, copy_to,
+ Operand(argc_to_copy, LSL, kSystemPointerSizeLog2));
+ __ Add(copy_from, fp, 2 * kSystemPointerSize);
+ // Adjust for difference between actual and expected arguments.
+ __ Add(copy_from, copy_from,
+ Operand(argc_unused_actual, LSL, kSystemPointerSizeLog2));
+
+ // Copy arguments. We use load/store pair instructions, so we might
+ // overshoot by one slot, but since we copy the arguments starting from the
+ // last one, if we do overshoot, the extra slot will be overwritten later by
+ // the receiver.
+ Label copy_2_by_2;
+ __ Bind(&copy_2_by_2);
+ __ Ldp(scratch1, scratch2,
+ MemOperand(copy_from, 2 * kSystemPointerSize, PostIndex));
+ __ Stp(scratch1, scratch2,
+ MemOperand(copy_to, 2 * kSystemPointerSize, PostIndex));
+ __ Cmp(copy_end, copy_to);
+ __ B(hi, &copy_2_by_2);
+ __ Bind(&skip_copy);
+
+ // (3) Store padding, which might be overwritten by the receiver, if it is
+ // not necessary.
+ __ RecordComment("-- Store padding --");
+ __ Str(padreg, MemOperand(fp, -5 * kSystemPointerSize));
+
+ // (4) Store receiver. Calculate target address from the sp to avoid
+ // checking for padding. Storing the receiver will overwrite either the
+ // extra slot we copied with the actual arguments, if we did copy one, or
+ // the padding we stored above.
+ __ RecordComment("-- Store receiver --");
+ __ Add(copy_from, fp, 2 * kSystemPointerSize);
+ __ Ldr(scratch1,
+ MemOperand(copy_from, argc_actual, LSL, kSystemPointerSizeLog2));
+ __ Str(scratch1,
+ MemOperand(sp, argc_expected, LSL, kSystemPointerSizeLog2));
+
+ // Arguments have been adapted. Now call the entry point.
+ __ RecordComment("-- Call entry point --");
+ __ Mov(argc_actual, argc_expected);
+ // x0 : expected number of arguments
+ // x1 : function (passed through to callee)
+ // x3 : new target (passed through to callee)
+ static_assert(kJavaScriptCallCodeStartRegister == x2, "ABI mismatch");
+ __ LoadTaggedPointerField(
+ x2, FieldMemOperand(function, JSFunction::kCodeOffset));
+ __ CallCodeObject(x2);
- // Call the entry point without adapting the arguments.
- __ RecordComment("-- Call without adapting args --");
+ // Store offset of return address for deoptimizer.
+ masm->isolate()->heap()->SetArgumentsAdaptorDeoptPCOffset(
+ masm->pc_offset());
+
+ // Exit frame and return.
+ LeaveArgumentsAdaptorFrame(masm);
+ __ Ret();
+ }
+
+ // -----------------------------------------
+ // Adapt arguments in the existing frame.
+ // -----------------------------------------
+ __ Bind(&adapt_arguments_in_place);
+ {
+ __ RecordComment("-- Update arguments in place --");
+ // The callee cannot observe the actual arguments, so it's safe to just
+ // pass the expected arguments by massaging the stack appropriately. See
+ // http://bit.ly/v8-faster-calls-with-arguments-mismatch for details.
+ Label under_application, over_application;
+ __ Tbnz(argc_actual_minus_expected, kXSignBit, &under_application);
+
+ __ Bind(&over_application);
+ {
+      // Remove superfluous arguments from the stack. The number of
+      // superfluous arguments is even.
+ __ RecordComment("-- Over-application --");
+ __ Mov(argc_actual, argc_expected);
+ __ Drop(argc_actual_minus_expected);
+ __ B(&dont_adapt_arguments);
+ }
+
+ __ Bind(&under_application);
+ {
+ // Fill remaining expected arguments with undefined values.
+ __ RecordComment("-- Under-application --");
+ Label fill;
+ Register undef_value = x16;
+ __ LoadRoot(undef_value, RootIndex::kUndefinedValue);
+ __ Bind(&fill);
+ __ Add(argc_actual, argc_actual, 2);
+ __ Push(undef_value, undef_value);
+ __ Cmp(argc_actual, argc_expected);
+ __ B(lt, &fill);
+ __ B(&dont_adapt_arguments);
+ }
+ }
+
+ // -------------------------------------------
+ // Dont adapt arguments.
+ // -------------------------------------------
__ Bind(&dont_adapt_arguments);
- static_assert(kJavaScriptCallCodeStartRegister == x2, "ABI mismatch");
- __ Ldr(x2, FieldMemOperand(function, JSFunction::kCodeOffset));
- __ JumpCodeObject(x2);
+ {
+ // Call the entry point without adapting the arguments.
+ __ RecordComment("-- Call without adapting args --");
+ static_assert(kJavaScriptCallCodeStartRegister == x2, "ABI mismatch");
+ __ LoadTaggedPointerField(
+ x2, FieldMemOperand(function, JSFunction::kCodeOffset));
+ __ JumpCodeObject(x2);
+ }
__ Bind(&stack_overflow);
__ RecordComment("-- Stack overflow --");
@@ -2944,8 +3056,9 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
// function.
__ Push(kWasmInstanceRegister, kWasmCompileLazyFuncIndexRegister);
// Load the correct CEntry builtin from the instance object.
- __ Ldr(x2, FieldMemOperand(kWasmInstanceRegister,
- WasmInstanceObject::kCEntryStubOffset));
+ __ LoadTaggedPointerField(
+ x2, FieldMemOperand(kWasmInstanceRegister,
+ WasmInstanceObject::kCEntryStubOffset));
// Initialize the JavaScript context with 0. CEntry will use it to
// set the current context on the isolate.
__ Mov(cp, Smi::zero());
@@ -3011,7 +3124,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
if (argv_mode == kArgvOnStack) {
__ SlotAddress(temp_argv, x0);
// - Adjust for the receiver.
- __ Sub(temp_argv, temp_argv, 1 * kPointerSize);
+ __ Sub(temp_argv, temp_argv, 1 * kSystemPointerSize);
}
// Reserve three slots to preserve x21-x23 callee-saved registers.
@@ -3023,9 +3136,9 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
builtin_exit_frame ? StackFrame::BUILTIN_EXIT : StackFrame::EXIT);
// Poke callee-saved registers into reserved space.
- __ Poke(argv, 1 * kPointerSize);
- __ Poke(argc, 2 * kPointerSize);
- __ Poke(target, 3 * kPointerSize);
+ __ Poke(argv, 1 * kSystemPointerSize);
+ __ Poke(argc, 2 * kSystemPointerSize);
+ __ Poke(target, 3 * kSystemPointerSize);
// We normally only keep tagged values in callee-saved registers, as they
// could be pushed onto the stack by called stubs and functions, and on the
@@ -3096,9 +3209,9 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
// Restore callee-saved registers x21-x23.
__ Mov(x11, argc);
- __ Peek(argv, 1 * kPointerSize);
- __ Peek(argc, 2 * kPointerSize);
- __ Peek(target, 3 * kPointerSize);
+ __ Peek(argv, 1 * kSystemPointerSize);
+ __ Peek(argc, 2 * kSystemPointerSize);
+ __ Peek(target, 3 * kSystemPointerSize);
__ LeaveExitFrame(save_doubles == kSaveFPRegs, x10, x9);
if (argv_mode == kArgvOnStack) {
@@ -3178,7 +3291,7 @@ void Builtins::Generate_DoubleToI(MacroAssembler* masm) {
DoubleRegister double_scratch = temps.AcquireD();
// Account for saved regs.
- const int kArgumentOffset = 2 * kPointerSize;
+ const int kArgumentOffset = 2 * kSystemPointerSize;
__ Push(result, scratch1); // scratch1 is also pushed to preserve alignment.
__ Peek(double_scratch, kArgumentOffset);
@@ -3232,98 +3345,6 @@ void Builtins::Generate_DoubleToI(MacroAssembler* masm) {
__ Ret();
}
-void Builtins::Generate_MathPowInternal(MacroAssembler* masm) {
- Register exponent_integer = x12;
- Register saved_lr = x19;
- VRegister result_double = d0;
- VRegister base_double = d0;
- VRegister exponent_double = d1;
- VRegister base_double_copy = d2;
- VRegister scratch1_double = d6;
- VRegister scratch0_double = d7;
-
- // A fast-path for integer exponents.
- Label exponent_is_integer;
- // Allocate a heap number for the result, and return it.
- Label done;
-
- // Unpack the inputs.
-
- // Handle double (heap number) exponents.
- // Detect integer exponents stored as doubles and handle those in the
- // integer fast-path.
- __ TryRepresentDoubleAsInt64(exponent_integer, exponent_double,
- scratch0_double, &exponent_is_integer);
-
- {
- AllowExternalCallThatCantCauseGC scope(masm);
- __ Mov(saved_lr, lr);
- __ CallCFunction(ExternalReference::power_double_double_function(), 0, 2);
- __ Mov(lr, saved_lr);
- __ B(&done);
- }
-
- __ Bind(&exponent_is_integer);
-
- // Find abs(exponent). For negative exponents, we can find the inverse later.
- Register exponent_abs = x13;
- __ Cmp(exponent_integer, 0);
- __ Cneg(exponent_abs, exponent_integer, mi);
-
- // Repeatedly multiply to calculate the power.
- // result = 1.0;
- // For each bit n (exponent_integer{n}) {
- // if (exponent_integer{n}) {
- // result *= base;
- // }
- // base *= base;
- // if (remaining bits in exponent_integer are all zero) {
- // break;
- // }
- // }
- Label power_loop, power_loop_entry, power_loop_exit;
- __ Fmov(scratch1_double, base_double);
- __ Fmov(base_double_copy, base_double);
- __ Fmov(result_double, 1.0);
- __ B(&power_loop_entry);
-
- __ Bind(&power_loop);
- __ Fmul(scratch1_double, scratch1_double, scratch1_double);
- __ Lsr(exponent_abs, exponent_abs, 1);
- __ Cbz(exponent_abs, &power_loop_exit);
-
- __ Bind(&power_loop_entry);
- __ Tbz(exponent_abs, 0, &power_loop);
- __ Fmul(result_double, result_double, scratch1_double);
- __ B(&power_loop);
-
- __ Bind(&power_loop_exit);
-
- // If the exponent was positive, result_double holds the result.
- __ Tbz(exponent_integer, kXSignBit, &done);
-
- // The exponent was negative, so find the inverse.
- __ Fmov(scratch0_double, 1.0);
- __ Fdiv(result_double, scratch0_double, result_double);
- // ECMA-262 only requires Math.pow to return an 'implementation-dependent
- // approximation' of base^exponent. However, mjsunit/math-pow uses Math.pow
- // to calculate the subnormal value 2^-1074. This method of calculating
- // negative powers doesn't work because 2^1074 overflows to infinity. To
- // catch this corner-case, we bail out if the result was 0. (This can only
- // occur if the divisor is infinity or the base is zero.)
- __ Fcmp(result_double, 0.0);
- __ B(&done, ne);
-
- AllowExternalCallThatCantCauseGC scope(masm);
- __ Mov(saved_lr, lr);
- __ Fmov(base_double, base_double_copy);
- __ Scvtf(exponent_double, exponent_integer);
- __ CallCFunction(ExternalReference::power_double_double_function(), 0, 2);
- __ Mov(lr, saved_lr);
- __ Bind(&done);
- __ Ret();
-}
-
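[Editorial sketch, not part of the diff.] The deleted Generate_MathPowInternal implemented the integer fast path with binary exponentiation (square-and-multiply over the bits of the exponent) and inverted the result for negative exponents, bailing out to the C power function when the inversion underflowed to zero (the 2^-1074 corner case noted in the removed comment). A rough C++ sketch of that algorithm, for reference only; power_double_double below is a stand-in for the runtime's power_double_double_function:

// Square-and-multiply fast path, mirroring the removed assembly.
#include <cmath>
#include <cstdint>
#include <cstdlib>

double power_double_double(double base, double exponent) {
  return std::pow(base, exponent);  // stand-in for the C fallback
}

double PowIntegerFastPath(double base, int64_t exponent) {
  uint64_t bits = static_cast<uint64_t>(std::llabs(exponent));
  double scratch = base;   // holds base^(2^n) for the bit being consumed
  double result = 1.0;
  while (bits != 0) {
    if (bits & 1) result *= scratch;  // multiply in the lowest set bit
    scratch *= scratch;
    bits >>= 1;
  }
  if (exponent >= 0) return result;
  // Negative exponent: invert. If the division underflows to zero (divisor
  // was infinity or the base was zero), fall back to the C implementation,
  // as the removed code did.
  double inverted = 1.0 / result;
  if (inverted == 0.0) {
    return power_double_double(base, static_cast<double>(exponent));
  }
  return inverted;
}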
void Builtins::Generate_InternalArrayConstructorImpl(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- x0 : argc
@@ -3340,8 +3361,9 @@ void Builtins::Generate_InternalArrayConstructorImpl(MacroAssembler* masm) {
Label unexpected_map, map_ok;
// Initial map for the builtin Array function should be a map.
- __ Ldr(x10, FieldMemOperand(constructor,
- JSFunction::kPrototypeOrInitialMapOffset));
+ __ LoadTaggedPointerField(
+ x10,
+ FieldMemOperand(constructor, JSFunction::kPrototypeOrInitialMapOffset));
// Will both indicate a nullptr and a Smi.
__ JumpIfSmi(x10, &unexpected_map);
__ JumpIfObjectType(x10, x10, x11, MAP_TYPE, &map_ok);
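[Editorial sketch, not part of the diff.] The recurring substitution in this change, plain Ldr loads of tagged fields becoming LoadTaggedPointerField / LoadAnyTaggedField, prepares the builtins for pointer compression: a tagged field may then hold a 32-bit value that has to be re-based against an isolate root on load, and the two helpers differ in whether the field is known to be a heap object pointer or may also be a Smi. A conceptual sketch of that distinction; the exact decompression scheme is configuration-dependent and this is not the MacroAssembler implementation:

// Conceptual sketch only; without pointer compression both helpers
// degenerate to an ordinary 64-bit load.
#include <cstdint>

using Tagged_t = uint32_t;  // on-heap field representation under compression
using Address = uint64_t;   // full machine word

// LoadTaggedPointerField: the field is known to hold a heap object pointer.
Address DecompressTaggedPointer(Address isolate_root, Tagged_t field) {
  return isolate_root + static_cast<Address>(field);
}

// LoadAnyTaggedField: the field may hold either a Smi or a heap object
// pointer; Smis may need separate handling depending on the Smi encoding.
Address DecompressAnyTagged(Address isolate_root, Tagged_t field) {
  return isolate_root + static_cast<Address>(field);
}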
@@ -3351,8 +3373,9 @@ void Builtins::Generate_InternalArrayConstructorImpl(MacroAssembler* masm) {
Register kind = w3;
// Figure out the right elements kind
- __ Ldr(x10, FieldMemOperand(constructor,
- JSFunction::kPrototypeOrInitialMapOffset));
+ __ LoadTaggedPointerField(
+ x10,
+ FieldMemOperand(constructor, JSFunction::kPrototypeOrInitialMapOffset));
// Retrieve elements_kind from map.
__ LoadElementsKindFromMap(kind, x10);
@@ -3531,32 +3554,24 @@ void CallApiFunctionAndReturn(MacroAssembler* masm, Register function_address,
void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
// ----------- S t a t e -------------
- // -- cp : kTargetContext
- // -- r1 : kApiFunctionAddress
- // -- r2 : kArgc
- // --
+ // -- cp : context
+ // -- x1 : api function address
+ // -- x2 : arguments count (not including the receiver)
+ // -- x3 : call data
+ // -- x0 : holder
// -- sp[0] : last argument
// -- ...
// -- sp[(argc - 1) * 8] : first argument
// -- sp[(argc + 0) * 8] : receiver
- // -- sp[(argc + 1) * 8] : kHolder
- // -- sp[(argc + 2) * 8] : kCallData
// -----------------------------------
Register api_function_address = x1;
Register argc = x2;
+ Register call_data = x3;
+ Register holder = x0;
Register scratch = x4;
- Register index = x5; // For indexing MemOperands.
-
- DCHECK(!AreAliased(api_function_address, argc, scratch, index));
- // Stack offsets (without argc).
- static constexpr int kReceiverOffset = 0;
- static constexpr int kHolderOffset = kReceiverOffset + 1;
- static constexpr int kCallDataOffset = kHolderOffset + 1;
-
- // Extra stack arguments are: the receiver, kHolder, kCallData.
- static constexpr int kExtraStackArgumentCount = 3;
+ DCHECK(!AreAliased(api_function_address, argc, call_data, holder, scratch));
typedef FunctionCallbackArguments FCA;
@@ -3571,35 +3586,33 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
// Set up FunctionCallbackInfo's implicit_args on the stack as follows:
//
// Target state:
- // sp[0 * kPointerSize]: kHolder
- // sp[1 * kPointerSize]: kIsolate
- // sp[2 * kPointerSize]: undefined (kReturnValueDefaultValue)
- // sp[3 * kPointerSize]: undefined (kReturnValue)
- // sp[4 * kPointerSize]: kData
- // sp[5 * kPointerSize]: undefined (kNewTarget)
+ // sp[0 * kSystemPointerSize]: kHolder
+ // sp[1 * kSystemPointerSize]: kIsolate
+ // sp[2 * kSystemPointerSize]: undefined (kReturnValueDefaultValue)
+ // sp[3 * kSystemPointerSize]: undefined (kReturnValue)
+ // sp[4 * kSystemPointerSize]: kData
+ // sp[5 * kSystemPointerSize]: undefined (kNewTarget)
// Reserve space on the stack.
- __ Sub(sp, sp, Operand(FCA::kArgsLength * kPointerSize));
+ __ Sub(sp, sp, Operand(FCA::kArgsLength * kSystemPointerSize));
// kHolder.
- __ Add(index, argc, Operand(FCA::kArgsLength + kHolderOffset));
- __ Ldr(scratch, MemOperand(sp, index, LSL, kPointerSizeLog2));
- __ Str(scratch, MemOperand(sp, 0 * kPointerSize));
+ __ Str(holder, MemOperand(sp, 0 * kSystemPointerSize));
// kIsolate.
__ Mov(scratch, ExternalReference::isolate_address(masm->isolate()));
- __ Str(scratch, MemOperand(sp, 1 * kPointerSize));
+ __ Str(scratch, MemOperand(sp, 1 * kSystemPointerSize));
- // kReturnValueDefaultValue, kReturnValue, and kNewTarget.
+ // kReturnValueDefaultValue and kReturnValue.
__ LoadRoot(scratch, RootIndex::kUndefinedValue);
- __ Str(scratch, MemOperand(sp, 2 * kPointerSize));
- __ Str(scratch, MemOperand(sp, 3 * kPointerSize));
- __ Str(scratch, MemOperand(sp, 5 * kPointerSize));
+ __ Str(scratch, MemOperand(sp, 2 * kSystemPointerSize));
+ __ Str(scratch, MemOperand(sp, 3 * kSystemPointerSize));
// kData.
- __ Add(index, argc, Operand(FCA::kArgsLength + kCallDataOffset));
- __ Ldr(scratch, MemOperand(sp, index, LSL, kPointerSizeLog2));
- __ Str(scratch, MemOperand(sp, 4 * kPointerSize));
+ __ Str(call_data, MemOperand(sp, 4 * kSystemPointerSize));
+
+ // kNewTarget.
+ __ Str(scratch, MemOperand(sp, 5 * kSystemPointerSize));
// Keep a pointer to kHolder (= implicit_args) in a scratch register.
// We use it below to set up the FunctionCallbackInfo object.
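[Editorial sketch, not part of the diff.] The target-state comment above describes the FunctionCallbackArguments (FCA) implicit_args block that the builtin now fills directly from registers (holder in x0, call data in x3) instead of re-reading extra stack arguments. A sketch of that slot layout as plain data; the names mirror the comment and are illustrative, not the actual FCA constants:

// Illustrative layout of the implicit_args block built on the stack, one
// 8-byte slot per entry.
enum ImplicitArgsSlot {
  kHolderSlot = 0,
  kIsolateSlot = 1,
  kReturnValueDefaultValueSlot = 2,  // undefined
  kReturnValueSlot = 3,              // undefined
  kDataSlot = 4,                     // call data, now passed in x3
  kNewTargetSlot = 5,                // undefined
  kImplicitArgsLength = 6            // corresponds to FCA::kArgsLength
};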
@@ -3616,16 +3629,17 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
// FunctionCallbackInfo::implicit_args_ (points at kHolder as set up above).
// Arguments are after the return address (pushed by EnterExitFrame()).
- __ Str(scratch, MemOperand(sp, 1 * kPointerSize));
+ __ Str(scratch, MemOperand(sp, 1 * kSystemPointerSize));
// FunctionCallbackInfo::values_ (points at the first varargs argument passed
// on the stack).
- __ Add(scratch, scratch, Operand((FCA::kArgsLength - 1) * kPointerSize));
- __ Add(scratch, scratch, Operand(argc, LSL, kPointerSizeLog2));
- __ Str(scratch, MemOperand(sp, 2 * kPointerSize));
+ __ Add(scratch, scratch,
+ Operand((FCA::kArgsLength - 1) * kSystemPointerSize));
+ __ Add(scratch, scratch, Operand(argc, LSL, kSystemPointerSizeLog2));
+ __ Str(scratch, MemOperand(sp, 2 * kSystemPointerSize));
// FunctionCallbackInfo::length_.
- __ Str(argc, MemOperand(sp, 3 * kPointerSize));
+ __ Str(argc, MemOperand(sp, 3 * kSystemPointerSize));
// We also store the number of slots to drop from the stack after returning
// from the API function here.
@@ -3633,12 +3647,12 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
// drop, not the number of bytes. arm64 must always drop a slot count that is
// a multiple of two, and related helper functions (DropArguments) expect a
// register containing the slot count.
- __ Add(scratch, argc, Operand(FCA::kArgsLength + kExtraStackArgumentCount));
- __ Str(scratch, MemOperand(sp, 4 * kPointerSize));
+ __ Add(scratch, argc, Operand(FCA::kArgsLength + 1 /*receiver*/));
+ __ Str(scratch, MemOperand(sp, 4 * kSystemPointerSize));
// v8::InvocationCallback's argument.
DCHECK(!AreAliased(x0, api_function_address));
- __ add(x0, sp, Operand(1 * kPointerSize));
+ __ add(x0, sp, Operand(1 * kSystemPointerSize));
ExternalReference thunk_ref = ExternalReference::invoke_function_callback();
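[Editorial sketch, not part of the diff.] The value stored at sp[4 * kSystemPointerSize] is the number of stack slots CallApiFunctionAndReturn later drops: the pushed arguments (argc), the FCA::kArgsLength implicit slots, plus one slot for the receiver; the even-slot-count requirement mentioned in the comment is enforced by the drop helpers, not here. A small sketch of the arithmetic, assuming FCA::kArgsLength is 6 as in the layout sketch above:

// Sketch of the drop count computed above; the value is a number of stack
// slots, not bytes. kFCAArgsLength stands in for FCA::kArgsLength.
constexpr int kFCAArgsLength = 6;  // assumed value

constexpr int SlotsToDrop(int argc) {
  return argc + kFCAArgsLength + 1;  // +1 for the receiver
}

static_assert(SlotsToDrop(3) == 10, "3 args + 6 implicit slots + receiver");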
@@ -3649,11 +3663,11 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
// TODO(jgruber): Document what these arguments are.
static constexpr int kStackSlotsAboveFCA = 2;
MemOperand return_value_operand(
- fp, (kStackSlotsAboveFCA + FCA::kReturnValueOffset) * kPointerSize);
+ fp, (kStackSlotsAboveFCA + FCA::kReturnValueOffset) * kSystemPointerSize);
static constexpr int kSpillOffset = 1 + kApiStackSpace;
static constexpr int kUseStackSpaceOperand = 0;
- MemOperand stack_space_operand(sp, 4 * kPointerSize);
+ MemOperand stack_space_operand(sp, 4 * kSystemPointerSize);
AllowExternalCallThatCantCauseGC scope(masm);
CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
@@ -3681,10 +3695,12 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
DCHECK(!AreAliased(receiver, holder, callback, data, undef, isolate_address,
name));
- __ Ldr(data, FieldMemOperand(callback, AccessorInfo::kDataOffset));
+ __ LoadAnyTaggedField(data,
+ FieldMemOperand(callback, AccessorInfo::kDataOffset));
__ LoadRoot(undef, RootIndex::kUndefinedValue);
__ Mov(isolate_address, ExternalReference::isolate_address(masm->isolate()));
- __ Ldr(name, FieldMemOperand(callback, AccessorInfo::kNameOffset));
+ __ LoadTaggedPointerField(
+ name, FieldMemOperand(callback, AccessorInfo::kNameOffset));
// PropertyCallbackArguments:
// receiver, data, return value, return value default, isolate, holder,
@@ -3700,8 +3716,8 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
"slots must be a multiple of 2 for stack pointer alignment");
// Load address of v8::PropertyAccessorInfo::args_ array and name handle.
- __ Mov(x0, sp); // x0 = Handle<Name>
- __ Add(x1, x0, 1 * kPointerSize); // x1 = v8::PCI::args_
+ __ Mov(x0, sp); // x0 = Handle<Name>
+ __ Add(x1, x0, 1 * kSystemPointerSize); // x1 = v8::PCI::args_
const int kApiStackSpace = 1;
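[Editorial sketch, not part of the diff.] The static_assert message quoted above reflects the arm64 ABI constraint that sp stays 16-byte aligned, so the name handle plus PropertyCallbackArguments slots pushed here must add up to an even number of 8-byte slots. A hedged restatement of that constraint; kPropertyCallbackSlots is a placeholder, not the real constant:

// Alignment constraint only: an even slot count keeps sp 16-byte aligned.
constexpr int kSlotSize = 8;               // bytes per stack slot on arm64
constexpr int kPropertyCallbackSlots = 8;  // placeholder: name handle + args_

static_assert((kPropertyCallbackSlots * kSlotSize) % 16 == 0,
              "slots must be a multiple of 2 for stack pointer alignment");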
@@ -3710,7 +3726,7 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
// Create v8::PropertyCallbackInfo object on the stack and initialize
// its args_ field.
- __ Poke(x1, 1 * kPointerSize);
+ __ Poke(x1, 1 * kSystemPointerSize);
__ SlotAddress(x1, 1);
// x1 = v8::PropertyCallbackInfo&
@@ -3719,14 +3735,16 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
Register api_function_address = x2;
Register js_getter = x4;
- __ Ldr(js_getter, FieldMemOperand(callback, AccessorInfo::kJsGetterOffset));
+ __ LoadTaggedPointerField(
+ js_getter, FieldMemOperand(callback, AccessorInfo::kJsGetterOffset));
__ Ldr(api_function_address,
FieldMemOperand(js_getter, Foreign::kForeignAddressOffset));
const int spill_offset = 1 + kApiStackSpace;
// +3 is to skip prolog, return address and name handle.
MemOperand return_value_operand(
- fp, (PropertyCallbackArguments::kReturnValueOffset + 3) * kPointerSize);
+ fp,
+ (PropertyCallbackArguments::kReturnValueOffset + 3) * kSystemPointerSize);
MemOperand* const kUseStackSpaceConstant = nullptr;
CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
kStackUnwindSpace, kUseStackSpaceConstant,