Diffstat (limited to 'deps/v8/src/builtins/arm64/builtins-arm64.cc')
-rw-r--r--  deps/v8/src/builtins/arm64/builtins-arm64.cc | 167
1 file changed, 91 insertions(+), 76 deletions(-)
diff --git a/deps/v8/src/builtins/arm64/builtins-arm64.cc b/deps/v8/src/builtins/arm64/builtins-arm64.cc
index dd92af89bb..54d2524d6e 100644
--- a/deps/v8/src/builtins/arm64/builtins-arm64.cc
+++ b/deps/v8/src/builtins/arm64/builtins-arm64.cc
@@ -150,13 +150,6 @@ void Builtins::Generate_ArrayConstructor(MacroAssembler* masm) {
__ TailCallStub(&stub);
}
-static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
- __ Ldr(x2, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
- __ Ldr(x2, FieldMemOperand(x2, SharedFunctionInfo::kCodeOffset));
- __ Add(x2, x2, Code::kHeaderSize - kHeapObjectTag);
- __ Br(x2);
-}
-
static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
Runtime::FunctionId function_id) {
// ----------- S t a t e -------------
@@ -180,6 +173,7 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
__ SmiUntag(x0);
}
+ static_assert(kJavaScriptCallCodeStartRegister == x2, "ABI mismatch");
__ Add(x2, x2, Code::kHeaderSize - kHeapObjectTag);
__ Br(x2);
}
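The hunk above pins the tail-call target to x2, which the new static_assert documents as the JavaScript calling convention's code start register, and then computes the entry point in a single Add. A minimal sketch of that arithmetic, with assumed placeholder values for kHeapObjectTag and Code::kHeaderSize (neither is taken from this diff):

#include <cstdint>
#include <cstdio>

constexpr intptr_t kHeapObjectTag = 1;    // assumed tag on heap pointers
constexpr intptr_t kCodeHeaderSize = 96;  // placeholder for Code::kHeaderSize

// One Add both strips the pointer tag and skips the Code header, which is
// what `Add(x2, x2, Code::kHeaderSize - kHeapObjectTag)` achieves above.
intptr_t CodeEntry(intptr_t tagged_code_pointer) {
  return tagged_code_pointer + (kCodeHeaderSize - kHeapObjectTag);
}

int main() {
  const intptr_t tagged = 0x100001;  // hypothetical tagged Code pointer
  std::printf("entry at %#llx\n",
              static_cast<unsigned long long>(CodeEntry(tagged)));
}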
@@ -332,7 +326,8 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
__ Ldr(x4, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
__ Ldr(w4, FieldMemOperand(x4, SharedFunctionInfo::kCompilerHintsOffset));
- __ TestAndBranchIfAnySet(w4, SharedFunctionInfo::kDerivedConstructorMask,
+ __ TestAndBranchIfAnySet(w4,
+ SharedFunctionInfo::IsDerivedConstructorBit::kMask,
&not_create_implicit_receiver);
// If not derived class constructor: Allocate the new receiver object.
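This hunk swaps a hand-written mask constant for one derived from the bit field itself (IsDerivedConstructorBit::kMask). A hedged sketch of how such a BitField helper can compute its mask, using made-up shift positions rather than SharedFunctionInfo's real layout:

#include <cstdint>

template <int kShift, int kSize>
struct BitField {
  static constexpr uint32_t kMask = ((1u << kSize) - 1u) << kShift;
  static constexpr bool decode(uint32_t flags) { return (flags & kMask) != 0; }
};

// Hypothetical positions; the real ones live in SharedFunctionInfo.
using IsDerivedConstructorBit = BitField<4, 1>;
using IsClassConstructorBit = BitField<5, 1>;

static_assert(IsDerivedConstructorBit::kMask == 1u << 4, "mask layout");

int main() {
  const uint32_t hints = IsClassConstructorBit::kMask;
  return IsClassConstructorBit::decode(hints) ? 0 : 1;
}

Deriving the mask from the field definition keeps TestAndBranchIfAnySet in sync with the flag layout automatically, which is presumably the point of the rename.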
@@ -460,11 +455,11 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
if (restrict_constructor_return) {
// Throw if constructor function is a class constructor
- __ TestAndBranchIfAllClear(w4, SharedFunctionInfo::kClassConstructorMask,
- &use_receiver);
+ __ TestAndBranchIfAllClear(
+ w4, SharedFunctionInfo::IsClassConstructorBit::kMask, &use_receiver);
} else {
- __ TestAndBranchIfAnySet(w4, SharedFunctionInfo::kClassConstructorMask,
- &use_receiver);
+ __ TestAndBranchIfAnySet(
+ w4, SharedFunctionInfo::IsClassConstructorBit::kMask, &use_receiver);
__ CallRuntime(
Runtime::kIncrementUseCounterConstructorReturnNonUndefinedPrimitive);
__ B(&use_receiver);
@@ -552,7 +547,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// Check the stack for overflow. We are not trying to catch interruptions
// (i.e. debug break and preemption) here, so check the "real stack limit".
Label stack_overflow;
- __ CompareRoot(__ StackPointer(), Heap::kRealStackLimitRootIndex);
+ __ CompareRoot(sp, Heap::kRealStackLimitRootIndex);
__ B(lo, &stack_overflow);
// Get number of arguments for generator function.
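The comment above distinguishes the interruptible limit from the "real stack limit". A hedged illustration of that split (field names are invented, not V8's):

#include <cstdint>

struct StackGuard {
  intptr_t js_limit;    // can be raised artificially to trigger interrupts
  intptr_t real_limit;  // the genuine bottom of usable stack
};

// The CompareRoot/B(lo) pair above implements this predicate against the
// real limit, deliberately ignoring pending debug break and preemption.
bool RealStackOverflow(intptr_t sp, const StackGuard& guard) {
  return sp < guard.real_limit;
}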
@@ -617,9 +612,10 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// undefined because generator functions are non-constructable.
__ Move(x3, x1);
__ Move(x1, x4);
- __ Ldr(x5, FieldMemOperand(x1, JSFunction::kCodeOffset));
- __ Add(x5, x5, Code::kHeaderSize - kHeapObjectTag);
- __ Jump(x5);
+ static_assert(kJavaScriptCallCodeStartRegister == x2, "ABI mismatch");
+ __ Ldr(x2, FieldMemOperand(x1, JSFunction::kCodeOffset));
+ __ Add(x2, x2, Code::kHeaderSize - kHeapObjectTag);
+ __ Jump(x2);
}
__ Bind(&prepare_step_in_if_stepping);
@@ -663,7 +659,7 @@ static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
__ LoadRoot(scratch, Heap::kRealStackLimitRootIndex);
// Make scratch the space we have left. The stack might already be overflowed
// here which will cause scratch to become negative.
- __ Sub(scratch, masm->StackPointer(), scratch);
+ __ Sub(scratch, sp, scratch);
// Check if the arguments will overflow the stack.
__ Cmp(scratch, Operand(num_args, LSL, kPointerSizeLog2));
__ B(le, stack_overflow);
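Generate_StackOverflowCheck computes the remaining stack as sp minus the limit and compares it, signed, against the space the arguments need. A hedged C++ model of that arithmetic (kPointerSizeLog2 = 3 assumes a 64-bit target):

#include <cstdint>

constexpr int kPointerSizeLog2 = 3;

// Returns true when pushing num_args pointer-sized slots would overflow.
// The signed comparison matters: if sp is already below the limit, the
// subtraction goes negative and the check still fails, matching the
// "might already be overflowed" comment above.
bool WouldOverflowStack(intptr_t sp, intptr_t real_stack_limit,
                        int64_t num_args) {
  const int64_t space_left = static_cast<int64_t>(sp) - real_stack_limit;
  return space_left <= (num_args << kPointerSizeLog2);
}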
@@ -745,7 +741,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// Poke the result into the stack.
__ Str(x11, MemOperand(scratch, -kPointerSize, PreIndex));
// Loop if we've not reached the end of copy marker.
- __ Cmp(__ StackPointer(), scratch);
+ __ Cmp(sp, scratch);
__ B(lt, &loop);
__ Bind(&done);
@@ -920,9 +916,10 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
// register.
ReplaceClosureCodeWithOptimizedCode(masm, optimized_code_entry, closure,
scratch2, scratch3, feedback_vector);
- __ Add(optimized_code_entry, optimized_code_entry,
+ static_assert(kJavaScriptCallCodeStartRegister == x2, "ABI mismatch");
+ __ Add(x2, optimized_code_entry,
Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(optimized_code_entry);
+ __ Jump(x2);
// Optimized code slot contains deoptimized code, evict it and re-enter the
// closure's code.
@@ -936,10 +933,13 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
}
// Advance the current bytecode offset. This simulates what all bytecode
-// handlers do upon completion of the underlying operation.
-static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
- Register bytecode_offset, Register bytecode,
- Register scratch1) {
+// handlers do upon completion of the underlying operation. Will bail out to a
+// label if the bytecode (without prefix) is a return bytecode.
+static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
+ Register bytecode_array,
+ Register bytecode_offset,
+ Register bytecode, Register scratch1,
+ Label* if_return) {
Register bytecode_size_table = scratch1;
DCHECK(!AreAliased(bytecode_array, bytecode_offset, bytecode_size_table,
bytecode));
@@ -949,11 +949,11 @@ static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
Operand(ExternalReference::bytecode_size_table_address(masm->isolate())));
// Check if the bytecode is a Wide or ExtraWide prefix bytecode.
- Label load_size, extra_wide;
+ Label process_bytecode, extra_wide;
STATIC_ASSERT(0 == static_cast<int>(interpreter::Bytecode::kWide));
STATIC_ASSERT(1 == static_cast<int>(interpreter::Bytecode::kExtraWide));
__ Cmp(bytecode, Operand(0x1));
- __ B(hi, &load_size);
+ __ B(hi, &process_bytecode);
__ B(eq, &extra_wide);
// Load the next bytecode and update table to the wide scaled table.
@@ -961,7 +961,7 @@ static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
__ Ldrb(bytecode, MemOperand(bytecode_array, bytecode_offset));
__ Add(bytecode_size_table, bytecode_size_table,
Operand(kIntSize * interpreter::Bytecodes::kBytecodeCount));
- __ B(&load_size);
+ __ B(&process_bytecode);
__ Bind(&extra_wide);
// Load the next bytecode and update table to the extra wide scaled table.
@@ -970,8 +970,16 @@ static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
__ Add(bytecode_size_table, bytecode_size_table,
Operand(2 * kIntSize * interpreter::Bytecodes::kBytecodeCount));
- // Load the size of the current bytecode.
- __ Bind(&load_size);
+ __ Bind(&process_bytecode);
+
+// Bail out to the return label if this is a return bytecode.
+#define JUMP_IF_EQUAL(NAME)                                               \
+  __ Cmp(bytecode,                                                        \
+         Operand(static_cast<int>(interpreter::Bytecode::k##NAME)));      \
+  __ B(if_return, eq);
+ RETURN_BYTECODE_LIST(JUMP_IF_EQUAL)
+#undef JUMP_IF_EQUAL
+
+ // Otherwise, load the size of the current bytecode and advance the offset.
__ Ldr(scratch1.W(), MemOperand(bytecode_size_table, bytecode, LSL, 2));
__ Add(bytecode_offset, bytecode_offset, scratch1);
}
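The rewritten helper folds return detection into the advance path: strip an optional Wide/ExtraWide prefix (switching to the correspondingly scaled size table), bail out if the unprefixed bytecode is a return, otherwise add its size to the offset. A hedged C++ model, collapsing RETURN_BYTECODE_LIST to a single kReturn and using placeholder enum values:

#include <cstddef>
#include <cstdint>

enum class Bytecode : uint8_t { kWide = 0, kExtraWide = 1, kReturn = 2 };
constexpr int kBytecodeCount = 3;  // stand-in for Bytecodes::kBytecodeCount

// size_table holds three consecutive scales: plain, wide, extra-wide.
// Returns false instead of advancing when the bytecode is a return,
// mirroring the jump to if_return in the generated code.
bool AdvanceOrReturn(const uint8_t* bytecode_array, size_t& offset,
                     const int32_t* size_table) {
  uint8_t bytecode = bytecode_array[offset];
  if (bytecode == static_cast<uint8_t>(Bytecode::kWide)) {
    bytecode = bytecode_array[++offset];
    size_table += kBytecodeCount;
  } else if (bytecode == static_cast<uint8_t>(Bytecode::kExtraWide)) {
    bytecode = bytecode_array[++offset];
    size_table += 2 * kBytecodeCount;
  }
  if (bytecode == static_cast<uint8_t>(Bytecode::kReturn)) return false;
  offset += static_cast<size_t>(size_table[bytecode]);
  return true;
}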
@@ -998,7 +1006,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Load the feedback vector from the closure.
__ Ldr(feedback_vector,
- FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
+ FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
__ Ldr(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
// Read off the optimized code slot in the feedback vector, and if there
// is optimized code or an optimization marker, call that instead.
@@ -1009,7 +1017,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// the frame (that is done below).
FrameScope frame_scope(masm, StackFrame::MANUAL);
__ Push(lr, fp, cp, closure);
- __ Add(fp, __ StackPointer(), StandardFrameConstants::kFixedFrameSizeFromFp);
+ __ Add(fp, sp, StandardFrameConstants::kFixedFrameSizeFromFp);
// Get the bytecode array from the function object (or from the DebugInfo if
// it is present) and load it into kInterpreterBytecodeArrayRegister.
@@ -1022,7 +1030,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ Bind(&bytecode_array_loaded);
// Increment invocation count for the function.
- __ Ldr(x11, FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
+ __ Ldr(x11, FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
__ Ldr(x11, FieldMemOperand(x11, Cell::kValueOffset));
__ Ldr(w10, FieldMemOperand(x11, FeedbackVector::kInvocationCountOffset));
__ Add(w10, w10, Operand(1));
@@ -1060,7 +1068,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Do a stack check to ensure we don't go over the limit.
Label ok;
- __ Sub(x10, __ StackPointer(), Operand(x11));
+ __ Sub(x10, sp, Operand(x11));
__ CompareRoot(x10, Heap::kRealStackLimitRootIndex);
__ B(hs, &ok);
__ CallRuntime(Runtime::kThrowStackOverflow);
@@ -1101,11 +1109,12 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ Mov(kInterpreterDispatchTableRegister,
Operand(ExternalReference::interpreter_dispatch_table_address(
masm->isolate())));
- __ Ldrb(x1, MemOperand(kInterpreterBytecodeArrayRegister,
- kInterpreterBytecodeOffsetRegister));
- __ Mov(x1, Operand(x1, LSL, kPointerSizeLog2));
- __ Ldr(ip0, MemOperand(kInterpreterDispatchTableRegister, x1));
- __ Call(ip0);
+ __ Ldrb(x18, MemOperand(kInterpreterBytecodeArrayRegister,
+ kInterpreterBytecodeOffsetRegister));
+ __ Mov(x1, Operand(x18, LSL, kPointerSizeLog2));
+ __ Ldr(kJavaScriptCallCodeStartRegister,
+ MemOperand(kInterpreterDispatchTableRegister, x1));
+ __ Call(kJavaScriptCallCodeStartRegister);
masm->isolate()->heap()->SetInterpreterEntryReturnPCOffset(masm->pc_offset());
// Any returns to the entry trampoline are either due to the return bytecode
@@ -1118,16 +1127,13 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
__ SmiUntag(kInterpreterBytecodeOffsetRegister);
- // Check if we should return.
+ // Either return, or advance to the next bytecode and dispatch.
Label do_return;
__ Ldrb(x1, MemOperand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister));
- __ Cmp(x1, Operand(static_cast<int>(interpreter::Bytecode::kReturn)));
- __ B(&do_return, eq);
-
- // Advance to the next bytecode and dispatch.
- AdvanceBytecodeOffset(masm, kInterpreterBytecodeArrayRegister,
- kInterpreterBytecodeOffsetRegister, x1, x2);
+ AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
+ kInterpreterBytecodeOffsetRegister, x1, x2,
+ &do_return);
__ B(&do_dispatch);
__ bind(&do_return);
@@ -1336,11 +1342,12 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
__ SmiUntag(kInterpreterBytecodeOffsetRegister);
// Dispatch to the target bytecode.
- __ Ldrb(x1, MemOperand(kInterpreterBytecodeArrayRegister,
- kInterpreterBytecodeOffsetRegister));
- __ Mov(x1, Operand(x1, LSL, kPointerSizeLog2));
- __ Ldr(ip0, MemOperand(kInterpreterDispatchTableRegister, x1));
- __ Jump(ip0);
+ __ Ldrb(x18, MemOperand(kInterpreterBytecodeArrayRegister,
+ kInterpreterBytecodeOffsetRegister));
+ __ Mov(x1, Operand(x18, LSL, kPointerSizeLog2));
+ __ Ldr(kJavaScriptCallCodeStartRegister,
+ MemOperand(kInterpreterDispatchTableRegister, x1));
+ __ Jump(kJavaScriptCallCodeStartRegister);
}
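Both dispatch sites now follow the same shape: load the byte, scale it to a table offset, load the handler into the ABI-mandated code start register, and transfer control. A hedged C++ model of that sequence (handler type and names are illustrative):

#include <cstddef>
#include <cstdint>

using BytecodeHandler = void (*)();

void Dispatch(const uint8_t* bytecode_array, size_t offset,
              BytecodeHandler const* dispatch_table) {
  const uint8_t bytecode = bytecode_array[offset];  // Ldrb
  // Mov(x1, Operand(..., LSL, kPointerSizeLog2)) scales the index; the
  // array subscript below does the same scaling implicitly.
  BytecodeHandler handler = dispatch_table[bytecode];  // Ldr
  handler();  // Call or Jump into the handler
}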
void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
@@ -1356,14 +1363,20 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
kInterpreterBytecodeOffsetRegister));
// Advance to the next bytecode.
- AdvanceBytecodeOffset(masm, kInterpreterBytecodeArrayRegister,
- kInterpreterBytecodeOffsetRegister, x1, x2);
+ Label if_return;
+ AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
+ kInterpreterBytecodeOffsetRegister, x1, x2,
+ &if_return);
// Convert new bytecode offset to a Smi and save in the stackframe.
__ SmiTag(x2, kInterpreterBytecodeOffsetRegister);
__ Str(x2, MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
Generate_InterpreterEnterBytecode(masm);
+
+ // We should never take the if_return path.
+ __ bind(&if_return);
+ __ Abort(AbortReason::kInvalidBytecodeAdvance);
}
void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
@@ -1381,7 +1394,7 @@ void Builtins::Generate_CheckOptimizationMarker(MacroAssembler* masm) {
// Get the feedback vector.
Register feedback_vector = x2;
__ Ldr(feedback_vector,
- FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
+ FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
__ Ldr(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
// The feedback vector must be defined.
@@ -1394,7 +1407,11 @@ void Builtins::Generate_CheckOptimizationMarker(MacroAssembler* masm) {
MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, x7, x4, x5);
// Otherwise, tail call the SFI code.
- GenerateTailCallToSharedCode(masm);
+ static_assert(kJavaScriptCallCodeStartRegister == x2, "ABI mismatch");
+ __ Ldr(x2, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
+ __ Ldr(x2, FieldMemOperand(x2, SharedFunctionInfo::kCodeOffset));
+ __ Add(x2, x2, Code::kHeaderSize - kHeapObjectTag);
+ __ Br(x2);
}
void Builtins::Generate_CompileLazyDeoptimizedCode(MacroAssembler* masm) {
@@ -1423,7 +1440,7 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// Do we have a valid feedback vector?
__ Ldr(feedback_vector,
- FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
+ FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
__ Ldr(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
__ JumpIfRoot(feedback_vector, Heap::kUndefinedValueRootIndex,
&gotta_call_runtime);
@@ -1613,7 +1630,7 @@ void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
__ JumpIfSmi(x0, &failed);
// Peek the argument count from the stack, untagging at the same time.
- __ Ldr(w4, UntagSmiMemOperand(__ StackPointer(), 3 * kPointerSize));
+ __ Ldr(w4, UntagSmiMemOperand(sp, 3 * kPointerSize));
__ Drop(4);
scope.GenerateLeaveFrame();
@@ -1646,7 +1663,7 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
kPointerSize;
// Set up frame pointer.
- __ Add(fp, __ StackPointer(), frame_size);
+ __ Add(fp, sp, frame_size);
if (with_result) {
// Overwrite the hole inserted by the deoptimizer with the return value from
@@ -1682,7 +1699,7 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
MemOperand(fp, BuiltinContinuationFrameConstants::kBuiltinOffset));
// Restore fp, lr.
- __ Mov(__ StackPointer(), fp);
+ __ Mov(sp, fp);
__ Pop(fp, lr);
// Call builtin.
@@ -2090,8 +2107,7 @@ void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
__ Push(x11, x1); // x1: function
__ SmiTag(x11, x0); // x0: number of arguments.
__ Push(x11, padreg);
- __ Add(fp, __ StackPointer(),
- ArgumentsAdaptorFrameConstants::kFixedFrameSizeFromFp);
+ __ Add(fp, sp, ArgumentsAdaptorFrameConstants::kFixedFrameSizeFromFp);
}
void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
@@ -2101,7 +2117,7 @@ void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
// Get the number of arguments passed (as a smi), tear down the frame and
// then drop the parameters and the receiver.
__ Ldr(x10, MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ Mov(__ StackPointer(), fp);
+ __ Mov(sp, fp);
__ Pop(fp, lr);
// Drop actual parameters and receiver.
@@ -2194,7 +2210,7 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
__ LoadRoot(x10, Heap::kRealStackLimitRootIndex);
// Make x10 the space we have left. The stack might already be overflowed
// here which will cause x10 to become negative.
- __ Sub(x10, masm->StackPointer(), x10);
+ __ Sub(x10, sp, x10);
// Check if the arguments will overflow the stack.
__ Cmp(x10, Operand(len, LSL, kPointerSizeLog2));
__ B(gt, &done); // Signed comparison.
@@ -2341,7 +2357,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
Label class_constructor;
__ Ldr(x2, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
__ Ldr(w3, FieldMemOperand(x2, SharedFunctionInfo::kCompilerHintsOffset));
- __ TestAndBranchIfAnySet(w3, SharedFunctionInfo::kClassConstructorMask,
+ __ TestAndBranchIfAnySet(w3, SharedFunctionInfo::IsClassConstructorBit::kMask,
&class_constructor);
// Enter the context of the function; ToObject has to run in the function
@@ -2467,7 +2483,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
__ LoadRoot(x10, Heap::kRealStackLimitRootIndex);
// Make x10 the space we have left. The stack might already be overflowed
// here which will cause x10 to become negative.
- __ Sub(x10, masm->StackPointer(), x10);
+ __ Sub(x10, sp, x10);
// Check if the arguments will overflow the stack.
__ Cmp(x10, Operand(bound_argc, LSL, kPointerSizeLog2));
__ B(gt, &done); // Signed comparison.
@@ -2539,8 +2555,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
Register scratch = x10;
__ Tbz(bound_argc, 0, &done);
// Store receiver.
- __ Add(scratch, __ StackPointer(),
- Operand(total_argc, LSL, kPointerSizeLog2));
+ __ Add(scratch, sp, Operand(total_argc, LSL, kPointerSizeLog2));
__ Str(receiver, MemOperand(scratch, kPointerSize, PostIndex));
__ Tbnz(total_argc, 0, &done);
// Store padding.
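A hedged model of the Tbz/Tbnz logic above: arm64 keeps sp 16-byte aligned, so after pushing an odd number of bound arguments the receiver moves and a padding slot may be needed to restore alignment. Names are illustrative:

#include <cstdint>

// Mirrors Tbz(bound_argc, 0, &done): nothing to fix up for an even count.
bool NeedsReceiverFixup(uint64_t bound_argc) { return (bound_argc & 1) != 0; }

// Mirrors Tbnz(total_argc, 0, &done): an odd total already fills the last
// 16-byte pair, so padding is only stored when the total is even.
bool NeedsPaddingSlot(uint64_t total_argc) { return (total_argc & 1) == 0; }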
@@ -2825,7 +2840,6 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
Register argc_actual = x0; // Excluding the receiver.
Register argc_expected = x2; // Excluding the receiver.
Register function = x1;
- Register code_entry = x10;
Label dont_adapt_arguments, stack_overflow;
@@ -2854,7 +2868,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ Bic(scratch1, scratch1, 1);
__ Claim(scratch1, kPointerSize);
- __ Mov(copy_to, __ StackPointer());
+ __ Mov(copy_to, sp);
// Preparing the expected arguments is done in four steps, the order of
// which is chosen so we can use LDP/STP and avoid conditional branches as
@@ -2918,8 +2932,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ RecordComment("-- Store receiver --");
__ Add(copy_from, fp, 2 * kPointerSize);
__ Ldr(scratch1, MemOperand(copy_from, argc_actual, LSL, kPointerSizeLog2));
- __ Str(scratch1,
- MemOperand(__ StackPointer(), argc_expected, LSL, kPointerSizeLog2));
+ __ Str(scratch1, MemOperand(sp, argc_expected, LSL, kPointerSizeLog2));
// Arguments have been adapted. Now call the entry point.
__ RecordComment("-- Call entry point --");
@@ -2927,9 +2940,10 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// x0 : expected number of arguments
// x1 : function (passed through to callee)
// x3 : new target (passed through to callee)
- __ Ldr(code_entry, FieldMemOperand(function, JSFunction::kCodeOffset));
- __ Add(code_entry, code_entry, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Call(code_entry);
+ static_assert(kJavaScriptCallCodeStartRegister == x2, "ABI mismatch");
+ __ Ldr(x2, FieldMemOperand(function, JSFunction::kCodeOffset));
+ __ Add(x2, x2, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Call(x2);
// Store offset of return address for deoptimizer.
masm->isolate()->heap()->SetArgumentsAdaptorDeoptPCOffset(masm->pc_offset());
@@ -2941,9 +2955,10 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// Call the entry point without adapting the arguments.
__ RecordComment("-- Call without adapting args --");
__ Bind(&dont_adapt_arguments);
- __ Ldr(code_entry, FieldMemOperand(function, JSFunction::kCodeOffset));
- __ Add(code_entry, code_entry, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(code_entry);
+ static_assert(kJavaScriptCallCodeStartRegister == x2, "ABI mismatch");
+ __ Ldr(x2, FieldMemOperand(function, JSFunction::kCodeOffset));
+ __ Add(x2, x2, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Jump(x2);
__ Bind(&stack_overflow);
__ RecordComment("-- Stack overflow --");