Diffstat (limited to 'deps/v8/src/builtins/arm64/builtins-arm64.cc')
-rw-r--r--  deps/v8/src/builtins/arm64/builtins-arm64.cc  587
1 file changed, 365 insertions, 222 deletions
diff --git a/deps/v8/src/builtins/arm64/builtins-arm64.cc b/deps/v8/src/builtins/arm64/builtins-arm64.cc
index 875f261835..dd92af89bb 100644
--- a/deps/v8/src/builtins/arm64/builtins-arm64.cc
+++ b/deps/v8/src/builtins/arm64/builtins-arm64.cc
@@ -111,9 +111,9 @@ void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) {
// Initial map for the builtin InternalArray functions should be maps.
__ Ldr(x10, FieldMemOperand(x1, JSFunction::kPrototypeOrInitialMapOffset));
__ Tst(x10, kSmiTagMask);
- __ Assert(ne, kUnexpectedInitialMapForInternalArrayFunction);
+ __ Assert(ne, AbortReason::kUnexpectedInitialMapForInternalArrayFunction);
__ CompareObjectType(x10, x11, x12, MAP_TYPE);
- __ Assert(eq, kUnexpectedInitialMapForInternalArrayFunction);
+ __ Assert(eq, AbortReason::kUnexpectedInitialMapForInternalArrayFunction);
}
// Run the native code for the InternalArray function called as a normal
@@ -138,9 +138,9 @@ void Builtins::Generate_ArrayConstructor(MacroAssembler* masm) {
// Initial map for the builtin Array functions should be maps.
__ Ldr(x10, FieldMemOperand(x1, JSFunction::kPrototypeOrInitialMapOffset));
__ Tst(x10, kSmiTagMask);
- __ Assert(ne, kUnexpectedInitialMapForArrayFunction);
+ __ Assert(ne, AbortReason::kUnexpectedInitialMapForArrayFunction);
__ CompareObjectType(x10, x11, x12, MAP_TYPE);
- __ Assert(eq, kUnexpectedInitialMapForArrayFunction);
+ __ Assert(eq, AbortReason::kUnexpectedInitialMapForArrayFunction);
}
// Run the native code for the Array function called as a normal function.
@@ -210,7 +210,7 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
// Check that FrameScope pushed the context on to the stack already.
__ Peek(x2, 0);
__ Cmp(x2, cp);
- __ Check(eq, kUnexpectedValue);
+ __ Check(eq, AbortReason::kUnexpectedValue);
}
// Push number of arguments.
@@ -315,7 +315,7 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
// Check that FrameScope pushed the context on to the stack already.
__ Peek(x2, 0);
__ Cmp(x2, cp);
- __ Check(eq, kUnexpectedValue);
+ __ Check(eq, AbortReason::kUnexpectedValue);
}
// Preserve the incoming parameters on the stack.
@@ -348,10 +348,11 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
// ----------- S t a t e -------------
// -- x0: receiver
- // -- Slot 3 / sp[0*kPointerSize]: new target
- // -- Slot 2 / sp[1*kPointerSize]: constructor function
- // -- Slot 1 / sp[2*kPointerSize]: number of arguments (tagged)
- // -- Slot 0 / sp[3*kPointerSize]: context
+ // -- Slot 4 / sp[0*kPointerSize]: new target
+ // -- Slot 3 / sp[1*kPointerSize]: padding
+ // -- Slot 2 / sp[2*kPointerSize]: constructor function
+ // -- Slot 1 / sp[3*kPointerSize]: number of arguments (tagged)
+ // -- Slot 0 / sp[4*kPointerSize]: context
// -----------------------------------
// Deoptimizer enters here.
masm->isolate()->heap()->SetConstructStubCreateDeoptPCOffset(
@@ -388,9 +389,10 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
// -- sp[0*kPointerSize]: implicit receiver (overwrite if argc odd)
// -- sp[1*kPointerSize]: implicit receiver
// -- sp[2*kPointerSize]: implicit receiver
- // -- x1 and sp[3*kPointerSize]: constructor function
- // -- sp[4*kPointerSize]: number of arguments (tagged)
- // -- sp[5*kPointerSize]: context
+ // -- sp[3*kPointerSize]: padding
+ // -- x1 and sp[4*kPointerSize]: constructor function
+ // -- sp[5*kPointerSize]: number of arguments (tagged)
+ // -- sp[6*kPointerSize]: context
// -----------------------------------
// Round the number of arguments down to the next even number, and claim
@@ -416,14 +418,8 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
__ InvokeFunction(x1, x3, actual, CALL_FUNCTION);
// ----------- S t a t e -------------
- // If argc is odd:
- // -- sp[0*kPointerSize]: implicit receiver
- // -- sp[1*kPointerSize]: constructor function
- // -- sp[2*kPointerSize]: number of arguments
- // -- sp[3*kPointerSize]: context
- // If argc is even:
// -- sp[0*kPointerSize]: implicit receiver
- // -- sp[1*kPointerSize]: implicit receiver
+ // -- sp[1*kPointerSize]: padding
// -- sp[2*kPointerSize]: constructor function
// -- sp[3*kPointerSize]: number of arguments
// -- sp[4*kPointerSize]: context
@@ -556,7 +552,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// Check the stack for overflow. We are not trying to catch interruptions
// (i.e. debug break and preemption) here, so check the "real stack limit".
Label stack_overflow;
- __ CompareRoot(jssp, Heap::kRealStackLimitRootIndex);
+ __ CompareRoot(__ StackPointer(), Heap::kRealStackLimitRootIndex);
__ B(lo, &stack_overflow);
// Get number of arguments for generator function.
@@ -564,10 +560,15 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ Ldr(w10,
FieldMemOperand(x10, SharedFunctionInfo::kFormalParameterCountOffset));
- // Claim slots for arguments and receiver.
- __ Add(x11, x10, 1);
+ // Claim slots for arguments and receiver (rounded up to a multiple of two).
+ __ Add(x11, x10, 2);
+ __ Bic(x11, x11, 1);
__ Claim(x11);
+ // Store padding (which might be replaced by the receiver).
+ __ Sub(x11, x11, 1);
+ __ Poke(padreg, Operand(x11, LSL, kPointerSizeLog2));
+
// Poke receiver into highest claimed slot.
__ Ldr(x5, FieldMemOperand(x1, JSGeneratorObject::kReceiverOffset));
__ Poke(x5, Operand(x10, LSL, kPointerSizeLog2));
@@ -578,8 +579,8 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// -- x10 : argument count
// -- cp : generator context
// -- lr : return address
- // -- jssp[arg count] : generator receiver
- // -- jssp[0 .. arg count - 1] : claimed for args
+ // -- sp[arg count] : generator receiver
+ // -- sp[0 .. arg count - 1] : claimed for args
// -----------------------------------
// Push holes for arguments to generator function. Since the parser forced
@@ -603,7 +604,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ Ldr(x3, FieldMemOperand(x4, JSFunction::kSharedFunctionInfoOffset));
__ Ldr(x3, FieldMemOperand(x3, SharedFunctionInfo::kFunctionDataOffset));
__ CompareObjectType(x3, x3, x3, BYTECODE_ARRAY_TYPE);
- __ Assert(eq, kMissingBytecodeArray);
+ __ Assert(eq, AbortReason::kMissingBytecodeArray);
}
// Resume (Ignition/TurboFan) generator object.
@@ -624,10 +625,10 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ Bind(&prepare_step_in_if_stepping);
{
FrameScope scope(masm, StackFrame::INTERNAL);
- __ Push(x1);
+ __ Push(x1, padreg);
__ PushArgument(x4);
__ CallRuntime(Runtime::kDebugOnFunctionCall);
- __ Pop(x1);
+ __ Pop(padreg, x1);
__ Ldr(x4, FieldMemOperand(x1, JSGeneratorObject::kFunctionOffset));
}
__ B(&stepping_prepared);
@@ -635,9 +636,9 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ Bind(&prepare_step_in_suspended_generator);
{
FrameScope scope(masm, StackFrame::INTERNAL);
- __ Push(x1);
+ __ Push(x1, padreg);
__ CallRuntime(Runtime::kDebugPrepareStepInSuspendedGenerator);
- __ Pop(x1);
+ __ Pop(padreg, x1);
__ Ldr(x4, FieldMemOperand(x1, JSGeneratorObject::kFunctionOffset));
}
__ B(&stepping_prepared);
@@ -652,8 +653,6 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
Label* stack_overflow) {
- DCHECK(masm->StackPointer().Is(jssp));
-
UseScratchRegisterScope temps(masm);
Register scratch = temps.AcquireX();
@@ -767,10 +766,10 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
__ Mov(x23, x19);
__ Mov(x24, x19);
__ Mov(x25, x19);
+ __ Mov(x28, x19);
// Don't initialize the reserved registers.
// x26 : root register (root).
// x27 : context pointer (cp).
- // x28 : JS stack pointer (jssp).
// x29 : frame pointer (fp).
Handle<Code> builtin = is_construct
@@ -820,7 +819,7 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch) {
// Drop receiver + arguments.
if (__ emit_debug_code()) {
__ Tst(args_size, kPointerSize - 1);
- __ Check(eq, kUnexpectedValue);
+ __ Check(eq, AbortReason::kUnexpectedValue);
}
__ Lsr(args_size, args_size, kPointerSizeLog2);
__ DropArguments(args_size);
@@ -873,6 +872,9 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
&fallthrough);
TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry,
+ OptimizationMarker::kLogFirstExecution,
+ Runtime::kFunctionFirstExecution);
+ TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry,
OptimizationMarker::kCompileOptimized,
Runtime::kCompileOptimized_NotConcurrent);
TailCallRuntimeIfMarkerEquals(
@@ -887,7 +889,7 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
__ Cmp(
optimized_code_entry,
Operand(Smi::FromEnum(OptimizationMarker::kInOptimizationQueue)));
- __ Assert(eq, kExpectedOptimizationSentinel);
+ __ Assert(eq, AbortReason::kExpectedOptimizationSentinel);
}
__ B(&fallthrough);
}
@@ -967,7 +969,6 @@ static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
__ Ldrb(bytecode, MemOperand(bytecode_array, bytecode_offset));
__ Add(bytecode_size_table, bytecode_size_table,
Operand(2 * kIntSize * interpreter::Bytecodes::kBytecodeCount));
- __ B(&load_size);
// Load the size of the current bytecode.
__ Bind(&load_size);
@@ -985,7 +986,6 @@ static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
// - x3: the incoming new target or generator object
// - cp: our context.
// - fp: our caller's frame pointer.
-// - jssp: stack pointer.
// - lr: return address.
//
// The function builds an interpreter frame. See InterpreterFrameConstants in
@@ -1009,7 +1009,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// the frame (that is done below).
FrameScope frame_scope(masm, StackFrame::MANUAL);
__ Push(lr, fp, cp, closure);
- __ Add(fp, jssp, StandardFrameConstants::kFixedFrameSizeFromFp);
+ __ Add(fp, __ StackPointer(), StandardFrameConstants::kFixedFrameSizeFromFp);
// Get the bytecode array from the function object (or from the DebugInfo if
// it is present) and load it into kInterpreterBytecodeArrayRegister.
@@ -1030,11 +1030,13 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Check function data field is actually a BytecodeArray object.
if (FLAG_debug_code) {
- __ AssertNotSmi(kInterpreterBytecodeArrayRegister,
- kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
+ __ AssertNotSmi(
+ kInterpreterBytecodeArrayRegister,
+ AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
__ CompareObjectType(kInterpreterBytecodeArrayRegister, x0, x0,
BYTECODE_ARRAY_TYPE);
- __ Assert(eq, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
+ __ Assert(
+ eq, AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
}
// Reset code age.
@@ -1058,8 +1060,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Do a stack check to ensure we don't go over the limit.
Label ok;
- DCHECK(jssp.Is(__ StackPointer()));
- __ Sub(x10, jssp, Operand(x11));
+ __ Sub(x10, __ StackPointer(), Operand(x11));
__ CompareRoot(x10, Heap::kRealStackLimitRootIndex);
__ B(hs, &ok);
__ CallRuntime(Runtime::kThrowStackOverflow);
@@ -1181,10 +1182,19 @@ static void Generate_InterpreterPushArgs(MacroAssembler* masm,
__ Unreachable();
__ Bind(&done);
- // TODO(arm64): Claim one extra slot for padding and store padreg to the
- // padding slot.
+ // Round up to an even number of slots and claim them.
+ __ Add(slots_to_claim, slots_to_claim, 1);
+ __ Bic(slots_to_claim, slots_to_claim, 1);
__ Claim(slots_to_claim);
+ {
+ // Store padding, which may be overwritten.
+ UseScratchRegisterScope temps(masm);
+ Register scratch = temps.AcquireX();
+ __ Sub(scratch, slots_to_claim, 1);
+ __ Poke(padreg, Operand(scratch, LSL, kPointerSizeLog2));
+ }
+
if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
// Store "undefined" as the receiver arg if we need to.
Register receiver = x14;
@@ -1311,11 +1321,13 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
if (FLAG_debug_code) {
// Check function data field is actually a BytecodeArray object.
- __ AssertNotSmi(kInterpreterBytecodeArrayRegister,
- kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
+ __ AssertNotSmi(
+ kInterpreterBytecodeArrayRegister,
+ AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
__ CompareObjectType(kInterpreterBytecodeArrayRegister, x1, x1,
BYTECODE_ARRAY_TYPE);
- __ Assert(eq, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
+ __ Assert(
+ eq, AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
}
// Get the target bytecode offset from the frame.
@@ -1375,7 +1387,7 @@ void Builtins::Generate_CheckOptimizationMarker(MacroAssembler* masm) {
// The feedback vector must be defined.
if (FLAG_debug_code) {
__ CompareRoot(feedback_vector, Heap::kUndefinedValueRootIndex);
- __ Assert(ne, BailoutReason::kExpectedFeedbackVector);
+ __ Assert(ne, AbortReason::kExpectedFeedbackVector);
}
// Is there an optimization marker or optimized code in the feedback vector?
@@ -1634,7 +1646,7 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
kPointerSize;
// Set up frame pointer.
- __ Add(fp, jssp, frame_size);
+ __ Add(fp, __ StackPointer(), frame_size);
if (with_result) {
// Overwrite the hole inserted by the deoptimizer with the return value from
@@ -1770,9 +1782,9 @@ void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- x0 : argc
- // -- jssp[0] : argArray (if argc == 2)
- // -- jssp[8] : thisArg (if argc >= 1)
- // -- jssp[16] : receiver
+ // -- sp[0] : argArray (if argc == 2)
+ // -- sp[8] : thisArg (if argc >= 1)
+ // -- sp[16] : receiver
// -----------------------------------
ASM_LOCATION("Builtins::Generate_FunctionPrototypeApply");
@@ -1824,7 +1836,7 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- x2 : argArray
// -- x1 : receiver
- // -- jssp[0] : thisArg
+ // -- sp[0] : thisArg
// -----------------------------------
// 2. We don't need to check explicitly for callable receiver here,
@@ -1855,55 +1867,65 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
Register argc = x0;
Register function = x1;
- Register scratch1 = x10;
- Register scratch2 = x11;
ASM_LOCATION("Builtins::Generate_FunctionPrototypeCall");
- // 1. Make sure we have at least one argument.
+ // 1. Get the callable to call (passed as receiver) from the stack.
+ __ Peek(function, Operand(argc, LSL, kXRegSizeLog2));
+
+ // 2. Handle case with no arguments.
{
- Label done;
- __ Cbnz(argc, &done);
- __ LoadRoot(scratch1, Heap::kUndefinedValueRootIndex);
- __ Push(scratch1);
- __ Mov(argc, 1);
- __ Bind(&done);
+ Label non_zero;
+ Register scratch = x10;
+ __ Cbnz(argc, &non_zero);
+ __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
+ // Overwrite receiver with undefined, which will be the new receiver.
+ // We do not need to overwrite the padding slot above it with anything.
+ __ Poke(scratch, 0);
+ // Call function. The argument count is already zero.
+ __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+ __ Bind(&non_zero);
}
- // 2. Get the callable to call (passed as receiver) from the stack.
- __ Peek(function, Operand(argc, LSL, kXRegSizeLog2));
+ // 3. Overwrite the receiver with padding. If argc is odd, this is all we
+ // need to do.
+ Label arguments_ready;
+ __ Poke(padreg, Operand(argc, LSL, kXRegSizeLog2));
+ __ Tbnz(argc, 0, &arguments_ready);
- // 3. Shift arguments and return address one slot down on the stack
- // (overwriting the original receiver). Adjust argument count to make
- // the original first argument the new receiver.
+ // 4. If argc is even:
+ // Copy arguments two slots higher in memory, overwriting the original
+ // receiver and padding.
{
Label loop;
- // Calculate the copy start address (destination). Copy end address is jssp.
- __ SlotAddress(scratch2, argc);
- __ Sub(scratch1, scratch2, kPointerSize);
-
- __ Bind(&loop);
- __ Ldr(x12, MemOperand(scratch1, -kPointerSize, PostIndex));
- __ Str(x12, MemOperand(scratch2, -kPointerSize, PostIndex));
- __ Cmp(scratch1, jssp);
- __ B(ge, &loop);
- // Adjust the actual number of arguments and remove the top element
- // (which is a copy of the last argument).
- __ Sub(argc, argc, 1);
- __ Drop(1);
+ Register copy_from = x10;
+ Register copy_to = x11;
+ Register count = x12;
+ Register last_arg_slot = x13;
+ __ Mov(count, argc);
+ __ Sub(last_arg_slot, argc, 1);
+ __ SlotAddress(copy_from, last_arg_slot);
+ __ Add(copy_to, copy_from, 2 * kPointerSize);
+ __ CopyDoubleWords(copy_to, copy_from, count,
+ TurboAssembler::kSrcLessThanDst);
+ // Drop two slots. These are copies of the last two arguments.
+ __ Drop(2);
}
- // 4. Call the callable.
+ // 5. Adjust argument count to make the original first argument the new
+ // receiver and call the callable.
+ __ Bind(&arguments_ready);
+ __ Sub(argc, argc, 1);
__ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
}
void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- x0 : argc
- // -- jssp[0] : argumentsList (if argc == 3)
- // -- jssp[8] : thisArgument (if argc >= 2)
- // -- jssp[16] : target (if argc >= 1)
- // -- jssp[24] : receiver
+ // -- sp[0] : argumentsList (if argc == 3)
+ // -- sp[8] : thisArgument (if argc >= 2)
+ // -- sp[16] : target (if argc >= 1)
+ // -- sp[24] : receiver
// -----------------------------------
ASM_LOCATION("Builtins::Generate_ReflectApply");
@@ -1962,7 +1984,7 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- x2 : argumentsList
// -- x1 : target
- // -- jssp[0] : thisArgument
+ // -- sp[0] : thisArgument
// -----------------------------------
// 2. We don't need to check explicitly for callable target here,
@@ -1977,10 +1999,10 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- x0 : argc
- // -- jssp[0] : new.target (optional)
- // -- jssp[8] : argumentsList
- // -- jssp[16] : target
- // -- jssp[24] : receiver
+ // -- sp[0] : new.target (optional)
+ // -- sp[8] : argumentsList
+ // -- sp[16] : target
+ // -- sp[24] : receiver
// -----------------------------------
ASM_LOCATION("Builtins::Generate_ReflectConstruct");
@@ -2044,7 +2066,7 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
// -- x2 : argumentsList
// -- x1 : target
// -- x3 : new.target
- // -- jssp[0] : receiver (undefined)
+ // -- sp[0] : receiver (undefined)
// -----------------------------------
// 2. We don't need to check explicitly for constructor target here,
@@ -2060,25 +2082,26 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
RelocInfo::CODE_TARGET);
}
-static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
+namespace {
+
+void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
__ Push(lr, fp);
__ Mov(x11, StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR));
__ Push(x11, x1); // x1: function
- // We do not yet push the number of arguments, to maintain a 16-byte aligned
- // stack pointer. This is done in step (3) in
- // Generate_ArgumentsAdaptorTrampoline.
- __ Add(fp, jssp, StandardFrameConstants::kFixedFrameSizeFromFp);
+ __ SmiTag(x11, x0); // x0: number of arguments.
+ __ Push(x11, padreg);
+ __ Add(fp, __ StackPointer(),
+ ArgumentsAdaptorFrameConstants::kFixedFrameSizeFromFp);
}
-static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
+void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- x0 : result being passed through
// -----------------------------------
// Get the number of arguments passed (as a smi), tear down the frame and
// then drop the parameters and the receiver.
- __ Ldr(x10, MemOperand(fp, -(StandardFrameConstants::kFixedFrameSizeFromFp +
- kPointerSize)));
- __ Mov(jssp, fp);
+ __ Ldr(x10, MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ Mov(__ StackPointer(), fp);
__ Pop(fp, lr);
// Drop actual parameters and receiver.
@@ -2086,6 +2109,67 @@ static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
__ DropArguments(x10, TurboAssembler::kCountExcludesReceiver);
}
+// Prepares the stack for copying the varargs. First we claim the necessary
+// slots, taking care of potential padding. Then we copy the existing arguments
+// one slot up or one slot down, as needed.
+void Generate_PrepareForCopyingVarargs(MacroAssembler* masm, Register argc,
+ Register len) {
+ Label len_odd, exit;
+ Register slots_to_copy = x10; // If needed.
+ __ Add(slots_to_copy, argc, 1);
+ __ Add(argc, argc, len);
+ __ Tbnz(len, 0, &len_odd);
+ __ Claim(len);
+ __ B(&exit);
+
+ __ Bind(&len_odd);
+ // Claim space we need. If argc is even, slots_to_claim = len + 1, as we need
+ // one extra padding slot. If argc is odd, we know that the original arguments
+ // will have a padding slot we can reuse (since len is odd), so
+ // slots_to_claim = len - 1.
+ {
+ Register scratch = x11;
+ Register slots_to_claim = x12;
+ __ Add(slots_to_claim, len, 1);
+ __ And(scratch, argc, 1);
+ __ Sub(slots_to_claim, slots_to_claim, Operand(scratch, LSL, 1));
+ __ Claim(slots_to_claim);
+ }
+
+ Label copy_down;
+ __ Tbz(slots_to_copy, 0, &copy_down);
+
+ // Copy existing arguments one slot up.
+ {
+ Register src = x11;
+ Register dst = x12;
+ Register scratch = x13;
+ __ Sub(scratch, argc, 1);
+ __ SlotAddress(src, scratch);
+ __ SlotAddress(dst, argc);
+ __ CopyDoubleWords(dst, src, slots_to_copy,
+ TurboAssembler::kSrcLessThanDst);
+ }
+ __ B(&exit);
+
+ // Copy existing arguments one slot down and add padding.
+ __ Bind(&copy_down);
+ {
+ Register src = x11;
+ Register dst = x12;
+ Register scratch = x13;
+ __ Add(src, len, 1);
+ __ Mov(dst, len); // CopySlots will corrupt dst.
+ __ CopySlots(dst, src, slots_to_copy);
+ __ Add(scratch, argc, 1);
+ __ Poke(padreg, Operand(scratch, LSL, kPointerSizeLog2)); // Store padding.
+ }
+
+ __ Bind(&exit);
+}
+
+} // namespace
+
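A minimal scalar sketch of the claim-size computation that Generate_PrepareForCopyingVarargs encodes with the Add/And/Sub sequence above (illustrative C++ only, the function name and plain integer types are not part of the commit):

  // Mirrors the len-odd path, where argc has already been updated to
  // argc + len. If that total is even, len + 1 slots are claimed (one extra
  // padding slot); if it is odd, the padding slot above the existing
  // arguments is reused, so len - 1 slots suffice. Either way the number of
  // claimed slots is even, keeping the stack 16-byte aligned.
  int SlotsToClaimForOddLen(int total_args, int len) {
    return len + 1 - 2 * (total_args & 1);
  }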
// static
void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
Handle<Code> code) {
@@ -2118,30 +2202,34 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
__ Bind(&done);
}
- // Push arguments onto the stack (thisArgument is already on the stack).
- {
- Label done, push, loop;
- Register src = x5;
+ // Skip argument setup if we don't need to push any varargs.
+ Label done;
+ __ Cbz(len, &done);
- __ Add(src, arguments_list, FixedArray::kHeaderSize - kHeapObjectTag);
- __ Add(argc, argc, len); // The 'len' argument for Call() or Construct().
- __ Cbz(len, &done);
+ Generate_PrepareForCopyingVarargs(masm, argc, len);
+
+ // Push varargs.
+ {
+ Label loop;
+ Register src = x10;
Register the_hole_value = x11;
Register undefined_value = x12;
- // We do not use the CompareRoot macro as it would do a LoadRoot behind the
- // scenes and we want to avoid that in a loop.
+ Register scratch = x13;
+ __ Add(src, arguments_list, FixedArray::kHeaderSize - kHeapObjectTag);
__ LoadRoot(the_hole_value, Heap::kTheHoleValueRootIndex);
__ LoadRoot(undefined_value, Heap::kUndefinedValueRootIndex);
- __ Claim(len);
+ // We do not use the CompareRoot macro as it would do a LoadRoot behind the
+ // scenes and we want to avoid that in a loop.
+ // TODO(all): Consider using Ldp and Stp.
__ Bind(&loop);
__ Sub(len, len, 1);
- __ Ldr(x10, MemOperand(src, kPointerSize, PostIndex));
- __ Cmp(x10, the_hole_value);
- __ Csel(x10, x10, undefined_value, ne);
- __ Poke(x10, Operand(len, LSL, kPointerSizeLog2));
+ __ Ldr(scratch, MemOperand(src, kPointerSize, PostIndex));
+ __ Cmp(scratch, the_hole_value);
+ __ Csel(scratch, scratch, undefined_value, ne);
+ __ Poke(scratch, Operand(len, LSL, kPointerSizeLog2));
__ Cbnz(len, &loop);
- __ Bind(&done);
}
+ __ Bind(&done);
// Tail-call to the actual Call or Construct builtin.
__ Jump(code, RelocInfo::CODE_TARGET);
@@ -2158,13 +2246,16 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
// -- x2 : start index (to support rest parameters)
// -----------------------------------
+ Register argc = x0;
+ Register start_index = x2;
+
// Check if new.target has a [[Construct]] internal method.
if (mode == CallOrConstructMode::kConstruct) {
Label new_target_constructor, new_target_not_constructor;
__ JumpIfSmi(x3, &new_target_not_constructor);
__ Ldr(x5, FieldMemOperand(x3, HeapObject::kMapOffset));
__ Ldrb(x5, FieldMemOperand(x5, Map::kBitFieldOffset));
- __ TestAndBranchIfAnySet(x5, 1 << Map::kIsConstructor,
+ __ TestAndBranchIfAnySet(x5, Map::IsConstructorBit::kMask,
&new_target_constructor);
__ Bind(&new_target_not_constructor);
{
@@ -2177,49 +2268,57 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
}
// Check if we have an arguments adaptor frame below the function frame.
- Label arguments_adaptor, arguments_done;
- __ Ldr(x5, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ Ldr(x4, MemOperand(x5, CommonFrameConstants::kContextOrFrameTypeOffset));
- __ Cmp(x4, StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR));
- __ B(eq, &arguments_adaptor);
- {
- __ Ldr(x6, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ Ldr(x6, FieldMemOperand(x6, JSFunction::kSharedFunctionInfoOffset));
- __ Ldrsw(x6, FieldMemOperand(
- x6, SharedFunctionInfo::kFormalParameterCountOffset));
- __ Mov(x5, fp);
- }
- __ B(&arguments_done);
- __ Bind(&arguments_adaptor);
+ // args_fp will point to the frame that contains the actual arguments, which
+ // will be the current frame unless we have an arguments adaptor frame, in
+ // which case args_fp points to the arguments adaptor frame.
+ Register args_fp = x5;
+ Register len = x6;
{
- // Just load the length from ArgumentsAdaptorFrame.
- __ Ldrsw(x6, UntagSmiMemOperand(
- x5, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ Label arguments_adaptor, arguments_done;
+ Register scratch = x10;
+ __ Ldr(args_fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ Ldr(x4, MemOperand(args_fp,
+ CommonFrameConstants::kContextOrFrameTypeOffset));
+ __ Cmp(x4, StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR));
+ __ B(eq, &arguments_adaptor);
+ {
+ __ Ldr(scratch,
+ MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ Ldr(scratch,
+ FieldMemOperand(scratch, JSFunction::kSharedFunctionInfoOffset));
+ __ Ldrsw(len,
+ FieldMemOperand(
+ scratch, SharedFunctionInfo::kFormalParameterCountOffset));
+ __ Mov(args_fp, fp);
+ }
+ __ B(&arguments_done);
+ __ Bind(&arguments_adaptor);
+ {
+ // Just load the length from ArgumentsAdaptorFrame.
+ __ Ldrsw(len,
+ UntagSmiMemOperand(
+ args_fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ }
+ __ Bind(&arguments_done);
}
- __ Bind(&arguments_done);
Label stack_done, stack_overflow;
- __ Subs(x6, x6, x2);
+ __ Subs(len, len, start_index);
__ B(le, &stack_done);
- {
- // Check for stack overflow.
- Generate_StackOverflowCheck(masm, x6, &stack_overflow);
+ // Check for stack overflow.
+ Generate_StackOverflowCheck(masm, x6, &stack_overflow);
- // Forward the arguments from the caller frame.
- {
- Label loop;
- __ Add(x5, x5, kPointerSize);
- __ Add(x0, x0, x6);
- __ Bind(&loop);
- {
- __ Ldr(x4, MemOperand(x5, x6, LSL, kPointerSizeLog2));
- __ Push(x4);
- __ Subs(x6, x6, 1);
- __ B(ne, &loop);
- }
- }
+ Generate_PrepareForCopyingVarargs(masm, argc, len);
+
+ // Push varargs.
+ {
+ Register dst = x13;
+ __ Add(args_fp, args_fp, 2 * kPointerSize);
+ __ SlotAddress(dst, 0);
+ __ CopyDoubleWords(dst, args_fp, len);
}
__ B(&stack_done);
+
__ Bind(&stack_overflow);
__ TailCallRuntime(Runtime::kThrowStackOverflow);
__ Bind(&stack_done);
@@ -2338,12 +2437,16 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
// -- x3 : new.target (only in case of [[Construct]])
// -----------------------------------
+ Register bound_argc = x4;
+ Register bound_argv = x2;
+
// Load [[BoundArguments]] into x2 and length of that into x4.
Label no_bound_arguments;
- __ Ldr(x2, FieldMemOperand(x1, JSBoundFunction::kBoundArgumentsOffset));
- __ Ldrsw(x4, UntagSmiFieldMemOperand(x2, FixedArray::kLengthOffset));
- __ Cmp(x4, 0);
- __ B(eq, &no_bound_arguments);
+ __ Ldr(bound_argv,
+ FieldMemOperand(x1, JSBoundFunction::kBoundArgumentsOffset));
+ __ Ldrsw(bound_argc,
+ UntagSmiFieldMemOperand(bound_argv, FixedArray::kLengthOffset));
+ __ Cbz(bound_argc, &no_bound_arguments);
{
// ----------- S t a t e -------------
// -- x0 : the number of arguments (not including the receiver)
@@ -2353,44 +2456,97 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
// -- x4 : the number of [[BoundArguments]]
// -----------------------------------
+ Register argc = x0;
+
+ // Check for stack overflow.
{
- Label done;
- __ Claim(x4);
// Check the stack for overflow. We are not trying to catch interruptions
// (i.e. debug break and preemption) here, so check the "real stack
// limit".
- __ CompareRoot(jssp, Heap::kRealStackLimitRootIndex);
+ Label done;
+ __ LoadRoot(x10, Heap::kRealStackLimitRootIndex);
+ // Make x10 the space we have left. The stack might already be overflowed
+ // here which will cause x10 to become negative.
+ __ Sub(x10, masm->StackPointer(), x10);
+ // Check if the arguments will overflow the stack.
+ __ Cmp(x10, Operand(bound_argc, LSL, kPointerSizeLog2));
__ B(gt, &done); // Signed comparison.
- // Restore the stack pointer.
- __ Drop(x4);
- {
- FrameScope scope(masm, StackFrame::MANUAL);
- __ EnterFrame(StackFrame::INTERNAL);
- __ CallRuntime(Runtime::kThrowStackOverflow);
- }
+ __ TailCallRuntime(Runtime::kThrowStackOverflow);
__ Bind(&done);
}
- UseScratchRegisterScope temps(masm);
- Register argc = temps.AcquireX();
- // Relocate arguments down the stack.
- __ Mov(argc, x0);
- __ CopySlots(0, x4, argc);
+ // Check if we need padding.
+ Label copy_args, copy_bound_args;
+ Register total_argc = x15;
+ Register slots_to_claim = x12;
+ __ Add(total_argc, argc, bound_argc);
+ __ Mov(slots_to_claim, bound_argc);
+ __ Tbz(bound_argc, 0, &copy_args);
+
+ // Load receiver before we start moving the arguments. We will only
+ // need this in this path because the bound arguments are odd.
+ Register receiver = x14;
+ __ Peek(receiver, Operand(argc, LSL, kPointerSizeLog2));
- // Copy [[BoundArguments]] to the stack (below the arguments). The first
- // element of the array is copied to the highest address.
+ // Claim space we need. If argc is even, slots_to_claim = bound_argc + 1,
+ // as we need one extra padding slot. If argc is odd, we know that the
+ // original arguments will have a padding slot we can reuse (since
+ // bound_argc is odd), so slots_to_claim = bound_argc - 1.
{
- Label loop;
- __ Ldrsw(x4, UntagSmiFieldMemOperand(x2, FixedArray::kLengthOffset));
- __ Add(x2, x2, FixedArray::kHeaderSize - kHeapObjectTag);
- __ SlotAddress(x11, x0);
- __ Add(x0, x0, x4);
- __ Bind(&loop);
- __ Sub(x4, x4, 1);
- __ Ldr(x10, MemOperand(x2, x4, LSL, kPointerSizeLog2));
- // Poke into claimed area of stack.
- __ Str(x10, MemOperand(x11, kPointerSize, PostIndex));
- __ Cbnz(x4, &loop);
+ Register scratch = x11;
+ __ Add(slots_to_claim, bound_argc, 1);
+ __ And(scratch, total_argc, 1);
+ __ Sub(slots_to_claim, slots_to_claim, Operand(scratch, LSL, 1));
+ }
+
+ // Copy bound arguments.
+ __ Bind(&copy_args);
+ // Skip claim and copy of existing arguments in the special case where we
+ // do not need to claim any slots (this will be the case when
+ // bound_argc == 1 and the existing arguments have padding we can reuse).
+ __ Cbz(slots_to_claim, &copy_bound_args);
+ __ Claim(slots_to_claim);
+ {
+ Register count = x10;
+ // Relocate arguments to a lower address.
+ __ Mov(count, argc);
+ __ CopySlots(0, slots_to_claim, count);
+
+ __ Bind(&copy_bound_args);
+ // Copy [[BoundArguments]] to the stack (below the arguments). The first
+ // element of the array is copied to the highest address.
+ {
+ Label loop;
+ Register counter = x10;
+ Register scratch = x11;
+ Register copy_to = x12;
+ __ Add(bound_argv, bound_argv,
+ FixedArray::kHeaderSize - kHeapObjectTag);
+ __ SlotAddress(copy_to, argc);
+ __ Add(argc, argc,
+ bound_argc); // Update argc to include bound arguments.
+ __ Lsl(counter, bound_argc, kPointerSizeLog2);
+ __ Bind(&loop);
+ __ Sub(counter, counter, kPointerSize);
+ __ Ldr(scratch, MemOperand(bound_argv, counter));
+ // Poke into claimed area of stack.
+ __ Str(scratch, MemOperand(copy_to, kPointerSize, PostIndex));
+ __ Cbnz(counter, &loop);
+ }
+
+ {
+ Label done;
+ Register scratch = x10;
+ __ Tbz(bound_argc, 0, &done);
+ // Store receiver.
+ __ Add(scratch, __ StackPointer(),
+ Operand(total_argc, LSL, kPointerSizeLog2));
+ __ Str(receiver, MemOperand(scratch, kPointerSize, PostIndex));
+ __ Tbnz(total_argc, 0, &done);
+ // Store padding.
+ __ Str(padreg, MemOperand(scratch));
+ __ Bind(&done);
+ }
}
}
__ Bind(&no_bound_arguments);
@@ -2438,7 +2594,7 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
// Check if target has a [[Call]] internal method.
__ Ldrb(x4, FieldMemOperand(x4, Map::kBitFieldOffset));
- __ TestAndBranchIfAllClear(x4, 1 << Map::kIsCallable, &non_callable);
+ __ TestAndBranchIfAllClear(x4, Map::IsCallableBit::kMask, &non_callable);
// Check if target is a proxy and call CallProxy external builtin
__ Cmp(x5, JS_PROXY_TYPE);
@@ -2533,7 +2689,8 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
// Check if target has a [[Construct]] internal method.
__ Ldrb(x2, FieldMemOperand(x4, Map::kBitFieldOffset));
- __ TestAndBranchIfAllClear(x2, 1 << Map::kIsConstructor, &non_constructor);
+ __ TestAndBranchIfAllClear(x2, Map::IsConstructorBit::kMask,
+ &non_constructor);
// Only dispatch to bound functions after checking whether they are
// constructors.
@@ -2605,19 +2762,6 @@ void Builtins::Generate_Abort(MacroAssembler* masm) {
__ TailCallRuntime(Runtime::kAbort);
}
-// static
-void Builtins::Generate_AbortJS(MacroAssembler* masm) {
- ASM_LOCATION("Builtins::Generate_AbortJS");
- // ----------- S t a t e -------------
- // -- x1 : message as String object
- // -- lr : return address
- // -----------------------------------
- MacroAssembler::NoUseRealAbortsScope no_use_real_aborts(masm);
- __ PushArgument(x1);
- __ Move(cp, Smi::kZero);
- __ TailCallRuntime(Runtime::kAbortJS);
-}
-
void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
ASM_LOCATION("Builtins::Generate_ArgumentsAdaptorTrampoline");
// ----------- S t a t e -------------
@@ -2651,14 +2795,16 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// 4 | num of | |
// | actual args | |
// |- - - - - - - - -| |
- // [5] | [padding] | |
+ // 5 | padding | |
// |-----------------+---- |
- // 5+pad | receiver | ^ |
+ // [6] | [padding] | ^ |
+ // |- - - - - - - - -| | |
+ // 6+pad | receiver | | |
// | (parameter 0) | | |
// |- - - - - - - - -| | |
- // 6+pad | parameter 1 | | |
+ // 7+pad | parameter 1 | | |
// |- - - - - - - - -| Frame slots ----> expected args
- // 7+pad | parameter 2 | | |
+ // 8+pad | parameter 2 | | |
// |- - - - - - - - -| | |
// | | | |
// ... | ... | | |
@@ -2671,7 +2817,8 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// | [undefined] | v <-- stack ptr v
// -----+-----------------+---------------------------------
//
- // There is an optional slot of padding to ensure stack alignment.
+ // There is an optional slot of padding above the receiver to ensure stack
+ // alignment of the arguments.
// If the number of expected arguments is larger than the number of actual
// arguments, the remaining expected slots will be filled with undefined.
@@ -2695,10 +2842,10 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
Register argc_unused_actual = x14;
Register scratch1 = x15, scratch2 = x16;
- // We need slots for the expected arguments, with two extra slots for the
- // number of actual arguments and the receiver.
+ // We need slots for the expected arguments, with one extra slot for the
+ // receiver.
__ RecordComment("-- Stack check --");
- __ Add(scratch1, argc_expected, 2);
+ __ Add(scratch1, argc_expected, 1);
Generate_StackOverflowCheck(masm, scratch1, &stack_overflow);
// Round up number of slots to be even, to maintain stack alignment.
@@ -2707,7 +2854,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ Bic(scratch1, scratch1, 1);
__ Claim(scratch1, kPointerSize);
- __ Mov(copy_to, jssp);
+ __ Mov(copy_to, __ StackPointer());
// Preparing the expected arguments is done in four steps, the order of
// which is chosen so we can use LDP/STP and avoid conditional branches as
@@ -2738,7 +2885,9 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ Bind(&enough_arguments);
// (2) Copy all of the actual arguments, or as many as we need.
+ Label skip_copy;
__ RecordComment("-- Copy actual arguments --");
+ __ Cbz(argc_to_copy, &skip_copy);
__ Add(copy_end, copy_to, Operand(argc_to_copy, LSL, kPointerSizeLog2));
__ Add(copy_from, fp, 2 * kPointerSize);
// Adjust for difference between actual and expected arguments.
@@ -2755,21 +2904,22 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ Stp(scratch1, scratch2, MemOperand(copy_to, 2 * kPointerSize, PostIndex));
__ Cmp(copy_end, copy_to);
__ B(hi, &copy_2_by_2);
+ __ Bind(&skip_copy);
- // (3) Store number of actual arguments and padding. The padding might be
- // unnecessary, in which case it will be overwritten by the receiver.
- __ RecordComment("-- Store number of args and padding --");
- __ SmiTag(scratch1, argc_actual);
- __ Stp(xzr, scratch1, MemOperand(fp, -4 * kPointerSize));
+ // (3) Store padding, which might be overwritten by the receiver, if it is not
+ // necessary.
+ __ RecordComment("-- Store padding --");
+ __ Str(padreg, MemOperand(fp, -5 * kPointerSize));
- // (4) Store receiver. Calculate target address from jssp to avoid checking
+ // (4) Store receiver. Calculate target address from the sp to avoid checking
// for padding. Storing the receiver will overwrite either the extra slot
// we copied with the actual arguments, if we did copy one, or the padding we
// stored above.
__ RecordComment("-- Store receiver --");
__ Add(copy_from, fp, 2 * kPointerSize);
__ Ldr(scratch1, MemOperand(copy_from, argc_actual, LSL, kPointerSizeLog2));
- __ Str(scratch1, MemOperand(jssp, argc_expected, LSL, kPointerSizeLog2));
+ __ Str(scratch1,
+ MemOperand(__ StackPointer(), argc_expected, LSL, kPointerSizeLog2));
// Arguments have been adapted. Now call the entry point.
__ RecordComment("-- Call entry point --");
@@ -2805,10 +2955,6 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
}
void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
- // Wasm code uses the csp. This builtin excepts to use the jssp.
- // Thus, move csp to jssp when entering this builtin (called from wasm).
- DCHECK(masm->StackPointer().is(jssp));
- __ Move(jssp, csp);
{
FrameScope scope(masm, StackFrame::INTERNAL);
@@ -2833,9 +2979,6 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
__ PopDRegList(fp_regs);
__ PopXRegList(gp_regs);
}
- // Move back to csp land. jssp now has the same value as when entering this
- // function, but csp might have changed in the runtime call.
- __ Move(csp, jssp);
// Now jump to the instructions of the returned code object.
__ Jump(x8);
}
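Taken together, the recurring pattern in this change is to claim an even number of stack slots and fill any spare slot with padreg: with jssp removed, the real arm64 stack pointer is used directly and must stay 16-byte aligned, while each slot is only kPointerSize (8 bytes). A minimal sketch of the rounding that the Add/Bic pairs before __ Claim() implement (plain C++, name illustrative, not part of V8):

  // Round a slot count up to the next even number, as in
  //   __ Add(x11, x10, 1); __ Bic(x11, x11, 1);
  // (or Add(x11, x10, 2) when a receiver slot is added on top of x10).
  int RoundUpToEven(int slots) {
    return (slots + 1) & ~1;  // 3 -> 4, 4 -> 4, 5 -> 6
  }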