Diffstat (limited to 'deps/v8/src/builtins/arm/builtins-arm.cc')
-rw-r--r--  deps/v8/src/builtins/arm/builtins-arm.cc  129
1 file changed, 74 insertions(+), 55 deletions(-)
diff --git a/deps/v8/src/builtins/arm/builtins-arm.cc b/deps/v8/src/builtins/arm/builtins-arm.cc
index 2b2b9c2b34..1ea0bb733b 100644
--- a/deps/v8/src/builtins/arm/builtins-arm.cc
+++ b/deps/v8/src/builtins/arm/builtins-arm.cc
@@ -156,13 +156,6 @@ void Builtins::Generate_ArrayConstructor(MacroAssembler* masm) {
__ TailCallStub(&stub);
}
-static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
- __ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
- __ ldr(r2, FieldMemOperand(r2, SharedFunctionInfo::kCodeOffset));
- __ add(r2, r2, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(r2);
-}
-
static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
Runtime::FunctionId function_id) {
// ----------- S t a t e -------------
@@ -190,6 +183,7 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
__ pop(r0);
__ SmiUntag(r0, r0);
}
+ static_assert(kJavaScriptCallCodeStartRegister == r2, "ABI mismatch");
__ add(r2, r2, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Jump(r2);
}
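
Editor's note on the `add r2, r2, Operand(Code::kHeaderSize - kHeapObjectTag)` / `Jump(r2)` pattern this patch standardizes on r2: the Code pointer loaded from the heap is tagged, and the first instruction lives kHeaderSize bytes into the Code object, so a single add both untags the pointer and skips the header. The standalone sketch below uses invented constant values, not V8's real kHeapObjectTag or Code::kHeaderSize.

// Illustrative sketch only, not part of the diff: assumed constants.
#include <cinttypes>
#include <cstdint>
#include <cstdio>

constexpr uintptr_t kHeapObjectTagSketch = 1;    // heap pointers carry a +1 tag
constexpr uintptr_t kCodeHeaderSizeSketch = 64;  // instructions start past the header

uintptr_t CodeEntryFromTaggedPointer(uintptr_t tagged_code) {
  // One add both untags the pointer and skips the header, matching the
  // generated add r2, r2, #(Code::kHeaderSize - kHeapObjectTag).
  return tagged_code + (kCodeHeaderSizeSketch - kHeapObjectTagSketch);
}

int main() {
  uintptr_t code_object = 0x1000;  // pretend allocation address
  std::printf("entry = 0x%" PRIxPTR "\n",
              CodeEntryFromTaggedPointer(code_object + kHeapObjectTagSketch));
  return 0;
}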
@@ -297,7 +291,7 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
__ ldr(r4, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
__ ldr(r4, FieldMemOperand(r4, SharedFunctionInfo::kCompilerHintsOffset));
- __ tst(r4, Operand(SharedFunctionInfo::kDerivedConstructorMask));
+ __ tst(r4, Operand(SharedFunctionInfo::IsDerivedConstructorBit::kMask));
__ b(ne, &not_create_implicit_receiver);
// If not derived class constructor: Allocate the new receiver object.
@@ -417,7 +411,7 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
__ ldr(r4, MemOperand(fp, ConstructFrameConstants::kConstructorOffset));
__ ldr(r4, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
__ ldr(r4, FieldMemOperand(r4, SharedFunctionInfo::kCompilerHintsOffset));
- __ tst(r4, Operand(SharedFunctionInfo::kClassConstructorMask));
+ __ tst(r4, Operand(SharedFunctionInfo::IsClassConstructorBit::kMask));
if (restrict_constructor_return) {
// Throw if constructor function is a class constructor
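
Editor's note on the mask renames here and in the later Generate_CallFunction hunk: the raw kDerivedConstructorMask / kClassConstructorMask constants are replaced by per-flag BitField masks. The sketch below is a hedged illustration of what such a mask expands to; it is not V8's actual BitField template, and the bit positions are invented.

// Illustrative BitField-style helper; shifts and sizes are assumptions.
#include <cstdint>

template <int kShift, int kSize>
struct BitFieldSketch {
  static constexpr uint32_t kMask = ((uint32_t{1} << kSize) - 1) << kShift;
  static constexpr bool decode(uint32_t word) { return (word & kMask) != 0; }
};

using IsDerivedConstructorBitSketch = BitFieldSketch<4, 1>;  // assumed bit 4
using IsClassConstructorBitSketch = BitFieldSketch<5, 1>;    // assumed bit 5

static_assert(IsDerivedConstructorBitSketch::kMask == 1u << 4, "derived mask");
static_assert(IsClassConstructorBitSketch::kMask == 1u << 5, "class mask");

// The generated "tst r4, #mask; b ne, ..." corresponds to testing one flag
// packed into the SharedFunctionInfo compiler hints word:
bool IsDerivedConstructor(uint32_t compiler_hints) {
  return IsDerivedConstructorBitSketch::decode(compiler_hints);
}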
@@ -559,9 +553,10 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// undefined because generator functions are non-constructable.
__ Move(r3, r1);
__ Move(r1, r4);
- __ ldr(scratch, FieldMemOperand(r1, JSFunction::kCodeOffset));
- __ add(scratch, scratch, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(scratch);
+ static_assert(kJavaScriptCallCodeStartRegister == r2, "ABI mismatch");
+ __ ldr(r2, FieldMemOperand(r1, JSFunction::kCodeOffset));
+ __ add(r2, r2, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Jump(r2);
}
__ bind(&prepare_step_in_if_stepping);
@@ -828,9 +823,10 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
// register.
ReplaceClosureCodeWithOptimizedCode(masm, optimized_code_entry, closure,
scratch2, scratch3, feedback_vector);
- __ add(optimized_code_entry, optimized_code_entry,
+ static_assert(kJavaScriptCallCodeStartRegister == r2, "ABI mismatch");
+ __ add(r2, optimized_code_entry,
Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(optimized_code_entry);
+ __ Jump(r2);
// Optimized code slot contains deoptimized code, evict it and re-enter the
// closure's code.
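
Editor's note: with the r2 change applied, MaybeTailCallOptimizedCodeSlot roughly does the following: if the feedback vector's optimized-code slot holds live code, install it on the closure and tail call it through kJavaScriptCallCodeStartRegister; if that code has been deoptimized, evict the slot and fall back to the closure's current code. The sketch below uses simplified placeholder types and omits the optimization-marker handling.

// Illustrative placeholder types; not V8's Code/FeedbackVector objects.
struct CodeSketch {
  bool marked_for_deoptimization;
};

struct ClosureSketch {
  CodeSketch* code;
};

struct FeedbackVectorSlotSketch {
  CodeSketch* optimized_code;  // the optimized code slot; may be null or stale
};

// Returns the code that should be entered next.
CodeSketch* SelectCodeToEnter(ClosureSketch* closure,
                              FeedbackVectorSlotSketch* vector) {
  CodeSketch* optimized = vector->optimized_code;
  if (optimized == nullptr) {
    return closure->code;  // no optimized code: take the usual path
  }
  if (!optimized->marked_for_deoptimization) {
    closure->code = optimized;  // ReplaceClosureCodeWithOptimizedCode
    return optimized;           // tail called through r2 in the generated code
  }
  vector->optimized_code = nullptr;  // evict the deoptimized code
  return closure->code;              // re-enter the closure's current code
}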
@@ -844,10 +840,13 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
}
// Advance the current bytecode offset. This simulates what all bytecode
-// handlers do upon completion of the underlying operation.
-static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
- Register bytecode_offset, Register bytecode,
- Register scratch1) {
+// handlers do upon completion of the underlying operation. Will bail out to a
+// label if the bytecode (without prefix) is a return bytecode.
+static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
+ Register bytecode_array,
+ Register bytecode_offset,
+ Register bytecode, Register scratch1,
+ Label* if_return) {
Register bytecode_size_table = scratch1;
DCHECK(!AreAliased(bytecode_array, bytecode_offset, bytecode_size_table,
bytecode));
@@ -857,11 +856,11 @@ static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
Operand(ExternalReference::bytecode_size_table_address(masm->isolate())));
// Check if the bytecode is a Wide or ExtraWide prefix bytecode.
- Label load_size, extra_wide;
+ Label process_bytecode, extra_wide;
STATIC_ASSERT(0 == static_cast<int>(interpreter::Bytecode::kWide));
STATIC_ASSERT(1 == static_cast<int>(interpreter::Bytecode::kExtraWide));
__ cmp(bytecode, Operand(0x1));
- __ b(hi, &load_size);
+ __ b(hi, &process_bytecode);
__ b(eq, &extra_wide);
// Load the next bytecode and update table to the wide scaled table.
@@ -869,7 +868,7 @@ static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
__ ldrb(bytecode, MemOperand(bytecode_array, bytecode_offset));
__ add(bytecode_size_table, bytecode_size_table,
Operand(kIntSize * interpreter::Bytecodes::kBytecodeCount));
- __ jmp(&load_size);
+ __ jmp(&process_bytecode);
__ bind(&extra_wide);
// Load the next bytecode and update table to the extra wide scaled table.
@@ -878,8 +877,16 @@ static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
__ add(bytecode_size_table, bytecode_size_table,
Operand(2 * kIntSize * interpreter::Bytecodes::kBytecodeCount));
- // Load the size of the current bytecode.
- __ bind(&load_size);
+ __ bind(&process_bytecode);
+
+// Bailout to the return label if this is a return bytecode.
+#define JUMP_IF_EQUAL(NAME) \
+ __ cmp(bytecode, Operand(static_cast<int>(interpreter::Bytecode::k##NAME))); \
+ __ b(if_return, eq);
+ RETURN_BYTECODE_LIST(JUMP_IF_EQUAL)
+#undef JUMP_IF_EQUAL
+
+ // Otherwise, load the size of the current bytecode and advance the offset.
__ ldr(scratch1, MemOperand(bytecode_size_table, bytecode, LSL, 2));
__ add(bytecode_offset, bytecode_offset, scratch1);
}
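
Editor's note: taken as a whole, the renamed AdvanceBytecodeOffsetOrReturn now (1) consumes a Wide/ExtraWide prefix and switches to the correspondingly scaled slice of the size table, (2) bails out to if_return when the unprefixed bytecode is any return bytecode from RETURN_BYTECODE_LIST, and (3) otherwise adds the bytecode's size to the offset. The standalone C++ sketch below mirrors that logic with invented bytecode values and sizes; RETURN_BYTECODE_LIST_SKETCH stands in for the real X-macro.

// Illustrative sketch only; bytecode encodings and sizes are assumptions.
#include <cstddef>
#include <cstdint>

enum class BytecodeSketch : uint8_t { kWide = 0, kExtraWide = 1, kLdaZero = 2, kReturn = 3 };
constexpr int kBytecodeCountSketch = 4;

// One size per bytecode for each operand scale: single, wide, extra wide.
constexpr int kSizeTableSketch[3 * kBytecodeCountSketch] = {
    1, 1, 1, 1,   // single
    1, 1, 2, 1,   // wide
    1, 1, 3, 1};  // extra wide

#define RETURN_BYTECODE_LIST_SKETCH(V) V(Return)

// Returns false on the if_return path instead of advancing past a return.
bool AdvanceBytecodeOffsetOrReturnSketch(const uint8_t* bytecode_array,
                                         size_t* bytecode_offset) {
  const int* size_table = kSizeTableSketch;
  uint8_t bytecode = bytecode_array[*bytecode_offset];

  // Wide/ExtraWide prefix: consume it, reload the real bytecode, and move to
  // the scaled slice of the size table (mirrors the two add instructions).
  if (bytecode == static_cast<uint8_t>(BytecodeSketch::kWide)) {
    bytecode = bytecode_array[++*bytecode_offset];
    size_table += kBytecodeCountSketch;
  } else if (bytecode == static_cast<uint8_t>(BytecodeSketch::kExtraWide)) {
    bytecode = bytecode_array[++*bytecode_offset];
    size_table += 2 * kBytecodeCountSketch;
  }

// Bail out if this is a return bytecode (mirrors JUMP_IF_EQUAL above).
#define JUMP_IF_EQUAL(NAME) \
  if (bytecode == static_cast<uint8_t>(BytecodeSketch::k##NAME)) return false;
  RETURN_BYTECODE_LIST_SKETCH(JUMP_IF_EQUAL)
#undef JUMP_IF_EQUAL

  // Otherwise advance by this bytecode's size at the selected scale.
  *bytecode_offset += size_table[bytecode];
  return true;
}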
@@ -907,7 +914,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Load the feedback vector from the closure.
__ ldr(feedback_vector,
- FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
+ FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
__ ldr(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
// Read off the optimized code slot in the feedback vector, and if there
// is optimized code or an optimization marker, call that instead.
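
Editor's note: the three kFeedbackVectorOffset → kFeedbackCellOffset changes in this patch reflect an extra level of indirection. The closure now points at a FeedbackCell whose value slot holds the feedback vector, which is why each site keeps the second load through Cell::kValueOffset. A hedged sketch of that data path with placeholder types:

// Illustrative placeholder types only; not V8's real object layout.
struct FeedbackVectorSketch { /* optimized code slot, ICs, ... */ };

struct FeedbackCellSketch {
  FeedbackVectorSketch* value;  // read via Cell::kValueOffset above
};

struct JSFunctionSketch {
  FeedbackCellSketch* feedback_cell;  // read via JSFunction::kFeedbackCellOffset
};

FeedbackVectorSketch* LoadFeedbackVector(const JSFunctionSketch* closure) {
  // Two dependent loads, matching the back-to-back __ ldr instructions.
  return closure->feedback_cell->value;
}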
@@ -1008,11 +1015,12 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ mov(kInterpreterDispatchTableRegister,
Operand(ExternalReference::interpreter_dispatch_table_address(
masm->isolate())));
- __ ldrb(r1, MemOperand(kInterpreterBytecodeArrayRegister,
+ __ ldrb(r4, MemOperand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister));
- __ ldr(r4, MemOperand(kInterpreterDispatchTableRegister, r1, LSL,
- kPointerSizeLog2));
- __ Call(r4);
+ __ ldr(
+ kJavaScriptCallCodeStartRegister,
+ MemOperand(kInterpreterDispatchTableRegister, r4, LSL, kPointerSizeLog2));
+ __ Call(kJavaScriptCallCodeStartRegister);
masm->isolate()->heap()->SetInterpreterEntryReturnPCOffset(masm->pc_offset());
// Any returns to the entry trampoline are either due to the return bytecode
@@ -1025,16 +1033,13 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
__ SmiUntag(kInterpreterBytecodeOffsetRegister);
- // Check if we should return.
+ // Either return, or advance to the next bytecode and dispatch.
Label do_return;
__ ldrb(r1, MemOperand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister));
- __ cmp(r1, Operand(static_cast<int>(interpreter::Bytecode::kReturn)));
- __ b(&do_return, eq);
-
- // Advance to the next bytecode and dispatch.
- AdvanceBytecodeOffset(masm, kInterpreterBytecodeArrayRegister,
- kInterpreterBytecodeOffsetRegister, r1, r2);
+ AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
+ kInterpreterBytecodeOffsetRegister, r1, r2,
+ &do_return);
__ jmp(&do_dispatch);
__ bind(&do_return);
@@ -1215,13 +1220,14 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
__ SmiUntag(kInterpreterBytecodeOffsetRegister);
// Dispatch to the target bytecode.
- __ ldrb(r1, MemOperand(kInterpreterBytecodeArrayRegister,
- kInterpreterBytecodeOffsetRegister));
UseScratchRegisterScope temps(masm);
Register scratch = temps.Acquire();
- __ ldr(scratch, MemOperand(kInterpreterDispatchTableRegister, r1, LSL,
- kPointerSizeLog2));
- __ Jump(scratch);
+ __ ldrb(scratch, MemOperand(kInterpreterBytecodeArrayRegister,
+ kInterpreterBytecodeOffsetRegister));
+ __ ldr(kJavaScriptCallCodeStartRegister,
+ MemOperand(kInterpreterDispatchTableRegister, scratch, LSL,
+ kPointerSizeLog2));
+ __ Jump(kJavaScriptCallCodeStartRegister);
}
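
Editor's note: both interpreter entry points now route the handler address through kJavaScriptCallCodeStartRegister (r2) rather than an arbitrary scratch register, so the called handler can rely on finding its own code start there. The dispatch itself is a table lookup keyed by the current bytecode; a hedged standalone sketch with plain function pointers standing in for handler Code objects:

// Illustrative sketch only; handlers are ordinary function pointers here.
#include <cstddef>
#include <cstdint>

using BytecodeHandlerSketch = void (*)();

void DispatchSketch(const uint8_t* bytecode_array, size_t bytecode_offset,
                    const BytecodeHandlerSketch* dispatch_table) {
  // ldrb: load the current bytecode.
  uint8_t bytecode = bytecode_array[bytecode_offset];
  // ldr ..., LSL #kPointerSizeLog2: index a table of pointer-sized entries.
  BytecodeHandlerSketch handler = dispatch_table[bytecode];
  // Jump(kJavaScriptCallCodeStartRegister): tail call the handler with its
  // code start in the agreed register.
  handler();
}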
void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
@@ -1237,14 +1243,20 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
kInterpreterBytecodeOffsetRegister));
// Advance to the next bytecode.
- AdvanceBytecodeOffset(masm, kInterpreterBytecodeArrayRegister,
- kInterpreterBytecodeOffsetRegister, r1, r2);
+ Label if_return;
+ AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
+ kInterpreterBytecodeOffsetRegister, r1, r2,
+ &if_return);
// Convert new bytecode offset to a Smi and save in the stackframe.
__ SmiTag(r2, kInterpreterBytecodeOffsetRegister);
__ str(r2, MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
Generate_InterpreterEnterBytecode(masm);
+
+ // We should never take the if_return path.
+ __ bind(&if_return);
+ __ Abort(AbortReason::kInvalidBytecodeAdvance);
}
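
Editor's note: the new if_return label lets each caller of AdvanceBytecodeOffsetOrReturn decide whether hitting a return bytecode is expected. In Generate_InterpreterEnterBytecodeAdvance it never is, so the path ends in Abort(kInvalidBytecodeAdvance). A hedged, self-contained sketch of that contract; the helper is a stand-in reduced to a single return-bytecode check, with an invented encoding.

// Illustrative stand-in for the advance helper; not V8 code.
#include <cassert>
#include <cstddef>
#include <cstdint>

constexpr uint8_t kReturnBytecodeSketch = 0xAB;  // assumed encoding

// Returns false on the if_return path instead of advancing.
bool AdvanceUnlessReturn(const uint8_t* bytecodes, size_t* offset, size_t size) {
  if (bytecodes[*offset] == kReturnBytecodeSketch) return false;
  *offset += size;  // the real size comes from the bytecode size table
  return true;
}

void EnterBytecodeAdvanceSketch(const uint8_t* bytecodes, size_t* offset) {
  // Mirrors AdvanceBytecodeOffsetOrReturn(..., &if_return) followed by
  // __ bind(&if_return); __ Abort(AbortReason::kInvalidBytecodeAdvance).
  bool advanced = AdvanceUnlessReturn(bytecodes, offset, /*size=*/1);
  assert(advanced && "InterpreterEnterBytecodeAdvance hit a return bytecode");
  (void)advanced;  // keep release builds warning-free
}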
void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
@@ -1262,7 +1274,7 @@ void Builtins::Generate_CheckOptimizationMarker(MacroAssembler* masm) {
// Get the feedback vector.
Register feedback_vector = r2;
__ ldr(feedback_vector,
- FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
+ FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
__ ldr(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
// The feedback vector must be defined.
@@ -1275,7 +1287,11 @@ void Builtins::Generate_CheckOptimizationMarker(MacroAssembler* masm) {
MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, r4, r6, r5);
// Otherwise, tail call the SFI code.
- GenerateTailCallToSharedCode(masm);
+ static_assert(kJavaScriptCallCodeStartRegister == r2, "ABI mismatch");
+ __ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
+ __ ldr(r2, FieldMemOperand(r2, SharedFunctionInfo::kCodeOffset));
+ __ add(r2, r2, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Jump(r2);
}
void Builtins::Generate_CompileLazyDeoptimizedCode(MacroAssembler* masm) {
@@ -1304,7 +1320,7 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// Do we have a valid feedback vector?
__ ldr(feedback_vector,
- FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
+ FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
__ ldr(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
__ JumpIfRoot(feedback_vector, Heap::kUndefinedValueRootIndex,
&gotta_call_runtime);
@@ -1491,9 +1507,10 @@ void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
}
// On failure, tail call back to regular js by re-calling the function
// which has been reset to the compile lazy builtin.
- __ ldr(r4, FieldMemOperand(r1, JSFunction::kCodeOffset));
- __ add(r4, r4, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(r4);
+ static_assert(kJavaScriptCallCodeStartRegister == r2, "ABI mismatch");
+ __ ldr(r2, FieldMemOperand(r1, JSFunction::kCodeOffset));
+ __ add(r2, r2, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Jump(r2);
}
namespace {
@@ -1978,7 +1995,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
Label class_constructor;
__ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
__ ldr(r3, FieldMemOperand(r2, SharedFunctionInfo::kCompilerHintsOffset));
- __ tst(r3, Operand(SharedFunctionInfo::kClassConstructorMask));
+ __ tst(r3, Operand(SharedFunctionInfo::IsClassConstructorBit::kMask));
__ b(ne, &class_constructor);
// Enter the context of the function; ToObject has to run in the function
@@ -2449,9 +2466,10 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// r0 : expected number of arguments
// r1 : function (passed through to callee)
// r3 : new target (passed through to callee)
- __ ldr(r4, FieldMemOperand(r1, JSFunction::kCodeOffset));
- __ add(r4, r4, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Call(r4);
+ static_assert(kJavaScriptCallCodeStartRegister == r2, "ABI mismatch");
+ __ ldr(r2, FieldMemOperand(r1, JSFunction::kCodeOffset));
+ __ add(r2, r2, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Call(r2);
// Store offset of return address for deoptimizer.
masm->isolate()->heap()->SetArgumentsAdaptorDeoptPCOffset(masm->pc_offset());
@@ -2464,9 +2482,10 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// Don't adapt arguments.
// -------------------------------------------
__ bind(&dont_adapt_arguments);
- __ ldr(r4, FieldMemOperand(r1, JSFunction::kCodeOffset));
- __ add(r4, r4, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(r4);
+ static_assert(kJavaScriptCallCodeStartRegister == r2, "ABI mismatch");
+ __ ldr(r2, FieldMemOperand(r1, JSFunction::kCodeOffset));
+ __ add(r2, r2, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Jump(r2);
__ bind(&stack_overflow);
{