Diffstat (limited to 'deps/v8/src/builtins/arm/builtins-arm.cc')
-rw-r--r--  deps/v8/src/builtins/arm/builtins-arm.cc  66
1 file changed, 38 insertions(+), 28 deletions(-)
diff --git a/deps/v8/src/builtins/arm/builtins-arm.cc b/deps/v8/src/builtins/arm/builtins-arm.cc
index 6c5a59ff8c..e0a5a90978 100644
--- a/deps/v8/src/builtins/arm/builtins-arm.cc
+++ b/deps/v8/src/builtins/arm/builtins-arm.cc
@@ -73,15 +73,11 @@ void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) {
static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
Runtime::FunctionId function_id) {
// ----------- S t a t e -------------
- // -- r0 : argument count (preserved for callee)
// -- r1 : target function (preserved for callee)
// -- r3 : new target (preserved for callee)
// -----------------------------------
{
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- // Push the number of arguments to the callee.
- __ SmiTag(r0);
- __ push(r0);
// Push a copy of the target function and the new target.
__ push(r1);
__ push(r3);
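[Note: this hunk drops the SmiTag/push and pop/SmiUntag pair that used to preserve the argument count in r0 across the runtime call; only the target function and new target still need saving. For readers unfamiliar with the tagging being removed, here is a minimal C++ sketch of 32-bit smi tag/untag arithmetic, assuming V8's 32-bit layout where kSmiTagSize == 1 and kSmiTag == 0; this is illustrative, not V8's actual code:]

#include <cstdint>

// The smi payload lives in the upper 31 bits; the low bit is the zero tag.
static inline int32_t SmiTag(int32_t value) {
  return value << 1;  // tag bit 0 is zero for smis
}

static inline int32_t SmiUntag(int32_t tagged) {
  return tagged >> 1;  // arithmetic shift restores the sign
}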
@@ -94,8 +90,6 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
// Restore target function and new target.
__ pop(r3);
__ pop(r1);
- __ pop(r0);
- __ SmiUntag(r0, r0);
}
static_assert(kJavaScriptCallCodeStartRegister == r2, "ABI mismatch");
__ JumpCodeObject(r2);
@@ -103,6 +97,20 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
namespace {
+void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
+ Register scratch, Label* stack_overflow) {
+ // Check the stack for overflow. We are not trying to catch
+ // interruptions (e.g. debug break and preemption) here, so the "real stack
+ // limit" is checked.
+ __ LoadRoot(scratch, RootIndex::kRealStackLimit);
+ // Make scratch the space we have left. The stack might already be overflowed
+ // here which will cause scratch to become negative.
+ __ sub(scratch, sp, scratch);
+ // Check if the arguments will overflow the stack.
+ __ cmp(scratch, Operand(num_args, LSL, kPointerSizeLog2));
+ __ b(le, stack_overflow); // Signed comparison.
+}
+
void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : number of arguments
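[Note: the Generate_StackOverflowCheck helper added above computes how much stack space remains and compares it against the space the incoming arguments will need. A minimal C++ sketch of the same arithmetic, with locals standing in for the sp and scratch registers and the root-table load; kPointerSizeLog2 is 2 on 32-bit ARM:]

#include <cstdint>

bool StackWillOverflow(uintptr_t sp, uintptr_t real_stack_limit,
                       intptr_t num_args) {
  const int kPointerSizeLog2 = 2;
  // scratch = sp - limit: the space left, negative if already overflowed.
  intptr_t scratch = static_cast<intptr_t>(sp - real_stack_limit);
  // "b le" after the cmp is a signed comparison, so a negative scratch
  // (stack already blown) also takes the overflow branch.
  return scratch <= (num_args << kPointerSizeLog2);
}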
@@ -115,6 +123,10 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
Register scratch = r2;
+ Label stack_overflow;
+
+ Generate_StackOverflowCheck(masm, r0, scratch, &stack_overflow);
+
// Enter a construct frame.
{
FrameAndConstantPoolScope scope(masm, StackFrame::CONSTRUCT);
@@ -170,20 +182,13 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
__ add(sp, sp, Operand(scratch, LSL, kPointerSizeLog2 - kSmiTagSize));
__ add(sp, sp, Operand(kPointerSize));
__ Jump(lr);
-}
-void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
- Register scratch, Label* stack_overflow) {
- // Check the stack for overflow. We are not trying to catch
- // interruptions (e.g. debug break and preemption) here, so the "real stack
- // limit" is checked.
- __ LoadRoot(scratch, RootIndex::kRealStackLimit);
- // Make scratch the space we have left. The stack might already be overflowed
- // here which will cause scratch to become negative.
- __ sub(scratch, sp, scratch);
- // Check if the arguments will overflow the stack.
- __ cmp(scratch, Operand(num_args, LSL, kPointerSizeLog2));
- __ b(le, stack_overflow); // Signed comparison.
+ __ bind(&stack_overflow);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ CallRuntime(Runtime::kThrowStackOverflow);
+ __ bkpt(0); // Unreachable code.
+ }
}
} // namespace
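[Note: the bound stack_overflow label above follows the usual builtin bail-out shape: a cold block at the end of the stub enters an internal frame, calls Runtime::kThrowStackOverflow (which does not return), and plants bkpt(0) to document that the fall-through is unreachable. The same shape in plain C++, with hypothetical names standing in for the runtime call and the stub:]

#include <cstdlib>

// Stand-in for Runtime::kThrowStackOverflow; like the runtime call, it
// never returns.
[[noreturn]] void ThrowStackOverflow() { std::abort(); }

void ConstructStub(bool would_overflow) {
  if (would_overflow) {
    // Cold path: in the builtin this sits after the hot code, behind the
    // stack_overflow label, and ends in bkpt(0) to mark it unreachable.
    ThrowStackOverflow();
  }
  // ... hot path: build the construct frame and invoke the constructor ...
}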
@@ -880,13 +885,11 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
Register scratch1, Register scratch2,
Register scratch3) {
// ----------- S t a t e -------------
- // -- r0 : argument count (preserved for callee if needed, and caller)
// -- r3 : new target (preserved for callee if needed, and caller)
// -- r1 : target function (preserved for callee if needed, and caller)
// -- feedback vector (preserved for caller if needed)
// -----------------------------------
- DCHECK(
- !AreAliased(feedback_vector, r0, r1, r3, scratch1, scratch2, scratch3));
+ DCHECK(!AreAliased(feedback_vector, r1, r3, scratch1, scratch2, scratch3));
Label optimized_code_slot_is_weak_ref, fallthrough;
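[Note: with r0 gone from the documented state, the DCHECK no longer needs to assert that the feedback vector avoids it. AreAliased just verifies that no two of its operands name the same register; a simplified sketch of such a pairwise check, modeling registers as integer codes rather than using V8's actual variadic signature:]

#include <initializer_list>

// Registers are modeled as integer codes; -1 plays the role of no_reg.
bool AreAliased(std::initializer_list<int> regs) {
  for (const int* i = regs.begin(); i != regs.end(); ++i) {
    if (*i < 0) continue;  // ignore no_reg slots
    for (const int* j = i + 1; j != regs.end(); ++j) {
      if (*j == *i) return true;  // two operands share a register
    }
  }
  return false;
}

[Usage mirroring the DCHECK: AreAliased over the feedback vector, r1, r3, and the three scratch registers must return false, i.e. every operand occupies a distinct register.]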
@@ -1071,8 +1074,10 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
Label push_stack_frame;
// Check if feedback vector is valid. If valid, check for optimized code
// and update invocation count. Otherwise, setup the stack frame.
- __ CompareRoot(feedback_vector, RootIndex::kUndefinedValue);
- __ b(eq, &push_stack_frame);
+ __ ldr(r4, FieldMemOperand(feedback_vector, HeapObject::kMapOffset));
+ __ ldrh(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
+ __ cmp(r4, Operand(FEEDBACK_VECTOR_TYPE));
+ __ b(ne, &push_stack_frame);
// Read off the optimized code slot in the feedback vector, and if there
// is optimized code or an optimization marker, call that instead.
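[Note: the validity test changes from comparing the slot against the undefined root to loading the object's map and checking that its instance type is FEEDBACK_VECTOR_TYPE, so anything other than an actual FeedbackVector now routes to push_stack_frame. The three instructions correspond to this C++-level check, sketched with illustrative layout stand-ins rather than V8's real classes:]

#include <cstdint>

// Every heap object starts with a map pointer; the map holds a 16-bit
// instance type (hence the ldrh).
struct Map { uint16_t instance_type; };
struct HeapObjectStub { const Map* map; };

// Hypothetical constant; the real FEEDBACK_VECTOR_TYPE value comes from
// V8's InstanceType enum.
constexpr uint16_t kFeedbackVectorType = 0;

bool IsFeedbackVector(const HeapObjectStub* obj) {
  // ldr r4, [feedback_vector + kMapOffset]
  const Map* map = obj->map;
  // ldrh r4, [r4 + kInstanceTypeOffset]; cmp r4, #FEEDBACK_VECTOR_TYPE; b ne
  return map->instance_type == kFeedbackVectorType;
}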
@@ -1092,10 +1097,15 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
FrameScope frame_scope(masm, StackFrame::MANUAL);
__ PushStandardFrame(closure);
- // Reset code age.
- __ mov(r9, Operand(BytecodeArray::kNoAgeBytecodeAge));
- __ strb(r9, FieldMemOperand(kInterpreterBytecodeArrayRegister,
- BytecodeArray::kBytecodeAgeOffset));
+ // Reset code age and the OSR arming. The OSR field and BytecodeAgeOffset
+ // are adjacent 8-bit fields, so we can optimize by doing a single 16-bit
+ // write. These static asserts guard that this assumption is valid.
+ STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
+ BytecodeArray::kOSRNestingLevelOffset + kCharSize);
+ STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
+ __ mov(r9, Operand(0));
+ __ strh(r9, FieldMemOperand(kInterpreterBytecodeArrayRegister,
+ BytecodeArray::kOSRNestingLevelOffset));
// Load the initial bytecode offset.
__ mov(kInterpreterBytecodeOffsetRegister,
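[Note: the static asserts above are what justify the strh: the OSR nesting level and the bytecode age are adjacent 8-bit fields, so one little-endian 16-bit store of zero clears both in a single instruction. A standalone C++ sketch of the same trick, with hypothetical field names mirroring the layout the asserts pin down:]

#include <cstddef>
#include <cstdint>
#include <cstring>

// Two adjacent byte-wide fields; the names are illustrative, not V8's.
struct AgeFields {
  uint8_t osr_nesting_level;
  uint8_t bytecode_age;
};
static_assert(offsetof(AgeFields, bytecode_age) ==
                  offsetof(AgeFields, osr_nesting_level) + 1,
              "fields must be adjacent for the single 16-bit store");

void ResetAgeAndOsr(AgeFields* f) {
  // One 16-bit store of zero clears both fields, like the strh above
  // (memcpy sidesteps alignment and aliasing concerns in portable C++).
  const uint16_t zero = 0;
  std::memcpy(&f->osr_nesting_level, &zero, sizeof(zero));
}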