Diffstat (limited to 'deps/v8/src/builtins/mips/builtins-mips.cc')
-rw-r--r--  deps/v8/src/builtins/mips/builtins-mips.cc  581
1 file changed, 315 insertions(+), 266 deletions(-)
diff --git a/deps/v8/src/builtins/mips/builtins-mips.cc b/deps/v8/src/builtins/mips/builtins-mips.cc
index 2da3985606..24fe271cb3 100644
--- a/deps/v8/src/builtins/mips/builtins-mips.cc
+++ b/deps/v8/src/builtins/mips/builtins-mips.cc
@@ -396,8 +396,7 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
__ lw(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
__ lw(a2, FieldMemOperand(a2, SharedFunctionInfo::kCodeOffset));
- __ Addu(at, a2, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(at);
+ __ Jump(at, a2, Code::kHeaderSize - kHeapObjectTag);
}
static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
@@ -421,8 +420,7 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
__ SmiUntag(a0);
}
- __ Addu(at, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(at);
+ __ Jump(at, v0, Code::kHeaderSize - kHeapObjectTag);
}
void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
@@ -443,11 +441,7 @@ void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
namespace {
-void Generate_JSConstructStubHelper(MacroAssembler* masm, bool is_api_function,
- bool create_implicit_receiver,
- bool check_derived_construct) {
- Label post_instantiation_deopt_entry;
-
+void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : number of arguments
// -- a1 : constructor function
@@ -457,8 +451,6 @@ void Generate_JSConstructStubHelper(MacroAssembler* masm, bool is_api_function,
// -- sp[...]: constructor arguments
// -----------------------------------
- Isolate* isolate = masm->isolate();
-
// Enter a construct frame.
{
FrameScope scope(masm, StackFrame::CONSTRUCT);
@@ -466,186 +458,251 @@ void Generate_JSConstructStubHelper(MacroAssembler* masm, bool is_api_function,
// Preserve the incoming parameters on the stack.
__ SmiTag(a0);
__ Push(cp, a0);
-
- if (create_implicit_receiver) {
- // Allocate the new receiver object.
- __ Push(a1, a3);
- __ Call(CodeFactory::FastNewObject(masm->isolate()).code(),
- RelocInfo::CODE_TARGET);
- __ mov(t4, v0);
- __ Pop(a1, a3);
-
- // ----------- S t a t e -------------
- // -- a1: constructor function
- // -- a3: new target
- // -- t0: newly allocated object
- // -----------------------------------
-
- // Retrieve smi-tagged arguments count from the stack.
- __ lw(a0, MemOperand(sp));
- }
-
__ SmiUntag(a0);
- if (create_implicit_receiver) {
- // Push the allocated receiver to the stack. We need two copies
- // because we may have to return the original one and the calling
- // conventions dictate that the called function pops the receiver.
- __ Push(t4, t4);
- } else {
- __ PushRoot(Heap::kTheHoleValueRootIndex);
- }
-
- // Deoptimizer re-enters stub code here.
- __ bind(&post_instantiation_deopt_entry);
+ // The receiver for the builtin/api call.
+ __ PushRoot(Heap::kTheHoleValueRootIndex);
// Set up pointer to last argument.
- __ Addu(a2, fp, Operand(StandardFrameConstants::kCallerSPOffset));
+ __ Addu(t2, fp, Operand(StandardFrameConstants::kCallerSPOffset));
// Copy arguments and receiver to the expression stack.
- // a0: number of arguments
- // a1: constructor function
- // a2: address of last argument (caller sp)
- // a3: new target
- // t4: number of arguments (smi-tagged)
- // sp[0]: receiver
- // sp[1]: receiver
- // sp[2]: number of arguments (smi-tagged)
Label loop, entry;
- __ SmiTag(t4, a0);
+ __ mov(t3, a0);
+ // ----------- S t a t e -------------
+ // -- a0: number of arguments (untagged)
+ // -- a3: new target
+ // -- t2: pointer to last argument
+ // -- t3: counter
+ // -- sp[0*kPointerSize]: the hole (receiver)
+ // -- sp[1*kPointerSize]: number of arguments (tagged)
+ // -- sp[2*kPointerSize]: context
+ // -----------------------------------
__ jmp(&entry);
__ bind(&loop);
- __ Lsa(t0, a2, t4, kPointerSizeLog2 - kSmiTagSize);
+ __ Lsa(t0, t2, t3, kPointerSizeLog2);
__ lw(t1, MemOperand(t0));
__ push(t1);
__ bind(&entry);
- __ Addu(t4, t4, Operand(-2));
- __ Branch(&loop, greater_equal, t4, Operand(zero_reg));
+ __ Addu(t3, t3, Operand(-1));
+ __ Branch(&loop, greater_equal, t3, Operand(zero_reg));
// Call the function.
- // a0: number of arguments
+ // a0: number of arguments (untagged)
// a1: constructor function
// a3: new target
ParameterCount actual(a0);
__ InvokeFunction(a1, a3, actual, CALL_FUNCTION,
CheckDebugStepCallWrapper());
- // Store offset of return address for deoptimizer.
- if (create_implicit_receiver && !is_api_function) {
- masm->isolate()->heap()->SetConstructStubInvokeDeoptPCOffset(
- masm->pc_offset());
- }
-
// Restore context from the frame.
__ lw(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
-
- if (create_implicit_receiver) {
- // If the result is an object (in the ECMA sense), we should get rid
- // of the receiver and use the result; see ECMA-262 section 13.2.2-7
- // on page 74.
- Label use_receiver, exit;
-
- // If the result is a smi, it is *not* an object in the ECMA sense.
- // v0: result
- // sp[0]: receiver (newly allocated object)
- // sp[1]: number of arguments (smi-tagged)
- __ JumpIfSmi(v0, &use_receiver);
-
- // If the type of the result (stored in its map) is less than
- // FIRST_JS_RECEIVER_TYPE, it is not an object in the ECMA sense.
- __ GetObjectType(v0, a1, a3);
- __ Branch(&exit, greater_equal, a3, Operand(FIRST_JS_RECEIVER_TYPE));
-
- // Throw away the result of the constructor invocation and use the
- // on-stack receiver as the result.
- __ bind(&use_receiver);
- __ lw(v0, MemOperand(sp));
-
- // Remove receiver from the stack, remove caller arguments, and
- // return.
- __ bind(&exit);
- // v0: result
- // sp[0]: receiver (newly allocated object)
- // sp[1]: number of arguments (smi-tagged)
- __ lw(a1, MemOperand(sp, 1 * kPointerSize));
- } else {
- __ lw(a1, MemOperand(sp));
- }
-
+ // Restore smi-tagged arguments count from the frame.
+ __ lw(a1, MemOperand(sp));
// Leave construct frame.
}
- // ES6 9.2.2. Step 13+
- // Check that the result is not a Smi, indicating that the constructor result
- // from a derived class is neither undefined nor an Object.
- if (check_derived_construct) {
- Label do_throw, dont_throw;
- __ JumpIfSmi(v0, &do_throw);
- __ GetObjectType(v0, a3, a3);
- STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
- __ Branch(&dont_throw, greater_equal, a3, Operand(FIRST_JS_RECEIVER_TYPE));
- __ bind(&do_throw);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ CallRuntime(Runtime::kThrowDerivedConstructorReturnedNonObject);
- }
- __ bind(&dont_throw);
- }
-
+ // Remove caller arguments from the stack and return.
__ Lsa(sp, sp, a1, kPointerSizeLog2 - 1);
__ Addu(sp, sp, kPointerSize);
- if (create_implicit_receiver) {
- __ IncrementCounter(isolate->counters()->constructed_objects(), 1, a1, a2);
- }
__ Ret();
+}
- // Store offset of trampoline address for deoptimizer. This is the bailout
- // point after the receiver instantiation but before the function invocation.
- // We need to restore some registers in order to continue the above code.
- if (create_implicit_receiver && !is_api_function) {
+// The construct stub for ES5 constructor functions and ES6 class constructors.
+void Generate_JSConstructStubGeneric(MacroAssembler* masm,
+ bool restrict_constructor_return) {
+ // ----------- S t a t e -------------
+ // -- a0: number of arguments (untagged)
+ // -- a1: constructor function
+ // -- a3: new target
+ // -- cp: context
+ // -- ra: return address
+ // -- sp[...]: constructor arguments
+ // -----------------------------------
+
+ // Enter a construct frame.
+ {
+ FrameScope scope(masm, StackFrame::CONSTRUCT);
+ Label post_instantiation_deopt_entry, not_create_implicit_receiver;
+
+ // Preserve the incoming parameters on the stack.
+ __ SmiTag(a0);
+ __ Push(cp, a0, a1, a3);
+
+ // ----------- S t a t e -------------
+ // -- sp[0*kPointerSize]: new target
+ // -- a1 and sp[1*kPointerSize]: constructor function
+ // -- sp[2*kPointerSize]: number of arguments (tagged)
+ // -- sp[3*kPointerSize]: context
+ // -----------------------------------
+
+ __ lw(t2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ __ lbu(t2,
+ FieldMemOperand(t2, SharedFunctionInfo::kFunctionKindByteOffset));
+ __ And(t2, t2,
+ Operand(SharedFunctionInfo::kDerivedConstructorBitsWithinByte));
+ __ Branch(&not_create_implicit_receiver, ne, t2, Operand(zero_reg));
+
+ // If not derived class constructor: Allocate the new receiver object.
+ __ IncrementCounter(masm->isolate()->counters()->constructed_objects(), 1,
+ t2, t3);
+ __ Call(CodeFactory::FastNewObject(masm->isolate()).code(),
+ RelocInfo::CODE_TARGET);
+ __ Branch(&post_instantiation_deopt_entry);
+
+ // Else: use TheHoleValue as receiver for constructor call
+ __ bind(&not_create_implicit_receiver);
+ __ LoadRoot(v0, Heap::kTheHoleValueRootIndex);
+
+ // ----------- S t a t e -------------
+ // -- v0: receiver
+ // -- Slot 3 / sp[0*kPointerSize]: new target
+ // -- Slot 2 / sp[1*kPointerSize]: constructor function
+ // -- Slot 1 / sp[2*kPointerSize]: number of arguments (tagged)
+ // -- Slot 0 / sp[3*kPointerSize]: context
+ // -----------------------------------
+ // Deoptimizer enters here.
masm->isolate()->heap()->SetConstructStubCreateDeoptPCOffset(
masm->pc_offset());
+ __ bind(&post_instantiation_deopt_entry);
+
+ // Restore new target.
+ __ Pop(a3);
+ // Push the allocated receiver to the stack. We need two copies
+ // because we may have to return the original one and the calling
+ // conventions dictate that the called function pops the receiver.
+ __ Push(v0, v0);
// ----------- S t a t e -------------
- // -- a0 : newly allocated object
- // -- sp[0] : constructor function
+ // -- a3: new target
+ // -- sp[0*kPointerSize]: implicit receiver
+ // -- sp[1*kPointerSize]: implicit receiver
+ // -- sp[2*kPointerSize]: constructor function
+ // -- sp[3*kPointerSize]: number of arguments (tagged)
+ // -- sp[4*kPointerSize]: context
// -----------------------------------
- __ Pop(a1);
- __ Push(a0, a0);
-
- // Retrieve smi-tagged arguments count from the stack.
+ // Restore constructor function and argument count.
+ __ lw(a1, MemOperand(fp, ConstructFrameConstants::kConstructorOffset));
__ lw(a0, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
__ SmiUntag(a0);
- // Retrieve the new target value from the stack. This was placed into the
- // frame description in place of the receiver by the optimizing compiler.
- __ Addu(a3, fp, Operand(StandardFrameConstants::kCallerSPOffset));
- __ Lsa(a3, a3, a0, kPointerSizeLog2);
- __ lw(a3, MemOperand(a3));
+ // Set up pointer to last argument.
+ __ Addu(t2, fp, Operand(StandardFrameConstants::kCallerSPOffset));
+
+ // Copy arguments and receiver to the expression stack.
+ Label loop, entry;
+ __ mov(t3, a0);
+ // ----------- S t a t e -------------
+ // -- a0: number of arguments (untagged)
+ // -- a3: new target
+ // -- t2: pointer to last argument
+ // -- t3: counter
+ // -- sp[0*kPointerSize]: implicit receiver
+ // -- sp[1*kPointerSize]: implicit receiver
+ // -- a1 and sp[2*kPointerSize]: constructor function
+ // -- sp[3*kPointerSize]: number of arguments (tagged)
+ // -- sp[4*kPointerSize]: context
+ // -----------------------------------
+ __ jmp(&entry);
+ __ bind(&loop);
+ __ Lsa(t0, t2, t3, kPointerSizeLog2);
+ __ lw(t1, MemOperand(t0));
+ __ push(t1);
+ __ bind(&entry);
+ __ Addu(t3, t3, Operand(-1));
+ __ Branch(&loop, greater_equal, t3, Operand(zero_reg));
- // Continue with constructor function invocation.
- __ jmp(&post_instantiation_deopt_entry);
+ // Call the function.
+ ParameterCount actual(a0);
+ __ InvokeFunction(a1, a3, actual, CALL_FUNCTION,
+ CheckDebugStepCallWrapper());
+
+ // ----------- S t a t e -------------
+ // -- v0: constructor result
+ // -- sp[0*kPointerSize]: implicit receiver
+ // -- sp[1*kPointerSize]: constructor function
+ // -- sp[2*kPointerSize]: number of arguments
+ // -- sp[3*kPointerSize]: context
+ // -----------------------------------
+
+ // Store offset of return address for deoptimizer.
+ masm->isolate()->heap()->SetConstructStubInvokeDeoptPCOffset(
+ masm->pc_offset());
+
+ // Restore the context from the frame.
+ __ lw(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
+
+ // If the result is an object (in the ECMA sense), we should get rid
+ // of the receiver and use the result; see ECMA-262 section 13.2.2-7
+ // on page 74.
+ Label use_receiver, do_throw, other_result, leave_frame;
+
+ // If the result is undefined, we jump out to using the implicit receiver.
+ __ JumpIfRoot(v0, Heap::kUndefinedValueRootIndex, &use_receiver);
+
+ // Otherwise we do a smi check and fall through to check if the return value
+ // is a valid receiver.
+
+ // If the result is a smi, it is *not* an object in the ECMA sense.
+ __ JumpIfSmi(v0, &other_result);
+
+ // If the type of the result (stored in its map) is less than
+ // FIRST_JS_RECEIVER_TYPE, it is not an object in the ECMA sense.
+ __ GetObjectType(v0, t2, t2);
+ STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
+ __ Branch(&leave_frame, greater_equal, t2, Operand(FIRST_JS_RECEIVER_TYPE));
+
+ __ bind(&other_result);
+ // The result is now neither undefined nor an object.
+ if (restrict_constructor_return) {
+ // Throw if constructor function is a class constructor
+ __ lw(a1, MemOperand(fp, ConstructFrameConstants::kConstructorOffset));
+ __ lw(t2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ __ lbu(t2,
+ FieldMemOperand(t2, SharedFunctionInfo::kFunctionKindByteOffset));
+ __ And(t2, t2,
+ Operand(SharedFunctionInfo::kClassConstructorBitsWithinByte));
+ __ Branch(&use_receiver, eq, t2, Operand(zero_reg));
+ } else {
+ __ Branch(&use_receiver);
+ }
+
+ __ bind(&do_throw);
+ __ CallRuntime(Runtime::kThrowConstructorReturnedNonObject);
+
+ // Throw away the result of the constructor invocation and use the
+ // on-stack receiver as the result.
+ __ bind(&use_receiver);
+ __ lw(v0, MemOperand(sp, 0 * kPointerSize));
+ __ JumpIfRoot(v0, Heap::kTheHoleValueRootIndex, &do_throw);
+
+ __ bind(&leave_frame);
+ // Restore smi-tagged arguments count from the frame.
+ __ lw(a1, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
+ // Leave construct frame.
}
+ // Remove caller arguments from the stack and return.
+ __ Lsa(sp, sp, a1, kPointerSizeLog2 - kSmiTagSize);
+ __ Addu(sp, sp, kPointerSize);
+ __ Ret();
}
} // namespace
-void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false, true, false);
+void Builtins::Generate_JSConstructStubGenericRestrictedReturn(
+ MacroAssembler* masm) {
+ Generate_JSConstructStubGeneric(masm, true);
+}
+void Builtins::Generate_JSConstructStubGenericUnrestrictedReturn(
+ MacroAssembler* masm) {
+ Generate_JSConstructStubGeneric(masm, false);
}
-
void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, true, false, false);
+ Generate_JSBuiltinsConstructStubHelper(masm);
}
-
void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false, false, false);
-}
-
-void Builtins::Generate_JSBuiltinsConstructStubForDerived(
- MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false, false, true);
+ Generate_JSBuiltinsConstructStubHelper(masm);
}
void Builtins::Generate_ConstructedNonConstructable(MacroAssembler* masm) {
@@ -899,6 +956,38 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ lw(t0, FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset));
}
+static void ReplaceClosureEntryWithOptimizedCode(
+ MacroAssembler* masm, Register optimized_code_entry, Register closure,
+ Register scratch1, Register scratch2, Register scratch3) {
+ Register native_context = scratch1;
+
+ // Store code entry in the closure.
+ __ Addu(optimized_code_entry, optimized_code_entry,
+ Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ sw(optimized_code_entry,
+ FieldMemOperand(closure, JSFunction::kCodeEntryOffset));
+ __ RecordWriteCodeEntryField(closure, optimized_code_entry, scratch2);
+
+ // Link the closure into the optimized function list.
+ __ lw(native_context, NativeContextMemOperand());
+ __ lw(scratch2,
+ ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST));
+ __ sw(scratch2,
+ FieldMemOperand(closure, JSFunction::kNextFunctionLinkOffset));
+ __ RecordWriteField(closure, JSFunction::kNextFunctionLinkOffset, scratch2,
+ scratch3, kRAHasNotBeenSaved, kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+ const int function_list_offset =
+ Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST);
+ __ sw(closure,
+ ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST));
+ // Save closure before the write barrier.
+ __ mov(scratch2, closure);
+ __ RecordWriteContextSlot(native_context, function_list_offset, closure,
+ scratch3, kRAHasNotBeenSaved, kDontSaveFPRegs);
+ __ mov(closure, scratch2);
+}
+
static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch) {
Register args_count = scratch;
@@ -939,6 +1028,19 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
FrameScope frame_scope(masm, StackFrame::MANUAL);
__ PushStandardFrame(a1);
+ // First check if there is optimized code in the feedback vector which we
+ // could call instead.
+ Label switch_to_optimized_code;
+ Register optimized_code_entry = t0;
+ __ lw(a0, FieldMemOperand(a1, JSFunction::kFeedbackVectorOffset));
+ __ lw(a0, FieldMemOperand(a0, Cell::kValueOffset));
+ __ lw(optimized_code_entry,
+ FieldMemOperand(a0, FeedbackVector::kOptimizedCodeIndex * kPointerSize +
+ FeedbackVector::kHeaderSize));
+ __ lw(optimized_code_entry,
+ FieldMemOperand(optimized_code_entry, WeakCell::kValueOffset));
+ __ JumpIfNotSmi(optimized_code_entry, &switch_to_optimized_code);
+
// Get the bytecode array from the function object (or from the DebugInfo if
// it is present) and load it into kInterpreterBytecodeArrayRegister.
__ lw(a0, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
@@ -1057,6 +1159,29 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ sw(t0, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
__ RecordWriteCodeEntryField(a1, t0, t1);
__ Jump(t0);
+
+ // If there is optimized code on the type feedback vector, check if it is good
+ // to run, and if so, self heal the closure and call the optimized code.
+ __ bind(&switch_to_optimized_code);
+ __ LeaveFrame(StackFrame::JAVA_SCRIPT);
+ Label gotta_call_runtime;
+
+ // Check if the optimized code is marked for deopt.
+ __ lw(t1,
+ FieldMemOperand(optimized_code_entry, Code::kKindSpecificFlags1Offset));
+ __ And(t1, t1, Operand(1 << Code::kMarkedForDeoptimizationBit));
+ __ Branch(&gotta_call_runtime, ne, t1, Operand(zero_reg));
+
+ // Optimized code is good, get it into the closure and link the closure into
+ // the optimized functions list, then tail call the optimized code.
+ ReplaceClosureEntryWithOptimizedCode(masm, optimized_code_entry, a1, t3, t1,
+ t2);
+ __ Jump(optimized_code_entry);
+
+ // Optimized code is marked for deopt, bail out to the CompileLazy runtime
+ // function which will clear the feedback vector's optimized code slot.
+ __ bind(&gotta_call_runtime);
+ GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot);
}
static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
@@ -1170,8 +1295,7 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
// context at this point).
__ lw(t0, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
__ lw(t0, FieldMemOperand(t0, SharedFunctionInfo::kConstructStubOffset));
- __ Addu(at, t0, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(at);
+ __ Jump(at, t0, Code::kHeaderSize - kHeapObjectTag);
} else if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
// Call the constructor with a0, a1, and a3 unmodified.
__ Jump(masm->isolate()->builtins()->ConstructWithSpread(),
@@ -1298,111 +1422,49 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// -- a1 : target function (preserved for callee)
// -----------------------------------
// First lookup code, maybe we don't need to compile!
- Label gotta_call_runtime, gotta_call_runtime_no_stack;
+ Label gotta_call_runtime;
Label try_shared;
- Label loop_top, loop_bottom;
- Register argument_count = a0;
Register closure = a1;
- Register new_target = a3;
- Register map = a0;
Register index = a2;
// Do we have a valid feedback vector?
__ lw(index, FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
__ lw(index, FieldMemOperand(index, Cell::kValueOffset));
- __ JumpIfRoot(index, Heap::kUndefinedValueRootIndex,
- &gotta_call_runtime_no_stack);
-
- __ push(argument_count);
- __ push(new_target);
- __ push(closure);
-
- __ lw(map, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
- __ lw(map, FieldMemOperand(map, SharedFunctionInfo::kOptimizedCodeMapOffset));
- __ lw(index, FieldMemOperand(map, FixedArray::kLengthOffset));
- __ Branch(&try_shared, lt, index, Operand(Smi::FromInt(2)));
-
- // a3 : native context
- // a2 : length / index
- // a0 : optimized code map
- // stack[0] : new target
- // stack[4] : closure
- Register native_context = a3;
- __ lw(native_context, NativeContextMemOperand());
-
- __ bind(&loop_top);
- Register temp = a1;
- Register array_pointer = t1;
-
- // Does the native context match?
- __ sll(at, index, kPointerSizeLog2 - kSmiTagSize);
- __ Addu(array_pointer, map, Operand(at));
- __ lw(temp, FieldMemOperand(array_pointer,
- SharedFunctionInfo::kOffsetToPreviousContext));
- __ lw(temp, FieldMemOperand(temp, WeakCell::kValueOffset));
- __ Branch(&loop_bottom, ne, temp, Operand(native_context));
+ __ JumpIfRoot(index, Heap::kUndefinedValueRootIndex, &gotta_call_runtime);
- // Code available?
+ // Is optimized code available in the feedback vector?
Register entry = t0;
- __ lw(entry,
- FieldMemOperand(array_pointer,
- SharedFunctionInfo::kOffsetToPreviousCachedCode));
+ __ lw(entry, FieldMemOperand(
+ index, FeedbackVector::kOptimizedCodeIndex * kPointerSize +
+ FeedbackVector::kHeaderSize));
__ lw(entry, FieldMemOperand(entry, WeakCell::kValueOffset));
__ JumpIfSmi(entry, &try_shared);
- // Found code. Get it into the closure and return.
- __ pop(closure);
- // Store code entry in the closure.
- __ Addu(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ sw(entry, FieldMemOperand(closure, JSFunction::kCodeEntryOffset));
- __ RecordWriteCodeEntryField(closure, entry, t1);
+ // Found code, check if it is marked for deopt, if so call into runtime to
+ // clear the optimized code slot.
+ __ lw(t1, FieldMemOperand(entry, Code::kKindSpecificFlags1Offset));
+ __ And(t1, t1, Operand(1 << Code::kMarkedForDeoptimizationBit));
+ __ Branch(&gotta_call_runtime, ne, t1, Operand(zero_reg));
- // Link the closure into the optimized function list.
- // t0 : code entry
- // a3 : native context
- // a1 : closure
- __ lw(t1,
- ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST));
- __ sw(t1, FieldMemOperand(closure, JSFunction::kNextFunctionLinkOffset));
- __ RecordWriteField(closure, JSFunction::kNextFunctionLinkOffset, t1, a0,
- kRAHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- const int function_list_offset =
- Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST);
- __ sw(closure,
- ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST));
- // Save closure before the write barrier.
- __ mov(t1, closure);
- __ RecordWriteContextSlot(native_context, function_list_offset, closure, a0,
- kRAHasNotBeenSaved, kDontSaveFPRegs);
- __ mov(closure, t1);
- __ pop(new_target);
- __ pop(argument_count);
+ // Code is good, get it into the closure and tail call.
+ ReplaceClosureEntryWithOptimizedCode(masm, entry, closure, t3, t1, t2);
__ Jump(entry);
- __ bind(&loop_bottom);
- __ Subu(index, index,
- Operand(Smi::FromInt(SharedFunctionInfo::kEntryLength)));
- __ Branch(&loop_top, gt, index, Operand(Smi::FromInt(1)));
-
- // We found no code.
+ // We found no optimized code.
__ bind(&try_shared);
- __ pop(closure);
- __ pop(new_target);
- __ pop(argument_count);
__ lw(entry, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
// Is the shared function marked for tier up?
__ lbu(t1, FieldMemOperand(entry,
SharedFunctionInfo::kMarkedForTierUpByteOffset));
__ And(t1, t1,
Operand(1 << SharedFunctionInfo::kMarkedForTierUpBitWithinByte));
- __ Branch(&gotta_call_runtime_no_stack, ne, t1, Operand(zero_reg));
+ __ Branch(&gotta_call_runtime, ne, t1, Operand(zero_reg));
// If SFI points to anything other than CompileLazy, install that.
__ lw(entry, FieldMemOperand(entry, SharedFunctionInfo::kCodeOffset));
__ Move(t1, masm->CodeObject());
- __ Branch(&gotta_call_runtime_no_stack, eq, entry, Operand(t1));
+ __ Branch(&gotta_call_runtime, eq, entry, Operand(t1));
// Install the SFI's code entry.
__ Addu(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
@@ -1411,10 +1473,6 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
__ Jump(entry);
__ bind(&gotta_call_runtime);
- __ pop(closure);
- __ pop(new_target);
- __ pop(argument_count);
- __ bind(&gotta_call_runtime_no_stack);
GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
}
@@ -1551,8 +1609,7 @@ void Builtins::Generate_MarkCodeAsExecutedOnce(MacroAssembler* masm) {
__ PushStandardFrame(a1);
// Jump to point after the code-age stub.
- __ Addu(a0, a0, Operand(kNoCodeAgeSequenceLength));
- __ Jump(a0);
+ __ Jump(a0, kNoCodeAgeSequenceLength);
}
void Builtins::Generate_MarkCodeAsExecutedTwice(MacroAssembler* masm) {
@@ -2111,68 +2168,64 @@ void Builtins::Generate_Apply(MacroAssembler* masm) {
}
// static
-void Builtins::Generate_CallForwardVarargs(MacroAssembler* masm,
- Handle<Code> code) {
+void Builtins::Generate_ForwardVarargs(MacroAssembler* masm,
+ Handle<Code> code) {
// ----------- S t a t e -------------
- // -- a1 : the target to call (can be any Object)
- // -- a2 : start index (to support rest parameters)
- // -- ra : return address.
- // -- sp[0] : thisArgument
+ // -- a0 : the number of arguments (not including the receiver)
+ // -- a3 : the new.target (for [[Construct]] calls)
+ // -- a1 : the target to call (can be any Object)
+ // -- a2 : start index (to support rest parameters)
// -----------------------------------
// Check if we have an arguments adaptor frame below the function frame.
Label arguments_adaptor, arguments_done;
- __ lw(a3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ lw(a0, MemOperand(a3, CommonFrameConstants::kContextOrFrameTypeOffset));
- __ Branch(&arguments_adaptor, eq, a0,
+ __ lw(t3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ lw(t2, MemOperand(t3, CommonFrameConstants::kContextOrFrameTypeOffset));
+ __ Branch(&arguments_adaptor, eq, t2,
Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
{
- __ lw(a0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ lw(a0, FieldMemOperand(a0, JSFunction::kSharedFunctionInfoOffset));
- __ lw(a0,
- FieldMemOperand(a0, SharedFunctionInfo::kFormalParameterCountOffset));
- __ mov(a3, fp);
+ __ lw(t2, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ lw(t2, FieldMemOperand(t2, JSFunction::kSharedFunctionInfoOffset));
+ __ lw(t2,
+ FieldMemOperand(t2, SharedFunctionInfo::kFormalParameterCountOffset));
+ __ mov(t3, fp);
}
__ Branch(&arguments_done);
__ bind(&arguments_adaptor);
{
// Just get the length from the ArgumentsAdaptorFrame.
- __ lw(a0, MemOperand(a3, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ lw(t2, MemOperand(t3, ArgumentsAdaptorFrameConstants::kLengthOffset));
}
__ bind(&arguments_done);
- Label stack_empty, stack_done, stack_overflow;
- __ SmiUntag(a0);
- __ Subu(a0, a0, a2);
- __ Branch(&stack_empty, le, a0, Operand(zero_reg));
+ Label stack_done, stack_overflow;
+ __ SmiUntag(t2);
+ __ Subu(t2, t2, a2);
+ __ Branch(&stack_done, le, t2, Operand(zero_reg));
{
// Check for stack overflow.
- Generate_StackOverflowCheck(masm, a0, t0, t1, &stack_overflow);
+ Generate_StackOverflowCheck(masm, t2, t0, t1, &stack_overflow);
// Forward the arguments from the caller frame.
{
Label loop;
- __ mov(a2, a0);
+ __ Addu(a0, a0, t2);
__ bind(&loop);
{
- __ Lsa(at, a3, a2, kPointerSizeLog2);
+ __ Lsa(at, t3, t2, kPointerSizeLog2);
__ lw(at, MemOperand(at, 1 * kPointerSize));
__ push(at);
- __ Subu(a2, a2, Operand(1));
- __ Branch(&loop, ne, a2, Operand(zero_reg));
+ __ Subu(t2, t2, Operand(1));
+ __ Branch(&loop, ne, t2, Operand(zero_reg));
}
}
}
__ Branch(&stack_done);
__ bind(&stack_overflow);
__ TailCallRuntime(Runtime::kThrowStackOverflow);
- __ bind(&stack_empty);
- {
- // We just pass the receiver, which is already on the stack.
- __ li(a0, Operand(0));
- }
__ bind(&stack_done);
+ // Tail-call to the {code} handler.
__ Jump(code, RelocInfo::CODE_TARGET);
}
@@ -2471,8 +2524,7 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm,
__ li(at, Operand(ExternalReference(Builtins::kCall_ReceiverIsAny,
masm->isolate())));
__ lw(at, MemOperand(at));
- __ Addu(at, at, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(at);
+ __ Jump(at, Code::kHeaderSize - kHeapObjectTag);
}
// static
@@ -2696,8 +2748,7 @@ void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
// context at this point).
__ lw(t0, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
__ lw(t0, FieldMemOperand(t0, SharedFunctionInfo::kConstructStubOffset));
- __ Addu(at, t0, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(at);
+ __ Jump(at, t0, Code::kHeaderSize - kHeapObjectTag);
}
// static
@@ -2787,8 +2838,7 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
__ lw(a1, FieldMemOperand(a1, JSBoundFunction::kBoundTargetFunctionOffset));
__ li(at, Operand(ExternalReference(Builtins::kConstruct, masm->isolate())));
__ lw(at, MemOperand(at));
- __ Addu(at, at, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(at);
+ __ Jump(at, Code::kHeaderSize - kHeapObjectTag);
}
// static
@@ -3060,8 +3110,7 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
__ MultiPop(gp_regs);
}
// Now jump to the instructions of the returned code object.
- __ Addu(at, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(at);
+ __ Jump(at, v0, Code::kHeaderSize - kHeapObjectTag);
}
#undef __
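
The one mechanical change repeated throughout this diff is the tail-jump cleanup: the old two-instruction sequence "Addu(at, base, Operand(offset)); Jump(at)" collapses into a single Jump call that takes the base register and a byte offset. A minimal sketch of the two overloads those rewritten call sites assume, with signatures inferred only from the usage above (the real definitions live in the MIPS macro-assembler, not in this file):

void MacroAssembler::Jump(Register target, Register base, int offset) {
  // Compute base + offset into the supplied scratch register (conventionally
  // 'at' on MIPS), then perform an unconditional register jump.
  Addu(target, base, Operand(offset));
  Jump(target);  // pre-existing register-jump overload (jr + delay slot)
}

void MacroAssembler::Jump(Register target, int offset) {
  // Two-argument form used as __ Jump(at, Code::kHeaderSize - kHeapObjectTag):
  // the register already holds the code pointer, so adjust it in place.
  Jump(target, target, offset);
}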