Diffstat (limited to 'deps/v8/src/builtins/mips64/builtins-mips64.cc')
-rw-r--r--  deps/v8/src/builtins/mips64/builtins-mips64.cc | 313
1 file changed, 190 insertions(+), 123 deletions(-)
diff --git a/deps/v8/src/builtins/mips64/builtins-mips64.cc b/deps/v8/src/builtins/mips64/builtins-mips64.cc
index 15fdfc3d7d..7756872b14 100644
--- a/deps/v8/src/builtins/mips64/builtins-mips64.cc
+++ b/deps/v8/src/builtins/mips64/builtins-mips64.cc
@@ -82,19 +82,6 @@ void Builtins::Generate_AdaptorWithBuiltinExitFrame(MacroAssembler* masm) {
AdaptorWithExitFrameType(masm, BUILTIN_EXIT);
}
-// Load the built-in InternalArray function from the current context.
-static void GenerateLoadInternalArrayFunction(MacroAssembler* masm,
- Register result) {
- // Load the InternalArray function from the native context.
- __ LoadNativeContextSlot(Context::INTERNAL_ARRAY_FUNCTION_INDEX, result);
-}
-
-// Load the built-in Array function from the current context.
-static void GenerateLoadArrayFunction(MacroAssembler* masm, Register result) {
- // Load the Array function from the native context.
- __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, result);
-}
-
void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : number of arguments
@@ -103,9 +90,6 @@ void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) {
// -----------------------------------
Label generic_array_code, one_or_more_arguments, two_or_more_arguments;
- // Get the InternalArray function.
- GenerateLoadInternalArrayFunction(masm, a1);
-
if (FLAG_debug_code) {
// Initial map for the builtin InternalArray functions should be maps.
__ Ld(a2, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
@@ -120,6 +104,7 @@ void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) {
// Run the native code for the InternalArray function called as a normal
// function.
// Tail call a stub.
+ __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
InternalArrayConstructorStub stub(masm->isolate());
__ TailCallStub(&stub);
}
@@ -127,29 +112,33 @@ void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) {
void Builtins::Generate_ArrayConstructor(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : number of arguments
+ // -- a1 : array function
// -- ra : return address
// -- sp[...]: constructor arguments
// -----------------------------------
Label generic_array_code;
- // Get the Array function.
- GenerateLoadArrayFunction(masm, a1);
-
if (FLAG_debug_code) {
// Initial map for the builtin Array functions should be maps.
__ Ld(a2, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
__ SmiTst(a2, a4);
__ Assert(ne, AbortReason::kUnexpectedInitialMapForArrayFunction1, a4,
Operand(zero_reg));
- __ GetObjectType(a2, a3, a4);
+ __ GetObjectType(a2, t0, a4);
__ Assert(eq, AbortReason::kUnexpectedInitialMapForArrayFunction2, a4,
Operand(MAP_TYPE));
}
- // Run the native code for the Array function called as a normal function.
- // Tail call a stub.
- __ mov(a3, a1);
+ // a2 is the AllocationSite - here undefined.
__ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
+ // If a3 (new target) is undefined, then this is the 'Call' case, so move
+ // a1 (the constructor) to a3.
+ Label call;
+ __ Branch(&call, ne, a3, Operand(a2));
+ __ mov(a3, a1);
+
+ // Run the native code for the Array function called as a normal function.
+ __ bind(&call);
ArrayConstructorStub stub(masm->isolate());
__ TailCallStub(&stub);
}
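
Note on the hunk above: Generate_ArrayConstructor now expects the Array function to arrive preloaded in a1, loads undefined into a2 as the AllocationSite, and, when the incoming new target in a3 is undefined (the plain 'Call' case), substitutes the constructor itself before tail-calling ArrayConstructorStub. A minimal standalone C++ sketch of that fixup, using illustrative types that are not V8's real classes:

    #include <cassert>

    struct JSValue { const char* tag; };
    const JSValue kUndefined{"undefined"};

    // Mirrors: __ Branch(&call, ne, a3, Operand(a2)); __ mov(a3, a1);
    const JSValue* FixUpNewTarget(const JSValue* constructor,
                                  const JSValue* new_target) {
      return (new_target == &kUndefined) ? constructor : new_target;
    }

    int main() {
      JSValue array_fn{"Array"};
      assert(FixUpNewTarget(&array_fn, &kUndefined) == &array_fn);  // 'Call' case
      assert(FixUpNewTarget(&array_fn, &array_fn) == &array_fn);    // 'Construct' case
    }
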
@@ -280,7 +269,7 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
// -----------------------------------
__ Ld(t2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
- __ lwu(t2, FieldMemOperand(t2, SharedFunctionInfo::kCompilerHintsOffset));
+ __ lwu(t2, FieldMemOperand(t2, SharedFunctionInfo::kFlagsOffset));
__ And(t2, t2, Operand(SharedFunctionInfo::IsDerivedConstructorBit::kMask));
__ Branch(&not_create_implicit_receiver, ne, t2, Operand(zero_reg));
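
The kCompilerHintsOffset -> kFlagsOffset change here is a field rename on SharedFunctionInfo and recurs throughout the file (for example in the Generate_CallFunction and Generate_ConstructFunction hunks further down); the loaded word is still tested against bitfield masks such as IsDerivedConstructorBit. A hedged sketch of that mask test, with an assumed bit position rather than V8's real BitField-derived mask:

    #include <cstdint>

    // Illustrative bit only; V8 derives the real mask from a BitField
    // definition on SharedFunctionInfo.
    constexpr uint32_t kIsDerivedConstructorMask = 1u << 3;

    bool IsDerivedConstructor(uint32_t sfi_flags) {
      // Mirrors: __ And(t2, t2, IsDerivedConstructorBit::kMask) + branch on zero.
      return (sfi_flags & kIsDerivedConstructorMask) != 0;
    }
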
@@ -401,7 +390,7 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
__ bind(&other_result);
__ Ld(a1, MemOperand(fp, ConstructFrameConstants::kConstructorOffset));
__ Ld(t2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
- __ lwu(t2, FieldMemOperand(t2, SharedFunctionInfo::kCompilerHintsOffset));
+ __ lwu(t2, FieldMemOperand(t2, SharedFunctionInfo::kFlagsOffset));
__ And(t2, t2, Operand(SharedFunctionInfo::IsClassConstructorBit::kMask));
if (restrict_constructor_return) {
@@ -445,13 +434,23 @@ void Builtins::Generate_JSConstructStubGenericUnrestrictedReturn(
MacroAssembler* masm) {
Generate_JSConstructStubGeneric(masm, false);
}
-void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
- Generate_JSBuiltinsConstructStubHelper(masm);
-}
void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) {
Generate_JSBuiltinsConstructStubHelper(masm);
}
+static void GetSharedFunctionInfoBytecode(MacroAssembler* masm,
+ Register sfi_data,
+ Register scratch1) {
+ Label done;
+
+ __ GetObjectType(sfi_data, scratch1, scratch1);
+ __ Branch(&done, ne, scratch1, Operand(INTERPRETER_DATA_TYPE));
+ __ Ld(sfi_data,
+ FieldMemOperand(sfi_data, InterpreterData::kBytecodeArrayOffset));
+
+ __ bind(&done);
+}
+
// static
void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// ----------- S t a t e -------------
@@ -526,6 +525,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
if (FLAG_debug_code) {
__ Ld(a3, FieldMemOperand(a4, JSFunction::kSharedFunctionInfoOffset));
__ Ld(a3, FieldMemOperand(a3, SharedFunctionInfo::kFunctionDataOffset));
+ GetSharedFunctionInfoBytecode(masm, a3, a0);
__ GetObjectType(a3, a3, a3);
__ Assert(eq, AbortReason::kMissingBytecodeArray, a3,
Operand(BYTECODE_ARRAY_TYPE));
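
The new GetSharedFunctionInfoBytecode helper above unwraps one level of indirection: if the SFI's function_data is an InterpreterData, the bytecode array is loaded out of it; otherwise the data is assumed to already be the bytecode array. A rough standalone C++ model (types and names are illustrative only):

    struct BytecodeArray {};
    struct InterpreterData { BytecodeArray* bytecode_array; };

    enum class FunctionDataType { kBytecodeArray, kInterpreterData };

    struct FunctionData {
      FunctionDataType type;
      void* payload;  // BytecodeArray* or InterpreterData*
    };

    // Mirrors the branch on INTERPRETER_DATA_TYPE in the helper above.
    BytecodeArray* GetBytecode(const FunctionData& data) {
      if (data.type == FunctionDataType::kInterpreterData) {
        return static_cast<InterpreterData*>(data.payload)->bytecode_array;
      }
      return static_cast<BytecodeArray*>(data.payload);
    }
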
@@ -734,7 +734,7 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
DCHECK(
!AreAliased(feedback_vector, a0, a1, a3, scratch1, scratch2, scratch3));
- Label optimized_code_slot_is_cell, fallthrough;
+ Label optimized_code_slot_is_weak_ref, fallthrough;
Register closure = a1;
Register optimized_code_entry = scratch1;
@@ -743,9 +743,9 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
FieldMemOperand(feedback_vector, FeedbackVector::kOptimizedCodeOffset));
// Check if the code entry is a Smi. If yes, we interpret it as an
- // optimisation marker. Otherwise, interpret is as a weak cell to a code
+ // optimisation marker. Otherwise, interpret it as a weak reference to a code
// object.
- __ JumpIfNotSmi(optimized_code_entry, &optimized_code_slot_is_cell);
+ __ JumpIfNotSmi(optimized_code_entry, &optimized_code_slot_is_weak_ref);
{
// Optimized code slot is a Smi optimization marker.
@@ -779,12 +779,10 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
}
{
- // Optimized code slot is a WeakCell.
- __ bind(&optimized_code_slot_is_cell);
+ // Optimized code slot is a weak reference.
+ __ bind(&optimized_code_slot_is_weak_ref);
- __ Ld(optimized_code_entry,
- FieldMemOperand(optimized_code_entry, WeakCell::kValueOffset));
- __ JumpIfSmi(optimized_code_entry, &fallthrough);
+ __ LoadWeakValue(optimized_code_entry, optimized_code_entry, &fallthrough);
// Check if the optimized code is marked for deopt. If it is, call the
// runtime to clear it.
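
In the hunk above, the optimized-code slot switches from a WeakCell (explicit value load plus Smi check) to a weak reference unwrapped by LoadWeakValue, which also branches to the fallthrough path when the code object has been collected. A loose analogy in portable C++, using std::weak_ptr purely as a stand-in (V8's weak references are not std::weak_ptr):

    #include <memory>

    struct OptimizedCode {};

    // Returns null when the weak reference has been cleared, corresponding to
    // LoadWeakValue taking the &fallthrough branch.
    std::shared_ptr<OptimizedCode> TryGetOptimizedCode(
        const std::weak_ptr<OptimizedCode>& slot) {
      return slot.lock();
    }
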
@@ -916,6 +914,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ Ld(a0, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
__ Ld(kInterpreterBytecodeArrayRegister,
FieldMemOperand(a0, SharedFunctionInfo::kFunctionDataOffset));
+ GetSharedFunctionInfoBytecode(masm, kInterpreterBytecodeArrayRegister, a4);
__ Ld(a4, FieldMemOperand(a0, SharedFunctionInfo::kDebugInfoOffset));
__ JumpIfNotSmi(a4, &maybe_load_debug_bytecode_array);
__ bind(&bytecode_array_loaded);
@@ -1039,12 +1038,30 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// kInterpreterBytecodeArrayRegister is already loaded with
// SharedFunctionInfo::kFunctionDataOffset.
__ bind(&maybe_load_debug_bytecode_array);
+ __ Ld(a5, FieldMemOperand(a4, DebugInfo::kDebugBytecodeArrayOffset));
+ __ JumpIfRoot(a5, Heap::kUndefinedValueRootIndex, &bytecode_array_loaded);
+
+ __ mov(kInterpreterBytecodeArrayRegister, a5);
__ Ld(a5, FieldMemOperand(a4, DebugInfo::kFlagsOffset));
__ SmiUntag(a5);
- __ And(a5, a5, Operand(DebugInfo::kHasBreakInfo));
- __ Branch(&bytecode_array_loaded, eq, a5, Operand(zero_reg));
- __ Ld(kInterpreterBytecodeArrayRegister,
- FieldMemOperand(a4, DebugInfo::kDebugBytecodeArrayOffset));
+ __ And(a5, a5, Operand(DebugInfo::kDebugExecutionMode));
+
+ ExternalReference debug_execution_mode =
+ ExternalReference::debug_execution_mode_address(masm->isolate());
+ __ li(a4, Operand(debug_execution_mode));
+ __ Lb(a4, MemOperand(a4));
+ STATIC_ASSERT(static_cast<int>(DebugInfo::kDebugExecutionMode) ==
+ static_cast<int>(DebugInfo::kSideEffects));
+ __ Branch(&bytecode_array_loaded, eq, a4, Operand(a5));
+
+ __ push(closure);
+ __ push(feedback_vector);
+ __ push(kInterpreterBytecodeArrayRegister);
+ __ push(closure);
+ __ CallRuntime(Runtime::kDebugApplyInstrumentation);
+ __ pop(kInterpreterBytecodeArrayRegister);
+ __ pop(feedback_vector);
+ __ pop(closure);
__ Branch(&bytecode_array_loaded);
}
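
The rewritten maybe_load_debug_bytecode_array block now uses the debug bytecode whenever DebugInfo carries one, and additionally calls Runtime::kDebugApplyInstrumentation when the instrumentation mode recorded in DebugInfo::kFlagsOffset disagrees with the isolate-wide debug_execution_mode. A simplified C++ model of that decision (enum values and names are assumptions, not V8's encoding):

    enum DebugExecutionMode { kBreakpoints = 0, kSideEffects = 1 };

    struct DebugInfoModel {
      bool has_debug_bytecode;           // DebugBytecodeArray != undefined
      DebugExecutionMode recorded_mode;  // flags & kDebugExecutionMode
    };

    // True when the runtime must re-apply instrumentation before dispatching.
    bool NeedsReinstrumentation(const DebugInfoModel& info,
                                DebugExecutionMode isolate_mode) {
      return info.has_debug_bytecode && info.recorded_mode != isolate_mode;
    }
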
@@ -1087,6 +1104,7 @@ static void Generate_InterpreterPushArgs(MacroAssembler* masm,
void Builtins::Generate_InterpreterPushArgsThenCallImpl(
MacroAssembler* masm, ConvertReceiverMode receiver_mode,
InterpreterPushArgsMode mode) {
+ DCHECK(mode != InterpreterPushArgsMode::kArrayFunction);
// ----------- S t a t e -------------
// -- a0 : the number of arguments (not including the receiver)
// -- a2 : the address of the first argument to be pushed. Subsequent
@@ -1115,11 +1133,7 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl(
}
// Call the target.
- if (mode == InterpreterPushArgsMode::kJSFunction) {
- __ Jump(
- masm->isolate()->builtins()->CallFunction(ConvertReceiverMode::kAny),
- RelocInfo::CODE_TARGET);
- } else if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
+ if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
__ Jump(BUILTIN_CODE(masm->isolate(), CallWithSpread),
RelocInfo::CODE_TARGET);
} else {
@@ -1162,15 +1176,13 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
__ AssertUndefinedOrAllocationSite(a2, t0);
}
- if (mode == InterpreterPushArgsMode::kJSFunction) {
+ if (mode == InterpreterPushArgsMode::kArrayFunction) {
__ AssertFunction(a1);
// Tail call to the function-specific construct stub (still in the caller
// context at this point).
- __ Ld(a4, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
- __ Ld(a4, FieldMemOperand(a4, SharedFunctionInfo::kConstructStubOffset));
- __ Daddu(at, a4, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(at);
+ ArrayConstructorStub array_constructor_stub(masm->isolate());
+ __ Jump(array_constructor_stub.GetCode(), RelocInfo::CODE_TARGET);
} else if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
// Call the constructor with a0, a1, and a3 unmodified.
__ Jump(BUILTIN_CODE(masm->isolate(), ConstructWithSpread),
@@ -1192,10 +1204,28 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
// Set the return address to the correct point in the interpreter entry
// trampoline.
+ Label builtin_trampoline, trampoline_loaded;
Smi* interpreter_entry_return_pc_offset(
masm->isolate()->heap()->interpreter_entry_return_pc_offset());
DCHECK_NE(interpreter_entry_return_pc_offset, Smi::kZero);
+
+ // If the SFI function_data is an InterpreterData, get the trampoline stored
+ // in it, otherwise get the trampoline from the builtins list.
+ __ Ld(t0, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
+ __ Ld(t0, FieldMemOperand(t0, JSFunction::kSharedFunctionInfoOffset));
+ __ Ld(t0, FieldMemOperand(t0, SharedFunctionInfo::kFunctionDataOffset));
+ __ GetObjectType(t0, kInterpreterDispatchTableRegister,
+ kInterpreterDispatchTableRegister);
+ __ Branch(&builtin_trampoline, ne, kInterpreterDispatchTableRegister,
+ Operand(INTERPRETER_DATA_TYPE));
+
+ __ Ld(t0, FieldMemOperand(t0, InterpreterData::kInterpreterTrampolineOffset));
+ __ Branch(&trampoline_loaded);
+
+ __ bind(&builtin_trampoline);
__ li(t0, Operand(BUILTIN_CODE(masm->isolate(), InterpreterEntryTrampoline)));
+
+ __ bind(&trampoline_loaded);
__ Daddu(ra, t0, Operand(interpreter_entry_return_pc_offset->value() +
Code::kHeaderSize - kHeapObjectTag));
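
Generate_InterpreterEnterBytecode now picks the interpreter entry trampoline per function: if the SFI's function_data is an InterpreterData, the trampoline stored inside it is used; otherwise the shared InterpreterEntryTrampoline builtin is used, and the return address is computed relative to whichever one was chosen. A sketch with illustrative types:

    struct Code {};

    struct InterpreterDataModel { Code* interpreter_trampoline; };

    // 'builtin_trampoline' stands in for BUILTIN_CODE(isolate,
    // InterpreterEntryTrampoline); the real lookup goes through the isolate.
    Code* SelectTrampoline(const InterpreterDataModel* interpreter_data,
                           Code* builtin_trampoline) {
      return interpreter_data != nullptr
                 ? interpreter_data->interpreter_trampoline
                 : builtin_trampoline;
    }
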
@@ -1270,43 +1300,9 @@ void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
Generate_InterpreterEnterBytecode(masm);
}
-void Builtins::Generate_CheckOptimizationMarker(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- a0 : argument count (preserved for callee)
- // -- a3 : new target (preserved for callee)
- // -- a1 : target function (preserved for callee)
- // -----------------------------------
- Register closure = a1;
-
- // Get the feedback vector.
- Register feedback_vector = a2;
- __ Ld(feedback_vector,
- FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
- __ Ld(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
-
- // The feedback vector must be defined.
- if (FLAG_debug_code) {
- __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
- __ Assert(ne, AbortReason::kExpectedFeedbackVector, feedback_vector,
- Operand(at));
- }
-
- // Is there an optimization marker or optimized code in the feedback vector?
- MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, a4, t3, a5);
-
- // Otherwise, tail call the SFI code.
- static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
- __ Ld(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
- __ Ld(a2, FieldMemOperand(a2, SharedFunctionInfo::kCodeOffset));
- __ Daddu(a2, a2, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(a2);
-}
-
void Builtins::Generate_CompileLazyDeoptimizedCode(MacroAssembler* masm) {
- // Set the code slot inside the JSFunction to the trampoline to the
- // interpreter entry.
- __ Ld(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
- __ Ld(a2, FieldMemOperand(a2, SharedFunctionInfo::kCodeOffset));
+ // Set the code slot inside the JSFunction to CompileLazy.
+ __ Move(a2, BUILTIN_CODE(masm->isolate(), CompileLazy));
__ Sd(a2, FieldMemOperand(a1, JSFunction::kCodeOffset));
__ RecordWriteField(a1, JSFunction::kCodeOffset, a2, a4, kRAHasNotBeenSaved,
kDontSaveFPRegs, OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
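
Generate_CompileLazyDeoptimizedCode above no longer copies the code pointer out of SharedFunctionInfo (a field this patch stops reading elsewhere as well); it installs the CompileLazy builtin into JSFunction::kCodeOffset directly, behind the usual write barrier, before falling through to Generate_CompileLazy. A minimal sketch of the new behaviour in a plain-pointer model (not V8's object layout):

    struct Code {};
    struct JSFunctionModel { Code* code; };

    // New behaviour: the function's code slot is reset straight to CompileLazy.
    // The real code also runs RecordWriteField as a GC write barrier.
    void ResetToCompileLazy(JSFunctionModel* fn, Code* compile_lazy_builtin) {
      fn->code = compile_lazy_builtin;
    }
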
@@ -1314,6 +1310,76 @@ void Builtins::Generate_CompileLazyDeoptimizedCode(MacroAssembler* masm) {
Generate_CompileLazy(masm);
}
+static void GetSharedFunctionInfoCode(MacroAssembler* masm, Register sfi_data,
+ Register scratch1) {
+ // Figure out the SFI's code object.
+ Label done;
+ Label check_is_bytecode_array;
+ Label check_is_code;
+ Label check_is_fixed_array;
+ Label check_is_pre_parsed_scope_data;
+ Label check_is_function_template_info;
+ Label check_is_interpreter_data;
+
+ Register data_type = scratch1;
+
+ // IsSmi: Is builtin
+ __ JumpIfNotSmi(sfi_data, &check_is_bytecode_array);
+ __ li(scratch1,
+ Operand(ExternalReference::builtins_address(masm->isolate())));
+ // Avoid untagging the Smi by merging the shift
+ STATIC_ASSERT(kPointerSizeLog2 < kSmiShift);
+ __ dsrl(sfi_data, sfi_data, kSmiShift - kPointerSizeLog2);
+ __ Daddu(scratch1, scratch1, sfi_data);
+ __ Ld(sfi_data, MemOperand(scratch1));
+ __ Branch(&done);
+
+ // Get map for subsequent checks.
+ __ bind(&check_is_bytecode_array);
+ __ Ld(data_type, FieldMemOperand(sfi_data, HeapObject::kMapOffset));
+ __ Lhu(data_type, FieldMemOperand(data_type, Map::kInstanceTypeOffset));
+
+ // IsBytecodeArray: Interpret bytecode
+ __ Branch(&check_is_code, ne, data_type, Operand(BYTECODE_ARRAY_TYPE));
+ __ Move(sfi_data, BUILTIN_CODE(masm->isolate(), InterpreterEntryTrampoline));
+ __ Branch(&done);
+
+ // IsCode: Run code
+ __ bind(&check_is_code);
+ __ Branch(&done, eq, data_type, Operand(CODE_TYPE));
+
+ // IsFixedArray: Instantiate using AsmWasmData,
+ __ bind(&check_is_fixed_array);
+ __ Branch(&check_is_pre_parsed_scope_data, ne, data_type,
+ Operand(FIXED_ARRAY_TYPE));
+ __ Move(sfi_data, BUILTIN_CODE(masm->isolate(), InstantiateAsmJs));
+ __ Branch(&done);
+
+ // IsPreParsedScopeData: Compile lazy
+ __ bind(&check_is_pre_parsed_scope_data);
+ __ Branch(&check_is_function_template_info, ne, data_type,
+ Operand(TUPLE2_TYPE));
+ __ Move(sfi_data, BUILTIN_CODE(masm->isolate(), CompileLazy));
+ __ Branch(&done);
+
+ // IsFunctionTemplateInfo: API call
+ __ bind(&check_is_function_template_info);
+ __ Branch(&check_is_interpreter_data, ne, data_type,
+ Operand(FUNCTION_TEMPLATE_INFO_TYPE));
+ __ Move(sfi_data, BUILTIN_CODE(masm->isolate(), HandleApiCall));
+
+ // IsInterpreterData: Interpret bytecode
+ __ bind(&check_is_interpreter_data);
+ if (FLAG_debug_code) {
+ __ Assert(eq, AbortReason::kInvalidSharedFunctionInfoData, data_type,
+ Operand(INTERPRETER_DATA_TYPE));
+ }
+ __ Ld(sfi_data, FieldMemOperand(
+ sfi_data, InterpreterData::kInterpreterTrampolineOffset));
+
+ __ bind(&done);
+}
+
void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : argument count (preserved for callee)
@@ -1336,12 +1402,12 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// Is there an optimization marker or optimized code in the feedback vector?
MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, a4, t3, a5);
- // We found no optimized code.
+ // We found no optimized code. Infer the code object needed for the SFI.
Register entry = a4;
__ Ld(entry, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
+ __ Ld(entry, FieldMemOperand(entry, SharedFunctionInfo::kFunctionDataOffset));
+ GetSharedFunctionInfoCode(masm, entry, t1);
- // If SFI points to anything other than CompileLazy, install that.
- __ Ld(entry, FieldMemOperand(entry, SharedFunctionInfo::kCodeOffset));
__ Move(t1, masm->CodeObject());
__ Branch(&gotta_call_runtime, eq, entry, Operand(t1));
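
The new GetSharedFunctionInfoCode helper replaces the old read of SharedFunctionInfo::kCodeOffset in Generate_CompileLazy: the code object to install is now inferred from the kind of function_data. A hedged C++ summary of that dispatch (the enum and mapping labels are illustrative; the authoritative list is the chain of checks in the helper above):

    enum class SfiDataKind {
      kBuiltinId,             // Smi         -> entry from the builtins table
      kBytecodeArray,         //             -> InterpreterEntryTrampoline
      kCode,                  //             -> that code object itself
      kAsmWasmData,           // FixedArray  -> InstantiateAsmJs
      kPreParsedScopeData,    // Tuple2      -> CompileLazy
      kFunctionTemplateInfo,  //             -> HandleApiCall
      kInterpreterData,       //             -> stored interpreter trampoline
    };

    const char* CodeFor(SfiDataKind kind) {
      switch (kind) {
        case SfiDataKind::kBuiltinId:            return "builtin from table";
        case SfiDataKind::kBytecodeArray:        return "InterpreterEntryTrampoline";
        case SfiDataKind::kCode:                 return "the Code object itself";
        case SfiDataKind::kAsmWasmData:          return "InstantiateAsmJs";
        case SfiDataKind::kPreParsedScopeData:   return "CompileLazy";
        case SfiDataKind::kFunctionTemplateInfo: return "HandleApiCall";
        case SfiDataKind::kInterpreterData:      return "InterpreterData trampoline";
      }
      return "unreachable";
    }
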
@@ -1410,25 +1476,9 @@ void Builtins::Generate_DeserializeLazy(MacroAssembler* masm) {
{
// If we've reached this spot, the target builtin has been deserialized and
- // we simply need to copy it over. First to the shared function info.
+ // we simply need to copy it over to the target function.
Register target_builtin = scratch1;
- Register shared = scratch0;
-
- __ Ld(shared,
- FieldMemOperand(target, JSFunction::kSharedFunctionInfoOffset));
-
- CHECK(t1 != target && t1 != scratch0 && t1 != scratch1);
- CHECK(t3 != target && t3 != scratch0 && t3 != scratch1);
-
- __ Sd(target_builtin,
- FieldMemOperand(shared, SharedFunctionInfo::kCodeOffset));
- __ mov(t3, target_builtin); // Write barrier clobbers t3 below.
- __ RecordWriteField(shared, SharedFunctionInfo::kCodeOffset, t3, t1,
- kRAHasNotBeenSaved, kDontSaveFPRegs,
- OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
-
- // And second to the target function.
__ Sd(target_builtin, FieldMemOperand(target, JSFunction::kCodeOffset));
__ mov(t3, target_builtin); // Write barrier clobbers t3 below.
@@ -2031,7 +2081,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
// Check that function is not a "classConstructor".
Label class_constructor;
__ Ld(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
- __ Lwu(a3, FieldMemOperand(a2, SharedFunctionInfo::kCompilerHintsOffset));
+ __ Lwu(a3, FieldMemOperand(a2, SharedFunctionInfo::kFlagsOffset));
__ And(at, a3, Operand(SharedFunctionInfo::IsClassConstructorBit::kMask));
__ Branch(&class_constructor, ne, at, Operand(zero_reg));
@@ -2041,7 +2091,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ Ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
// We need to convert the receiver for non-native sloppy mode functions.
Label done_convert;
- __ Lwu(a3, FieldMemOperand(a2, SharedFunctionInfo::kCompilerHintsOffset));
+ __ Lwu(a3, FieldMemOperand(a2, SharedFunctionInfo::kFlagsOffset));
__ And(at, a3,
Operand(SharedFunctionInfo::IsNativeBit::kMask |
SharedFunctionInfo::IsStrictBit::kMask));
@@ -2259,18 +2309,27 @@ void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
// -- a1 : the constructor to call (checked to be a JSFunction)
// -- a3 : the new target (checked to be a constructor)
// -----------------------------------
+ __ AssertConstructor(a1);
__ AssertFunction(a1);
// Calling convention for function specific ConstructStubs require
// a2 to contain either an AllocationSite or undefined.
__ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
- // Tail call to the function-specific construct stub (still in the caller
- // context at this point).
+ Label call_generic_stub;
+
+ // Jump to JSBuiltinsConstructStub or JSConstructStubGeneric.
__ Ld(a4, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
- __ Ld(a4, FieldMemOperand(a4, SharedFunctionInfo::kConstructStubOffset));
- __ Daddu(at, a4, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(at);
+ __ lwu(a4, FieldMemOperand(a4, SharedFunctionInfo::kFlagsOffset));
+ __ And(a4, a4, Operand(SharedFunctionInfo::ConstructAsBuiltinBit::kMask));
+ __ Branch(&call_generic_stub, eq, a4, Operand(zero_reg));
+
+ __ Jump(BUILTIN_CODE(masm->isolate(), JSBuiltinsConstructStub),
+ RelocInfo::CODE_TARGET);
+
+ __ bind(&call_generic_stub);
+ __ Jump(masm->isolate()->builtins()->JSConstructStubGeneric(),
+ RelocInfo::CODE_TARGET);
}
// static
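
Generate_ConstructFunction no longer jumps through a per-function construct stub stored on the SharedFunctionInfo; a single ConstructAsBuiltinBit in the flags word now selects between JSBuiltinsConstructStub and JSConstructStubGeneric. A small sketch with an assumed mask value:

    #include <cstdint>

    // Illustrative bit only; V8 computes the real mask from a BitField.
    constexpr uint32_t kConstructAsBuiltinMask = 1u << 5;

    enum class ConstructStub { kBuiltins, kGeneric };

    // Mirrors the And + branch-on-zero sequence above.
    ConstructStub SelectConstructStub(uint32_t sfi_flags) {
      return (sfi_flags & kConstructAsBuiltinMask) ? ConstructStub::kBuiltins
                                                   : ConstructStub::kGeneric;
    }
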
@@ -2280,6 +2339,7 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
// -- a1 : the function to call (checked to be a JSBoundFunction)
// -- a3 : the new target (checked to be a constructor)
// -----------------------------------
+ __ AssertConstructor(a1);
__ AssertBoundFunction(a1);
// Load [[BoundArguments]] into a2 and length of that into a4.
@@ -2372,16 +2432,17 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
Label non_constructor, non_proxy;
__ JumpIfSmi(a1, &non_constructor);
- // Dispatch based on instance type.
- __ GetObjectType(a1, t1, t2);
- __ Jump(BUILTIN_CODE(masm->isolate(), ConstructFunction),
- RelocInfo::CODE_TARGET, eq, t2, Operand(JS_FUNCTION_TYPE));
-
// Check if target has a [[Construct]] internal method.
+ __ ld(t1, FieldMemOperand(a1, HeapObject::kMapOffset));
__ Lbu(t3, FieldMemOperand(t1, Map::kBitFieldOffset));
__ And(t3, t3, Operand(Map::IsConstructorBit::kMask));
__ Branch(&non_constructor, eq, t3, Operand(zero_reg));
+ // Dispatch based on instance type.
+ __ Lhu(t2, FieldMemOperand(t1, Map::kInstanceTypeOffset));
+ __ Jump(BUILTIN_CODE(masm->isolate(), ConstructFunction),
+ RelocInfo::CODE_TARGET, eq, t2, Operand(JS_FUNCTION_TYPE));
+
// Only dispatch to bound functions after checking whether they are
// constructors.
__ Jump(BUILTIN_CODE(masm->isolate(), ConstructBoundFunction),
@@ -2588,26 +2649,32 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
{
FrameScope scope(masm, StackFrame::INTERNAL);
+ auto wasm_instance_reg = a0; // TODO(titzer): put in a common place.
+
// Save all parameter registers (see wasm-linkage.cc). They might be
// overwritten in the runtime call below. We don't have any callee-saved
// registers in wasm, so no need to store anything else.
- constexpr RegList gp_regs =
- Register::ListOf<a0, a1, a2, a3, a4, a5, a6, a7>();
+ constexpr RegList gp_regs = Register::ListOf<a1, a2, a3, a4, a5, a6, a7>();
constexpr RegList fp_regs =
DoubleRegister::ListOf<f2, f4, f6, f8, f10, f12, f14>();
__ MultiPush(gp_regs);
__ MultiPushFPU(fp_regs);
+ // Pass the WASM instance as an explicit argument to WasmCompileLazy.
+ __ push(wasm_instance_reg);
+ // Initialize the JavaScript context with 0. CEntryStub will use it to
+ // set the current context on the isolate.
__ Move(kContextRegister, Smi::kZero);
__ CallRuntime(Runtime::kWasmCompileLazy);
+ // The WASM instance is the second return value.
+ __ mov(wasm_instance_reg, kReturnRegister1);
// Restore registers.
__ MultiPopFPU(fp_regs);
__ MultiPop(gp_regs);
}
- // Now jump to the instructions of the returned code object.
- __ Daddu(at, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(at);
+ // Finally, jump to the entrypoint.
+ __ Jump(v0);
}
#undef __
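
In Generate_WasmCompileLazy the instance register (a0) is no longer part of the saved GP set; it is pushed as an explicit argument to Runtime::kWasmCompileLazy, restored from the second return value, and the final jump goes straight to the returned entry address in v0 instead of skipping over a Code header. A rough sketch of the old vs. new jump-target computation (both constants are assumptions for illustration):

    #include <cstdint>

    constexpr intptr_t kHeapObjectTag = 1;      // assumed tag bias
    constexpr intptr_t kCodeHeaderSize = 0x40;  // assumed header size

    // Old: v0 held a tagged Code object, so the entrypoint had to be computed.
    intptr_t OldJumpTarget(intptr_t tagged_code_object) {
      return tagged_code_object + kCodeHeaderSize - kHeapObjectTag;
    }

    // New: v0 already holds the raw entrypoint address.
    intptr_t NewJumpTarget(intptr_t entry_address) { return entry_address; }
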