Diffstat (limited to 'deps/v8/src/builtins/arm/builtins-arm.cc')
 deps/v8/src/builtins/arm/builtins-arm.cc | 559 +++----------
 1 file changed, 156 insertions(+), 403 deletions(-)
diff --git a/deps/v8/src/builtins/arm/builtins-arm.cc b/deps/v8/src/builtins/arm/builtins-arm.cc
index ec1b37e94f..a5219bf070 100644
--- a/deps/v8/src/builtins/arm/builtins-arm.cc
+++ b/deps/v8/src/builtins/arm/builtins-arm.cc
@@ -28,7 +28,7 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm, Address address,
ExternalReference::Create(address).address()) &
1);
#endif
- __ Move(r5, ExternalReference::Create(address));
+ __ Move(kJavaScriptCallExtraArg1Register, ExternalReference::Create(address));
if (exit_frame_type == BUILTIN_EXIT) {
__ Jump(BUILTIN_CODE(masm->isolate(), AdaptorWithBuiltinExitFrame),
RelocInfo::CODE_TARGET);
@@ -39,57 +39,6 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm, Address address,
}
}
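The hunk above replaces a hard-coded r5 with a named calling-convention register. A minimal sketch of the idea, assuming an alias definition along these lines (the concrete register behind the alias is whatever the ARM port assigns; r5 below is only an illustration):

    // Hypothetical alias: name the register that carries a builtin's extra
    // argument, so call sites express the convention, not a raw register.
    constexpr Register kJavaScriptCallExtraArg1Register = r5;  // illustration

    // Call sites then stay correct even if the port re-assigns the register:
    __ Move(kJavaScriptCallExtraArg1Register, ExternalReference::Create(address));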
-namespace {
-
-void AdaptorWithExitFrameType(MacroAssembler* masm,
- Builtins::ExitFrameType exit_frame_type) {
- // ----------- S t a t e -------------
- // -- r0 : number of arguments excluding receiver
- // -- r1 : target
- // -- r3 : new.target
- // -- r5 : entry point
- // -- sp[0] : last argument
- // -- ...
- // -- sp[4 * (argc - 1)] : first argument
- // -- sp[4 * argc] : receiver
- // -----------------------------------
- __ AssertFunction(r1);
-
- // Make sure we operate in the context of the called function (for example
- // ConstructStubs implemented in C++ will be run in the context of the caller
- // instead of the callee, due to the way that [[Construct]] is defined for
- // ordinary functions).
- __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
-
- // CEntry expects r0 to contain the number of arguments including the
- // receiver and the extra arguments.
- __ add(r0, r0, Operand(BuiltinExitFrameConstants::kNumExtraArgsWithReceiver));
-
- // Insert extra arguments.
- __ PushRoot(Heap::kTheHoleValueRootIndex); // Padding.
- __ SmiTag(r0);
- __ Push(r0, r1, r3);
- __ SmiUntag(r0);
-
- // Jump to the C entry runtime stub directly here instead of using
- // JumpToExternalReference. We have already loaded the entry point into r5
- // in Generate_Adaptor.
- __ mov(r1, r5);
- Handle<Code> code =
- CodeFactory::CEntry(masm->isolate(), 1, kDontSaveFPRegs, kArgvOnStack,
- exit_frame_type == Builtins::BUILTIN_EXIT);
- __ Jump(code, RelocInfo::CODE_TARGET);
-}
-} // namespace
-
-void Builtins::Generate_AdaptorWithExitFrame(MacroAssembler* masm) {
- AdaptorWithExitFrameType(masm, EXIT);
-}
-
-void Builtins::Generate_AdaptorWithBuiltinExitFrame(MacroAssembler* masm) {
- AdaptorWithExitFrameType(masm, BUILTIN_EXIT);
-}
-
void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : number of arguments
@@ -111,39 +60,8 @@ void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) {
// function.
// tail call a stub
__ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
- InternalArrayConstructorStub stub(masm->isolate());
- __ TailCallStub(&stub);
-}
-
-void Builtins::Generate_ArrayConstructor(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r0 : number of arguments
- // -- r1 : array function
- // -- lr : return address
- // -- sp[...]: constructor arguments
- // -----------------------------------
- Label generic_array_code, one_or_more_arguments, two_or_more_arguments;
-
- if (FLAG_debug_code) {
- // Initial maps for the builtin Array functions should be maps.
- __ ldr(r7, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
- __ SmiTst(r7);
- __ Assert(ne, AbortReason::kUnexpectedInitialMapForArrayFunction);
- __ CompareObjectType(r7, r8, r9, MAP_TYPE);
- __ Assert(eq, AbortReason::kUnexpectedInitialMapForArrayFunction);
- }
-
- // r2 is the AllocationSite - here undefined.
- __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
- // If r3 (new target) is undefined, then this is the 'Call' case, so move
- // r1 (the constructor) to r3.
- __ cmp(r3, r2);
- __ mov(r3, r1, LeaveCC, eq);
-
- // Run the native code for the Array function called as a normal function.
- // tail call a stub
- ArrayConstructorStub stub(masm->isolate());
- __ TailCallStub(&stub);
+ __ Jump(BUILTIN_CODE(masm->isolate(), InternalArrayConstructorImpl),
+ RelocInfo::CODE_TARGET);
}
static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
@@ -494,20 +412,25 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// -- sp[0] : generator receiver
// -----------------------------------
- // Push holes for arguments to generator function. Since the parser forced
- // context allocation for any variables in generators, the actual argument
- // values have already been copied into the context and these dummy values
- // will never be used.
+ // Copy the function arguments from the generator object's register file.
__ ldr(r3, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
- __ ldr(r3,
- FieldMemOperand(r3, SharedFunctionInfo::kFormalParameterCountOffset));
+ __ ldrh(r3,
+ FieldMemOperand(r3, SharedFunctionInfo::kFormalParameterCountOffset));
+ __ ldr(r2,
+ FieldMemOperand(r1, JSGeneratorObject::kParametersAndRegistersOffset));
{
Label done_loop, loop;
+ __ mov(r6, Operand(0));
+
__ bind(&loop);
- __ sub(r3, r3, Operand(1), SetCC);
- __ b(mi, &done_loop);
- __ PushRoot(Heap::kTheHoleValueRootIndex);
+ __ cmp(r6, r3);
+ __ b(ge, &done_loop);
+ __ add(scratch, r2, Operand(r6, LSL, kPointerSizeLog2));
+ __ ldr(scratch, FieldMemOperand(scratch, FixedArray::kHeaderSize));
+ __ Push(scratch);
+ __ add(r6, r6, Operand(1));
__ b(&loop);
+
__ bind(&done_loop);
}
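In C++ terms, the replacement loop does roughly the following (a sketch; parameters_and_registers stands for the FixedArray loaded into r2 above, and formal_parameter_count for the half-word loaded into r3):

    // Sketch: push the actual argument values saved in the generator's
    // parameters-and-registers array, lowest index first, instead of
    // pushing the-hole placeholders argc times as before.
    for (int i = 0; i < formal_parameter_count; ++i) {
      Push(parameters_and_registers[i]);
    }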
@@ -523,8 +446,8 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// Resume (Ignition/TurboFan) generator object.
{
__ ldr(r0, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
- __ ldr(r0, FieldMemOperand(
- r0, SharedFunctionInfo::kFormalParameterCountOffset));
+ __ ldrh(r0, FieldMemOperand(
+ r0, SharedFunctionInfo::kFormalParameterCountOffset));
// We abuse new.target both to indicate that this is a resume call and to
// pass in the generator object. In ordinary calls, new.target is always
// undefined because generator functions are non-constructable.
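The ldr to ldrh changes in this function (and in the hunks further down) assume the formal parameter count is now stored as a 16-bit field on SharedFunctionInfo, so a half-word load is the matching width:

    // Sketch (an assumption about the field layout, not confirmed here):
    struct SharedFunctionInfoLayout {
      uint16_t formal_parameter_count;  // read with ldrh rather than ldr
    };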
@@ -906,17 +829,12 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
FrameScope frame_scope(masm, StackFrame::MANUAL);
__ PushStandardFrame(closure);
- // Get the bytecode array from the function object (or from the DebugInfo if
- // it is present) and load it into kInterpreterBytecodeArrayRegister.
- Label maybe_load_debug_bytecode_array, bytecode_array_loaded;
+ // Get the bytecode array from the function object and load it into
+ // kInterpreterBytecodeArrayRegister.
__ ldr(r0, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
__ ldr(kInterpreterBytecodeArrayRegister,
FieldMemOperand(r0, SharedFunctionInfo::kFunctionDataOffset));
GetSharedFunctionInfoBytecode(masm, kInterpreterBytecodeArrayRegister, r4);
- __ ldr(r4, FieldMemOperand(r0, SharedFunctionInfo::kDebugInfoOffset));
- __ SmiTst(r4);
- __ b(ne, &maybe_load_debug_bytecode_array);
- __ bind(&bytecode_array_loaded);
// Increment invocation count for the function.
__ ldr(r9, FieldMemOperand(feedback_vector,
@@ -1027,37 +945,6 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// The return value is in r0.
LeaveInterpreterFrame(masm, r2);
__ Jump(lr);
-
- // Load debug copy of the bytecode array if it exists.
- // kInterpreterBytecodeArrayRegister is already loaded with
- // SharedFunctionInfo::kFunctionDataOffset.
- __ bind(&maybe_load_debug_bytecode_array);
- __ ldr(r9, FieldMemOperand(r4, DebugInfo::kDebugBytecodeArrayOffset), ne);
- __ JumpIfRoot(r9, Heap::kUndefinedValueRootIndex, &bytecode_array_loaded);
-
- __ mov(kInterpreterBytecodeArrayRegister, r9);
- __ ldr(r9, FieldMemOperand(r4, DebugInfo::kFlagsOffset));
- __ SmiUntag(r9);
- __ And(r9, r9, Operand(DebugInfo::kDebugExecutionMode));
-
- ExternalReference debug_execution_mode =
- ExternalReference::debug_execution_mode_address(masm->isolate());
- __ mov(r4, Operand(debug_execution_mode));
- __ ldrsb(r4, MemOperand(r4));
- STATIC_ASSERT(static_cast<int>(DebugInfo::kDebugExecutionMode) ==
- static_cast<int>(DebugInfo::kSideEffects));
- __ cmp(r4, r9);
- __ b(eq, &bytecode_array_loaded);
-
- __ push(closure);
- __ push(feedback_vector);
- __ push(kInterpreterBytecodeArrayRegister);
- __ push(closure);
- __ CallRuntime(Runtime::kDebugApplyInstrumentation);
- __ pop(kInterpreterBytecodeArrayRegister);
- __ pop(feedback_vector);
- __ pop(closure);
- __ b(&bytecode_array_loaded);
}
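The block deleted above used to substitute a debug copy of the bytecode array and re-instrument the function whenever the DebugInfo's recorded execution mode disagreed with the isolate's. A sketch of the removed control flow in C++ terms (names illustrative, not V8 API):

    // Sketch of the removed logic:
    if (!shared_info->debug_info()->IsSmi()) {                 // has DebugInfo
      Object* debug_bytecode = debug_info->debug_bytecode_array();
      if (debug_bytecode != undefined) {
        bytecode_array = debug_bytecode;                       // use debug copy
        if ((debug_info->flags() & kDebugExecutionMode) !=
            isolate_debug_execution_mode) {
          Runtime_DebugApplyInstrumentation(closure);          // re-instrument
        }
      }
    }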
static void Generate_InterpreterPushArgs(MacroAssembler* masm,
@@ -1160,8 +1047,8 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
// Tail call to the array construct stub (still in the caller
// context at this point).
- ArrayConstructorStub array_constructor_stub(masm->isolate());
- __ Jump(array_constructor_stub.GetCode(), RelocInfo::CODE_TARGET);
+ Handle<Code> code = BUILTIN_CODE(masm->isolate(), ArrayConstructorImpl);
+ __ Jump(code, RelocInfo::CODE_TARGET);
} else if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
// Call the constructor with r0, r1, and r3 unmodified.
__ Jump(BUILTIN_CODE(masm->isolate(), ConstructWithSpread),
@@ -1278,208 +1165,6 @@ void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
Generate_InterpreterEnterBytecode(masm);
}
-void Builtins::Generate_CompileLazyDeoptimizedCode(MacroAssembler* masm) {
- // Set the code slot inside the JSFunction to CompileLazy.
- __ Move(r2, BUILTIN_CODE(masm->isolate(), CompileLazy));
- __ str(r2, FieldMemOperand(r1, JSFunction::kCodeOffset));
- __ RecordWriteField(r1, JSFunction::kCodeOffset, r2, r4, kLRHasNotBeenSaved,
- kDontSaveFPRegs, OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
- // Jump to compile lazy.
- Generate_CompileLazy(masm);
-}
-
-static void GetSharedFunctionInfoCode(MacroAssembler* masm, Register sfi_data,
- Register scratch1) {
- // Figure out the SFI's code object.
- Label done;
- Label check_is_bytecode_array;
- Label check_is_exported_function_data;
- Label check_is_fixed_array;
- Label check_is_pre_parsed_scope_data;
- Label check_is_function_template_info;
- Label check_is_interpreter_data;
-
- Register data_type = scratch1;
-
- // IsSmi: Is builtin
- __ JumpIfNotSmi(sfi_data, &check_is_bytecode_array);
- __ Move(scratch1, ExternalReference::builtins_address(masm->isolate()));
- __ ldr(sfi_data, MemOperand::PointerAddressFromSmiKey(scratch1, sfi_data));
- __ b(&done);
-
- // Get map for subsequent checks.
- __ bind(&check_is_bytecode_array);
- __ ldr(data_type, FieldMemOperand(sfi_data, HeapObject::kMapOffset));
- __ ldrh(data_type, FieldMemOperand(data_type, Map::kInstanceTypeOffset));
-
- // IsBytecodeArray: Interpret bytecode
- __ cmp(data_type, Operand(BYTECODE_ARRAY_TYPE));
- __ b(ne, &check_is_exported_function_data);
- __ Move(sfi_data, BUILTIN_CODE(masm->isolate(), InterpreterEntryTrampoline));
- __ b(&done);
-
- // IsWasmExportedFunctionData: Use the wrapper code
- __ bind(&check_is_exported_function_data);
- __ cmp(data_type, Operand(WASM_EXPORTED_FUNCTION_DATA_TYPE));
- __ b(ne, &check_is_fixed_array);
- __ ldr(sfi_data, FieldMemOperand(
- sfi_data, WasmExportedFunctionData::kWrapperCodeOffset));
- __ b(&done);
-
- // IsFixedArray: Instantiate using AsmWasmData
- __ bind(&check_is_fixed_array);
- __ cmp(data_type, Operand(FIXED_ARRAY_TYPE));
- __ b(ne, &check_is_pre_parsed_scope_data);
- __ Move(sfi_data, BUILTIN_CODE(masm->isolate(), InstantiateAsmJs));
- __ b(&done);
-
- // IsPreParsedScopeData: Compile lazy
- __ bind(&check_is_pre_parsed_scope_data);
- __ cmp(data_type, Operand(TUPLE2_TYPE));
- __ b(ne, &check_is_function_template_info);
- __ Move(sfi_data, BUILTIN_CODE(masm->isolate(), CompileLazy));
- __ b(&done);
-
- // IsFunctionTemplateInfo: API call
- __ bind(&check_is_function_template_info);
- __ cmp(data_type, Operand(FUNCTION_TEMPLATE_INFO_TYPE));
- __ b(ne, &check_is_interpreter_data);
- __ Move(sfi_data, BUILTIN_CODE(masm->isolate(), HandleApiCall));
- __ b(&done);
-
- // IsInterpreterData: Interpret bytecode
- __ bind(&check_is_interpreter_data);
- if (FLAG_debug_code) {
- __ cmp(data_type, Operand(INTERPRETER_DATA_TYPE));
- __ Assert(eq, AbortReason::kInvalidSharedFunctionInfoData);
- }
- __ ldr(
- sfi_data,
- FieldMemOperand(sfi_data, InterpreterData::kInterpreterTrampolineOffset));
-
- __ bind(&done);
-}
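The deleted helper dispatched on the dynamic type of SharedFunctionInfo's function_data slot. Written as a plain C++ switch it was, in effect (a sketch mirroring only the checks visible above; helper names are illustrative):

    // Sketch of the removed dispatch:
    Code* CodeForFunctionData(Object* sfi_data) {
      if (sfi_data->IsSmi())                       // builtin id
        return builtins_table[Smi::ToInt(sfi_data)];
      switch (InstanceTypeOf(sfi_data)) {
        case BYTECODE_ARRAY_TYPE:                  // interpret bytecode
          return InterpreterEntryTrampoline;
        case WASM_EXPORTED_FUNCTION_DATA_TYPE:     // use the wrapper code
          return WasmExportedFunctionData_wrapper_code(sfi_data);
        case FIXED_ARRAY_TYPE:                     // asm.js data
          return InstantiateAsmJs;
        case TUPLE2_TYPE:                          // pre-parsed scope data
          return CompileLazy;
        case FUNCTION_TEMPLATE_INFO_TYPE:          // API call
          return HandleApiCall;
        default:                                   // INTERPRETER_DATA_TYPE
          return InterpreterData_interpreter_trampoline(sfi_data);
      }
    }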
-
-void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r0 : argument count (preserved for callee)
- // -- r3 : new target (preserved for callee)
- // -- r1 : target function (preserved for callee)
- // -----------------------------------
- // First lookup code, maybe we don't need to compile!
- Label gotta_call_runtime;
-
- Register closure = r1;
- Register feedback_vector = r2;
-
- // Do we have a valid feedback vector?
- __ ldr(feedback_vector,
- FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
- __ ldr(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
- __ JumpIfRoot(feedback_vector, Heap::kUndefinedValueRootIndex,
- &gotta_call_runtime);
-
- // Is there an optimization marker or optimized code in the feedback vector?
- MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, r4, r6, r5);
-
- // We found no optimized code. Infer the code object needed for the SFI.
- Register entry = r4;
- __ ldr(entry,
- FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
- __ ldr(entry,
- FieldMemOperand(entry, SharedFunctionInfo::kFunctionDataOffset));
- GetSharedFunctionInfoCode(masm, entry, r5);
-
- // If code entry points to anything other than CompileLazy, install that.
- __ Move(r5, masm->CodeObject());
- __ cmp(entry, r5);
- __ b(eq, &gotta_call_runtime);
-
- // Install the SFI's code entry.
- __ str(entry, FieldMemOperand(closure, JSFunction::kCodeOffset));
- __ mov(r9, entry); // Write barrier clobbers r9 below.
- __ RecordWriteField(closure, JSFunction::kCodeOffset, r9, r5,
- kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- __ add(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(entry);
-
- __ bind(&gotta_call_runtime);
- GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
-}
-
-// Lazy deserialization design doc: http://goo.gl/dxkYDZ.
-void Builtins::Generate_DeserializeLazy(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r0 : argument count (preserved for callee)
- // -- r3 : new target (preserved for callee)
- // -- r1 : target function (preserved for callee)
- // -----------------------------------
-
- Label deserialize_in_runtime;
-
- Register target = r1; // Must be preserved
- Register scratch0 = r2;
- Register scratch1 = r4;
-
- CHECK(scratch0 != r0 && scratch0 != r3 && scratch0 != r1);
- CHECK(scratch1 != r0 && scratch1 != r3 && scratch1 != r1);
- CHECK(scratch0 != scratch1);
-
- // Load the builtin id for lazy deserialization from SharedFunctionInfo.
-
- __ AssertFunction(target);
- __ ldr(scratch0,
- FieldMemOperand(target, JSFunction::kSharedFunctionInfoOffset));
-
- __ ldr(scratch1,
- FieldMemOperand(scratch0, SharedFunctionInfo::kFunctionDataOffset));
- __ AssertSmi(scratch1);
-
- // The builtin may already have been deserialized. If that is the case, it is
- // stored in the builtins table, and we can copy the correct code object to
- // both the shared function info and function without calling into runtime.
- //
- // Otherwise, we need to call into runtime to deserialize.
-
- {
- // Load the code object at builtins_table[builtin_id] into scratch1.
-
- __ SmiUntag(scratch1);
- __ Move(scratch0, ExternalReference::builtins_address(masm->isolate()));
- __ ldr(scratch1, MemOperand(scratch0, scratch1, LSL, kPointerSizeLog2));
-
- // Check if the loaded code object has already been deserialized. This is
- // the case iff it does not equal DeserializeLazy.
-
- __ Move(scratch0, masm->CodeObject());
- __ cmp(scratch1, scratch0);
- __ b(eq, &deserialize_in_runtime);
- }
-
- {
- // If we've reached this spot, the target builtin has been deserialized and
- // we simply need to copy it over to the target function.
-
- Register target_builtin = scratch1;
-
- __ str(target_builtin, FieldMemOperand(target, JSFunction::kCodeOffset));
- __ mov(r9, target_builtin); // Write barrier clobbers r9 below.
- __ RecordWriteField(target, JSFunction::kCodeOffset, r9, r5,
- kLRHasNotBeenSaved, kDontSaveFPRegs,
- OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
-
- // All copying is done. Jump to the deserialized code object.
-
- __ add(target_builtin, target_builtin,
- Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(target_builtin);
- }
-
- __ bind(&deserialize_in_runtime);
- GenerateTailCallToReturnedCode(masm, Runtime::kDeserializeLazy);
-}
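The deleted DeserializeLazy builtin implemented the fast path of the lazy-deserialization scheme referenced in the design-doc comment above: if the builtins table already holds the deserialized code, install it on the function directly; otherwise fall back to the runtime. A sketch (names illustrative):

    // Sketch of the removed fast path:
    int builtin_id = Smi::ToInt(shared->function_data());
    Code* code = builtins_table[builtin_id];
    if (code == deserialize_lazy_code_object) {
      Runtime_DeserializeLazy(target);   // not deserialized yet: slow path
    } else {
      target->set_code(code);            // install, with a write barrier
      JumpTo(code->entry());             // i.e. code + Code::kHeaderSize - tag
    }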
-
void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : argument count (preserved for callee)
@@ -1892,10 +1577,27 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
// -- r4 : len (number of elements to push from args)
// -- r3 : new.target (for [[Construct]])
// -----------------------------------
- __ AssertFixedArray(r2);
-
Register scratch = r8;
+ if (masm->emit_debug_code()) {
+ // Allow r2 to be a FixedArray, or a FixedDoubleArray if r4 == 0.
+ Label ok, fail;
+ __ AssertNotSmi(r2);
+ __ ldr(scratch, FieldMemOperand(r2, HeapObject::kMapOffset));
+ __ ldrh(r6, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
+ __ cmp(r6, Operand(FIXED_ARRAY_TYPE));
+ __ b(eq, &ok);
+ __ cmp(r6, Operand(FIXED_DOUBLE_ARRAY_TYPE));
+ __ b(ne, &fail);
+ __ cmp(r4, Operand(0));
+ __ b(eq, &ok);
+ // Fall through.
+ __ bind(&fail);
+ __ Abort(AbortReason::kOperandIsNotAFixedArray);
+
+ __ bind(&ok);
+ }
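The relaxed assertion accepts a FixedDoubleArray only when there is nothing to push. A self-contained sketch of the invariant being checked (types illustrative):

    // Sketch of the debug-mode invariant:
    enum InstanceType { FIXED_ARRAY_TYPE, FIXED_DOUBLE_ARRAY_TYPE, OTHER_TYPE };

    bool ArgumentsListIsAcceptable(InstanceType type, int len) {
      if (type == FIXED_ARRAY_TYPE) return true;           // always fine
      return type == FIXED_DOUBLE_ARRAY_TYPE && len == 0;  // only if empty
    }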
+
// Check for stack overflow.
{
// Check the stack for overflow. We are not trying to catch interruptions
@@ -1977,8 +1679,8 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
{
__ ldr(r5, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
__ ldr(r5, FieldMemOperand(r5, JSFunction::kSharedFunctionInfoOffset));
- __ ldr(r5, FieldMemOperand(
- r5, SharedFunctionInfo::kFormalParameterCountOffset));
+ __ ldrh(r5, FieldMemOperand(
+ r5, SharedFunctionInfo::kFormalParameterCountOffset));
__ mov(r4, fp);
}
__ b(&arguments_done);
@@ -2108,8 +1810,8 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
// -- cp : the function context.
// -----------------------------------
- __ ldr(r2,
- FieldMemOperand(r2, SharedFunctionInfo::kFormalParameterCountOffset));
+ __ ldrh(r2,
+ FieldMemOperand(r2, SharedFunctionInfo::kFormalParameterCountOffset));
ParameterCount actual(r0);
ParameterCount expected(r2);
__ InvokeFunctionCode(r1, no_reg, expected, actual, JUMP_FUNCTION);
@@ -2378,42 +2080,6 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
RelocInfo::CODE_TARGET);
}
-// static
-void Builtins::Generate_AllocateInNewSpace(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r1 : requested object size (untagged)
- // -- lr : return address
- // -----------------------------------
- __ SmiTag(r1);
- __ Push(r1);
- __ Move(cp, Smi::kZero);
- __ TailCallRuntime(Runtime::kAllocateInNewSpace);
-}
-
-// static
-void Builtins::Generate_AllocateInOldSpace(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r1 : requested object size (untagged)
- // -- lr : return address
- // -----------------------------------
- __ SmiTag(r1);
- __ Move(r2, Smi::FromInt(AllocateTargetSpace::encode(OLD_SPACE)));
- __ Push(r1, r2);
- __ Move(cp, Smi::kZero);
- __ TailCallRuntime(Runtime::kAllocateInTargetSpace);
-}
-
-// static
-void Builtins::Generate_Abort(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r1 : message_id as Smi
- // -- lr : return address
- // -----------------------------------
- __ Push(r1);
- __ Move(cp, Smi::kZero);
- __ TailCallRuntime(Runtime::kAbort);
-}
-
void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : actual number of arguments
@@ -2425,10 +2091,10 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
Label invoke, dont_adapt_arguments, stack_overflow;
Label enough, too_few;
- __ cmp(r0, r2);
- __ b(lt, &too_few);
__ cmp(r2, Operand(SharedFunctionInfo::kDontAdaptArgumentsSentinel));
__ b(eq, &dont_adapt_arguments);
+ __ cmp(r0, r2);
+ __ b(lt, &too_few);
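Reordered, the adaptor's entry checks test for the don't-adapt sentinel before comparing argument counts, so the signed less-than comparison only ever sees a real parameter count (a sketch of the control flow):

    // Sketch of the reordered checks:
    if (expected == SharedFunctionInfo::kDontAdaptArgumentsSentinel)
      goto dont_adapt_arguments;   // now checked first
    if (actual < expected) goto too_few;
    // fall through: enough arguments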
Register scratch = r5;
@@ -2547,29 +2213,36 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
}
void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
+ // The function index was put in r4 by the jump table trampoline.
+ // Convert to Smi for the runtime call.
+ __ SmiTag(r4, r4);
{
- FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ TrapOnAbortScope trap_on_abort_scope(masm); // Avoid calls to Abort.
+ FrameAndConstantPoolScope scope(masm, StackFrame::WASM_COMPILE_LAZY);
// Save all parameter registers (see wasm-linkage.cc). They might be
// overwritten in the runtime call below. We don't have any callee-saved
// registers in wasm, so no need to store anything else.
- constexpr RegList gp_regs = Register::ListOf<r0, r1, r2>();
+ constexpr RegList gp_regs = Register::ListOf<r0, r1, r2, r3>();
constexpr DwVfpRegister lowest_fp_reg = d0;
constexpr DwVfpRegister highest_fp_reg = d7;
__ stm(db_w, sp, gp_regs);
__ vstm(db_w, sp, lowest_fp_reg, highest_fp_reg);
- // Pass the WASM instance as an explicit argument to WasmCompileLazy.
+ // Pass instance and function index as explicit arguments to the runtime
+ // function.
__ push(kWasmInstanceRegister);
+ __ push(r4);
+ // Load the correct CEntry builtin from the instance object.
+ __ ldr(r2, FieldMemOperand(kWasmInstanceRegister,
+ WasmInstanceObject::kCEntryStubOffset));
// Initialize the JavaScript context with 0. CEntry will use it to
// set the current context on the isolate.
__ Move(cp, Smi::kZero);
- __ CallRuntime(Runtime::kWasmCompileLazy);
- // The entrypoint address is the first return value.
+ __ CallRuntimeWithCEntry(Runtime::kWasmCompileLazy, r2);
+ // The entrypoint address is the return value.
__ mov(r8, kReturnRegister0);
- // The WASM instance is the second return value.
- __ mov(kWasmInstanceRegister, kReturnRegister1);
// Restore registers.
__ vldm(ia_w, sp, lowest_fp_reg, highest_fp_reg);
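In effect the lazy-compile call protocol changes as sketched below; this mirrors only what the hunk shows, and the accessor name for the instance's CEntry stub is illustrative:

    // Before: two return values, and the instance came back from the runtime.
    //   {entrypoint, instance} = Runtime::kWasmCompileLazy(instance)
    // After: the function index travels as an explicit Smi argument, the call
    // is dispatched through the CEntry code object read from
    // instance->centry_stub(), and only the entrypoint is returned; the
    // instance register is simply preserved across the call.
    //   entrypoint = Runtime::kWasmCompileLazy(instance, Smi(function_index))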
@@ -2741,6 +2414,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
void Builtins::Generate_DoubleToI(MacroAssembler* masm) {
Label negate, done;
+ TrapOnAbortScope trap_on_abort_scope(masm); // Avoid calls to Abort.
UseScratchRegisterScope temps(masm);
Register result_reg = r7;
Register double_low = GetRegisterThatIsNotOneOf(result_reg);
@@ -2832,20 +2506,20 @@ void Builtins::Generate_DoubleToI(MacroAssembler* masm) {
}
void Builtins::Generate_MathPowInternal(MacroAssembler* masm) {
- const Register exponent = MathPowTaggedDescriptor::exponent();
- DCHECK(exponent == r2);
const LowDwVfpRegister double_base = d0;
const LowDwVfpRegister double_exponent = d1;
const LowDwVfpRegister double_result = d2;
const LowDwVfpRegister double_scratch = d3;
const SwVfpRegister single_scratch = s6;
- const Register scratch = r9;
- const Register scratch2 = r4;
+ // Avoid using registers r0-r3, as they may be needed when calling out to C
+ // if the ABI is softfloat.
+ const Register integer_exponent = r4;
+ const Register scratch = r5;
Label call_runtime, done, int_exponent;
// Detect integer exponents stored as double.
- __ TryDoubleToInt32Exact(scratch, double_exponent, double_scratch);
+ __ TryDoubleToInt32Exact(integer_exponent, double_exponent, double_scratch);
__ b(eq, &int_exponent);
__ push(lr);
@@ -2862,16 +2536,13 @@ void Builtins::Generate_MathPowInternal(MacroAssembler* masm) {
// Calculate power with integer exponent.
__ bind(&int_exponent);
- // Get two copies of exponent in the registers scratch and exponent.
- // Exponent has previously been stored into scratch as an untagged integer.
- __ mov(exponent, scratch);
-
__ vmov(double_scratch, double_base); // Back up base.
- __ vmov(double_result, Double(1.0), scratch2);
+ __ vmov(double_result, Double(1.0), scratch);
// Get absolute value of exponent.
- __ cmp(scratch, Operand::Zero());
- __ rsb(scratch, scratch, Operand::Zero(), LeaveCC, mi);
+ __ cmp(integer_exponent, Operand::Zero());
+ __ mov(scratch, integer_exponent);
+ __ rsb(scratch, integer_exponent, Operand::Zero(), LeaveCC, mi);
Label while_true;
__ bind(&while_true);
@@ -2880,7 +2551,7 @@ void Builtins::Generate_MathPowInternal(MacroAssembler* masm) {
__ vmul(double_scratch, double_scratch, double_scratch, ne);
__ b(ne, &while_true);
- __ cmp(exponent, Operand::Zero());
+ __ cmp(integer_exponent, Operand::Zero());
__ b(ge, &done);
__ vmov(double_scratch, Double(1.0), scratch);
__ vdiv(double_result, double_scratch, double_result);
@@ -2890,7 +2561,7 @@ void Builtins::Generate_MathPowInternal(MacroAssembler* masm) {
__ b(ne, &done);
// double_exponent may not contain the exponent value if the input was a
// smi. We set it with the exponent value before bailing out.
- __ vmov(single_scratch, exponent);
+ __ vmov(single_scratch, integer_exponent);
__ vcvt_f64_s32(double_exponent, single_scratch);
// Returning or bailing out.
@@ -2908,6 +2579,88 @@ void Builtins::Generate_MathPowInternal(MacroAssembler* masm) {
__ Ret();
}
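The integer-exponent loop above is exponentiation by squaring: multiply the result in whenever the low exponent bit is set, square the base, shift, and invert at the end for negative exponents. A self-contained C++ equivalent of the algorithm (a sketch, not V8 code):

    #include <cstdint>

    double PowIntExponent(double base, int32_t exponent) {
      // Magnitude of the exponent; unsigned negation also handles INT32_MIN.
      uint32_t e = exponent < 0 ? -static_cast<uint32_t>(exponent)
                                : static_cast<uint32_t>(exponent);
      double result = 1.0;
      double scratch = base;            // backed-up base, squared each step
      while (e != 0) {
        if (e & 1) result *= scratch;   // low bit set: multiply into result
        scratch *= scratch;
        e >>= 1;
      }
      return exponent < 0 ? 1.0 / result : result;
    }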
+namespace {
+
+void GenerateInternalArrayConstructorCase(MacroAssembler* masm,
+ ElementsKind kind) {
+ __ cmp(r0, Operand(1));
+
+ __ Jump(CodeFactory::InternalArrayNoArgumentConstructor(masm->isolate(), kind)
+ .code(),
+ RelocInfo::CODE_TARGET, lo);
+
+ Handle<Code> code = BUILTIN_CODE(masm->isolate(), ArrayNArgumentsConstructor);
+ __ Jump(code, RelocInfo::CODE_TARGET, hi);
+
+ if (IsFastPackedElementsKind(kind)) {
+ // We might need to create a holey array
+ // look at the first argument
+ __ ldr(r3, MemOperand(sp, 0));
+ __ cmp(r3, Operand::Zero());
+
+ __ Jump(CodeFactory::InternalArraySingleArgumentConstructor(
+ masm->isolate(), GetHoleyElementsKind(kind))
+ .code(),
+ RelocInfo::CODE_TARGET, ne);
+ }
+
+ __ Jump(
+ CodeFactory::InternalArraySingleArgumentConstructor(masm->isolate(), kind)
+ .code(),
+ RelocInfo::CODE_TARGET);
+}
+
+} // namespace
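The helper above picks a constructor stub from argc and, for packed kinds, from the single length argument. As a plain function (a sketch; the stub-returning helpers are illustrative):

    // Sketch of the argc dispatch emitted above:
    Handle<Code> PickInternalArrayStub(int argc, intptr_t first_arg,
                                       ElementsKind kind) {
      if (argc < 1) return NoArgumentConstructor(kind);      // lo branch
      if (argc > 1) return ArrayNArgumentsConstructor();     // hi branch
      if (IsFastPackedElementsKind(kind) && first_arg != 0)  // non-zero length
        return SingleArgumentConstructor(GetHoleyElementsKind(kind));
      return SingleArgumentConstructor(kind);
    }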
+
+void Builtins::Generate_InternalArrayConstructorImpl(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r0 : argc
+ // -- r1 : constructor
+ // -- sp[0] : return address
+ // -- sp[4] : last argument
+ // -----------------------------------
+
+ if (FLAG_debug_code) {
+ // The array construct code is only set for the global and natives
+ // builtin Array functions, which always have maps.
+
+ // Initial map for the builtin Array function should be a map.
+ __ ldr(r3, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
+ // A single Smi-tag test catches both a nullptr and a Smi.
+ __ tst(r3, Operand(kSmiTagMask));
+ __ Assert(ne, AbortReason::kUnexpectedInitialMapForArrayFunction);
+ __ CompareObjectType(r3, r3, r4, MAP_TYPE);
+ __ Assert(eq, AbortReason::kUnexpectedInitialMapForArrayFunction);
+ }
+
+ // Figure out the right elements kind
+ __ ldr(r3, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
+ // Load the map's "bit field 2" into r3. We only need the first byte,
+ // but the following bit field extraction takes care of that anyway.
+ __ ldr(r3, FieldMemOperand(r3, Map::kBitField2Offset));
+ // Retrieve elements_kind from bit field 2.
+ __ DecodeField<Map::ElementsKindBits>(r3);
+
+ if (FLAG_debug_code) {
+ Label done;
+ __ cmp(r3, Operand(PACKED_ELEMENTS));
+ __ b(eq, &done);
+ __ cmp(r3, Operand(HOLEY_ELEMENTS));
+ __ Assert(
+ eq,
+ AbortReason::kInvalidElementsKindForInternalArrayOrInternalPackedArray);
+ __ bind(&done);
+ }
+
+ Label fast_elements_case;
+ __ cmp(r3, Operand(PACKED_ELEMENTS));
+ __ b(eq, &fast_elements_case);
+ GenerateInternalArrayConstructorCase(masm, HOLEY_ELEMENTS);
+
+ __ bind(&fast_elements_case);
+ GenerateInternalArrayConstructorCase(masm, PACKED_ELEMENTS);
+}
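DecodeField&lt;Map::ElementsKindBits&gt; above is a plain shift-and-mask over the map's bit field 2. A minimal self-contained sketch of that decoding (the shift and width are placeholders, not the real bit positions):

    #include <cstdint>

    constexpr uint32_t kElementsKindShift = 3;  // placeholder position
    constexpr uint32_t kElementsKindWidth = 5;  // placeholder width

    uint32_t DecodeElementsKind(uint32_t bit_field2) {
      return (bit_field2 >> kElementsKindShift) &
             ((1u << kElementsKindWidth) - 1);
    }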
+
#undef __
} // namespace internal