author | Ali Ijaz Sheikh <ofrobots@google.com> | 2016-04-07 14:06:55 -0700
committer | Ali Ijaz Sheikh <ofrobots@google.com> | 2016-04-14 10:03:39 -0700
commit | 52af5c4eebf4de8638aef0338bd826656312a02a (patch)
tree | 628dc9fb0b558c3a73a2160706fef368876fe548 /deps/v8/src/arm64
parent | 6e3e8acc7cc7ebd3d67db5ade1247b8b558efe09 (diff)
deps: upgrade V8 to 5.0.71.32
* Pick up the branch head for V8 5.0 stable [1]
* Edit v8 gitignore to allow trace_event copy
* Update V8 DEP trace_event as per deps/v8/DEPS [2]
[1] https://chromium.googlesource.com/v8/v8.git/+/3c67831
[2] https://chromium.googlesource.com/chromium/src/base/trace_event/common/+/4b09207e447ae5bd34643b4c6321bee7b76d35f9
Ref: https://github.com/nodejs/node/pull/5945
PR-URL: https://github.com/nodejs/node/pull/6111
Reviewed-By: targos - Michaël Zasso <mic.besace@gmail.com>
Reviewed-By: bnoordhuis - Ben Noordhuis <info@bnoordhuis.nl>
Reviewed-By: indutny - Fedor Indutny <fedor.indutny@gmail.com>
Diffstat (limited to 'deps/v8/src/arm64')
-rw-r--r-- | deps/v8/src/arm64/assembler-arm64-inl.h | 22
-rw-r--r-- | deps/v8/src/arm64/assembler-arm64.h | 4
-rw-r--r-- | deps/v8/src/arm64/builtins-arm64.cc | 638
-rw-r--r-- | deps/v8/src/arm64/code-stubs-arm64.cc | 1414
-rw-r--r-- | deps/v8/src/arm64/cpu-arm64.cc | 5
-rw-r--r-- | deps/v8/src/arm64/deoptimizer-arm64.cc | 25
-rw-r--r-- | deps/v8/src/arm64/interface-descriptors-arm64.cc | 64
-rw-r--r-- | deps/v8/src/arm64/macro-assembler-arm64.cc | 133
-rw-r--r-- | deps/v8/src/arm64/macro-assembler-arm64.h | 28
-rw-r--r-- | deps/v8/src/arm64/simulator-arm64.cc | 61
-rw-r--r-- | deps/v8/src/arm64/utils-arm64.h | 13
11 files changed, 1388 insertions(+), 1019 deletions(-)
diff --git a/deps/v8/src/arm64/assembler-arm64-inl.h b/deps/v8/src/arm64/assembler-arm64-inl.h index d7769791ef..aeca563c37 100644 --- a/deps/v8/src/arm64/assembler-arm64-inl.h +++ b/deps/v8/src/arm64/assembler-arm64-inl.h @@ -731,8 +731,8 @@ void RelocInfo::set_target_object(Object* target, if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != NULL && target->IsHeapObject()) { - host()->GetHeap()->incremental_marking()->RecordWrite( - host(), &Memory::Object_at(pc_), HeapObject::cast(target)); + host()->GetHeap()->incremental_marking()->RecordWriteIntoCode( + host(), this, HeapObject::cast(target)); } } @@ -853,24 +853,6 @@ void RelocInfo::WipeOut() { } -bool RelocInfo::IsPatchedReturnSequence() { - // The sequence must be: - // ldr ip0, [pc, #offset] - // blr ip0 - // See arm64/debug-arm64.cc DebugCodegen::PatchDebugBreakSlot - Instruction* i1 = reinterpret_cast<Instruction*>(pc_); - Instruction* i2 = i1->following(); - return i1->IsLdrLiteralX() && (i1->Rt() == kIp0Code) && - i2->IsBranchAndLinkToRegister() && (i2->Rn() == kIp0Code); -} - - -bool RelocInfo::IsPatchedDebugBreakSlotSequence() { - Instruction* current_instr = reinterpret_cast<Instruction*>(pc_); - return !current_instr->IsNop(Assembler::DEBUG_BREAK_NOP); -} - - void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) { RelocInfo::Mode mode = rmode(); if (mode == RelocInfo::EMBEDDED_OBJECT) { diff --git a/deps/v8/src/arm64/assembler-arm64.h b/deps/v8/src/arm64/assembler-arm64.h index 5854704b68..47786eb710 100644 --- a/deps/v8/src/arm64/assembler-arm64.h +++ b/deps/v8/src/arm64/assembler-arm64.h @@ -369,6 +369,8 @@ bool AreSameSizeAndType(const CPURegister& reg1, typedef FPRegister DoubleRegister; +// TODO(arm64) Define SIMD registers. +typedef FPRegister Simd128Register; // ----------------------------------------------------------------------------- // Lists of registers. @@ -925,7 +927,7 @@ class Assembler : public AssemblerBase { // Record a deoptimization reason that can be used by a log or cpu profiler. // Use --trace-deopt to enable. - void RecordDeoptReason(const int reason, const SourcePosition position); + void RecordDeoptReason(const int reason, int raw_position); int buffer_space() const; diff --git a/deps/v8/src/arm64/builtins-arm64.cc b/deps/v8/src/arm64/builtins-arm64.cc index b6bae4ad0e..11f66a4ef4 100644 --- a/deps/v8/src/arm64/builtins-arm64.cc +++ b/deps/v8/src/arm64/builtins-arm64.cc @@ -138,6 +138,97 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) { // static +void Builtins::Generate_MathMaxMin(MacroAssembler* masm, MathMaxMinKind kind) { + // ----------- S t a t e ------------- + // -- x0 : number of arguments + // -- lr : return address + // -- sp[(argc - n) * 8] : arg[n] (zero-based) + // -- sp[(argc + 1) * 8] : receiver + // ----------------------------------- + ASM_LOCATION("Builtins::Generate_MathMaxMin"); + + Heap::RootListIndex const root_index = + (kind == MathMaxMinKind::kMin) ? Heap::kInfinityValueRootIndex + : Heap::kMinusInfinityValueRootIndex; + + // Load the accumulator with the default return value (either -Infinity or + // +Infinity), with the tagged value in x1 and the double value in d1. + __ LoadRoot(x1, root_index); + __ Ldr(d1, FieldMemOperand(x1, HeapNumber::kValueOffset)); + + // Remember how many slots to drop (including the receiver). + __ Add(x4, x0, 1); + + Label done_loop, loop; + __ Bind(&loop); + { + // Check if all parameters done. + __ Subs(x0, x0, 1); + __ B(lt, &done_loop); + + // Load the next parameter tagged value into x2. 
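// A minimal C++ sketch of the scalar algorithm this loop implements, with
// Smi/HeapNumber boxing elided. One hedge: the arm64 Fmin/Fmax instructions
// used below propagate NaN and order +/-0.0 the way JavaScript requires,
// which std::min/std::max do not, so those cases are glossed over here (the
// helper name is illustrative, not part of V8):

#include <algorithm>
#include <cmath>

double MathMaxMinSketch(const double* args, int argc, bool is_min) {
  // Default accumulator: +Infinity for Math.min, -Infinity for Math.max.
  double acc = is_min ? INFINITY : -INFINITY;
  for (int i = argc - 1; i >= 0; --i) {  // walk arguments from top of stack
    acc = is_min ? std::min(acc, args[i]) : std::max(acc, args[i]);
  }
  return acc;
}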
+ __ Peek(x2, Operand(x0, LSL, kPointerSizeLog2)); + + // Load the double value of the parameter into d2, maybe converting the + // parameter to a number first using the ToNumberStub if necessary. + Label convert_smi, convert_number, done_convert; + __ JumpIfSmi(x2, &convert_smi); + __ JumpIfHeapNumber(x2, &convert_number); + { + // Parameter is not a Number, use the ToNumberStub to convert it. + FrameScope scope(masm, StackFrame::INTERNAL); + __ SmiTag(x0); + __ SmiTag(x4); + __ Push(x0, x1, x4); + __ Mov(x0, x2); + ToNumberStub stub(masm->isolate()); + __ CallStub(&stub); + __ Mov(x2, x0); + __ Pop(x4, x1, x0); + { + // Restore the double accumulator value (d1). + Label done_restore; + __ SmiUntagToDouble(d1, x1, kSpeculativeUntag); + __ JumpIfSmi(x1, &done_restore); + __ Ldr(d1, FieldMemOperand(x1, HeapNumber::kValueOffset)); + __ Bind(&done_restore); + } + __ SmiUntag(x4); + __ SmiUntag(x0); + } + __ AssertNumber(x2); + __ JumpIfSmi(x2, &convert_smi); + + __ Bind(&convert_number); + __ Ldr(d2, FieldMemOperand(x2, HeapNumber::kValueOffset)); + __ B(&done_convert); + + __ Bind(&convert_smi); + __ SmiUntagToDouble(d2, x2); + __ Bind(&done_convert); + + // We can use a single fmin/fmax for the operation itself, but we then need + // to work out which HeapNumber (or smi) the result came from. + __ Fmov(x11, d1); + if (kind == MathMaxMinKind::kMin) { + __ Fmin(d1, d1, d2); + } else { + DCHECK(kind == MathMaxMinKind::kMax); + __ Fmax(d1, d1, d2); + } + __ Fmov(x10, d1); + __ Cmp(x10, x11); + __ Csel(x1, x1, x2, eq); + __ B(&loop); + } + + __ Bind(&done_loop); + __ Mov(x0, x1); + __ Drop(x4); + __ Ret(); +} + +// static void Builtins::Generate_NumberConstructor(MacroAssembler* masm) { // ----------- S t a t e ------------- // -- x0 : number of arguments @@ -229,8 +320,9 @@ void Builtins::Generate_NumberConstructor_ConstructStub(MacroAssembler* masm) { __ bind(&new_object); { FrameScope scope(masm, StackFrame::INTERNAL); - __ Push(x2, x1, x3); // first argument, constructor, new target - __ CallRuntime(Runtime::kNewObject); + __ Push(x2); // first argument + FastNewObjectStub stub(masm->isolate()); + __ CallStub(&stub); __ Pop(x2); } __ Str(x2, FieldMemOperand(x0, JSValue::kValueOffset)); @@ -356,48 +448,49 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) { __ bind(&new_object); { FrameScope scope(masm, StackFrame::INTERNAL); - __ Push(x2, x1, x3); // first argument, constructor, new target - __ CallRuntime(Runtime::kNewObject); + __ Push(x2); // first argument + FastNewObjectStub stub(masm->isolate()); + __ CallStub(&stub); __ Pop(x2); } __ Str(x2, FieldMemOperand(x0, JSValue::kValueOffset)); __ Ret(); } +static void GenerateTailCallToSharedCode(MacroAssembler* masm) { + __ Ldr(x2, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset)); + __ Ldr(x2, FieldMemOperand(x2, SharedFunctionInfo::kCodeOffset)); + __ Add(x2, x2, Code::kHeaderSize - kHeapObjectTag); + __ Br(x2); +} -static void CallRuntimePassFunction(MacroAssembler* masm, - Runtime::FunctionId function_id) { +static void GenerateTailCallToReturnedCode(MacroAssembler* masm, + Runtime::FunctionId function_id) { // ----------- S t a t e ------------- + // -- x0 : argument count (preserved for callee) // -- x1 : target function (preserved for callee) // -- x3 : new target (preserved for callee) // ----------------------------------- + { + FrameScope scope(masm, StackFrame::INTERNAL); + // Push a copy of the target function and the new target. + // Push another copy as a parameter to the runtime call. 
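// The register-preservation protocol around the runtime call below, sketched
// in C++ with a toy stack; FrameSketch and the helper are hypothetical
// stand-ins for the machine stack, and 64-bit words are assumed, as on arm64:

#include <cstdint>

struct FrameSketch {
  intptr_t slots[8];
  int top = 0;
  void push(intptr_t v) { slots[top++] = v; }
  intptr_t pop() { return slots[--top]; }
};

intptr_t TailCallProtocolSketch(FrameSketch& f, intptr_t& argc,
                                intptr_t& target, intptr_t& new_target,
                                intptr_t (*call_runtime)(intptr_t)) {
  f.push(argc << 32);                     // SmiTag(x0): GC only sees tagged slots
  f.push(target);                         // preserved copy of x1
  f.push(new_target);                     // preserved copy of x3
  intptr_t code = call_runtime(target);   // the second x1 copy is the argument
  new_target = f.pop();                   // restore x3
  target = f.pop();                       // restore x1
  argc = f.pop() >> 32;                   // SmiUntag(x0)
  return code;                            // caller then jumps past Code::kHeaderSize
}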
+ __ SmiTag(x0); + __ Push(x0, x1, x3, x1); - FrameScope scope(masm, StackFrame::INTERNAL); - // Push a copy of the target function and the new target. - // Push another copy as a parameter to the runtime call. - __ Push(x1, x3, x1); - - __ CallRuntime(function_id, 1); - - // Restore target function and new target. - __ Pop(x3, x1); -} + __ CallRuntime(function_id, 1); + __ Move(x2, x0); + // Restore target function and new target. + __ Pop(x3, x1, x0); + __ SmiUntag(x0); + } -static void GenerateTailCallToSharedCode(MacroAssembler* masm) { - __ Ldr(x2, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset)); - __ Ldr(x2, FieldMemOperand(x2, SharedFunctionInfo::kCodeOffset)); __ Add(x2, x2, Code::kHeaderSize - kHeapObjectTag); __ Br(x2); } -static void GenerateTailCallToReturnedCode(MacroAssembler* masm) { - __ Add(x0, x0, Code::kHeaderSize - kHeapObjectTag); - __ Br(x0); -} - - void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) { // Checking whether the queued function is ready for install is optional, // since we come across interrupts and stack checks elsewhere. However, not @@ -408,8 +501,7 @@ void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) { __ CompareRoot(masm->StackPointer(), Heap::kStackLimitRootIndex); __ B(hs, &ok); - CallRuntimePassFunction(masm, Runtime::kTryInstallOptimizedCode); - GenerateTailCallToReturnedCode(masm); + GenerateTailCallToReturnedCode(masm, Runtime::kTryInstallOptimizedCode); __ Bind(&ok); GenerateTailCallToSharedCode(masm); @@ -418,7 +510,8 @@ void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) { static void Generate_JSConstructStubHelper(MacroAssembler* masm, bool is_api_function, - bool create_implicit_receiver) { + bool create_implicit_receiver, + bool check_derived_construct) { // ----------- S t a t e ------------- // -- x0 : number of arguments // -- x1 : constructor function @@ -448,148 +541,18 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm, __ Push(allocation_site, argc); if (create_implicit_receiver) { - // sp[0]: new.target - // sp[1]: Constructor function. - // sp[2]: number of arguments (smi-tagged) - // sp[3]: allocation site - // Try to allocate the object without transitioning into C code. If any of - // the preconditions is not met, the code bails out to the runtime call. - Label rt_call, allocated; - if (FLAG_inline_new) { - // Verify that the new target is a JSFunction. - __ JumpIfNotObjectType(new_target, x10, x11, JS_FUNCTION_TYPE, - &rt_call); - - // Load the initial map and verify that it is in fact a map. - Register init_map = x2; - __ Ldr(init_map, - FieldMemOperand(new_target, - JSFunction::kPrototypeOrInitialMapOffset)); - __ JumpIfSmi(init_map, &rt_call); - __ JumpIfNotObjectType(init_map, x10, x11, MAP_TYPE, &rt_call); - - // Fall back to runtime if the expected base constructor and base - // constructor differ. - __ Ldr(x10, - FieldMemOperand(init_map, Map::kConstructorOrBackPointerOffset)); - __ Cmp(constructor, x10); - __ B(ne, &rt_call); - - // Check that the constructor is not constructing a JSFunction (see - // comments in Runtime_NewObject in runtime.cc). In which case the - // initial - // map's instance type would be JS_FUNCTION_TYPE. - __ CompareInstanceType(init_map, x10, JS_FUNCTION_TYPE); - __ B(eq, &rt_call); - - // Now allocate the JSObject on the heap. - Register obj_size = x10; - Register new_obj = x4; - Register next_obj = obj_size; // May overlap. 
- __ Ldrb(obj_size, FieldMemOperand(init_map, Map::kInstanceSizeOffset)); - __ Allocate(obj_size, new_obj, next_obj, x11, &rt_call, SIZE_IN_WORDS); - - // Allocated the JSObject, now initialize the fields. Map is set to - // initial map and properties and elements are set to empty fixed array. - // NB. the object pointer is not tagged, so MemOperand is used. - Register write_address = x5; - Register empty = x7; - __ Mov(write_address, new_obj); - __ LoadRoot(empty, Heap::kEmptyFixedArrayRootIndex); - STATIC_ASSERT(0 * kPointerSize == JSObject::kMapOffset); - __ Str(init_map, MemOperand(write_address, kPointerSize, PostIndex)); - STATIC_ASSERT(1 * kPointerSize == JSObject::kPropertiesOffset); - STATIC_ASSERT(2 * kPointerSize == JSObject::kElementsOffset); - __ Stp(empty, empty, - MemOperand(write_address, 2 * kPointerSize, PostIndex)); - STATIC_ASSERT(3 * kPointerSize == JSObject::kHeaderSize); - - // Add the object tag to make the JSObject real, so that we can continue - // and jump into the continuation code at any time from now on. - __ Add(new_obj, new_obj, kHeapObjectTag); - - // Fill all of the in-object properties with the appropriate filler. - Register filler = x7; - __ LoadRoot(filler, Heap::kUndefinedValueRootIndex); - - if (!is_api_function) { - Label no_inobject_slack_tracking; - - Register constructon_count = x14; - MemOperand bit_field3 = - FieldMemOperand(init_map, Map::kBitField3Offset); - // Check if slack tracking is enabled. - __ Ldr(x11, bit_field3); - __ DecodeField<Map::ConstructionCounter>(constructon_count, x11); - __ Cmp(constructon_count, Operand(Map::kSlackTrackingCounterEnd)); - __ B(lt, &no_inobject_slack_tracking); - // Decrease generous allocation count. - __ Subs(x11, x11, Operand(1 << Map::ConstructionCounter::kShift)); - __ Str(x11, bit_field3); - - // Allocate object with a slack. - Register unused_props = x11; - __ Ldr(unused_props, - FieldMemOperand(init_map, Map::kInstanceAttributesOffset)); - __ Ubfx(unused_props, unused_props, - Map::kUnusedPropertyFieldsByte * kBitsPerByte, kBitsPerByte); - - Register end_of_pre_allocated = x11; - __ Sub(end_of_pre_allocated, next_obj, - Operand(unused_props, LSL, kPointerSizeLog2)); - unused_props = NoReg; - - if (FLAG_debug_code) { - __ Cmp(write_address, end_of_pre_allocated); - __ Assert(le, kUnexpectedNumberOfPreAllocatedPropertyFields); - } - - // Fill the pre-allocated fields with undef. - __ InitializeFieldsWithFiller(write_address, end_of_pre_allocated, - filler); - - // Fill the remaining fields with one pointer filler map. - __ LoadRoot(filler, Heap::kOnePointerFillerMapRootIndex); - __ InitializeFieldsWithFiller(write_address, next_obj, filler); - - __ Cmp(constructon_count, Operand(Map::kSlackTrackingCounterEnd)); - __ B(ne, &allocated); - - // Push the constructor, new_target and the object to the stack, - // and then the initial map as an argument to the runtime call. - __ Push(constructor, new_target, new_obj, init_map); - __ CallRuntime(Runtime::kFinalizeInstanceSize); - __ Pop(new_obj, new_target, constructor); - - // Continue with JSObject being successfully allocated. - __ B(&allocated); - - __ bind(&no_inobject_slack_tracking); - } - - __ InitializeFieldsWithFiller(write_address, next_obj, filler); - - // Continue with JSObject being successfully allocated. - __ B(&allocated); - } - - // Allocate the new receiver object using the runtime call. 
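// What the removed inline path above did once its map checks passed, as a
// plain C++ sketch (word offsets as in the STATIC_ASSERTs; slack-tracking
// filler handling elided). After this change the allocation goes through
// FastNewObjectStub instead, as the replacement code shows:

void InitializeJSObjectSketch(intptr_t* obj, intptr_t map,
                              intptr_t empty_fixed_array, intptr_t undefined,
                              int size_in_words) {
  obj[0] = map;                // JSObject::kMapOffset
  obj[1] = empty_fixed_array;  // JSObject::kPropertiesOffset
  obj[2] = empty_fixed_array;  // JSObject::kElementsOffset
  for (int i = 3; i < size_in_words; ++i) {
    obj[i] = undefined;        // fill in-object properties with the filler
  }
  // The untagged pointer then had kHeapObjectTag added to become a real
  // JSObject reference.
}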
- // x1: constructor function - // x3: new target - __ Bind(&rt_call); - - // Push the constructor and new_target twice, second pair as arguments - // to the runtime call. - __ Push(constructor, new_target, constructor, new_target); - __ CallRuntime(Runtime::kNewObject); + // Allocate the new receiver object. + __ Push(constructor, new_target); + FastNewObjectStub stub(masm->isolate()); + __ CallStub(&stub); __ Mov(x4, x0); __ Pop(new_target, constructor); - // Receiver for constructor call allocated. - // x1: constructor function - // x3: new target - // x4: JSObject - __ Bind(&allocated); + // ----------- S t a t e ------------- + // -- x1: constructor function + // -- x3: new target + // -- x4: newly allocated object + // ----------------------------------- // Reload the number of arguments from the stack. // Set it up in x0 for the function call below. @@ -697,6 +660,19 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm, // Leave construct frame. } + // ES6 9.2.2. Step 13+ + // Check that the result is not a Smi, indicating that the constructor result + // from a derived class is neither undefined nor an Object. + if (check_derived_construct) { + Label dont_throw; + __ JumpIfNotSmi(x0, &dont_throw); + { + FrameScope scope(masm, StackFrame::INTERNAL); + __ CallRuntime(Runtime::kThrowDerivedConstructorReturnedNonObject); + } + __ Bind(&dont_throw); + } + __ DropBySMI(x1); __ Drop(1); if (create_implicit_receiver) { @@ -707,17 +683,23 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm, void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) { - Generate_JSConstructStubHelper(masm, false, true); + Generate_JSConstructStubHelper(masm, false, true, false); } void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) { - Generate_JSConstructStubHelper(masm, true, true); + Generate_JSConstructStubHelper(masm, true, false, false); } void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) { - Generate_JSConstructStubHelper(masm, false, false); + Generate_JSConstructStubHelper(masm, false, false, false); +} + + +void Builtins::Generate_JSBuiltinsConstructStubForDerived( + MacroAssembler* masm) { + Generate_JSConstructStubHelper(masm, false, false, true); } @@ -877,10 +859,8 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) { // - jssp: stack pointer. // - lr: return address. // -// The function builds a JS frame. Please see JavaScriptFrameConstants in -// frames-arm64.h for its layout. -// TODO(rmcilroy): We will need to include the current bytecode pointer in the -// frame. +// The function builds an interpreter frame. See InterpreterFrameConstants in +// frames.h for its layout. void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) { // Open a frame scope to indicate that there is a frame on the stack. The // MANUAL indicates that the scope shouldn't actually generate code to set up @@ -888,17 +868,19 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) { FrameScope frame_scope(masm, StackFrame::MANUAL); __ Push(lr, fp, cp, x1); __ Add(fp, jssp, StandardFrameConstants::kFixedFrameSizeFromFp); - __ Push(x3); - - // Push zero for bytecode array offset. - __ Mov(x0, Operand(0)); - __ Push(x0); // Get the bytecode array from the function object and load the pointer to the // first entry into kInterpreterBytecodeRegister. 
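// The load sequence that follows implements this lookup; a sketch with toy
// structs (the real fields are SharedFunctionInfo::kDebugInfoOffset,
// kFunctionDataOffset and DebugInfo::kAbstractCodeIndex, loaded below):

#include <cstdint>

struct DebugInfoSketch { intptr_t debug_bytecode_array; };
struct SharedInfoSketch {
  DebugInfoSketch* debug_info;  // null stands in for DebugInfo::uninitialized()
  intptr_t function_data;       // the BytecodeArray for interpreted functions
};

intptr_t GetBytecodeArraySketch(const SharedInfoSketch& shared) {
  if (shared.debug_info != nullptr) {
    // Debugger attached: execute the instrumented debug copy instead.
    return shared.debug_info->debug_bytecode_array;
  }
  return shared.function_data;  // normal case
}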
__ Ldr(x0, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset)); + Register debug_info = kInterpreterBytecodeArrayRegister; + Label load_debug_bytecode_array, bytecode_array_loaded; + DCHECK(!debug_info.is(x0)); + __ Ldr(debug_info, FieldMemOperand(x0, SharedFunctionInfo::kDebugInfoOffset)); + __ Cmp(debug_info, Operand(DebugInfo::uninitialized())); + __ B(ne, &load_debug_bytecode_array); __ Ldr(kInterpreterBytecodeArrayRegister, FieldMemOperand(x0, SharedFunctionInfo::kFunctionDataOffset)); + __ Bind(&bytecode_array_loaded); if (FLAG_debug_code) { // Check function data field is actually a BytecodeArray object. @@ -909,6 +891,10 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) { __ Assert(eq, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry); } + // Push new.target, bytecode array and zero for bytecode array offset. + __ Mov(x0, Operand(0)); + __ Push(x3, kInterpreterBytecodeArrayRegister, x0); + // Allocate the local and temporary register file on the stack. { // Load frame size from the BytecodeArray object. @@ -938,22 +924,9 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) { // TODO(rmcilroy): List of things not currently dealt with here but done in // fullcodegen's prologue: - // - Support profiler (specifically profiling_counter). // - Call ProfileEntryHookStub when isolate has a function_entry_hook. - // - Allow simulator stop operations if FLAG_stop_at is set. // - Code aging of the BytecodeArray object. - // Perform stack guard check. - { - Label ok; - __ CompareRoot(jssp, Heap::kStackLimitRootIndex); - __ B(hs, &ok); - __ Push(kInterpreterBytecodeArrayRegister); - __ CallRuntime(Runtime::kStackGuard); - __ Pop(kInterpreterBytecodeArrayRegister); - __ Bind(&ok); - } - // Load accumulator, register file, bytecode offset, dispatch table into // registers. __ LoadRoot(kInterpreterAccumulatorRegister, Heap::kUndefinedValueRootIndex); @@ -961,10 +934,9 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) { Operand(InterpreterFrameConstants::kRegisterFilePointerFromFp)); __ Mov(kInterpreterBytecodeOffsetRegister, Operand(BytecodeArray::kHeaderSize - kHeapObjectTag)); - __ LoadRoot(kInterpreterDispatchTableRegister, - Heap::kInterpreterTableRootIndex); - __ Add(kInterpreterDispatchTableRegister, kInterpreterDispatchTableRegister, - Operand(FixedArray::kHeaderSize - kHeapObjectTag)); + __ Mov(kInterpreterDispatchTableRegister, + Operand(ExternalReference::interpreter_dispatch_table_address( + masm->isolate()))); // Dispatch to the first bytecode handler for the function. __ Ldrb(x1, MemOperand(kInterpreterBytecodeArrayRegister, @@ -975,6 +947,15 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) { // and header removal. __ Add(ip0, ip0, Operand(Code::kHeaderSize - kHeapObjectTag)); __ Call(ip0); + + // Even though the first bytecode handler was called, we will never return. + __ Abort(kUnexpectedReturnFromBytecodeHandler); + + // Load debug copy of the bytecode array. + __ Bind(&load_debug_bytecode_array); + __ Ldr(kInterpreterBytecodeArrayRegister, + FieldMemOperand(debug_info, DebugInfo::kAbstractCodeIndex)); + __ B(&bytecode_array_loaded); } @@ -998,47 +979,24 @@ void Builtins::Generate_InterpreterExitTrampoline(MacroAssembler* masm) { } -static void Generate_InterpreterNotifyDeoptimizedHelper( - MacroAssembler* masm, Deoptimizer::BailoutType type) { - // Enter an internal frame. 
- { - FrameScope scope(masm, StackFrame::INTERNAL); - __ Push(kInterpreterAccumulatorRegister); // Save accumulator register. - - // Pass the deoptimization type to the runtime system. - __ Mov(x1, Operand(Smi::FromInt(static_cast<int>(type)))); - __ Push(x1); - __ CallRuntime(Runtime::kNotifyDeoptimized); - - __ Pop(kInterpreterAccumulatorRegister); // Restore accumulator register. - // Tear down internal frame. - } - - // Drop state (we don't use this for interpreter deopts). - __ Drop(1); - +static void Generate_EnterBytecodeDispatch(MacroAssembler* masm) { // Initialize register file register and dispatch table register. __ Add(kInterpreterRegisterFileRegister, fp, Operand(InterpreterFrameConstants::kRegisterFilePointerFromFp)); - __ LoadRoot(kInterpreterDispatchTableRegister, - Heap::kInterpreterTableRootIndex); - __ Add(kInterpreterDispatchTableRegister, kInterpreterDispatchTableRegister, - Operand(FixedArray::kHeaderSize - kHeapObjectTag)); + __ Mov(kInterpreterDispatchTableRegister, + Operand(ExternalReference::interpreter_dispatch_table_address( + masm->isolate()))); // Get the context from the frame. - // TODO(rmcilroy): Update interpreter frame to expect current context at the - // context slot instead of the function context. __ Ldr(kContextRegister, MemOperand(kInterpreterRegisterFileRegister, InterpreterFrameConstants::kContextFromRegisterPointer)); // Get the bytecode array pointer from the frame. - __ Ldr(x1, - MemOperand(kInterpreterRegisterFileRegister, - InterpreterFrameConstants::kFunctionFromRegisterPointer)); - __ Ldr(x1, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset)); - __ Ldr(kInterpreterBytecodeArrayRegister, - FieldMemOperand(x1, SharedFunctionInfo::kFunctionDataOffset)); + __ Ldr( + kInterpreterBytecodeArrayRegister, + MemOperand(kInterpreterRegisterFileRegister, + InterpreterFrameConstants::kBytecodeArrayFromRegisterPointer)); if (FLAG_debug_code) { // Check function data field is actually a BytecodeArray object. @@ -1066,6 +1024,29 @@ static void Generate_InterpreterNotifyDeoptimizedHelper( } +static void Generate_InterpreterNotifyDeoptimizedHelper( + MacroAssembler* masm, Deoptimizer::BailoutType type) { + // Enter an internal frame. + { + FrameScope scope(masm, StackFrame::INTERNAL); + + // Pass the deoptimization type to the runtime system. + __ Mov(x1, Operand(Smi::FromInt(static_cast<int>(type)))); + __ Push(x1); + __ CallRuntime(Runtime::kNotifyDeoptimized); + // Tear down internal frame. + } + + // Drop state (we don't use these for interpreter deopts) and and pop the + // accumulator value into the accumulator register. + __ Drop(1); + __ Pop(kInterpreterAccumulatorRegister); + + // Enter the bytecode dispatch. + Generate_EnterBytecodeDispatch(masm); +} + + void Builtins::Generate_InterpreterNotifyDeoptimized(MacroAssembler* masm) { Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::EAGER); } @@ -1080,22 +1061,30 @@ void Builtins::Generate_InterpreterNotifyLazyDeoptimized(MacroAssembler* masm) { Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::LAZY); } +void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) { + // Set the address of the interpreter entry trampoline as a return address. + // This simulates the initial call to bytecode handlers in interpreter entry + // trampoline. The return will never actually be taken, but our stack walker + // uses this address to determine whether a frame is interpreted. 
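// How the walker's test can be pictured, as a sketch (the function is
// hypothetical; containment in the trampoline's Code range is the real
// mechanism):

#include <cstdint>

bool IsInterpretedFrameSketch(uintptr_t return_address,
                              uintptr_t trampoline_start,
                              uintptr_t trampoline_end) {
  // Every interpreted frame "returns" into the entry trampoline, so a saved
  // return address inside it marks the frame as interpreted.
  return return_address >= trampoline_start && return_address < trampoline_end;
}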
+ __ LoadObject(lr, masm->isolate()->builtins()->InterpreterEntryTrampoline()); + + Generate_EnterBytecodeDispatch(masm); +} + void Builtins::Generate_CompileLazy(MacroAssembler* masm) { - CallRuntimePassFunction(masm, Runtime::kCompileLazy); - GenerateTailCallToReturnedCode(masm); + GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy); } void Builtins::Generate_CompileOptimized(MacroAssembler* masm) { - CallRuntimePassFunction(masm, Runtime::kCompileOptimized_NotConcurrent); - GenerateTailCallToReturnedCode(masm); + GenerateTailCallToReturnedCode(masm, + Runtime::kCompileOptimized_NotConcurrent); } void Builtins::Generate_CompileOptimizedConcurrent(MacroAssembler* masm) { - CallRuntimePassFunction(masm, Runtime::kCompileOptimized_Concurrent); - GenerateTailCallToReturnedCode(masm); + GenerateTailCallToReturnedCode(masm, Runtime::kCompileOptimized_Concurrent); } @@ -1321,14 +1310,11 @@ static void CompatibleReceiverCheck(MacroAssembler* masm, Register receiver, // Load the next prototype. __ Bind(&next_prototype); - __ Ldr(receiver, FieldMemOperand(map, Map::kPrototypeOffset)); - // End if the prototype is null or not hidden. - __ CompareRoot(receiver, Heap::kNullValueRootIndex); - __ B(eq, receiver_check_failed); - __ Ldr(map, FieldMemOperand(receiver, HeapObject::kMapOffset)); __ Ldr(x16, FieldMemOperand(map, Map::kBitField3Offset)); - __ Tst(x16, Operand(Map::IsHiddenPrototype::kMask)); + __ Tst(x16, Operand(Map::HasHiddenPrototype::kMask)); __ B(eq, receiver_check_failed); + __ Ldr(receiver, FieldMemOperand(map, Map::kPrototypeOffset)); + __ Ldr(map, FieldMemOperand(receiver, HeapObject::kMapOffset)); // Iterate. __ B(&prototype_loop_start); @@ -1868,10 +1854,8 @@ void Builtins::Generate_Apply(MacroAssembler* masm) { // Try to create the list from an arguments object. __ Bind(&create_arguments); - __ Ldrsw(len, UntagSmiFieldMemOperand( - arguments_list, - JSObject::kHeaderSize + - Heap::kArgumentsLengthIndex * kPointerSize)); + __ Ldrsw(len, UntagSmiFieldMemOperand(arguments_list, + JSArgumentsObject::kLengthOffset)); __ Ldr(x10, FieldMemOperand(arguments_list, JSObject::kElementsOffset)); __ Ldrsw(x11, UntagSmiFieldMemOperand(x10, FixedArray::kLengthOffset)); __ CompareAndBranch(len, x11, ne, &create_runtime); @@ -1953,10 +1937,136 @@ void Builtins::Generate_Apply(MacroAssembler* masm) { } } +namespace { + +// Drops top JavaScript frame and an arguments adaptor frame below it (if +// present) preserving all the arguments prepared for current call. +// Does nothing if debugger is currently active. +// ES6 14.6.3. PrepareForTailCall +// +// Stack structure for the function g() tail calling f(): +// +// ------- Caller frame: ------- +// | ... +// | g()'s arg M +// | ... +// | g()'s arg 1 +// | g()'s receiver arg +// | g()'s caller pc +// ------- g()'s frame: ------- +// | g()'s caller fp <- fp +// | g()'s context +// | function pointer: g +// | ------------------------- +// | ... +// | ... +// | f()'s arg N +// | ... +// | f()'s arg 1 +// | f()'s receiver arg <- sp (f()'s caller pc is not on the stack yet!) +// ---------------------- +// +void PrepareForTailCall(MacroAssembler* masm, Register args_reg, + Register scratch1, Register scratch2, + Register scratch3) { + DCHECK(!AreAliased(args_reg, scratch1, scratch2, scratch3)); + Comment cmnt(masm, "[ PrepareForTailCall"); + + // Prepare for tail call only if the debugger is not active. 
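// In outline, the code below does the following (a sketch; the helper is
// hypothetical and stands in for the MemOperand loads):
//   1. Bail out if the debugger is active: it must still be able to inspect
//      the frames we would otherwise drop.
//   2. If the current frame is a STUB frame (interpreter handler), unlink it.
//   3. If the caller is an arguments adaptor frame, unlink it too and use its
//      actual argument count; otherwise use the callee's formal count.
//   4. Slide the freshly pushed arguments (receiver included) down over the
//      dropped frames; see the copy loop further below.

int DestinationSlotCountSketch(bool caller_is_adaptor, int adaptor_argc,
                               int formal_parameter_count) {
  // One extra slot for the receiver, which formal counts exclude.
  return (caller_is_adaptor ? adaptor_argc : formal_parameter_count) + 1;
}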
+ Label done; + ExternalReference debug_is_active = + ExternalReference::debug_is_active_address(masm->isolate()); + __ Mov(scratch1, Operand(debug_is_active)); + __ Ldrb(scratch1, MemOperand(scratch1)); + __ Cmp(scratch1, Operand(0)); + __ B(ne, &done); + + // Drop possible interpreter handler/stub frame. + { + Label no_interpreter_frame; + __ Ldr(scratch3, MemOperand(fp, StandardFrameConstants::kMarkerOffset)); + __ Cmp(scratch3, Operand(Smi::FromInt(StackFrame::STUB))); + __ B(ne, &no_interpreter_frame); + __ Ldr(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); + __ bind(&no_interpreter_frame); + } + + // Check if next frame is an arguments adaptor frame. + Label no_arguments_adaptor, formal_parameter_count_loaded; + __ Ldr(scratch2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); + __ Ldr(scratch3, + MemOperand(scratch2, StandardFrameConstants::kContextOffset)); + __ Cmp(scratch3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); + __ B(ne, &no_arguments_adaptor); + + // Drop arguments adaptor frame and load arguments count. + __ mov(fp, scratch2); + __ Ldr(scratch1, + MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset)); + __ SmiUntag(scratch1); + __ B(&formal_parameter_count_loaded); + + __ bind(&no_arguments_adaptor); + // Load caller's formal parameter count + __ Ldr(scratch1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset)); + __ Ldr(scratch1, + FieldMemOperand(scratch1, JSFunction::kSharedFunctionInfoOffset)); + __ Ldrsw(scratch1, + FieldMemOperand(scratch1, + SharedFunctionInfo::kFormalParameterCountOffset)); + __ bind(&formal_parameter_count_loaded); + + // Calculate the end of destination area where we will put the arguments + // after we drop current frame. We add kPointerSize to count the receiver + // argument which is not included into formal parameters count. + Register dst_reg = scratch2; + __ add(dst_reg, fp, Operand(scratch1, LSL, kPointerSizeLog2)); + __ add(dst_reg, dst_reg, + Operand(StandardFrameConstants::kCallerSPOffset + kPointerSize)); + + Register src_reg = scratch1; + __ add(src_reg, jssp, Operand(args_reg, LSL, kPointerSizeLog2)); + // Count receiver argument as well (not included in args_reg). + __ add(src_reg, src_reg, Operand(kPointerSize)); + + if (FLAG_debug_code) { + __ Cmp(src_reg, dst_reg); + __ Check(lo, kStackAccessBelowStackPointer); + } + + // Restore caller's frame pointer and return address now as they will be + // overwritten by the copying loop. + __ Ldr(lr, MemOperand(fp, StandardFrameConstants::kCallerPCOffset)); + __ Ldr(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); + + // Now copy callee arguments to the caller frame going backwards to avoid + // callee arguments corruption (source and destination areas could overlap). + + // Both src_reg and dst_reg are pointing to the word after the one to copy, + // so they must be pre-decremented in the loop. + Register tmp_reg = scratch3; + Label loop, entry; + __ B(&entry); + __ bind(&loop); + __ Ldr(tmp_reg, MemOperand(src_reg, -kPointerSize, PreIndex)); + __ Str(tmp_reg, MemOperand(dst_reg, -kPointerSize, PreIndex)); + __ bind(&entry); + __ Cmp(jssp, src_reg); + __ B(ne, &loop); + + // Leave current frame. 
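// The backwards copy above is the classic overlapping-regions move; in
// portable C++ the equivalent is simply (sketch):

#include <cstring>

void SlideArgumentsSketch(void* dst, const void* src, std::size_t count_words) {
  // Source (new args at sp) and destination (caller's slots) overlap, so
  // memmove, not memcpy; the assembler loop copies from the top downwards
  // for the same reason.
  std::memmove(dst, src, count_words * sizeof(void*));
}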
+ __ Mov(jssp, dst_reg); + __ SetStackPointer(jssp); + __ AssertStackConsistency(); + + __ bind(&done); +} +} // namespace // static void Builtins::Generate_CallFunction(MacroAssembler* masm, - ConvertReceiverMode mode) { + ConvertReceiverMode mode, + TailCallMode tail_call_mode) { ASM_LOCATION("Builtins::Generate_CallFunction"); // ----------- S t a t e ------------- // -- x0 : the number of arguments (not including the receiver) @@ -2044,6 +2154,10 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm, // -- cp : the function context. // ----------------------------------- + if (tail_call_mode == TailCallMode::kAllow) { + PrepareForTailCall(masm, x0, x3, x4, x5); + } + __ Ldrsw( x2, FieldMemOperand(x2, SharedFunctionInfo::kFormalParameterCountOffset)); ParameterCount actual(x0); @@ -2140,13 +2254,18 @@ void Generate_PushBoundArguments(MacroAssembler* masm) { // static -void Builtins::Generate_CallBoundFunction(MacroAssembler* masm) { +void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm, + TailCallMode tail_call_mode) { // ----------- S t a t e ------------- // -- x0 : the number of arguments (not including the receiver) // -- x1 : the function to call (checked to be a JSBoundFunction) // ----------------------------------- __ AssertBoundFunction(x1); + if (tail_call_mode == TailCallMode::kAllow) { + PrepareForTailCall(masm, x0, x3, x4, x5); + } + // Patch the receiver to [[BoundThis]]. __ Ldr(x10, FieldMemOperand(x1, JSBoundFunction::kBoundThisOffset)); __ Poke(x10, Operand(x0, LSL, kPointerSizeLog2)); @@ -2165,7 +2284,8 @@ void Builtins::Generate_CallBoundFunction(MacroAssembler* masm) { // static -void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) { +void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode, + TailCallMode tail_call_mode) { // ----------- S t a t e ------------- // -- x0 : the number of arguments (not including the receiver) // -- x1 : the target to call (can be any Object). @@ -2175,14 +2295,24 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) { __ JumpIfSmi(x1, &non_callable); __ Bind(&non_smi); __ CompareObjectType(x1, x4, x5, JS_FUNCTION_TYPE); - __ Jump(masm->isolate()->builtins()->CallFunction(mode), + __ Jump(masm->isolate()->builtins()->CallFunction(mode, tail_call_mode), RelocInfo::CODE_TARGET, eq); __ Cmp(x5, JS_BOUND_FUNCTION_TYPE); - __ Jump(masm->isolate()->builtins()->CallBoundFunction(), + __ Jump(masm->isolate()->builtins()->CallBoundFunction(tail_call_mode), RelocInfo::CODE_TARGET, eq); + + // Check if target has a [[Call]] internal method. + __ Ldrb(x4, FieldMemOperand(x4, Map::kBitFieldOffset)); + __ TestAndBranchIfAllClear(x4, 1 << Map::kIsCallable, &non_callable); + __ Cmp(x5, JS_PROXY_TYPE); __ B(ne, &non_function); + // 0. Prepare for tail call if necessary. + if (tail_call_mode == TailCallMode::kAllow) { + PrepareForTailCall(masm, x0, x3, x4, x5); + } + // 1. Runtime fallback for Proxy [[Call]]. __ Push(x1); // Increase the arguments size to include the pushed function and the @@ -2195,15 +2325,12 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) { // 2. Call to something else, which might have a [[Call]] internal method (if // not we raise an exception). __ Bind(&non_function); - // Check if target has a [[Call]] internal method. - __ Ldrb(x4, FieldMemOperand(x4, Map::kBitFieldOffset)); - __ TestAndBranchIfAllClear(x4, 1 << Map::kIsCallable, &non_callable); // Overwrite the original receiver with the (original) target. 
__ Poke(x1, Operand(x0, LSL, kXRegSizeLog2)); // Let the "call_as_function_delegate" take care of the rest. __ LoadNativeContextSlot(Context::CALL_AS_FUNCTION_DELEGATE_INDEX, x1); __ Jump(masm->isolate()->builtins()->CallFunction( - ConvertReceiverMode::kNotNullOrUndefined), + ConvertReceiverMode::kNotNullOrUndefined, tail_call_mode), RelocInfo::CODE_TARGET); // 3. Call to something that is not callable. @@ -2341,7 +2468,8 @@ void Builtins::Generate_Construct(MacroAssembler* masm) { // static -void Builtins::Generate_InterpreterPushArgsAndCall(MacroAssembler* masm) { +void Builtins::Generate_InterpreterPushArgsAndCallImpl( + MacroAssembler* masm, TailCallMode tail_call_mode) { // ----------- S t a t e ------------- // -- x0 : the number of arguments (not including the receiver) // -- x2 : the address of the first argument to be pushed. Subsequent @@ -2369,7 +2497,9 @@ void Builtins::Generate_InterpreterPushArgsAndCall(MacroAssembler* masm) { __ B(gt, &loop_header); // Call the target. - __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET); + __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny, + tail_call_mode), + RelocInfo::CODE_TARGET); } diff --git a/deps/v8/src/arm64/code-stubs-arm64.cc b/deps/v8/src/arm64/code-stubs-arm64.cc index a1e920755d..57a0ffde92 100644 --- a/deps/v8/src/arm64/code-stubs-arm64.cc +++ b/deps/v8/src/arm64/code-stubs-arm64.cc @@ -207,8 +207,7 @@ void DoubleToIStub::Generate(MacroAssembler* masm) { static void EmitIdenticalObjectComparison(MacroAssembler* masm, Register left, Register right, Register scratch, FPRegister double_scratch, - Label* slow, Condition cond, - Strength strength) { + Label* slow, Condition cond) { DCHECK(!AreAliased(left, right, scratch)); Label not_identical, return_equal, heap_number; Register result = x0; @@ -231,14 +230,6 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm, Register left, // Call runtime on identical SIMD values since we must throw a TypeError. __ Cmp(right_type, SIMD128_VALUE_TYPE); __ B(eq, slow); - if (is_strong(strength)) { - // Call the runtime on anything that is converted in the semantics, since - // we need to throw a TypeError. Smis have already been ruled out. - __ Cmp(right_type, Operand(HEAP_NUMBER_TYPE)); - __ B(eq, &return_equal); - __ Tst(right_type, Operand(kIsNotStringMask)); - __ B(ne, slow); - } } else if (cond == eq) { __ JumpIfHeapNumber(right, &heap_number); } else { @@ -253,13 +244,6 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm, Register left, // Call runtime on identical SIMD values since we must throw a TypeError. __ Cmp(right_type, SIMD128_VALUE_TYPE); __ B(eq, slow); - if (is_strong(strength)) { - // Call the runtime on anything that is converted in the semantics, - // since we need to throw a TypeError. Smis and heap numbers have - // already been ruled out. - __ Tst(right_type, Operand(kIsNotStringMask)); - __ B(ne, slow); - } // Normally here we fall through to return_equal, but undefined is // special: (undefined == undefined) == true, but // (undefined <= undefined) == false! See ECMAScript 11.8.5. @@ -443,54 +427,49 @@ static void EmitSmiNonsmiComparison(MacroAssembler* masm, // Fast negative check for internalized-to-internalized equality. // See call site for description. 
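// The property this fast path relies on: internalized strings are unique per
// content, so distinct pointers imply distinct values. As a sketch (the type
// and predicate are illustrative):

struct StringSketch { bool internalized; };

bool DefinitelyNotEqualSketch(const StringSketch* lhs, const StringSketch* rhs) {
  // Identical pointers were already handled by the caller; two different
  // internalized pointers can therefore never be equal as string values.
  return lhs->internalized && rhs->internalized && lhs != rhs;
}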
-static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm, - Register left, - Register right, - Register left_map, - Register right_map, - Register left_type, - Register right_type, - Label* possible_strings, - Label* not_both_strings) { +static void EmitCheckForInternalizedStringsOrObjects( + MacroAssembler* masm, Register left, Register right, Register left_map, + Register right_map, Register left_type, Register right_type, + Label* possible_strings, Label* runtime_call) { DCHECK(!AreAliased(left, right, left_map, right_map, left_type, right_type)); Register result = x0; + DCHECK(left.is(x0) || right.is(x0)); - Label object_test; + Label object_test, return_unequal, undetectable; STATIC_ASSERT((kInternalizedTag == 0) && (kStringTag == 0)); // TODO(all): reexamine this branch sequence for optimisation wrt branch // prediction. __ Tbnz(right_type, MaskToBit(kIsNotStringMask), &object_test); __ Tbnz(right_type, MaskToBit(kIsNotInternalizedMask), possible_strings); - __ Tbnz(left_type, MaskToBit(kIsNotStringMask), not_both_strings); + __ Tbnz(left_type, MaskToBit(kIsNotStringMask), runtime_call); __ Tbnz(left_type, MaskToBit(kIsNotInternalizedMask), possible_strings); - // Both are internalized. We already checked that they weren't the same - // pointer, so they are not equal. - __ Mov(result, NOT_EQUAL); + // Both are internalized. We already checked they weren't the same pointer so + // they are not equal. Return non-equal by returning the non-zero object + // pointer in x0. __ Ret(); __ Bind(&object_test); - __ Cmp(right_type, FIRST_JS_RECEIVER_TYPE); - - // If right >= FIRST_JS_RECEIVER_TYPE, test left. - // Otherwise, right < FIRST_JS_RECEIVER_TYPE, so set lt condition. - __ Ccmp(left_type, FIRST_JS_RECEIVER_TYPE, NFlag, ge); - - __ B(lt, not_both_strings); - - // If both objects are undetectable, they are equal. Otherwise, they are not - // equal, since they are different objects and an object is not equal to - // undefined. - - // Returning here, so we can corrupt right_type and left_type. - Register right_bitfield = right_type; Register left_bitfield = left_type; + Register right_bitfield = right_type; __ Ldrb(right_bitfield, FieldMemOperand(right_map, Map::kBitFieldOffset)); __ Ldrb(left_bitfield, FieldMemOperand(left_map, Map::kBitFieldOffset)); - __ And(result, right_bitfield, left_bitfield); - __ And(result, result, 1 << Map::kIsUndetectable); - __ Eor(result, result, 1 << Map::kIsUndetectable); + __ Tbnz(right_bitfield, MaskToBit(1 << Map::kIsUndetectable), &undetectable); + __ Tbnz(left_bitfield, MaskToBit(1 << Map::kIsUndetectable), &return_unequal); + + __ CompareInstanceType(right_map, right_type, FIRST_JS_RECEIVER_TYPE); + __ B(lt, runtime_call); + __ CompareInstanceType(left_map, left_type, FIRST_JS_RECEIVER_TYPE); + __ B(lt, runtime_call); + + __ bind(&return_unequal); + // Return non-equal by returning the non-zero object pointer in x0. + __ Ret(); + + __ bind(&undetectable); + __ Tbz(left_bitfield, MaskToBit(1 << Map::kIsUndetectable), &return_unequal); + __ Mov(result, EQUAL); __ Ret(); } @@ -536,8 +515,7 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) { // Handle the case where the objects are identical. Either returns the answer // or goes to slow. Only falls through if the objects were not identical. 
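// Identical operands still need their numeric payload checked because of
// NaN; the rule the heap-number path implements is, in C++ terms:

#include <cmath>

bool IdenticalNumberEqualsSketch(double v) {
  // x == x holds for every value except NaN, for which every comparison
  // (including <, <=, >, >=) is false.
  return v == v;  // equivalently: !std::isnan(v)
}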
- EmitIdenticalObjectComparison(masm, lhs, rhs, x10, d0, &slow, cond, - strength()); + EmitIdenticalObjectComparison(masm, lhs, rhs, x10, d0, &slow, cond); // If either is a smi (we know that at least one is not a smi), then they can // only be strictly equal if the other is a HeapNumber. @@ -667,8 +645,7 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) { // Call the native; it returns -1 (less), 0 (equal), or 1 (greater) // tagged as a small integer. - __ TailCallRuntime(is_strong(strength()) ? Runtime::kCompare_Strong - : Runtime::kCompare); + __ TailCallRuntime(Runtime::kCompare); } __ Bind(&miss); @@ -971,8 +948,6 @@ void MathPowStub::Generate(MacroAssembler* masm) { __ AllocateHeapNumber(result_tagged, &call_runtime, scratch0, scratch1, result_double); DCHECK(result_tagged.is(x0)); - __ IncrementCounter( - isolate()->counters()->math_pow(), 1, scratch0, scratch1); __ Ret(); } else { AllowExternalCallThatCantCauseGC scope(masm); @@ -984,8 +959,6 @@ void MathPowStub::Generate(MacroAssembler* masm) { 0, 2); __ Mov(lr, saved_lr); __ Bind(&done); - __ IncrementCounter( - isolate()->counters()->math_pow(), 1, scratch0, scratch1); __ Ret(); } } @@ -1104,10 +1077,13 @@ void CEntryStub::Generate(MacroAssembler* masm) { __ Sub(temp_argv, temp_argv, 1 * kPointerSize); } - // Enter the exit frame. Reserve three slots to preserve x21-x23 callee-saved - // registers. + // Reserve three slots to preserve x21-x23 callee-saved registers. If the + // result size is too large to be returned in registers then also reserve + // space for the return value. + int extra_stack_space = 3 + (result_size() <= 2 ? 0 : result_size()); + // Enter the exit frame. FrameScope scope(masm, StackFrame::MANUAL); - __ EnterExitFrame(save_doubles(), x10, 3); + __ EnterExitFrame(save_doubles(), x10, extra_stack_space); DCHECK(csp.Is(__ StackPointer())); // Poke callee-saved registers into reserved space. @@ -1115,6 +1091,11 @@ void CEntryStub::Generate(MacroAssembler* masm) { __ Poke(argc, 2 * kPointerSize); __ Poke(target, 3 * kPointerSize); + if (result_size() > 2) { + // Save the location of the return value into x8 for call. + __ Add(x8, __ StackPointer(), Operand(4 * kPointerSize)); + } + // We normally only keep tagged values in callee-saved registers, as they // could be pushed onto the stack by called stubs and functions, and on the // stack they can confuse the GC. However, we're only calling C functions @@ -1184,7 +1165,18 @@ void CEntryStub::Generate(MacroAssembler* masm) { __ Blr(target); __ Bind(&return_location); - // x0 result The return code from the call. + if (result_size() > 2) { + DCHECK_EQ(3, result_size()); + // Read result values stored on stack. + __ Ldr(x0, MemOperand(__ StackPointer(), 4 * kPointerSize)); + __ Ldr(x1, MemOperand(__ StackPointer(), 5 * kPointerSize)); + __ Ldr(x2, MemOperand(__ StackPointer(), 6 * kPointerSize)); + } + // Result returned in x0, x1:x0 or x2:x1:x0 - do not destroy these registers! + + // x0 result0 The return code from the call. + // x1 result1 For calls which return ObjectPair or ObjectTriple. + // x2 result2 For calls which return ObjectTriple. 
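// The result_size() > 2 handling above follows the AAPCS64 rule that
// aggregates larger than 16 bytes are returned indirectly: the caller
// reserves memory and passes its address in x8. A C++ sketch of such a call
// (types illustrative; V8's real type is an ObjectTriple of three Object*):

struct TripleSketch { void* a; void* b; void* c; };  // 24 bytes: indirect return

TripleSketch MakeTripleSketch(void* a, void* b, void* c) {
  // Compiled for arm64, the caller puts the result slot's address in x8 and
  // this function stores three words through it instead of using x0/x1/x2.
  return TripleSketch{a, b, c};
}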
// x21 argv // x22 argc // x23 target @@ -1616,363 +1608,6 @@ void InstanceOfStub::Generate(MacroAssembler* masm) { } -void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) { - Register arg_count = ArgumentsAccessReadDescriptor::parameter_count(); - Register key = ArgumentsAccessReadDescriptor::index(); - DCHECK(arg_count.is(x0)); - DCHECK(key.is(x1)); - - // The displacement is the offset of the last parameter (if any) relative - // to the frame pointer. - static const int kDisplacement = - StandardFrameConstants::kCallerSPOffset - kPointerSize; - - // Check that the key is a smi. - Label slow; - __ JumpIfNotSmi(key, &slow); - - // Check if the calling frame is an arguments adaptor frame. - Register local_fp = x11; - Register caller_fp = x11; - Register caller_ctx = x12; - Label skip_adaptor; - __ Ldr(caller_fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); - __ Ldr(caller_ctx, MemOperand(caller_fp, - StandardFrameConstants::kContextOffset)); - __ Cmp(caller_ctx, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)); - __ Csel(local_fp, fp, caller_fp, ne); - __ B(ne, &skip_adaptor); - - // Load the actual arguments limit found in the arguments adaptor frame. - __ Ldr(arg_count, MemOperand(caller_fp, - ArgumentsAdaptorFrameConstants::kLengthOffset)); - __ Bind(&skip_adaptor); - - // Check index against formal parameters count limit. Use unsigned comparison - // to get negative check for free: branch if key < 0 or key >= arg_count. - __ Cmp(key, arg_count); - __ B(hs, &slow); - - // Read the argument from the stack and return it. - __ Sub(x10, arg_count, key); - __ Add(x10, local_fp, Operand::UntagSmiAndScale(x10, kPointerSizeLog2)); - __ Ldr(x0, MemOperand(x10, kDisplacement)); - __ Ret(); - - // Slow case: handle non-smi or out-of-bounds access to arguments by calling - // the runtime system. - __ Bind(&slow); - __ Push(key); - __ TailCallRuntime(Runtime::kArguments); -} - - -void ArgumentsAccessStub::GenerateNewSloppySlow(MacroAssembler* masm) { - // x1 : function - // x2 : number of parameters (tagged) - // x3 : parameters pointer - - DCHECK(x1.is(ArgumentsAccessNewDescriptor::function())); - DCHECK(x2.is(ArgumentsAccessNewDescriptor::parameter_count())); - DCHECK(x3.is(ArgumentsAccessNewDescriptor::parameter_pointer())); - - // Check if the calling frame is an arguments adaptor frame. - Label runtime; - Register caller_fp = x10; - __ Ldr(caller_fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); - // Load and untag the context. - __ Ldr(w11, UntagSmiMemOperand(caller_fp, - StandardFrameConstants::kContextOffset)); - __ Cmp(w11, StackFrame::ARGUMENTS_ADAPTOR); - __ B(ne, &runtime); - - // Patch the arguments.length and parameters pointer in the current frame. - __ Ldr(x2, - MemOperand(caller_fp, ArgumentsAdaptorFrameConstants::kLengthOffset)); - __ Add(x3, caller_fp, Operand::UntagSmiAndScale(x2, kPointerSizeLog2)); - __ Add(x3, x3, StandardFrameConstants::kCallerSPOffset); - - __ Bind(&runtime); - __ Push(x1, x3, x2); - __ TailCallRuntime(Runtime::kNewSloppyArguments); -} - - -void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) { - // x1 : function - // x2 : number of parameters (tagged) - // x3 : parameters pointer - // - // Returns pointer to result object in x0. - - DCHECK(x1.is(ArgumentsAccessNewDescriptor::function())); - DCHECK(x2.is(ArgumentsAccessNewDescriptor::parameter_count())); - DCHECK(x3.is(ArgumentsAccessNewDescriptor::parameter_pointer())); - - // Make an untagged copy of the parameter count. 
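// "Untagged copy" refers to the arm64 Smi representation: a 32-bit payload
// in the upper word of a 64-bit tagged value, so tagging is a shift pair
// (sketch):

#include <cstdint>

inline int64_t SmiTagSketch(int32_t value) {
  return static_cast<int64_t>(value) << 32;
}
inline int32_t SmiUntagSketch(int64_t tagged) {
  return static_cast<int32_t>(tagged >> 32);  // arithmetic shift keeps the sign
}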
- // Note: arg_count_smi is an alias of param_count_smi. - Register function = x1; - Register arg_count_smi = x2; - Register param_count_smi = x2; - Register recv_arg = x3; - Register param_count = x7; - __ SmiUntag(param_count, param_count_smi); - - // Check if the calling frame is an arguments adaptor frame. - Register caller_fp = x11; - Register caller_ctx = x12; - Label runtime; - Label adaptor_frame, try_allocate; - __ Ldr(caller_fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); - __ Ldr(caller_ctx, MemOperand(caller_fp, - StandardFrameConstants::kContextOffset)); - __ Cmp(caller_ctx, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)); - __ B(eq, &adaptor_frame); - - // No adaptor, parameter count = argument count. - - // x1 function function pointer - // x2 arg_count_smi number of function arguments (smi) - // x3 recv_arg pointer to receiver arguments - // x4 mapped_params number of mapped params, min(params, args) (uninit) - // x7 param_count number of function parameters - // x11 caller_fp caller's frame pointer - // x14 arg_count number of function arguments (uninit) - - Register arg_count = x14; - Register mapped_params = x4; - __ Mov(arg_count, param_count); - __ Mov(mapped_params, param_count); - __ B(&try_allocate); - - // We have an adaptor frame. Patch the parameters pointer. - __ Bind(&adaptor_frame); - __ Ldr(arg_count_smi, - MemOperand(caller_fp, - ArgumentsAdaptorFrameConstants::kLengthOffset)); - __ SmiUntag(arg_count, arg_count_smi); - __ Add(x10, caller_fp, Operand(arg_count, LSL, kPointerSizeLog2)); - __ Add(recv_arg, x10, StandardFrameConstants::kCallerSPOffset); - - // Compute the mapped parameter count = min(param_count, arg_count) - __ Cmp(param_count, arg_count); - __ Csel(mapped_params, param_count, arg_count, lt); - - __ Bind(&try_allocate); - - // x0 alloc_obj pointer to allocated objects: param map, backing - // store, arguments (uninit) - // x1 function function pointer - // x2 arg_count_smi number of function arguments (smi) - // x3 recv_arg pointer to receiver arguments - // x4 mapped_params number of mapped parameters, min(params, args) - // x7 param_count number of function parameters - // x10 size size of objects to allocate (uninit) - // x14 arg_count number of function arguments - - // Compute the size of backing store, parameter map, and arguments object. - // 1. Parameter map, has two extra words containing context and backing - // store. - const int kParameterMapHeaderSize = - FixedArray::kHeaderSize + 2 * kPointerSize; - - // Calculate the parameter map size, assuming it exists. - Register size = x10; - __ Mov(size, Operand(mapped_params, LSL, kPointerSizeLog2)); - __ Add(size, size, kParameterMapHeaderSize); - - // If there are no mapped parameters, set the running size total to zero. - // Otherwise, use the parameter map size calculated earlier. - __ Cmp(mapped_params, 0); - __ CzeroX(size, eq); - - // 2. Add the size of the backing store and arguments object. - __ Add(size, size, Operand(arg_count, LSL, kPointerSizeLog2)); - __ Add(size, size, - FixedArray::kHeaderSize + Heap::kSloppyArgumentsObjectSize); - - // Do the allocation of all three objects in one go. Assign this to x0, as it - // will be returned to the caller. - Register alloc_obj = x0; - __ Allocate(size, alloc_obj, x11, x12, &runtime, TAG_OBJECT); - - // Get the arguments boilerplate from the current (global) context. 
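// The size computation above, restated in plain C++ (constants as in the
// removed code; kPointerSize is 8 on arm64, and the wrapper object's size is
// left as a parameter rather than asserting Heap::kSloppyArgumentsObjectSize):

int SloppyArgumentsSizeSketch(int mapped_params, int arg_count,
                              int arguments_object_size) {
  const int kPointerSize = 8;
  const int kFixedArrayHeaderSize = 2 * kPointerSize;  // map + length
  const int kParameterMapHeaderSize = kFixedArrayHeaderSize + 2 * kPointerSize;
  // 1. Parameter map (context + backing-store slots), only if params are mapped.
  int size = mapped_params == 0
                 ? 0
                 : kParameterMapHeaderSize + mapped_params * kPointerSize;
  // 2. Backing store and the arguments object itself.
  size += arg_count * kPointerSize + kFixedArrayHeaderSize;
  size += arguments_object_size;
  return size;
}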
- - // x0 alloc_obj pointer to allocated objects (param map, backing - // store, arguments) - // x1 function function pointer - // x2 arg_count_smi number of function arguments (smi) - // x3 recv_arg pointer to receiver arguments - // x4 mapped_params number of mapped parameters, min(params, args) - // x7 param_count number of function parameters - // x11 sloppy_args_map offset to args (or aliased args) map (uninit) - // x14 arg_count number of function arguments - - Register global_ctx = x10; - Register sloppy_args_map = x11; - Register aliased_args_map = x10; - __ Ldr(global_ctx, NativeContextMemOperand()); - - __ Ldr(sloppy_args_map, - ContextMemOperand(global_ctx, Context::SLOPPY_ARGUMENTS_MAP_INDEX)); - __ Ldr( - aliased_args_map, - ContextMemOperand(global_ctx, Context::FAST_ALIASED_ARGUMENTS_MAP_INDEX)); - __ Cmp(mapped_params, 0); - __ CmovX(sloppy_args_map, aliased_args_map, ne); - - // Copy the JS object part. - __ Str(sloppy_args_map, FieldMemOperand(alloc_obj, JSObject::kMapOffset)); - __ LoadRoot(x10, Heap::kEmptyFixedArrayRootIndex); - __ Str(x10, FieldMemOperand(alloc_obj, JSObject::kPropertiesOffset)); - __ Str(x10, FieldMemOperand(alloc_obj, JSObject::kElementsOffset)); - - // Set up the callee in-object property. - STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1); - const int kCalleeOffset = JSObject::kHeaderSize + - Heap::kArgumentsCalleeIndex * kPointerSize; - __ AssertNotSmi(function); - __ Str(function, FieldMemOperand(alloc_obj, kCalleeOffset)); - - // Use the length and set that as an in-object property. - STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0); - const int kLengthOffset = JSObject::kHeaderSize + - Heap::kArgumentsLengthIndex * kPointerSize; - __ Str(arg_count_smi, FieldMemOperand(alloc_obj, kLengthOffset)); - - // Set up the elements pointer in the allocated arguments object. - // If we allocated a parameter map, "elements" will point there, otherwise - // it will point to the backing store. - - // x0 alloc_obj pointer to allocated objects (param map, backing - // store, arguments) - // x1 function function pointer - // x2 arg_count_smi number of function arguments (smi) - // x3 recv_arg pointer to receiver arguments - // x4 mapped_params number of mapped parameters, min(params, args) - // x5 elements pointer to parameter map or backing store (uninit) - // x6 backing_store pointer to backing store (uninit) - // x7 param_count number of function parameters - // x14 arg_count number of function arguments - - Register elements = x5; - __ Add(elements, alloc_obj, Heap::kSloppyArgumentsObjectSize); - __ Str(elements, FieldMemOperand(alloc_obj, JSObject::kElementsOffset)); - - // Initialize parameter map. If there are no mapped arguments, we're done. - Label skip_parameter_map; - __ Cmp(mapped_params, 0); - // Set up backing store address, because it is needed later for filling in - // the unmapped arguments. 
- Register backing_store = x6; - __ CmovX(backing_store, elements, eq); - __ B(eq, &skip_parameter_map); - - __ LoadRoot(x10, Heap::kSloppyArgumentsElementsMapRootIndex); - __ Str(x10, FieldMemOperand(elements, FixedArray::kMapOffset)); - __ Add(x10, mapped_params, 2); - __ SmiTag(x10); - __ Str(x10, FieldMemOperand(elements, FixedArray::kLengthOffset)); - __ Str(cp, FieldMemOperand(elements, - FixedArray::kHeaderSize + 0 * kPointerSize)); - __ Add(x10, elements, Operand(mapped_params, LSL, kPointerSizeLog2)); - __ Add(x10, x10, kParameterMapHeaderSize); - __ Str(x10, FieldMemOperand(elements, - FixedArray::kHeaderSize + 1 * kPointerSize)); - - // Copy the parameter slots and the holes in the arguments. - // We need to fill in mapped_parameter_count slots. Then index the context, - // where parameters are stored in reverse order, at: - // - // MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS + parameter_count - 1 - // - // The mapped parameter thus needs to get indices: - // - // MIN_CONTEXT_SLOTS + parameter_count - 1 .. - // MIN_CONTEXT_SLOTS + parameter_count - mapped_parameter_count - // - // We loop from right to left. - - // x0 alloc_obj pointer to allocated objects (param map, backing - // store, arguments) - // x1 function function pointer - // x2 arg_count_smi number of function arguments (smi) - // x3 recv_arg pointer to receiver arguments - // x4 mapped_params number of mapped parameters, min(params, args) - // x5 elements pointer to parameter map or backing store (uninit) - // x6 backing_store pointer to backing store (uninit) - // x7 param_count number of function parameters - // x11 loop_count parameter loop counter (uninit) - // x12 index parameter index (smi, uninit) - // x13 the_hole hole value (uninit) - // x14 arg_count number of function arguments - - Register loop_count = x11; - Register index = x12; - Register the_hole = x13; - Label parameters_loop, parameters_test; - __ Mov(loop_count, mapped_params); - __ Add(index, param_count, static_cast<int>(Context::MIN_CONTEXT_SLOTS)); - __ Sub(index, index, mapped_params); - __ SmiTag(index); - __ LoadRoot(the_hole, Heap::kTheHoleValueRootIndex); - __ Add(backing_store, elements, Operand(loop_count, LSL, kPointerSizeLog2)); - __ Add(backing_store, backing_store, kParameterMapHeaderSize); - - __ B(¶meters_test); - - __ Bind(¶meters_loop); - __ Sub(loop_count, loop_count, 1); - __ Mov(x10, Operand(loop_count, LSL, kPointerSizeLog2)); - __ Add(x10, x10, kParameterMapHeaderSize - kHeapObjectTag); - __ Str(index, MemOperand(elements, x10)); - __ Sub(x10, x10, kParameterMapHeaderSize - FixedArray::kHeaderSize); - __ Str(the_hole, MemOperand(backing_store, x10)); - __ Add(index, index, Smi::FromInt(1)); - __ Bind(¶meters_test); - __ Cbnz(loop_count, ¶meters_loop); - - __ Bind(&skip_parameter_map); - // Copy arguments header and remaining slots (if there are any.) 
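// For reference, the parameter-map loop above assigns context slots from the
// top down: mapped parameter i aliases the context slot given below, while
// the matching backing-store position holds the_hole (sketch):

int ParameterContextSlotSketch(int min_context_slots, int param_count, int i) {
  // i runs over [0, mapped_count); indices run backwards from
  // MIN_CONTEXT_SLOTS + param_count - 1, exactly as the loop stores them.
  return min_context_slots + param_count - 1 - i;
}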
- __ LoadRoot(x10, Heap::kFixedArrayMapRootIndex); - __ Str(x10, FieldMemOperand(backing_store, FixedArray::kMapOffset)); - __ Str(arg_count_smi, FieldMemOperand(backing_store, - FixedArray::kLengthOffset)); - - // x0 alloc_obj pointer to allocated objects (param map, backing - // store, arguments) - // x1 function function pointer - // x2 arg_count_smi number of function arguments (smi) - // x3 recv_arg pointer to receiver arguments - // x4 mapped_params number of mapped parameters, min(params, args) - // x6 backing_store pointer to backing store (uninit) - // x14 arg_count number of function arguments - - Label arguments_loop, arguments_test; - __ Mov(x10, mapped_params); - __ Sub(recv_arg, recv_arg, Operand(x10, LSL, kPointerSizeLog2)); - __ B(&arguments_test); - - __ Bind(&arguments_loop); - __ Sub(recv_arg, recv_arg, kPointerSize); - __ Ldr(x11, MemOperand(recv_arg)); - __ Add(x12, backing_store, Operand(x10, LSL, kPointerSizeLog2)); - __ Str(x11, FieldMemOperand(x12, FixedArray::kHeaderSize)); - __ Add(x10, x10, 1); - - __ Bind(&arguments_test); - __ Cmp(x10, arg_count); - __ B(lt, &arguments_loop); - - __ Ret(); - - // Do the runtime call to allocate the arguments object. - __ Bind(&runtime); - __ Push(function, recv_arg, arg_count_smi); - __ TailCallRuntime(Runtime::kNewSloppyArguments); -} - - void LoadIndexedInterceptorStub::Generate(MacroAssembler* masm) { // Return address is in lr. Label slow; @@ -1993,182 +1628,6 @@ void LoadIndexedInterceptorStub::Generate(MacroAssembler* masm) { } -void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) { - // x1 : function - // x2 : number of parameters (tagged) - // x3 : parameters pointer - // - // Returns pointer to result object in x0. - - DCHECK(x1.is(ArgumentsAccessNewDescriptor::function())); - DCHECK(x2.is(ArgumentsAccessNewDescriptor::parameter_count())); - DCHECK(x3.is(ArgumentsAccessNewDescriptor::parameter_pointer())); - - // Make an untagged copy of the parameter count. - Register function = x1; - Register param_count_smi = x2; - Register params = x3; - Register param_count = x13; - __ SmiUntag(param_count, param_count_smi); - - // Test if arguments adaptor needed. - Register caller_fp = x11; - Register caller_ctx = x12; - Label try_allocate, runtime; - __ Ldr(caller_fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); - __ Ldr(caller_ctx, MemOperand(caller_fp, - StandardFrameConstants::kContextOffset)); - __ Cmp(caller_ctx, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)); - __ B(ne, &try_allocate); - - // x1 function function pointer - // x2 param_count_smi number of parameters passed to function (smi) - // x3 params pointer to parameters - // x11 caller_fp caller's frame pointer - // x13 param_count number of parameters passed to function - - // Patch the argument length and parameters pointer. - __ Ldr(param_count_smi, - MemOperand(caller_fp, - ArgumentsAdaptorFrameConstants::kLengthOffset)); - __ SmiUntag(param_count, param_count_smi); - __ Add(x10, caller_fp, Operand(param_count, LSL, kPointerSizeLog2)); - __ Add(params, x10, StandardFrameConstants::kCallerSPOffset); - - // Try the new space allocation. Start out with computing the size of the - // arguments object and the elements array in words. - Register size = x10; - __ Bind(&try_allocate); - __ Add(size, param_count, FixedArray::kHeaderSize / kPointerSize); - __ Cmp(param_count, 0); - __ CzeroX(size, eq); - __ Add(size, size, Heap::kStrictArgumentsObjectSize / kPointerSize); - - // Do the allocation of both objects in one go. 
-  // Assign this to x0, as it will be returned to the caller.
-  Register alloc_obj = x0;
-  __ Allocate(size, alloc_obj, x11, x12, &runtime,
-              static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
-
-  // Get the arguments boilerplate from the current (native) context.
-  Register strict_args_map = x4;
-  __ LoadNativeContextSlot(Context::STRICT_ARGUMENTS_MAP_INDEX,
-                           strict_args_map);
-
-  // x0   alloc_obj         pointer to allocated objects: parameter array and
-  //                        arguments object
-  // x1   function          function pointer
-  // x2   param_count_smi   number of parameters passed to function (smi)
-  // x3   params            pointer to parameters
-  // x4   strict_args_map   offset to arguments map
-  // x13  param_count       number of parameters passed to function
-  __ Str(strict_args_map, FieldMemOperand(alloc_obj, JSObject::kMapOffset));
-  __ LoadRoot(x5, Heap::kEmptyFixedArrayRootIndex);
-  __ Str(x5, FieldMemOperand(alloc_obj, JSObject::kPropertiesOffset));
-  __ Str(x5, FieldMemOperand(alloc_obj, JSObject::kElementsOffset));
-
-  // Set the smi-tagged length as an in-object property.
-  STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
-  const int kLengthOffset = JSObject::kHeaderSize +
-                            Heap::kArgumentsLengthIndex * kPointerSize;
-  __ Str(param_count_smi, FieldMemOperand(alloc_obj, kLengthOffset));
-
-  // If there are no actual arguments, we're done.
-  Label done;
-  __ Cbz(param_count, &done);
-
-  // Set up the elements pointer in the allocated arguments object and
-  // initialize the header in the elements fixed array.
-  Register elements = x5;
-  __ Add(elements, alloc_obj, Heap::kStrictArgumentsObjectSize);
-  __ Str(elements, FieldMemOperand(alloc_obj, JSObject::kElementsOffset));
-  __ LoadRoot(x10, Heap::kFixedArrayMapRootIndex);
-  __ Str(x10, FieldMemOperand(elements, FixedArray::kMapOffset));
-  __ Str(param_count_smi, FieldMemOperand(elements, FixedArray::kLengthOffset));
-
-  // x0   alloc_obj         pointer to allocated objects: parameter array and
-  //                        arguments object
-  // x1   function          function pointer
-  // x2   param_count_smi   number of parameters passed to function (smi)
-  // x3   params            pointer to parameters
-  // x4   array             pointer to array slot (uninit)
-  // x5   elements          pointer to elements array of alloc_obj
-  // x13  param_count       number of parameters passed to function
-
-  // Copy the fixed array slots.
-  Label loop;
-  Register array = x4;
-  // Set up pointer to first array slot.
-  __ Add(array, elements, FixedArray::kHeaderSize - kHeapObjectTag);
-
-  __ Bind(&loop);
-  // Pre-decrement the parameters pointer by kPointerSize on each iteration.
-  // Pre-decrement in order to skip receiver.
-  __ Ldr(x10, MemOperand(params, -kPointerSize, PreIndex));
-  // Post-increment elements by kPointerSize on each iteration.
-  __ Str(x10, MemOperand(array, kPointerSize, PostIndex));
-  __ Sub(param_count, param_count, 1);
-  __ Cbnz(param_count, &loop);
-
-  // Return from stub.
-  __ Bind(&done);
-  __ Ret();
-
-  // Do the runtime call to allocate the arguments object.
-  __ Bind(&runtime);
-  __ Push(function, params, param_count_smi);
-  __ TailCallRuntime(Runtime::kNewStrictArguments);
-}
-
-
-void RestParamAccessStub::GenerateNew(MacroAssembler* masm) {
-  // x2 : number of parameters (tagged)
-  // x3 : parameters pointer
-  // x4 : rest parameter index (tagged)
-  //
-  // Returns pointer to result object in x0.
- - DCHECK(x2.is(ArgumentsAccessNewDescriptor::parameter_count())); - DCHECK(x3.is(RestParamAccessDescriptor::parameter_pointer())); - DCHECK(x4.is(RestParamAccessDescriptor::rest_parameter_index())); - - // Get the stub arguments from the frame, and make an untagged copy of the - // parameter count. - Register rest_index_smi = x4; - Register param_count_smi = x2; - Register params = x3; - Register param_count = x13; - __ SmiUntag(param_count, param_count_smi); - - // Test if arguments adaptor needed. - Register caller_fp = x11; - Register caller_ctx = x12; - Label runtime; - __ Ldr(caller_fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); - __ Ldr(caller_ctx, - MemOperand(caller_fp, StandardFrameConstants::kContextOffset)); - __ Cmp(caller_ctx, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)); - __ B(ne, &runtime); - - // x4 rest_index_smi index of rest parameter - // x2 param_count_smi number of parameters passed to function (smi) - // x3 params pointer to parameters - // x11 caller_fp caller's frame pointer - // x13 param_count number of parameters passed to function - - // Patch the argument length and parameters pointer. - __ Ldr(param_count_smi, - MemOperand(caller_fp, ArgumentsAdaptorFrameConstants::kLengthOffset)); - __ SmiUntag(param_count, param_count_smi); - __ Add(x10, caller_fp, Operand(param_count, LSL, kPointerSizeLog2)); - __ Add(params, x10, StandardFrameConstants::kCallerSPOffset); - - __ Bind(&runtime); - __ Push(param_count_smi, params, rest_index_smi); - __ TailCallRuntime(Runtime::kNewRestParam); -} - - void RegExpExecStub::Generate(MacroAssembler* masm) { #ifdef V8_INTERPRETED_REGEXP __ TailCallRuntime(Runtime::kRegExpExec); @@ -2917,7 +2376,8 @@ void CallICStub::Generate(MacroAssembler* masm) { __ Bind(&call_function); __ Mov(x0, argc); - __ Jump(masm->isolate()->builtins()->CallFunction(convert_mode()), + __ Jump(masm->isolate()->builtins()->CallFunction(convert_mode(), + tail_call_mode()), RelocInfo::CODE_TARGET); __ bind(&extra_checks_or_miss); @@ -2951,7 +2411,7 @@ void CallICStub::Generate(MacroAssembler* masm) { __ Bind(&call); __ Mov(x0, argc); - __ Jump(masm->isolate()->builtins()->Call(convert_mode()), + __ Jump(masm->isolate()->builtins()->Call(convert_mode(), tail_call_mode()), RelocInfo::CODE_TARGET); __ bind(&uninitialized); @@ -3151,18 +2611,14 @@ void CompareICStub::GenerateBooleans(MacroAssembler* masm) { __ CheckMap(x1, x2, Heap::kBooleanMapRootIndex, &miss, DO_SMI_CHECK); __ CheckMap(x0, x3, Heap::kBooleanMapRootIndex, &miss, DO_SMI_CHECK); - if (op() != Token::EQ_STRICT && is_strong(strength())) { - __ TailCallRuntime(Runtime::kThrowStrongModeImplicitConversion); - } else { - if (!Token::IsEqualityOp(op())) { - __ Ldr(x1, FieldMemOperand(x1, Oddball::kToNumberOffset)); - __ AssertSmi(x1); - __ Ldr(x0, FieldMemOperand(x0, Oddball::kToNumberOffset)); - __ AssertSmi(x0); - } - __ Sub(x0, x1, x0); - __ Ret(); + if (!Token::IsEqualityOp(op())) { + __ Ldr(x1, FieldMemOperand(x1, Oddball::kToNumberOffset)); + __ AssertSmi(x1); + __ Ldr(x0, FieldMemOperand(x0, Oddball::kToNumberOffset)); + __ AssertSmi(x0); } + __ Sub(x0, x1, x0); + __ Ret(); __ Bind(&miss); GenerateMiss(masm); @@ -3236,7 +2692,7 @@ void CompareICStub::GenerateNumbers(MacroAssembler* masm) { __ Ret(); __ Bind(&unordered); - CompareICStub stub(isolate(), op(), strength(), CompareICState::GENERIC, + CompareICStub stub(isolate(), op(), CompareICState::GENERIC, CompareICState::GENERIC, CompareICState::GENERIC); __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET); @@ -3467,8 +2923,6 @@ void 
CompareICStub::GenerateKnownReceivers(MacroAssembler* masm) {
   if (Token::IsEqualityOp(op())) {
     __ Sub(result, rhs, lhs);
     __ Ret();
-  } else if (is_strong(strength())) {
-    __ TailCallRuntime(Runtime::kThrowStrongModeImplicitConversion);
   } else {
     Register ncr = x2;
     if (op() == Token::LT || op() == Token::LTE) {
@@ -3859,6 +3313,39 @@ void ToStringStub::Generate(MacroAssembler* masm) {
 }
 
 
+void ToNameStub::Generate(MacroAssembler* masm) {
+  // The ToName stub takes one argument in x0.
+  Label is_number;
+  __ JumpIfSmi(x0, &is_number);
+
+  Label not_name;
+  STATIC_ASSERT(FIRST_NAME_TYPE == FIRST_TYPE);
+  __ JumpIfObjectType(x0, x1, x1, LAST_NAME_TYPE, &not_name, hi);
+  // x0: receiver
+  // x1: receiver instance type
+  __ Ret();
+  __ Bind(&not_name);
+
+  Label not_heap_number;
+  __ Cmp(x1, HEAP_NUMBER_TYPE);
+  __ B(ne, &not_heap_number);
+  __ Bind(&is_number);
+  NumberToStringStub stub(isolate());
+  __ TailCallStub(&stub);
+  __ Bind(&not_heap_number);
+
+  Label not_oddball;
+  __ Cmp(x1, ODDBALL_TYPE);
+  __ B(ne, &not_oddball);
+  __ Ldr(x0, FieldMemOperand(x0, Oddball::kToStringOffset));
+  __ Ret();
+  __ Bind(&not_oddball);
+
+  __ Push(x0);  // Push argument.
+  __ TailCallRuntime(Runtime::kToName);
+}
+
+
 void StringHelper::GenerateFlatOneByteStringEquals(
     MacroAssembler* masm, Register left, Register right, Register scratch1,
     Register scratch2, Register scratch3) {
@@ -4042,8 +3529,7 @@ void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
     __ Ldr(val, MemOperand(regs_.address()));
     __ JumpIfNotInNewSpace(val, &dont_need_remembered_set);
 
-    __ CheckPageFlagSet(regs_.object(), val, 1 << MemoryChunk::SCAN_ON_SCAVENGE,
-                        &dont_need_remembered_set);
+    __ JumpIfInNewSpace(regs_.object(), &dont_need_remembered_set);
 
     // First notify the incremental marker if necessary, then update the
     // remembered set.
@@ -5343,6 +4829,672 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
 }
 
 
+void FastNewObjectStub::Generate(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- x1 : target
+  //  -- x3 : new target
+  //  -- cp : context
+  //  -- lr : return address
+  // -----------------------------------
+  __ AssertFunction(x1);
+  __ AssertReceiver(x3);
+
+  // Verify that the new target is a JSFunction.
+  Label new_object;
+  __ JumpIfNotObjectType(x3, x2, x2, JS_FUNCTION_TYPE, &new_object);
+
+  // Load the initial map and verify that it's in fact a map.
+  __ Ldr(x2, FieldMemOperand(x3, JSFunction::kPrototypeOrInitialMapOffset));
+  __ JumpIfSmi(x2, &new_object);
+  __ JumpIfNotObjectType(x2, x0, x0, MAP_TYPE, &new_object);
+
+  // Fall back to runtime if the target differs from the new target's
+  // initial map constructor.
+  __ Ldr(x0, FieldMemOperand(x2, Map::kConstructorOrBackPointerOffset));
+  __ CompareAndBranch(x0, x1, ne, &new_object);
+
+  // Allocate the JSObject on the heap.
+  Label allocate, done_allocate;
+  __ Ldrb(x4, FieldMemOperand(x2, Map::kInstanceSizeOffset));
+  __ Allocate(x4, x0, x5, x6, &allocate, SIZE_IN_WORDS);
+  __ Bind(&done_allocate);
+
+  // Initialize the JSObject fields.
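+  // The first three words of any JSObject are its map and the (here empty)
+  // properties and elements stores; with the layout asserted below
+  // (kPointerSize == 8 on arm64) the stores land at:
+  //
+  //   x0 + 0  : map         (x2)
+  //   x0 + 8  : properties  (empty fixed array)
+  //   x0 + 16 : elements    (empty fixed array)
+  //
+  // x1 advances past each word written, so it ends up at the first
+  // in-object property, ready for the field initialization below.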
+ __ Mov(x1, x0); + STATIC_ASSERT(JSObject::kMapOffset == 0 * kPointerSize); + __ Str(x2, MemOperand(x1, kPointerSize, PostIndex)); + __ LoadRoot(x3, Heap::kEmptyFixedArrayRootIndex); + STATIC_ASSERT(JSObject::kPropertiesOffset == 1 * kPointerSize); + STATIC_ASSERT(JSObject::kElementsOffset == 2 * kPointerSize); + __ Stp(x3, x3, MemOperand(x1, 2 * kPointerSize, PostIndex)); + STATIC_ASSERT(JSObject::kHeaderSize == 3 * kPointerSize); + + // ----------- S t a t e ------------- + // -- x0 : result (untagged) + // -- x1 : result fields (untagged) + // -- x5 : result end (untagged) + // -- x2 : initial map + // -- cp : context + // -- lr : return address + // ----------------------------------- + + // Perform in-object slack tracking if requested. + Label slack_tracking; + STATIC_ASSERT(Map::kNoSlackTracking == 0); + __ LoadRoot(x6, Heap::kUndefinedValueRootIndex); + __ Ldr(w3, FieldMemOperand(x2, Map::kBitField3Offset)); + __ TestAndBranchIfAnySet(w3, Map::ConstructionCounter::kMask, + &slack_tracking); + { + // Initialize all in-object fields with undefined. + __ InitializeFieldsWithFiller(x1, x5, x6); + + // Add the object tag to make the JSObject real. + STATIC_ASSERT(kHeapObjectTag == 1); + __ Add(x0, x0, kHeapObjectTag); + __ Ret(); + } + __ Bind(&slack_tracking); + { + // Decrease generous allocation count. + STATIC_ASSERT(Map::ConstructionCounter::kNext == 32); + __ Sub(w3, w3, 1 << Map::ConstructionCounter::kShift); + __ Str(w3, FieldMemOperand(x2, Map::kBitField3Offset)); + + // Initialize the in-object fields with undefined. + __ Ldrb(x4, FieldMemOperand(x2, Map::kUnusedPropertyFieldsOffset)); + __ Sub(x4, x5, Operand(x4, LSL, kPointerSizeLog2)); + __ InitializeFieldsWithFiller(x1, x4, x6); + + // Initialize the remaining (reserved) fields with one pointer filler map. + __ LoadRoot(x6, Heap::kOnePointerFillerMapRootIndex); + __ InitializeFieldsWithFiller(x1, x5, x6); + + // Add the object tag to make the JSObject real. + STATIC_ASSERT(kHeapObjectTag == 1); + __ Add(x0, x0, kHeapObjectTag); + + // Check if we can finalize the instance size. + Label finalize; + STATIC_ASSERT(Map::kSlackTrackingCounterEnd == 1); + __ TestAndBranchIfAllClear(w3, Map::ConstructionCounter::kMask, &finalize); + __ Ret(); + + // Finalize the instance size. + __ Bind(&finalize); + { + FrameScope scope(masm, StackFrame::INTERNAL); + __ Push(x0, x2); + __ CallRuntime(Runtime::kFinalizeInstanceSize); + __ Pop(x0); + } + __ Ret(); + } + + // Fall back to %AllocateInNewSpace. + __ Bind(&allocate); + { + FrameScope scope(masm, StackFrame::INTERNAL); + STATIC_ASSERT(kSmiTag == 0); + STATIC_ASSERT(kSmiTagSize == 1); + __ Mov(x4, + Operand(x4, LSL, kPointerSizeLog2 + kSmiTagSize + kSmiShiftSize)); + __ Push(x2, x4); + __ CallRuntime(Runtime::kAllocateInNewSpace); + __ Pop(x2); + } + STATIC_ASSERT(kHeapObjectTag == 1); + __ Sub(x0, x0, kHeapObjectTag); + __ Ldrb(x5, FieldMemOperand(x2, Map::kInstanceSizeOffset)); + __ Add(x5, x0, Operand(x5, LSL, kPointerSizeLog2)); + __ B(&done_allocate); + + // Fall back to %NewObject. 
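+  // This path is taken when the new target is not a JSFunction, lacks a
+  // valid initial map, or that map's constructor is not the target; the
+  // generic runtime implementation then sorts out the allocation.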
+  __ Bind(&new_object);
+  __ Push(x1, x3);
+  __ TailCallRuntime(Runtime::kNewObject);
+}
+
+
+void FastNewRestParameterStub::Generate(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- x1 : function
+  //  -- cp : context
+  //  -- fp : frame pointer
+  //  -- lr : return address
+  // -----------------------------------
+  __ AssertFunction(x1);
+
+  // For Ignition we need to skip all possible handler/stub frames until
+  // we reach the JavaScript frame for the function (similar to what the
+  // runtime fallback implementation does). So make x2 point to that
+  // JavaScript frame.
+  {
+    Label loop, loop_entry;
+    __ Mov(x2, fp);
+    __ B(&loop_entry);
+    __ Bind(&loop);
+    __ Ldr(x2, MemOperand(x2, StandardFrameConstants::kCallerFPOffset));
+    __ Bind(&loop_entry);
+    __ Ldr(x3, MemOperand(x2, StandardFrameConstants::kMarkerOffset));
+    __ Cmp(x3, x1);
+    __ B(ne, &loop);
+  }
+
+  // Check if we have rest parameters (only possible if we have an
+  // arguments adaptor frame below the function frame).
+  Label no_rest_parameters;
+  __ Ldr(x2, MemOperand(x2, StandardFrameConstants::kCallerFPOffset));
+  __ Ldr(x3, MemOperand(x2, StandardFrameConstants::kContextOffset));
+  __ Cmp(x3, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+  __ B(ne, &no_rest_parameters);
+
+  // Check if the arguments adaptor frame contains more arguments than
+  // specified by the function's internal formal parameter count.
+  Label rest_parameters;
+  __ Ldrsw(x0, UntagSmiMemOperand(
+                   x2, ArgumentsAdaptorFrameConstants::kLengthOffset));
+  __ Ldr(x1, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
+  __ Ldrsw(
+      x1, FieldMemOperand(x1, SharedFunctionInfo::kFormalParameterCountOffset));
+  __ Subs(x0, x0, x1);
+  __ B(gt, &rest_parameters);
+
+  // Return an empty rest parameter array.
+  __ Bind(&no_rest_parameters);
+  {
+    // ----------- S t a t e -------------
+    //  -- cp : context
+    //  -- lr : return address
+    // -----------------------------------
+
+    // Allocate an empty rest parameter array.
+    Label allocate, done_allocate;
+    __ Allocate(JSArray::kSize, x0, x1, x2, &allocate, TAG_OBJECT);
+    __ Bind(&done_allocate);
+
+    // Setup the rest parameter array in x0.
+    __ LoadNativeContextSlot(Context::JS_ARRAY_FAST_ELEMENTS_MAP_INDEX, x1);
+    __ Str(x1, FieldMemOperand(x0, JSArray::kMapOffset));
+    __ LoadRoot(x1, Heap::kEmptyFixedArrayRootIndex);
+    __ Str(x1, FieldMemOperand(x0, JSArray::kPropertiesOffset));
+    __ Str(x1, FieldMemOperand(x0, JSArray::kElementsOffset));
+    __ Mov(x1, Smi::FromInt(0));
+    __ Str(x1, FieldMemOperand(x0, JSArray::kLengthOffset));
+    STATIC_ASSERT(JSArray::kSize == 4 * kPointerSize);
+    __ Ret();
+
+    // Fall back to %AllocateInNewSpace.
+    __ Bind(&allocate);
+    {
+      FrameScope scope(masm, StackFrame::INTERNAL);
+      __ Push(Smi::FromInt(JSArray::kSize));
+      __ CallRuntime(Runtime::kAllocateInNewSpace);
+    }
+    __ B(&done_allocate);
+  }
+
+  __ Bind(&rest_parameters);
+  {
+    // Compute the pointer to the first rest parameter (skipping the receiver).
+    __ Add(x2, x2, Operand(x0, LSL, kPointerSizeLog2));
+    __ Add(x2, x2, StandardFrameConstants::kCallerSPOffset - 1 * kPointerSize);
+
+    // ----------- S t a t e -------------
+    //  -- cp : context
+    //  -- x0 : number of rest parameters
+    //  -- x2 : pointer to first rest parameters
+    //  -- lr : return address
+    // -----------------------------------
+
+    // Allocate space for the rest parameter array plus the backing store.
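+    // The block holds the JSArray followed directly by its FixedArray
+    // backing store. For scale: with the sizes asserted below
+    // (JSArray::kSize == 4 * kPointerSize) and a two-word FixedArray
+    // header, two rest parameters need 32 + 16 + 2 * 8 = 64 bytes.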
+ Label allocate, done_allocate; + __ Mov(x1, JSArray::kSize + FixedArray::kHeaderSize); + __ Add(x1, x1, Operand(x0, LSL, kPointerSizeLog2)); + __ Allocate(x1, x3, x4, x5, &allocate, TAG_OBJECT); + __ Bind(&done_allocate); + + // Compute arguments.length in x6. + __ SmiTag(x6, x0); + + // Setup the elements array in x3. + __ LoadRoot(x1, Heap::kFixedArrayMapRootIndex); + __ Str(x1, FieldMemOperand(x3, FixedArray::kMapOffset)); + __ Str(x6, FieldMemOperand(x3, FixedArray::kLengthOffset)); + __ Add(x4, x3, FixedArray::kHeaderSize); + { + Label loop, done_loop; + __ Add(x0, x4, Operand(x0, LSL, kPointerSizeLog2)); + __ Bind(&loop); + __ Cmp(x4, x0); + __ B(eq, &done_loop); + __ Ldr(x5, MemOperand(x2, 0 * kPointerSize)); + __ Str(x5, FieldMemOperand(x4, 0 * kPointerSize)); + __ Sub(x2, x2, Operand(1 * kPointerSize)); + __ Add(x4, x4, Operand(1 * kPointerSize)); + __ B(&loop); + __ Bind(&done_loop); + } + + // Setup the rest parameter array in x0. + __ LoadNativeContextSlot(Context::JS_ARRAY_FAST_ELEMENTS_MAP_INDEX, x1); + __ Str(x1, FieldMemOperand(x0, JSArray::kMapOffset)); + __ LoadRoot(x1, Heap::kEmptyFixedArrayRootIndex); + __ Str(x1, FieldMemOperand(x0, JSArray::kPropertiesOffset)); + __ Str(x3, FieldMemOperand(x0, JSArray::kElementsOffset)); + __ Str(x6, FieldMemOperand(x0, JSArray::kLengthOffset)); + STATIC_ASSERT(JSArray::kSize == 4 * kPointerSize); + __ Ret(); + + // Fall back to %AllocateInNewSpace. + __ Bind(&allocate); + { + FrameScope scope(masm, StackFrame::INTERNAL); + __ SmiTag(x0); + __ SmiTag(x1); + __ Push(x0, x2, x1); + __ CallRuntime(Runtime::kAllocateInNewSpace); + __ Mov(x3, x0); + __ Pop(x2, x0); + __ SmiUntag(x0); + } + __ B(&done_allocate); + } +} + + +void FastNewSloppyArgumentsStub::Generate(MacroAssembler* masm) { + // ----------- S t a t e ------------- + // -- x1 : function + // -- cp : context + // -- fp : frame pointer + // -- lr : return address + // ----------------------------------- + __ AssertFunction(x1); + + // TODO(bmeurer): Cleanup to match the FastNewStrictArgumentsStub. + __ Ldr(x2, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset)); + __ Ldrsw( + x2, FieldMemOperand(x2, SharedFunctionInfo::kFormalParameterCountOffset)); + __ Add(x3, fp, Operand(x2, LSL, kPointerSizeLog2)); + __ Add(x3, x3, Operand(StandardFrameConstants::kCallerSPOffset)); + __ SmiTag(x2); + + // x1 : function + // x2 : number of parameters (tagged) + // x3 : parameters pointer + // + // Returns pointer to result object in x0. + + // Make an untagged copy of the parameter count. + // Note: arg_count_smi is an alias of param_count_smi. + Register function = x1; + Register arg_count_smi = x2; + Register param_count_smi = x2; + Register recv_arg = x3; + Register param_count = x7; + __ SmiUntag(param_count, param_count_smi); + + // Check if the calling frame is an arguments adaptor frame. + Register caller_fp = x11; + Register caller_ctx = x12; + Label runtime; + Label adaptor_frame, try_allocate; + __ Ldr(caller_fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); + __ Ldr(caller_ctx, MemOperand(caller_fp, + StandardFrameConstants::kContextOffset)); + __ Cmp(caller_ctx, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)); + __ B(eq, &adaptor_frame); + + // No adaptor, parameter count = argument count. 
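+  // (An arguments adaptor frame only exists when the actual argument count
+  // differs from the formal parameter count; its length slot, read in the
+  // adaptor case below, records the actual count.)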
+ + // x1 function function pointer + // x2 arg_count_smi number of function arguments (smi) + // x3 recv_arg pointer to receiver arguments + // x4 mapped_params number of mapped params, min(params, args) (uninit) + // x7 param_count number of function parameters + // x11 caller_fp caller's frame pointer + // x14 arg_count number of function arguments (uninit) + + Register arg_count = x14; + Register mapped_params = x4; + __ Mov(arg_count, param_count); + __ Mov(mapped_params, param_count); + __ B(&try_allocate); + + // We have an adaptor frame. Patch the parameters pointer. + __ Bind(&adaptor_frame); + __ Ldr(arg_count_smi, + MemOperand(caller_fp, + ArgumentsAdaptorFrameConstants::kLengthOffset)); + __ SmiUntag(arg_count, arg_count_smi); + __ Add(x10, caller_fp, Operand(arg_count, LSL, kPointerSizeLog2)); + __ Add(recv_arg, x10, StandardFrameConstants::kCallerSPOffset); + + // Compute the mapped parameter count = min(param_count, arg_count) + __ Cmp(param_count, arg_count); + __ Csel(mapped_params, param_count, arg_count, lt); + + __ Bind(&try_allocate); + + // x0 alloc_obj pointer to allocated objects: param map, backing + // store, arguments (uninit) + // x1 function function pointer + // x2 arg_count_smi number of function arguments (smi) + // x3 recv_arg pointer to receiver arguments + // x4 mapped_params number of mapped parameters, min(params, args) + // x7 param_count number of function parameters + // x10 size size of objects to allocate (uninit) + // x14 arg_count number of function arguments + + // Compute the size of backing store, parameter map, and arguments object. + // 1. Parameter map, has two extra words containing context and backing + // store. + const int kParameterMapHeaderSize = + FixedArray::kHeaderSize + 2 * kPointerSize; + + // Calculate the parameter map size, assuming it exists. + Register size = x10; + __ Mov(size, Operand(mapped_params, LSL, kPointerSizeLog2)); + __ Add(size, size, kParameterMapHeaderSize); + + // If there are no mapped parameters, set the running size total to zero. + // Otherwise, use the parameter map size calculated earlier. + __ Cmp(mapped_params, 0); + __ CzeroX(size, eq); + + // 2. Add the size of the backing store and arguments object. + __ Add(size, size, Operand(arg_count, LSL, kPointerSizeLog2)); + __ Add(size, size, FixedArray::kHeaderSize + JSSloppyArgumentsObject::kSize); + + // Do the allocation of all three objects in one go. Assign this to x0, as it + // will be returned to the caller. + Register alloc_obj = x0; + __ Allocate(size, alloc_obj, x11, x12, &runtime, TAG_OBJECT); + + // Get the arguments boilerplate from the current (global) context. 
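+  // Two boilerplate maps are available: the plain sloppy arguments map for
+  // the zero-mapped-parameters case and the fast-aliased arguments map whose
+  // elements carry a parameter map aliasing context slots. The CmovX below
+  // selects the aliased map whenever mapped_params is non-zero.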
+ + // x0 alloc_obj pointer to allocated objects (param map, backing + // store, arguments) + // x1 function function pointer + // x2 arg_count_smi number of function arguments (smi) + // x3 recv_arg pointer to receiver arguments + // x4 mapped_params number of mapped parameters, min(params, args) + // x7 param_count number of function parameters + // x11 sloppy_args_map offset to args (or aliased args) map (uninit) + // x14 arg_count number of function arguments + + Register global_ctx = x10; + Register sloppy_args_map = x11; + Register aliased_args_map = x10; + __ Ldr(global_ctx, NativeContextMemOperand()); + + __ Ldr(sloppy_args_map, + ContextMemOperand(global_ctx, Context::SLOPPY_ARGUMENTS_MAP_INDEX)); + __ Ldr( + aliased_args_map, + ContextMemOperand(global_ctx, Context::FAST_ALIASED_ARGUMENTS_MAP_INDEX)); + __ Cmp(mapped_params, 0); + __ CmovX(sloppy_args_map, aliased_args_map, ne); + + // Copy the JS object part. + __ Str(sloppy_args_map, FieldMemOperand(alloc_obj, JSObject::kMapOffset)); + __ LoadRoot(x10, Heap::kEmptyFixedArrayRootIndex); + __ Str(x10, FieldMemOperand(alloc_obj, JSObject::kPropertiesOffset)); + __ Str(x10, FieldMemOperand(alloc_obj, JSObject::kElementsOffset)); + + // Set up the callee in-object property. + __ AssertNotSmi(function); + __ Str(function, + FieldMemOperand(alloc_obj, JSSloppyArgumentsObject::kCalleeOffset)); + + // Use the length and set that as an in-object property. + __ Str(arg_count_smi, + FieldMemOperand(alloc_obj, JSSloppyArgumentsObject::kLengthOffset)); + + // Set up the elements pointer in the allocated arguments object. + // If we allocated a parameter map, "elements" will point there, otherwise + // it will point to the backing store. + + // x0 alloc_obj pointer to allocated objects (param map, backing + // store, arguments) + // x1 function function pointer + // x2 arg_count_smi number of function arguments (smi) + // x3 recv_arg pointer to receiver arguments + // x4 mapped_params number of mapped parameters, min(params, args) + // x5 elements pointer to parameter map or backing store (uninit) + // x6 backing_store pointer to backing store (uninit) + // x7 param_count number of function parameters + // x14 arg_count number of function arguments + + Register elements = x5; + __ Add(elements, alloc_obj, JSSloppyArgumentsObject::kSize); + __ Str(elements, FieldMemOperand(alloc_obj, JSObject::kElementsOffset)); + + // Initialize parameter map. If there are no mapped arguments, we're done. + Label skip_parameter_map; + __ Cmp(mapped_params, 0); + // Set up backing store address, because it is needed later for filling in + // the unmapped arguments. + Register backing_store = x6; + __ CmovX(backing_store, elements, eq); + __ B(eq, &skip_parameter_map); + + __ LoadRoot(x10, Heap::kSloppyArgumentsElementsMapRootIndex); + __ Str(x10, FieldMemOperand(elements, FixedArray::kMapOffset)); + __ Add(x10, mapped_params, 2); + __ SmiTag(x10); + __ Str(x10, FieldMemOperand(elements, FixedArray::kLengthOffset)); + __ Str(cp, FieldMemOperand(elements, + FixedArray::kHeaderSize + 0 * kPointerSize)); + __ Add(x10, elements, Operand(mapped_params, LSL, kPointerSizeLog2)); + __ Add(x10, x10, kParameterMapHeaderSize); + __ Str(x10, FieldMemOperand(elements, + FixedArray::kHeaderSize + 1 * kPointerSize)); + + // Copy the parameter slots and the holes in the arguments. + // We need to fill in mapped_parameter_count slots. Then index the context, + // where parameters are stored in reverse order, at: + // + // MIN_CONTEXT_SLOTS .. 
+  //     MIN_CONTEXT_SLOTS + parameter_count - 1
+  //
+  // The mapped parameter thus needs to get indices:
+  //
+  //   MIN_CONTEXT_SLOTS + parameter_count - 1 ..
+  //       MIN_CONTEXT_SLOTS + parameter_count - mapped_parameter_count
+  //
+  // We loop from right to left.
+
+  // x0   alloc_obj       pointer to allocated objects (param map, backing
+  //                      store, arguments)
+  // x1   function        function pointer
+  // x2   arg_count_smi   number of function arguments (smi)
+  // x3   recv_arg        pointer to receiver arguments
+  // x4   mapped_params   number of mapped parameters, min(params, args)
+  // x5   elements        pointer to parameter map or backing store (uninit)
+  // x6   backing_store   pointer to backing store (uninit)
+  // x7   param_count     number of function parameters
+  // x11  loop_count      parameter loop counter (uninit)
+  // x12  index           parameter index (smi, uninit)
+  // x13  the_hole        hole value (uninit)
+  // x14  arg_count       number of function arguments
+
+  Register loop_count = x11;
+  Register index = x12;
+  Register the_hole = x13;
+  Label parameters_loop, parameters_test;
+  __ Mov(loop_count, mapped_params);
+  __ Add(index, param_count, static_cast<int>(Context::MIN_CONTEXT_SLOTS));
+  __ Sub(index, index, mapped_params);
+  __ SmiTag(index);
+  __ LoadRoot(the_hole, Heap::kTheHoleValueRootIndex);
+  __ Add(backing_store, elements, Operand(loop_count, LSL, kPointerSizeLog2));
+  __ Add(backing_store, backing_store, kParameterMapHeaderSize);
+
+  __ B(&parameters_test);
+
+  __ Bind(&parameters_loop);
+  __ Sub(loop_count, loop_count, 1);
+  __ Mov(x10, Operand(loop_count, LSL, kPointerSizeLog2));
+  __ Add(x10, x10, kParameterMapHeaderSize - kHeapObjectTag);
+  __ Str(index, MemOperand(elements, x10));
+  __ Sub(x10, x10, kParameterMapHeaderSize - FixedArray::kHeaderSize);
+  __ Str(the_hole, MemOperand(backing_store, x10));
+  __ Add(index, index, Smi::FromInt(1));
+  __ Bind(&parameters_test);
+  __ Cbnz(loop_count, &parameters_loop);
+
+  __ Bind(&skip_parameter_map);
+  // Copy arguments header and remaining slots (if there are any).
+  __ LoadRoot(x10, Heap::kFixedArrayMapRootIndex);
+  __ Str(x10, FieldMemOperand(backing_store, FixedArray::kMapOffset));
+  __ Str(arg_count_smi, FieldMemOperand(backing_store,
+                                        FixedArray::kLengthOffset));
+
+  // x0   alloc_obj       pointer to allocated objects (param map, backing
+  //                      store, arguments)
+  // x1   function        function pointer
+  // x2   arg_count_smi   number of function arguments (smi)
+  // x3   recv_arg        pointer to receiver arguments
+  // x4   mapped_params   number of mapped parameters, min(params, args)
+  // x6   backing_store   pointer to backing store (uninit)
+  // x14  arg_count       number of function arguments
+
+  Label arguments_loop, arguments_test;
+  __ Mov(x10, mapped_params);
+  __ Sub(recv_arg, recv_arg, Operand(x10, LSL, kPointerSizeLog2));
+  __ B(&arguments_test);
+
+  __ Bind(&arguments_loop);
+  __ Sub(recv_arg, recv_arg, kPointerSize);
+  __ Ldr(x11, MemOperand(recv_arg));
+  __ Add(x12, backing_store, Operand(x10, LSL, kPointerSizeLog2));
+  __ Str(x11, FieldMemOperand(x12, FixedArray::kHeaderSize));
+  __ Add(x10, x10, 1);
+
+  __ Bind(&arguments_test);
+  __ Cmp(x10, arg_count);
+  __ B(lt, &arguments_loop);
+
+  __ Ret();
+
+  // Do the runtime call to allocate the arguments object.
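+  // If the fast-path allocation failed, the pushed triple (function,
+  // pointer to the receiver's arguments, smi-tagged argument count) forms
+  // the arguments for the runtime function, which presumably rebuilds the
+  // same object in C++.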
+ __ Bind(&runtime); + __ Push(function, recv_arg, arg_count_smi); + __ TailCallRuntime(Runtime::kNewSloppyArguments); +} + + +void FastNewStrictArgumentsStub::Generate(MacroAssembler* masm) { + // ----------- S t a t e ------------- + // -- x1 : function + // -- cp : context + // -- fp : frame pointer + // -- lr : return address + // ----------------------------------- + __ AssertFunction(x1); + + // For Ignition we need to skip all possible handler/stub frames until + // we reach the JavaScript frame for the function (similar to what the + // runtime fallback implementation does). So make x2 point to that + // JavaScript frame. + { + Label loop, loop_entry; + __ Mov(x2, fp); + __ B(&loop_entry); + __ Bind(&loop); + __ Ldr(x2, MemOperand(x2, StandardFrameConstants::kCallerFPOffset)); + __ Bind(&loop_entry); + __ Ldr(x3, MemOperand(x2, StandardFrameConstants::kMarkerOffset)); + __ Cmp(x3, x1); + __ B(ne, &loop); + } + + // Check if we have an arguments adaptor frame below the function frame. + Label arguments_adaptor, arguments_done; + __ Ldr(x3, MemOperand(x2, StandardFrameConstants::kCallerFPOffset)); + __ Ldr(x4, MemOperand(x3, StandardFrameConstants::kContextOffset)); + __ Cmp(x4, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)); + __ B(eq, &arguments_adaptor); + { + __ Ldr(x1, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset)); + __ Ldrsw(x0, FieldMemOperand( + x1, SharedFunctionInfo::kFormalParameterCountOffset)); + __ Add(x2, x2, Operand(x0, LSL, kPointerSizeLog2)); + __ Add(x2, x2, StandardFrameConstants::kCallerSPOffset - 1 * kPointerSize); + } + __ B(&arguments_done); + __ Bind(&arguments_adaptor); + { + __ Ldrsw(x0, UntagSmiMemOperand( + x3, ArgumentsAdaptorFrameConstants::kLengthOffset)); + __ Add(x2, x3, Operand(x0, LSL, kPointerSizeLog2)); + __ Add(x2, x2, StandardFrameConstants::kCallerSPOffset - 1 * kPointerSize); + } + __ Bind(&arguments_done); + + // ----------- S t a t e ------------- + // -- cp : context + // -- x0 : number of rest parameters + // -- x2 : pointer to first rest parameters + // -- lr : return address + // ----------------------------------- + + // Allocate space for the strict arguments object plus the backing store. + Label allocate, done_allocate; + __ Mov(x1, JSStrictArgumentsObject::kSize + FixedArray::kHeaderSize); + __ Add(x1, x1, Operand(x0, LSL, kPointerSizeLog2)); + __ Allocate(x1, x3, x4, x5, &allocate, TAG_OBJECT); + __ Bind(&done_allocate); + + // Compute arguments.length in x6. + __ SmiTag(x6, x0); + + // Setup the elements array in x3. + __ LoadRoot(x1, Heap::kFixedArrayMapRootIndex); + __ Str(x1, FieldMemOperand(x3, FixedArray::kMapOffset)); + __ Str(x6, FieldMemOperand(x3, FixedArray::kLengthOffset)); + __ Add(x4, x3, FixedArray::kHeaderSize); + { + Label loop, done_loop; + __ Add(x0, x4, Operand(x0, LSL, kPointerSizeLog2)); + __ Bind(&loop); + __ Cmp(x4, x0); + __ B(eq, &done_loop); + __ Ldr(x5, MemOperand(x2, 0 * kPointerSize)); + __ Str(x5, FieldMemOperand(x4, 0 * kPointerSize)); + __ Sub(x2, x2, Operand(1 * kPointerSize)); + __ Add(x4, x4, Operand(1 * kPointerSize)); + __ B(&loop); + __ Bind(&done_loop); + } + + // Setup the strict arguments object in x0. 
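+  // The strict arguments object is just the three-word JSObject header plus
+  // a length field (kSize is asserted to be 4 * kPointerSize below); unlike
+  // its sloppy counterpart it has no callee slot, as strict mode code may
+  // not touch arguments.callee.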
+ __ LoadNativeContextSlot(Context::STRICT_ARGUMENTS_MAP_INDEX, x1); + __ Str(x1, FieldMemOperand(x0, JSStrictArgumentsObject::kMapOffset)); + __ LoadRoot(x1, Heap::kEmptyFixedArrayRootIndex); + __ Str(x1, FieldMemOperand(x0, JSStrictArgumentsObject::kPropertiesOffset)); + __ Str(x3, FieldMemOperand(x0, JSStrictArgumentsObject::kElementsOffset)); + __ Str(x6, FieldMemOperand(x0, JSStrictArgumentsObject::kLengthOffset)); + STATIC_ASSERT(JSStrictArgumentsObject::kSize == 4 * kPointerSize); + __ Ret(); + + // Fall back to %AllocateInNewSpace. + __ Bind(&allocate); + { + FrameScope scope(masm, StackFrame::INTERNAL); + __ SmiTag(x0); + __ SmiTag(x1); + __ Push(x0, x2, x1); + __ CallRuntime(Runtime::kAllocateInNewSpace); + __ Mov(x3, x0); + __ Pop(x2, x0); + __ SmiUntag(x0); + } + __ B(&done_allocate); +} + + void LoadGlobalViaContextStub::Generate(MacroAssembler* masm) { Register context = cp; Register result = x0; @@ -5656,11 +5808,10 @@ static void CallApiFunctionAndReturn( __ B(&leave_exit_frame); } - static void CallApiFunctionStubHelper(MacroAssembler* masm, const ParameterCount& argc, bool return_first_arg, - bool call_data_undefined) { + bool call_data_undefined, bool is_lazy) { // ----------- S t a t e ------------- // -- x0 : callee // -- x4 : call_data @@ -5697,8 +5848,10 @@ static void CallApiFunctionStubHelper(MacroAssembler* masm, // FunctionCallbackArguments: context, callee and call data. __ Push(context, callee, call_data); - // Load context from callee - __ Ldr(context, FieldMemOperand(callee, JSFunction::kContextOffset)); + if (!is_lazy) { + // Load context from callee + __ Ldr(context, FieldMemOperand(callee, JSFunction::kContextOffset)); + } if (!call_data_undefined) { __ LoadRoot(call_data, Heap::kUndefinedValueRootIndex); @@ -5783,7 +5936,7 @@ static void CallApiFunctionStubHelper(MacroAssembler* masm, void CallApiFunctionStub::Generate(MacroAssembler* masm) { bool call_data_undefined = this->call_data_undefined(); CallApiFunctionStubHelper(masm, ParameterCount(x3), false, - call_data_undefined); + call_data_undefined, false); } @@ -5791,24 +5944,29 @@ void CallApiAccessorStub::Generate(MacroAssembler* masm) { bool is_store = this->is_store(); int argc = this->argc(); bool call_data_undefined = this->call_data_undefined(); + bool is_lazy = this->is_lazy(); CallApiFunctionStubHelper(masm, ParameterCount(argc), is_store, - call_data_undefined); + call_data_undefined, is_lazy); } void CallApiGetterStub::Generate(MacroAssembler* masm) { // ----------- S t a t e ------------- - // -- sp[0] : name - // -- sp[8 - kArgsLength*8] : PropertyCallbackArguments object + // -- sp[0] : name + // -- sp[8 .. (8 + kArgsLength*8)] : v8::PropertyCallbackInfo::args_ // -- ... - // -- x2 : api_function_address + // -- x2 : api_function_address // ----------------------------------- Register api_function_address = ApiGetterDescriptor::function_address(); DCHECK(api_function_address.is(x2)); + // v8::PropertyCallbackInfo::args_ array and name handle. + const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1; + + // Load address of v8::PropertyAccessorInfo::args_ array and name handle. 
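+  // On entry the name handle is the top stack slot and the args_ array
+  // starts one word above it, so x0 and x1 can be derived straight from the
+  // stack pointer.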
 __ Mov(x0, masm->StackPointer());  // x0 = Handle<Name>
-  __ Add(x1, x0, 1 * kPointerSize);  // x1 = PCA
+  __ Add(x1, x0, 1 * kPointerSize);  // x1 = v8::PCI::args_
 
   const int kApiStackSpace = 1;
 
@@ -5819,20 +5977,22 @@ void CallApiGetterStub::Generate(MacroAssembler* masm) {
   FrameScope frame_scope(masm, StackFrame::MANUAL);
   __ EnterExitFrame(false, x10, kApiStackSpace + kCallApiFunctionSpillSpace);
 
-  // Create PropertyAccessorInfo instance on the stack above the exit frame with
-  // x1 (internal::Object** args_) as the data.
+  // Create v8::PropertyCallbackInfo object on the stack and initialize
+  // its args_ field.
   __ Poke(x1, 1 * kPointerSize);
-  __ Add(x1, masm->StackPointer(), 1 * kPointerSize);  // x1 = AccessorInfo&
-
-  const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
+  __ Add(x1, masm->StackPointer(), 1 * kPointerSize);
+  // x1 = v8::PropertyCallbackInfo&
 
   ExternalReference thunk_ref =
       ExternalReference::invoke_accessor_getter_callback(isolate());
 
   const int spill_offset = 1 + kApiStackSpace;
+  // +3 is to skip prolog, return address and name handle.
+  MemOperand return_value_operand(
+      fp, (PropertyCallbackArguments::kReturnValueOffset + 3) * kPointerSize);
   CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
                            kStackUnwindSpace, NULL, spill_offset,
-                           MemOperand(fp, 6 * kPointerSize), NULL);
+                           return_value_operand, NULL);
 }
diff --git a/deps/v8/src/arm64/cpu-arm64.cc b/deps/v8/src/arm64/cpu-arm64.cc
index cf2cc57215..37bb4a22ba 100644
--- a/deps/v8/src/arm64/cpu-arm64.cc
+++ b/deps/v8/src/arm64/cpu-arm64.cc
@@ -19,8 +19,8 @@ class CacheLineSizes {
     cache_type_register_ = 0;
 #else
     // Copy the content of the cache type register to a core register.
-    __asm__ __volatile__ ("mrs %[ctr], ctr_el0"  // NOLINT
-                          : [ctr] "=r" (cache_type_register_));
+    __asm__ __volatile__("mrs %[ctr], ctr_el0"  // NOLINT
+                         : [ctr] "=r"(cache_type_register_));
 #endif
   }
 
@@ -37,7 +37,6 @@ class CacheLineSizes {
   uint32_t cache_type_register_;
 };
 
-
 void CpuFeatures::FlushICache(void* address, size_t length) {
 #ifdef V8_HOST_ARCH_ARM64
   // The code below assumes user space cache operations are allowed. The goal
diff --git a/deps/v8/src/arm64/deoptimizer-arm64.cc b/deps/v8/src/arm64/deoptimizer-arm64.cc
index 081405037a..3aa1e4dfa1 100644
--- a/deps/v8/src/arm64/deoptimizer-arm64.cc
+++ b/deps/v8/src/arm64/deoptimizer-arm64.cc
@@ -65,30 +65,7 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
 }
 
 
-void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
-  // Set the register values. The values are not important as there are no
-  // callee saved registers in JavaScript frames, so all registers are
-  // spilled. Registers fp and sp are set to the correct values though.
-  for (int i = 0; i < Register::NumRegisters(); i++) {
-    input_->SetRegister(i, 0);
-  }
-
-  // TODO(all): Do we also need to set a value to csp?
-  input_->SetRegister(jssp.code(), reinterpret_cast<intptr_t>(frame->sp()));
-  input_->SetRegister(fp.code(), reinterpret_cast<intptr_t>(frame->fp()));
-
-  for (int i = 0; i < DoubleRegister::kMaxNumRegisters; i++) {
-    input_->SetDoubleRegister(i, 0.0);
-  }
-
-  // Fill the frame content from the actual data on the frame.
- for (unsigned i = 0; i < input_->GetFrameSize(); i += kPointerSize) { - input_->SetFrameSlot(i, Memory::uint64_at(tos + i)); - } -} - - -bool Deoptimizer::HasAlignmentPadding(JSFunction* function) { +bool Deoptimizer::HasAlignmentPadding(SharedFunctionInfo* shared) { // There is no dynamic alignment padding on ARM64 in the input frame. return false; } diff --git a/deps/v8/src/arm64/interface-descriptors-arm64.cc b/deps/v8/src/arm64/interface-descriptors-arm64.cc index 485aa780e3..c6ae37e733 100644 --- a/deps/v8/src/arm64/interface-descriptors-arm64.cc +++ b/deps/v8/src/arm64/interface-descriptors-arm64.cc @@ -56,20 +56,6 @@ const Register StringCompareDescriptor::LeftRegister() { return x1; } const Register StringCompareDescriptor::RightRegister() { return x0; } -const Register ArgumentsAccessReadDescriptor::index() { return x1; } -const Register ArgumentsAccessReadDescriptor::parameter_count() { return x0; } - - -const Register ArgumentsAccessNewDescriptor::function() { return x1; } -const Register ArgumentsAccessNewDescriptor::parameter_count() { return x2; } -const Register ArgumentsAccessNewDescriptor::parameter_pointer() { return x3; } - - -const Register RestParamAccessDescriptor::parameter_count() { return x2; } -const Register RestParamAccessDescriptor::parameter_pointer() { return x3; } -const Register RestParamAccessDescriptor::rest_parameter_index() { return x4; } - - const Register ApiGetterDescriptor::function_address() { return x2; } @@ -98,6 +84,35 @@ void FastNewContextDescriptor::InitializePlatformSpecific( data->InitializePlatformSpecific(arraysize(registers), registers); } +void FastNewObjectDescriptor::InitializePlatformSpecific( + CallInterfaceDescriptorData* data) { + Register registers[] = {x1, x3}; + data->InitializePlatformSpecific(arraysize(registers), registers); +} + +void FastNewRestParameterDescriptor::InitializePlatformSpecific( + CallInterfaceDescriptorData* data) { + // x1: function + Register registers[] = {x1}; + data->InitializePlatformSpecific(arraysize(registers), registers); +} + + +void FastNewSloppyArgumentsDescriptor::InitializePlatformSpecific( + CallInterfaceDescriptorData* data) { + // x1: function + Register registers[] = {x1}; + data->InitializePlatformSpecific(arraysize(registers), registers); +} + + +void FastNewStrictArgumentsDescriptor::InitializePlatformSpecific( + CallInterfaceDescriptorData* data) { + // x1: function + Register registers[] = {x1}; + data->InitializePlatformSpecific(arraysize(registers), registers); +} + void ToNumberDescriptor::InitializePlatformSpecific( CallInterfaceDescriptorData* data) { @@ -116,6 +131,10 @@ const Register ToStringDescriptor::ReceiverRegister() { return x0; } // static +const Register ToNameDescriptor::ReceiverRegister() { return x0; } + + +// static const Register ToObjectDescriptor::ReceiverRegister() { return x0; } @@ -185,13 +204,6 @@ void CreateWeakCellDescriptor::InitializePlatformSpecific( } -void StoreArrayLiteralElementDescriptor::InitializePlatformSpecific( - CallInterfaceDescriptorData* data) { - Register registers[] = {x3, x0}; - data->InitializePlatformSpecific(arraysize(registers), registers); -} - - void CallFunctionDescriptor::InitializePlatformSpecific( CallInterfaceDescriptorData* data) { // x1 function the function to call @@ -465,6 +477,14 @@ void ApiAccessorDescriptor::InitializePlatformSpecific( &default_descriptor); } +void InterpreterDispatchDescriptor::InitializePlatformSpecific( + CallInterfaceDescriptorData* data) { + Register registers[] = { + 
kInterpreterAccumulatorRegister, kInterpreterRegisterFileRegister, + kInterpreterBytecodeOffsetRegister, kInterpreterBytecodeArrayRegister, + kInterpreterDispatchTableRegister}; + data->InitializePlatformSpecific(arraysize(registers), registers); +} void InterpreterPushArgsAndCallDescriptor::InitializePlatformSpecific( CallInterfaceDescriptorData* data) { @@ -476,7 +496,6 @@ void InterpreterPushArgsAndCallDescriptor::InitializePlatformSpecific( data->InitializePlatformSpecific(arraysize(registers), registers); } - void InterpreterPushArgsAndConstructDescriptor::InitializePlatformSpecific( CallInterfaceDescriptorData* data) { Register registers[] = { @@ -488,7 +507,6 @@ void InterpreterPushArgsAndConstructDescriptor::InitializePlatformSpecific( data->InitializePlatformSpecific(arraysize(registers), registers); } - void InterpreterCEntryDescriptor::InitializePlatformSpecific( CallInterfaceDescriptorData* data) { Register registers[] = { diff --git a/deps/v8/src/arm64/macro-assembler-arm64.cc b/deps/v8/src/arm64/macro-assembler-arm64.cc index fbf459db46..953c3fd7f2 100644 --- a/deps/v8/src/arm64/macro-assembler-arm64.cc +++ b/deps/v8/src/arm64/macro-assembler-arm64.cc @@ -1488,18 +1488,15 @@ void MacroAssembler::LoadAccessor(Register dst, Register holder, } -void MacroAssembler::CheckEnumCache(Register object, - Register null_value, - Register scratch0, - Register scratch1, - Register scratch2, - Register scratch3, +void MacroAssembler::CheckEnumCache(Register object, Register scratch0, + Register scratch1, Register scratch2, + Register scratch3, Register scratch4, Label* call_runtime) { - DCHECK(!AreAliased(object, null_value, scratch0, scratch1, scratch2, - scratch3)); + DCHECK(!AreAliased(object, scratch0, scratch1, scratch2, scratch3, scratch4)); Register empty_fixed_array_value = scratch0; Register current_object = scratch1; + Register null_value = scratch4; LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex); Label next, start; @@ -1516,6 +1513,7 @@ void MacroAssembler::CheckEnumCache(Register object, Cmp(enum_length, kInvalidEnumCacheSentinel); B(eq, call_runtime); + LoadRoot(null_value, Heap::kNullValueRootIndex); B(&start); Bind(&next); @@ -1576,10 +1574,9 @@ void MacroAssembler::InNewSpace(Register object, Label* branch) { DCHECK(cond == eq || cond == ne); UseScratchRegisterScope temps(this); - Register temp = temps.AcquireX(); - And(temp, object, ExternalReference::new_space_mask(isolate())); - Cmp(temp, ExternalReference::new_space_start(isolate())); - B(cond, branch); + const int mask = + (1 << MemoryChunk::IN_FROM_SPACE) | (1 << MemoryChunk::IN_TO_SPACE); + CheckPageFlag(object, temps.AcquireSameSizeAs(object), mask, cond, branch); } @@ -1641,6 +1638,20 @@ void MacroAssembler::AssertBoundFunction(Register object) { } +void MacroAssembler::AssertReceiver(Register object) { + if (emit_debug_code()) { + AssertNotSmi(object, kOperandIsASmiAndNotAReceiver); + + UseScratchRegisterScope temps(this); + Register temp = temps.AcquireX(); + + STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE); + CompareObjectType(object, temp, temp, FIRST_JS_RECEIVER_TYPE); + Check(hs, kOperandIsNotAReceiver); + } +} + + void MacroAssembler::AssertUndefinedOrAllocationSite(Register object, Register scratch) { if (emit_debug_code()) { @@ -1679,6 +1690,15 @@ void MacroAssembler::AssertPositiveOrZero(Register value) { } } +void MacroAssembler::AssertNumber(Register value) { + if (emit_debug_code()) { + Label done; + JumpIfSmi(value, &done); + JumpIfHeapNumber(value, &done); + 
Abort(kOperandIsNotANumber);
+    Bind(&done);
+  }
+}
 
 void MacroAssembler::CallStub(CodeStub* stub, TypeFeedbackId ast_id) {
   DCHECK(AllowThisStubCall(stub));  // Stub calls are not allowed in some stubs.
@@ -1727,19 +1747,6 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin) {
 }
 
 
-void MacroAssembler::InvokeBuiltin(int native_context_index, InvokeFlag flag,
-                                   const CallWrapper& call_wrapper) {
-  ASM_LOCATION("MacroAssembler::InvokeBuiltin");
-  // You can't call a builtin without a valid frame.
-  DCHECK(flag == JUMP_FUNCTION || has_frame());
-
-  // Fake a parameter count to avoid emitting code to do the check.
-  ParameterCount expected(0);
-  LoadNativeContextSlot(native_context_index, x1);
-  InvokeFunctionCode(x1, no_reg, expected, expected, flag, call_wrapper);
-}
-
-
 void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
   const Runtime::Function* function = Runtime::FunctionForId(fid);
   DCHECK_EQ(1, function->result_size);
@@ -2423,7 +2430,7 @@ void MacroAssembler::FloodFunctionIfStepping(Register fun, Register new_target,
     }
     Push(fun);
     Push(fun);
-    CallRuntime(Runtime::kDebugPrepareStepInIfStepping, 1);
+    CallRuntime(Runtime::kDebugPrepareStepInIfStepping);
    Pop(fun);
     if (new_target.is_valid()) {
       Pop(new_target);
@@ -3824,6 +3831,65 @@ void MacroAssembler::LoadFromNumberDictionary(Label* miss,
   Ldr(result, FieldMemOperand(scratch2, kValueOffset));
 }
 
+void MacroAssembler::RecordWriteCodeEntryField(Register js_function,
+                                               Register code_entry,
+                                               Register scratch) {
+  const int offset = JSFunction::kCodeEntryOffset;
+
+  // Since a code entry (value) is always in old space, we don't need to update
+  // remembered set. If incremental marking is off, there is nothing for us to
+  // do.
+  if (!FLAG_incremental_marking) return;
+
+  DCHECK(js_function.is(x1));
+  DCHECK(code_entry.is(x7));
+  DCHECK(scratch.is(x5));
+  AssertNotSmi(js_function);
+
+  if (emit_debug_code()) {
+    UseScratchRegisterScope temps(this);
+    Register temp = temps.AcquireX();
+    Add(scratch, js_function, offset - kHeapObjectTag);
+    Ldr(temp, MemOperand(scratch));
+    Cmp(temp, code_entry);
+    Check(eq, kWrongAddressOrValuePassedToRecordWrite);
+  }
+
+  // First, check if a write barrier is even needed. The tests below
+  // catch stores of Smis and stores into young gen.
+  Label done;
+
+  CheckPageFlagClear(code_entry, scratch,
+                     MemoryChunk::kPointersToHereAreInterestingMask, &done);
+  CheckPageFlagClear(js_function, scratch,
+                     MemoryChunk::kPointersFromHereAreInterestingMask, &done);
+
+  const Register dst = scratch;
+  Add(dst, js_function, offset - kHeapObjectTag);
+
+  // Save caller-saved registers. Both input registers (x1 and x7) are caller
+  // saved, so there is no need to push them.
+  PushCPURegList(kCallerSaved);
+
+  int argument_count = 3;
+
+  Mov(x0, js_function);
+  Mov(x1, dst);
+  Mov(x2, ExternalReference::isolate_address(isolate()));
+
+  {
+    AllowExternalCallThatCantCauseGC scope(this);
+    CallCFunction(
+        ExternalReference::incremental_marking_record_write_code_entry_function(
+            isolate()),
+        argument_count);
+  }
+
+  // Restore caller-saved registers.
+  PopCPURegList(kCallerSaved);
+
+  Bind(&done);
+}
 
 void MacroAssembler::RememberedSetHelper(Register object,  // For debug tests.
Register address, @@ -3938,6 +4004,17 @@ int MacroAssembler::SafepointRegisterStackIndex(int reg_code) { } } +void MacroAssembler::CheckPageFlag(const Register& object, + const Register& scratch, int mask, + Condition cc, Label* condition_met) { + And(scratch, object, ~Page::kPageAlignmentMask); + Ldr(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset)); + if (cc == eq) { + TestAndBranchIfAnySet(scratch, mask, condition_met); + } else { + TestAndBranchIfAllClear(scratch, mask, condition_met); + } +} void MacroAssembler::CheckPageFlagSet(const Register& object, const Register& scratch, @@ -4409,9 +4486,9 @@ void MacroAssembler::Abort(BailoutReason reason) { // We don't actually want to generate a pile of code for this, so just // claim there is a stack frame, without generating one. FrameScope scope(this, StackFrame::NONE); - CallRuntime(Runtime::kAbort, 1); + CallRuntime(Runtime::kAbort); } else { - CallRuntime(Runtime::kAbort, 1); + CallRuntime(Runtime::kAbort); } } else { // Load the string to pass to Printf. diff --git a/deps/v8/src/arm64/macro-assembler-arm64.h b/deps/v8/src/arm64/macro-assembler-arm64.h index 78997d6d02..ff41c4f27f 100644 --- a/deps/v8/src/arm64/macro-assembler-arm64.h +++ b/deps/v8/src/arm64/macro-assembler-arm64.h @@ -34,9 +34,9 @@ namespace v8 { namespace internal { // Give alias names to registers for calling conventions. -// TODO(titzer): arm64 is a pain for aliasing; get rid of these macros #define kReturnRegister0 x0 #define kReturnRegister1 x1 +#define kReturnRegister2 x2 #define kJSFunctionRegister x1 #define kContextRegister cp #define kInterpreterAccumulatorRegister x0 @@ -970,6 +970,9 @@ class MacroAssembler : public Assembler { // enabled via --debug-code. void AssertBoundFunction(Register object); + // Abort execution if argument is not a JSReceiver, enabled via --debug-code. + void AssertReceiver(Register object); + // Abort execution if argument is not undefined or an AllocationSite, enabled // via --debug-code. void AssertUndefinedOrAllocationSite(Register object, Register scratch); @@ -981,6 +984,9 @@ class MacroAssembler : public Assembler { // --debug-code. void AssertPositiveOrZero(Register value); + // Abort execution if argument is not a number (heap number or smi). + void AssertNumber(Register value); + void JumpIfHeapNumber(Register object, Label* on_heap_number, SmiCheckType smi_check_type = DONT_DO_SMI_CHECK); void JumpIfNotHeapNumber(Register object, Label* on_not_heap_number, @@ -1138,10 +1144,6 @@ class MacroAssembler : public Assembler { int num_arguments); - // Invoke specified builtin JavaScript function. - void InvokeBuiltin(int native_context_index, InvokeFlag flag, - const CallWrapper& call_wrapper = NullCallWrapper()); - void Jump(Register target); void Jump(Address target, RelocInfo::Mode rmode, Condition cond = al); void Jump(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al); @@ -1586,12 +1588,8 @@ class MacroAssembler : public Assembler { void LeaveFrame(StackFrame::Type type); // Returns map with validated enum cache in object register. - void CheckEnumCache(Register object, - Register null_value, - Register scratch0, - Register scratch1, - Register scratch2, - Register scratch3, + void CheckEnumCache(Register object, Register scratch0, Register scratch1, + Register scratch2, Register scratch3, Register scratch4, Label* call_runtime); // AllocationMemento support. 
Arrays may have an associated @@ -1730,6 +1728,9 @@ class MacroAssembler : public Assembler { Peek(src, SafepointRegisterStackIndex(dst.code()) * kPointerSize); } + void CheckPageFlag(const Register& object, const Register& scratch, int mask, + Condition cc, Label* condition_met); + void CheckPageFlagSet(const Register& object, const Register& scratch, int mask, @@ -1793,6 +1794,11 @@ class MacroAssembler : public Assembler { pointers_to_here_check_for_value); } + // Notify the garbage collector that we wrote a code entry into a + // JSFunction. Only scratch is clobbered by the operation. + void RecordWriteCodeEntryField(Register js_function, Register code_entry, + Register scratch); + void RecordWriteForMap( Register object, Register map, diff --git a/deps/v8/src/arm64/simulator-arm64.cc b/deps/v8/src/arm64/simulator-arm64.cc index 8f72669f49..81dbdf8850 100644 --- a/deps/v8/src/arm64/simulator-arm64.cc +++ b/deps/v8/src/arm64/simulator-arm64.cc @@ -15,6 +15,7 @@ #include "src/disasm.h" #include "src/macro-assembler.h" #include "src/ostreams.h" +#include "src/runtime/runtime-utils.h" namespace v8 { namespace internal { @@ -533,12 +534,6 @@ void Simulator::TearDown(HashMap* i_cache, Redirection* first) { // uses the ObjectPair structure. // The simulator assumes all runtime calls return two 64-bits values. If they // don't, register x1 is clobbered. This is fine because x1 is caller-saved. -struct ObjectPair { - int64_t res0; - int64_t res1; -}; - - typedef ObjectPair (*SimulatorRuntimeCall)(int64_t arg0, int64_t arg1, int64_t arg2, @@ -548,6 +543,11 @@ typedef ObjectPair (*SimulatorRuntimeCall)(int64_t arg0, int64_t arg6, int64_t arg7); +typedef ObjectTriple (*SimulatorRuntimeTripleCall)(int64_t arg0, int64_t arg1, + int64_t arg2, int64_t arg3, + int64_t arg4, int64_t arg5, + int64_t arg6, int64_t arg7); + typedef int64_t (*SimulatorRuntimeCompareCall)(double arg1, double arg2); typedef double (*SimulatorRuntimeFPFPCall)(double arg1, double arg2); typedef double (*SimulatorRuntimeFPCall)(double arg1); @@ -589,8 +589,10 @@ void Simulator::DoRuntimeCall(Instruction* instr) { UNREACHABLE(); break; - case ExternalReference::BUILTIN_CALL: { - // Object* f(v8::internal::Arguments). + case ExternalReference::BUILTIN_CALL: + case ExternalReference::BUILTIN_CALL_PAIR: { + // Object* f(v8::internal::Arguments) or + // ObjectPair f(v8::internal::Arguments). TraceSim("Type: BUILTIN_CALL\n"); SimulatorRuntimeCall target = reinterpret_cast<SimulatorRuntimeCall>(external); @@ -607,13 +609,41 @@ void Simulator::DoRuntimeCall(Instruction* instr) { xreg(4), xreg(5), xreg(6), xreg(7)); ObjectPair result = target(xreg(0), xreg(1), xreg(2), xreg(3), xreg(4), xreg(5), xreg(6), xreg(7)); - TraceSim("Returned: {0x%" PRIx64 ", 0x%" PRIx64 "}\n", - result.res0, result.res1); + TraceSim("Returned: {%p, %p}\n", result.x, result.y); +#ifdef DEBUG + CorruptAllCallerSavedCPURegisters(); +#endif + set_xreg(0, reinterpret_cast<int64_t>(result.x)); + set_xreg(1, reinterpret_cast<int64_t>(result.y)); + break; + } + + case ExternalReference::BUILTIN_CALL_TRIPLE: { + // ObjectTriple f(v8::internal::Arguments). + TraceSim("Type: BUILTIN_CALL TRIPLE\n"); + SimulatorRuntimeTripleCall target = + reinterpret_cast<SimulatorRuntimeTripleCall>(external); + + // We don't know how many arguments are being passed, but we can + // pass 8 without touching the stack. They will be ignored by the + // host function if they aren't used. 
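+      // ObjectTriple does not fit in registers, so AAPCS64 returns it in
+      // memory: the caller supplies the address of the result slot in x8,
+      // the indirect result location register.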
+      TraceSim(
+          "Arguments: "
+          "0x%016" PRIx64 ", 0x%016" PRIx64 ", "
+          "0x%016" PRIx64 ", 0x%016" PRIx64 ", "
+          "0x%016" PRIx64 ", 0x%016" PRIx64 ", "
+          "0x%016" PRIx64 ", 0x%016" PRIx64,
+          xreg(0), xreg(1), xreg(2), xreg(3), xreg(4), xreg(5), xreg(6),
+          xreg(7));
+      // Return location passed in x8.
+      ObjectTriple* sim_result = reinterpret_cast<ObjectTriple*>(xreg(8));
+      ObjectTriple result = target(xreg(0), xreg(1), xreg(2), xreg(3), xreg(4),
+                                   xreg(5), xreg(6), xreg(7));
+      TraceSim("Returned: {%p, %p, %p}\n", result.x, result.y, result.z);
 #ifdef DEBUG
       CorruptAllCallerSavedCPURegisters();
 #endif
-      set_xreg(0, result.res0);
-      set_xreg(1, result.res1);
+      *sim_result = result;
       break;
     }
 
@@ -1966,10 +1996,10 @@ void Simulator::VisitDataProcessing1Source(Instruction* instr) {
   switch (instr->Mask(DataProcessing1SourceMask)) {
     case RBIT_w:
-      set_wreg(dst, ReverseBits(wreg(src)));
+      set_wreg(dst, base::bits::ReverseBits(wreg(src)));
       break;
     case RBIT_x:
-      set_xreg(dst, ReverseBits(xreg(src)));
+      set_xreg(dst, base::bits::ReverseBits(xreg(src)));
       break;
     case REV16_w:
       set_wreg(dst, ReverseBytes(wreg(src), 1));
@@ -3510,7 +3540,8 @@ void Simulator::Debug() {
           HeapObject* obj = reinterpret_cast<HeapObject*>(*cur);
           int64_t value = *cur;
           Heap* current_heap = isolate_->heap();
-          if (((value & 1) == 0) || current_heap->Contains(obj)) {
+          if (((value & 1) == 0) ||
+              current_heap->ContainsSlow(obj->address())) {
             PrintF(" (");
             if ((value & kSmiTagMask) == 0) {
               STATIC_ASSERT(kSmiValueSize == 32);
diff --git a/deps/v8/src/arm64/utils-arm64.h b/deps/v8/src/arm64/utils-arm64.h
index 1e1c0a33c2..35d9824837 100644
--- a/deps/v8/src/arm64/utils-arm64.h
+++ b/deps/v8/src/arm64/utils-arm64.h
@@ -55,19 +55,6 @@ int MaskToBit(uint64_t mask);
 
 
 template <typename T>
-T ReverseBits(T value) {
-  DCHECK((sizeof(value) == 1) || (sizeof(value) == 2) || (sizeof(value) == 4) ||
-         (sizeof(value) == 8));
-  T result = 0;
-  for (unsigned i = 0; i < (sizeof(value) * 8); i++) {
-    result = (result << 1) | (value & 1);
-    value >>= 1;
-  }
-  return result;
-}
-
-
-template <typename T>
 T ReverseBytes(T value, int block_bytes_log2) {
   DCHECK((sizeof(value) == 4) || (sizeof(value) == 8));
   DCHECK((1U << block_bytes_log2) <= sizeof(value));