Diffstat (limited to 'deps/v8/src/x64/code-stubs-x64.cc')
-rw-r--r-- | deps/v8/src/x64/code-stubs-x64.cc | 1122
1 file changed, 551 insertions(+), 571 deletions(-)
diff --git a/deps/v8/src/x64/code-stubs-x64.cc b/deps/v8/src/x64/code-stubs-x64.cc
index 1416fbd8b9..0942b2fb3c 100644
--- a/deps/v8/src/x64/code-stubs-x64.cc
+++ b/deps/v8/src/x64/code-stubs-x64.cc
@@ -14,6 +14,7 @@
 #include "src/regexp/jsregexp.h"
 #include "src/regexp/regexp-macro-assembler.h"
 #include "src/runtime/runtime.h"
+#include "src/x64/code-stubs-x64.h"
 
 namespace v8 {
 namespace internal {
@@ -595,47 +596,46 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
 
 
 void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
-  // Stack layout:
-  //  rsp[0]  : return address
-  //  rsp[8]  : number of parameters (tagged)
-  //  rsp[16] : receiver displacement
-  //  rsp[24] : function
+  // rcx : number of parameters (tagged)
+  // rdx : parameters pointer
+  // rdi : function
+  // rsp[0] : return address
   // Registers used over the whole function:
   //  rbx: the mapped parameter count (untagged)
   //  rax: the allocated object (tagged).
   Factory* factory = isolate()->factory();
 
-  StackArgumentsAccessor args(rsp, 3, ARGUMENTS_DONT_CONTAIN_RECEIVER);
-  __ SmiToInteger64(rbx, args.GetArgumentOperand(2));
+  DCHECK(rdi.is(ArgumentsAccessNewDescriptor::function()));
+  DCHECK(rcx.is(ArgumentsAccessNewDescriptor::parameter_count()));
+  DCHECK(rdx.is(ArgumentsAccessNewDescriptor::parameter_pointer()));
+
+  __ SmiToInteger64(rbx, rcx);  // rbx = parameter count (untagged)
 
   // Check if the calling frame is an arguments adaptor frame.
-  Label runtime;
-  Label adaptor_frame, try_allocate;
-  __ movp(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
-  __ movp(rcx, Operand(rdx, StandardFrameConstants::kContextOffset));
-  __ Cmp(rcx, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+  Label adaptor_frame, try_allocate, runtime;
+  __ movp(rax, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
+  __ movp(r8, Operand(rax, StandardFrameConstants::kContextOffset));
+  __ Cmp(r8, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
   __ j(equal, &adaptor_frame);
 
   // No adaptor, parameter count = argument count.
-  __ movp(rcx, rbx);
+  __ movp(r11, rbx);
   __ jmp(&try_allocate, Label::kNear);
 
   // We have an adaptor frame. Patch the parameters pointer.
   __ bind(&adaptor_frame);
-  __ SmiToInteger64(rcx,
-                    Operand(rdx,
-                            ArgumentsAdaptorFrameConstants::kLengthOffset));
-  __ leap(rdx, Operand(rdx, rcx, times_pointer_size,
-                       StandardFrameConstants::kCallerSPOffset));
-  __ movp(args.GetArgumentOperand(1), rdx);
+  __ SmiToInteger64(
+      r11, Operand(rax, ArgumentsAdaptorFrameConstants::kLengthOffset));
+  __ leap(rdx, Operand(rax, r11, times_pointer_size,
+                       StandardFrameConstants::kCallerSPOffset));
 
   // rbx = parameter count (untagged)
-  // rcx = argument count (untagged)
-  // Compute the mapped parameter count = min(rbx, rcx) in rbx.
-  __ cmpp(rbx, rcx);
+  // r11 = argument count (untagged)
+  // Compute the mapped parameter count = min(rbx, r11) in rbx.
+  __ cmpp(rbx, r11);
   __ j(less_equal, &try_allocate, Label::kNear);
-  __ movp(rbx, rcx);
+  __ movp(rbx, r11);
 
   __ bind(&try_allocate);
 
@@ -651,66 +651,65 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
   __ bind(&no_parameter_map);
 
   // 2. Backing store.
-  __ leap(r8, Operand(r8, rcx, times_pointer_size, FixedArray::kHeaderSize));
+  __ leap(r8, Operand(r8, r11, times_pointer_size, FixedArray::kHeaderSize));
 
   // 3. Arguments object.
   __ addp(r8, Immediate(Heap::kSloppyArgumentsObjectSize));
 
   // Do the allocation of all three objects in one go.
-  __ Allocate(r8, rax, rdx, rdi, &runtime, TAG_OBJECT);
+  __ Allocate(r8, rax, r9, no_reg, &runtime, TAG_OBJECT);
 
   // rax = address of new object(s) (tagged)
-  // rcx = argument count (untagged)
-  // Get the arguments map from the current native context into rdi.
+  // r11 = argument count (untagged)
+  // Get the arguments map from the current native context into r9.
   Label has_mapped_parameters, instantiate;
-  __ movp(rdi, Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
-  __ movp(rdi, FieldOperand(rdi, GlobalObject::kNativeContextOffset));
+  __ movp(r9, Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+  __ movp(r9, FieldOperand(r9, GlobalObject::kNativeContextOffset));
   __ testp(rbx, rbx);
   __ j(not_zero, &has_mapped_parameters, Label::kNear);
 
   const int kIndex = Context::SLOPPY_ARGUMENTS_MAP_INDEX;
-  __ movp(rdi, Operand(rdi, Context::SlotOffset(kIndex)));
+  __ movp(r9, Operand(r9, Context::SlotOffset(kIndex)));
   __ jmp(&instantiate, Label::kNear);
 
   const int kAliasedIndex = Context::FAST_ALIASED_ARGUMENTS_MAP_INDEX;
   __ bind(&has_mapped_parameters);
-  __ movp(rdi, Operand(rdi, Context::SlotOffset(kAliasedIndex)));
+  __ movp(r9, Operand(r9, Context::SlotOffset(kAliasedIndex)));
   __ bind(&instantiate);
 
   // rax = address of new object (tagged)
   // rbx = mapped parameter count (untagged)
-  // rcx = argument count (untagged)
-  // rdi = address of arguments map (tagged)
-  __ movp(FieldOperand(rax, JSObject::kMapOffset), rdi);
+  // r11 = argument count (untagged)
+  // r9 = address of arguments map (tagged)
+  __ movp(FieldOperand(rax, JSObject::kMapOffset), r9);
   __ LoadRoot(kScratchRegister, Heap::kEmptyFixedArrayRootIndex);
   __ movp(FieldOperand(rax, JSObject::kPropertiesOffset), kScratchRegister);
   __ movp(FieldOperand(rax, JSObject::kElementsOffset), kScratchRegister);
 
   // Set up the callee in-object property.
   STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
-  __ movp(rdx, args.GetArgumentOperand(0));
-  __ AssertNotSmi(rdx);
+  __ AssertNotSmi(rdi);
   __ movp(FieldOperand(rax, JSObject::kHeaderSize +
-                       Heap::kArgumentsCalleeIndex * kPointerSize),
-          rdx);
+                                Heap::kArgumentsCalleeIndex * kPointerSize),
+          rdi);
 
   // Use the length (smi tagged) and set that as an in-object property too.
-  // Note: rcx is tagged from here on.
+  // Note: r11 is tagged from here on.
   STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
-  __ Integer32ToSmi(rcx, rcx);
+  __ Integer32ToSmi(r11, r11);
   __ movp(FieldOperand(rax, JSObject::kHeaderSize +
-                       Heap::kArgumentsLengthIndex * kPointerSize),
-          rcx);
+                                Heap::kArgumentsLengthIndex * kPointerSize),
+          r11);
 
   // Set up the elements pointer in the allocated arguments object.
-  // If we allocated a parameter map, edi will point there, otherwise to the
+  // If we allocated a parameter map, rdi will point there, otherwise to the
   // backing store.
   __ leap(rdi, Operand(rax, Heap::kSloppyArgumentsObjectSize));
   __ movp(FieldOperand(rax, JSObject::kElementsOffset), rdi);
 
   // rax = address of new object (tagged)
   // rbx = mapped parameter count (untagged)
-  // rcx = argument count (tagged)
+  // r11 = argument count (tagged)
   // rdi = address of parameter map or backing store (tagged)
 
   // Initialize parameter map. If there are no mapped arguments, we're done.
@@ -740,48 +739,42 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
   // Load tagged parameter count into r9.
   __ Integer32ToSmi(r9, rbx);
   __ Move(r8, Smi::FromInt(Context::MIN_CONTEXT_SLOTS));
-  __ addp(r8, args.GetArgumentOperand(2));
+  __ addp(r8, rcx);
   __ subp(r8, r9);
-  __ Move(r11, factory->the_hole_value());
-  __ movp(rdx, rdi);
+  __ movp(rcx, rdi);
   __ leap(rdi, Operand(rdi, rbx, times_pointer_size, kParameterMapHeaderSize));
-  // r9 = loop variable (tagged)
+  __ SmiToInteger64(r9, r9);
+  // r9 = loop variable (untagged)
   // r8 = mapping index (tagged)
-  // r11 = the hole value
-  // rdx = address of parameter map (tagged)
+  // rcx = address of parameter map (tagged)
   // rdi = address of backing store (tagged)
   __ jmp(&parameters_test, Label::kNear);
 
   __ bind(&parameters_loop);
-  __ SmiSubConstant(r9, r9, Smi::FromInt(1));
-  __ SmiToInteger64(kScratchRegister, r9);
-  __ movp(FieldOperand(rdx, kScratchRegister,
-                       times_pointer_size,
-                       kParameterMapHeaderSize),
+  __ subp(r9, Immediate(1));
+  __ LoadRoot(kScratchRegister, Heap::kTheHoleValueRootIndex);
+  __ movp(FieldOperand(rcx, r9, times_pointer_size, kParameterMapHeaderSize),
          r8);
-  __ movp(FieldOperand(rdi, kScratchRegister,
-                       times_pointer_size,
-                       FixedArray::kHeaderSize),
-          r11);
+  __ movp(FieldOperand(rdi, r9, times_pointer_size, FixedArray::kHeaderSize),
+          kScratchRegister);
   __ SmiAddConstant(r8, r8, Smi::FromInt(1));
   __ bind(&parameters_test);
-  __ SmiTest(r9);
+  __ testp(r9, r9);
   __ j(not_zero, &parameters_loop, Label::kNear);
 
   __ bind(&skip_parameter_map);
 
-  // rcx = argument count (tagged)
+  // r11 = argument count (tagged)
   // rdi = address of backing store (tagged)
   // Copy arguments header and remaining slots (if there are any).
   __ Move(FieldOperand(rdi, FixedArray::kMapOffset),
          factory->fixed_array_map());
-  __ movp(FieldOperand(rdi, FixedArray::kLengthOffset), rcx);
+  __ movp(FieldOperand(rdi, FixedArray::kLengthOffset), r11);
 
   Label arguments_loop, arguments_test;
   __ movp(r8, rbx);
-  __ movp(rdx, args.GetArgumentOperand(1));
-  // Untag rcx for the loop below.
-  __ SmiToInteger64(rcx, rcx);
+  // Untag r11 for the loop below.
+  __ SmiToInteger64(r11, r11);
   __ leap(kScratchRegister, Operand(r8, times_pointer_size, 0));
   __ subp(rdx, kScratchRegister);
   __ jmp(&arguments_test, Label::kNear);
@@ -796,73 +789,56 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
   __ addp(r8, Immediate(1));
 
   __ bind(&arguments_test);
-  __ cmpp(r8, rcx);
+  __ cmpp(r8, r11);
   __ j(less, &arguments_loop, Label::kNear);
 
-  // Return and remove the on-stack parameters.
-  __ ret(3 * kPointerSize);
+  // Return.
+  __ ret(0);
 
   // Do the runtime call to allocate the arguments object.
-  // rcx = argument count (untagged)
+  // r11 = argument count (untagged)
   __ bind(&runtime);
-  __ Integer32ToSmi(rcx, rcx);
-  __ movp(args.GetArgumentOperand(2), rcx);  // Patch argument count.
+  __ Integer32ToSmi(r11, r11);
+  __ PopReturnAddressTo(rax);
+  __ Push(rdi);  // Push function.
+  __ Push(rdx);  // Push parameters pointer.
+  __ Push(r11);  // Push parameter count.
+  __ PushReturnAddressFrom(rax);
  __ TailCallRuntime(Runtime::kNewSloppyArguments, 3, 1);
 }
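The hunks above move GenerateNewSloppyFast from stack-passed parameters to the rcx/rdx/rdi register convention, but the object it builds is unchanged: a sloppy arguments object whose first min(parameter count, argument count) elements alias context slots through a parameter map, with the rest in a plain backing store. A standalone C++ sketch of that aliasing (illustrative names and layout only, not V8's actual object model):

#include <cstdio>
#include <vector>

struct SloppyArguments {
  std::vector<int> context_slots;  // stand-in for mapped parameter storage
  std::vector<int> parameter_map;  // arg index -> context slot, or -1 (hole)
  std::vector<int> backing_store;  // unmapped arguments

  int Get(size_t i) const {
    int slot = parameter_map[i];
    return slot >= 0 ? context_slots[slot] : backing_store[i];
  }
};

int main() {
  // Two mapped parameters, three actual arguments (the adaptor-frame case).
  SloppyArguments args;
  args.context_slots = {10, 20};
  args.parameter_map = {0, 1, -1};  // third argument is unmapped (the hole)
  args.backing_store = {0, 0, 30};
  for (size_t i = 0; i < 3; ++i)
    std::printf("arguments[%zu] = %d\n", i, args.Get(i));
  // A context write is visible through arguments[0]; this aliasing is what
  // the parameter map built in the loop above exists to provide.
  args.context_slots[0] = 99;
  std::printf("after write: arguments[0] = %d\n", args.Get(0));
}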
 
 
 void ArgumentsAccessStub::GenerateNewSloppySlow(MacroAssembler* masm) {
-  // rsp[0] : return address
-  // rsp[8] : number of parameters
-  // rsp[16] : receiver displacement
-  // rsp[24] : function
+  // rcx : number of parameters (tagged)
+  // rdx : parameters pointer
+  // rdi : function
+  // rsp[0] : return address
 
-  // Check if the calling frame is an arguments adaptor frame.
-  Label runtime;
-  __ movp(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
-  __ movp(rcx, Operand(rdx, StandardFrameConstants::kContextOffset));
-  __ Cmp(rcx, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
-  __ j(not_equal, &runtime);
-
-  // Patch the arguments.length and the parameters pointer.
-  StackArgumentsAccessor args(rsp, 3, ARGUMENTS_DONT_CONTAIN_RECEIVER);
-  __ movp(rcx, Operand(rdx, ArgumentsAdaptorFrameConstants::kLengthOffset));
-  __ movp(args.GetArgumentOperand(2), rcx);
-  __ SmiToInteger64(rcx, rcx);
-  __ leap(rdx, Operand(rdx, rcx, times_pointer_size,
-                       StandardFrameConstants::kCallerSPOffset));
-  __ movp(args.GetArgumentOperand(1), rdx);
-
-  __ bind(&runtime);
-  __ TailCallRuntime(Runtime::kNewSloppyArguments, 3, 1);
-}
-
-
-void RestParamAccessStub::GenerateNew(MacroAssembler* masm) {
-  // rsp[0]  : return address
-  // rsp[8]  : language mode
-  // rsp[16] : index of rest parameter
-  // rsp[24] : number of parameters
-  // rsp[32] : receiver displacement
+  DCHECK(rdi.is(ArgumentsAccessNewDescriptor::function()));
+  DCHECK(rcx.is(ArgumentsAccessNewDescriptor::parameter_count()));
+  DCHECK(rdx.is(ArgumentsAccessNewDescriptor::parameter_pointer()));
 
   // Check if the calling frame is an arguments adaptor frame.
   Label runtime;
-  __ movp(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
-  __ movp(rcx, Operand(rdx, StandardFrameConstants::kContextOffset));
-  __ Cmp(rcx, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+  __ movp(rbx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
+  __ movp(rax, Operand(rbx, StandardFrameConstants::kContextOffset));
+  __ Cmp(rax, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
   __ j(not_equal, &runtime);
 
   // Patch the arguments.length and the parameters pointer.
-  StackArgumentsAccessor args(rsp, 4, ARGUMENTS_DONT_CONTAIN_RECEIVER);
-  __ movp(rcx, Operand(rdx, ArgumentsAdaptorFrameConstants::kLengthOffset));
-  __ movp(args.GetArgumentOperand(1), rcx);
-  __ SmiToInteger64(rcx, rcx);
-  __ leap(rdx, Operand(rdx, rcx, times_pointer_size,
+  StackArgumentsAccessor args(rsp, 3, ARGUMENTS_DONT_CONTAIN_RECEIVER);
+  __ movp(rcx, Operand(rbx, ArgumentsAdaptorFrameConstants::kLengthOffset));
+  __ SmiToInteger64(rax, rcx);
+  __ leap(rdx, Operand(rbx, rax, times_pointer_size,
                        StandardFrameConstants::kCallerSPOffset));
-  __ movp(args.GetArgumentOperand(0), rdx);
 
   __ bind(&runtime);
-  __ TailCallRuntime(Runtime::kNewRestParam, 4, 1);
+  __ PopReturnAddressTo(rax);
+  __ Push(rdi);  // Push function.
+  __ Push(rdx);  // Push parameters pointer.
+  __ Push(rcx);  // Push parameter count.
+  __ PushReturnAddressFrom(rax);
+  __ TailCallRuntime(Runtime::kNewSloppyArguments, 3, 1);
 }
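Because the inputs now arrive in registers, every runtime fall-through in these stubs has to materialize them on the stack beneath the return address before tail-calling. A toy model of that PopReturnAddressTo / Push / PushReturnAddressFrom shuffle (a plain vector standing in for the machine stack; not real V8 code):

#include <cstdint>
#include <vector>

void TailCallWithArgs(std::vector<intptr_t>& stack, intptr_t function,
                      intptr_t params, intptr_t count) {
  intptr_t ret = stack.back();  // PopReturnAddressTo(rax)
  stack.pop_back();
  stack.push_back(function);    // Push(rdi)
  stack.push_back(params);      // Push(rdx)
  stack.push_back(count);       // Push(rcx)
  stack.push_back(ret);         // PushReturnAddressFrom(rax)
  // A jump to the runtime entry now sees three arguments sitting under the
  // original return address, exactly as if the caller had pushed them.
}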
 
 
@@ -928,46 +904,45 @@ void LoadIndexedStringStub::Generate(MacroAssembler* masm) {
 
 
 void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
-  // rsp[0]  : return address
-  // rsp[8]  : number of parameters
-  // rsp[16] : receiver displacement
-  // rsp[24] : function
+  // rcx : number of parameters (tagged)
+  // rdx : parameters pointer
+  // rdi : function
+  // rsp[0] : return address
+
+  DCHECK(rdi.is(ArgumentsAccessNewDescriptor::function()));
+  DCHECK(rcx.is(ArgumentsAccessNewDescriptor::parameter_count()));
+  DCHECK(rdx.is(ArgumentsAccessNewDescriptor::parameter_pointer()));
 
   // Check if the calling frame is an arguments adaptor frame.
   Label adaptor_frame, try_allocate, runtime;
-  __ movp(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
-  __ movp(rcx, Operand(rdx, StandardFrameConstants::kContextOffset));
-  __ Cmp(rcx, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+  __ movp(rbx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
+  __ movp(rax, Operand(rbx, StandardFrameConstants::kContextOffset));
+  __ Cmp(rax, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
   __ j(equal, &adaptor_frame);
 
   // Get the length from the frame.
-  StackArgumentsAccessor args(rsp, 3, ARGUMENTS_DONT_CONTAIN_RECEIVER);
-  __ movp(rcx, args.GetArgumentOperand(2));
-  __ SmiToInteger64(rcx, rcx);
+  __ SmiToInteger64(rax, rcx);
   __ jmp(&try_allocate);
 
   // Patch the arguments.length and the parameters pointer.
   __ bind(&adaptor_frame);
-  __ movp(rcx, Operand(rdx, ArgumentsAdaptorFrameConstants::kLengthOffset));
-
-  __ movp(args.GetArgumentOperand(2), rcx);
-  __ SmiToInteger64(rcx, rcx);
-  __ leap(rdx, Operand(rdx, rcx, times_pointer_size,
-                       StandardFrameConstants::kCallerSPOffset));
-  __ movp(args.GetArgumentOperand(1), rdx);
+  __ movp(rcx, Operand(rbx, ArgumentsAdaptorFrameConstants::kLengthOffset));
+  __ SmiToInteger64(rax, rcx);
+  __ leap(rdx, Operand(rbx, rax, times_pointer_size,
+                       StandardFrameConstants::kCallerSPOffset));
 
   // Try the new space allocation. Start out with computing the size of
   // the arguments object and the elements array.
   Label add_arguments_object;
   __ bind(&try_allocate);
-  __ testp(rcx, rcx);
+  __ testp(rax, rax);
   __ j(zero, &add_arguments_object, Label::kNear);
-  __ leap(rcx, Operand(rcx, times_pointer_size, FixedArray::kHeaderSize));
+  __ leap(rax, Operand(rax, times_pointer_size, FixedArray::kHeaderSize));
   __ bind(&add_arguments_object);
-  __ addp(rcx, Immediate(Heap::kStrictArgumentsObjectSize));
+  __ addp(rax, Immediate(Heap::kStrictArgumentsObjectSize));
 
   // Do the allocation of both objects in one go.
-  __ Allocate(rcx, rax, rdx, rbx, &runtime, TAG_OBJECT);
+  __ Allocate(rax, rax, rbx, no_reg, &runtime, TAG_OBJECT);
 
   // Get the arguments map from the current native context.
   __ movp(rdi, Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
@@ -982,7 +957,6 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
 
   // Get the length (smi tagged) and set that as an in-object property too.
   STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
-  __ movp(rcx, args.GetArgumentOperand(2));
   __ movp(FieldOperand(rax, JSObject::kHeaderSize +
                        Heap::kArgumentsLengthIndex * kPointerSize),
           rcx);
@@ -992,18 +966,14 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
   __ testp(rcx, rcx);
   __ j(zero, &done);
 
-  // Get the parameters pointer from the stack.
-  __ movp(rdx, args.GetArgumentOperand(1));
-
   // Set up the elements pointer in the allocated arguments object and
   // initialize the header in the elements fixed array.
   __ leap(rdi, Operand(rax, Heap::kStrictArgumentsObjectSize));
   __ movp(FieldOperand(rax, JSObject::kElementsOffset), rdi);
   __ LoadRoot(kScratchRegister, Heap::kFixedArrayMapRootIndex);
   __ movp(FieldOperand(rdi, FixedArray::kMapOffset), kScratchRegister);
-
-  __ movp(FieldOperand(rdi, FixedArray::kLengthOffset), rcx);
+  __ movp(FieldOperand(rdi, FixedArray::kLengthOffset), rcx);
 
   // Untag the length for the loop below.
   __ SmiToInteger64(rcx, rcx);
@@ -1017,12 +987,17 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
   __ decp(rcx);
   __ j(not_zero, &loop);
 
-  // Return and remove the on-stack parameters.
+  // Return.
   __ bind(&done);
-  __ ret(3 * kPointerSize);
+  __ ret(0);
 
   // Do the runtime call to allocate the arguments object.
   __ bind(&runtime);
+  __ PopReturnAddressTo(rax);
+  __ Push(rdi);  // Push function.
+  __ Push(rdx);  // Push parameters pointer.
+  __ Push(rcx);  // Push parameter count.
+  __ PushReturnAddressFrom(rax);
   __ TailCallRuntime(Runtime::kNewStrictArguments, 3, 1);
 }
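GenerateNewStrict sizes its allocation before calling Allocate: zero arguments need only the fixed object, otherwise a FixedArray header plus one pointer per argument is added on top. A sketch of that computation with invented stand-in constants (the real values come from Heap:: and FixedArray::, and the strict object has no parameter map at all):

#include <cstddef>

constexpr size_t kPointerSize = 8;
constexpr size_t kFixedArrayHeaderSize = 2 * kPointerSize;  // map + length
constexpr size_t kStrictArgumentsObjectSize = 5 * kPointerSize;  // assumed

// Mirrors the try_allocate / add_arguments_object arithmetic above.
size_t StrictArgumentsAllocationSize(size_t argc) {
  size_t elements =
      argc == 0 ? 0 : kFixedArrayHeaderSize + argc * kPointerSize;
  return elements + kStrictArgumentsObjectSize;
}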
 
 
@@ -1761,24 +1736,16 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
   __ Push(rax);
 
   // Figure out which native to call and setup the arguments.
-  if (cc == equal && strict()) {
+  if (cc == equal) {
     __ PushReturnAddressFrom(rcx);
-    __ TailCallRuntime(Runtime::kStrictEquals, 2, 1);
+    __ TailCallRuntime(strict() ? Runtime::kStrictEquals : Runtime::kEquals, 2,
+                       1);
   } else {
-    Builtins::JavaScript builtin;
-    if (cc == equal) {
-      builtin = Builtins::EQUALS;
-    } else {
-      builtin =
-          is_strong(strength()) ? Builtins::COMPARE_STRONG : Builtins::COMPARE;
-      __ Push(Smi::FromInt(NegativeComparisonResult(cc)));
-    }
-
+    __ Push(Smi::FromInt(NegativeComparisonResult(cc)));
     __ PushReturnAddressFrom(rcx);
-
-    // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
-    // tagged as a small integer.
-    __ InvokeBuiltin(builtin, JUMP_FUNCTION);
+    __ TailCallRuntime(
+        is_strong(strength()) ? Runtime::kCompare_Strong : Runtime::kCompare, 3,
+        1);
   }
 
   __ bind(&miss);
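The rewritten fall-through above replaces InvokeBuiltin with tail calls to Runtime::kEquals / Runtime::kCompare, which yield -1 (less), 0 (equal) or 1 (greater) tagged as a smi. A plain C++ sketch of how a single three-way compare covers all four ordering conditions (stand-in code; NaN and the pushed NegativeComparisonResult marker are deliberately out of scope here):

#include <cassert>

enum Condition { kLess, kLessEqual, kGreater, kGreaterEqual };

// Stand-in for the runtime entry: -1 (less), 0 (equal), 1 (greater).
int ThreeWayCompare(double left, double right) {
  return left < right ? -1 : (left > right ? 1 : 0);
}

bool Test(Condition cc, double left, double right) {
  int r = ThreeWayCompare(left, right);
  switch (cc) {
    case kLess:         return r < 0;
    case kLessEqual:    return r <= 0;
    case kGreater:      return r > 0;
    case kGreaterEqual: return r >= 0;
  }
  return false;
}

int main() {
  assert(Test(kLess, 1, 2));
  assert(Test(kGreaterEqual, 2, 2));
  assert(!Test(kGreater, 1, 2));
}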
@@ -1849,28 +1816,26 @@ static void GenerateRecordCallTarget(MacroAssembler* masm, bool is_super) {
   __ j(equal, &done, Label::kFar);
   __ CompareRoot(FieldOperand(r11, HeapObject::kMapOffset),
                  Heap::kWeakCellMapRootIndex);
-  __ j(not_equal, FLAG_pretenuring_call_new ? &miss : &check_allocation_site);
+  __ j(not_equal, &check_allocation_site);
 
   // If the weak cell is cleared, we have a new chance to become monomorphic.
   __ CheckSmi(FieldOperand(r11, WeakCell::kValueOffset));
   __ j(equal, &initialize);
   __ jmp(&megamorphic);
 
-  if (!FLAG_pretenuring_call_new) {
-    __ bind(&check_allocation_site);
-    // If we came here, we need to see if we are the array function.
-    // If we didn't have a matching function, and we didn't find the megamorph
-    // sentinel, then we have in the slot either some other function or an
-    // AllocationSite.
-    __ CompareRoot(FieldOperand(r11, 0), Heap::kAllocationSiteMapRootIndex);
-    __ j(not_equal, &miss);
+  __ bind(&check_allocation_site);
+  // If we came here, we need to see if we are the array function.
+  // If we didn't have a matching function, and we didn't find the megamorph
+  // sentinel, then we have in the slot either some other function or an
+  // AllocationSite.
+  __ CompareRoot(FieldOperand(r11, 0), Heap::kAllocationSiteMapRootIndex);
+  __ j(not_equal, &miss);
 
-    // Make sure the function is the Array() function
-    __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, r11);
-    __ cmpp(rdi, r11);
-    __ j(not_equal, &megamorphic);
-    __ jmp(&done);
-  }
+  // Make sure the function is the Array() function
+  __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, r11);
+  __ cmpp(rdi, r11);
+  __ j(not_equal, &megamorphic);
+  __ jmp(&done);
 
   __ bind(&miss);
 
@@ -1889,23 +1854,20 @@ static void GenerateRecordCallTarget(MacroAssembler* masm, bool is_super) {
   // indicate the ElementsKind if function is the Array constructor.
   __ bind(&initialize);
-  if (!FLAG_pretenuring_call_new) {
-    // Make sure the function is the Array() function
-    __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, r11);
-    __ cmpp(rdi, r11);
-    __ j(not_equal, &not_array_function);
-
-    CreateAllocationSiteStub create_stub(isolate);
-    CallStubInRecordCallTarget(masm, &create_stub, is_super);
-    __ jmp(&done_no_smi_convert);
+  // Make sure the function is the Array() function
+  __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, r11);
+  __ cmpp(rdi, r11);
+  __ j(not_equal, &not_array_function);
 
-    __ bind(&not_array_function);
-  }
-
-  CreateWeakCellStub create_stub(isolate);
+  CreateAllocationSiteStub create_stub(isolate);
   CallStubInRecordCallTarget(masm, &create_stub, is_super);
   __ jmp(&done_no_smi_convert);
 
+  __ bind(&not_array_function);
+  CreateWeakCellStub weak_cell_stub(isolate);
+  CallStubInRecordCallTarget(masm, &weak_cell_stub, is_super);
+  __ jmp(&done_no_smi_convert);
+
   __ bind(&done);
   __ Integer32ToSmi(rdx, rdx);
@@ -1928,36 +1890,10 @@ static void EmitContinueIfStrictOrNative(MacroAssembler* masm, Label* cont) {
 }
 
 
-static void EmitSlowCase(Isolate* isolate,
-                         MacroAssembler* masm,
-                         StackArgumentsAccessor* args,
-                         int argc,
-                         Label* non_function) {
-  // Check for function proxy.
-  __ CmpInstanceType(rcx, JS_FUNCTION_PROXY_TYPE);
-  __ j(not_equal, non_function);
-  __ PopReturnAddressTo(rcx);
-  __ Push(rdi);  // put proxy as additional argument under return address
-  __ PushReturnAddressFrom(rcx);
-  __ Set(rax, argc + 1);
-  __ Set(rbx, 0);
-  __ GetBuiltinEntry(rdx, Builtins::CALL_FUNCTION_PROXY);
-  {
-    Handle<Code> adaptor =
-        masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
-    __ jmp(adaptor, RelocInfo::CODE_TARGET);
-  }
-
-  // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
-  // of the original receiver from the call site).
-  __ bind(non_function);
-  __ movp(args->GetReceiverOperand(), rdi);
+static void EmitSlowCase(MacroAssembler* masm, StackArgumentsAccessor* args,
+                         int argc) {
   __ Set(rax, argc);
-  __ Set(rbx, 0);
-  __ GetBuiltinEntry(rdx, Builtins::CALL_NON_FUNCTION);
-  Handle<Code> adaptor =
-      isolate->builtins()->ArgumentsAdaptorTrampoline();
-  __ Jump(adaptor, RelocInfo::CODE_TARGET);
+  __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
 }
 
 
@@ -1982,13 +1918,12 @@ static void CallFunctionNoFeedback(MacroAssembler* masm,
   // rdi : the function to call
 
   // wrap_and_call can only be true if we are compiling a monomorphic method.
-  Isolate* isolate = masm->isolate();
-  Label slow, non_function, wrap, cont;
+  Label slow, wrap, cont;
   StackArgumentsAccessor args(rsp, argc);
 
   if (needs_checks) {
     // Check that the function really is a JavaScript function.
-    __ JumpIfSmi(rdi, &non_function);
+    __ JumpIfSmi(rdi, &slow);
 
     // Goto slow case if we do not have a function.
     __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
@@ -2023,7 +1958,7 @@ static void CallFunctionNoFeedback(MacroAssembler* masm,
   if (needs_checks) {
     // Slow-case: Non-function called.
     __ bind(&slow);
-    EmitSlowCase(isolate, masm, &args, argc, &non_function);
+    EmitSlowCase(masm, &args, argc);
   }
 
   if (call_as_method) {
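With FLAG_pretenuring_call_new gone, GenerateRecordCallTarget above always implements the same per-slot state machine: a feedback slot only ever moves forward from uninitialized to monomorphic (a weak reference to one target, or an AllocationSite for the Array function) and then to megamorphic. An enum-based C++ sketch of those transitions (toy representation; V8 stores sentinel values, WeakCells and AllocationSites in the vector itself):

struct FeedbackSlot {
  enum State { kUninitialized, kMonomorphic, kAllocationSite, kMegamorphic };
  State state = kUninitialized;
  const void* target = nullptr;  // stand-in for the cached function

  void Record(const void* fn, bool is_array_function) {
    switch (state) {
      case kUninitialized:
        state = is_array_function ? kAllocationSite : kMonomorphic;
        target = fn;
        break;
      case kMonomorphic:
      case kAllocationSite:
        if (target != fn) state = kMegamorphic;  // transitions never revert
        break;
      case kMegamorphic:
        break;  // terminal state
    }
  }
};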
@@ -2044,34 +1979,26 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
   // rax : number of arguments
   // rbx : feedback vector
   // rcx : original constructor (for IsSuperConstructorCall)
   // rdx : slot in feedback vector (Smi, for RecordCallTarget)
   // rdi : constructor function
-  Label slow, non_function_call;
 
-  // Check that function is not a smi.
-  __ JumpIfSmi(rdi, &non_function_call);
-  // Check that function is a JSFunction.
+  Label non_function;
+  // Check that the constructor is not a smi.
+  __ JumpIfSmi(rdi, &non_function);
+  // Check that constructor is a JSFunction.
   __ CmpObjectType(rdi, JS_FUNCTION_TYPE, r11);
-  __ j(not_equal, &slow);
+  __ j(not_equal, &non_function);
 
   if (RecordCallTarget()) {
     GenerateRecordCallTarget(masm, IsSuperConstructorCall());
 
     __ SmiToInteger32(rdx, rdx);
-    if (FLAG_pretenuring_call_new) {
-      // Put the AllocationSite from the feedback vector into ebx.
-      // By adding kPointerSize we encode that we know the AllocationSite
-      // entry is at the feedback vector slot given by rdx + 1.
-      __ movp(rbx, FieldOperand(rbx, rdx, times_pointer_size,
-                                FixedArray::kHeaderSize + kPointerSize));
-    } else {
-      Label feedback_register_initialized;
-      // Put the AllocationSite from the feedback vector into rbx, or undefined.
-      __ movp(rbx, FieldOperand(rbx, rdx, times_pointer_size,
-                                FixedArray::kHeaderSize));
-      __ CompareRoot(FieldOperand(rbx, 0), Heap::kAllocationSiteMapRootIndex);
-      __ j(equal, &feedback_register_initialized);
-      __ LoadRoot(rbx, Heap::kUndefinedValueRootIndex);
-      __ bind(&feedback_register_initialized);
-    }
+    Label feedback_register_initialized;
+    // Put the AllocationSite from the feedback vector into rbx, or undefined.
+    __ movp(rbx, FieldOperand(rbx, rdx, times_pointer_size,
+                              FixedArray::kHeaderSize));
+    __ CompareRoot(FieldOperand(rbx, 0), Heap::kAllocationSiteMapRootIndex);
+    __ j(equal, &feedback_register_initialized);
+    __ LoadRoot(rbx, Heap::kUndefinedValueRootIndex);
+    __ bind(&feedback_register_initialized);
 
     __ AssertUndefinedOrAllocationSite(rbx);
   }
@@ -2083,64 +2010,29 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
     __ movp(rdx, rdi);
   }
 
-  // Jump to the function-specific construct stub.
-  Register jmp_reg = rcx;
-  __ movp(jmp_reg, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
-  __ movp(jmp_reg, FieldOperand(jmp_reg,
-                                SharedFunctionInfo::kConstructStubOffset));
-  __ leap(jmp_reg, FieldOperand(jmp_reg, Code::kHeaderSize));
-  __ jmp(jmp_reg);
-
-  // rdi: called object
-  // rax: number of arguments
-  // r11: object map
-  Label do_call;
-  __ bind(&slow);
-  __ CmpInstanceType(r11, JS_FUNCTION_PROXY_TYPE);
-  __ j(not_equal, &non_function_call);
-  __ GetBuiltinEntry(rdx, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
-  __ jmp(&do_call);
-
-  __ bind(&non_function_call);
-  __ GetBuiltinEntry(rdx, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
-  __ bind(&do_call);
-  // Set expected number of arguments to zero (not changing rax).
-  __ Set(rbx, 0);
-  __ Jump(isolate()->builtins()->ArgumentsAdaptorTrampoline(),
-          RelocInfo::CODE_TARGET);
-}
-
+  // Tail call to the function-specific construct stub (still in the caller
+  // context at this point).
+  __ movp(rcx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+  __ movp(rcx, FieldOperand(rcx, SharedFunctionInfo::kConstructStubOffset));
+  __ leap(rcx, FieldOperand(rcx, Code::kHeaderSize));
+  __ jmp(rcx);
 
-static void EmitLoadTypeFeedbackVector(MacroAssembler* masm, Register vector) {
-  __ movp(vector, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
-  __ movp(vector, FieldOperand(vector, JSFunction::kSharedFunctionInfoOffset));
-  __ movp(vector, FieldOperand(vector,
-                               SharedFunctionInfo::kFeedbackVectorOffset));
+  __ bind(&non_function);
+  __ movp(rdx, rdi);
+  __ Jump(isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
 }
 
 
-void CallIC_ArrayStub::Generate(MacroAssembler* masm) {
+void CallICStub::HandleArrayCase(MacroAssembler* masm, Label* miss) {
   // rdi - function
-  // rdx - slot id (as integer)
+  // rdx - slot id
   // rbx - vector
-  Label miss;
-  int argc = arg_count();
-  ParameterCount actual(argc);
-
-  __ SmiToInteger32(rdx, rdx);
-
-  __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, rcx);
-  __ cmpp(rdi, rcx);
-  __ j(not_equal, &miss);
+  // rcx - allocation site (loaded from vector[slot]).
+  __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, r8);
+  __ cmpp(rdi, r8);
+  __ j(not_equal, miss);
 
   __ movp(rax, Immediate(arg_count()));
-  __ movp(rcx, FieldOperand(rbx, rdx, times_pointer_size,
-                            FixedArray::kHeaderSize));
-  // Verify that ecx contains an AllocationSite
-  Factory* factory = masm->isolate()->factory();
-  __ Cmp(FieldOperand(rcx, HeapObject::kMapOffset),
-         factory->allocation_site_map());
-  __ j(not_equal, &miss);
 
   // Increment the call count for monomorphic function calls.
   __ SmiAddConstant(FieldOperand(rbx, rdx, times_pointer_size,
@@ -2151,18 +2043,6 @@ void CallIC_ArrayStub::Generate(MacroAssembler* masm) {
   __ movp(rdx, rdi);
   ArrayConstructorStub stub(masm->isolate(), arg_count());
   __ TailCallStub(&stub);
-
-  __ bind(&miss);
-  GenerateMiss(masm);
-
-  // The slow case, we need this no matter what to complete a call after a miss.
-  CallFunctionNoFeedback(masm,
-                         arg_count(),
-                         true,
-                         CallAsMethod());
-
-  // Unreachable.
-  __ int3();
 }
 
 
@@ -2176,7 +2056,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
   const int generic_offset =
       FixedArray::OffsetOfElementAt(TypeFeedbackVector::kGenericCountIndex);
   Label extra_checks_or_miss, slow_start;
-  Label slow, non_function, wrap, cont;
+  Label slow, wrap, cont;
   Label have_js_function;
   int argc = arg_count();
   StackArgumentsAccessor args(rsp, argc);
@@ -2231,7 +2111,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
   __ InvokeFunction(rdi, actual, JUMP_FUNCTION, NullCallWrapper());
 
   __ bind(&slow);
-  EmitSlowCase(isolate, masm, &args, argc, &non_function);
+  EmitSlowCase(masm, &args, argc);
 
   if (CallAsMethod()) {
     __ bind(&wrap);
@@ -2239,11 +2119,21 @@ void CallICStub::Generate(MacroAssembler* masm) {
   }
 
   __ bind(&extra_checks_or_miss);
-  Label uninitialized, miss;
+  Label uninitialized, miss, not_allocation_site;
 
   __ Cmp(rcx, TypeFeedbackVector::MegamorphicSentinel(isolate));
   __ j(equal, &slow_start);
 
+  // Check if we have an allocation site.
+  __ CompareRoot(FieldOperand(rcx, HeapObject::kMapOffset),
+                 Heap::kAllocationSiteMapRootIndex);
+  __ j(not_equal, &not_allocation_site);
+
+  // We have an allocation site.
+  HandleArrayCase(masm, &miss);
+
+  __ bind(&not_allocation_site);
+
   // The following cases attempt to handle MISS cases without going to the
   // runtime.
   if (FLAG_trace_ic) {
@@ -2312,7 +2202,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
   // the slow case
   __ bind(&slow_start);
   // Check that function is not a smi.
-  __ JumpIfSmi(rdi, &non_function);
+  __ JumpIfSmi(rdi, &slow);
   // Check that function is a JSFunction.
   __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
   __ j(not_equal, &slow);
@@ -2333,10 +2223,7 @@ void CallICStub::GenerateMiss(MacroAssembler* masm) {
   __ Push(rdx);
 
   // Call the entry.
-  Runtime::FunctionId id = GetICState() == DEFAULT
-                               ? Runtime::kCallIC_Miss
-                               : Runtime::kCallIC_Customization_Miss;
-  __ CallRuntime(id, 3);
+  __ CallRuntime(Runtime::kCallIC_Miss, 3);
 
   // Move result to edi and exit the internal frame.
   __ movp(rdi, rax);
@@ -2684,220 +2571,108 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
 }
 
 
-void InstanceofStub::Generate(MacroAssembler* masm) {
-  // Implements "value instanceof function" operator.
-  // Expected input state with no inline cache:
-  //   rsp[0]  : return address
-  //   rsp[8]  : function pointer
-  //   rsp[16] : value
-  // Expected input state with an inline one-element cache:
-  //   rsp[0]  : return address
-  //   rsp[8]  : offset from return address to location of inline cache
-  //   rsp[16] : function pointer
-  //   rsp[24] : value
-  // Returns a bitwise zero to indicate that the value
-  // is and instance of the function and anything else to
-  // indicate that the value is not an instance.
-
-  // Fixed register usage throughout the stub.
-  Register object = rax;     // Object (lhs).
-  Register map = rbx;        // Map of the object.
-  Register function = rdx;   // Function (rhs).
-  Register prototype = rdi;  // Prototype of the function.
-  Register scratch = rcx;
-
-  static const int kOffsetToMapCheckValue = 2;
-  static const int kOffsetToResultValue = kPointerSize == kInt64Size ? 18 : 14;
-  // The last 4 bytes of the instruction sequence
-  //   movp(rdi, FieldOperand(rax, HeapObject::kMapOffset))
-  //   Move(kScratchRegister, Factory::the_hole_value())
-  // in front of the hole value address.
-  static const unsigned int kWordBeforeMapCheckValue =
-      kPointerSize == kInt64Size ? 0xBA49FF78 : 0xBA41FF78;
-  // The last 4 bytes of the instruction sequence
-  //   __ j(not_equal, &cache_miss);
-  //   __ LoadRoot(ToRegister(instr->result()), Heap::kTheHoleValueRootIndex);
-  // before the offset of the hole value in the root array.
-  static const unsigned int kWordBeforeResultValue =
-      kPointerSize == kInt64Size ? 0x458B4906 : 0x458B4106;
-
-  int extra_argument_offset = HasCallSiteInlineCheck() ? 1 : 0;
-
-  DCHECK_EQ(object.code(), InstanceofStub::left().code());
-  DCHECK_EQ(function.code(), InstanceofStub::right().code());
-
-  // Get the object and function - they are always both needed.
-  // Go slow case if the object is a smi.
-  Label slow;
-  StackArgumentsAccessor args(rsp, 2 + extra_argument_offset,
-                              ARGUMENTS_DONT_CONTAIN_RECEIVER);
-  if (!HasArgsInRegisters()) {
-    __ movp(object, args.GetArgumentOperand(0));
-    __ movp(function, args.GetArgumentOperand(1));
-  }
-  __ JumpIfSmi(object, &slow);
-
-  // Check that the left hand is a JS object. Leave its map in rax.
-  __ CmpObjectType(object, FIRST_SPEC_OBJECT_TYPE, map);
-  __ j(below, &slow);
-  __ CmpInstanceType(map, LAST_SPEC_OBJECT_TYPE);
-  __ j(above, &slow);
-
-  // If there is a call site cache don't look in the global cache, but do the
-  // real lookup and update the call site cache.
-  if (!HasCallSiteInlineCheck() && !ReturnTrueFalseObject()) {
-    // Look up the function and the map in the instanceof cache.
-    Label miss;
-    __ CompareRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
-    __ j(not_equal, &miss, Label::kNear);
-    __ CompareRoot(map, Heap::kInstanceofCacheMapRootIndex);
-    __ j(not_equal, &miss, Label::kNear);
-    __ LoadRoot(rax, Heap::kInstanceofCacheAnswerRootIndex);
-    __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
-    __ bind(&miss);
-  }
+void InstanceOfStub::Generate(MacroAssembler* masm) {
+  Register const object = rdx;              // Object (lhs).
+  Register const function = rax;            // Function (rhs).
+  Register const object_map = rcx;          // Map of {object}.
+  Register const function_map = r8;         // Map of {function}.
+  Register const function_prototype = rdi;  // Prototype of {function}.
+
+  DCHECK(object.is(InstanceOfDescriptor::LeftRegister()));
+  DCHECK(function.is(InstanceOfDescriptor::RightRegister()));
+
+  // Check if {object} is a smi.
+  Label object_is_smi;
+  __ JumpIfSmi(object, &object_is_smi, Label::kNear);
+
+  // Lookup the {function} and the {object} map in the global instanceof cache.
+  // Note: This is safe because we clear the global instanceof cache whenever
+  // we change the prototype of any object.
+  Label fast_case, slow_case;
+  __ movp(object_map, FieldOperand(object, HeapObject::kMapOffset));
+  __ CompareRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
+  __ j(not_equal, &fast_case, Label::kNear);
+  __ CompareRoot(object_map, Heap::kInstanceofCacheMapRootIndex);
+  __ j(not_equal, &fast_case, Label::kNear);
+  __ LoadRoot(rax, Heap::kInstanceofCacheAnswerRootIndex);
+  __ ret(0);
 
-  // Get the prototype of the function.
-  __ TryGetFunctionPrototype(function, prototype, &slow, true);
-
-  // Check that the function prototype is a JS object.
-  __ JumpIfSmi(prototype, &slow);
-  __ CmpObjectType(prototype, FIRST_SPEC_OBJECT_TYPE, kScratchRegister);
-  __ j(below, &slow);
-  __ CmpInstanceType(kScratchRegister, LAST_SPEC_OBJECT_TYPE);
-  __ j(above, &slow);
-
-  // Update the global instanceof or call site inlined cache with the current
-  // map and function. The cached answer will be set when it is known below.
-  if (!HasCallSiteInlineCheck()) {
-    __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
-    __ StoreRoot(map, Heap::kInstanceofCacheMapRootIndex);
-  } else {
-    // The constants for the code patching are based on push instructions
-    // at the call site.
-    DCHECK(!HasArgsInRegisters());
-    // Get return address and delta to inlined map check.
-    __ movq(kScratchRegister, StackOperandForReturnAddress(0));
-    __ subp(kScratchRegister, args.GetArgumentOperand(2));
-    if (FLAG_debug_code) {
-      __ movl(scratch, Immediate(kWordBeforeMapCheckValue));
-      __ cmpl(Operand(kScratchRegister, kOffsetToMapCheckValue - 4), scratch);
-      __ Assert(equal, kInstanceofStubUnexpectedCallSiteCacheCheck);
-    }
-    __ movp(kScratchRegister,
-            Operand(kScratchRegister, kOffsetToMapCheckValue));
-    __ movp(Operand(kScratchRegister, 0), map);
-
-    __ movp(r8, map);
-    // Scratch points at the cell payload. Calculate the start of the object.
-    __ subp(kScratchRegister, Immediate(Cell::kValueOffset - 1));
-    __ RecordWriteField(kScratchRegister, Cell::kValueOffset, r8, function,
-                        kDontSaveFPRegs, OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
-  }
+  // If {object} is a smi we can safely return false if {function} is a JS
+  // function, otherwise we have to miss to the runtime and throw an exception.
+  __ bind(&object_is_smi);
+  __ JumpIfSmi(function, &slow_case);
+  __ CmpObjectType(function, JS_FUNCTION_TYPE, function_map);
+  __ j(not_equal, &slow_case);
+  __ LoadRoot(rax, Heap::kFalseValueRootIndex);
+  __ ret(0);
 
-  // Loop through the prototype chain looking for the function prototype.
-  __ movp(scratch, FieldOperand(map, Map::kPrototypeOffset));
-  Label loop, is_instance, is_not_instance;
-  __ LoadRoot(kScratchRegister, Heap::kNullValueRootIndex);
+  // Fast-case: The {function} must be a valid JSFunction.
+  __ bind(&fast_case);
+  __ JumpIfSmi(function, &slow_case);
+  __ CmpObjectType(function, JS_FUNCTION_TYPE, function_map);
+  __ j(not_equal, &slow_case);
+
+  // Ensure that {function} has an instance prototype.
+  __ testb(FieldOperand(function_map, Map::kBitFieldOffset),
+           Immediate(1 << Map::kHasNonInstancePrototype));
+  __ j(not_zero, &slow_case);
+
+  // Ensure that {function} is not bound.
+  Register const shared_info = kScratchRegister;
+  __ movp(shared_info,
+          FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
+  __ TestBitSharedFunctionInfoSpecialField(
+      shared_info, SharedFunctionInfo::kCompilerHintsOffset,
+      SharedFunctionInfo::kBoundFunction);
+  __ j(not_zero, &slow_case);
+
+  // Get the "prototype" (or initial map) of the {function}.
+  __ movp(function_prototype,
+          FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
+  __ AssertNotSmi(function_prototype);
+
+  // Resolve the prototype if the {function} has an initial map. Afterwards the
+  // {function_prototype} will be either the JSReceiver prototype object or the
+  // hole value, which means that no instances of the {function} were created so
+  // far and hence we should return false.
+  Label function_prototype_valid;
+  Register const function_prototype_map = kScratchRegister;
+  __ CmpObjectType(function_prototype, MAP_TYPE, function_prototype_map);
+  __ j(not_equal, &function_prototype_valid, Label::kNear);
+  __ movp(function_prototype,
+          FieldOperand(function_prototype, Map::kPrototypeOffset));
+  __ bind(&function_prototype_valid);
+  __ AssertNotSmi(function_prototype);
+
+  // Update the global instanceof cache with the current {object} map and
+  // {function}. The cached answer will be set when it is known below.
+  __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
+  __ StoreRoot(object_map, Heap::kInstanceofCacheMapRootIndex);
+
+  // Loop through the prototype chain looking for the {function} prototype.
+  // Assume true, and change to false if not found.
+  Register const object_prototype = object_map;
+  Label done, loop;
+  __ LoadRoot(rax, Heap::kTrueValueRootIndex);
   __ bind(&loop);
-  __ cmpp(scratch, prototype);
-  __ j(equal, &is_instance, Label::kNear);
-  __ cmpp(scratch, kScratchRegister);
-  // The code at is_not_instance assumes that kScratchRegister contains a
-  // non-zero GCable value (the null object in this case).
-  __ j(equal, &is_not_instance, Label::kNear);
-  __ movp(scratch, FieldOperand(scratch, HeapObject::kMapOffset));
-  __ movp(scratch, FieldOperand(scratch, Map::kPrototypeOffset));
-  __ jmp(&loop);
-
-  __ bind(&is_instance);
-  if (!HasCallSiteInlineCheck()) {
-    __ xorl(rax, rax);
-    // Store bitwise zero in the cache. This is a Smi in GC terms.
-    STATIC_ASSERT(kSmiTag == 0);
-    __ StoreRoot(rax, Heap::kInstanceofCacheAnswerRootIndex);
-    if (ReturnTrueFalseObject()) {
-      __ LoadRoot(rax, Heap::kTrueValueRootIndex);
-    }
-  } else {
-    // Store offset of true in the root array at the inline check site.
-    int true_offset = 0x100 +
-        (Heap::kTrueValueRootIndex << kPointerSizeLog2) - kRootRegisterBias;
-    // Assert it is a 1-byte signed value.
-    DCHECK(true_offset >= 0 && true_offset < 0x100);
-    __ movl(rax, Immediate(true_offset));
-    __ movq(kScratchRegister, StackOperandForReturnAddress(0));
-    __ subp(kScratchRegister, args.GetArgumentOperand(2));
-    __ movb(Operand(kScratchRegister, kOffsetToResultValue), rax);
-    if (FLAG_debug_code) {
-      __ movl(rax, Immediate(kWordBeforeResultValue));
-      __ cmpl(Operand(kScratchRegister, kOffsetToResultValue - 4), rax);
-      __ Assert(equal, kInstanceofStubUnexpectedCallSiteCacheMov);
-    }
-    if (!ReturnTrueFalseObject()) {
-      __ Set(rax, 0);
-    }
-  }
-  __ ret(((HasArgsInRegisters() ? 0 : 2) + extra_argument_offset) *
-         kPointerSize);
-
-  __ bind(&is_not_instance);
-  if (!HasCallSiteInlineCheck()) {
-    // We have to store a non-zero value in the cache.
-    __ StoreRoot(kScratchRegister, Heap::kInstanceofCacheAnswerRootIndex);
-    if (ReturnTrueFalseObject()) {
-      __ LoadRoot(rax, Heap::kFalseValueRootIndex);
-    }
-  } else {
-    // Store offset of false in the root array at the inline check site.
-    int false_offset = 0x100 +
-        (Heap::kFalseValueRootIndex << kPointerSizeLog2) - kRootRegisterBias;
-    // Assert it is a 1-byte signed value.
-    DCHECK(false_offset >= 0 && false_offset < 0x100);
-    __ movl(rax, Immediate(false_offset));
-    __ movq(kScratchRegister, StackOperandForReturnAddress(0));
-    __ subp(kScratchRegister, args.GetArgumentOperand(2));
-    __ movb(Operand(kScratchRegister, kOffsetToResultValue), rax);
-    if (FLAG_debug_code) {
-      __ movl(rax, Immediate(kWordBeforeResultValue));
-      __ cmpl(Operand(kScratchRegister, kOffsetToResultValue - 4), rax);
-      __ Assert(equal, kInstanceofStubUnexpectedCallSiteCacheMov);
-    }
-  }
-  __ ret(((HasArgsInRegisters() ? 0 : 2) + extra_argument_offset) *
-         kPointerSize);
+  __ movp(object_prototype, FieldOperand(object_map, Map::kPrototypeOffset));
+  __ cmpp(object_prototype, function_prototype);
+  __ j(equal, &done, Label::kNear);
+  __ CompareRoot(object_prototype, Heap::kNullValueRootIndex);
+  __ movp(object_map, FieldOperand(object_prototype, HeapObject::kMapOffset));
+  __ j(not_equal, &loop);
+  __ LoadRoot(rax, Heap::kFalseValueRootIndex);
+  __ bind(&done);
+  __ StoreRoot(rax, Heap::kInstanceofCacheAnswerRootIndex);
+  __ ret(0);
 
-  // Slow-case: Go through the JavaScript implementation.
-  __ bind(&slow);
-  if (!ReturnTrueFalseObject()) {
-    // Tail call the builtin which returns 0 or 1.
-    DCHECK(!HasArgsInRegisters());
-    if (HasCallSiteInlineCheck()) {
-      // Remove extra value from the stack.
-      __ PopReturnAddressTo(rcx);
-      __ Pop(rax);
-      __ PushReturnAddressFrom(rcx);
-    }
-    __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
-  } else {
-    // Call the builtin and convert 0/1 to true/false.
-    {
-      FrameScope scope(masm, StackFrame::INTERNAL);
-      __ Push(object);
-      __ Push(function);
-      __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
-    }
-    Label true_value, done;
-    __ testq(rax, rax);
-    __ j(zero, &true_value, Label::kNear);
-    __ LoadRoot(rax, Heap::kFalseValueRootIndex);
-    __ jmp(&done, Label::kNear);
-    __ bind(&true_value);
-    __ LoadRoot(rax, Heap::kTrueValueRootIndex);
-    __ bind(&done);
-    __ ret(((HasArgsInRegisters() ? 0 : 2) + extra_argument_offset) *
-           kPointerSize);
-  }
+  // Slow-case: Call the runtime function.
+  __ bind(&slow_case);
+  __ PopReturnAddressTo(kScratchRegister);
+  __ Push(object);
+  __ Push(function);
+  __ PushReturnAddressFrom(kScratchRegister);
+  __ TailCallRuntime(Runtime::kInstanceOf, 2, 1);
 }
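The new InstanceOfStub above drops the patchable call-site cache entirely: after validating the function and resolving its prototype, it simply walks the receiver's prototype chain and caches the answer in the instanceof roots. The core loop restated in plain C++ (toy object model with null-terminated chains, not V8's heap layout):

#include <cstdio>

struct Object {
  const Object* prototype;  // null terminates the chain
};

bool InstanceOf(const Object* object, const Object* function_prototype) {
  for (const Object* p = object->prototype; p != nullptr; p = p->prototype) {
    if (p == function_prototype) return true;
  }
  return false;
}

int main() {
  Object proto{nullptr};
  Object obj{&proto};
  std::printf("%d %d\n", InstanceOf(&obj, &proto),     // 1: proto on chain
              InstanceOf(&proto, &proto));             // 0: chain is empty
}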
 
 
@@ -3333,7 +3108,42 @@ void ToNumberStub::Generate(MacroAssembler* masm) {
   __ PopReturnAddressTo(rcx);     // Pop return address.
   __ Push(rax);                   // Push argument.
   __ PushReturnAddressFrom(rcx);  // Push return address.
-  __ InvokeBuiltin(Builtins::TO_NUMBER, JUMP_FUNCTION);
+  __ TailCallRuntime(Runtime::kToNumber, 1, 1);
+}
+
+
+void ToStringStub::Generate(MacroAssembler* masm) {
+  // The ToString stub takes one argument in rax.
+  Label is_number;
+  __ JumpIfSmi(rax, &is_number, Label::kNear);
+
+  Label not_string;
+  __ CmpObjectType(rax, FIRST_NONSTRING_TYPE, rdi);
+  // rax: receiver
+  // rdi: receiver map
+  __ j(above_equal, &not_string, Label::kNear);
+  __ Ret();
+  __ bind(&not_string);
+
+  Label not_heap_number;
+  __ CompareRoot(rdi, Heap::kHeapNumberMapRootIndex);
+  __ j(not_equal, &not_heap_number, Label::kNear);
+  __ bind(&is_number);
+  NumberToStringStub stub(isolate());
+  __ TailCallStub(&stub);
+  __ bind(&not_heap_number);
+
+  Label not_oddball;
+  __ CmpInstanceType(rdi, ODDBALL_TYPE);
+  __ j(not_equal, &not_oddball, Label::kNear);
+  __ movp(rax, FieldOperand(rax, Oddball::kToStringOffset));
+  __ Ret();
+  __ bind(&not_oddball);
+
+  __ PopReturnAddressTo(rcx);     // Pop return address.
+  __ Push(rax);                   // Push argument.
+  __ PushReturnAddressFrom(rcx);  // Push return address.
+  __ TailCallRuntime(Runtime::kToString, 1, 1);
 }
 
 
@@ -3474,43 +3284,40 @@ void StringHelper::GenerateOneByteCharsCompareLoop(
 
 
 void StringCompareStub::Generate(MacroAssembler* masm) {
-  Label runtime;
-
-  // Stack frame on entry.
-  //  rsp[0]  : return address
-  //  rsp[8]  : right string
-  //  rsp[16] : left string
-
-  StackArgumentsAccessor args(rsp, 2, ARGUMENTS_DONT_CONTAIN_RECEIVER);
-  __ movp(rdx, args.GetArgumentOperand(0));  // left
-  __ movp(rax, args.GetArgumentOperand(1));  // right
+  // ----------- S t a t e -------------
+  //  -- rdx    : left string
+  //  -- rax    : right string
+  //  -- rsp[0] : return address
+  // -----------------------------------
+  __ AssertString(rdx);
+  __ AssertString(rax);
 
   // Check for identity.
   Label not_same;
   __ cmpp(rdx, rax);
   __ j(not_equal, &not_same, Label::kNear);
   __ Move(rax, Smi::FromInt(EQUAL));
-  Counters* counters = isolate()->counters();
-  __ IncrementCounter(counters->string_compare_native(), 1);
-  __ ret(2 * kPointerSize);
+  __ IncrementCounter(isolate()->counters()->string_compare_native(), 1);
+  __ Ret();
 
   __ bind(&not_same);
 
   // Check that both are sequential one-byte strings.
+  Label runtime;
   __ JumpIfNotBothSequentialOneByteStrings(rdx, rax, rcx, rbx, &runtime);
 
   // Inline comparison of one-byte strings.
-  __ IncrementCounter(counters->string_compare_native(), 1);
-  // Drop arguments from the stack
-  __ PopReturnAddressTo(rcx);
-  __ addp(rsp, Immediate(2 * kPointerSize));
-  __ PushReturnAddressFrom(rcx);
+  __ IncrementCounter(isolate()->counters()->string_compare_native(), 1);
   StringHelper::GenerateCompareFlatOneByteStrings(masm, rdx, rax, rcx, rbx, rdi,
                                                   r8);
 
   // Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
   // tagged as a small integer.
   __ bind(&runtime);
+  __ PopReturnAddressTo(rcx);
+  __ Push(rdx);
+  __ Push(rax);
+  __ PushReturnAddressFrom(rcx);
   __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
 }
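StringCompareStub now takes its operands in rdx/rax and only pushes them for the runtime fallback. The inline fast path rests on GenerateCompareFlatOneByteStrings, whose contract a few lines of C++ can restate: compare the shared prefix byte-wise, then fall back to the length difference. This is a stand-in for intuition, not the V8 routine:

#include <algorithm>
#include <cstring>
#include <string>

// Returns -1 (less), 0 (equal) or 1 (greater), like the stub's smi result.
int CompareFlatOneByteStrings(const std::string& a, const std::string& b) {
  size_t min_length = std::min(a.size(), b.size());
  int r = std::memcmp(a.data(), b.data(), min_length);
  if (r == 0)  // identical prefix: the shorter string sorts first
    r = a.size() < b.size() ? -1 : (a.size() > b.size() ? 1 : 0);
  return r < 0 ? -1 : (r > 0 ? 1 : 0);
}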
 
 
@@ -3543,6 +3350,37 @@ void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) {
 }
 
 
+void CompareICStub::GenerateBooleans(MacroAssembler* masm) {
+  DCHECK_EQ(CompareICState::BOOLEAN, state());
+  Label miss;
+  Label::Distance const miss_distance =
+      masm->emit_debug_code() ? Label::kFar : Label::kNear;
+
+  __ JumpIfSmi(rdx, &miss, miss_distance);
+  __ movp(rcx, FieldOperand(rdx, HeapObject::kMapOffset));
+  __ JumpIfSmi(rax, &miss, miss_distance);
+  __ movp(rbx, FieldOperand(rax, HeapObject::kMapOffset));
+  __ JumpIfNotRoot(rcx, Heap::kBooleanMapRootIndex, &miss, miss_distance);
+  __ JumpIfNotRoot(rbx, Heap::kBooleanMapRootIndex, &miss, miss_distance);
+  if (op() != Token::EQ_STRICT && is_strong(strength())) {
+    __ TailCallRuntime(Runtime::kThrowStrongModeImplicitConversion, 0, 1);
+  } else {
+    if (!Token::IsEqualityOp(op())) {
+      __ movp(rax, FieldOperand(rax, Oddball::kToNumberOffset));
+      __ AssertSmi(rax);
+      __ movp(rdx, FieldOperand(rdx, Oddball::kToNumberOffset));
+      __ AssertSmi(rdx);
+      __ xchgp(rax, rdx);
+    }
+    __ subp(rax, rdx);
+    __ Ret();
+  }
+
+  __ bind(&miss);
+  GenerateMiss(masm);
+}
+
+
 void CompareICStub::GenerateSmis(MacroAssembler* masm) {
   DCHECK(state() == CompareICState::SMI);
   Label miss;
@@ -3844,15 +3682,24 @@ void CompareICStub::GenerateKnownObjects(MacroAssembler* masm) {
   __ j(either_smi, &miss, Label::kNear);
 
   __ GetWeakValue(rdi, cell);
-  __ movp(rcx, FieldOperand(rax, HeapObject::kMapOffset));
-  __ movp(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
-  __ cmpp(rcx, rdi);
+  __ cmpp(FieldOperand(rdx, HeapObject::kMapOffset), rdi);
   __ j(not_equal, &miss, Label::kNear);
-  __ cmpp(rbx, rdi);
+  __ cmpp(FieldOperand(rax, HeapObject::kMapOffset), rdi);
   __ j(not_equal, &miss, Label::kNear);
 
-  __ subp(rax, rdx);
-  __ ret(0);
+  if (Token::IsEqualityOp(op())) {
+    __ subp(rax, rdx);
+    __ ret(0);
+  } else if (is_strong(strength())) {
+    __ TailCallRuntime(Runtime::kThrowStrongModeImplicitConversion, 0, 1);
+  } else {
+    __ PopReturnAddressTo(rcx);
+    __ Push(rdx);
+    __ Push(rax);
+    __ Push(Smi::FromInt(NegativeComparisonResult(GetCondition())));
+    __ PushReturnAddressFrom(rcx);
+    __ TailCallRuntime(Runtime::kCompare, 3, 1);
+  }
 
   __ bind(&miss);
   GenerateMiss(masm);
@@ -4372,25 +4219,23 @@ void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
 
 
 void LoadICTrampolineStub::Generate(MacroAssembler* masm) {
-  EmitLoadTypeFeedbackVector(masm, LoadWithVectorDescriptor::VectorRegister());
+  __ EmitLoadTypeFeedbackVector(LoadWithVectorDescriptor::VectorRegister());
   LoadICStub stub(isolate(), state());
   stub.GenerateForTrampoline(masm);
 }
 
 
 void KeyedLoadICTrampolineStub::Generate(MacroAssembler* masm) {
-  EmitLoadTypeFeedbackVector(masm, LoadWithVectorDescriptor::VectorRegister());
+  __ EmitLoadTypeFeedbackVector(LoadWithVectorDescriptor::VectorRegister());
   KeyedLoadICStub stub(isolate(), state());
   stub.GenerateForTrampoline(masm);
 }
 
 
-static void HandleArrayCases(MacroAssembler* masm, Register receiver,
-                             Register key, Register vector, Register slot,
-                             Register feedback, Register receiver_map,
-                             Register scratch1, Register scratch2,
-                             Register scratch3, bool is_polymorphic,
-                             Label* miss) {
+static void HandleArrayCases(MacroAssembler* masm, Register feedback,
+                             Register receiver_map, Register scratch1,
+                             Register scratch2, Register scratch3,
+                             bool is_polymorphic, Label* miss) {
   // feedback initially contains the feedback array
   Label next_loop, prepare_next;
   Label start_polymorphic;
 
@@ -4492,8 +4337,8 @@ void LoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
   __ bind(&try_array);
   __ CompareRoot(FieldOperand(feedback, 0), Heap::kFixedArrayMapRootIndex);
   __ j(not_equal, &not_array);
-  HandleArrayCases(masm, receiver, name, vector, slot, feedback, receiver_map,
-                   integer_slot, r11, r15, true, &miss);
+  HandleArrayCases(masm, feedback, receiver_map, integer_slot, r11, r15, true,
+                   &miss);
 
   __ bind(&not_array);
   __ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
@@ -4551,8 +4396,8 @@ void KeyedLoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
   // We have a polymorphic element handler.
   Label polymorphic, try_poly_name;
   __ bind(&polymorphic);
-  HandleArrayCases(masm, receiver, key, vector, slot, feedback, receiver_map,
-                   integer_slot, r11, r15, true, &miss);
+  HandleArrayCases(masm, feedback, receiver_map, integer_slot, r11, r15, true,
+                   &miss);
 
   __ bind(&not_array);
   // Is it generic?
@@ -4570,8 +4415,8 @@ void KeyedLoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
   // at least one map/handler pair.
   __ movp(feedback, FieldOperand(vector, integer_slot, times_pointer_size,
                                  FixedArray::kHeaderSize + kPointerSize));
-  HandleArrayCases(masm, receiver, key, vector, slot, feedback, receiver_map,
-                   integer_slot, r11, r15, false, &miss);
+  HandleArrayCases(masm, feedback, receiver_map, integer_slot, r11, r15, false,
+                   &miss);
 
   __ bind(&miss);
   KeyedLoadIC::GenerateMiss(masm);
@@ -4583,14 +4428,14 @@ void KeyedLoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
 
 
 void VectorStoreICTrampolineStub::Generate(MacroAssembler* masm) {
-  EmitLoadTypeFeedbackVector(masm, VectorStoreICDescriptor::VectorRegister());
+  __ EmitLoadTypeFeedbackVector(VectorStoreICDescriptor::VectorRegister());
   VectorStoreICStub stub(isolate(), state());
   stub.GenerateForTrampoline(masm);
 }
 
 
 void VectorKeyedStoreICTrampolineStub::Generate(MacroAssembler* masm) {
-  EmitLoadTypeFeedbackVector(masm, VectorStoreICDescriptor::VectorRegister());
+  __ EmitLoadTypeFeedbackVector(VectorStoreICDescriptor::VectorRegister());
   VectorKeyedStoreICStub stub(isolate(), state());
   stub.GenerateForTrampoline(masm);
 }
@@ -4607,11 +4452,50 @@ void VectorStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
 
 
 void VectorStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
-  Label miss;
+  Register receiver = VectorStoreICDescriptor::ReceiverRegister();  // rdx
+  Register key = VectorStoreICDescriptor::NameRegister();           // rcx
+  Register vector = VectorStoreICDescriptor::VectorRegister();      // rbx
+  Register slot = VectorStoreICDescriptor::SlotRegister();          // rdi
+  DCHECK(VectorStoreICDescriptor::ValueRegister().is(rax));         // rax
+  Register feedback = r8;
+  Register integer_slot = r9;
+  Register receiver_map = r11;
+  DCHECK(!AreAliased(feedback, integer_slot, vector, slot, receiver_map));
+
+  __ SmiToInteger32(integer_slot, slot);
+  __ movp(feedback, FieldOperand(vector, integer_slot, times_pointer_size,
+                                 FixedArray::kHeaderSize));
+
+  // Try to quickly handle the monomorphic case without knowing for sure
+  // if we have a weak cell in feedback. We do know it's safe to look
+  // at WeakCell::kValueOffset.
+  Label try_array, load_smi_map, compare_map;
+  Label not_array, miss;
+  HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector,
+                        integer_slot, &compare_map, &load_smi_map, &try_array);
+
+  // Is it a fixed array?
+  __ bind(&try_array);
+  __ CompareRoot(FieldOperand(feedback, 0), Heap::kFixedArrayMapRootIndex);
+  __ j(not_equal, &not_array);
+  HandleArrayCases(masm, feedback, receiver_map, integer_slot, r14, r15, true,
+                   &miss);
+
+  __ bind(&not_array);
+  __ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
+  __ j(not_equal, &miss);
+
+  Code::Flags code_flags = Code::RemoveTypeAndHolderFromFlags(
+      Code::ComputeHandlerFlags(Code::STORE_IC));
+  masm->isolate()->stub_cache()->GenerateProbe(masm, Code::STORE_IC, code_flags,
+                                               receiver, key, feedback, no_reg);
 
-  // TODO(mvstanton): Implement.
   __ bind(&miss);
   StoreIC::GenerateMiss(masm);
+
+  __ bind(&load_smi_map);
+  __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
+  __ jmp(&compare_map);
 }
 
 
@@ -4625,29 +4509,125 @@ void VectorKeyedStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
 }
 
 
+static void HandlePolymorphicKeyedStoreCase(MacroAssembler* masm,
+                                            Register receiver_map,
+                                            Register feedback, Register scratch,
+                                            Register scratch1,
+                                            Register scratch2, Label* miss) {
+  // feedback initially contains the feedback array
+  Label next, next_loop, prepare_next;
+  Label transition_call;
+
+  Register cached_map = scratch;
+  Register counter = scratch1;
+  Register length = scratch2;
+
+  // Polymorphic, we have to loop from 0 to N - 1
+  __ movp(counter, Immediate(0));
+  __ movp(length, FieldOperand(feedback, FixedArray::kLengthOffset));
+  __ SmiToInteger32(length, length);
+
+  __ bind(&next_loop);
+  __ movp(cached_map, FieldOperand(feedback, counter, times_pointer_size,
+                                   FixedArray::kHeaderSize));
+  __ cmpp(receiver_map, FieldOperand(cached_map, WeakCell::kValueOffset));
+  __ j(not_equal, &prepare_next);
+  __ movp(cached_map, FieldOperand(feedback, counter, times_pointer_size,
+                                   FixedArray::kHeaderSize + kPointerSize));
+  __ CompareRoot(cached_map, Heap::kUndefinedValueRootIndex);
+  __ j(not_equal, &transition_call);
+  __ movp(feedback, FieldOperand(feedback, counter, times_pointer_size,
+                                 FixedArray::kHeaderSize + 2 * kPointerSize));
+  __ leap(feedback, FieldOperand(feedback, Code::kHeaderSize));
+  __ jmp(feedback);
+
+  __ bind(&transition_call);
+  DCHECK(receiver_map.is(VectorStoreTransitionDescriptor::MapRegister()));
+  __ movp(receiver_map, FieldOperand(cached_map, WeakCell::kValueOffset));
+  // The weak cell may have been cleared.
+  __ JumpIfSmi(receiver_map, miss);
+  // Get the handler in value.
+  __ movp(feedback, FieldOperand(feedback, counter, times_pointer_size,
+                                 FixedArray::kHeaderSize + 2 * kPointerSize));
+  __ leap(feedback, FieldOperand(feedback, Code::kHeaderSize));
+  __ jmp(feedback);
+
+  __ bind(&prepare_next);
+  __ addl(counter, Immediate(3));
+  __ cmpl(counter, length);
+  __ j(less, &next_loop);
+
+  // We exhausted our array of map handler pairs.
+  __ jmp(miss);
+}
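HandlePolymorphicKeyedStoreCase above scans keyed-store feedback in steps of three because each entry is a (receiver map, transition map or undefined, handler) triplet. A pointer-based sketch of that scan (stand-in types; a miss is signalled by returning null rather than jumping to a label):

#include <cstddef>

struct Entry {
  const void* map;         // weak cell value in the real vector
  const void* transition;  // null stands in for undefined (no transition)
  const void* handler;
};

// Returns the handler for receiver_map, reporting an optional map
// transition through transition_out; null means "miss".
const void* FindKeyedStoreHandler(const Entry* feedback, size_t entries,
                                  const void* receiver_map,
                                  const void** transition_out) {
  for (size_t i = 0; i < entries; ++i) {  // counter advances by 3 in the stub
    if (feedback[i].map != receiver_map) continue;
    *transition_out = feedback[i].transition;
    return feedback[i].handler;
  }
  return nullptr;  // exhausted the array of map/handler entries
}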
 
 
 void VectorKeyedStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
-  Label miss;
+  Register receiver = VectorStoreICDescriptor::ReceiverRegister();  // rdx
+  Register key = VectorStoreICDescriptor::NameRegister();           // rcx
+  Register vector = VectorStoreICDescriptor::VectorRegister();      // rbx
+  Register slot = VectorStoreICDescriptor::SlotRegister();          // rdi
+  DCHECK(VectorStoreICDescriptor::ValueRegister().is(rax));         // rax
+  Register feedback = r8;
+  Register integer_slot = r9;
+  Register receiver_map = r11;
+  DCHECK(!AreAliased(feedback, integer_slot, vector, slot, receiver_map));
+
+  __ SmiToInteger32(integer_slot, slot);
+  __ movp(feedback, FieldOperand(vector, integer_slot, times_pointer_size,
+                                 FixedArray::kHeaderSize));
+
+  // Try to quickly handle the monomorphic case without knowing for sure
+  // if we have a weak cell in feedback. We do know it's safe to look
+  // at WeakCell::kValueOffset.
+  Label try_array, load_smi_map, compare_map;
+  Label not_array, miss;
+  HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector,
+                        integer_slot, &compare_map, &load_smi_map, &try_array);
+
+  // Is it a fixed array?
+  __ bind(&try_array);
+  __ CompareRoot(FieldOperand(feedback, 0), Heap::kFixedArrayMapRootIndex);
+  __ j(not_equal, &not_array);
+  HandlePolymorphicKeyedStoreCase(masm, receiver_map, feedback, integer_slot,
+                                  r15, r14, &miss);
+
+  __ bind(&not_array);
+  Label try_poly_name;
+  __ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
+  __ j(not_equal, &try_poly_name);
+
+  Handle<Code> megamorphic_stub =
+      KeyedStoreIC::ChooseMegamorphicStub(masm->isolate(), GetExtraICState());
+  __ jmp(megamorphic_stub, RelocInfo::CODE_TARGET);
+
+  __ bind(&try_poly_name);
+  // We might have a name in feedback, and a fixed array in the next slot.
+  __ cmpp(key, feedback);
+  __ j(not_equal, &miss);
+  // If the name comparison succeeded, we know we have a fixed array with
+  // at least one map/handler pair.
+  __ movp(feedback, FieldOperand(vector, integer_slot, times_pointer_size,
+                                 FixedArray::kHeaderSize + kPointerSize));
+  HandleArrayCases(masm, feedback, receiver_map, integer_slot, r14, r15, false,
+                   &miss);
 
-  // TODO(mvstanton): Implement.
   __ bind(&miss);
   KeyedStoreIC::GenerateMiss(masm);
+
+  __ bind(&load_smi_map);
+  __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
+  __ jmp(&compare_map);
 }
 
 
 void CallICTrampolineStub::Generate(MacroAssembler* masm) {
-  EmitLoadTypeFeedbackVector(masm, rbx);
+  __ EmitLoadTypeFeedbackVector(rbx);
   CallICStub stub(isolate(), state());
   __ jmp(stub.GetCode(), RelocInfo::CODE_TARGET);
 }
 
 
-void CallIC_ArrayTrampolineStub::Generate(MacroAssembler* masm) {
-  EmitLoadTypeFeedbackVector(masm, rbx);
-  CallIC_ArrayStub stub(isolate(), state());
-  __ jmp(stub.GetCode(), RelocInfo::CODE_TARGET);
-}
-
-
 void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
   if (masm->isolate()->function_entry_hook() != NULL) {
     ProfileEntryHookStub stub(masm->isolate());
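Both new store-IC bodies end with the same load_smi_map trick: a smi carries no map word, so the dispatcher substitutes the heap-number map and re-enters the ordinary map comparison, letting one monomorphic path cover smis and boxed doubles alike. A sketch under an assumed one-bit tagging scheme (V8's real tagging differs in detail):

#include <cstdint>

struct Map {};
static Map heap_number_map;  // stand-in for Heap::kHeapNumberMapRootIndex
struct HeapObject { const Map* map; };

// Assumed convention: smis have the low bit clear, heap pointers have it set.
const Map* MapForDispatch(intptr_t tagged) {
  if ((tagged & 1) == 0) return &heap_number_map;  // smi: pretend HeapNumber
  return reinterpret_cast<const HeapObject*>(tagged - 1)->map;
}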