diff options
author | Ali Ijaz Sheikh <ofrobots@google.com> | 2016-03-01 08:58:05 -0800 |
---|---|---|
committer | Ali Sheikh <ofrobots@lemonhope.roam.corp.google.com> | 2016-03-03 20:35:20 -0800 |
commit | 069e02ab47656b3efd1b6829c65856b2e1c2d1db (patch) | |
tree | eb643e0a2e88fd64bb9fc927423458d2ae96c2db /deps/v8/src/ic | |
parent | 8938355398c79f583a468284b768652d12ba9bc9 (diff) | |
download | android-node-v8-069e02ab47656b3efd1b6829c65856b2e1c2d1db.tar.gz android-node-v8-069e02ab47656b3efd1b6829c65856b2e1c2d1db.tar.bz2 android-node-v8-069e02ab47656b3efd1b6829c65856b2e1c2d1db.zip |
deps: upgrade to V8 4.9.385.18
Pick up the current branch head for V8 4.9
https://github.com/v8/v8/commit/1ecba0f
PR-URL: https://github.com/nodejs/node/pull/4722
Reviewed-By: Ben Noordhuis <info@bnoordhuis.nl>
Reviewed-By: Michaël Zasso <mic.besace@gmail.com>
Diffstat (limited to 'deps/v8/src/ic')
44 files changed, 795 insertions, 2143 deletions
diff --git a/deps/v8/src/ic/access-compiler.cc b/deps/v8/src/ic/access-compiler.cc index 951966e7de..0f1b7b9bf1 100644 --- a/deps/v8/src/ic/access-compiler.cc +++ b/deps/v8/src/ic/access-compiler.cc @@ -55,8 +55,7 @@ Register PropertyAccessCompiler::slot() const { if (kind() == Code::LOAD_IC || kind() == Code::KEYED_LOAD_IC) { return LoadDescriptor::SlotRegister(); } - DCHECK(FLAG_vector_stores && - (kind() == Code::STORE_IC || kind() == Code::KEYED_STORE_IC)); + DCHECK(kind() == Code::STORE_IC || kind() == Code::KEYED_STORE_IC); return VectorStoreICDescriptor::SlotRegister(); } @@ -65,8 +64,7 @@ Register PropertyAccessCompiler::vector() const { if (kind() == Code::LOAD_IC || kind() == Code::KEYED_LOAD_IC) { return LoadWithVectorDescriptor::VectorRegister(); } - DCHECK(FLAG_vector_stores && - (kind() == Code::STORE_IC || kind() == Code::KEYED_STORE_IC)); + DCHECK(kind() == Code::STORE_IC || kind() == Code::KEYED_STORE_IC); return VectorStoreICDescriptor::VectorRegister(); } } // namespace internal diff --git a/deps/v8/src/ic/access-compiler.h b/deps/v8/src/ic/access-compiler.h index 223bde479a..50c2cc7303 100644 --- a/deps/v8/src/ic/access-compiler.h +++ b/deps/v8/src/ic/access-compiler.h @@ -40,7 +40,7 @@ class PropertyAccessCompiler BASE_EMBEDDED { kind_(kind), cache_holder_(cache_holder), isolate_(isolate), - masm_(isolate, NULL, 256) { + masm_(isolate, NULL, 256, CodeObjectRequired::kYes) { // TODO(yangguo): remove this once we can serialize IC stubs. masm_.enable_serializer(); } diff --git a/deps/v8/src/ic/arm/access-compiler-arm.cc b/deps/v8/src/ic/arm/access-compiler-arm.cc index 62f554792f..d360f5a62b 100644 --- a/deps/v8/src/ic/arm/access-compiler-arm.cc +++ b/deps/v8/src/ic/arm/access-compiler-arm.cc @@ -31,7 +31,6 @@ Register* PropertyAccessCompiler::store_calling_convention() { // receiver, name, scratch1, scratch2, scratch3. 
Register receiver = StoreDescriptor::ReceiverRegister(); Register name = StoreDescriptor::NameRegister(); - DCHECK(FLAG_vector_stores || r3.is(StoreTransitionDescriptor::MapRegister())); static Register registers[] = {receiver, name, r3, r4, r5}; return registers; } diff --git a/deps/v8/src/ic/arm/handler-compiler-arm.cc b/deps/v8/src/ic/arm/handler-compiler-arm.cc index 1b6b51538e..e293965e6f 100644 --- a/deps/v8/src/ic/arm/handler-compiler-arm.cc +++ b/deps/v8/src/ic/arm/handler-compiler-arm.cc @@ -40,7 +40,8 @@ void NamedLoadHandlerCompiler::GenerateLoadViaGetter( ParameterCount actual(0); ParameterCount expected(expected_arguments); __ LoadAccessor(r1, holder, accessor_index, ACCESSOR_GETTER); - __ InvokeFunction(r1, expected, actual, CALL_FUNCTION, NullCallWrapper()); + __ InvokeFunction(r1, expected, actual, CALL_FUNCTION, + CheckDebugStepCallWrapper()); } else { // If we generate a global code snippet for deoptimization only, remember // the place to continue after deoptimization. @@ -81,7 +82,8 @@ void NamedStoreHandlerCompiler::GenerateStoreViaSetter( ParameterCount actual(1); ParameterCount expected(expected_arguments); __ LoadAccessor(r1, holder, accessor_index, ACCESSOR_SETTER); - __ InvokeFunction(r1, expected, actual, CALL_FUNCTION, NullCallWrapper()); + __ InvokeFunction(r1, expected, actual, CALL_FUNCTION, + CheckDebugStepCallWrapper()); } else { // If we generate a global code snippet for deoptimization only, remember // the place to continue after deoptimization. @@ -143,7 +145,7 @@ void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup( // Check that receiver is a JSObject. __ ldrb(scratch0, FieldMemOperand(map, Map::kInstanceTypeOffset)); - __ cmp(scratch0, Operand(FIRST_SPEC_OBJECT_TYPE)); + __ cmp(scratch0, Operand(FIRST_JS_RECEIVER_TYPE)); __ b(lt, miss_label); // Load properties array. 
@@ -169,10 +171,7 @@ void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup( void NamedLoadHandlerCompiler::GenerateDirectLoadGlobalFunctionPrototype( MacroAssembler* masm, int index, Register result, Label* miss) { - const int offset = Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX); - __ ldr(result, MemOperand(cp, offset)); - __ ldr(result, FieldMemOperand(result, JSGlobalObject::kNativeContextOffset)); - __ ldr(result, MemOperand(result, Context::SlotOffset(index))); + __ LoadNativeContextSlot(index, result); // Load its initial map. The global functions all have initial maps. __ ldr(result, FieldMemOperand(result, JSFunction::kPrototypeOrInitialMapOffset)); @@ -223,8 +222,10 @@ static void PushInterceptorArguments(MacroAssembler* masm, Register receiver, static void CompileCallLoadPropertyWithInterceptor( MacroAssembler* masm, Register receiver, Register holder, Register name, Handle<JSObject> holder_obj, Runtime::FunctionId id) { + DCHECK(NamedLoadHandlerCompiler::kInterceptorArgsLength == + Runtime::FunctionForId(id)->nargs); PushInterceptorArguments(masm, receiver, holder, name, holder_obj); - __ CallRuntime(id, NamedLoadHandlerCompiler::kInterceptorArgsLength); + __ CallRuntime(id); } @@ -293,6 +294,13 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall( __ ldr(data, FieldMemOperand(data, CallHandlerInfo::kDataOffset)); } + if (api_call_info->fast_handler()->IsCode()) { + // Just tail call into the fast handler if present. + __ Jump(handle(Code::cast(api_call_info->fast_handler())), + RelocInfo::CODE_TARGET); + return; + } + // Put api_function_address in place. 
Address function_address = v8::ToCData<Address>(api_call_info->callback()); ApiFunction fun(function_address); @@ -307,15 +315,10 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall( static void StoreIC_PushArgs(MacroAssembler* masm) { - if (FLAG_vector_stores) { - __ Push(StoreDescriptor::ReceiverRegister(), - StoreDescriptor::NameRegister(), StoreDescriptor::ValueRegister(), - VectorStoreICDescriptor::SlotRegister(), - VectorStoreICDescriptor::VectorRegister()); - } else { - __ Push(StoreDescriptor::ReceiverRegister(), - StoreDescriptor::NameRegister(), StoreDescriptor::ValueRegister()); - } + __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(), + StoreDescriptor::ValueRegister(), + VectorStoreICDescriptor::SlotRegister(), + VectorStoreICDescriptor::VectorRegister()); } @@ -324,7 +327,7 @@ void NamedStoreHandlerCompiler::GenerateSlow(MacroAssembler* masm) { // The slow case calls into the runtime to complete the store without causing // an IC miss that would otherwise cause a transition to the generic stub. - __ TailCallRuntime(Runtime::kStoreIC_Slow, FLAG_vector_stores ? 5 : 3, 1); + __ TailCallRuntime(Runtime::kStoreIC_Slow); } @@ -333,8 +336,7 @@ void ElementHandlerCompiler::GenerateStoreSlow(MacroAssembler* masm) { // The slow case calls into the runtime to complete the store without causing // an IC miss that would otherwise cause a transition to the generic stub. - __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow, FLAG_vector_stores ? 
5 : 3, - 1); + __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow); } @@ -707,8 +709,7 @@ void NamedLoadHandlerCompiler::GenerateLoadInterceptor(Register holder_reg) { PushInterceptorArguments(masm(), receiver(), holder_reg, this->name(), holder()); - __ TailCallRuntime(Runtime::kLoadPropertyWithInterceptor, - NamedLoadHandlerCompiler::kInterceptorArgsLength, 1); + __ TailCallRuntime(Runtime::kLoadPropertyWithInterceptor); } @@ -733,7 +734,7 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback( __ Push(ip, value()); // Do tail-call to the runtime system. - __ TailCallRuntime(Runtime::kStoreCallbackProperty, 5, 1); + __ TailCallRuntime(Runtime::kStoreCallbackProperty); // Return the generated code. return GetCode(kind(), Code::FAST, name); @@ -745,7 +746,7 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreInterceptor( __ Push(receiver(), this->name(), value()); // Do tail-call to the runtime system. - __ TailCallRuntime(Runtime::kStorePropertyWithInterceptor, 3, 1); + __ TailCallRuntime(Runtime::kStorePropertyWithInterceptor); // Return the generated code. return GetCode(kind(), Code::FAST, name); diff --git a/deps/v8/src/ic/arm/ic-arm.cc b/deps/v8/src/ic/arm/ic-arm.cc index 89b3cc38d4..f59ac074be 100644 --- a/deps/v8/src/ic/arm/ic-arm.cc +++ b/deps/v8/src/ic/arm/ic-arm.cc @@ -309,8 +309,7 @@ void LoadIC::GenerateMiss(MacroAssembler* masm) { LoadIC_PushArgs(masm); // Perform tail call to the entry. - int arg_count = 4; - __ TailCallRuntime(Runtime::kLoadIC_Miss, arg_count, 1); + __ TailCallRuntime(Runtime::kLoadIC_Miss); } @@ -323,8 +322,7 @@ void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm, // Do tail-call to runtime routine. __ TailCallRuntime(is_strong(language_mode) ? Runtime::kGetPropertyStrong - : Runtime::kGetProperty, - 2, 1); + : Runtime::kGetProperty); } @@ -339,8 +337,7 @@ void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) { LoadIC_PushArgs(masm); // Perform tail call to the entry. 
- int arg_count = 4; - __ TailCallRuntime(Runtime::kKeyedLoadIC_Miss, arg_count, 1); + __ TailCallRuntime(Runtime::kKeyedLoadIC_Miss); } @@ -353,8 +350,7 @@ void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm, // Perform tail call to the entry. // Do tail-call to runtime routine. __ TailCallRuntime(is_strong(language_mode) ? Runtime::kKeyedGetPropertyStrong - : Runtime::kKeyedGetProperty, - 2, 1); + : Runtime::kKeyedGetProperty); } @@ -462,23 +458,17 @@ void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm, static void StoreIC_PushArgs(MacroAssembler* masm) { - if (FLAG_vector_stores) { - __ Push(StoreDescriptor::ReceiverRegister(), - StoreDescriptor::NameRegister(), StoreDescriptor::ValueRegister(), - VectorStoreICDescriptor::SlotRegister(), - VectorStoreICDescriptor::VectorRegister()); - } else { - __ Push(StoreDescriptor::ReceiverRegister(), - StoreDescriptor::NameRegister(), StoreDescriptor::ValueRegister()); - } + __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(), + StoreDescriptor::ValueRegister(), + VectorStoreICDescriptor::SlotRegister(), + VectorStoreICDescriptor::VectorRegister()); } void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) { StoreIC_PushArgs(masm); - int args = FLAG_vector_stores ? 5 : 3; - __ TailCallRuntime(Runtime::kKeyedStoreIC_Miss, args, 1); + __ TailCallRuntime(Runtime::kKeyedStoreIC_Miss); } @@ -493,8 +483,11 @@ static void KeyedStoreGenerateMegamorphicHelper( // Fast case: Do the store, could be either Object or double. 
__ bind(fast_object); - Register scratch_value = r4; + Register scratch = r4; Register address = r5; + DCHECK(!AreAliased(value, key, receiver, receiver_map, elements_map, elements, + scratch, address)); + if (check_map == kCheckMap) { __ ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset)); __ cmp(elements_map, @@ -507,12 +500,10 @@ static void KeyedStoreGenerateMegamorphicHelper( // there may be a callback on the element Label holecheck_passed1; __ add(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); - __ ldr(scratch_value, - MemOperand::PointerAddressFromSmiKey(address, key, PreIndex)); - __ cmp(scratch_value, Operand(masm->isolate()->factory()->the_hole_value())); + __ ldr(scratch, MemOperand::PointerAddressFromSmiKey(address, key, PreIndex)); + __ cmp(scratch, Operand(masm->isolate()->factory()->the_hole_value())); __ b(ne, &holecheck_passed1); - __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch_value, - slow); + __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch, slow); __ bind(&holecheck_passed1); @@ -522,8 +513,8 @@ static void KeyedStoreGenerateMegamorphicHelper( if (increment_length == kIncrementLength) { // Add 1 to receiver->length. - __ add(scratch_value, key, Operand(Smi::FromInt(1))); - __ str(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset)); + __ add(scratch, key, Operand(Smi::FromInt(1))); + __ str(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset)); } // It's irrelevant whether array is smi-only or not when writing a smi. __ add(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); @@ -532,22 +523,21 @@ static void KeyedStoreGenerateMegamorphicHelper( __ bind(&non_smi_value); // Escape to elements kind transition case. 
- __ CheckFastObjectElements(receiver_map, scratch_value, - &transition_smi_elements); + __ CheckFastObjectElements(receiver_map, scratch, &transition_smi_elements); // Fast elements array, store the value to the elements backing store. __ bind(&finish_object_store); if (increment_length == kIncrementLength) { // Add 1 to receiver->length. - __ add(scratch_value, key, Operand(Smi::FromInt(1))); - __ str(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset)); + __ add(scratch, key, Operand(Smi::FromInt(1))); + __ str(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset)); } __ add(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); __ add(address, address, Operand::PointerOffsetFromSmiKey(key)); __ str(value, MemOperand(address)); // Update write barrier for the elements array address. - __ mov(scratch_value, value); // Preserve the value which is returned. - __ RecordWrite(elements, address, scratch_value, kLRHasNotBeenSaved, + __ mov(scratch, value); // Preserve the value which is returned. 
+ __ RecordWrite(elements, address, scratch, kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK); __ Ret(); @@ -565,33 +555,31 @@ static void KeyedStoreGenerateMegamorphicHelper( __ add(address, elements, Operand((FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32)) - kHeapObjectTag)); - __ ldr(scratch_value, - MemOperand(address, key, LSL, kPointerSizeLog2, PreIndex)); - __ cmp(scratch_value, Operand(kHoleNanUpper32)); + __ ldr(scratch, MemOperand(address, key, LSL, kPointerSizeLog2, PreIndex)); + __ cmp(scratch, Operand(kHoleNanUpper32)); __ b(ne, &fast_double_without_map_check); - __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch_value, - slow); + __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch, slow); __ bind(&fast_double_without_map_check); - __ StoreNumberToDoubleElements(value, key, elements, r3, d0, + __ StoreNumberToDoubleElements(value, key, elements, scratch, d0, &transition_double_elements); if (increment_length == kIncrementLength) { // Add 1 to receiver->length. - __ add(scratch_value, key, Operand(Smi::FromInt(1))); - __ str(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset)); + __ add(scratch, key, Operand(Smi::FromInt(1))); + __ str(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset)); } __ Ret(); __ bind(&transition_smi_elements); // Transition the array appropriately depending on the value type. - __ ldr(r4, FieldMemOperand(value, HeapObject::kMapOffset)); - __ CompareRoot(r4, Heap::kHeapNumberMapRootIndex); + __ ldr(scratch, FieldMemOperand(value, HeapObject::kMapOffset)); + __ CompareRoot(scratch, Heap::kHeapNumberMapRootIndex); __ b(ne, &non_double_value); // Value is a double. Transition FAST_SMI_ELEMENTS -> // FAST_DOUBLE_ELEMENTS and complete the store. 
__ LoadTransitionedArrayMapConditional( - FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS, receiver_map, r4, slow); + FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS, receiver_map, scratch, slow); AllocationSiteMode mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS); ElementsTransitionGenerator::GenerateSmiToDouble(masm, receiver, key, value, @@ -602,7 +590,7 @@ static void KeyedStoreGenerateMegamorphicHelper( __ bind(&non_double_value); // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, FAST_ELEMENTS, - receiver_map, r4, slow); + receiver_map, scratch, slow); mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS); ElementsTransitionGenerator::GenerateMapChangeElementsTransition( masm, receiver, key, value, receiver_map, mode, slow); @@ -614,7 +602,7 @@ static void KeyedStoreGenerateMegamorphicHelper( // HeapNumber. Make sure that the receiver is a Array with FAST_ELEMENTS and // transition array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS, - receiver_map, r4, slow); + receiver_map, scratch, slow); mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS); ElementsTransitionGenerator::GenerateDoubleToObject( masm, receiver, key, value, receiver_map, mode, slow); @@ -690,27 +678,24 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm, __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset)); __ JumpIfNotUniqueNameInstanceType(r4, &slow); - // We use register r8 when FLAG_vector_stores is enabled, because otherwise - // probing the megamorphic stub cache would require pushing temporaries on - // the stack. + // We use register r8, because otherwise probing the megamorphic stub cache + // would require pushing temporaries on the stack. // TODO(mvstanton): quit using register r8 when // FLAG_enable_embedded_constant_pool is turned on. 
- DCHECK(!FLAG_vector_stores || !FLAG_enable_embedded_constant_pool); - Register temporary2 = FLAG_vector_stores ? r8 : r4; - if (FLAG_vector_stores) { - // The handlers in the stub cache expect a vector and slot. Since we won't - // change the IC from any downstream misses, a dummy vector can be used. - Register vector = VectorStoreICDescriptor::VectorRegister(); - Register slot = VectorStoreICDescriptor::SlotRegister(); - - DCHECK(!AreAliased(vector, slot, r5, temporary2, r6, r9)); - Handle<TypeFeedbackVector> dummy_vector = - TypeFeedbackVector::DummyVector(masm->isolate()); - int slot_index = dummy_vector->GetIndex( - FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedStoreICSlot)); - __ LoadRoot(vector, Heap::kDummyVectorRootIndex); - __ mov(slot, Operand(Smi::FromInt(slot_index))); - } + DCHECK(!FLAG_enable_embedded_constant_pool); + Register temporary2 = r8; + // The handlers in the stub cache expect a vector and slot. Since we won't + // change the IC from any downstream misses, a dummy vector can be used. + Register vector = VectorStoreICDescriptor::VectorRegister(); + Register slot = VectorStoreICDescriptor::SlotRegister(); + + DCHECK(!AreAliased(vector, slot, r5, temporary2, r6, r9)); + Handle<TypeFeedbackVector> dummy_vector = + TypeFeedbackVector::DummyVector(masm->isolate()); + int slot_index = dummy_vector->GetIndex( + FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedStoreICSlot)); + __ LoadRoot(vector, Heap::kDummyVectorRootIndex); + __ mov(slot, Operand(Smi::FromInt(slot_index))); Code::Flags flags = Code::RemoveTypeAndHolderFromFlags( Code::ComputeHandlerFlags(Code::STORE_IC)); @@ -788,8 +773,7 @@ void StoreIC::GenerateMiss(MacroAssembler* masm) { StoreIC_PushArgs(masm); // Perform tail call to the entry. - int args = FLAG_vector_stores ? 
5 : 3; - __ TailCallRuntime(Runtime::kStoreIC_Miss, args, 1); + __ TailCallRuntime(Runtime::kStoreIC_Miss); } @@ -853,7 +837,8 @@ bool CompareIC::HasInlinedSmiCode(Address address) { } -void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) { +void PatchInlinedSmiCode(Isolate* isolate, Address address, + InlinedSmiCheck check) { Address cmp_instruction_address = Assembler::return_address_from_call_start(address); @@ -892,7 +877,7 @@ void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) { // tst rx, #kSmiTagMask // b ne/eq, <target> // and vice-versa to be disabled again. - CodePatcher patcher(patch_address, 2); + CodePatcher patcher(isolate, patch_address, 2); Register reg = Assembler::GetRn(instr_at_patch); if (check == ENABLE_INLINED_SMI_CHECK) { DCHECK(Assembler::IsCmpRegister(instr_at_patch)); diff --git a/deps/v8/src/ic/arm/ic-compiler-arm.cc b/deps/v8/src/ic/arm/ic-compiler-arm.cc index 9b8abd3298..318523199a 100644 --- a/deps/v8/src/ic/arm/ic-compiler-arm.cc +++ b/deps/v8/src/ic/arm/ic-compiler-arm.cc @@ -22,109 +22,7 @@ void PropertyICCompiler::GenerateRuntimeSetProperty( __ Push(r0); // Do tail-call to runtime routine. - __ TailCallRuntime(Runtime::kSetProperty, 4, 1); -} - - -#undef __ -#define __ ACCESS_MASM(masm()) - - -Handle<Code> PropertyICCompiler::CompilePolymorphic(MapHandleList* maps, - CodeHandleList* handlers, - Handle<Name> name, - Code::StubType type, - IcCheckType check) { - Label miss; - - if (check == PROPERTY && - (kind() == Code::KEYED_LOAD_IC || kind() == Code::KEYED_STORE_IC)) { - // In case we are compiling an IC for dictionary loads or stores, just - // check whether the name is unique. - if (name.is_identical_to(isolate()->factory()->normal_ic_symbol())) { - // Keyed loads with dictionaries shouldn't be here, they go generic. - // The DCHECK is to protect assumptions when --vector-ics is on. 
- DCHECK(kind() != Code::KEYED_LOAD_IC); - Register tmp = scratch1(); - __ JumpIfSmi(this->name(), &miss); - __ ldr(tmp, FieldMemOperand(this->name(), HeapObject::kMapOffset)); - __ ldrb(tmp, FieldMemOperand(tmp, Map::kInstanceTypeOffset)); - __ JumpIfNotUniqueNameInstanceType(tmp, &miss); - } else { - __ cmp(this->name(), Operand(name)); - __ b(ne, &miss); - } - } - - Label number_case; - Label* smi_target = IncludesNumberMap(maps) ? &number_case : &miss; - __ JumpIfSmi(receiver(), smi_target); - - // Polymorphic keyed stores may use the map register - Register map_reg = scratch1(); - DCHECK(kind() != Code::KEYED_STORE_IC || - map_reg.is(StoreTransitionDescriptor::MapRegister())); - - int receiver_count = maps->length(); - int number_of_handled_maps = 0; - __ ldr(map_reg, FieldMemOperand(receiver(), HeapObject::kMapOffset)); - for (int current = 0; current < receiver_count; ++current) { - Handle<Map> map = maps->at(current); - if (!map->is_deprecated()) { - number_of_handled_maps++; - Handle<WeakCell> cell = Map::WeakCellForMap(map); - __ CmpWeakValue(map_reg, cell, scratch2()); - if (map->instance_type() == HEAP_NUMBER_TYPE) { - DCHECK(!number_case.is_unused()); - __ bind(&number_case); - } - __ Jump(handlers->at(current), RelocInfo::CODE_TARGET, eq); - } - } - DCHECK(number_of_handled_maps != 0); - - __ bind(&miss); - TailCallBuiltin(masm(), MissBuiltin(kind())); - - // Return the generated code. - InlineCacheState state = - number_of_handled_maps > 1 ? 
POLYMORPHIC : MONOMORPHIC; - return GetCode(kind(), type, name, state); -} - - -Handle<Code> PropertyICCompiler::CompileKeyedStorePolymorphic( - MapHandleList* receiver_maps, CodeHandleList* handler_stubs, - MapHandleList* transitioned_maps) { - Label miss; - __ JumpIfSmi(receiver(), &miss); - - int receiver_count = receiver_maps->length(); - Register map_reg = scratch1(); - __ ldr(map_reg, FieldMemOperand(receiver(), HeapObject::kMapOffset)); - for (int i = 0; i < receiver_count; ++i) { - Handle<WeakCell> cell = Map::WeakCellForMap(receiver_maps->at(i)); - __ CmpWeakValue(map_reg, cell, scratch2()); - if (transitioned_maps->at(i).is_null()) { - __ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET, eq); - } else { - Label next_map; - __ b(ne, &next_map); - Handle<WeakCell> cell = Map::WeakCellForMap(transitioned_maps->at(i)); - Register transition_map = scratch1(); - DCHECK(!FLAG_vector_stores && - transition_map.is(StoreTransitionDescriptor::MapRegister())); - __ LoadWeakValue(transition_map, cell, &miss); - __ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET, al); - __ bind(&next_map); - } - } - - __ bind(&miss); - TailCallBuiltin(masm(), MissBuiltin(kind())); - - // Return the generated code. - return GetCode(kind(), Code::NORMAL, factory()->empty_string(), POLYMORPHIC); + __ TailCallRuntime(Runtime::kSetProperty); } diff --git a/deps/v8/src/ic/arm64/access-compiler-arm64.cc b/deps/v8/src/ic/arm64/access-compiler-arm64.cc index 13b0887a82..892ce85dfb 100644 --- a/deps/v8/src/ic/arm64/access-compiler-arm64.cc +++ b/deps/v8/src/ic/arm64/access-compiler-arm64.cc @@ -38,7 +38,6 @@ Register* PropertyAccessCompiler::store_calling_convention() { // receiver, value, scratch1, scratch2, scratch3. 
Register receiver = StoreDescriptor::ReceiverRegister(); Register name = StoreDescriptor::NameRegister(); - DCHECK(FLAG_vector_stores || x3.is(StoreTransitionDescriptor::MapRegister())); static Register registers[] = {receiver, name, x3, x4, x5}; return registers; } diff --git a/deps/v8/src/ic/arm64/handler-compiler-arm64.cc b/deps/v8/src/ic/arm64/handler-compiler-arm64.cc index 576d333428..7cfef6a1b4 100644 --- a/deps/v8/src/ic/arm64/handler-compiler-arm64.cc +++ b/deps/v8/src/ic/arm64/handler-compiler-arm64.cc @@ -59,7 +59,7 @@ void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup( // Check that receiver is a JSObject. __ Ldrb(scratch0, FieldMemOperand(map, Map::kInstanceTypeOffset)); - __ Cmp(scratch0, FIRST_SPEC_OBJECT_TYPE); + __ Cmp(scratch0, FIRST_JS_RECEIVER_TYPE); __ B(lt, miss_label); // Load properties array. @@ -78,9 +78,7 @@ void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup( void NamedLoadHandlerCompiler::GenerateDirectLoadGlobalFunctionPrototype( MacroAssembler* masm, int index, Register result, Label* miss) { - __ Ldr(result, GlobalObjectMemOperand()); - __ Ldr(result, FieldMemOperand(result, JSGlobalObject::kNativeContextOffset)); - __ Ldr(result, ContextMemOperand(result, index)); + __ LoadNativeContextSlot(index, result); // Load its initial map. The global functions all have initial maps. 
__ Ldr(result, FieldMemOperand(result, JSFunction::kPrototypeOrInitialMapOffset)); @@ -132,9 +130,10 @@ static void PushInterceptorArguments(MacroAssembler* masm, Register receiver, static void CompileCallLoadPropertyWithInterceptor( MacroAssembler* masm, Register receiver, Register holder, Register name, Handle<JSObject> holder_obj, Runtime::FunctionId id) { + DCHECK(NamedLoadHandlerCompiler::kInterceptorArgsLength == + Runtime::FunctionForId(id)->nargs); PushInterceptorArguments(masm, receiver, holder, name, holder_obj); - - __ CallRuntime(id, NamedLoadHandlerCompiler::kInterceptorArgsLength); + __ CallRuntime(id); } @@ -207,6 +206,13 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall( __ Ldr(data, FieldMemOperand(data, CallHandlerInfo::kDataOffset)); } + if (api_call_info->fast_handler()->IsCode()) { + // Just tail call into the fast handler if present. + __ Jump(handle(Code::cast(api_call_info->fast_handler())), + RelocInfo::CODE_TARGET); + return; + } + // Put api_function_address in place. Address function_address = v8::ToCData<Address>(api_call_info->callback()); ApiFunction fun(function_address); @@ -248,7 +254,8 @@ void NamedStoreHandlerCompiler::GenerateStoreViaSetter( ParameterCount actual(1); ParameterCount expected(expected_arguments); __ LoadAccessor(x1, holder, accessor_index, ACCESSOR_SETTER); - __ InvokeFunction(x1, expected, actual, CALL_FUNCTION, NullCallWrapper()); + __ InvokeFunction(x1, expected, actual, CALL_FUNCTION, + CheckDebugStepCallWrapper()); } else { // If we generate a global code snippet for deoptimization only, remember // the place to continue after deoptimization. 
@@ -285,7 +292,8 @@ void NamedLoadHandlerCompiler::GenerateLoadViaGetter( ParameterCount actual(0); ParameterCount expected(expected_arguments); __ LoadAccessor(x1, holder, accessor_index, ACCESSOR_GETTER); - __ InvokeFunction(x1, expected, actual, CALL_FUNCTION, NullCallWrapper()); + __ InvokeFunction(x1, expected, actual, CALL_FUNCTION, + CheckDebugStepCallWrapper()); } else { // If we generate a global code snippet for deoptimization only, remember // the place to continue after deoptimization. @@ -300,15 +308,10 @@ void NamedLoadHandlerCompiler::GenerateLoadViaGetter( static void StoreIC_PushArgs(MacroAssembler* masm) { - if (FLAG_vector_stores) { - __ Push(StoreDescriptor::ReceiverRegister(), - StoreDescriptor::NameRegister(), StoreDescriptor::ValueRegister(), - VectorStoreICDescriptor::SlotRegister(), - VectorStoreICDescriptor::VectorRegister()); - } else { - __ Push(StoreDescriptor::ReceiverRegister(), - StoreDescriptor::NameRegister(), StoreDescriptor::ValueRegister()); - } + __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(), + StoreDescriptor::ValueRegister(), + VectorStoreICDescriptor::SlotRegister(), + VectorStoreICDescriptor::VectorRegister()); } @@ -317,7 +320,7 @@ void NamedStoreHandlerCompiler::GenerateSlow(MacroAssembler* masm) { // The slow case calls into the runtime to complete the store without causing // an IC miss that would otherwise cause a transition to the generic stub. - __ TailCallRuntime(Runtime::kStoreIC_Slow, FLAG_vector_stores ? 5 : 3, 1); + __ TailCallRuntime(Runtime::kStoreIC_Slow); } @@ -327,8 +330,7 @@ void ElementHandlerCompiler::GenerateStoreSlow(MacroAssembler* masm) { // The slow case calls into the runtime to complete the store without causing // an IC miss that would otherwise cause a transition to the generic stub. - __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow, FLAG_vector_stores ? 
5 : 3, - 1); + __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow); } @@ -378,7 +380,7 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreInterceptor( __ Push(receiver(), this->name(), value()); // Do tail-call to the runtime system. - __ TailCallRuntime(Runtime::kStorePropertyWithInterceptor, 3, 1); + __ TailCallRuntime(Runtime::kStorePropertyWithInterceptor); // Return the generated code. return GetCode(kind(), Code::FAST, name); @@ -767,8 +769,7 @@ void NamedLoadHandlerCompiler::GenerateLoadInterceptor(Register holder_reg) { PushInterceptorArguments(masm(), receiver(), holder_reg, this->name(), holder()); - __ TailCallRuntime(Runtime::kLoadPropertyWithInterceptor, - NamedLoadHandlerCompiler::kInterceptorArgsLength, 1); + __ TailCallRuntime(Runtime::kLoadPropertyWithInterceptor); } @@ -796,7 +797,7 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback( __ Push(receiver(), holder_reg, scratch1(), scratch2(), value()); // Do tail-call to the runtime system. - __ TailCallRuntime(Runtime::kStoreCallbackProperty, 5, 1); + __ TailCallRuntime(Runtime::kStoreCallbackProperty); // Return the generated code. return GetCode(kind(), Code::FAST, name); diff --git a/deps/v8/src/ic/arm64/ic-arm64.cc b/deps/v8/src/ic/arm64/ic-arm64.cc index 90b89018fe..eb933c78ec 100644 --- a/deps/v8/src/ic/arm64/ic-arm64.cc +++ b/deps/v8/src/ic/arm64/ic-arm64.cc @@ -293,8 +293,7 @@ void LoadIC::GenerateMiss(MacroAssembler* masm) { LoadWithVectorDescriptor::NameRegister(), LoadWithVectorDescriptor::SlotRegister(), LoadWithVectorDescriptor::VectorRegister()); - int arg_count = 4; - __ TailCallRuntime(Runtime::kLoadIC_Miss, arg_count, 1); + __ TailCallRuntime(Runtime::kLoadIC_Miss); } @@ -305,8 +304,7 @@ void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm, // Do tail-call to runtime routine. __ TailCallRuntime(is_strong(language_mode) ? 
Runtime::kGetPropertyStrong - : Runtime::kGetProperty, - 2, 1); + : Runtime::kGetProperty); } @@ -324,8 +322,7 @@ void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) { LoadWithVectorDescriptor::VectorRegister()); // Perform tail call to the entry. - int arg_count = 4; - __ TailCallRuntime(Runtime::kKeyedLoadIC_Miss, arg_count, 1); + __ TailCallRuntime(Runtime::kKeyedLoadIC_Miss); } @@ -336,8 +333,7 @@ void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm, // Do tail-call to runtime routine. __ TailCallRuntime(is_strong(language_mode) ? Runtime::kKeyedGetPropertyStrong - : Runtime::kKeyedGetProperty, - 2, 1); + : Runtime::kKeyedGetProperty); } @@ -470,24 +466,17 @@ void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm, static void StoreIC_PushArgs(MacroAssembler* masm) { - if (FLAG_vector_stores) { - __ Push(StoreDescriptor::ReceiverRegister(), - StoreDescriptor::NameRegister(), StoreDescriptor::ValueRegister(), - VectorStoreICDescriptor::SlotRegister(), - VectorStoreICDescriptor::VectorRegister()); - } else { - __ Push(StoreDescriptor::ReceiverRegister(), - StoreDescriptor::NameRegister(), StoreDescriptor::ValueRegister()); - } + __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(), + StoreDescriptor::ValueRegister(), + VectorStoreICDescriptor::SlotRegister(), + VectorStoreICDescriptor::VectorRegister()); } void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) { ASM_LOCATION("KeyedStoreIC::GenerateMiss"); StoreIC_PushArgs(masm); - - int args = FLAG_vector_stores ? 5 : 3; - __ TailCallRuntime(Runtime::kKeyedStoreIC_Miss, args, 1); + __ TailCallRuntime(Runtime::kKeyedStoreIC_Miss); } @@ -690,19 +679,17 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm, __ Ldrb(x10, FieldMemOperand(x10, Map::kInstanceTypeOffset)); __ JumpIfNotUniqueNameInstanceType(x10, &slow); - if (FLAG_vector_stores) { - // The handlers in the stub cache expect a vector and slot. 
Since we won't - // change the IC from any downstream misses, a dummy vector can be used. - Register vector = VectorStoreICDescriptor::VectorRegister(); - Register slot = VectorStoreICDescriptor::SlotRegister(); - DCHECK(!AreAliased(vector, slot, x5, x6, x7, x8)); - Handle<TypeFeedbackVector> dummy_vector = - TypeFeedbackVector::DummyVector(masm->isolate()); - int slot_index = dummy_vector->GetIndex( - FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedStoreICSlot)); - __ LoadRoot(vector, Heap::kDummyVectorRootIndex); - __ Mov(slot, Operand(Smi::FromInt(slot_index))); - } + // The handlers in the stub cache expect a vector and slot. Since we won't + // change the IC from any downstream misses, a dummy vector can be used. + Register vector = VectorStoreICDescriptor::VectorRegister(); + Register slot = VectorStoreICDescriptor::SlotRegister(); + DCHECK(!AreAliased(vector, slot, x5, x6, x7, x8)); + Handle<TypeFeedbackVector> dummy_vector = + TypeFeedbackVector::DummyVector(masm->isolate()); + int slot_index = dummy_vector->GetIndex( + FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedStoreICSlot)); + __ LoadRoot(vector, Heap::kDummyVectorRootIndex); + __ Mov(slot, Operand(Smi::FromInt(slot_index))); Code::Flags flags = Code::RemoveTypeAndHolderFromFlags( Code::ComputeHandlerFlags(Code::STORE_IC)); @@ -778,8 +765,7 @@ void StoreIC::GenerateMiss(MacroAssembler* masm) { StoreIC_PushArgs(masm); // Tail call to the entry. - int args = FLAG_vector_stores ? 5 : 3; - __ TailCallRuntime(Runtime::kStoreIC_Miss, args, 1); + __ TailCallRuntime(Runtime::kStoreIC_Miss); } @@ -839,7 +825,8 @@ bool CompareIC::HasInlinedSmiCode(Address address) { // Activate a SMI fast-path by patching the instructions generated by // JumpPatchSite::EmitJumpIf(Not)Smi(), using the information encoded by // JumpPatchSite::EmitPatchInfo(). 
-void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) { +void PatchInlinedSmiCode(Isolate* isolate, Address address, + InlinedSmiCheck check) { // The patch information is encoded in the instruction stream using // instructions which have no side effects, so we can safely execute them. // The patch information is encoded directly after the call to the helper @@ -864,7 +851,7 @@ void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) { // to // tb(!n)z test_reg, #0, <target> Instruction* to_patch = info.SmiCheck(); - PatchingAssembler patcher(to_patch, 1); + PatchingAssembler patcher(isolate, to_patch, 1); DCHECK(to_patch->IsTestBranch()); DCHECK(to_patch->ImmTestBranchBit5() == 0); DCHECK(to_patch->ImmTestBranchBit40() == 0); diff --git a/deps/v8/src/ic/arm64/ic-compiler-arm64.cc b/deps/v8/src/ic/arm64/ic-compiler-arm64.cc index b4a4163fed..c99c637ab1 100644 --- a/deps/v8/src/ic/arm64/ic-compiler-arm64.cc +++ b/deps/v8/src/ic/arm64/ic-compiler-arm64.cc @@ -23,115 +23,9 @@ void PropertyICCompiler::GenerateRuntimeSetProperty( __ Push(x10); // Do tail-call to runtime routine. - __ TailCallRuntime(Runtime::kSetProperty, 4, 1); + __ TailCallRuntime(Runtime::kSetProperty); } - -#undef __ -#define __ ACCESS_MASM(masm()) - - -Handle<Code> PropertyICCompiler::CompilePolymorphic(MapHandleList* maps, - CodeHandleList* handlers, - Handle<Name> name, - Code::StubType type, - IcCheckType check) { - Label miss; - - if (check == PROPERTY && - (kind() == Code::KEYED_LOAD_IC || kind() == Code::KEYED_STORE_IC)) { - // In case we are compiling an IC for dictionary loads or stores, just - // check whether the name is unique. - if (name.is_identical_to(isolate()->factory()->normal_ic_symbol())) { - // Keyed loads with dictionaries shouldn't be here, they go generic. - // The DCHECK is to protect assumptions when --vector-ics is on. 
- DCHECK(kind() != Code::KEYED_LOAD_IC); - Register tmp = scratch1(); - __ JumpIfSmi(this->name(), &miss); - __ Ldr(tmp, FieldMemOperand(this->name(), HeapObject::kMapOffset)); - __ Ldrb(tmp, FieldMemOperand(tmp, Map::kInstanceTypeOffset)); - __ JumpIfNotUniqueNameInstanceType(tmp, &miss); - } else { - __ CompareAndBranch(this->name(), Operand(name), ne, &miss); - } - } - - Label number_case; - Label* smi_target = IncludesNumberMap(maps) ? &number_case : &miss; - __ JumpIfSmi(receiver(), smi_target); - - // Polymorphic keyed stores may use the map register - Register map_reg = scratch1(); - DCHECK(kind() != Code::KEYED_STORE_IC || - map_reg.is(StoreTransitionDescriptor::MapRegister())); - __ Ldr(map_reg, FieldMemOperand(receiver(), HeapObject::kMapOffset)); - int receiver_count = maps->length(); - int number_of_handled_maps = 0; - for (int current = 0; current < receiver_count; ++current) { - Handle<Map> map = maps->at(current); - if (!map->is_deprecated()) { - number_of_handled_maps++; - Handle<WeakCell> cell = Map::WeakCellForMap(map); - __ CmpWeakValue(map_reg, cell, scratch2()); - Label try_next; - __ B(ne, &try_next); - if (map->instance_type() == HEAP_NUMBER_TYPE) { - DCHECK(!number_case.is_unused()); - __ Bind(&number_case); - } - __ Jump(handlers->at(current), RelocInfo::CODE_TARGET); - __ Bind(&try_next); - } - } - DCHECK(number_of_handled_maps != 0); - - __ Bind(&miss); - TailCallBuiltin(masm(), MissBuiltin(kind())); - - // Return the generated code. - InlineCacheState state = - (number_of_handled_maps > 1) ? 
POLYMORPHIC : MONOMORPHIC; - return GetCode(kind(), type, name, state); -} - - -Handle<Code> PropertyICCompiler::CompileKeyedStorePolymorphic( - MapHandleList* receiver_maps, CodeHandleList* handler_stubs, - MapHandleList* transitioned_maps) { - Label miss; - - ASM_LOCATION("PropertyICCompiler::CompileStorePolymorphic"); - - __ JumpIfSmi(receiver(), &miss); - - int receiver_count = receiver_maps->length(); - Register map_reg = scratch1(); - __ Ldr(map_reg, FieldMemOperand(receiver(), HeapObject::kMapOffset)); - for (int i = 0; i < receiver_count; i++) { - Handle<WeakCell> cell = Map::WeakCellForMap(receiver_maps->at(i)); - __ CmpWeakValue(map_reg, cell, scratch2()); - Label skip; - __ B(&skip, ne); - if (!transitioned_maps->at(i).is_null()) { - // This argument is used by the handler stub. For example, see - // ElementsTransitionGenerator::GenerateMapChangeElementsTransition. - Handle<WeakCell> cell = Map::WeakCellForMap(transitioned_maps->at(i)); - Register transition_map = scratch1(); - DCHECK(!FLAG_vector_stores && - transition_map.is(StoreTransitionDescriptor::MapRegister())); - __ LoadWeakValue(transition_map, cell, &miss); - } - __ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET); - __ Bind(&skip); - } - - __ Bind(&miss); - TailCallBuiltin(masm(), MissBuiltin(kind())); - - return GetCode(kind(), Code::NORMAL, factory()->empty_string(), POLYMORPHIC); -} - - #undef __ } // namespace internal } // namespace v8 diff --git a/deps/v8/src/ic/handler-compiler.cc b/deps/v8/src/ic/handler-compiler.cc index 2b6f88ac95..b353628053 100644 --- a/deps/v8/src/ic/handler-compiler.cc +++ b/deps/v8/src/ic/handler-compiler.cc @@ -427,7 +427,7 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreTransition( Handle<Map> transition, Handle<Name> name) { Label miss; - if (FLAG_vector_stores) PushVectorAndSlot(); + PushVectorAndSlot(); // Check that we are allowed to write this. 
bool is_nonexistent = holder()->map() == transition->GetBackPointer(); @@ -471,7 +471,7 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreTransition( if (virtual_args) { // This will move the map from tmp into map_reg. RearrangeVectorAndSlot(tmp, map_reg); - } else if (FLAG_vector_stores) { + } else { PopVectorAndSlot(); } GenerateRestoreName(name); @@ -493,7 +493,7 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreTransition( GenerateRestoreMap(transition, tmp, scratch2(), &miss); if (virtual_args) { RearrangeVectorAndSlot(tmp, map_reg); - } else if (FLAG_vector_stores) { + } else { PopVectorAndSlot(); } GenerateRestoreName(name); @@ -504,7 +504,7 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreTransition( } GenerateRestoreName(&miss, name); - if (FLAG_vector_stores) PopVectorAndSlot(); + PopVectorAndSlot(); TailCallBuiltin(masm(), MissBuiltin(kind())); return GetCode(kind(), Code::FAST, name); diff --git a/deps/v8/src/ic/ia32/access-compiler-ia32.cc b/deps/v8/src/ic/ia32/access-compiler-ia32.cc index acb3526d9d..1825202366 100644 --- a/deps/v8/src/ic/ia32/access-compiler-ia32.cc +++ b/deps/v8/src/ic/ia32/access-compiler-ia32.cc @@ -30,8 +30,6 @@ Register* PropertyAccessCompiler::store_calling_convention() { // receiver, name, scratch1, scratch2, scratch3. 
Register receiver = StoreDescriptor::ReceiverRegister(); Register name = StoreDescriptor::NameRegister(); - DCHECK(FLAG_vector_stores || - ebx.is(StoreTransitionDescriptor::MapRegister())); static Register registers[] = {receiver, name, ebx, edi, no_reg}; return registers; } diff --git a/deps/v8/src/ic/ia32/handler-compiler-ia32.cc b/deps/v8/src/ic/ia32/handler-compiler-ia32.cc index d5011fb7e9..0b380b3ee2 100644 --- a/deps/v8/src/ic/ia32/handler-compiler-ia32.cc +++ b/deps/v8/src/ic/ia32/handler-compiler-ia32.cc @@ -36,7 +36,7 @@ void NamedLoadHandlerCompiler::GenerateLoadViaGetter( ParameterCount expected(expected_arguments); __ LoadAccessor(edi, holder, accessor_index, ACCESSOR_GETTER); __ InvokeFunction(edi, expected, actual, CALL_FUNCTION, - NullCallWrapper()); + CheckDebugStepCallWrapper()); } else { // If we generate a global code snippet for deoptimization only, remember // the place to continue after deoptimization. @@ -92,7 +92,7 @@ void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup( __ j(not_zero, miss_label); // Check that receiver is a JSObject. - __ CmpInstanceType(scratch0, FIRST_SPEC_OBJECT_TYPE); + __ CmpInstanceType(scratch0, FIRST_JS_RECEIVER_TYPE); __ j(below, miss_label); // Load properties array. @@ -114,10 +114,7 @@ void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup( void NamedLoadHandlerCompiler::GenerateDirectLoadGlobalFunctionPrototype( MacroAssembler* masm, int index, Register result, Label* miss) { - const int offset = Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX); - __ mov(result, Operand(esi, offset)); - __ mov(result, FieldOperand(result, JSGlobalObject::kNativeContextOffset)); - __ mov(result, Operand(result, Context::SlotOffset(index))); + __ LoadGlobalFunction(index, result); // Load its initial map. The global functions all have initial maps. 
__ mov(result, FieldOperand(result, JSFunction::kPrototypeOrInitialMapOffset)); @@ -206,6 +203,12 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall( __ mov(data, FieldOperand(data, CallHandlerInfo::kDataOffset)); } + if (api_call_info->fast_handler()->IsCode()) { + // Just tail call into the code. + __ Jump(handle(Code::cast(api_call_info->fast_handler())), + RelocInfo::CODE_TARGET); + return; + } // Put api_function_address in place. Address function_address = v8::ToCData<Address>(api_call_info->callback()); __ mov(api_function_address, Immediate(function_address)); @@ -261,7 +264,7 @@ void NamedStoreHandlerCompiler::GenerateStoreViaSetter( ParameterCount expected(expected_arguments); __ LoadAccessor(edi, holder, accessor_index, ACCESSOR_SETTER); __ InvokeFunction(edi, expected, actual, CALL_FUNCTION, - NullCallWrapper()); + CheckDebugStepCallWrapper()); } else { // If we generate a global code snippet for deoptimization only, remember // the place to continue after deoptimization. 
@@ -294,8 +297,10 @@ static void PushInterceptorArguments(MacroAssembler* masm, Register receiver, static void CompileCallLoadPropertyWithInterceptor( MacroAssembler* masm, Register receiver, Register holder, Register name, Handle<JSObject> holder_obj, Runtime::FunctionId id) { + DCHECK(NamedLoadHandlerCompiler::kInterceptorArgsLength == + Runtime::FunctionForId(id)->nargs); PushInterceptorArguments(masm, receiver, holder, name, holder_obj); - __ CallRuntime(id, NamedLoadHandlerCompiler::kInterceptorArgsLength); + __ CallRuntime(id); } @@ -303,25 +308,15 @@ static void StoreIC_PushArgs(MacroAssembler* masm) { Register receiver = StoreDescriptor::ReceiverRegister(); Register name = StoreDescriptor::NameRegister(); Register value = StoreDescriptor::ValueRegister(); + Register slot = VectorStoreICDescriptor::SlotRegister(); + Register vector = VectorStoreICDescriptor::VectorRegister(); - if (FLAG_vector_stores) { - Register slot = VectorStoreICDescriptor::SlotRegister(); - Register vector = VectorStoreICDescriptor::VectorRegister(); - - __ xchg(receiver, Operand(esp, 0)); - __ push(name); - __ push(value); - __ push(slot); - __ push(vector); - __ push(receiver); // which contains the return address. - } else { - DCHECK(!ebx.is(receiver) && !ebx.is(name) && !ebx.is(value)); - __ pop(ebx); - __ push(receiver); - __ push(name); - __ push(value); - __ push(ebx); - } + __ xchg(receiver, Operand(esp, 0)); + __ push(name); + __ push(value); + __ push(slot); + __ push(vector); + __ push(receiver); // which contains the return address. } @@ -330,7 +325,7 @@ void NamedStoreHandlerCompiler::GenerateSlow(MacroAssembler* masm) { StoreIC_PushArgs(masm); // Do tail-call to runtime routine. - __ TailCallRuntime(Runtime::kStoreIC_Slow, FLAG_vector_stores ? 5 : 3, 1); + __ TailCallRuntime(Runtime::kStoreIC_Slow); } @@ -339,8 +334,7 @@ void ElementHandlerCompiler::GenerateStoreSlow(MacroAssembler* masm) { StoreIC_PushArgs(masm); // Do tail-call to runtime routine. 
- __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow, FLAG_vector_stores ? 5 : 3, - 1); + __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow); } @@ -732,8 +726,7 @@ void NamedLoadHandlerCompiler::GenerateLoadInterceptor(Register holder_reg) { holder()); __ push(scratch2()); // restore old return address - __ TailCallRuntime(Runtime::kLoadPropertyWithInterceptor, - NamedLoadHandlerCompiler::kInterceptorArgsLength, 1); + __ TailCallRuntime(Runtime::kLoadPropertyWithInterceptor); } @@ -758,7 +751,7 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback( __ push(scratch1()); // restore return address // Do tail-call to the runtime system. - __ TailCallRuntime(Runtime::kStoreCallbackProperty, 5, 1); + __ TailCallRuntime(Runtime::kStoreCallbackProperty); // Return the generated code. return GetCode(kind(), Code::FAST, name); @@ -774,7 +767,7 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreInterceptor( __ push(scratch1()); // restore return address // Do tail-call to the runtime system. - __ TailCallRuntime(Runtime::kStorePropertyWithInterceptor, 3, 1); + __ TailCallRuntime(Runtime::kStorePropertyWithInterceptor); // Return the generated code. return GetCode(kind(), Code::FAST, name); diff --git a/deps/v8/src/ic/ia32/ic-compiler-ia32.cc b/deps/v8/src/ic/ia32/ic-compiler-ia32.cc index d0a2e0bd54..d93b67bffc 100644 --- a/deps/v8/src/ic/ia32/ic-compiler-ia32.cc +++ b/deps/v8/src/ic/ia32/ic-compiler-ia32.cc @@ -27,104 +27,7 @@ void PropertyICCompiler::GenerateRuntimeSetProperty( __ push(ebx); // return address // Do tail-call to runtime routine. 
- __ TailCallRuntime(Runtime::kSetProperty, 4, 1); -} - - -#undef __ -#define __ ACCESS_MASM(masm()) - -Handle<Code> PropertyICCompiler::CompilePolymorphic(MapHandleList* maps, - CodeHandleList* handlers, - Handle<Name> name, - Code::StubType type, - IcCheckType check) { - Label miss; - - if (check == PROPERTY && - (kind() == Code::KEYED_STORE_IC || kind() == Code::KEYED_LOAD_IC)) { - // In case we are compiling an IC for dictionary loads or stores, just - // check whether the name is unique. - if (name.is_identical_to(isolate()->factory()->normal_ic_symbol())) { - // Keyed loads with dictionaries shouldn't be here, they go generic. - // The DCHECK is to protect assumptions when --vector-ics is on. - DCHECK(kind() != Code::KEYED_LOAD_IC); - Register tmp = scratch1(); - __ JumpIfSmi(this->name(), &miss); - __ mov(tmp, FieldOperand(this->name(), HeapObject::kMapOffset)); - __ movzx_b(tmp, FieldOperand(tmp, Map::kInstanceTypeOffset)); - __ JumpIfNotUniqueNameInstanceType(tmp, &miss); - } else { - __ cmp(this->name(), Immediate(name)); - __ j(not_equal, &miss); - } - } - - Label number_case; - Label* smi_target = IncludesNumberMap(maps) ? 
&number_case : &miss; - __ JumpIfSmi(receiver(), smi_target); - - // Polymorphic keyed stores may use the map register - Register map_reg = scratch1(); - DCHECK(kind() != Code::KEYED_STORE_IC || - map_reg.is(StoreTransitionDescriptor::MapRegister())); - __ mov(map_reg, FieldOperand(receiver(), HeapObject::kMapOffset)); - int receiver_count = maps->length(); - int number_of_handled_maps = 0; - for (int current = 0; current < receiver_count; ++current) { - Handle<Map> map = maps->at(current); - if (!map->is_deprecated()) { - number_of_handled_maps++; - Handle<WeakCell> cell = Map::WeakCellForMap(map); - __ CmpWeakValue(map_reg, cell, scratch2()); - if (map->instance_type() == HEAP_NUMBER_TYPE) { - DCHECK(!number_case.is_unused()); - __ bind(&number_case); - } - __ j(equal, handlers->at(current)); - } - } - DCHECK(number_of_handled_maps != 0); - - __ bind(&miss); - TailCallBuiltin(masm(), MissBuiltin(kind())); - - // Return the generated code. - InlineCacheState state = - number_of_handled_maps > 1 ? 
POLYMORPHIC : MONOMORPHIC; - return GetCode(kind(), type, name, state); -} - - -Handle<Code> PropertyICCompiler::CompileKeyedStorePolymorphic( - MapHandleList* receiver_maps, CodeHandleList* handler_stubs, - MapHandleList* transitioned_maps) { - Label miss; - __ JumpIfSmi(receiver(), &miss); - Register map_reg = scratch1(); - __ mov(map_reg, FieldOperand(receiver(), HeapObject::kMapOffset)); - for (int i = 0; i < receiver_maps->length(); ++i) { - Handle<WeakCell> cell = Map::WeakCellForMap(receiver_maps->at(i)); - __ CmpWeakValue(map_reg, cell, scratch2()); - if (transitioned_maps->at(i).is_null()) { - __ j(equal, handler_stubs->at(i)); - } else { - Label next_map; - __ j(not_equal, &next_map, Label::kNear); - Handle<WeakCell> cell = Map::WeakCellForMap(transitioned_maps->at(i)); - Register transition_map = scratch1(); - DCHECK(!FLAG_vector_stores && - transition_map.is(StoreTransitionDescriptor::MapRegister())); - __ LoadWeakValue(transition_map, cell, &miss); - __ jmp(handler_stubs->at(i), RelocInfo::CODE_TARGET); - __ bind(&next_map); - } - } - __ bind(&miss); - TailCallBuiltin(masm(), MissBuiltin(kind())); - - // Return the generated code. - return GetCode(kind(), Code::NORMAL, factory()->empty_string(), POLYMORPHIC); + __ TailCallRuntime(Runtime::kSetProperty); } diff --git a/deps/v8/src/ic/ia32/ic-ia32.cc b/deps/v8/src/ic/ia32/ic-ia32.cc index 1754d5a6fc..88947e47e7 100644 --- a/deps/v8/src/ic/ia32/ic-ia32.cc +++ b/deps/v8/src/ic/ia32/ic-ia32.cc @@ -561,26 +561,22 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm, __ JumpIfNotUniqueNameInstanceType(ebx, &slow); - if (FLAG_vector_stores) { - // The handlers in the stub cache expect a vector and slot. Since we won't - // change the IC from any downstream misses, a dummy vector can be used. 
- Handle<TypeFeedbackVector> dummy_vector = - TypeFeedbackVector::DummyVector(masm->isolate()); - int slot = dummy_vector->GetIndex( - FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedStoreICSlot)); - __ push(Immediate(Smi::FromInt(slot))); - __ push(Immediate(dummy_vector)); - } + // The handlers in the stub cache expect a vector and slot. Since we won't + // change the IC from any downstream misses, a dummy vector can be used. + Handle<TypeFeedbackVector> dummy_vector = + TypeFeedbackVector::DummyVector(masm->isolate()); + int slot = dummy_vector->GetIndex( + FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedStoreICSlot)); + __ push(Immediate(Smi::FromInt(slot))); + __ push(Immediate(dummy_vector)); Code::Flags flags = Code::RemoveTypeAndHolderFromFlags( Code::ComputeHandlerFlags(Code::STORE_IC)); masm->isolate()->stub_cache()->GenerateProbe(masm, Code::STORE_IC, flags, receiver, key, edi, no_reg); - if (FLAG_vector_stores) { - __ pop(VectorStoreICDescriptor::VectorRegister()); - __ pop(VectorStoreICDescriptor::SlotRegister()); - } + __ pop(VectorStoreICDescriptor::VectorRegister()); + __ pop(VectorStoreICDescriptor::SlotRegister()); // Cache miss. __ jmp(&miss); @@ -676,8 +672,7 @@ void LoadIC::GenerateMiss(MacroAssembler* masm) { LoadIC_PushArgs(masm); // Perform tail call to the entry. - int arg_count = 4; - __ TailCallRuntime(Runtime::kLoadIC_Miss, arg_count, 1); + __ TailCallRuntime(Runtime::kLoadIC_Miss); } @@ -695,8 +690,7 @@ void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm, // Do tail-call to runtime routine. __ TailCallRuntime(is_strong(language_mode) ? Runtime::kGetPropertyStrong - : Runtime::kGetProperty, - 2, 1); + : Runtime::kGetProperty); } @@ -707,8 +701,7 @@ void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) { LoadIC_PushArgs(masm); // Perform tail call to the entry. 
- int arg_count = 4; - __ TailCallRuntime(Runtime::kKeyedLoadIC_Miss, arg_count, 1); + __ TailCallRuntime(Runtime::kKeyedLoadIC_Miss); } @@ -726,27 +719,15 @@ void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm, // Do tail-call to runtime routine. __ TailCallRuntime(is_strong(language_mode) ? Runtime::kKeyedGetPropertyStrong - : Runtime::kKeyedGetProperty, - 2, 1); + : Runtime::kKeyedGetProperty); } void StoreIC::GenerateMegamorphic(MacroAssembler* masm) { - if (FLAG_vector_stores) { - // This shouldn't be called. - __ int3(); - return; - } - - // Return address is on the stack. - Code::Flags flags = Code::RemoveTypeAndHolderFromFlags( - Code::ComputeHandlerFlags(Code::STORE_IC)); - masm->isolate()->stub_cache()->GenerateProbe( - masm, Code::STORE_IC, flags, StoreDescriptor::ReceiverRegister(), - StoreDescriptor::NameRegister(), ebx, no_reg); - - // Cache miss: Jump to runtime. - GenerateMiss(masm); + // This shouldn't be called. + // TODO(mvstanton): remove this method. + __ int3(); + return; } @@ -754,25 +735,15 @@ static void StoreIC_PushArgs(MacroAssembler* masm) { Register receiver = StoreDescriptor::ReceiverRegister(); Register name = StoreDescriptor::NameRegister(); Register value = StoreDescriptor::ValueRegister(); + Register slot = VectorStoreICDescriptor::SlotRegister(); + Register vector = VectorStoreICDescriptor::VectorRegister(); - if (FLAG_vector_stores) { - Register slot = VectorStoreICDescriptor::SlotRegister(); - Register vector = VectorStoreICDescriptor::VectorRegister(); - - __ xchg(receiver, Operand(esp, 0)); - __ push(name); - __ push(value); - __ push(slot); - __ push(vector); - __ push(receiver); // Contains the return address. 
- } else { - DCHECK(!ebx.is(receiver) && !ebx.is(name) && !ebx.is(value)); - __ pop(ebx); - __ push(receiver); - __ push(name); - __ push(value); - __ push(ebx); - } + __ xchg(receiver, Operand(esp, 0)); + __ push(name); + __ push(value); + __ push(slot); + __ push(vector); + __ push(receiver); // Contains the return address. } @@ -781,8 +752,7 @@ void StoreIC::GenerateMiss(MacroAssembler* masm) { StoreIC_PushArgs(masm); // Perform tail call to the entry. - int args = FLAG_vector_stores ? 5 : 3; - __ TailCallRuntime(Runtime::kStoreIC_Miss, args, 1); + __ TailCallRuntime(Runtime::kStoreIC_Miss); } @@ -798,25 +768,21 @@ void StoreIC::GenerateNormal(MacroAssembler* masm) { // objects. Push and restore receiver but rely on // GenerateDictionaryStore preserving the value and name. __ push(receiver); - if (FLAG_vector_stores) { - __ push(vector); - __ push(slot); - } + __ push(vector); + __ push(slot); Register dictionary = ebx; __ mov(dictionary, FieldOperand(receiver, JSObject::kPropertiesOffset)); GenerateDictionaryStore(masm, &restore_miss, dictionary, name, value, receiver, edi); - __ Drop(FLAG_vector_stores ? 3 : 1); + __ Drop(3); Counters* counters = masm->isolate()->counters(); __ IncrementCounter(counters->store_normal_hit(), 1); __ ret(0); __ bind(&restore_miss); - if (FLAG_vector_stores) { - __ pop(slot); - __ pop(vector); - } + __ pop(slot); + __ pop(vector); __ pop(receiver); __ IncrementCounter(counters->store_normal_miss(), 1); GenerateMiss(masm); @@ -828,8 +794,7 @@ void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) { StoreIC_PushArgs(masm); // Do tail-call to runtime routine. - int args = FLAG_vector_stores ? 
5 : 3; - __ TailCallRuntime(Runtime::kKeyedStoreIC_Miss, args, 1); + __ TailCallRuntime(Runtime::kKeyedStoreIC_Miss); } @@ -867,7 +832,8 @@ bool CompareIC::HasInlinedSmiCode(Address address) { } -void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) { +void PatchInlinedSmiCode(Isolate* isolate, Address address, + InlinedSmiCheck check) { // The address of the instruction following the call. Address test_instruction_address = address + Assembler::kCallTargetAddressOffset; diff --git a/deps/v8/src/ic/ic-compiler.cc b/deps/v8/src/ic/ic-compiler.cc index 20e4fedc23..ae4b2a5d58 100644 --- a/deps/v8/src/ic/ic-compiler.cc +++ b/deps/v8/src/ic/ic-compiler.cc @@ -33,60 +33,6 @@ bool PropertyICCompiler::IncludesNumberMap(MapHandleList* maps) { } -Handle<Code> PropertyICCompiler::CompileMonomorphic(Handle<Map> map, - Handle<Code> handler, - Handle<Name> name, - IcCheckType check) { - MapHandleList maps(1); - CodeHandleList handlers(1); - maps.Add(map); - handlers.Add(handler); - Code::StubType stub_type = handler->type(); - return CompilePolymorphic(&maps, &handlers, name, stub_type, check); -} - - -Handle<Code> PropertyICCompiler::ComputeMonomorphic( - Code::Kind kind, Handle<Name> name, Handle<Map> map, Handle<Code> handler, - ExtraICState extra_ic_state) { - Isolate* isolate = name->GetIsolate(); - if (handler.is_identical_to(isolate->builtins()->LoadIC_Normal()) || - handler.is_identical_to(isolate->builtins()->LoadIC_Normal_Strong()) || - handler.is_identical_to(isolate->builtins()->StoreIC_Normal())) { - name = isolate->factory()->normal_ic_symbol(); - } - - CacheHolderFlag flag; - Handle<Map> stub_holder = IC::GetICCacheHolder(map, isolate, &flag); - if (kind == Code::KEYED_STORE_IC) { - // Always set the "property" bit. 
- extra_ic_state = - KeyedStoreIC::IcCheckTypeField::update(extra_ic_state, PROPERTY); - DCHECK(STANDARD_STORE == - KeyedStoreIC::GetKeyedAccessStoreMode(extra_ic_state)); - } else if (kind == Code::KEYED_LOAD_IC) { - extra_ic_state = KeyedLoadIC::IcCheckTypeField::update(extra_ic_state, - PROPERTY); - } - - Handle<Code> ic; - // There are multiple string maps that all use the same prototype. That - // prototype cannot hold multiple handlers, one for each of the string maps, - // for a single name. Hence, turn off caching of the IC. - bool can_be_cached = map->instance_type() >= FIRST_NONSTRING_TYPE; - if (can_be_cached) { - ic = Find(name, stub_holder, kind, extra_ic_state, flag); - if (!ic.is_null()) return ic; - } - - PropertyICCompiler ic_compiler(isolate, kind, extra_ic_state, flag); - ic = ic_compiler.CompileMonomorphic(map, handler, name, PROPERTY); - - if (can_be_cached) Map::UpdateCodeCache(stub_holder, name, ic); - return ic; -} - - Handle<Code> PropertyICCompiler::ComputeKeyedLoadMonomorphicHandler( Handle<Map> receiver_map, ExtraICState extra_ic_state) { Isolate* isolate = receiver_map->GetIsolate(); @@ -138,35 +84,6 @@ Handle<Code> PropertyICCompiler::ComputeKeyedStoreMonomorphicHandler( } -Handle<Code> PropertyICCompiler::ComputeKeyedStoreMonomorphic( - Handle<Map> receiver_map, LanguageMode language_mode, - KeyedAccessStoreMode store_mode) { - Isolate* isolate = receiver_map->GetIsolate(); - ExtraICState extra_state = - KeyedStoreIC::ComputeExtraICState(language_mode, store_mode); - Code::Flags flags = - Code::ComputeMonomorphicFlags(Code::KEYED_STORE_IC, extra_state); - - DCHECK(store_mode == STANDARD_STORE || - store_mode == STORE_AND_GROW_NO_TRANSITION || - store_mode == STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS || - store_mode == STORE_NO_TRANSITION_HANDLE_COW); - - Handle<String> name = isolate->factory()->KeyedStoreMonomorphic_string(); - Handle<Object> probe(receiver_map->FindInCodeCache(*name, flags), isolate); - if (probe->IsCode()) return 
Handle<Code>::cast(probe); - - PropertyICCompiler compiler(isolate, Code::KEYED_STORE_IC, extra_state); - Handle<Code> code = - compiler.CompileKeyedStoreMonomorphic(receiver_map, store_mode); - - Map::UpdateCodeCache(receiver_map, name, code); - DCHECK(KeyedStoreIC::GetKeyedAccessStoreMode(code->extra_ic_state()) == - store_mode); - return code; -} - - Code* PropertyICCompiler::FindPreMonomorphic(Isolate* isolate, Code::Kind kind, ExtraICState state) { Code::Flags flags = Code::ComputeFlags(kind, PREMONOMORPHIC, state); @@ -239,17 +156,6 @@ Handle<Code> PropertyICCompiler::ComputeCompareNil(Handle<Map> receiver_map, } -Handle<Code> PropertyICCompiler::ComputePolymorphic( - Code::Kind kind, MapHandleList* maps, CodeHandleList* handlers, - int valid_maps, Handle<Name> name, ExtraICState extra_ic_state) { - Handle<Code> handler = handlers->at(0); - Code::StubType type = valid_maps == 1 ? handler->type() : Code::NORMAL; - DCHECK(kind == Code::LOAD_IC || kind == Code::STORE_IC); - PropertyICCompiler ic_compiler(name->GetIsolate(), kind, extra_ic_state); - return ic_compiler.CompilePolymorphic(maps, handlers, name, type, PROPERTY); -} - - void PropertyICCompiler::ComputeKeyedStorePolymorphicHandlers( MapHandleList* receiver_maps, MapHandleList* transitioned_maps, CodeHandleList* handlers, KeyedAccessStoreMode store_mode, @@ -267,31 +173,6 @@ void PropertyICCompiler::ComputeKeyedStorePolymorphicHandlers( } -Handle<Code> PropertyICCompiler::ComputeKeyedStorePolymorphic( - MapHandleList* receiver_maps, KeyedAccessStoreMode store_mode, - LanguageMode language_mode) { - Isolate* isolate = receiver_maps->at(0)->GetIsolate(); - DCHECK(store_mode == STANDARD_STORE || - store_mode == STORE_AND_GROW_NO_TRANSITION || - store_mode == STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS || - store_mode == STORE_NO_TRANSITION_HANDLE_COW); - Handle<PolymorphicCodeCache> cache = - isolate->factory()->polymorphic_code_cache(); - ExtraICState extra_state = - 
KeyedStoreIC::ComputeExtraICState(language_mode, store_mode); - Code::Flags flags = - Code::ComputeFlags(Code::KEYED_STORE_IC, POLYMORPHIC, extra_state); - Handle<Object> probe = cache->Lookup(receiver_maps, flags); - if (probe->IsCode()) return Handle<Code>::cast(probe); - - PropertyICCompiler compiler(isolate, Code::KEYED_STORE_IC, extra_state); - Handle<Code> code = - compiler.CompileKeyedStorePolymorphic(receiver_maps, store_mode); - PolymorphicCodeCache::Update(cache, receiver_maps, flags, code); - return code; -} - - Handle<Code> PropertyICCompiler::CompileLoadInitialize(Code::Flags flags) { LoadIC::GenerateInitialize(masm()); Handle<Code> code = GetCodeWithFlags(flags, "CompileLoadInitialize"); @@ -394,22 +275,6 @@ void PropertyICCompiler::CompileKeyedStorePolymorphicHandlers( } -Handle<Code> PropertyICCompiler::CompileKeyedStorePolymorphic( - MapHandleList* receiver_maps, KeyedAccessStoreMode store_mode) { - // Collect MONOMORPHIC stubs for all |receiver_maps|. - CodeHandleList handlers(receiver_maps->length()); - MapHandleList transitioned_maps(receiver_maps->length()); - CompileKeyedStorePolymorphicHandlers(receiver_maps, &transitioned_maps, - &handlers, store_mode); - - Handle<Code> code = CompileKeyedStorePolymorphic(receiver_maps, &handlers, - &transitioned_maps); - isolate()->counters()->keyed_store_polymorphic_stubs()->Increment(); - PROFILE(isolate(), CodeCreateEvent(log_kind(code), *code, 0)); - return code; -} - - #define __ ACCESS_MASM(masm()) diff --git a/deps/v8/src/ic/ic-compiler.h b/deps/v8/src/ic/ic-compiler.h index ff32404afa..08444df654 100644 --- a/deps/v8/src/ic/ic-compiler.h +++ b/deps/v8/src/ic/ic-compiler.h @@ -21,15 +21,6 @@ class PropertyICCompiler : public PropertyAccessCompiler { static Handle<Code> ComputeStore(Isolate* isolate, InlineCacheState ic_state, ExtraICState extra_state); - static Handle<Code> ComputeMonomorphic(Code::Kind kind, Handle<Name> name, - Handle<Map> map, Handle<Code> handler, - ExtraICState extra_ic_state); 
- static Handle<Code> ComputePolymorphic(Code::Kind kind, MapHandleList* maps, - CodeHandleList* handlers, - int number_of_valid_maps, - Handle<Name> name, - ExtraICState extra_ic_state); - // Keyed static Handle<Code> ComputeKeyedLoadMonomorphicHandler( Handle<Map> receiver_map, ExtraICState extra_ic_state); @@ -37,16 +28,10 @@ class PropertyICCompiler : public PropertyAccessCompiler { static Handle<Code> ComputeKeyedStoreMonomorphicHandler( Handle<Map> receiver_map, LanguageMode language_mode, KeyedAccessStoreMode store_mode); - static Handle<Code> ComputeKeyedStoreMonomorphic( - Handle<Map> receiver_map, LanguageMode language_mode, - KeyedAccessStoreMode store_mode); static void ComputeKeyedStorePolymorphicHandlers( MapHandleList* receiver_maps, MapHandleList* transitioned_maps, CodeHandleList* handlers, KeyedAccessStoreMode store_mode, LanguageMode language_mode); - static Handle<Code> ComputeKeyedStorePolymorphic( - MapHandleList* receiver_maps, KeyedAccessStoreMode store_mode, - LanguageMode language_mode); // Compare nil static Handle<Code> ComputeCompareNil(Handle<Map> receiver_map, @@ -77,25 +62,14 @@ class PropertyICCompiler : public PropertyAccessCompiler { Handle<Code> CompileStoreGeneric(Code::Flags flags); Handle<Code> CompileStoreMegamorphic(Code::Flags flags); - Handle<Code> CompileMonomorphic(Handle<Map> map, Handle<Code> handler, - Handle<Name> name, IcCheckType check); - Handle<Code> CompilePolymorphic(MapHandleList* maps, CodeHandleList* handlers, - Handle<Name> name, Code::StubType type, - IcCheckType check); - Handle<Code> CompileKeyedStoreMonomorphicHandler( Handle<Map> receiver_map, KeyedAccessStoreMode store_mode); Handle<Code> CompileKeyedStoreMonomorphic(Handle<Map> receiver_map, KeyedAccessStoreMode store_mode); - Handle<Code> CompileKeyedStorePolymorphic(MapHandleList* receiver_maps, - KeyedAccessStoreMode store_mode); void CompileKeyedStorePolymorphicHandlers(MapHandleList* receiver_maps, MapHandleList* transitioned_maps, 
CodeHandleList* handlers, KeyedAccessStoreMode store_mode); - Handle<Code> CompileKeyedStorePolymorphic(MapHandleList* receiver_maps, - CodeHandleList* handler_stubs, - MapHandleList* transitioned_maps); bool IncludesNumberMap(MapHandleList* maps); diff --git a/deps/v8/src/ic/ic-inl.h b/deps/v8/src/ic/ic-inl.h index 65a5a2ddec..6dab006ad5 100644 --- a/deps/v8/src/ic/ic-inl.h +++ b/deps/v8/src/ic/ic-inl.h @@ -60,9 +60,8 @@ void IC::SetTargetAtAddress(Address address, Code* target, DCHECK(!target->is_inline_cache_stub() || (target->kind() != Code::LOAD_IC && target->kind() != Code::KEYED_LOAD_IC && - target->kind() != Code::CALL_IC && - (!FLAG_vector_stores || (target->kind() != Code::STORE_IC && - target->kind() != Code::KEYED_STORE_IC)))); + target->kind() != Code::CALL_IC && target->kind() != Code::STORE_IC && + target->kind() != Code::KEYED_STORE_IC)); Heap* heap = target->GetHeap(); Code* old_target = GetTargetAtAddress(address, constant_pool); @@ -75,7 +74,7 @@ void IC::SetTargetAtAddress(Address address, Code* target, StoreICState::GetLanguageMode(target->extra_ic_state())); } #endif - Assembler::set_target_address_at(address, constant_pool, + Assembler::set_target_address_at(heap->isolate(), address, constant_pool, target->instruction_start()); if (heap->gc_state() == Heap::MARK_COMPACT) { heap->mark_compact_collector()->RecordCodeTargetPatch(address, target); diff --git a/deps/v8/src/ic/ic-state.cc b/deps/v8/src/ic/ic-state.cc index 298eaa2707..4bdaf3ff03 100644 --- a/deps/v8/src/ic/ic-state.cc +++ b/deps/v8/src/ic/ic-state.cc @@ -191,17 +191,17 @@ void BinaryOpICState::GenerateAheadOfTime( } -Type* BinaryOpICState::GetResultType(Zone* zone) const { +Type* BinaryOpICState::GetResultType() const { Kind result_kind = result_kind_; if (HasSideEffects()) { result_kind = NONE; } else if (result_kind == GENERIC && op_ == Token::ADD) { - return Type::Union(Type::Number(zone), Type::String(zone), zone); + return Type::NumberOrString(); } else if (result_kind == 
NUMBER && op_ == Token::SHR) { - return Type::Unsigned32(zone); + return Type::Unsigned32(); } DCHECK_NE(GENERIC, result_kind); - return KindToType(result_kind, zone); + return KindToType(result_kind); } @@ -320,20 +320,20 @@ const char* BinaryOpICState::KindToString(Kind kind) { // static -Type* BinaryOpICState::KindToType(Kind kind, Zone* zone) { +Type* BinaryOpICState::KindToType(Kind kind) { switch (kind) { case NONE: - return Type::None(zone); + return Type::None(); case SMI: - return Type::SignedSmall(zone); + return Type::SignedSmall(); case INT32: - return Type::Signed32(zone); + return Type::Signed32(); case NUMBER: - return Type::Number(zone); + return Type::Number(); case STRING: - return Type::String(zone); + return Type::String(); case GENERIC: - return Type::Any(zone); + return Type::Any(); } UNREACHABLE(); return NULL; @@ -356,10 +356,10 @@ const char* CompareICState::GetStateName(State state) { return "STRING"; case UNIQUE_NAME: return "UNIQUE_NAME"; - case OBJECT: - return "OBJECT"; - case KNOWN_OBJECT: - return "KNOWN_OBJECT"; + case RECEIVER: + return "RECEIVER"; + case KNOWN_RECEIVER: + return "KNOWN_RECEIVER"; case GENERIC: return "GENERIC"; } @@ -384,9 +384,9 @@ Type* CompareICState::StateToType(Zone* zone, State state, Handle<Map> map) { return Type::InternalizedString(zone); case UNIQUE_NAME: return Type::UniqueName(zone); - case OBJECT: + case RECEIVER: return Type::Receiver(zone); - case KNOWN_OBJECT: + case KNOWN_RECEIVER: return map.is_null() ? 
Type::Receiver(zone) : Type::Class(map, zone); case GENERIC: return Type::Any(zone); @@ -406,7 +406,7 @@ CompareICState::State CompareICState::NewInputState(State old_state, if (value->IsInternalizedString()) return INTERNALIZED_STRING; if (value->IsString()) return STRING; if (value->IsSymbol()) return UNIQUE_NAME; - if (value->IsJSObject()) return OBJECT; + if (value->IsJSReceiver()) return RECEIVER; break; case BOOLEAN: if (value->IsBoolean()) return BOOLEAN; @@ -429,12 +429,12 @@ CompareICState::State CompareICState::NewInputState(State old_state, case UNIQUE_NAME: if (value->IsUniqueName()) return UNIQUE_NAME; break; - case OBJECT: - if (value->IsJSObject()) return OBJECT; + case RECEIVER: + if (value->IsJSReceiver()) return RECEIVER; break; case GENERIC: break; - case KNOWN_OBJECT: + case KNOWN_RECEIVER: UNREACHABLE(); break; } @@ -465,12 +465,12 @@ CompareICState::State CompareICState::TargetState( return Token::IsEqualityOp(op) ? INTERNALIZED_STRING : STRING; } if (x->IsString() && y->IsString()) return STRING; - if (x->IsJSObject() && y->IsJSObject()) { - if (Handle<JSObject>::cast(x)->map() == - Handle<JSObject>::cast(y)->map()) { - return KNOWN_OBJECT; + if (x->IsJSReceiver() && y->IsJSReceiver()) { + if (Handle<JSReceiver>::cast(x)->map() == + Handle<JSReceiver>::cast(y)->map()) { + return KNOWN_RECEIVER; } else { - return Token::IsEqualityOp(op) ? OBJECT : GENERIC; + return Token::IsEqualityOp(op) ? RECEIVER : GENERIC; } } if (!Token::IsEqualityOp(op)) return GENERIC; @@ -490,15 +490,15 @@ CompareICState::State CompareICState::TargetState( if (old_left == SMI && x->IsHeapNumber()) return NUMBER; if (old_right == SMI && y->IsHeapNumber()) return NUMBER; return GENERIC; - case KNOWN_OBJECT: - if (x->IsJSObject() && y->IsJSObject()) { - return Token::IsEqualityOp(op) ? OBJECT : GENERIC; + case KNOWN_RECEIVER: + if (x->IsJSReceiver() && y->IsJSReceiver()) { + return Token::IsEqualityOp(op) ? 
RECEIVER : GENERIC; } return GENERIC; case BOOLEAN: case STRING: case UNIQUE_NAME: - case OBJECT: + case RECEIVER: case GENERIC: return GENERIC; } diff --git a/deps/v8/src/ic/ic-state.h b/deps/v8/src/ic/ic-state.h index ebc686b738..1982fbe08b 100644 --- a/deps/v8/src/ic/ic-state.h +++ b/deps/v8/src/ic/ic-state.h @@ -120,9 +120,9 @@ class BinaryOpICState final BASE_EMBEDDED { Token::Value op() const { return op_; } Maybe<int> fixed_right_arg() const { return fixed_right_arg_; } - Type* GetLeftType(Zone* zone) const { return KindToType(left_kind_, zone); } - Type* GetRightType(Zone* zone) const { return KindToType(right_kind_, zone); } - Type* GetResultType(Zone* zone) const; + Type* GetLeftType() const { return KindToType(left_kind_); } + Type* GetRightType() const { return KindToType(right_kind_); } + Type* GetResultType() const; void Update(Handle<Object> left, Handle<Object> right, Handle<Object> result); @@ -136,7 +136,7 @@ class BinaryOpICState final BASE_EMBEDDED { Kind UpdateKind(Handle<Object> object, Kind kind) const; static const char* KindToString(Kind kind); - static Type* KindToType(Kind kind, Zone* zone); + static Type* KindToType(Kind kind); static bool KindMaybeSmi(Kind kind) { return (kind >= SMI && kind <= NUMBER) || kind == GENERIC; } @@ -174,7 +174,7 @@ class CompareICState { // SMI < NUMBER // INTERNALIZED_STRING < STRING // INTERNALIZED_STRING < UNIQUE_NAME - // KNOWN_OBJECT < OBJECT + // KNOWN_RECEIVER < RECEIVER enum State { UNINITIALIZED, BOOLEAN, @@ -182,9 +182,9 @@ class CompareICState { NUMBER, STRING, INTERNALIZED_STRING, - UNIQUE_NAME, // Symbol or InternalizedString - OBJECT, // JSObject - KNOWN_OBJECT, // JSObject with specific map (faster check) + UNIQUE_NAME, // Symbol or InternalizedString + RECEIVER, // JSReceiver + KNOWN_RECEIVER, // JSReceiver with specific map (faster check) GENERIC }; diff --git a/deps/v8/src/ic/ic.cc b/deps/v8/src/ic/ic.cc index 3dc3029300..73ac666a41 100644 --- a/deps/v8/src/ic/ic.cc +++ 
b/deps/v8/src/ic/ic.cc @@ -117,13 +117,10 @@ void IC::TraceIC(const char* type, Handle<Object> name, State old_state, stdout, true); } - ExtraICState extra_state = new_target->extra_ic_state(); const char* modifier = ""; if (new_target->kind() == Code::KEYED_STORE_IC) { KeyedAccessStoreMode mode = - FLAG_vector_stores - ? casted_nexus<KeyedStoreICNexus>()->GetKeyedAccessStoreMode() - : KeyedStoreIC::GetKeyedAccessStoreMode(extra_state); + casted_nexus<KeyedStoreICNexus>()->GetKeyedAccessStoreMode(); modifier = GetTransitionMarkModifier(mode); } PrintF(" (%c->%c%s) ", TransitionMarkFromState(old_state), @@ -418,19 +415,9 @@ void IC::OnTypeFeedbackChanged(Isolate* isolate, Address address, // static -void IC::OnTypeFeedbackChanged(Isolate* isolate, Code* host, - TypeFeedbackVector* vector, State old_state, - State new_state) { +void IC::OnTypeFeedbackChanged(Isolate* isolate, Code* host) { if (host->kind() != Code::FUNCTION) return; - if (FLAG_type_info_threshold > 0) { - int polymorphic_delta = 0; // "Polymorphic" here includes monomorphic. - int generic_delta = 0; // "Generic" here includes megamorphic. 
- ComputeTypeInfoCountDelta(old_state, new_state, &polymorphic_delta, - &generic_delta); - vector->change_ic_with_type_info_count(polymorphic_delta); - vector->change_ic_generic_count(generic_delta); - } TypeFeedbackInfo* info = TypeFeedbackInfo::cast(host->type_feedback_info()); info->change_own_type_change_checksum(); host->set_profiler_ticks(0); @@ -470,13 +457,9 @@ void IC::Clear(Isolate* isolate, Address address, Address constant_pool) { switch (target->kind()) { case Code::LOAD_IC: case Code::KEYED_LOAD_IC: - return; case Code::STORE_IC: - if (FLAG_vector_stores) return; - return StoreIC::Clear(isolate, address, target, constant_pool); case Code::KEYED_STORE_IC: - if (FLAG_vector_stores) return; - return KeyedStoreIC::Clear(isolate, address, target, constant_pool); + return; case Code::COMPARE_IC: return CompareIC::Clear(isolate, address, target, constant_pool); case Code::COMPARE_NIL_IC: @@ -498,9 +481,8 @@ void KeyedLoadIC::Clear(Isolate* isolate, Code* host, KeyedLoadICNexus* nexus) { // Make sure to also clear the map used in inline fast cases. If we // do not clear these maps, cached code can keep objects alive // through the embedded maps. - State state = nexus->StateFromFeedback(); nexus->ConfigurePremonomorphic(); - OnTypeFeedbackChanged(isolate, host, nexus->vector(), state, PREMONOMORPHIC); + OnTypeFeedbackChanged(isolate, host); } @@ -512,16 +494,15 @@ void CallIC::Clear(Isolate* isolate, Code* host, CallICNexus* nexus) { if (state != UNINITIALIZED && !feedback->IsAllocationSite()) { nexus->ConfigureUninitialized(); // The change in state must be processed. 
- OnTypeFeedbackChanged(isolate, host, nexus->vector(), state, UNINITIALIZED); + OnTypeFeedbackChanged(isolate, host); } } void LoadIC::Clear(Isolate* isolate, Code* host, LoadICNexus* nexus) { if (IsCleared(nexus)) return; - State state = nexus->StateFromFeedback(); nexus->ConfigurePremonomorphic(); - OnTypeFeedbackChanged(isolate, host, nexus->vector(), state, PREMONOMORPHIC); + OnTypeFeedbackChanged(isolate, host); } @@ -536,9 +517,8 @@ void StoreIC::Clear(Isolate* isolate, Address address, Code* target, void StoreIC::Clear(Isolate* isolate, Code* host, StoreICNexus* nexus) { if (IsCleared(nexus)) return; - State state = nexus->StateFromFeedback(); nexus->ConfigurePremonomorphic(); - OnTypeFeedbackChanged(isolate, host, nexus->vector(), state, PREMONOMORPHIC); + OnTypeFeedbackChanged(isolate, host); } @@ -554,9 +534,8 @@ void KeyedStoreIC::Clear(Isolate* isolate, Address address, Code* target, void KeyedStoreIC::Clear(Isolate* isolate, Code* host, KeyedStoreICNexus* nexus) { if (IsCleared(nexus)) return; - State state = nexus->StateFromFeedback(); nexus->ConfigurePremonomorphic(); - OnTypeFeedbackChanged(isolate, host, nexus->vector(), state, PREMONOMORPHIC); + OnTypeFeedbackChanged(isolate, host); } @@ -565,11 +544,11 @@ void CompareIC::Clear(Isolate* isolate, Address address, Code* target, DCHECK(CodeStub::GetMajorKey(target) == CodeStub::CompareIC); CompareICStub stub(target->stub_key(), isolate); // Only clear CompareICs that can retain objects. 
- if (stub.state() != CompareICState::KNOWN_OBJECT) return; + if (stub.state() != CompareICState::KNOWN_RECEIVER) return; SetTargetAtAddress(address, GetRawUninitialized(isolate, stub.op(), stub.strength()), constant_pool); - PatchInlinedSmiCode(address, DISABLE_INLINED_SMI_CHECK); + PatchInlinedSmiCode(isolate, address, DISABLE_INLINED_SMI_CHECK); } @@ -606,8 +585,7 @@ void IC::ConfigureVectorState(IC::State new_state) { } vector_set_ = true; - OnTypeFeedbackChanged(isolate(), get_host(), *vector(), saved_state(), - new_state); + OnTypeFeedbackChanged(isolate(), get_host()); } @@ -630,8 +608,7 @@ void IC::ConfigureVectorState(Handle<Name> name, Handle<Map> map, } vector_set_ = true; - OnTypeFeedbackChanged(isolate(), get_host(), *vector(), saved_state(), - MONOMORPHIC); + OnTypeFeedbackChanged(isolate(), get_host()); } @@ -654,8 +631,7 @@ void IC::ConfigureVectorState(Handle<Name> name, MapHandleList* maps, } vector_set_ = true; - OnTypeFeedbackChanged(isolate(), get_host(), *vector(), saved_state(), - POLYMORPHIC); + OnTypeFeedbackChanged(isolate(), get_host()); } @@ -668,8 +644,7 @@ void IC::ConfigureVectorState(MapHandleList* maps, nexus->ConfigurePolymorphic(maps, transitioned_maps, handlers); vector_set_ = true; - OnTypeFeedbackChanged(isolate(), get_host(), *vector(), saved_state(), - POLYMORPHIC); + OnTypeFeedbackChanged(isolate(), get_host()); } @@ -810,12 +785,7 @@ bool IC::UpdatePolymorphicIC(Handle<Name> name, Handle<Code> code) { if (number_of_valid_maps > 1 && target()->is_keyed_stub()) return false; Handle<Code> ic; if (number_of_valid_maps == 1) { - if (UseVector()) { - ConfigureVectorState(name, receiver_map(), code); - } else { - ic = PropertyICCompiler::ComputeMonomorphic(kind(), name, map, code, - extra_ic_state()); - } + ConfigureVectorState(name, receiver_map(), code); } else { if (handler_to_overwrite >= 0) { handlers.Set(handler_to_overwrite, code); @@ -827,13 +797,7 @@ bool IC::UpdatePolymorphicIC(Handle<Name> name, Handle<Code> code) { 
handlers.Add(code); } - if (UseVector()) { - ConfigureVectorState(name, &maps, &handlers); - } else { - ic = PropertyICCompiler::ComputePolymorphic(kind(), &maps, &handlers, - number_of_valid_maps, name, - extra_ic_state()); - } + ConfigureVectorState(name, &maps, &handlers); } if (!UseVector()) set_target(*ic); @@ -843,13 +807,7 @@ bool IC::UpdatePolymorphicIC(Handle<Name> name, Handle<Code> code) { void IC::UpdateMonomorphicIC(Handle<Code> handler, Handle<Name> name) { DCHECK(handler->is_handler()); - if (UseVector()) { - ConfigureVectorState(name, receiver_map(), handler); - } else { - Handle<Code> ic = PropertyICCompiler::ComputeMonomorphic( - kind(), name, receiver_map(), handler, extra_ic_state()); - set_target(*ic); - } + ConfigureVectorState(name, receiver_map(), handler); } @@ -973,7 +931,7 @@ static Handle<Code> KeyedStoreICInitializeStubHelper( Handle<Code> KeyedStoreIC::initialize_stub(Isolate* isolate, LanguageMode language_mode, State initialization_state) { - if (FLAG_vector_stores && initialization_state != MEGAMORPHIC) { + if (initialization_state != MEGAMORPHIC) { VectorKeyedStoreICTrampolineStub stub(isolate, StoreICState(language_mode)); return stub.GetCode(); } @@ -985,7 +943,7 @@ Handle<Code> KeyedStoreIC::initialize_stub(Isolate* isolate, Handle<Code> KeyedStoreIC::initialize_stub_in_optimized_code( Isolate* isolate, LanguageMode language_mode, State initialization_state) { - if (FLAG_vector_stores && initialization_state != MEGAMORPHIC) { + if (initialization_state != MEGAMORPHIC) { VectorKeyedStoreICStub stub(isolate, StoreICState(language_mode)); return stub.GetCode(); } @@ -1638,13 +1596,8 @@ Handle<Code> StoreIC::initialize_stub(Isolate* isolate, DCHECK(initialization_state == UNINITIALIZED || initialization_state == PREMONOMORPHIC || initialization_state == MEGAMORPHIC); - if (FLAG_vector_stores) { - VectorStoreICTrampolineStub stub(isolate, StoreICState(language_mode)); - return stub.GetCode(); - } - - return 
StoreICInitializeStubHelper( - isolate, ComputeExtraICState(language_mode), initialization_state); + VectorStoreICTrampolineStub stub(isolate, StoreICState(language_mode)); + return stub.GetCode(); } @@ -1653,7 +1606,7 @@ Handle<Code> StoreIC::initialize_stub_in_optimized_code( DCHECK(initialization_state == UNINITIALIZED || initialization_state == PREMONOMORPHIC || initialization_state == MEGAMORPHIC); - if (FLAG_vector_stores && initialization_state != MEGAMORPHIC) { + if (initialization_state != MEGAMORPHIC) { VectorStoreICStub stub(isolate, StoreICState(language_mode)); return stub.GetCode(); } @@ -1700,11 +1653,7 @@ void StoreIC::UpdateCaches(LookupIterator* lookup, Handle<Object> value, if (state() == UNINITIALIZED) { // This is the first time we execute this inline cache. Set the target to // the pre monomorphic stub to delay setting the monomorphic state. - if (FLAG_vector_stores) { - ConfigureVectorState(PREMONOMORPHIC); - } else { - set_target(*pre_monomorphic_stub()); - } + ConfigureVectorState(PREMONOMORPHIC); TRACE_IC("StoreIC", lookup->name()); return; } @@ -1811,8 +1760,6 @@ Handle<Code> StoreIC::CompileHandler(LookupIterator* lookup, TRACE_GENERIC_IC(isolate(), "StoreIC", "setter not a function"); break; } - // When debugging we need to go the slow path to flood the accessor. 
- if (GetSharedFunctionInfo()->HasDebugInfo()) break; Handle<JSFunction> function = Handle<JSFunction>::cast(setter); CallOptimization call_optimization(function); NamedStoreHandlerCompiler compiler(isolate(), receiver_map(), holder); @@ -1900,25 +1847,18 @@ Handle<Code> KeyedStoreIC::StoreElementStub(Handle<Map> receiver_map, Handle<Map> monomorphic_map = ComputeTransitionedMap(receiver_map, store_mode); store_mode = GetNonTransitioningStoreMode(store_mode); - if (FLAG_vector_stores) { - Handle<Code> handler = - PropertyICCompiler::ComputeKeyedStoreMonomorphicHandler( - monomorphic_map, language_mode(), store_mode); - ConfigureVectorState(Handle<Name>::null(), monomorphic_map, handler); - return null_handle; - } - return PropertyICCompiler::ComputeKeyedStoreMonomorphic( - monomorphic_map, language_mode(), store_mode); + Handle<Code> handler = + PropertyICCompiler::ComputeKeyedStoreMonomorphicHandler( + monomorphic_map, language_mode(), store_mode); + ConfigureVectorState(Handle<Name>::null(), monomorphic_map, handler); + return null_handle; } // There are several special cases where an IC that is MONOMORPHIC can still // transition to a different GetNonTransitioningStoreMode IC that handles a // superset of the original IC. Handle those here if the receiver map hasn't // changed or it has transitioned to a more general kind. - KeyedAccessStoreMode old_store_mode = - FLAG_vector_stores - ? GetKeyedAccessStoreMode() - : KeyedStoreIC::GetKeyedAccessStoreMode(target()->extra_ic_state()); + KeyedAccessStoreMode old_store_mode = GetKeyedAccessStoreMode(); Handle<Map> previous_receiver_map = target_receiver_maps.at(0); if (state() == MONOMORPHIC) { Handle<Map> transitioned_receiver_map = receiver_map; @@ -1934,16 +1874,12 @@ Handle<Code> KeyedStoreIC::StoreElementStub(Handle<Map> receiver_map, // if they at least come from the same origin for a transitioning store, // stay MONOMORPHIC and use the map for the most generic ElementsKind. 
store_mode = GetNonTransitioningStoreMode(store_mode); - if (FLAG_vector_stores) { - Handle<Code> handler = - PropertyICCompiler::ComputeKeyedStoreMonomorphicHandler( - transitioned_receiver_map, language_mode(), store_mode); - ConfigureVectorState(Handle<Name>::null(), transitioned_receiver_map, - handler); - return null_handle; - } - return PropertyICCompiler::ComputeKeyedStoreMonomorphic( - transitioned_receiver_map, language_mode(), store_mode); + Handle<Code> handler = + PropertyICCompiler::ComputeKeyedStoreMonomorphicHandler( + transitioned_receiver_map, language_mode(), store_mode); + ConfigureVectorState(Handle<Name>::null(), transitioned_receiver_map, + handler); + return null_handle; } else if (receiver_map.is_identical_to(previous_receiver_map) && old_store_mode == STANDARD_STORE && (store_mode == STORE_AND_GROW_NO_TRANSITION || @@ -1952,15 +1888,11 @@ Handle<Code> KeyedStoreIC::StoreElementStub(Handle<Map> receiver_map, // A "normal" IC that handles stores can switch to a version that can // grow at the end of the array, handle OOB accesses or copy COW arrays // and still stay MONOMORPHIC. 
- if (FLAG_vector_stores) { - Handle<Code> handler = - PropertyICCompiler::ComputeKeyedStoreMonomorphicHandler( - receiver_map, language_mode(), store_mode); - ConfigureVectorState(Handle<Name>::null(), receiver_map, handler); - return null_handle; - } - return PropertyICCompiler::ComputeKeyedStoreMonomorphic( - receiver_map, language_mode(), store_mode); + Handle<Code> handler = + PropertyICCompiler::ComputeKeyedStoreMonomorphicHandler( + receiver_map, language_mode(), store_mode); + ConfigureVectorState(Handle<Name>::null(), receiver_map, handler); + return null_handle; } } @@ -2019,18 +1951,13 @@ Handle<Code> KeyedStoreIC::StoreElementStub(Handle<Map> receiver_map, } } - if (FLAG_vector_stores) { - MapHandleList transitioned_maps(target_receiver_maps.length()); - CodeHandleList handlers(target_receiver_maps.length()); - PropertyICCompiler::ComputeKeyedStorePolymorphicHandlers( - &target_receiver_maps, &transitioned_maps, &handlers, store_mode, - language_mode()); - ConfigureVectorState(&target_receiver_maps, &transitioned_maps, &handlers); - return null_handle; - } - - return PropertyICCompiler::ComputeKeyedStorePolymorphic( - &target_receiver_maps, store_mode, language_mode()); + MapHandleList transitioned_maps(target_receiver_maps.length()); + CodeHandleList handlers(target_receiver_maps.length()); + PropertyICCompiler::ComputeKeyedStorePolymorphicHandlers( + &target_receiver_maps, &transitioned_maps, &handlers, store_mode, + language_mode()); + ConfigureVectorState(&target_receiver_maps, &transitioned_maps, &handlers); + return null_handle; } @@ -2124,44 +2051,6 @@ static KeyedAccessStoreMode GetStoreMode(Handle<JSObject> receiver, } -void KeyedStoreIC::ValidateStoreMode(Handle<Code> stub) { -#ifdef DEBUG - DCHECK(!FLAG_vector_stores); - if (stub.is_null() || *stub == *megamorphic_stub() || *stub == *slow_stub()) { - return; - } - - // Query the keyed store mode. 
- ExtraICState state = stub->extra_ic_state(); - KeyedAccessStoreMode stub_mode = GetKeyedAccessStoreMode(state); - - MapHandleList map_list; - stub->FindAllMaps(&map_list); - CodeHandleList list; - stub->FindHandlers(&list, map_list.length()); - for (int i = 0; i < list.length(); i++) { - Handle<Code> handler = list.at(i); - CHECK(handler->is_handler()); - CodeStub::Major major_key = CodeStub::MajorKeyFromKey(handler->stub_key()); - uint32_t minor_key = CodeStub::MinorKeyFromKey(handler->stub_key()); - // Ensure that we only see handlers we know have the store mode embedded. - CHECK(major_key == CodeStub::KeyedStoreSloppyArguments || - major_key == CodeStub::StoreFastElement || - major_key == CodeStub::StoreElement || - major_key == CodeStub::ElementsTransitionAndStore || - *handler == *isolate()->builtins()->KeyedStoreIC_Slow()); - // Ensure that the store mode matches that of the IC. - CHECK(major_key == CodeStub::NoCache || - stub_mode == CommonStoreModeBits::decode(minor_key)); - // The one exception is the keyed store slow builtin, which doesn't include - // store mode. 
- CHECK(major_key != CodeStub::NoCache || - *handler == *isolate()->builtins()->KeyedStoreIC_Slow()); - } -#endif // DEBUG -} - - MaybeHandle<Object> KeyedStoreIC::Store(Handle<Object> object, Handle<Object> key, Handle<Object> value) { @@ -2192,20 +2081,11 @@ MaybeHandle<Object> KeyedStoreIC::Store(Handle<Object> object, StoreIC::Store(object, Handle<Name>::cast(key), value, JSReceiver::MAY_BE_STORE_FROM_KEYED), Object); - if (FLAG_vector_stores) { - if (!is_vector_set()) { - ConfigureVectorState(MEGAMORPHIC); - TRACE_GENERIC_IC(isolate(), "KeyedStoreIC", - "unhandled internalized string key"); - TRACE_IC("StoreIC", key); - } - } else { - if (!is_target_set()) { - TRACE_GENERIC_IC(isolate(), "KeyedStoreIC", - "unhandled internalized string key"); - TRACE_IC("StoreIC", key); - set_target(*stub); - } + if (!is_vector_set()) { + ConfigureVectorState(MEGAMORPHIC); + TRACE_GENERIC_IC(isolate(), "KeyedStoreIC", + "unhandled internalized string key"); + TRACE_IC("StoreIC", key); } return store_handle; } @@ -2262,10 +2142,6 @@ MaybeHandle<Object> KeyedStoreIC::Store(Handle<Object> object, // from fast path keyed stores. if (!old_receiver_map->DictionaryElementsInPrototypeChainOnly()) { stub = StoreElementStub(old_receiver_map, store_mode); - - // Validate that the store_mode in the stub can also be derived - // from peeking in the code bits of the handlers. - if (!FLAG_vector_stores) ValidateStoreMode(stub); } else { TRACE_GENERIC_IC(isolate(), "KeyedStoreIC", "dictionary or proxy prototype"); @@ -2278,27 +2154,12 @@ MaybeHandle<Object> KeyedStoreIC::Store(Handle<Object> object, } } - if (FLAG_vector_stores) { - if (!is_vector_set() || stub.is_null()) { - Code* megamorphic = *megamorphic_stub(); - if (!stub.is_null() && (*stub == megamorphic || *stub == *slow_stub())) { - ConfigureVectorState(MEGAMORPHIC); - TRACE_GENERIC_IC(isolate(), "KeyedStoreIC", - *stub == megamorphic ? 
"set generic" : "slow stub"); - } - } - } else { - DCHECK(!is_target_set()); + if (!is_vector_set() || stub.is_null()) { Code* megamorphic = *megamorphic_stub(); - if (*stub == megamorphic) { - TRACE_GENERIC_IC(isolate(), "KeyedStoreIC", "set generic"); - } else if (*stub == *slow_stub()) { - TRACE_GENERIC_IC(isolate(), "KeyedStoreIC", "slow stub"); - } - - DCHECK(!stub.is_null()); - if (!AddressIsDeoptimizedCode()) { - set_target(*stub); + if (!stub.is_null() && (*stub == megamorphic || *stub == *slow_stub())) { + ConfigureVectorState(MEGAMORPHIC); + TRACE_GENERIC_IC(isolate(), "KeyedStoreIC", + *stub == megamorphic ? "set generic" : "slow stub"); } } TRACE_IC("StoreIC", key); @@ -2328,6 +2189,12 @@ void CallIC::HandleMiss(Handle<Object> function) { if (array_function.is_identical_to(js_function)) { // Alter the slot. nexus->ConfigureMonomorphicArray(); + } else if (js_function->context()->native_context() != + *isolate()->native_context()) { + // Don't collect cross-native context feedback for the CallIC. + // TODO(bmeurer): We should collect the SharedFunctionInfo as + // feedback in this case instead. 
+ nexus->ConfigureMegamorphic(); } else { nexus->ConfigureMonomorphic(js_function); } @@ -2338,8 +2205,7 @@ void CallIC::HandleMiss(Handle<Object> function) { name = handle(js_function->shared()->name(), isolate()); } - IC::State new_state = nexus->StateFromFeedback(); - OnTypeFeedbackChanged(isolate(), get_host(), *vector(), state(), new_state); + OnTypeFeedbackChanged(isolate(), get_host()); TRACE_IC("CallIC", name); } @@ -2448,29 +2314,21 @@ RUNTIME_FUNCTION(Runtime_StoreIC_Miss) { Handle<Object> value = args.at<Object>(2); Handle<Object> result; - if (FLAG_vector_stores) { - DCHECK(args.length() == 5 || args.length() == 6); - Handle<Smi> slot = args.at<Smi>(3); - Handle<TypeFeedbackVector> vector = args.at<TypeFeedbackVector>(4); - FeedbackVectorSlot vector_slot = vector->ToSlot(slot->value()); - if (vector->GetKind(vector_slot) == FeedbackVectorSlotKind::STORE_IC) { - StoreICNexus nexus(vector, vector_slot); - StoreIC ic(IC::NO_EXTRA_FRAME, isolate, &nexus); - ic.UpdateState(receiver, key); - ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result, - ic.Store(receiver, key, value)); - } else { - DCHECK_EQ(FeedbackVectorSlotKind::KEYED_STORE_IC, - vector->GetKind(vector_slot)); - KeyedStoreICNexus nexus(vector, vector_slot); - KeyedStoreIC ic(IC::NO_EXTRA_FRAME, isolate, &nexus); - ic.UpdateState(receiver, key); - ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result, - ic.Store(receiver, key, value)); - } + DCHECK(args.length() == 5 || args.length() == 6); + Handle<Smi> slot = args.at<Smi>(3); + Handle<TypeFeedbackVector> vector = args.at<TypeFeedbackVector>(4); + FeedbackVectorSlot vector_slot = vector->ToSlot(slot->value()); + if (vector->GetKind(vector_slot) == FeedbackVectorSlotKind::STORE_IC) { + StoreICNexus nexus(vector, vector_slot); + StoreIC ic(IC::NO_EXTRA_FRAME, isolate, &nexus); + ic.UpdateState(receiver, key); + ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result, + ic.Store(receiver, key, value)); } else { - DCHECK(args.length() == 3); - StoreIC 
ic(IC::NO_EXTRA_FRAME, isolate); + DCHECK_EQ(FeedbackVectorSlotKind::KEYED_STORE_IC, + vector->GetKind(vector_slot)); + KeyedStoreICNexus nexus(vector, vector_slot); + KeyedStoreIC ic(IC::NO_EXTRA_FRAME, isolate, &nexus); ic.UpdateState(receiver, key); ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result, ic.Store(receiver, key, value)); @@ -2487,49 +2345,41 @@ RUNTIME_FUNCTION(Runtime_StoreIC_MissFromStubFailure) { Handle<Object> value = args.at<Object>(2); Handle<Object> result; - if (FLAG_vector_stores) { - int length = args.length(); - DCHECK(length == 5 || length == 6); - // We might have slot and vector, for a normal miss (slot(3), vector(4)). - // Or, map and vector for a transitioning store miss (map(3), vector(4)). - // In this case, we need to recover the slot from a virtual register. - // If length == 6, then a map is included (map(3), slot(4), vector(5)). - Handle<Smi> slot; - Handle<TypeFeedbackVector> vector; - if (length == 5) { - if (args.at<Object>(3)->IsMap()) { - vector = args.at<TypeFeedbackVector>(4); - slot = handle( - *reinterpret_cast<Smi**>(isolate->virtual_slot_register_address()), - isolate); - } else { - vector = args.at<TypeFeedbackVector>(4); - slot = args.at<Smi>(3); - } + int length = args.length(); + DCHECK(length == 5 || length == 6); + // We might have slot and vector, for a normal miss (slot(3), vector(4)). + // Or, map and vector for a transitioning store miss (map(3), vector(4)). + // In this case, we need to recover the slot from a virtual register. + // If length == 6, then a map is included (map(3), slot(4), vector(5)). 
+ Handle<Smi> slot; + Handle<TypeFeedbackVector> vector; + if (length == 5) { + if (args.at<Object>(3)->IsMap()) { + vector = args.at<TypeFeedbackVector>(4); + slot = handle( + *reinterpret_cast<Smi**>(isolate->virtual_slot_register_address()), + isolate); } else { - vector = args.at<TypeFeedbackVector>(5); - slot = args.at<Smi>(4); + vector = args.at<TypeFeedbackVector>(4); + slot = args.at<Smi>(3); } + } else { + vector = args.at<TypeFeedbackVector>(5); + slot = args.at<Smi>(4); + } - FeedbackVectorSlot vector_slot = vector->ToSlot(slot->value()); - if (vector->GetKind(vector_slot) == FeedbackVectorSlotKind::STORE_IC) { - StoreICNexus nexus(vector, vector_slot); - StoreIC ic(IC::EXTRA_CALL_FRAME, isolate, &nexus); - ic.UpdateState(receiver, key); - ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result, - ic.Store(receiver, key, value)); - } else { - DCHECK_EQ(FeedbackVectorSlotKind::KEYED_STORE_IC, - vector->GetKind(vector_slot)); - KeyedStoreICNexus nexus(vector, vector_slot); - KeyedStoreIC ic(IC::EXTRA_CALL_FRAME, isolate, &nexus); - ic.UpdateState(receiver, key); - ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result, - ic.Store(receiver, key, value)); - } + FeedbackVectorSlot vector_slot = vector->ToSlot(slot->value()); + if (vector->GetKind(vector_slot) == FeedbackVectorSlotKind::STORE_IC) { + StoreICNexus nexus(vector, vector_slot); + StoreIC ic(IC::EXTRA_CALL_FRAME, isolate, &nexus); + ic.UpdateState(receiver, key); + ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result, + ic.Store(receiver, key, value)); } else { - DCHECK(args.length() == 3 || args.length() == 4); - StoreIC ic(IC::EXTRA_CALL_FRAME, isolate); + DCHECK_EQ(FeedbackVectorSlotKind::KEYED_STORE_IC, + vector->GetKind(vector_slot)); + KeyedStoreICNexus nexus(vector, vector_slot); + KeyedStoreIC ic(IC::EXTRA_CALL_FRAME, isolate, &nexus); ic.UpdateState(receiver, key); ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result, ic.Store(receiver, key, value)); @@ -2547,23 +2397,15 @@ 
RUNTIME_FUNCTION(Runtime_KeyedStoreIC_Miss) { Handle<Object> value = args.at<Object>(2); Handle<Object> result; - if (FLAG_vector_stores) { - DCHECK(args.length() == 5); - Handle<Smi> slot = args.at<Smi>(3); - Handle<TypeFeedbackVector> vector = args.at<TypeFeedbackVector>(4); - FeedbackVectorSlot vector_slot = vector->ToSlot(slot->value()); - KeyedStoreICNexus nexus(vector, vector_slot); - KeyedStoreIC ic(IC::NO_EXTRA_FRAME, isolate, &nexus); - ic.UpdateState(receiver, key); - ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result, - ic.Store(receiver, key, value)); - } else { - DCHECK(args.length() == 3); - KeyedStoreIC ic(IC::NO_EXTRA_FRAME, isolate); - ic.UpdateState(receiver, key); - ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result, - ic.Store(receiver, key, value)); - } + DCHECK(args.length() == 5); + Handle<Smi> slot = args.at<Smi>(3); + Handle<TypeFeedbackVector> vector = args.at<TypeFeedbackVector>(4); + FeedbackVectorSlot vector_slot = vector->ToSlot(slot->value()); + KeyedStoreICNexus nexus(vector, vector_slot); + KeyedStoreIC ic(IC::NO_EXTRA_FRAME, isolate, &nexus); + ic.UpdateState(receiver, key); + ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result, + ic.Store(receiver, key, value)); return *result; } @@ -2576,42 +2418,29 @@ RUNTIME_FUNCTION(Runtime_KeyedStoreIC_MissFromStubFailure) { Handle<Object> value = args.at<Object>(2); Handle<Object> result; - if (FLAG_vector_stores) { - DCHECK(args.length() == 5); - Handle<Smi> slot = args.at<Smi>(3); - Handle<TypeFeedbackVector> vector = args.at<TypeFeedbackVector>(4); - FeedbackVectorSlot vector_slot = vector->ToSlot(slot->value()); - KeyedStoreICNexus nexus(vector, vector_slot); - KeyedStoreIC ic(IC::EXTRA_CALL_FRAME, isolate, &nexus); - ic.UpdateState(receiver, key); - ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result, - ic.Store(receiver, key, value)); - } else { - DCHECK(args.length() == 3); - KeyedStoreIC ic(IC::EXTRA_CALL_FRAME, isolate); - ic.UpdateState(receiver, key); - 
ASSIGN_RETURN_FAILURE_ON_EXCEPTION( - isolate, result, ic.Store(receiver, key, args.at<Object>(2))); - } + DCHECK(args.length() == 5); + Handle<Smi> slot = args.at<Smi>(3); + Handle<TypeFeedbackVector> vector = args.at<TypeFeedbackVector>(4); + FeedbackVectorSlot vector_slot = vector->ToSlot(slot->value()); + KeyedStoreICNexus nexus(vector, vector_slot); + KeyedStoreIC ic(IC::EXTRA_CALL_FRAME, isolate, &nexus); + ic.UpdateState(receiver, key); + ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result, + ic.Store(receiver, key, value)); return *result; } RUNTIME_FUNCTION(Runtime_StoreIC_Slow) { HandleScope scope(isolate); - DCHECK(args.length() == (FLAG_vector_stores ? 5 : 3)); + DCHECK(args.length() == 5); Handle<Object> object = args.at<Object>(0); Handle<Object> key = args.at<Object>(1); Handle<Object> value = args.at<Object>(2); LanguageMode language_mode; - if (FLAG_vector_stores) { - StoreICNexus nexus(isolate); - StoreIC ic(IC::NO_EXTRA_FRAME, isolate, &nexus); - language_mode = ic.language_mode(); - } else { - StoreIC ic(IC::NO_EXTRA_FRAME, isolate); - language_mode = ic.language_mode(); - } + StoreICNexus nexus(isolate); + StoreIC ic(IC::NO_EXTRA_FRAME, isolate, &nexus); + language_mode = ic.language_mode(); Handle<Object> result; ASSIGN_RETURN_FAILURE_ON_EXCEPTION( isolate, result, @@ -2622,19 +2451,14 @@ RUNTIME_FUNCTION(Runtime_StoreIC_Slow) { RUNTIME_FUNCTION(Runtime_KeyedStoreIC_Slow) { HandleScope scope(isolate); - DCHECK(args.length() == (FLAG_vector_stores ? 
5 : 3)); + DCHECK(args.length() == 5); Handle<Object> object = args.at<Object>(0); Handle<Object> key = args.at<Object>(1); Handle<Object> value = args.at<Object>(2); LanguageMode language_mode; - if (FLAG_vector_stores) { - KeyedStoreICNexus nexus(isolate); - KeyedStoreIC ic(IC::NO_EXTRA_FRAME, isolate, &nexus); - language_mode = ic.language_mode(); - } else { - KeyedStoreIC ic(IC::NO_EXTRA_FRAME, isolate); - language_mode = ic.language_mode(); - } + KeyedStoreICNexus nexus(isolate); + KeyedStoreIC ic(IC::NO_EXTRA_FRAME, isolate, &nexus); + language_mode = ic.language_mode(); Handle<Object> result; ASSIGN_RETURN_FAILURE_ON_EXCEPTION( isolate, result, @@ -2646,23 +2470,17 @@ RUNTIME_FUNCTION(Runtime_KeyedStoreIC_Slow) { RUNTIME_FUNCTION(Runtime_ElementsTransitionAndStoreIC_Miss) { TimerEventScope<TimerEventIcMiss> timer(isolate); HandleScope scope(isolate); - // Without vector stores, length == 4. - // With vector stores, length == 5 or 6, depending on whether the vector slot + // Length == 5 or 6, depending on whether the vector slot // is passed in a virtual register or not. 
- DCHECK(!FLAG_vector_stores || args.length() == 5 || args.length() == 6); + DCHECK(args.length() == 5 || args.length() == 6); Handle<Object> object = args.at<Object>(0); Handle<Object> key = args.at<Object>(1); Handle<Object> value = args.at<Object>(2); Handle<Map> map = args.at<Map>(3); LanguageMode language_mode; - if (FLAG_vector_stores) { - KeyedStoreICNexus nexus(isolate); - KeyedStoreIC ic(IC::EXTRA_CALL_FRAME, isolate, &nexus); - language_mode = ic.language_mode(); - } else { - KeyedStoreIC ic(IC::EXTRA_CALL_FRAME, isolate); - language_mode = ic.language_mode(); - } + KeyedStoreICNexus nexus(isolate); + KeyedStoreIC ic(IC::EXTRA_CALL_FRAME, isolate, &nexus); + language_mode = ic.language_mode(); if (object->IsJSObject()) { JSObject::TransitionElementsKind(Handle<JSObject>::cast(object), map->elements_kind()); @@ -2795,9 +2613,9 @@ MaybeHandle<Object> BinaryOpIC::Transition( // Patch the inlined smi code as necessary. if (!old_state.UseInlinedSmiCode() && state.UseInlinedSmiCode()) { - PatchInlinedSmiCode(address(), ENABLE_INLINED_SMI_CHECK); + PatchInlinedSmiCode(isolate(), address(), ENABLE_INLINED_SMI_CHECK); } else if (old_state.UseInlinedSmiCode() && !state.UseInlinedSmiCode()) { - PatchInlinedSmiCode(address(), DISABLE_INLINED_SMI_CHECK); + PatchInlinedSmiCode(isolate(), address(), DISABLE_INLINED_SMI_CHECK); } return result; @@ -2868,9 +2686,9 @@ Code* CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) { HasInlinedSmiCode(address()), x, y); CompareICStub stub(isolate(), op_, old_stub.strength(), new_left, new_right, state); - if (state == CompareICState::KNOWN_OBJECT) { + if (state == CompareICState::KNOWN_RECEIVER) { stub.set_known_map( - Handle<Map>(Handle<JSObject>::cast(x)->map(), isolate())); + Handle<Map>(Handle<JSReceiver>::cast(x)->map(), isolate())); } Handle<Code> new_target = stub.GetCode(); set_target(*new_target); @@ -2890,7 +2708,7 @@ Code* CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) { // Activate inlined smi 
code. if (old_stub.state() == CompareICState::UNINITIALIZED) { - PatchInlinedSmiCode(address(), ENABLE_INLINED_SMI_CHECK); + PatchInlinedSmiCode(isolate(), address(), ENABLE_INLINED_SMI_CHECK); } return *new_target; @@ -2980,7 +2798,7 @@ Handle<Object> ToBooleanIC::ToBoolean(Handle<Object> object) { bool to_boolean_value = stub.UpdateStatus(object); Handle<Code> code = stub.GetCode(); set_target(*code); - return handle(Smi::FromInt(to_boolean_value ? 1 : 0), isolate()); + return isolate()->factory()->ToBoolean(to_boolean_value); } diff --git a/deps/v8/src/ic/ic.h b/deps/v8/src/ic/ic.h index 47883b46af..a3265d70b9 100644 --- a/deps/v8/src/ic/ic.h +++ b/deps/v8/src/ic/ic.h @@ -77,9 +77,8 @@ class IC { static bool ICUseVector(Code::Kind kind) { return kind == Code::LOAD_IC || kind == Code::KEYED_LOAD_IC || - kind == Code::CALL_IC || - (FLAG_vector_stores && - (kind == Code::STORE_IC || kind == Code::KEYED_STORE_IC)); + kind == Code::CALL_IC || kind == Code::STORE_IC || + kind == Code::KEYED_STORE_IC; } protected: @@ -144,9 +143,7 @@ class IC { State old_state, State new_state, bool target_remains_ic_stub); // As a vector-based IC, type feedback must be updated differently. - static void OnTypeFeedbackChanged(Isolate* isolate, Code* host, - TypeFeedbackVector* vector, State old_state, - State new_state); + static void OnTypeFeedbackChanged(Isolate* isolate, Code* host); static void PostPatching(Address address, Code* target, Code* old_target); // Compute the handler either by compiling or by retrieving a cached version. 
@@ -532,22 +529,10 @@ class KeyedStoreIC : public StoreIC { IcCheckTypeField::encode(ELEMENT); } - static KeyedAccessStoreMode GetKeyedAccessStoreMode( - ExtraICState extra_state) { - DCHECK(!FLAG_vector_stores); - return ExtraICStateKeyedAccessStoreMode::decode(extra_state); - } - KeyedAccessStoreMode GetKeyedAccessStoreMode() { - DCHECK(FLAG_vector_stores); return casted_nexus<KeyedStoreICNexus>()->GetKeyedAccessStoreMode(); } - static IcCheckType GetKeyType(ExtraICState extra_state) { - DCHECK(!FLAG_vector_stores); - return IcCheckTypeField::decode(extra_state); - } - KeyedStoreIC(FrameDepth depth, Isolate* isolate, KeyedStoreICNexus* nexus = NULL) : StoreIC(depth, isolate, nexus) { @@ -604,8 +589,6 @@ class KeyedStoreIC : public StoreIC { Handle<Map> ComputeTransitionedMap(Handle<Map> map, KeyedAccessStoreMode store_mode); - void ValidateStoreMode(Handle<Code> stub); - friend class IC; }; @@ -679,7 +662,8 @@ class ToBooleanIC : public IC { // Helper for BinaryOpIC and CompareIC. enum InlinedSmiCheck { ENABLE_INLINED_SMI_CHECK, DISABLE_INLINED_SMI_CHECK }; -void PatchInlinedSmiCode(Address address, InlinedSmiCheck check); +void PatchInlinedSmiCode(Isolate* isolate, Address address, + InlinedSmiCheck check); } // namespace internal } // namespace v8 diff --git a/deps/v8/src/ic/mips/OWNERS b/deps/v8/src/ic/mips/OWNERS index 5508ba626f..89455a4fbd 100644 --- a/deps/v8/src/ic/mips/OWNERS +++ b/deps/v8/src/ic/mips/OWNERS @@ -3,3 +3,4 @@ gergely.kis@imgtec.com akos.palfi@imgtec.com balazs.kilvady@imgtec.com dusan.milosavljevic@imgtec.com +ivica.bogosavljevic@imgtec.com diff --git a/deps/v8/src/ic/mips/access-compiler-mips.cc b/deps/v8/src/ic/mips/access-compiler-mips.cc index f2f6c62c71..b122946577 100644 --- a/deps/v8/src/ic/mips/access-compiler-mips.cc +++ b/deps/v8/src/ic/mips/access-compiler-mips.cc @@ -31,7 +31,6 @@ Register* PropertyAccessCompiler::store_calling_convention() { // receiver, name, scratch1, scratch2, scratch3. 
Register receiver = StoreDescriptor::ReceiverRegister(); Register name = StoreDescriptor::NameRegister(); - DCHECK(FLAG_vector_stores || a3.is(StoreTransitionDescriptor::MapRegister())); static Register registers[] = {receiver, name, a3, t0, t1}; return registers; } diff --git a/deps/v8/src/ic/mips/handler-compiler-mips.cc b/deps/v8/src/ic/mips/handler-compiler-mips.cc index 200d1f6ebe..554d0c56ff 100644 --- a/deps/v8/src/ic/mips/handler-compiler-mips.cc +++ b/deps/v8/src/ic/mips/handler-compiler-mips.cc @@ -40,7 +40,8 @@ void NamedLoadHandlerCompiler::GenerateLoadViaGetter( ParameterCount actual(0); ParameterCount expected(expected_arguments); __ LoadAccessor(a1, holder, accessor_index, ACCESSOR_GETTER); - __ InvokeFunction(a1, expected, actual, CALL_FUNCTION, NullCallWrapper()); + __ InvokeFunction(a1, expected, actual, CALL_FUNCTION, + CheckDebugStepCallWrapper()); } else { // If we generate a global code snippet for deoptimization only, remember // the place to continue after deoptimization. @@ -81,7 +82,8 @@ void NamedStoreHandlerCompiler::GenerateStoreViaSetter( ParameterCount actual(1); ParameterCount expected(expected_arguments); __ LoadAccessor(a1, holder, accessor_index, ACCESSOR_SETTER); - __ InvokeFunction(a1, expected, actual, CALL_FUNCTION, NullCallWrapper()); + __ InvokeFunction(a1, expected, actual, CALL_FUNCTION, + CheckDebugStepCallWrapper()); } else { // If we generate a global code snippet for deoptimization only, remember // the place to continue after deoptimization. @@ -141,7 +143,7 @@ void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup( // Check that receiver is a JSObject. __ lbu(scratch0, FieldMemOperand(map, Map::kInstanceTypeOffset)); - __ Branch(miss_label, lt, scratch0, Operand(FIRST_SPEC_OBJECT_TYPE)); + __ Branch(miss_label, lt, scratch0, Operand(FIRST_JS_RECEIVER_TYPE)); // Load properties array. 
Register properties = scratch0; @@ -165,10 +167,7 @@ void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup( void NamedLoadHandlerCompiler::GenerateDirectLoadGlobalFunctionPrototype( MacroAssembler* masm, int index, Register result, Label* miss) { - const int offset = Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX); - __ lw(result, MemOperand(cp, offset)); - __ lw(result, FieldMemOperand(result, JSGlobalObject::kNativeContextOffset)); - __ lw(result, MemOperand(result, Context::SlotOffset(index))); + __ LoadNativeContextSlot(index, result); // Load its initial map. The global functions all have initial maps. __ lw(result, FieldMemOperand(result, JSFunction::kPrototypeOrInitialMapOffset)); @@ -216,8 +215,10 @@ static void PushInterceptorArguments(MacroAssembler* masm, Register receiver, static void CompileCallLoadPropertyWithInterceptor( MacroAssembler* masm, Register receiver, Register holder, Register name, Handle<JSObject> holder_obj, Runtime::FunctionId id) { + DCHECK(NamedLoadHandlerCompiler::kInterceptorArgsLength == + Runtime::FunctionForId(id)->nargs); PushInterceptorArguments(masm, receiver, holder, name, holder_obj); - __ CallRuntime(id, NamedLoadHandlerCompiler::kInterceptorArgsLength); + __ CallRuntime(id); } @@ -283,6 +284,13 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall( __ lw(data, FieldMemOperand(data, FunctionTemplateInfo::kCallCodeOffset)); __ lw(data, FieldMemOperand(data, CallHandlerInfo::kDataOffset)); } + + if (api_call_info->fast_handler()->IsCode()) { + // Just tail call into the fast handler if present. + __ Jump(handle(Code::cast(api_call_info->fast_handler())), + RelocInfo::CODE_TARGET); + return; + } // Put api_function_address in place. 
Address function_address = v8::ToCData<Address>(api_call_info->callback()); ApiFunction fun(function_address); @@ -297,15 +305,10 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall( static void StoreIC_PushArgs(MacroAssembler* masm) { - if (FLAG_vector_stores) { - __ Push(StoreDescriptor::ReceiverRegister(), - StoreDescriptor::NameRegister(), StoreDescriptor::ValueRegister(), - VectorStoreICDescriptor::SlotRegister(), - VectorStoreICDescriptor::VectorRegister()); - } else { - __ Push(StoreDescriptor::ReceiverRegister(), - StoreDescriptor::NameRegister(), StoreDescriptor::ValueRegister()); - } + __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(), + StoreDescriptor::ValueRegister(), + VectorStoreICDescriptor::SlotRegister(), + VectorStoreICDescriptor::VectorRegister()); } @@ -314,7 +317,7 @@ void NamedStoreHandlerCompiler::GenerateSlow(MacroAssembler* masm) { // The slow case calls into the runtime to complete the store without causing // an IC miss that would otherwise cause a transition to the generic stub. - __ TailCallRuntime(Runtime::kStoreIC_Slow, FLAG_vector_stores ? 5 : 3, 1); + __ TailCallRuntime(Runtime::kStoreIC_Slow); } @@ -323,8 +326,7 @@ void ElementHandlerCompiler::GenerateStoreSlow(MacroAssembler* masm) { // The slow case calls into the runtime to complete the store without causing // an IC miss that would otherwise cause a transition to the generic stub. - __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow, FLAG_vector_stores ? 
5 : 3, - 1); + __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow); } @@ -698,8 +700,7 @@ void NamedLoadHandlerCompiler::GenerateLoadInterceptor(Register holder_reg) { PushInterceptorArguments(masm(), receiver(), holder_reg, this->name(), holder()); - __ TailCallRuntime(Runtime::kLoadPropertyWithInterceptor, - NamedLoadHandlerCompiler::kInterceptorArgsLength, 1); + __ TailCallRuntime(Runtime::kLoadPropertyWithInterceptor); } @@ -722,7 +723,7 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback( __ Push(at, value()); // Do tail-call to the runtime system. - __ TailCallRuntime(Runtime::kStoreCallbackProperty, 5, 1); + __ TailCallRuntime(Runtime::kStoreCallbackProperty); // Return the generated code. return GetCode(kind(), Code::FAST, name); @@ -734,7 +735,7 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreInterceptor( __ Push(receiver(), this->name(), value()); // Do tail-call to the runtime system. - __ TailCallRuntime(Runtime::kStorePropertyWithInterceptor, 3, 1); + __ TailCallRuntime(Runtime::kStorePropertyWithInterceptor); // Return the generated code. return GetCode(kind(), Code::FAST, name); diff --git a/deps/v8/src/ic/mips/ic-compiler-mips.cc b/deps/v8/src/ic/mips/ic-compiler-mips.cc index 64f1662880..86a602b3ec 100644 --- a/deps/v8/src/ic/mips/ic-compiler-mips.cc +++ b/deps/v8/src/ic/mips/ic-compiler-mips.cc @@ -10,114 +10,6 @@ namespace v8 { namespace internal { -#define __ ACCESS_MASM(masm()) - - -Handle<Code> PropertyICCompiler::CompilePolymorphic(MapHandleList* maps, - CodeHandleList* handlers, - Handle<Name> name, - Code::StubType type, - IcCheckType check) { - Label miss; - - if (check == PROPERTY && - (kind() == Code::KEYED_LOAD_IC || kind() == Code::KEYED_STORE_IC)) { - // In case we are compiling an IC for dictionary loads or stores, just - // check whether the name is unique. - if (name.is_identical_to(isolate()->factory()->normal_ic_symbol())) { - // Keyed loads with dictionaries shouldn't be here, they go generic. 
- // The DCHECK is to protect assumptions when --vector-ics is on. - DCHECK(kind() != Code::KEYED_LOAD_IC); - Register tmp = scratch1(); - __ JumpIfSmi(this->name(), &miss); - __ lw(tmp, FieldMemOperand(this->name(), HeapObject::kMapOffset)); - __ lbu(tmp, FieldMemOperand(tmp, Map::kInstanceTypeOffset)); - __ JumpIfNotUniqueNameInstanceType(tmp, &miss); - } else { - __ Branch(&miss, ne, this->name(), Operand(name)); - } - } - - Label number_case; - Register match = scratch2(); - Label* smi_target = IncludesNumberMap(maps) ? &number_case : &miss; - __ JumpIfSmi(receiver(), smi_target, match); // Reg match is 0 if Smi. - - // Polymorphic keyed stores may use the map register - Register map_reg = scratch1(); - DCHECK(kind() != Code::KEYED_STORE_IC || - map_reg.is(StoreTransitionDescriptor::MapRegister())); - - int receiver_count = maps->length(); - int number_of_handled_maps = 0; - __ lw(map_reg, FieldMemOperand(receiver(), HeapObject::kMapOffset)); - for (int current = 0; current < receiver_count; ++current) { - Handle<Map> map = maps->at(current); - if (!map->is_deprecated()) { - number_of_handled_maps++; - // Check map and tail call if there's a match. - // Separate compare from branch, to provide path for above JumpIfSmi(). - Handle<WeakCell> cell = Map::WeakCellForMap(map); - __ GetWeakValue(match, cell); - __ Subu(match, match, Operand(map_reg)); - if (map->instance_type() == HEAP_NUMBER_TYPE) { - DCHECK(!number_case.is_unused()); - __ bind(&number_case); - } - __ Jump(handlers->at(current), RelocInfo::CODE_TARGET, eq, match, - Operand(zero_reg)); - } - } - DCHECK(number_of_handled_maps != 0); - - __ bind(&miss); - TailCallBuiltin(masm(), MissBuiltin(kind())); - - // Return the generated code. - InlineCacheState state = - number_of_handled_maps > 1 ? 
POLYMORPHIC : MONOMORPHIC; - return GetCode(kind(), type, name, state); -} - - -Handle<Code> PropertyICCompiler::CompileKeyedStorePolymorphic( - MapHandleList* receiver_maps, CodeHandleList* handler_stubs, - MapHandleList* transitioned_maps) { - Label miss; - __ JumpIfSmi(receiver(), &miss); - - int receiver_count = receiver_maps->length(); - Register map_reg = scratch1(); - Register match = scratch2(); - __ lw(map_reg, FieldMemOperand(receiver(), HeapObject::kMapOffset)); - for (int i = 0; i < receiver_count; ++i) { - Handle<WeakCell> cell = Map::WeakCellForMap(receiver_maps->at(i)); - __ GetWeakValue(match, cell); - if (transitioned_maps->at(i).is_null()) { - __ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET, eq, match, - Operand(map_reg)); - } else { - Label next_map; - __ Branch(&next_map, ne, match, Operand(map_reg)); - Handle<WeakCell> cell = Map::WeakCellForMap(transitioned_maps->at(i)); - Register transition_map = scratch1(); - DCHECK(!FLAG_vector_stores && - transition_map.is(StoreTransitionDescriptor::MapRegister())); - __ LoadWeakValue(transition_map, cell, &miss); - __ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET); - __ bind(&next_map); - } - } - - __ bind(&miss); - TailCallBuiltin(masm(), MissBuiltin(kind())); - - // Return the generated code. - return GetCode(kind(), Code::NORMAL, factory()->empty_string(), POLYMORPHIC); -} - - -#undef __ #define __ ACCESS_MASM(masm) @@ -130,7 +22,7 @@ void PropertyICCompiler::GenerateRuntimeSetProperty( __ Push(a0); // Do tail-call to runtime routine. - __ TailCallRuntime(Runtime::kSetProperty, 4, 1); + __ TailCallRuntime(Runtime::kSetProperty); } diff --git a/deps/v8/src/ic/mips/ic-mips.cc b/deps/v8/src/ic/mips/ic-mips.cc index 60c06a3eb4..a27d6b56f7 100644 --- a/deps/v8/src/ic/mips/ic-mips.cc +++ b/deps/v8/src/ic/mips/ic-mips.cc @@ -316,8 +316,7 @@ void LoadIC::GenerateMiss(MacroAssembler* masm) { LoadIC_PushArgs(masm); // Perform tail call to the entry. 
- int arg_count = 4; - __ TailCallRuntime(Runtime::kLoadIC_Miss, arg_count, 1); + __ TailCallRuntime(Runtime::kLoadIC_Miss); } @@ -330,8 +329,7 @@ void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm, // Do tail-call to runtime routine. __ TailCallRuntime(is_strong(language_mode) ? Runtime::kGetPropertyStrong - : Runtime::kGetProperty, - 2, 1); + : Runtime::kGetProperty); } @@ -346,8 +344,7 @@ void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) { LoadIC_PushArgs(masm); // Perform tail call to the entry. - int arg_count = 4; - __ TailCallRuntime(Runtime::kKeyedLoadIC_Miss, arg_count, 1); + __ TailCallRuntime(Runtime::kKeyedLoadIC_Miss); } @@ -359,8 +356,7 @@ void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm, // Do tail-call to runtime routine. __ TailCallRuntime(is_strong(language_mode) ? Runtime::kKeyedGetPropertyStrong - : Runtime::kKeyedGetProperty, - 2, 1); + : Runtime::kKeyedGetProperty); } @@ -477,8 +473,13 @@ static void KeyedStoreGenerateMegamorphicHelper( // Fast case: Do the store, could be either Object or double. 
__ bind(fast_object); - Register scratch_value = t0; + Register scratch = t0; + Register scratch2 = t4; + Register scratch3 = t5; Register address = t1; + DCHECK(!AreAliased(value, key, receiver, receiver_map, elements_map, elements, + scratch, scratch2, scratch3, address)); + if (check_map == kCheckMap) { __ lw(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset)); __ Branch(fast_double, ne, elements_map, @@ -492,11 +493,10 @@ static void KeyedStoreGenerateMegamorphicHelper( __ Addu(address, elements, FixedArray::kHeaderSize - kHeapObjectTag); __ sll(at, key, kPointerSizeLog2 - kSmiTagSize); __ addu(address, address, at); - __ lw(scratch_value, MemOperand(address)); - __ Branch(&holecheck_passed1, ne, scratch_value, + __ lw(scratch, MemOperand(address)); + __ Branch(&holecheck_passed1, ne, scratch, Operand(masm->isolate()->factory()->the_hole_value())); - __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch_value, - slow); + __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch, slow); __ bind(&holecheck_passed1); @@ -506,35 +506,34 @@ static void KeyedStoreGenerateMegamorphicHelper( if (increment_length == kIncrementLength) { // Add 1 to receiver->length. - __ Addu(scratch_value, key, Operand(Smi::FromInt(1))); - __ sw(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset)); + __ Addu(scratch, key, Operand(Smi::FromInt(1))); + __ sw(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset)); } // It's irrelevant whether array is smi-only or not when writing a smi. __ Addu(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); - __ sll(scratch_value, key, kPointerSizeLog2 - kSmiTagSize); - __ Addu(address, address, scratch_value); + __ sll(scratch, key, kPointerSizeLog2 - kSmiTagSize); + __ Addu(address, address, scratch); __ sw(value, MemOperand(address)); __ Ret(); __ bind(&non_smi_value); // Escape to elements kind transition case. 
- __ CheckFastObjectElements(receiver_map, scratch_value, - &transition_smi_elements); + __ CheckFastObjectElements(receiver_map, scratch, &transition_smi_elements); // Fast elements array, store the value to the elements backing store. __ bind(&finish_object_store); if (increment_length == kIncrementLength) { // Add 1 to receiver->length. - __ Addu(scratch_value, key, Operand(Smi::FromInt(1))); - __ sw(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset)); + __ Addu(scratch, key, Operand(Smi::FromInt(1))); + __ sw(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset)); } __ Addu(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); - __ sll(scratch_value, key, kPointerSizeLog2 - kSmiTagSize); - __ Addu(address, address, scratch_value); + __ sll(scratch, key, kPointerSizeLog2 - kSmiTagSize); + __ Addu(address, address, scratch); __ sw(value, MemOperand(address)); // Update write barrier for the elements array address. - __ mov(scratch_value, value); // Preserve the value which is returned. - __ RecordWrite(elements, address, scratch_value, kRAHasNotBeenSaved, + __ mov(scratch, value); // Preserve the value which is returned. + __ RecordWrite(elements, address, scratch, kRAHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK); __ Ret(); @@ -553,34 +552,31 @@ static void KeyedStoreGenerateMegamorphicHelper( kHoleNanUpper32Offset - kHeapObjectTag)); __ sll(at, key, kPointerSizeLog2); __ addu(address, address, at); - __ lw(scratch_value, MemOperand(address)); - __ Branch(&fast_double_without_map_check, ne, scratch_value, + __ lw(scratch, MemOperand(address)); + __ Branch(&fast_double_without_map_check, ne, scratch, Operand(kHoleNanUpper32)); - __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch_value, - slow); + __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch, slow); __ bind(&fast_double_without_map_check); - __ StoreNumberToDoubleElements(value, key, - elements, // Overwritten. 
- a3, // Scratch regs... - t0, t1, &transition_double_elements); + __ StoreNumberToDoubleElements(value, key, elements, scratch, scratch2, + scratch3, &transition_double_elements); if (increment_length == kIncrementLength) { // Add 1 to receiver->length. - __ Addu(scratch_value, key, Operand(Smi::FromInt(1))); - __ sw(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset)); + __ Addu(scratch, key, Operand(Smi::FromInt(1))); + __ sw(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset)); } __ Ret(); __ bind(&transition_smi_elements); // Transition the array appropriately depending on the value type. - __ lw(t0, FieldMemOperand(value, HeapObject::kMapOffset)); + __ lw(scratch, FieldMemOperand(value, HeapObject::kMapOffset)); __ LoadRoot(at, Heap::kHeapNumberMapRootIndex); - __ Branch(&non_double_value, ne, t0, Operand(at)); + __ Branch(&non_double_value, ne, scratch, Operand(at)); // Value is a double. Transition FAST_SMI_ELEMENTS -> // FAST_DOUBLE_ELEMENTS and complete the store. __ LoadTransitionedArrayMapConditional( - FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS, receiver_map, t0, slow); + FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS, receiver_map, scratch, slow); AllocationSiteMode mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS); ElementsTransitionGenerator::GenerateSmiToDouble(masm, receiver, key, value, @@ -591,7 +587,7 @@ static void KeyedStoreGenerateMegamorphicHelper( __ bind(&non_double_value); // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, FAST_ELEMENTS, - receiver_map, t0, slow); + receiver_map, scratch, slow); mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS); ElementsTransitionGenerator::GenerateMapChangeElementsTransition( masm, receiver, key, value, receiver_map, mode, slow); @@ -603,7 +599,7 @@ static void KeyedStoreGenerateMegamorphicHelper( // HeapNumber. 
Make sure that the receiver is a Array with FAST_ELEMENTS and // transition array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS, - receiver_map, t0, slow); + receiver_map, scratch, slow); mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS); ElementsTransitionGenerator::GenerateDoubleToObject( masm, receiver, key, value, receiver_map, mode, slow); @@ -675,19 +671,17 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm, __ lb(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset)); __ JumpIfNotUniqueNameInstanceType(t0, &slow); - if (FLAG_vector_stores) { - // The handlers in the stub cache expect a vector and slot. Since we won't - // change the IC from any downstream misses, a dummy vector can be used. - Register vector = VectorStoreICDescriptor::VectorRegister(); - Register slot = VectorStoreICDescriptor::SlotRegister(); - DCHECK(!AreAliased(vector, slot, t1, t2, t4, t5)); - Handle<TypeFeedbackVector> dummy_vector = - TypeFeedbackVector::DummyVector(masm->isolate()); - int slot_index = dummy_vector->GetIndex( - FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedStoreICSlot)); - __ LoadRoot(vector, Heap::kDummyVectorRootIndex); - __ li(slot, Operand(Smi::FromInt(slot_index))); - } + // The handlers in the stub cache expect a vector and slot. Since we won't + // change the IC from any downstream misses, a dummy vector can be used. 
+ Register vector = VectorStoreICDescriptor::VectorRegister(); + Register slot = VectorStoreICDescriptor::SlotRegister(); + DCHECK(!AreAliased(vector, slot, t1, t2, t4, t5)); + Handle<TypeFeedbackVector> dummy_vector = + TypeFeedbackVector::DummyVector(masm->isolate()); + int slot_index = dummy_vector->GetIndex( + FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedStoreICSlot)); + __ LoadRoot(vector, Heap::kDummyVectorRootIndex); + __ li(slot, Operand(Smi::FromInt(slot_index))); Code::Flags flags = Code::RemoveTypeAndHolderFromFlags( Code::ComputeHandlerFlags(Code::STORE_IC)); @@ -741,23 +735,17 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm, static void StoreIC_PushArgs(MacroAssembler* masm) { - if (FLAG_vector_stores) { - __ Push(StoreDescriptor::ReceiverRegister(), - StoreDescriptor::NameRegister(), StoreDescriptor::ValueRegister(), - VectorStoreICDescriptor::SlotRegister(), - VectorStoreICDescriptor::VectorRegister()); - } else { - __ Push(StoreDescriptor::ReceiverRegister(), - StoreDescriptor::NameRegister(), StoreDescriptor::ValueRegister()); - } + __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(), + StoreDescriptor::ValueRegister(), + VectorStoreICDescriptor::SlotRegister(), + VectorStoreICDescriptor::VectorRegister()); } void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) { StoreIC_PushArgs(masm); - int args = FLAG_vector_stores ? 5 : 3; - __ TailCallRuntime(Runtime::kKeyedStoreIC_Miss, args, 1); + __ TailCallRuntime(Runtime::kKeyedStoreIC_Miss); } @@ -783,8 +771,7 @@ void StoreIC::GenerateMiss(MacroAssembler* masm) { StoreIC_PushArgs(masm); // Perform tail call to the entry. - int args = FLAG_vector_stores ? 
5 : 3; - __ TailCallRuntime(Runtime::kStoreIC_Miss, args, 1); + __ TailCallRuntime(Runtime::kStoreIC_Miss); } @@ -849,7 +836,8 @@ bool CompareIC::HasInlinedSmiCode(Address address) { } -void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) { +void PatchInlinedSmiCode(Isolate* isolate, Address address, + InlinedSmiCheck check) { Address andi_instruction_address = address + Assembler::kCallTargetAddressOffset; @@ -887,7 +875,7 @@ void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) { // andi at, rx, #kSmiTagMask // Branch <target>, ne, at, Operand(zero_reg) // and vice-versa to be disabled again. - CodePatcher patcher(patch_address, 2); + CodePatcher patcher(isolate, patch_address, 2); Register reg = Register::from_code(Assembler::GetRs(instr_at_patch)); if (check == ENABLE_INLINED_SMI_CHECK) { DCHECK(Assembler::IsAndImmediate(instr_at_patch)); diff --git a/deps/v8/src/ic/mips64/OWNERS b/deps/v8/src/ic/mips64/OWNERS index 5508ba626f..89455a4fbd 100644 --- a/deps/v8/src/ic/mips64/OWNERS +++ b/deps/v8/src/ic/mips64/OWNERS @@ -3,3 +3,4 @@ gergely.kis@imgtec.com akos.palfi@imgtec.com balazs.kilvady@imgtec.com dusan.milosavljevic@imgtec.com +ivica.bogosavljevic@imgtec.com diff --git a/deps/v8/src/ic/mips64/access-compiler-mips64.cc b/deps/v8/src/ic/mips64/access-compiler-mips64.cc index 500a6d65c7..96e921c7c6 100644 --- a/deps/v8/src/ic/mips64/access-compiler-mips64.cc +++ b/deps/v8/src/ic/mips64/access-compiler-mips64.cc @@ -31,7 +31,6 @@ Register* PropertyAccessCompiler::store_calling_convention() { // receiver, name, scratch1, scratch2, scratch3. 
Register receiver = StoreDescriptor::ReceiverRegister(); Register name = StoreDescriptor::NameRegister(); - DCHECK(FLAG_vector_stores || a3.is(StoreTransitionDescriptor::MapRegister())); static Register registers[] = {receiver, name, a3, a4, a5}; return registers; } diff --git a/deps/v8/src/ic/mips64/handler-compiler-mips64.cc b/deps/v8/src/ic/mips64/handler-compiler-mips64.cc index 942c42c221..d94a292228 100644 --- a/deps/v8/src/ic/mips64/handler-compiler-mips64.cc +++ b/deps/v8/src/ic/mips64/handler-compiler-mips64.cc @@ -40,7 +40,8 @@ void NamedLoadHandlerCompiler::GenerateLoadViaGetter( ParameterCount actual(0); ParameterCount expected(expected_arguments); __ LoadAccessor(a1, holder, accessor_index, ACCESSOR_GETTER); - __ InvokeFunction(a1, expected, actual, CALL_FUNCTION, NullCallWrapper()); + __ InvokeFunction(a1, expected, actual, CALL_FUNCTION, + CheckDebugStepCallWrapper()); } else { // If we generate a global code snippet for deoptimization only, remember // the place to continue after deoptimization. @@ -81,7 +82,8 @@ void NamedStoreHandlerCompiler::GenerateStoreViaSetter( ParameterCount actual(1); ParameterCount expected(expected_arguments); __ LoadAccessor(a1, holder, accessor_index, ACCESSOR_SETTER); - __ InvokeFunction(a1, expected, actual, CALL_FUNCTION, NullCallWrapper()); + __ InvokeFunction(a1, expected, actual, CALL_FUNCTION, + CheckDebugStepCallWrapper()); } else { // If we generate a global code snippet for deoptimization only, remember // the place to continue after deoptimization. @@ -141,7 +143,7 @@ void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup( // Check that receiver is a JSObject. __ lbu(scratch0, FieldMemOperand(map, Map::kInstanceTypeOffset)); - __ Branch(miss_label, lt, scratch0, Operand(FIRST_SPEC_OBJECT_TYPE)); + __ Branch(miss_label, lt, scratch0, Operand(FIRST_JS_RECEIVER_TYPE)); // Load properties array. 
Register properties = scratch0; @@ -165,11 +167,7 @@ void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup( void NamedLoadHandlerCompiler::GenerateDirectLoadGlobalFunctionPrototype( MacroAssembler* masm, int index, Register result, Label* miss) { - // Check we're still in the same context. - const int offset = Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX); - __ ld(result, MemOperand(cp, offset)); - __ ld(result, FieldMemOperand(result, JSGlobalObject::kNativeContextOffset)); - __ ld(result, MemOperand(result, Context::SlotOffset(index))); + __ LoadNativeContextSlot(index, result); // Load its initial map. The global functions all have initial maps. __ ld(result, FieldMemOperand(result, JSFunction::kPrototypeOrInitialMapOffset)); @@ -217,8 +215,10 @@ static void PushInterceptorArguments(MacroAssembler* masm, Register receiver, static void CompileCallLoadPropertyWithInterceptor( MacroAssembler* masm, Register receiver, Register holder, Register name, Handle<JSObject> holder_obj, Runtime::FunctionId id) { + DCHECK(NamedLoadHandlerCompiler::kInterceptorArgsLength == + Runtime::FunctionForId(id)->nargs); PushInterceptorArguments(masm, receiver, holder, name, holder_obj); - __ CallRuntime(id, NamedLoadHandlerCompiler::kInterceptorArgsLength); + __ CallRuntime(id); } @@ -284,6 +284,13 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall( __ ld(data, FieldMemOperand(data, FunctionTemplateInfo::kCallCodeOffset)); __ ld(data, FieldMemOperand(data, CallHandlerInfo::kDataOffset)); } + + if (api_call_info->fast_handler()->IsCode()) { + // Just tail call into the fast handler if present. + __ Jump(handle(Code::cast(api_call_info->fast_handler())), + RelocInfo::CODE_TARGET); + return; + } // Put api_function_address in place. 
Address function_address = v8::ToCData<Address>(api_call_info->callback()); ApiFunction fun(function_address); @@ -298,15 +305,10 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall( static void StoreIC_PushArgs(MacroAssembler* masm) { - if (FLAG_vector_stores) { - __ Push(StoreDescriptor::ReceiverRegister(), - StoreDescriptor::NameRegister(), StoreDescriptor::ValueRegister(), - VectorStoreICDescriptor::SlotRegister(), - VectorStoreICDescriptor::VectorRegister()); - } else { - __ Push(StoreDescriptor::ReceiverRegister(), - StoreDescriptor::NameRegister(), StoreDescriptor::ValueRegister()); - } + __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(), + StoreDescriptor::ValueRegister(), + VectorStoreICDescriptor::SlotRegister(), + VectorStoreICDescriptor::VectorRegister()); } @@ -315,7 +317,7 @@ void NamedStoreHandlerCompiler::GenerateSlow(MacroAssembler* masm) { // The slow case calls into the runtime to complete the store without causing // an IC miss that would otherwise cause a transition to the generic stub. - __ TailCallRuntime(Runtime::kStoreIC_Slow, FLAG_vector_stores ? 5 : 3, 1); + __ TailCallRuntime(Runtime::kStoreIC_Slow); } @@ -324,8 +326,7 @@ void ElementHandlerCompiler::GenerateStoreSlow(MacroAssembler* masm) { // The slow case calls into the runtime to complete the store without causing // an IC miss that would otherwise cause a transition to the generic stub. - __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow, FLAG_vector_stores ? 
5 : 3, - 1); + __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow); } @@ -699,8 +700,7 @@ void NamedLoadHandlerCompiler::GenerateLoadInterceptor(Register holder_reg) { PushInterceptorArguments(masm(), receiver(), holder_reg, this->name(), holder()); - __ TailCallRuntime(Runtime::kLoadPropertyWithInterceptor, - NamedLoadHandlerCompiler::kInterceptorArgsLength, 1); + __ TailCallRuntime(Runtime::kLoadPropertyWithInterceptor); } @@ -723,7 +723,7 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback( __ Push(at, value()); // Do tail-call to the runtime system. - __ TailCallRuntime(Runtime::kStoreCallbackProperty, 5, 1); + __ TailCallRuntime(Runtime::kStoreCallbackProperty); // Return the generated code. return GetCode(kind(), Code::FAST, name); @@ -735,7 +735,7 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreInterceptor( __ Push(receiver(), this->name(), value()); // Do tail-call to the runtime system. - __ TailCallRuntime(Runtime::kStorePropertyWithInterceptor, 3, 1); + __ TailCallRuntime(Runtime::kStorePropertyWithInterceptor); // Return the generated code. return GetCode(kind(), Code::FAST, name); diff --git a/deps/v8/src/ic/mips64/ic-compiler-mips64.cc b/deps/v8/src/ic/mips64/ic-compiler-mips64.cc index 8cdd8f03bc..276f3afd38 100644 --- a/deps/v8/src/ic/mips64/ic-compiler-mips64.cc +++ b/deps/v8/src/ic/mips64/ic-compiler-mips64.cc @@ -10,114 +10,6 @@ namespace v8 { namespace internal { -#define __ ACCESS_MASM(masm()) - - -Handle<Code> PropertyICCompiler::CompilePolymorphic(MapHandleList* maps, - CodeHandleList* handlers, - Handle<Name> name, - Code::StubType type, - IcCheckType check) { - Label miss; - - if (check == PROPERTY && - (kind() == Code::KEYED_LOAD_IC || kind() == Code::KEYED_STORE_IC)) { - // In case we are compiling an IC for dictionary loads or stores, just - // check whether the name is unique. - if (name.is_identical_to(isolate()->factory()->normal_ic_symbol())) { - // Keyed loads with dictionaries shouldn't be here, they go generic. 
- // The DCHECK is to protect assumptions when --vector-ics is on. - DCHECK(kind() != Code::KEYED_LOAD_IC); - Register tmp = scratch1(); - __ JumpIfSmi(this->name(), &miss); - __ ld(tmp, FieldMemOperand(this->name(), HeapObject::kMapOffset)); - __ lbu(tmp, FieldMemOperand(tmp, Map::kInstanceTypeOffset)); - __ JumpIfNotUniqueNameInstanceType(tmp, &miss); - } else { - __ Branch(&miss, ne, this->name(), Operand(name)); - } - } - - Label number_case; - Register match = scratch2(); - Label* smi_target = IncludesNumberMap(maps) ? &number_case : &miss; - __ JumpIfSmi(receiver(), smi_target, match); // Reg match is 0 if Smi. - - // Polymorphic keyed stores may use the map register - Register map_reg = scratch1(); - DCHECK(kind() != Code::KEYED_STORE_IC || - map_reg.is(StoreTransitionDescriptor::MapRegister())); - - int receiver_count = maps->length(); - int number_of_handled_maps = 0; - __ ld(map_reg, FieldMemOperand(receiver(), HeapObject::kMapOffset)); - for (int current = 0; current < receiver_count; ++current) { - Handle<Map> map = maps->at(current); - if (!map->is_deprecated()) { - number_of_handled_maps++; - // Check map and tail call if there's a match. - // Separate compare from branch, to provide path for above JumpIfSmi(). - Handle<WeakCell> cell = Map::WeakCellForMap(map); - __ GetWeakValue(match, cell); - __ Dsubu(match, match, Operand(map_reg)); - if (map->instance_type() == HEAP_NUMBER_TYPE) { - DCHECK(!number_case.is_unused()); - __ bind(&number_case); - } - __ Jump(handlers->at(current), RelocInfo::CODE_TARGET, eq, match, - Operand(zero_reg)); - } - } - DCHECK(number_of_handled_maps != 0); - - __ bind(&miss); - TailCallBuiltin(masm(), MissBuiltin(kind())); - - // Return the generated code. - InlineCacheState state = - number_of_handled_maps > 1 ? 
POLYMORPHIC : MONOMORPHIC; - return GetCode(kind(), type, name, state); -} - - -Handle<Code> PropertyICCompiler::CompileKeyedStorePolymorphic( - MapHandleList* receiver_maps, CodeHandleList* handler_stubs, - MapHandleList* transitioned_maps) { - Label miss; - __ JumpIfSmi(receiver(), &miss); - - int receiver_count = receiver_maps->length(); - Register map_reg = scratch1(); - Register match = scratch2(); - __ ld(map_reg, FieldMemOperand(receiver(), HeapObject::kMapOffset)); - for (int i = 0; i < receiver_count; ++i) { - Handle<WeakCell> cell = Map::WeakCellForMap(receiver_maps->at(i)); - __ GetWeakValue(match, cell); - if (transitioned_maps->at(i).is_null()) { - __ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET, eq, match, - Operand(map_reg)); - } else { - Label next_map; - __ Branch(&next_map, ne, match, Operand(map_reg)); - Handle<WeakCell> cell = Map::WeakCellForMap(transitioned_maps->at(i)); - Register transition_map = scratch1(); - DCHECK(!FLAG_vector_stores && - transition_map.is(StoreTransitionDescriptor::MapRegister())); - __ LoadWeakValue(transition_map, cell, &miss); - __ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET); - __ bind(&next_map); - } - } - - __ bind(&miss); - TailCallBuiltin(masm(), MissBuiltin(kind())); - - // Return the generated code. - return GetCode(kind(), Code::NORMAL, factory()->empty_string(), POLYMORPHIC); -} - - -#undef __ #define __ ACCESS_MASM(masm) @@ -130,7 +22,7 @@ void PropertyICCompiler::GenerateRuntimeSetProperty( __ Push(a0); // Do tail-call to runtime routine. - __ TailCallRuntime(Runtime::kSetProperty, 4, 1); + __ TailCallRuntime(Runtime::kSetProperty); } diff --git a/deps/v8/src/ic/mips64/ic-mips64.cc b/deps/v8/src/ic/mips64/ic-mips64.cc index e73921a317..c5da5fbb42 100644 --- a/deps/v8/src/ic/mips64/ic-mips64.cc +++ b/deps/v8/src/ic/mips64/ic-mips64.cc @@ -313,8 +313,7 @@ void LoadIC::GenerateMiss(MacroAssembler* masm) { LoadIC_PushArgs(masm); // Perform tail call to the entry. 
- int arg_count = 4; - __ TailCallRuntime(Runtime::kLoadIC_Miss, arg_count, 1); + __ TailCallRuntime(Runtime::kLoadIC_Miss); } @@ -327,8 +326,7 @@ void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm, // Do tail-call to runtime routine. __ TailCallRuntime(is_strong(language_mode) ? Runtime::kGetPropertyStrong - : Runtime::kGetProperty, - 2, 1); + : Runtime::kGetProperty); } @@ -343,8 +341,7 @@ void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) { LoadIC_PushArgs(masm); // Perform tail call to the entry. - int arg_count = 4; - __ TailCallRuntime(Runtime::kKeyedLoadIC_Miss, arg_count, 1); + __ TailCallRuntime(Runtime::kKeyedLoadIC_Miss); } @@ -356,8 +353,7 @@ void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm, // Do tail-call to runtime routine. __ TailCallRuntime(is_strong(language_mode) ? Runtime::kKeyedGetPropertyStrong - : Runtime::kKeyedGetProperty, - 2, 1); + : Runtime::kKeyedGetProperty); } @@ -474,8 +470,12 @@ static void KeyedStoreGenerateMegamorphicHelper( // Fast case: Do the store, could be either Object or double. 
__ bind(fast_object); - Register scratch_value = a4; + Register scratch = a4; + Register scratch2 = t0; Register address = a5; + DCHECK(!AreAliased(value, key, receiver, receiver_map, elements_map, elements, + scratch, scratch2, address)); + if (check_map == kCheckMap) { __ ld(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset)); __ Branch(fast_double, ne, elements_map, @@ -489,12 +489,11 @@ static void KeyedStoreGenerateMegamorphicHelper( __ Daddu(address, elements, FixedArray::kHeaderSize - kHeapObjectTag); __ SmiScale(at, key, kPointerSizeLog2); __ daddu(address, address, at); - __ ld(scratch_value, MemOperand(address)); + __ ld(scratch, MemOperand(address)); - __ Branch(&holecheck_passed1, ne, scratch_value, + __ Branch(&holecheck_passed1, ne, scratch, Operand(masm->isolate()->factory()->the_hole_value())); - __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch_value, - slow); + __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch, slow); __ bind(&holecheck_passed1); @@ -504,37 +503,36 @@ static void KeyedStoreGenerateMegamorphicHelper( if (increment_length == kIncrementLength) { // Add 1 to receiver->length. - __ Daddu(scratch_value, key, Operand(Smi::FromInt(1))); - __ sd(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset)); + __ Daddu(scratch, key, Operand(Smi::FromInt(1))); + __ sd(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset)); } // It's irrelevant whether array is smi-only or not when writing a smi. __ Daddu(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); - __ SmiScale(scratch_value, key, kPointerSizeLog2); - __ Daddu(address, address, scratch_value); + __ SmiScale(scratch, key, kPointerSizeLog2); + __ Daddu(address, address, scratch); __ sd(value, MemOperand(address)); __ Ret(); __ bind(&non_smi_value); // Escape to elements kind transition case. 
- __ CheckFastObjectElements(receiver_map, scratch_value, - &transition_smi_elements); + __ CheckFastObjectElements(receiver_map, scratch, &transition_smi_elements); // Fast elements array, store the value to the elements backing store. __ bind(&finish_object_store); if (increment_length == kIncrementLength) { // Add 1 to receiver->length. - __ Daddu(scratch_value, key, Operand(Smi::FromInt(1))); - __ sd(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset)); + __ Daddu(scratch, key, Operand(Smi::FromInt(1))); + __ sd(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset)); } __ Daddu(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); - __ SmiScale(scratch_value, key, kPointerSizeLog2); - __ Daddu(address, address, scratch_value); + __ SmiScale(scratch, key, kPointerSizeLog2); + __ Daddu(address, address, scratch); __ sd(value, MemOperand(address)); // Update write barrier for the elements array address. - __ mov(scratch_value, value); // Preserve the value which is returned. - __ RecordWrite(elements, address, scratch_value, kRAHasNotBeenSaved, + __ mov(scratch, value); // Preserve the value which is returned. + __ RecordWrite(elements, address, scratch, kRAHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK); __ Ret(); @@ -554,34 +552,31 @@ static void KeyedStoreGenerateMegamorphicHelper( kHeapObjectTag)); __ SmiScale(at, key, kPointerSizeLog2); __ daddu(address, address, at); - __ lw(scratch_value, MemOperand(address)); - __ Branch(&fast_double_without_map_check, ne, scratch_value, + __ lw(scratch, MemOperand(address)); + __ Branch(&fast_double_without_map_check, ne, scratch, Operand(static_cast<int32_t>(kHoleNanUpper32))); - __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch_value, - slow); + __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch, slow); __ bind(&fast_double_without_map_check); - __ StoreNumberToDoubleElements(value, key, - elements, // Overwritten. 
- a3, // Scratch regs... - a4, &transition_double_elements); + __ StoreNumberToDoubleElements(value, key, elements, scratch, scratch2, + &transition_double_elements); if (increment_length == kIncrementLength) { // Add 1 to receiver->length. - __ Daddu(scratch_value, key, Operand(Smi::FromInt(1))); - __ sd(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset)); + __ Daddu(scratch, key, Operand(Smi::FromInt(1))); + __ sd(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset)); } __ Ret(); __ bind(&transition_smi_elements); // Transition the array appropriately depending on the value type. - __ ld(a4, FieldMemOperand(value, HeapObject::kMapOffset)); + __ ld(scratch, FieldMemOperand(value, HeapObject::kMapOffset)); __ LoadRoot(at, Heap::kHeapNumberMapRootIndex); - __ Branch(&non_double_value, ne, a4, Operand(at)); + __ Branch(&non_double_value, ne, scratch, Operand(at)); // Value is a double. Transition FAST_SMI_ELEMENTS -> // FAST_DOUBLE_ELEMENTS and complete the store. __ LoadTransitionedArrayMapConditional( - FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS, receiver_map, a4, slow); + FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS, receiver_map, scratch, slow); AllocationSiteMode mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS); ElementsTransitionGenerator::GenerateSmiToDouble(masm, receiver, key, value, @@ -592,7 +587,7 @@ static void KeyedStoreGenerateMegamorphicHelper( __ bind(&non_double_value); // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, FAST_ELEMENTS, - receiver_map, a4, slow); + receiver_map, scratch, slow); mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS); ElementsTransitionGenerator::GenerateMapChangeElementsTransition( masm, receiver, key, value, receiver_map, mode, slow); @@ -604,7 +599,7 @@ static void KeyedStoreGenerateMegamorphicHelper( // HeapNumber. 
Make sure that the receiver is a Array with FAST_ELEMENTS and // transition array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS, - receiver_map, a4, slow); + receiver_map, scratch, slow); mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS); ElementsTransitionGenerator::GenerateDoubleToObject( masm, receiver, key, value, receiver_map, mode, slow); @@ -673,20 +668,18 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm, __ lb(a4, FieldMemOperand(a4, Map::kInstanceTypeOffset)); __ JumpIfNotUniqueNameInstanceType(a4, &slow); - if (FLAG_vector_stores) { - // The handlers in the stub cache expect a vector and slot. Since we won't - // change the IC from any downstream misses, a dummy vector can be used. - Register vector = VectorStoreICDescriptor::VectorRegister(); - Register slot = VectorStoreICDescriptor::SlotRegister(); - - DCHECK(!AreAliased(vector, slot, a5, a6, a7, t0)); - Handle<TypeFeedbackVector> dummy_vector = - TypeFeedbackVector::DummyVector(masm->isolate()); - int slot_index = dummy_vector->GetIndex( - FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedStoreICSlot)); - __ LoadRoot(vector, Heap::kDummyVectorRootIndex); - __ li(slot, Operand(Smi::FromInt(slot_index))); - } + // The handlers in the stub cache expect a vector and slot. Since we won't + // change the IC from any downstream misses, a dummy vector can be used. 
+ Register vector = VectorStoreICDescriptor::VectorRegister(); + Register slot = VectorStoreICDescriptor::SlotRegister(); + + DCHECK(!AreAliased(vector, slot, a5, a6, a7, t0)); + Handle<TypeFeedbackVector> dummy_vector = + TypeFeedbackVector::DummyVector(masm->isolate()); + int slot_index = dummy_vector->GetIndex( + FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedStoreICSlot)); + __ LoadRoot(vector, Heap::kDummyVectorRootIndex); + __ li(slot, Operand(Smi::FromInt(slot_index))); Code::Flags flags = Code::RemoveTypeAndHolderFromFlags( Code::ComputeHandlerFlags(Code::STORE_IC)); @@ -740,23 +733,17 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm, static void StoreIC_PushArgs(MacroAssembler* masm) { - if (FLAG_vector_stores) { - __ Push(StoreDescriptor::ReceiverRegister(), - StoreDescriptor::NameRegister(), StoreDescriptor::ValueRegister(), - VectorStoreICDescriptor::SlotRegister(), - VectorStoreICDescriptor::VectorRegister()); - } else { - __ Push(StoreDescriptor::ReceiverRegister(), - StoreDescriptor::NameRegister(), StoreDescriptor::ValueRegister()); - } + __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(), + StoreDescriptor::ValueRegister(), + VectorStoreICDescriptor::SlotRegister(), + VectorStoreICDescriptor::VectorRegister()); } void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) { StoreIC_PushArgs(masm); - int args = FLAG_vector_stores ? 5 : 3; - __ TailCallRuntime(Runtime::kKeyedStoreIC_Miss, args, 1); + __ TailCallRuntime(Runtime::kKeyedStoreIC_Miss); } @@ -782,8 +769,7 @@ void StoreIC::GenerateMiss(MacroAssembler* masm) { StoreIC_PushArgs(masm); // Perform tail call to the entry. - int args = FLAG_vector_stores ? 
5 : 3; - __ TailCallRuntime(Runtime::kStoreIC_Miss, args, 1); + __ TailCallRuntime(Runtime::kStoreIC_Miss); } @@ -846,7 +832,8 @@ bool CompareIC::HasInlinedSmiCode(Address address) { } -void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) { +void PatchInlinedSmiCode(Isolate* isolate, Address address, + InlinedSmiCheck check) { Address andi_instruction_address = address + Assembler::kCallTargetAddressOffset; @@ -876,8 +863,6 @@ void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) { Address patch_address = andi_instruction_address - delta * Instruction::kInstrSize; Instr instr_at_patch = Assembler::instr_at(patch_address); - Instr branch_instr = - Assembler::instr_at(patch_address + Instruction::kInstrSize); // This is patching a conditional "jump if not smi/jump if smi" site. // Enabling by changing from // andi at, rx, 0 @@ -886,7 +871,7 @@ void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) { // andi at, rx, #kSmiTagMask // Branch <target>, ne, at, Operand(zero_reg) // and vice-versa to be disabled again. 
- CodePatcher patcher(patch_address, 2); + CodePatcher patcher(isolate, patch_address, 2); Register reg = Register::from_code(Assembler::GetRs(instr_at_patch)); if (check == ENABLE_INLINED_SMI_CHECK) { DCHECK(Assembler::IsAndImmediate(instr_at_patch)); @@ -897,13 +882,44 @@ void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) { DCHECK(Assembler::IsAndImmediate(instr_at_patch)); patcher.masm()->andi(at, reg, 0); } + Instr branch_instr = + Assembler::instr_at(patch_address + Instruction::kInstrSize); DCHECK(Assembler::IsBranch(branch_instr)); - if (Assembler::IsBeq(branch_instr)) { - patcher.ChangeBranchCondition(ne); - } else { - DCHECK(Assembler::IsBne(branch_instr)); - patcher.ChangeBranchCondition(eq); + + uint32_t opcode = Assembler::GetOpcodeField(branch_instr); + // Currently only the 'eq' and 'ne' cond values are supported and the simple + // branch instructions and their r6 variants (with opcode being the branch + // type). There are some special cases (see Assembler::IsBranch()) so + // extending this would be tricky. + DCHECK(opcode == BEQ || // BEQ + opcode == BNE || // BNE + opcode == POP10 || // BEQC + opcode == POP30 || // BNEC + opcode == POP66 || // BEQZC + opcode == POP76); // BNEZC + switch (opcode) { + case BEQ: + opcode = BNE; // change BEQ to BNE. + break; + case POP10: + opcode = POP30; // change BEQC to BNEC. + break; + case POP66: + opcode = POP76; // change BEQZC to BNEZC. + break; + case BNE: + opcode = BEQ; // change BNE to BEQ. + break; + case POP30: + opcode = POP10; // change BNEC to BEQC. + break; + case POP76: + opcode = POP66; // change BNEZC to BEQZC. 
+ break; + default: + UNIMPLEMENTED(); } + patcher.ChangeBranchCondition(branch_instr, opcode); } } // namespace internal } // namespace v8 diff --git a/deps/v8/src/ic/ppc/access-compiler-ppc.cc b/deps/v8/src/ic/ppc/access-compiler-ppc.cc index fcbbc66121..b1e06e16e1 100644 --- a/deps/v8/src/ic/ppc/access-compiler-ppc.cc +++ b/deps/v8/src/ic/ppc/access-compiler-ppc.cc @@ -31,7 +31,6 @@ Register* PropertyAccessCompiler::store_calling_convention() { // receiver, name, scratch1, scratch2, scratch3. Register receiver = StoreDescriptor::ReceiverRegister(); Register name = StoreDescriptor::NameRegister(); - DCHECK(FLAG_vector_stores || r6.is(StoreTransitionDescriptor::MapRegister())); static Register registers[] = {receiver, name, r6, r7, r8}; return registers; } diff --git a/deps/v8/src/ic/ppc/handler-compiler-ppc.cc b/deps/v8/src/ic/ppc/handler-compiler-ppc.cc index 0335362fbb..8b48755bbf 100644 --- a/deps/v8/src/ic/ppc/handler-compiler-ppc.cc +++ b/deps/v8/src/ic/ppc/handler-compiler-ppc.cc @@ -40,7 +40,8 @@ void NamedLoadHandlerCompiler::GenerateLoadViaGetter( ParameterCount actual(0); ParameterCount expected(expected_arguments); __ LoadAccessor(r4, holder, accessor_index, ACCESSOR_GETTER); - __ InvokeFunction(r4, expected, actual, CALL_FUNCTION, NullCallWrapper()); + __ InvokeFunction(r4, expected, actual, CALL_FUNCTION, + CheckDebugStepCallWrapper()); } else { // If we generate a global code snippet for deoptimization only, remember // the place to continue after deoptimization. 
@@ -81,7 +82,8 @@ void NamedStoreHandlerCompiler::GenerateStoreViaSetter( ParameterCount actual(1); ParameterCount expected(expected_arguments); __ LoadAccessor(r4, holder, accessor_index, ACCESSOR_SETTER); - __ InvokeFunction(r4, expected, actual, CALL_FUNCTION, NullCallWrapper()); + __ InvokeFunction(r4, expected, actual, CALL_FUNCTION, + CheckDebugStepCallWrapper()); } else { // If we generate a global code snippet for deoptimization only, remember // the place to continue after deoptimization. @@ -141,7 +143,7 @@ void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup( // Check that receiver is a JSObject. __ lbz(scratch0, FieldMemOperand(map, Map::kInstanceTypeOffset)); - __ cmpi(scratch0, Operand(FIRST_SPEC_OBJECT_TYPE)); + __ cmpi(scratch0, Operand(FIRST_JS_RECEIVER_TYPE)); __ blt(miss_label); // Load properties array. @@ -167,11 +169,7 @@ void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup( void NamedLoadHandlerCompiler::GenerateDirectLoadGlobalFunctionPrototype( MacroAssembler* masm, int index, Register result, Label* miss) { - const int offset = Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX); - __ LoadP(result, MemOperand(cp, offset)); - __ LoadP(result, - FieldMemOperand(result, JSGlobalObject::kNativeContextOffset)); - __ LoadP(result, MemOperand(result, Context::SlotOffset(index))); + __ LoadNativeContextSlot(index, result); // Load its initial map. The global functions all have initial maps. 
__ LoadP(result, FieldMemOperand(result, JSFunction::kPrototypeOrInitialMapOffset)); @@ -222,8 +220,10 @@ static void PushInterceptorArguments(MacroAssembler* masm, Register receiver, static void CompileCallLoadPropertyWithInterceptor( MacroAssembler* masm, Register receiver, Register holder, Register name, Handle<JSObject> holder_obj, Runtime::FunctionId id) { + DCHECK(NamedLoadHandlerCompiler::kInterceptorArgsLength == + Runtime::FunctionForId(id)->nargs); PushInterceptorArguments(masm, receiver, holder, name, holder_obj); - __ CallRuntime(id, NamedLoadHandlerCompiler::kInterceptorArgsLength); + __ CallRuntime(id); } @@ -293,6 +293,13 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall( __ LoadP(data, FieldMemOperand(data, CallHandlerInfo::kDataOffset)); } + if (api_call_info->fast_handler()->IsCode()) { + // Just tail call into the fast handler if present. + __ Jump(handle(Code::cast(api_call_info->fast_handler())), + RelocInfo::CODE_TARGET); + return; + } + // Put api_function_address in place. 
Address function_address = v8::ToCData<Address>(api_call_info->callback()); ApiFunction fun(function_address); @@ -307,15 +314,10 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall( static void StoreIC_PushArgs(MacroAssembler* masm) { - if (FLAG_vector_stores) { - __ Push(StoreDescriptor::ReceiverRegister(), - StoreDescriptor::NameRegister(), StoreDescriptor::ValueRegister(), - VectorStoreICDescriptor::SlotRegister(), - VectorStoreICDescriptor::VectorRegister()); - } else { - __ Push(StoreDescriptor::ReceiverRegister(), - StoreDescriptor::NameRegister(), StoreDescriptor::ValueRegister()); - } + __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(), + StoreDescriptor::ValueRegister(), + VectorStoreICDescriptor::SlotRegister(), + VectorStoreICDescriptor::VectorRegister()); } @@ -324,7 +326,7 @@ void NamedStoreHandlerCompiler::GenerateSlow(MacroAssembler* masm) { // The slow case calls into the runtime to complete the store without causing // an IC miss that would otherwise cause a transition to the generic stub. - __ TailCallRuntime(Runtime::kStoreIC_Slow, FLAG_vector_stores ? 5 : 3, 1); + __ TailCallRuntime(Runtime::kStoreIC_Slow); } @@ -333,8 +335,7 @@ void ElementHandlerCompiler::GenerateStoreSlow(MacroAssembler* masm) { // The slow case calls into the runtime to complete the store without causing // an IC miss that would otherwise cause a transition to the generic stub. - __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow, FLAG_vector_stores ? 
5 : 3, - 1); + __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow); } @@ -705,8 +706,7 @@ void NamedLoadHandlerCompiler::GenerateLoadInterceptor(Register holder_reg) { PushInterceptorArguments(masm(), receiver(), holder_reg, this->name(), holder()); - __ TailCallRuntime(Runtime::kLoadPropertyWithInterceptor, - NamedLoadHandlerCompiler::kInterceptorArgsLength, 1); + __ TailCallRuntime(Runtime::kLoadPropertyWithInterceptor); } @@ -730,7 +730,7 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback( __ Push(ip, value()); // Do tail-call to the runtime system. - __ TailCallRuntime(Runtime::kStoreCallbackProperty, 5, 1); + __ TailCallRuntime(Runtime::kStoreCallbackProperty); // Return the generated code. return GetCode(kind(), Code::FAST, name); @@ -742,7 +742,7 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreInterceptor( __ Push(receiver(), this->name(), value()); // Do tail-call to the runtime system. - __ TailCallRuntime(Runtime::kStorePropertyWithInterceptor, 3, 1); + __ TailCallRuntime(Runtime::kStorePropertyWithInterceptor); // Return the generated code. return GetCode(kind(), Code::FAST, name); diff --git a/deps/v8/src/ic/ppc/ic-compiler-ppc.cc b/deps/v8/src/ic/ppc/ic-compiler-ppc.cc index 578b73d40e..c6b36f29f4 100644 --- a/deps/v8/src/ic/ppc/ic-compiler-ppc.cc +++ b/deps/v8/src/ic/ppc/ic-compiler-ppc.cc @@ -20,112 +20,7 @@ void PropertyICCompiler::GenerateRuntimeSetProperty( StoreDescriptor::ValueRegister(), r0); // Do tail-call to runtime routine. - __ TailCallRuntime(Runtime::kSetProperty, 4, 1); -} - - -#undef __ -#define __ ACCESS_MASM(masm()) - - -Handle<Code> PropertyICCompiler::CompilePolymorphic(MapHandleList* maps, - CodeHandleList* handlers, - Handle<Name> name, - Code::StubType type, - IcCheckType check) { - Label miss; - - if (check == PROPERTY && - (kind() == Code::KEYED_LOAD_IC || kind() == Code::KEYED_STORE_IC)) { - // In case we are compiling an IC for dictionary loads or stores, just - // check whether the name is unique. 
- if (name.is_identical_to(isolate()->factory()->normal_ic_symbol())) { - // Keyed loads with dictionaries shouldn't be here, they go generic. - // The DCHECK is to protect assumptions when --vector-ics is on. - DCHECK(kind() != Code::KEYED_LOAD_IC); - Register tmp = scratch1(); - __ JumpIfSmi(this->name(), &miss); - __ LoadP(tmp, FieldMemOperand(this->name(), HeapObject::kMapOffset)); - __ lbz(tmp, FieldMemOperand(tmp, Map::kInstanceTypeOffset)); - __ JumpIfNotUniqueNameInstanceType(tmp, &miss); - } else { - __ Cmpi(this->name(), Operand(name), r0); - __ bne(&miss); - } - } - - Label number_case; - Label* smi_target = IncludesNumberMap(maps) ? &number_case : &miss; - __ JumpIfSmi(receiver(), smi_target); - - // Polymorphic keyed stores may use the map register - Register map_reg = scratch1(); - DCHECK(kind() != Code::KEYED_STORE_IC || - map_reg.is(StoreTransitionDescriptor::MapRegister())); - - int receiver_count = maps->length(); - int number_of_handled_maps = 0; - __ LoadP(map_reg, FieldMemOperand(receiver(), HeapObject::kMapOffset)); - for (int current = 0; current < receiver_count; ++current) { - Handle<Map> map = maps->at(current); - if (!map->is_deprecated()) { - number_of_handled_maps++; - Handle<WeakCell> cell = Map::WeakCellForMap(map); - __ CmpWeakValue(map_reg, cell, scratch2()); - Label next; - __ bne(&next); - if (map->instance_type() == HEAP_NUMBER_TYPE) { - DCHECK(!number_case.is_unused()); - __ bind(&number_case); - } - __ Jump(handlers->at(current), RelocInfo::CODE_TARGET); - __ bind(&next); - } - } - DCHECK(number_of_handled_maps != 0); - - __ bind(&miss); - TailCallBuiltin(masm(), MissBuiltin(kind())); - - // Return the generated code. - InlineCacheState state = - number_of_handled_maps > 1 ? 
POLYMORPHIC : MONOMORPHIC; - return GetCode(kind(), type, name, state); -} - - -Handle<Code> PropertyICCompiler::CompileKeyedStorePolymorphic( - MapHandleList* receiver_maps, CodeHandleList* handler_stubs, - MapHandleList* transitioned_maps) { - Label miss; - __ JumpIfSmi(receiver(), &miss); - - int receiver_count = receiver_maps->length(); - Register map_reg = scratch1(); - __ LoadP(map_reg, FieldMemOperand(receiver(), HeapObject::kMapOffset)); - for (int i = 0; i < receiver_count; ++i) { - Handle<WeakCell> cell = Map::WeakCellForMap(receiver_maps->at(i)); - __ CmpWeakValue(map_reg, cell, scratch2()); - if (transitioned_maps->at(i).is_null()) { - __ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET, eq); - } else { - Label next_map; - __ bne(&next_map); - Handle<WeakCell> cell = Map::WeakCellForMap(transitioned_maps->at(i)); - Register transition_map = scratch1(); - DCHECK(!FLAG_vector_stores && - transition_map.is(StoreTransitionDescriptor::MapRegister())); - __ LoadWeakValue(transition_map, cell, &miss); - __ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET, al); - __ bind(&next_map); - } - } - - __ bind(&miss); - TailCallBuiltin(masm(), MissBuiltin(kind())); - - // Return the generated code. - return GetCode(kind(), Code::NORMAL, factory()->empty_string(), POLYMORPHIC); + __ TailCallRuntime(Runtime::kSetProperty); } diff --git a/deps/v8/src/ic/ppc/ic-ppc.cc b/deps/v8/src/ic/ppc/ic-ppc.cc index ea8239a3e2..78daac2657 100644 --- a/deps/v8/src/ic/ppc/ic-ppc.cc +++ b/deps/v8/src/ic/ppc/ic-ppc.cc @@ -319,8 +319,7 @@ void LoadIC::GenerateMiss(MacroAssembler* masm) { LoadIC_PushArgs(masm); // Perform tail call to the entry. - int arg_count = 4; - __ TailCallRuntime(Runtime::kLoadIC_Miss, arg_count, 1); + __ TailCallRuntime(Runtime::kLoadIC_Miss); } @@ -333,8 +332,7 @@ void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm, // Do tail-call to runtime routine. __ TailCallRuntime(is_strong(language_mode) ? 
Runtime::kGetPropertyStrong - : Runtime::kGetProperty, - 2, 1); + : Runtime::kGetProperty); } @@ -349,8 +347,7 @@ void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) { LoadIC_PushArgs(masm); // Perform tail call to the entry. - int arg_count = 4; - __ TailCallRuntime(Runtime::kKeyedLoadIC_Miss, arg_count, 1); + __ TailCallRuntime(Runtime::kKeyedLoadIC_Miss); } @@ -362,8 +359,7 @@ void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm, // Do tail-call to runtime routine. __ TailCallRuntime(is_strong(language_mode) ? Runtime::kKeyedGetPropertyStrong - : Runtime::kKeyedGetProperty, - 2, 1); + : Runtime::kKeyedGetProperty); } @@ -472,23 +468,17 @@ void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm, static void StoreIC_PushArgs(MacroAssembler* masm) { - if (FLAG_vector_stores) { - __ Push(StoreDescriptor::ReceiverRegister(), - StoreDescriptor::NameRegister(), StoreDescriptor::ValueRegister(), - VectorStoreICDescriptor::SlotRegister(), - VectorStoreICDescriptor::VectorRegister()); - } else { - __ Push(StoreDescriptor::ReceiverRegister(), - StoreDescriptor::NameRegister(), StoreDescriptor::ValueRegister()); - } + __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(), + StoreDescriptor::ValueRegister(), + VectorStoreICDescriptor::SlotRegister(), + VectorStoreICDescriptor::VectorRegister()); } void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) { StoreIC_PushArgs(masm); - int args = FLAG_vector_stores ? 5 : 3; - __ TailCallRuntime(Runtime::kKeyedStoreIC_Miss, args, 1); + __ TailCallRuntime(Runtime::kKeyedStoreIC_Miss); } @@ -503,13 +493,15 @@ static void KeyedStoreGenerateMegamorphicHelper( // Fast case: Do the store, could be either Object or double. 
__ bind(fast_object); - Register scratch_value = r7; + Register scratch = r7; Register address = r8; + DCHECK(!AreAliased(value, key, receiver, receiver_map, elements_map, elements, + scratch, address)); + if (check_map == kCheckMap) { __ LoadP(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset)); - __ mov(scratch_value, - Operand(masm->isolate()->factory()->fixed_array_map())); - __ cmp(elements_map, scratch_value); + __ mov(scratch, Operand(masm->isolate()->factory()->fixed_array_map())); + __ cmp(elements_map, scratch); __ bne(fast_double); } @@ -518,13 +510,11 @@ static void KeyedStoreGenerateMegamorphicHelper( // there may be a callback on the element Label holecheck_passed1; __ addi(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); - __ SmiToPtrArrayOffset(scratch_value, key); - __ LoadPX(scratch_value, MemOperand(address, scratch_value)); - __ Cmpi(scratch_value, Operand(masm->isolate()->factory()->the_hole_value()), - r0); + __ SmiToPtrArrayOffset(scratch, key); + __ LoadPX(scratch, MemOperand(address, scratch)); + __ Cmpi(scratch, Operand(masm->isolate()->factory()->the_hole_value()), r0); __ bne(&holecheck_passed1); - __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch_value, - slow); + __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch, slow); __ bind(&holecheck_passed1); @@ -534,35 +524,32 @@ static void KeyedStoreGenerateMegamorphicHelper( if (increment_length == kIncrementLength) { // Add 1 to receiver->length. - __ AddSmiLiteral(scratch_value, key, Smi::FromInt(1), r0); - __ StoreP(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset), - r0); + __ AddSmiLiteral(scratch, key, Smi::FromInt(1), r0); + __ StoreP(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset), r0); } // It's irrelevant whether array is smi-only or not when writing a smi. 
__ addi(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); - __ SmiToPtrArrayOffset(scratch_value, key); - __ StorePX(value, MemOperand(address, scratch_value)); + __ SmiToPtrArrayOffset(scratch, key); + __ StorePX(value, MemOperand(address, scratch)); __ Ret(); __ bind(&non_smi_value); // Escape to elements kind transition case. - __ CheckFastObjectElements(receiver_map, scratch_value, - &transition_smi_elements); + __ CheckFastObjectElements(receiver_map, scratch, &transition_smi_elements); // Fast elements array, store the value to the elements backing store. __ bind(&finish_object_store); if (increment_length == kIncrementLength) { // Add 1 to receiver->length. - __ AddSmiLiteral(scratch_value, key, Smi::FromInt(1), r0); - __ StoreP(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset), - r0); + __ AddSmiLiteral(scratch, key, Smi::FromInt(1), r0); + __ StoreP(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset), r0); } __ addi(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); - __ SmiToPtrArrayOffset(scratch_value, key); - __ StorePUX(value, MemOperand(address, scratch_value)); + __ SmiToPtrArrayOffset(scratch, key); + __ StorePUX(value, MemOperand(address, scratch)); // Update write barrier for the elements array address. - __ mr(scratch_value, value); // Preserve the value which is returned. - __ RecordWrite(elements, address, scratch_value, kLRHasNotBeenSaved, + __ mr(scratch, value); // Preserve the value which is returned. 
+ __ RecordWrite(elements, address, scratch, kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK); __ Ret(); @@ -580,34 +567,32 @@ static void KeyedStoreGenerateMegamorphicHelper( __ addi(address, elements, Operand((FixedDoubleArray::kHeaderSize + Register::kExponentOffset - kHeapObjectTag))); - __ SmiToDoubleArrayOffset(scratch_value, key); - __ lwzx(scratch_value, MemOperand(address, scratch_value)); - __ Cmpi(scratch_value, Operand(kHoleNanUpper32), r0); + __ SmiToDoubleArrayOffset(scratch, key); + __ lwzx(scratch, MemOperand(address, scratch)); + __ Cmpi(scratch, Operand(kHoleNanUpper32), r0); __ bne(&fast_double_without_map_check); - __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch_value, - slow); + __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch, slow); __ bind(&fast_double_without_map_check); - __ StoreNumberToDoubleElements(value, key, elements, r6, d0, + __ StoreNumberToDoubleElements(value, key, elements, scratch, d0, &transition_double_elements); if (increment_length == kIncrementLength) { // Add 1 to receiver->length. - __ AddSmiLiteral(scratch_value, key, Smi::FromInt(1), r0); - __ StoreP(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset), - r0); + __ AddSmiLiteral(scratch, key, Smi::FromInt(1), r0); + __ StoreP(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset), r0); } __ Ret(); __ bind(&transition_smi_elements); // Transition the array appropriately depending on the value type. - __ LoadP(r7, FieldMemOperand(value, HeapObject::kMapOffset)); - __ CompareRoot(r7, Heap::kHeapNumberMapRootIndex); + __ LoadP(scratch, FieldMemOperand(value, HeapObject::kMapOffset)); + __ CompareRoot(scratch, Heap::kHeapNumberMapRootIndex); __ bne(&non_double_value); // Value is a double. Transition FAST_SMI_ELEMENTS -> // FAST_DOUBLE_ELEMENTS and complete the store. 
__ LoadTransitionedArrayMapConditional( - FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS, receiver_map, r7, slow); + FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS, receiver_map, scratch, slow); AllocationSiteMode mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS); ElementsTransitionGenerator::GenerateSmiToDouble(masm, receiver, key, value, @@ -618,7 +603,7 @@ static void KeyedStoreGenerateMegamorphicHelper( __ bind(&non_double_value); // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, FAST_ELEMENTS, - receiver_map, r7, slow); + receiver_map, scratch, slow); mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS); ElementsTransitionGenerator::GenerateMapChangeElementsTransition( masm, receiver, key, value, receiver_map, mode, slow); @@ -630,7 +615,7 @@ static void KeyedStoreGenerateMegamorphicHelper( // HeapNumber. Make sure that the receiver is a Array with FAST_ELEMENTS and // transition array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS, - receiver_map, r7, slow); + receiver_map, scratch, slow); mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS); ElementsTransitionGenerator::GenerateDoubleToObject( masm, receiver, key, value, receiver_map, mode, slow); @@ -704,19 +689,17 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm, __ lbz(r7, FieldMemOperand(r7, Map::kInstanceTypeOffset)); __ JumpIfNotUniqueNameInstanceType(r7, &slow); - if (FLAG_vector_stores) { - // The handlers in the stub cache expect a vector and slot. Since we won't - // change the IC from any downstream misses, a dummy vector can be used. 
- Register vector = VectorStoreICDescriptor::VectorRegister(); - Register slot = VectorStoreICDescriptor::SlotRegister(); - DCHECK(!AreAliased(vector, slot, r8, r9, r10, r11)); - Handle<TypeFeedbackVector> dummy_vector = - TypeFeedbackVector::DummyVector(masm->isolate()); - int slot_index = dummy_vector->GetIndex( - FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedStoreICSlot)); - __ LoadRoot(vector, Heap::kDummyVectorRootIndex); - __ LoadSmiLiteral(slot, Smi::FromInt(slot_index)); - } + // The handlers in the stub cache expect a vector and slot. Since we won't + // change the IC from any downstream misses, a dummy vector can be used. + Register vector = VectorStoreICDescriptor::VectorRegister(); + Register slot = VectorStoreICDescriptor::SlotRegister(); + DCHECK(!AreAliased(vector, slot, r8, r9, r10, r11)); + Handle<TypeFeedbackVector> dummy_vector = + TypeFeedbackVector::DummyVector(masm->isolate()); + int slot_index = dummy_vector->GetIndex( + FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedStoreICSlot)); + __ LoadRoot(vector, Heap::kDummyVectorRootIndex); + __ LoadSmiLiteral(slot, Smi::FromInt(slot_index)); Code::Flags flags = Code::RemoveTypeAndHolderFromFlags( Code::ComputeHandlerFlags(Code::STORE_IC)); @@ -794,8 +777,7 @@ void StoreIC::GenerateMiss(MacroAssembler* masm) { StoreIC_PushArgs(masm); // Perform tail call to the entry. - int args = FLAG_vector_stores ? 
5 : 3; - __ TailCallRuntime(Runtime::kStoreIC_Miss, args, 1); + __ TailCallRuntime(Runtime::kStoreIC_Miss); } @@ -862,7 +844,8 @@ bool CompareIC::HasInlinedSmiCode(Address address) { // // This code is paired with the JumpPatchSite class in full-codegen-ppc.cc // -void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) { +void PatchInlinedSmiCode(Isolate* isolate, Address address, + InlinedSmiCheck check) { Address cmp_instruction_address = Assembler::return_address_from_call_start(address); @@ -900,7 +883,7 @@ void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) { // rlwinm(r0, value, 0, 31, 31, SetRC); // bc(label, BT/BF, 2) // and vice-versa to be disabled again. - CodePatcher patcher(patch_address, 2); + CodePatcher patcher(isolate, patch_address, 2); Register reg = Assembler::GetRA(instr_at_patch); if (check == ENABLE_INLINED_SMI_CHECK) { DCHECK(Assembler::IsCmpRegister(instr_at_patch)); diff --git a/deps/v8/src/ic/x64/access-compiler-x64.cc b/deps/v8/src/ic/x64/access-compiler-x64.cc index 85b44ef475..b8d50b3d2c 100644 --- a/deps/v8/src/ic/x64/access-compiler-x64.cc +++ b/deps/v8/src/ic/x64/access-compiler-x64.cc @@ -31,8 +31,6 @@ Register* PropertyAccessCompiler::store_calling_convention() { // receiver, name, scratch1, scratch2, scratch3. Register receiver = StoreDescriptor::ReceiverRegister(); Register name = StoreDescriptor::NameRegister(); - DCHECK(FLAG_vector_stores || - rbx.is(StoreTransitionDescriptor::MapRegister())); static Register registers[] = {receiver, name, rbx, rdi, r8}; return registers; } diff --git a/deps/v8/src/ic/x64/handler-compiler-x64.cc b/deps/v8/src/ic/x64/handler-compiler-x64.cc index 6bc3aafa89..c09eca68dd 100644 --- a/deps/v8/src/ic/x64/handler-compiler-x64.cc +++ b/deps/v8/src/ic/x64/handler-compiler-x64.cc @@ -56,7 +56,7 @@ void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup( __ j(not_zero, miss_label); // Check that receiver is a JSObject. 
- __ CmpInstanceType(scratch0, FIRST_SPEC_OBJECT_TYPE); + __ CmpInstanceType(scratch0, FIRST_JS_RECEIVER_TYPE); __ j(below, miss_label); // Load properties array. @@ -78,10 +78,7 @@ void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup( void NamedLoadHandlerCompiler::GenerateDirectLoadGlobalFunctionPrototype( MacroAssembler* masm, int index, Register result, Label* miss) { - const int offset = Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX); - __ movp(result, Operand(rsi, offset)); - __ movp(result, FieldOperand(result, JSGlobalObject::kNativeContextOffset)); - __ movp(result, Operand(result, Context::SlotOffset(index))); + __ LoadNativeContextSlot(index, result); // Load its initial map. The global functions all have initial maps. __ movp(result, FieldOperand(result, JSFunction::kPrototypeOrInitialMapOffset)); @@ -115,8 +112,10 @@ static void PushInterceptorArguments(MacroAssembler* masm, Register receiver, static void CompileCallLoadPropertyWithInterceptor( MacroAssembler* masm, Register receiver, Register holder, Register name, Handle<JSObject> holder_obj, Runtime::FunctionId id) { + DCHECK(NamedLoadHandlerCompiler::kInterceptorArgsLength == + Runtime::FunctionForId(id)->nargs); PushInterceptorArguments(masm, receiver, holder, name, holder_obj); - __ CallRuntime(id, NamedLoadHandlerCompiler::kInterceptorArgsLength); + __ CallRuntime(id); } @@ -188,6 +187,13 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall( __ movp(data, FieldOperand(data, CallHandlerInfo::kDataOffset)); } + if (api_call_info->fast_handler()->IsCode()) { + // Just tail call into the fast handler if present. + __ Jump(handle(Code::cast(api_call_info->fast_handler())), + RelocInfo::CODE_TARGET); + return; + } + // Put api_function_address in place. 
Address function_address = v8::ToCData<Address>(api_call_info->callback()); __ Move(api_function_address, function_address, @@ -241,8 +247,8 @@ void NamedStoreHandlerCompiler::GenerateStoreViaSetter( ParameterCount actual(1); ParameterCount expected(expected_arguments); __ LoadAccessor(rdi, holder, accessor_index, ACCESSOR_SETTER); - __ InvokeFunction(rdi, expected, actual, CALL_FUNCTION, - NullCallWrapper()); + __ InvokeFunction(rdi, no_reg, expected, actual, CALL_FUNCTION, + CheckDebugStepCallWrapper()); } else { // If we generate a global code snippet for deoptimization only, remember // the place to continue after deoptimization. @@ -284,8 +290,8 @@ void NamedLoadHandlerCompiler::GenerateLoadViaGetter( ParameterCount actual(0); ParameterCount expected(expected_arguments); __ LoadAccessor(rdi, holder, accessor_index, ACCESSOR_GETTER); - __ InvokeFunction(rdi, expected, actual, CALL_FUNCTION, - NullCallWrapper()); + __ InvokeFunction(rdi, no_reg, expected, actual, CALL_FUNCTION, + CheckDebugStepCallWrapper()); } else { // If we generate a global code snippet for deoptimization only, remember // the place to continue after deoptimization. 
@@ -304,26 +310,16 @@ static void StoreIC_PushArgs(MacroAssembler* masm) { Register name = StoreDescriptor::NameRegister(); Register value = StoreDescriptor::ValueRegister(); - if (FLAG_vector_stores) { - Register slot = VectorStoreICDescriptor::SlotRegister(); - Register vector = VectorStoreICDescriptor::VectorRegister(); - - __ PopReturnAddressTo(r11); - __ Push(receiver); - __ Push(name); - __ Push(value); - __ Push(slot); - __ Push(vector); - __ PushReturnAddressFrom(r11); - } else { - DCHECK(!rbx.is(receiver) && !rbx.is(name) && !rbx.is(value)); + Register slot = VectorStoreICDescriptor::SlotRegister(); + Register vector = VectorStoreICDescriptor::VectorRegister(); - __ PopReturnAddressTo(rbx); - __ Push(receiver); - __ Push(name); - __ Push(value); - __ PushReturnAddressFrom(rbx); - } + __ PopReturnAddressTo(r11); + __ Push(receiver); + __ Push(name); + __ Push(value); + __ Push(slot); + __ Push(vector); + __ PushReturnAddressFrom(r11); } @@ -332,7 +328,7 @@ void NamedStoreHandlerCompiler::GenerateSlow(MacroAssembler* masm) { StoreIC_PushArgs(masm); // Do tail-call to runtime routine. - __ TailCallRuntime(Runtime::kStoreIC_Slow, FLAG_vector_stores ? 5 : 3, 1); + __ TailCallRuntime(Runtime::kStoreIC_Slow); } @@ -341,8 +337,7 @@ void ElementHandlerCompiler::GenerateStoreSlow(MacroAssembler* masm) { StoreIC_PushArgs(masm); // Do tail-call to runtime routine. - __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow, FLAG_vector_stores ? 
5 : 3, - 1); + __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow); } @@ -722,8 +717,7 @@ void NamedLoadHandlerCompiler::GenerateLoadInterceptor(Register holder_reg) { holder()); __ PushReturnAddressFrom(scratch2()); - __ TailCallRuntime(Runtime::kLoadPropertyWithInterceptor, - NamedLoadHandlerCompiler::kInterceptorArgsLength, 1); + __ TailCallRuntime(Runtime::kLoadPropertyWithInterceptor); } @@ -748,7 +742,7 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback( __ PushReturnAddressFrom(scratch1()); // Do tail-call to the runtime system. - __ TailCallRuntime(Runtime::kStoreCallbackProperty, 5, 1); + __ TailCallRuntime(Runtime::kStoreCallbackProperty); // Return the generated code. return GetCode(kind(), Code::FAST, name); @@ -764,7 +758,7 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreInterceptor( __ PushReturnAddressFrom(scratch1()); // Do tail-call to the runtime system. - __ TailCallRuntime(Runtime::kStorePropertyWithInterceptor, 3, 1); + __ TailCallRuntime(Runtime::kStorePropertyWithInterceptor); // Return the generated code. return GetCode(kind(), Code::FAST, name); diff --git a/deps/v8/src/ic/x64/ic-compiler-x64.cc b/deps/v8/src/ic/x64/ic-compiler-x64.cc index fd92cca570..9d734338bb 100644 --- a/deps/v8/src/ic/x64/ic-compiler-x64.cc +++ b/deps/v8/src/ic/x64/ic-compiler-x64.cc @@ -28,111 +28,7 @@ void PropertyICCompiler::GenerateRuntimeSetProperty( __ PushReturnAddressFrom(rbx); // Do tail-call to runtime routine. 
- __ TailCallRuntime(Runtime::kSetProperty, 4, 1); -} - - -#undef __ -#define __ ACCESS_MASM(masm()) - - -Handle<Code> PropertyICCompiler::CompileKeyedStorePolymorphic( - MapHandleList* receiver_maps, CodeHandleList* handler_stubs, - MapHandleList* transitioned_maps) { - Label miss; - __ JumpIfSmi(receiver(), &miss); - - Register map_reg = scratch1(); - __ movp(map_reg, FieldOperand(receiver(), HeapObject::kMapOffset)); - int receiver_count = receiver_maps->length(); - for (int i = 0; i < receiver_count; ++i) { - Handle<WeakCell> cell = Map::WeakCellForMap(receiver_maps->at(i)); - // Check map and tail call if there's a match - __ CmpWeakValue(map_reg, cell, scratch2()); - if (transitioned_maps->at(i).is_null()) { - __ j(equal, handler_stubs->at(i), RelocInfo::CODE_TARGET); - } else { - Label next_map; - __ j(not_equal, &next_map, Label::kNear); - Handle<WeakCell> cell = Map::WeakCellForMap(transitioned_maps->at(i)); - Register transition_map = scratch1(); - DCHECK(!FLAG_vector_stores && - transition_map.is(StoreTransitionDescriptor::MapRegister())); - __ LoadWeakValue(transition_map, cell, &miss); - __ jmp(handler_stubs->at(i), RelocInfo::CODE_TARGET); - __ bind(&next_map); - } - } - - __ bind(&miss); - - TailCallBuiltin(masm(), MissBuiltin(kind())); - - // Return the generated code. - return GetCode(kind(), Code::NORMAL, factory()->empty_string(), POLYMORPHIC); -} - - -Handle<Code> PropertyICCompiler::CompilePolymorphic(MapHandleList* maps, - CodeHandleList* handlers, - Handle<Name> name, - Code::StubType type, - IcCheckType check) { - Label miss; - - if (check == PROPERTY && - (kind() == Code::KEYED_LOAD_IC || kind() == Code::KEYED_STORE_IC)) { - // In case we are compiling an IC for dictionary loads or stores, just - // check whether the name is unique. - if (name.is_identical_to(isolate()->factory()->normal_ic_symbol())) { - // Keyed loads with dictionaries shouldn't be here, they go generic. - // The DCHECK is to protect assumptions when --vector-ics is on. 
- DCHECK(kind() != Code::KEYED_LOAD_IC); - Register tmp = scratch1(); - __ JumpIfSmi(this->name(), &miss); - __ movp(tmp, FieldOperand(this->name(), HeapObject::kMapOffset)); - __ movzxbp(tmp, FieldOperand(tmp, Map::kInstanceTypeOffset)); - __ JumpIfNotUniqueNameInstanceType(tmp, &miss); - } else { - __ Cmp(this->name(), name); - __ j(not_equal, &miss); - } - } - - Label number_case; - Label* smi_target = IncludesNumberMap(maps) ? &number_case : &miss; - __ JumpIfSmi(receiver(), smi_target); - - // Polymorphic keyed stores may use the map register - Register map_reg = scratch1(); - DCHECK(kind() != Code::KEYED_STORE_IC || - map_reg.is(StoreTransitionDescriptor::MapRegister())); - __ movp(map_reg, FieldOperand(receiver(), HeapObject::kMapOffset)); - int receiver_count = maps->length(); - int number_of_handled_maps = 0; - for (int current = 0; current < receiver_count; ++current) { - Handle<Map> map = maps->at(current); - if (!map->is_deprecated()) { - number_of_handled_maps++; - Handle<WeakCell> cell = Map::WeakCellForMap(map); - // Check map and tail call if there's a match - __ CmpWeakValue(map_reg, cell, scratch2()); - if (map->instance_type() == HEAP_NUMBER_TYPE) { - DCHECK(!number_case.is_unused()); - __ bind(&number_case); - } - __ j(equal, handlers->at(current), RelocInfo::CODE_TARGET); - } - } - DCHECK(number_of_handled_maps > 0); - - __ bind(&miss); - TailCallBuiltin(masm(), MissBuiltin(kind())); - - // Return the generated code. - InlineCacheState state = - number_of_handled_maps > 1 ? 
POLYMORPHIC : MONOMORPHIC; - return GetCode(kind(), type, name, state); + __ TailCallRuntime(Runtime::kSetProperty); } diff --git a/deps/v8/src/ic/x64/ic-x64.cc b/deps/v8/src/ic/x64/ic-x64.cc index 3fc8747c66..bf4ad96f69 100644 --- a/deps/v8/src/ic/x64/ic-x64.cc +++ b/deps/v8/src/ic/x64/ic-x64.cc @@ -564,18 +564,16 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm, __ movzxbp(r9, FieldOperand(r9, Map::kInstanceTypeOffset)); __ JumpIfNotUniqueNameInstanceType(r9, &slow_with_tagged_index); - if (FLAG_vector_stores) { - Register vector = VectorStoreICDescriptor::VectorRegister(); - Register slot = VectorStoreICDescriptor::SlotRegister(); - // The handlers in the stub cache expect a vector and slot. Since we won't - // change the IC from any downstream misses, a dummy vector can be used. - Handle<TypeFeedbackVector> dummy_vector = - TypeFeedbackVector::DummyVector(masm->isolate()); - int slot_index = dummy_vector->GetIndex( - FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedStoreICSlot)); - __ Move(vector, dummy_vector); - __ Move(slot, Smi::FromInt(slot_index)); - } + Register vector = VectorStoreICDescriptor::VectorRegister(); + Register slot = VectorStoreICDescriptor::SlotRegister(); + // The handlers in the stub cache expect a vector and slot. Since we won't + // change the IC from any downstream misses, a dummy vector can be used. + Handle<TypeFeedbackVector> dummy_vector = + TypeFeedbackVector::DummyVector(masm->isolate()); + int slot_index = dummy_vector->GetIndex( + FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedStoreICSlot)); + __ Move(vector, dummy_vector); + __ Move(slot, Smi::FromInt(slot_index)); Code::Flags flags = Code::RemoveTypeAndHolderFromFlags( Code::ComputeHandlerFlags(Code::STORE_IC)); @@ -674,8 +672,7 @@ void LoadIC::GenerateMiss(MacroAssembler* masm) { LoadIC_PushArgs(masm); // Perform tail call to the entry. 
- int arg_count = 4; - __ TailCallRuntime(Runtime::kLoadIC_Miss, arg_count, 1); + __ TailCallRuntime(Runtime::kLoadIC_Miss); } @@ -694,8 +691,7 @@ void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm, // Do tail-call to runtime routine. __ TailCallRuntime(is_strong(language_mode) ? Runtime::kGetPropertyStrong - : Runtime::kGetProperty, - 2, 1); + : Runtime::kGetProperty); } @@ -707,8 +703,7 @@ void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) { LoadIC_PushArgs(masm); // Perform tail call to the entry. - int arg_count = 4; - __ TailCallRuntime(Runtime::kKeyedLoadIC_Miss, arg_count, 1); + __ TailCallRuntime(Runtime::kKeyedLoadIC_Miss); } @@ -727,28 +722,13 @@ void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm, // Do tail-call to runtime routine. __ TailCallRuntime(is_strong(language_mode) ? Runtime::kKeyedGetPropertyStrong - : Runtime::kKeyedGetProperty, - 2, 1); + : Runtime::kKeyedGetProperty); } void StoreIC::GenerateMegamorphic(MacroAssembler* masm) { - if (FLAG_vector_stores) { - // This shouldn't be called. - __ int3(); - return; - } - - // The return address is on the stack. - // Get the receiver from the stack and probe the stub cache. - Code::Flags flags = Code::RemoveTypeAndHolderFromFlags( - Code::ComputeHandlerFlags(Code::STORE_IC)); - masm->isolate()->stub_cache()->GenerateProbe( - masm, Code::STORE_IC, flags, StoreDescriptor::ReceiverRegister(), - StoreDescriptor::NameRegister(), rbx, no_reg); - - // Cache miss: Jump to runtime. - GenerateMiss(masm); + // This shouldn't be called. 
+ __ int3(); } @@ -763,13 +743,11 @@ static void StoreIC_PushArgs(MacroAssembler* masm) { __ Push(receiver); __ Push(name); __ Push(value); - if (FLAG_vector_stores) { - Register slot = VectorStoreICDescriptor::SlotRegister(); - Register vector = VectorStoreICDescriptor::VectorRegister(); - DCHECK(!temp.is(slot) && !temp.is(vector)); - __ Push(slot); - __ Push(vector); - } + Register slot = VectorStoreICDescriptor::SlotRegister(); + Register vector = VectorStoreICDescriptor::VectorRegister(); + DCHECK(!temp.is(slot) && !temp.is(vector)); + __ Push(slot); + __ Push(vector); __ PushReturnAddressFrom(temp); } @@ -779,8 +757,7 @@ void StoreIC::GenerateMiss(MacroAssembler* masm) { StoreIC_PushArgs(masm); // Perform tail call to the entry. - int args = FLAG_vector_stores ? 5 : 3; - __ TailCallRuntime(Runtime::kStoreIC_Miss, args, 1); + __ TailCallRuntime(Runtime::kStoreIC_Miss); } @@ -789,8 +766,7 @@ void StoreIC::GenerateNormal(MacroAssembler* masm) { Register name = StoreDescriptor::NameRegister(); Register value = StoreDescriptor::ValueRegister(); Register dictionary = r11; - DCHECK(!FLAG_vector_stores || - !AreAliased(dictionary, VectorStoreICDescriptor::VectorRegister(), + DCHECK(!AreAliased(dictionary, VectorStoreICDescriptor::VectorRegister(), VectorStoreICDescriptor::SlotRegister())); Label miss; @@ -812,8 +788,7 @@ void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) { StoreIC_PushArgs(masm); // Do tail-call to runtime routine. - int args = FLAG_vector_stores ? 5 : 3; - __ TailCallRuntime(Runtime::kKeyedStoreIC_Miss, args, 1); + __ TailCallRuntime(Runtime::kKeyedStoreIC_Miss); } @@ -851,7 +826,8 @@ bool CompareIC::HasInlinedSmiCode(Address address) { } -void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) { +void PatchInlinedSmiCode(Isolate* isolate, Address address, + InlinedSmiCheck check) { // The address of the instruction following the call. 
Address test_instruction_address = address + Assembler::kCallTargetAddressOffset; diff --git a/deps/v8/src/ic/x87/access-compiler-x87.cc b/deps/v8/src/ic/x87/access-compiler-x87.cc index a80c649e45..2c1b942756 100644 --- a/deps/v8/src/ic/x87/access-compiler-x87.cc +++ b/deps/v8/src/ic/x87/access-compiler-x87.cc @@ -30,8 +30,6 @@ Register* PropertyAccessCompiler::store_calling_convention() { // receiver, name, scratch1, scratch2, scratch3. Register receiver = StoreDescriptor::ReceiverRegister(); Register name = StoreDescriptor::NameRegister(); - DCHECK(FLAG_vector_stores || - ebx.is(StoreTransitionDescriptor::MapRegister())); static Register registers[] = {receiver, name, ebx, edi, no_reg}; return registers; } diff --git a/deps/v8/src/ic/x87/handler-compiler-x87.cc b/deps/v8/src/ic/x87/handler-compiler-x87.cc index bb3b25a47f..cc43ed298d 100644 --- a/deps/v8/src/ic/x87/handler-compiler-x87.cc +++ b/deps/v8/src/ic/x87/handler-compiler-x87.cc @@ -36,7 +36,7 @@ void NamedLoadHandlerCompiler::GenerateLoadViaGetter( ParameterCount expected(expected_arguments); __ LoadAccessor(edi, holder, accessor_index, ACCESSOR_GETTER); __ InvokeFunction(edi, expected, actual, CALL_FUNCTION, - NullCallWrapper()); + CheckDebugStepCallWrapper()); } else { // If we generate a global code snippet for deoptimization only, remember // the place to continue after deoptimization. @@ -92,7 +92,7 @@ void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup( __ j(not_zero, miss_label); // Check that receiver is a JSObject. - __ CmpInstanceType(scratch0, FIRST_SPEC_OBJECT_TYPE); + __ CmpInstanceType(scratch0, FIRST_JS_RECEIVER_TYPE); __ j(below, miss_label); // Load properties array. 
@@ -114,10 +114,7 @@ void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup( void NamedLoadHandlerCompiler::GenerateDirectLoadGlobalFunctionPrototype( MacroAssembler* masm, int index, Register result, Label* miss) { - const int offset = Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX); - __ mov(result, Operand(esi, offset)); - __ mov(result, FieldOperand(result, JSGlobalObject::kNativeContextOffset)); - __ mov(result, Operand(result, Context::SlotOffset(index))); + __ LoadGlobalFunction(index, result); // Load its initial map. The global functions all have initial maps. __ mov(result, FieldOperand(result, JSFunction::kPrototypeOrInitialMapOffset)); @@ -206,6 +203,12 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall( __ mov(data, FieldOperand(data, CallHandlerInfo::kDataOffset)); } + if (api_call_info->fast_handler()->IsCode()) { + // Just tail call into the code. + __ Jump(handle(Code::cast(api_call_info->fast_handler())), + RelocInfo::CODE_TARGET); + return; + } // Put api_function_address in place. Address function_address = v8::ToCData<Address>(api_call_info->callback()); __ mov(api_function_address, Immediate(function_address)); @@ -261,7 +264,7 @@ void NamedStoreHandlerCompiler::GenerateStoreViaSetter( ParameterCount expected(expected_arguments); __ LoadAccessor(edi, holder, accessor_index, ACCESSOR_SETTER); __ InvokeFunction(edi, expected, actual, CALL_FUNCTION, - NullCallWrapper()); + CheckDebugStepCallWrapper()); } else { // If we generate a global code snippet for deoptimization only, remember // the place to continue after deoptimization. 
@@ -294,8 +297,10 @@ static void PushInterceptorArguments(MacroAssembler* masm, Register receiver, static void CompileCallLoadPropertyWithInterceptor( MacroAssembler* masm, Register receiver, Register holder, Register name, Handle<JSObject> holder_obj, Runtime::FunctionId id) { + DCHECK(NamedLoadHandlerCompiler::kInterceptorArgsLength == + Runtime::FunctionForId(id)->nargs); PushInterceptorArguments(masm, receiver, holder, name, holder_obj); - __ CallRuntime(id, NamedLoadHandlerCompiler::kInterceptorArgsLength); + __ CallRuntime(id); } @@ -303,25 +308,15 @@ static void StoreIC_PushArgs(MacroAssembler* masm) { Register receiver = StoreDescriptor::ReceiverRegister(); Register name = StoreDescriptor::NameRegister(); Register value = StoreDescriptor::ValueRegister(); + Register slot = VectorStoreICDescriptor::SlotRegister(); + Register vector = VectorStoreICDescriptor::VectorRegister(); - if (FLAG_vector_stores) { - Register slot = VectorStoreICDescriptor::SlotRegister(); - Register vector = VectorStoreICDescriptor::VectorRegister(); - - __ xchg(receiver, Operand(esp, 0)); - __ push(name); - __ push(value); - __ push(slot); - __ push(vector); - __ push(receiver); // which contains the return address. - } else { - DCHECK(!ebx.is(receiver) && !ebx.is(name) && !ebx.is(value)); - __ pop(ebx); - __ push(receiver); - __ push(name); - __ push(value); - __ push(ebx); - } + __ xchg(receiver, Operand(esp, 0)); + __ push(name); + __ push(value); + __ push(slot); + __ push(vector); + __ push(receiver); // which contains the return address. } @@ -330,7 +325,7 @@ void NamedStoreHandlerCompiler::GenerateSlow(MacroAssembler* masm) { StoreIC_PushArgs(masm); // Do tail-call to runtime routine. - __ TailCallRuntime(Runtime::kStoreIC_Slow, FLAG_vector_stores ? 5 : 3, 1); + __ TailCallRuntime(Runtime::kStoreIC_Slow); } @@ -339,8 +334,7 @@ void ElementHandlerCompiler::GenerateStoreSlow(MacroAssembler* masm) { StoreIC_PushArgs(masm); // Do tail-call to runtime routine. 
- __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow, FLAG_vector_stores ? 5 : 3, - 1); + __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow); } @@ -732,8 +726,7 @@ void NamedLoadHandlerCompiler::GenerateLoadInterceptor(Register holder_reg) { holder()); __ push(scratch2()); // restore old return address - __ TailCallRuntime(Runtime::kLoadPropertyWithInterceptor, - NamedLoadHandlerCompiler::kInterceptorArgsLength, 1); + __ TailCallRuntime(Runtime::kLoadPropertyWithInterceptor); } @@ -758,7 +751,7 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback( __ push(scratch1()); // restore return address // Do tail-call to the runtime system. - __ TailCallRuntime(Runtime::kStoreCallbackProperty, 5, 1); + __ TailCallRuntime(Runtime::kStoreCallbackProperty); // Return the generated code. return GetCode(kind(), Code::FAST, name); @@ -774,7 +767,7 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreInterceptor( __ push(scratch1()); // restore return address // Do tail-call to the runtime system. - __ TailCallRuntime(Runtime::kStorePropertyWithInterceptor, 3, 1); + __ TailCallRuntime(Runtime::kStorePropertyWithInterceptor); // Return the generated code. return GetCode(kind(), Code::FAST, name); diff --git a/deps/v8/src/ic/x87/ic-compiler-x87.cc b/deps/v8/src/ic/x87/ic-compiler-x87.cc index d29e32108b..9edf63b722 100644 --- a/deps/v8/src/ic/x87/ic-compiler-x87.cc +++ b/deps/v8/src/ic/x87/ic-compiler-x87.cc @@ -27,104 +27,7 @@ void PropertyICCompiler::GenerateRuntimeSetProperty( __ push(ebx); // return address // Do tail-call to runtime routine. 
- __ TailCallRuntime(Runtime::kSetProperty, 4, 1); -} - - -#undef __ -#define __ ACCESS_MASM(masm()) - -Handle<Code> PropertyICCompiler::CompilePolymorphic(MapHandleList* maps, - CodeHandleList* handlers, - Handle<Name> name, - Code::StubType type, - IcCheckType check) { - Label miss; - - if (check == PROPERTY && - (kind() == Code::KEYED_STORE_IC || kind() == Code::KEYED_LOAD_IC)) { - // In case we are compiling an IC for dictionary loads or stores, just - // check whether the name is unique. - if (name.is_identical_to(isolate()->factory()->normal_ic_symbol())) { - // Keyed loads with dictionaries shouldn't be here, they go generic. - // The DCHECK is to protect assumptions when --vector-ics is on. - DCHECK(kind() != Code::KEYED_LOAD_IC); - Register tmp = scratch1(); - __ JumpIfSmi(this->name(), &miss); - __ mov(tmp, FieldOperand(this->name(), HeapObject::kMapOffset)); - __ movzx_b(tmp, FieldOperand(tmp, Map::kInstanceTypeOffset)); - __ JumpIfNotUniqueNameInstanceType(tmp, &miss); - } else { - __ cmp(this->name(), Immediate(name)); - __ j(not_equal, &miss); - } - } - - Label number_case; - Label* smi_target = IncludesNumberMap(maps) ? 
&number_case : &miss; - __ JumpIfSmi(receiver(), smi_target); - - // Polymorphic keyed stores may use the map register - Register map_reg = scratch1(); - DCHECK(kind() != Code::KEYED_STORE_IC || - map_reg.is(StoreTransitionDescriptor::MapRegister())); - __ mov(map_reg, FieldOperand(receiver(), HeapObject::kMapOffset)); - int receiver_count = maps->length(); - int number_of_handled_maps = 0; - for (int current = 0; current < receiver_count; ++current) { - Handle<Map> map = maps->at(current); - if (!map->is_deprecated()) { - number_of_handled_maps++; - Handle<WeakCell> cell = Map::WeakCellForMap(map); - __ CmpWeakValue(map_reg, cell, scratch2()); - if (map->instance_type() == HEAP_NUMBER_TYPE) { - DCHECK(!number_case.is_unused()); - __ bind(&number_case); - } - __ j(equal, handlers->at(current)); - } - } - DCHECK(number_of_handled_maps != 0); - - __ bind(&miss); - TailCallBuiltin(masm(), MissBuiltin(kind())); - - // Return the generated code. - InlineCacheState state = - number_of_handled_maps > 1 ? 
POLYMORPHIC : MONOMORPHIC; - return GetCode(kind(), type, name, state); -} - - -Handle<Code> PropertyICCompiler::CompileKeyedStorePolymorphic( - MapHandleList* receiver_maps, CodeHandleList* handler_stubs, - MapHandleList* transitioned_maps) { - Label miss; - __ JumpIfSmi(receiver(), &miss, Label::kNear); - Register map_reg = scratch1(); - __ mov(map_reg, FieldOperand(receiver(), HeapObject::kMapOffset)); - for (int i = 0; i < receiver_maps->length(); ++i) { - Handle<WeakCell> cell = Map::WeakCellForMap(receiver_maps->at(i)); - __ CmpWeakValue(map_reg, cell, scratch2()); - if (transitioned_maps->at(i).is_null()) { - __ j(equal, handler_stubs->at(i)); - } else { - Label next_map; - __ j(not_equal, &next_map, Label::kNear); - Handle<WeakCell> cell = Map::WeakCellForMap(transitioned_maps->at(i)); - Register transition_map = scratch1(); - DCHECK(!FLAG_vector_stores && - transition_map.is(StoreTransitionDescriptor::MapRegister())); - __ LoadWeakValue(transition_map, cell, &miss); - __ jmp(handler_stubs->at(i), RelocInfo::CODE_TARGET); - __ bind(&next_map); - } - } - __ bind(&miss); - TailCallBuiltin(masm(), MissBuiltin(kind())); - - // Return the generated code. - return GetCode(kind(), Code::NORMAL, factory()->empty_string(), POLYMORPHIC); + __ TailCallRuntime(Runtime::kSetProperty); } diff --git a/deps/v8/src/ic/x87/ic-x87.cc b/deps/v8/src/ic/x87/ic-x87.cc index 6ef5b635c7..d4cc3ce80a 100644 --- a/deps/v8/src/ic/x87/ic-x87.cc +++ b/deps/v8/src/ic/x87/ic-x87.cc @@ -561,26 +561,22 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm, __ JumpIfNotUniqueNameInstanceType(ebx, &slow); - if (FLAG_vector_stores) { - // The handlers in the stub cache expect a vector and slot. Since we won't - // change the IC from any downstream misses, a dummy vector can be used. 
- Handle<TypeFeedbackVector> dummy_vector = - TypeFeedbackVector::DummyVector(masm->isolate()); - int slot = dummy_vector->GetIndex( - FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedStoreICSlot)); - __ push(Immediate(Smi::FromInt(slot))); - __ push(Immediate(dummy_vector)); - } + // The handlers in the stub cache expect a vector and slot. Since we won't + // change the IC from any downstream misses, a dummy vector can be used. + Handle<TypeFeedbackVector> dummy_vector = + TypeFeedbackVector::DummyVector(masm->isolate()); + int slot = dummy_vector->GetIndex( + FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedStoreICSlot)); + __ push(Immediate(Smi::FromInt(slot))); + __ push(Immediate(dummy_vector)); Code::Flags flags = Code::RemoveTypeAndHolderFromFlags( Code::ComputeHandlerFlags(Code::STORE_IC)); masm->isolate()->stub_cache()->GenerateProbe(masm, Code::STORE_IC, flags, receiver, key, edi, no_reg); - if (FLAG_vector_stores) { - __ pop(VectorStoreICDescriptor::VectorRegister()); - __ pop(VectorStoreICDescriptor::SlotRegister()); - } + __ pop(VectorStoreICDescriptor::VectorRegister()); + __ pop(VectorStoreICDescriptor::SlotRegister()); // Cache miss. __ jmp(&miss); @@ -676,8 +672,7 @@ void LoadIC::GenerateMiss(MacroAssembler* masm) { LoadIC_PushArgs(masm); // Perform tail call to the entry. - int arg_count = 4; - __ TailCallRuntime(Runtime::kLoadIC_Miss, arg_count, 1); + __ TailCallRuntime(Runtime::kLoadIC_Miss); } @@ -695,8 +690,7 @@ void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm, // Do tail-call to runtime routine. __ TailCallRuntime(is_strong(language_mode) ? Runtime::kGetPropertyStrong - : Runtime::kGetProperty, - 2, 1); + : Runtime::kGetProperty); } @@ -707,8 +701,7 @@ void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) { LoadIC_PushArgs(masm); // Perform tail call to the entry. 
- int arg_count = 4; - __ TailCallRuntime(Runtime::kKeyedLoadIC_Miss, arg_count, 1); + __ TailCallRuntime(Runtime::kKeyedLoadIC_Miss); } @@ -726,27 +719,15 @@ void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm, // Do tail-call to runtime routine. __ TailCallRuntime(is_strong(language_mode) ? Runtime::kKeyedGetPropertyStrong - : Runtime::kKeyedGetProperty, - 2, 1); + : Runtime::kKeyedGetProperty); } void StoreIC::GenerateMegamorphic(MacroAssembler* masm) { - if (FLAG_vector_stores) { - // This shouldn't be called. - __ int3(); - return; - } - - // Return address is on the stack. - Code::Flags flags = Code::RemoveTypeAndHolderFromFlags( - Code::ComputeHandlerFlags(Code::STORE_IC)); - masm->isolate()->stub_cache()->GenerateProbe( - masm, Code::STORE_IC, flags, StoreDescriptor::ReceiverRegister(), - StoreDescriptor::NameRegister(), ebx, no_reg); - - // Cache miss: Jump to runtime. - GenerateMiss(masm); + // This shouldn't be called. + // TODO(mvstanton): remove this method. + __ int3(); + return; } @@ -754,25 +735,15 @@ static void StoreIC_PushArgs(MacroAssembler* masm) { Register receiver = StoreDescriptor::ReceiverRegister(); Register name = StoreDescriptor::NameRegister(); Register value = StoreDescriptor::ValueRegister(); + Register slot = VectorStoreICDescriptor::SlotRegister(); + Register vector = VectorStoreICDescriptor::VectorRegister(); - if (FLAG_vector_stores) { - Register slot = VectorStoreICDescriptor::SlotRegister(); - Register vector = VectorStoreICDescriptor::VectorRegister(); - - __ xchg(receiver, Operand(esp, 0)); - __ push(name); - __ push(value); - __ push(slot); - __ push(vector); - __ push(receiver); // Contains the return address. 
- } else { - DCHECK(!ebx.is(receiver) && !ebx.is(name) && !ebx.is(value)); - __ pop(ebx); - __ push(receiver); - __ push(name); - __ push(value); - __ push(ebx); - } + __ xchg(receiver, Operand(esp, 0)); + __ push(name); + __ push(value); + __ push(slot); + __ push(vector); + __ push(receiver); // Contains the return address. } @@ -781,8 +752,7 @@ void StoreIC::GenerateMiss(MacroAssembler* masm) { StoreIC_PushArgs(masm); // Perform tail call to the entry. - int args = FLAG_vector_stores ? 5 : 3; - __ TailCallRuntime(Runtime::kStoreIC_Miss, args, 1); + __ TailCallRuntime(Runtime::kStoreIC_Miss); } @@ -798,25 +768,21 @@ void StoreIC::GenerateNormal(MacroAssembler* masm) { // objects. Push and restore receiver but rely on // GenerateDictionaryStore preserving the value and name. __ push(receiver); - if (FLAG_vector_stores) { - __ push(vector); - __ push(slot); - } + __ push(vector); + __ push(slot); Register dictionary = ebx; __ mov(dictionary, FieldOperand(receiver, JSObject::kPropertiesOffset)); GenerateDictionaryStore(masm, &restore_miss, dictionary, name, value, receiver, edi); - __ Drop(FLAG_vector_stores ? 3 : 1); + __ Drop(3); Counters* counters = masm->isolate()->counters(); __ IncrementCounter(counters->store_normal_hit(), 1); __ ret(0); __ bind(&restore_miss); - if (FLAG_vector_stores) { - __ pop(slot); - __ pop(vector); - } + __ pop(slot); + __ pop(vector); __ pop(receiver); __ IncrementCounter(counters->store_normal_miss(), 1); GenerateMiss(masm); @@ -828,8 +794,7 @@ void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) { StoreIC_PushArgs(masm); // Do tail-call to runtime routine. - int args = FLAG_vector_stores ? 
5 : 3; - __ TailCallRuntime(Runtime::kKeyedStoreIC_Miss, args, 1); + __ TailCallRuntime(Runtime::kKeyedStoreIC_Miss); } @@ -867,7 +832,8 @@ bool CompareIC::HasInlinedSmiCode(Address address) { } -void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) { +void PatchInlinedSmiCode(Isolate* isolate, Address address, + InlinedSmiCheck check) { // The address of the instruction following the call. Address test_instruction_address = address + Assembler::kCallTargetAddressOffset; |