Diffstat (limited to 'deps/v8/src/compiler')
136 files changed, 8816 insertions, 5243 deletions
diff --git a/deps/v8/src/compiler/access-builder.cc b/deps/v8/src/compiler/access-builder.cc
index a369de4885..7a72be8028 100644
--- a/deps/v8/src/compiler/access-builder.cc
+++ b/deps/v8/src/compiler/access-builder.cc
@@ -32,18 +32,6 @@ FieldAccess AccessBuilder::ForExternalTaggedValue() {
 }
 
 // static
-FieldAccess AccessBuilder::ForExternalUint8Value() {
-  FieldAccess access = {kUntaggedBase,
-                        0,
-                        MaybeHandle<Name>(),
-                        MaybeHandle<Map>(),
-                        TypeCache::Get()->kUint8,
-                        MachineType::Uint8(),
-                        kNoWriteBarrier};
-  return access;
-}
-
-// static
 FieldAccess AccessBuilder::ForMap() {
   FieldAccess access = {
       kTaggedBase, HeapObject::kMapOffset,
@@ -94,9 +82,19 @@ FieldAccess AccessBuilder::ForBigIntLeastSignificantDigit64() {
 // static
 FieldAccess AccessBuilder::ForJSObjectPropertiesOrHash() {
   FieldAccess access = {
+      kTaggedBase,         JSObject::kPropertiesOrHashOffset,
+      MaybeHandle<Name>(), MaybeHandle<Map>(),
+      Type::Any(),         MachineType::TypeCompressedTagged(),
+      kFullWriteBarrier,   LoadSensitivity::kCritical};
+  return access;
+}
+
+// static
+FieldAccess AccessBuilder::ForJSObjectPropertiesOrHashKnownPointer() {
+  FieldAccess access = {
       kTaggedBase,         JSObject::kPropertiesOrHashOffset,
       MaybeHandle<Name>(), MaybeHandle<Map>(),
-      Type::Any(),         MachineType::TypeCompressedTagged(),
+      Type::Any(),         MachineType::TypeCompressedTaggedPointer(),
       kPointerWriteBarrier, LoadSensitivity::kCritical};
   return access;
 }
@@ -172,8 +170,8 @@ FieldAccess AccessBuilder::ForJSFunctionPrototypeOrInitialMap() {
   FieldAccess access = {
       kTaggedBase,         JSFunction::kPrototypeOrInitialMapOffset,
       MaybeHandle<Name>(), MaybeHandle<Map>(),
-      Type::Any(),         MachineType::TypeCompressedTagged(),
-      kFullWriteBarrier};
+      Type::Any(),         MachineType::TypeCompressedTaggedPointer(),
+      kPointerWriteBarrier};
   return access;
 }
 
@@ -182,7 +180,7 @@ FieldAccess AccessBuilder::ForJSFunctionContext() {
   FieldAccess access = {
       kTaggedBase,         JSFunction::kContextOffset,
       MaybeHandle<Name>(), MaybeHandle<Map>(),
-      Type::Internal(),    MachineType::TypeCompressedTagged(),
+      Type::Internal(),    MachineType::TypeCompressedTaggedPointer(),
       kPointerWriteBarrier};
   return access;
 }
@@ -304,7 +302,7 @@ FieldAccess AccessBuilder::ForJSGeneratorObjectParametersAndRegisters() {
   FieldAccess access = {
       kTaggedBase,      JSGeneratorObject::kParametersAndRegistersOffset,
       Handle<Name>(),   MaybeHandle<Map>(),
-      Type::Internal(), MachineType::TypeCompressedTagged(),
+      Type::Internal(), MachineType::TypeCompressedTaggedPointer(),
       kPointerWriteBarrier};
   return access;
 }
@@ -325,7 +323,7 @@ FieldAccess AccessBuilder::ForJSAsyncFunctionObjectPromise() {
       kTaggedBase,         JSAsyncFunctionObject::kPromiseOffset,
       Handle<Name>(),      MaybeHandle<Map>(),
       Type::OtherObject(), MachineType::TypeCompressedTaggedPointer(),
-      kFullWriteBarrier};
+      kPointerWriteBarrier};
   return access;
 }
 
@@ -357,29 +355,20 @@ FieldAccess AccessBuilder::ForJSArrayLength(ElementsKind elements_kind) {
       Handle<Name>(),
       MaybeHandle<Map>(),
       type_cache->kJSArrayLengthType,
-      MachineType::TypeCompressedTaggedSigned(),
+      MachineType::TypeCompressedTagged(),
       kFullWriteBarrier};
   if (IsDoubleElementsKind(elements_kind)) {
     access.type = type_cache->kFixedDoubleArrayLengthType;
+    access.machine_type = MachineType::TypeCompressedTaggedSigned();
     access.write_barrier_kind = kNoWriteBarrier;
   } else if (IsFastElementsKind(elements_kind)) {
     access.type = type_cache->kFixedArrayLengthType;
+    access.machine_type = MachineType::TypeCompressedTaggedSigned();
     access.write_barrier_kind = kNoWriteBarrier;
   }
   return access;
 }
-
-// static
-FieldAccess AccessBuilder::ForJSArrayBufferBackingStore() {
-  FieldAccess access = {
-      kTaggedBase,           JSArrayBuffer::kBackingStoreOffset,
-      MaybeHandle<Name>(),   MaybeHandle<Map>(),
-      Type::OtherInternal(), MachineType::Pointer(),
-      kNoWriteBarrier};
-  return access;
-}
-
 // static
 FieldAccess AccessBuilder::ForJSArrayBufferBitField() {
   FieldAccess access = {
@@ -441,7 +430,7 @@ FieldAccess AccessBuilder::ForJSTypedArrayBasePointer() {
       kTaggedBase,           JSTypedArray::kBasePointerOffset,
       MaybeHandle<Name>(),   MaybeHandle<Map>(),
       Type::OtherInternal(), MachineType::TypeCompressedTagged(),
-      kPointerWriteBarrier, LoadSensitivity::kCritical};
+      kFullWriteBarrier, LoadSensitivity::kCritical};
   return access;
 }
 
@@ -747,20 +736,6 @@ FieldAccess AccessBuilder::ForExternalStringResourceData() {
 }
 
 // static
-ElementAccess AccessBuilder::ForExternalOneByteStringCharacter() {
-  ElementAccess access = {kUntaggedBase, 0, TypeCache::Get()->kUint8,
-                          MachineType::Uint8(), kNoWriteBarrier};
-  return access;
-}
-
-// static
-ElementAccess AccessBuilder::ForExternalTwoByteStringCharacter() {
-  ElementAccess access = {kUntaggedBase, 0, TypeCache::Get()->kUint16,
-                          MachineType::Uint16(), kNoWriteBarrier};
-  return access;
-}
-
-// static
 ElementAccess AccessBuilder::ForSeqOneByteStringCharacter() {
   ElementAccess access = {kTaggedBase, SeqOneByteString::kHeaderSize,
                           TypeCache::Get()->kUint8, MachineType::Uint8(),
@@ -777,26 +752,6 @@ ElementAccess AccessBuilder::ForSeqTwoByteStringCharacter() {
 }
 
 // static
-FieldAccess AccessBuilder::ForJSGlobalObjectGlobalProxy() {
-  FieldAccess access = {
-      kTaggedBase,      JSGlobalObject::kGlobalProxyOffset,
-      Handle<Name>(),   MaybeHandle<Map>(),
-      Type::Receiver(), MachineType::TypeCompressedTaggedPointer(),
-      kPointerWriteBarrier};
-  return access;
-}
-
-// static
-FieldAccess AccessBuilder::ForJSGlobalObjectNativeContext() {
-  FieldAccess access = {
-      kTaggedBase,      JSGlobalObject::kNativeContextOffset,
-      Handle<Name>(),   MaybeHandle<Map>(),
-      Type::Internal(), MachineType::TypeCompressedTaggedPointer(),
-      kPointerWriteBarrier};
-  return access;
-}
-
-// static
 FieldAccess AccessBuilder::ForJSGlobalProxyNativeContext() {
   FieldAccess access = {
       kTaggedBase,      JSGlobalProxy::kNativeContextOffset,
@@ -865,17 +820,6 @@ FieldAccess AccessBuilder::ForJSStringIteratorIndex() {
 }
 
 // static
-FieldAccess AccessBuilder::ForValue() {
-  FieldAccess access = {
-      kTaggedBase,         JSPrimitiveWrapper::kValueOffset,
-      Handle<Name>(),      MaybeHandle<Map>(),
-      Type::NonInternal(), MachineType::TypeCompressedTagged(),
-      kFullWriteBarrier};
-  return access;
-}
-
-
-// static
 FieldAccess AccessBuilder::ForArgumentsLength() {
   FieldAccess access = {
       kTaggedBase,         JSArgumentsObjectWithLength::kLengthOffset,
@@ -892,7 +836,7 @@ FieldAccess AccessBuilder::ForArgumentsCallee() {
       kTaggedBase,         JSSloppyArgumentsObject::kCalleeOffset,
       Handle<Name>(),      MaybeHandle<Map>(),
       Type::NonInternal(), MachineType::TypeCompressedTagged(),
-      kPointerWriteBarrier};
+      kFullWriteBarrier};
   return access;
 }
 
@@ -931,6 +875,19 @@ FieldAccess AccessBuilder::ForContextSlot(size_t index) {
 }
 
 // static
+FieldAccess AccessBuilder::ForContextSlotKnownPointer(size_t index) {
+  int offset = Context::OffsetOfElementAt(static_cast<int>(index));
+  DCHECK_EQ(offset,
+            Context::SlotOffset(static_cast<int>(index)) + kHeapObjectTag);
+  FieldAccess access = {
+      kTaggedBase,    offset,
+      Handle<Name>(), MaybeHandle<Map>(),
+      Type::Any(),    MachineType::TypeCompressedTaggedPointer(),
+      kPointerWriteBarrier};
+  return access;
+}
+
+// static
 ElementAccess AccessBuilder::ForFixedArrayElement() {
   ElementAccess access = {kTaggedBase, FixedArray::kHeaderSize, Type::Any(),
                           MachineType::TypeCompressedTagged(),
@@ -1189,19 +1146,6 @@ ElementAccess AccessBuilder::ForOrderedHashMapEntryValue() {
 }
 
 // static
-FieldAccess AccessBuilder::ForDictionaryMaxNumberKey() {
-  FieldAccess access = {
-      kTaggedBase,
-      FixedArray::OffsetOfElementAt(NumberDictionary::kMaxNumberKeyIndex),
-      MaybeHandle<Name>(),
-      MaybeHandle<Map>(),
-      Type::Any(),
-      MachineType::TypeCompressedTagged(),
-      kNoWriteBarrier};
-  return access;
-}
-
-// static
 FieldAccess AccessBuilder::ForDictionaryNextEnumerationIndex() {
   FieldAccess access = {
       kTaggedBase,
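A note on the pattern above: the generic ForJSObjectPropertiesOrHash() must tolerate a Smi hash in the slot, so it stays at MachineType::TypeCompressedTagged() with the full write barrier, while the new KnownPointer variant is for sites where the stored value is statically known to be a HeapObject and can use the tagged-pointer machine type with the cheaper pointer write barrier (ForContextSlotKnownPointer() follows the same scheme). A rough contrast, with the FieldAccess aggregate layout abbreviated:

    // Sketch only; FieldAccess member layout abbreviated.
    FieldAccess generic = AccessBuilder::ForJSObjectPropertiesOrHash();
    // -> MachineType::TypeCompressedTagged(), kFullWriteBarrier
    FieldAccess known = AccessBuilder::ForJSObjectPropertiesOrHashKnownPointer();
    // -> MachineType::TypeCompressedTaggedPointer(), kPointerWriteBarrier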
diff --git a/deps/v8/src/compiler/access-builder.h b/deps/v8/src/compiler/access-builder.h
index e3a17fe257..231e75f819 100644
--- a/deps/v8/src/compiler/access-builder.h
+++ b/deps/v8/src/compiler/access-builder.h
@@ -54,6 +54,9 @@ class V8_EXPORT_PRIVATE AccessBuilder final
   // Provides access to JSObject::properties() field.
   static FieldAccess ForJSObjectPropertiesOrHash();
 
+  // Provides access to JSObject::properties() field for known pointers.
+  static FieldAccess ForJSObjectPropertiesOrHashKnownPointer();
+
   // Provides access to JSObject::elements() field.
   static FieldAccess ForJSObjectElements();
 
@@ -128,9 +131,6 @@ class V8_EXPORT_PRIVATE AccessBuilder final
   // Provides access to JSArray::length() field.
   static FieldAccess ForJSArrayLength(ElementsKind elements_kind);
 
-  // Provides access to JSArrayBuffer::backing_store() field.
-  static FieldAccess ForJSArrayBufferBackingStore();
-
   // Provides access to JSArrayBuffer::bit_field() field.
   static FieldAccess ForJSArrayBufferBitField();
 
@@ -236,24 +236,12 @@ class V8_EXPORT_PRIVATE AccessBuilder final
   // Provides access to ExternalString::resource_data() field.
   static FieldAccess ForExternalStringResourceData();
 
-  // Provides access to ExternalOneByteString characters.
-  static ElementAccess ForExternalOneByteStringCharacter();
-
-  // Provides access to ExternalTwoByteString characters.
-  static ElementAccess ForExternalTwoByteStringCharacter();
-
   // Provides access to SeqOneByteString characters.
   static ElementAccess ForSeqOneByteStringCharacter();
 
   // Provides access to SeqTwoByteString characters.
   static ElementAccess ForSeqTwoByteStringCharacter();
 
-  // Provides access to JSGlobalObject::global_proxy() field.
-  static FieldAccess ForJSGlobalObjectGlobalProxy();
-
-  // Provides access to JSGlobalObject::native_context() field.
-  static FieldAccess ForJSGlobalObjectNativeContext();
-
   // Provides access to JSGlobalProxy::native_context() field.
   static FieldAccess ForJSGlobalProxyNativeContext();
 
@@ -272,9 +260,6 @@ class V8_EXPORT_PRIVATE AccessBuilder final
   // Provides access to JSStringIterator::index() field.
   static FieldAccess ForJSStringIteratorIndex();
 
-  // Provides access to JSPrimitiveWrapper::value() field.
-  static FieldAccess ForValue();
-
   // Provides access to Cell::value() field.
   static FieldAccess ForCellValue();
 
@@ -289,6 +274,9 @@ class V8_EXPORT_PRIVATE AccessBuilder final
   // Provides access to Context slots.
   static FieldAccess ForContextSlot(size_t index);
 
+  // Provides access to Context slots that are known to be pointers.
+  static FieldAccess ForContextSlotKnownPointer(size_t index);
+
   // Provides access to FixedArray elements.
   static ElementAccess ForFixedArrayElement();
   static ElementAccess ForFixedArrayElement(
@@ -327,7 +315,6 @@ class V8_EXPORT_PRIVATE AccessBuilder final
   static ElementAccess ForOrderedHashMapEntryValue();
 
   // Provides access to Dictionary fields.
-  static FieldAccess ForDictionaryMaxNumberKey();
   static FieldAccess ForDictionaryNextEnumerationIndex();
   static FieldAccess ForDictionaryObjectHashIndex();
diff --git a/deps/v8/src/compiler/access-info.cc b/deps/v8/src/compiler/access-info.cc
index 6fc9e8214e..269ef90375 100644
--- a/deps/v8/src/compiler/access-info.cc
+++ b/deps/v8/src/compiler/access-info.cc
@@ -9,6 +9,7 @@
 #include "src/builtins/accessors.h"
 #include "src/compiler/compilation-dependencies.h"
 #include "src/compiler/compilation-dependency.h"
+#include "src/compiler/simplified-operator.h"
 #include "src/compiler/type-cache.h"
 #include "src/ic/call-optimization.h"
 #include "src/logging/counters.h"
@@ -81,11 +82,12 @@ PropertyAccessInfo PropertyAccessInfo::DataField(
     Zone* zone, Handle<Map> receiver_map,
     ZoneVector<CompilationDependency const*>&& dependencies,
     FieldIndex field_index, Representation field_representation,
-    Type field_type, MaybeHandle<Map> field_map, MaybeHandle<JSObject> holder,
-    MaybeHandle<Map> transition_map) {
+    Type field_type, Handle<Map> field_owner_map, MaybeHandle<Map> field_map,
+    MaybeHandle<JSObject> holder, MaybeHandle<Map> transition_map) {
   return PropertyAccessInfo(kDataField, holder, transition_map, field_index,
-                            field_representation, field_type, field_map,
-                            {{receiver_map}, zone}, std::move(dependencies));
+                            field_representation, field_type, field_owner_map,
+                            field_map, {{receiver_map}, zone},
+                            std::move(dependencies));
 }
 
 // static
@@ -93,11 +95,12 @@ PropertyAccessInfo PropertyAccessInfo::DataConstant(
     Zone* zone, Handle<Map> receiver_map,
     ZoneVector<CompilationDependency const*>&& dependencies,
     FieldIndex field_index, Representation field_representation,
-    Type field_type, MaybeHandle<Map> field_map, MaybeHandle<JSObject> holder,
-    MaybeHandle<Map> transition_map) {
+    Type field_type, Handle<Map> field_owner_map, MaybeHandle<Map> field_map,
+    MaybeHandle<JSObject> holder, MaybeHandle<Map> transition_map) {
   return PropertyAccessInfo(kDataConstant, holder, transition_map, field_index,
                             field_representation, field_type, field_owner_map,
-                            {{receiver_map}, zone}, std::move(dependencies));
+                            field_map, {{receiver_map}, zone},
+                            std::move(dependencies));
 }
 
 // static
@@ -155,7 +158,7 @@ PropertyAccessInfo::PropertyAccessInfo(Zone* zone, Kind kind,
 PropertyAccessInfo::PropertyAccessInfo(
     Kind kind, MaybeHandle<JSObject> holder, MaybeHandle<Map> transition_map,
     FieldIndex field_index, Representation field_representation,
-    Type field_type, MaybeHandle<Map> field_map,
+    Type field_type, Handle<Map> field_owner_map, MaybeHandle<Map> field_map,
    ZoneVector<Handle<Map>>&& receiver_maps,
     ZoneVector<CompilationDependency const*>&& unrecorded_dependencies)
     : kind_(kind),
@@ -166,7 +169,11 @@ PropertyAccessInfo::PropertyAccessInfo(
       field_index_(field_index),
       field_representation_(field_representation),
       field_type_(field_type),
-      field_map_(field_map) {}
+      field_owner_map_(field_owner_map),
+      field_map_(field_map) {
+  DCHECK_IMPLIES(!transition_map.is_null(),
+                 field_owner_map.address() == transition_map.address());
+}
 
 bool PropertyAccessInfo::Merge(PropertyAccessInfo const* that,
                                AccessMode access_mode, Zone* zone) {
@@ -258,6 +265,13 @@ bool PropertyAccessInfo::Merge(PropertyAccessInfo const* that,
     }
   }
 }
 
+ConstFieldInfo PropertyAccessInfo::GetConstFieldInfo() const {
+  if (IsDataConstant()) {
+    return ConstFieldInfo(field_owner_map_.ToHandleChecked());
+  }
+  return ConstFieldInfo::None();
+}
+
 AccessInfoFactory::AccessInfoFactory(JSHeapBroker* broker,
                                      CompilationDependencies* dependencies,
                                      Zone* zone)
@@ -276,35 +290,32 @@ base::Optional<ElementAccessInfo> AccessInfoFactory::ComputeElementAccessInfo(
 }
 
 bool AccessInfoFactory::ComputeElementAccessInfos(
-    ElementAccessFeedback const& processed, AccessMode access_mode,
+    ElementAccessFeedback const& feedback,
     ZoneVector<ElementAccessInfo>* access_infos) const {
+  AccessMode access_mode = feedback.keyed_mode().access_mode();
   if (access_mode == AccessMode::kLoad || access_mode == AccessMode::kHas) {
     // For polymorphic loads of similar elements kinds (i.e. all tagged or all
     // double), always use the "worst case" code without a transition. This is
     // much faster than transitioning the elements to the worst case, trading a
     // TransitionElementsKind for a CheckMaps, avoiding mutation of the array.
     base::Optional<ElementAccessInfo> access_info =
-        ConsolidateElementLoad(processed);
+        ConsolidateElementLoad(feedback);
     if (access_info.has_value()) {
       access_infos->push_back(*access_info);
       return true;
     }
   }
 
-  for (Handle<Map> receiver_map : processed.receiver_maps) {
-    // Compute the element access information.
+  for (auto const& group : feedback.transition_groups()) {
+    DCHECK(!group.empty());
+    Handle<Map> target = group.front();
     base::Optional<ElementAccessInfo> access_info =
-        ComputeElementAccessInfo(receiver_map, access_mode);
+        ComputeElementAccessInfo(target, access_mode);
     if (!access_info.has_value()) return false;
 
-    // Collect the possible transitions for the {receiver_map}.
-    for (auto transition : processed.transitions) {
-      if (transition.second.equals(receiver_map)) {
-        access_info->AddTransitionSource(transition.first);
-      }
+    for (size_t i = 1; i < group.size(); ++i) {
+      access_info->AddTransitionSource(group[i]);
     }
-
-    // Schedule the access information.
     access_infos->push_back(*access_info);
   }
   return true;
@@ -378,15 +389,19 @@ PropertyAccessInfo AccessInfoFactory::ComputeDataFieldAccessInfo(
     map_ref.SerializeOwnDescriptor(descriptor);
     constness = dependencies()->DependOnFieldConstness(map_ref, descriptor);
   }
+  Handle<Map> field_owner_map(map->FindFieldOwner(isolate(), descriptor),
+                              isolate());
   switch (constness) {
     case PropertyConstness::kMutable:
       return PropertyAccessInfo::DataField(
           zone(), receiver_map, std::move(unrecorded_dependencies), field_index,
-          details_representation, field_type, field_map, holder);
+          details_representation, field_type, field_owner_map, field_map,
+          holder);
    case PropertyConstness::kConst:
       return PropertyAccessInfo::DataConstant(
           zone(), receiver_map, std::move(unrecorded_dependencies), field_index,
-          details_representation, field_type, field_map, holder);
+          details_representation, field_type, field_owner_map, field_map,
+          holder);
   }
   UNREACHABLE();
 }
@@ -431,7 +446,7 @@ PropertyAccessInfo AccessInfoFactory::ComputeAccessorDescriptorAccessInfo(
     CallOptimization optimization(isolate(), accessor);
     if (!optimization.is_simple_api_call() ||
         optimization.IsCrossContextLazyAccessorPair(
-            *broker()->native_context().object(), *map)) {
+            *broker()->target_native_context().object(), *map)) {
       return PropertyAccessInfo::Invalid(zone());
    }
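The rework of ComputeElementAccessInfos above replaces the flat receiver_maps/transitions representation of ElementAccessFeedback with transition groups, and the access mode now comes from the feedback itself via feedback.keyed_mode().access_mode(). Each group is a vector whose first map is the transition target and whose remaining maps are sources folded in through AddTransitionSource(). A hypothetical grouping, to make the convention concrete (maps A, B, C are invented for illustration):

    // Hypothetical feedback: map A transitions to B, map C stands alone.
    // transition_groups() == {{B, A}, {C}}
    for (auto const& group : feedback.transition_groups()) {
      Handle<Map> target = group.front();   // compute ElementAccessInfo for B
      for (size_t i = 1; i < group.size(); ++i) {
        // group[i] (here: A) transitions to target and is recorded via
        // access_info->AddTransitionSource(group[i]);
      }
    }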
@@ -537,11 +552,13 @@ PropertyAccessInfo AccessInfoFactory::ComputePropertyAccessInfo(
     }
 
     // Walk up the prototype chain.
+    MapRef(broker(), map).SerializePrototype();
     if (!map->prototype().IsJSObject()) {
       // Perform the implicit ToObject for primitives here.
       // Implemented according to ES6 section 7.3.2 GetV (V, P).
       Handle<JSFunction> constructor;
-      if (Map::GetConstructorFunction(map, broker()->native_context().object())
+      if (Map::GetConstructorFunction(
+              map, broker()->target_native_context().object())
               .ToHandle(&constructor)) {
         map = handle(constructor->initial_map(), isolate());
         DCHECK(map->prototype().IsJSObject());
@@ -615,6 +632,7 @@ void PropertyAccessInfo::RecordDependencies(
 bool AccessInfoFactory::FinalizePropertyAccessInfos(
     ZoneVector<PropertyAccessInfo> access_infos, AccessMode access_mode,
     ZoneVector<PropertyAccessInfo>* result) const {
+  if (access_infos.empty()) return false;
   MergePropertyAccessInfos(access_infos, access_mode, result);
   for (PropertyAccessInfo const& info : *result) {
     if (info.IsInvalid()) return false;
@@ -668,22 +686,28 @@ Maybe<ElementsKind> GeneralizeElementsKind(ElementsKind this_kind,
 }  // namespace
 
 base::Optional<ElementAccessInfo> AccessInfoFactory::ConsolidateElementLoad(
-    ElementAccessFeedback const& processed) const {
-  ElementAccessFeedback::MapIterator it = processed.all_maps(broker());
-  MapRef first_map = it.current();
+    ElementAccessFeedback const& feedback) const {
+  if (feedback.transition_groups().empty()) return base::nullopt;
+
+  DCHECK(!feedback.transition_groups().front().empty());
+  MapRef first_map(broker(), feedback.transition_groups().front().front());
   InstanceType instance_type = first_map.instance_type();
   ElementsKind elements_kind = first_map.elements_kind();
+
   ZoneVector<Handle<Map>> maps(zone());
-  for (; !it.done(); it.advance()) {
-    MapRef map = it.current();
-    if (map.instance_type() != instance_type || !CanInlineElementAccess(map)) {
-      return base::nullopt;
-    }
-    if (!GeneralizeElementsKind(elements_kind, map.elements_kind())
-             .To(&elements_kind)) {
-      return base::nullopt;
+  for (auto const& group : feedback.transition_groups()) {
+    for (Handle<Map> map_handle : group) {
+      MapRef map(broker(), map_handle);
+      if (map.instance_type() != instance_type ||
+          !CanInlineElementAccess(map)) {
+        return base::nullopt;
+      }
+      if (!GeneralizeElementsKind(elements_kind, map.elements_kind())
+               .To(&elements_kind)) {
+        return base::nullopt;
+      }
+      maps.push_back(map.object());
     }
-    maps.push_back(map.object());
   }
 
   return ElementAccessInfo(std::move(maps), elements_kind, zone());
@@ -723,7 +747,7 @@ PropertyAccessInfo AccessInfoFactory::LookupSpecialFieldAccessor(
   }
 
   // Special fields are always mutable.
     return PropertyAccessInfo::DataField(zone(), map, {{}, zone()}, field_index,
-                                         field_representation, field_type);
+                                         field_representation, field_type, map);
   }
   return PropertyAccessInfo::Invalid(zone());
 }
@@ -799,12 +823,12 @@ PropertyAccessInfo AccessInfoFactory::LookupTransition(
     case PropertyConstness::kMutable:
       return PropertyAccessInfo::DataField(
           zone(), map, std::move(unrecorded_dependencies), field_index,
-          details_representation, field_type, field_map, holder,
+          details_representation, field_type, transition_map, field_map, holder,
           transition_map);
     case PropertyConstness::kConst:
       return PropertyAccessInfo::DataConstant(
           zone(), map, std::move(unrecorded_dependencies), field_index,
-          details_representation, field_type, field_map, holder,
+          details_representation, field_type, transition_map, field_map, holder,
           transition_map);
   }
   UNREACHABLE();
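Taken together, the access-info changes in this file thread a field_owner_map through DataField/DataConstant (for transitioning stores the owner is the transition map itself, which the new constructor DCHECK enforces), and GetConstFieldInfo() surfaces it for kDataConstant accesses so constant-field loads and stores can be keyed to the owning map. A hedged usage sketch (ConstFieldInfo is defined in simplified-operator.h, hence the new include; its exact members are assumed here):

    // Sketch, assuming ConstFieldInfo exposes an IsConst() predicate and the
    // owning map, as in this V8 version's simplified-operator.h.
    ConstFieldInfo const_info = access_info.GetConstFieldInfo();
    if (const_info.IsConst()) {
      // The access may be treated as constant relative to the owner map,
      // guarded by the recorded field-constness dependency.
    }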
diff --git a/deps/v8/src/compiler/access-info.h b/deps/v8/src/compiler/access-info.h
index 4c7c3611df..e2f6e6d453 100644
--- a/deps/v8/src/compiler/access-info.h
+++ b/deps/v8/src/compiler/access-info.h
@@ -29,6 +29,7 @@ class CompilationDependency;
 class ElementAccessFeedback;
 class JSHeapBroker;
 class TypeCache;
+struct ConstFieldInfo;
 
 std::ostream& operator<<(std::ostream&, AccessMode);
 
@@ -77,14 +78,16 @@ class PropertyAccessInfo final {
       Zone* zone, Handle<Map> receiver_map,
      ZoneVector<CompilationDependency const*>&& unrecorded_dependencies,
       FieldIndex field_index, Representation field_representation,
-      Type field_type, MaybeHandle<Map> field_map = MaybeHandle<Map>(),
+      Type field_type, Handle<Map> field_owner_map,
+      MaybeHandle<Map> field_map = MaybeHandle<Map>(),
       MaybeHandle<JSObject> holder = MaybeHandle<JSObject>(),
       MaybeHandle<Map> transition_map = MaybeHandle<Map>());
   static PropertyAccessInfo DataConstant(
       Zone* zone, Handle<Map> receiver_map,
       ZoneVector<CompilationDependency const*>&& unrecorded_dependencies,
       FieldIndex field_index, Representation field_representation,
-      Type field_type, MaybeHandle<Map> field_map, MaybeHandle<JSObject> holder,
+      Type field_type, Handle<Map> field_owner_map, MaybeHandle<Map> field_map,
+      MaybeHandle<JSObject> holder,
       MaybeHandle<Map> transition_map = MaybeHandle<Map>());
   static PropertyAccessInfo AccessorConstant(Zone* zone,
                                              Handle<Map> receiver_map,
@@ -109,6 +112,7 @@ class PropertyAccessInfo final {
   bool IsStringLength() const { return kind() == kStringLength; }
 
   bool HasTransitionMap() const { return !transition_map().is_null(); }
+  ConstFieldInfo GetConstFieldInfo() const;
 
   Kind kind() const { return kind_; }
   MaybeHandle<JSObject> holder() const {
@@ -137,7 +141,7 @@ class PropertyAccessInfo final {
   PropertyAccessInfo(Kind kind, MaybeHandle<JSObject> holder,
                      MaybeHandle<Map> transition_map, FieldIndex field_index,
                      Representation field_representation, Type field_type,
-                     MaybeHandle<Map> field_map,
+                     Handle<Map> field_owner_map, MaybeHandle<Map> field_map,
                     ZoneVector<Handle<Map>>&& receiver_maps,
                      ZoneVector<CompilationDependency const*>&& dependencies);
 
@@ -150,6 +154,7 @@ class PropertyAccessInfo final {
   FieldIndex field_index_;
   Representation field_representation_;
   Type field_type_;
+  MaybeHandle<Map> field_owner_map_;
   MaybeHandle<Map> field_map_;
 };
 
@@ -163,7 +168,7 @@ class AccessInfoFactory final {
   base::Optional<ElementAccessInfo> ComputeElementAccessInfo(
       Handle<Map> map, AccessMode access_mode) const;
   bool ComputeElementAccessInfos(
-      ElementAccessFeedback const& processed, AccessMode access_mode,
+      ElementAccessFeedback const& feedback,
       ZoneVector<ElementAccessInfo>* access_infos) const;
   PropertyAccessInfo ComputePropertyAccessInfo(Handle<Map> map,
@@ -191,7 +196,7 @@ class AccessInfoFactory final {
 
  private:
   base::Optional<ElementAccessInfo> ConsolidateElementLoad(
-      ElementAccessFeedback const& processed) const;
+      ElementAccessFeedback const& feedback) const;
   PropertyAccessInfo LookupSpecialFieldAccessor(Handle<Map> map,
                                                 Handle<Name> name) const;
   PropertyAccessInfo LookupTransition(Handle<Map> map, Handle<Name> name,
diff --git a/deps/v8/src/compiler/allocation-builder-inl.h b/deps/v8/src/compiler/allocation-builder-inl.h
index 8da7c685a1..4cab0a7e6e 100644
--- a/deps/v8/src/compiler/allocation-builder-inl.h
+++ b/deps/v8/src/compiler/allocation-builder-inl.h
@@ -14,11 +14,9 @@ namespace v8 {
 namespace internal {
 namespace compiler {
 
-void AllocationBuilder::AllocateContext(int variadic_part_length,
-                                        Handle<Map> map) {
-  DCHECK(
-      IsInRange(map->instance_type(), FIRST_CONTEXT_TYPE, LAST_CONTEXT_TYPE));
-  DCHECK_NE(NATIVE_CONTEXT_TYPE, map->instance_type());
+void AllocationBuilder::AllocateContext(int variadic_part_length, MapRef map) {
+  DCHECK(IsInRange(map.instance_type(), FIRST_CONTEXT_TYPE, LAST_CONTEXT_TYPE));
+  DCHECK_NE(NATIVE_CONTEXT_TYPE, map.instance_type());
   int size = Context::SizeFor(variadic_part_length);
   Allocate(size, AllocationType::kYoung, Type::OtherInternal());
   Store(AccessBuilder::ForMap(), map);
@@ -29,11 +27,11 @@ void AllocationBuilder::AllocateContext(int variadic_part_length,
 }
 
 // Compound allocation of a FixedArray.
-void AllocationBuilder::AllocateArray(int length, Handle<Map> map,
+void AllocationBuilder::AllocateArray(int length, MapRef map,
                                       AllocationType allocation) {
-  DCHECK(map->instance_type() == FIXED_ARRAY_TYPE ||
-         map->instance_type() == FIXED_DOUBLE_ARRAY_TYPE);
-  int size = (map->instance_type() == FIXED_ARRAY_TYPE)
+  DCHECK(map.instance_type() == FIXED_ARRAY_TYPE ||
+         map.instance_type() == FIXED_DOUBLE_ARRAY_TYPE);
+  int size = (map.instance_type() == FIXED_ARRAY_TYPE)
                  ? FixedArray::SizeFor(length)
                  : FixedDoubleArray::SizeFor(length);
   Allocate(size, allocation, Type::OtherInternal());
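AllocationBuilder now takes a MapRef (the JSHeapBroker's handle wrapper) rather than a raw Handle<Map>, which is why the DCHECKs switch from map->instance_type() to map.instance_type(). Call sites wrap their handle once; a minimal sketch of the adaptation (broker and factory names are placeholders):

    // Sketch: adapting a caller to the MapRef-based signature.
    MapRef fixed_array_map(broker, factory->fixed_array_map());
    AllocationBuilder a(jsgraph, effect, control);
    a.AllocateArray(length, fixed_array_map);  // was: a Handle<Map> argument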
diff --git a/deps/v8/src/compiler/allocation-builder.h b/deps/v8/src/compiler/allocation-builder.h
index d92e0f769b..040dd01405 100644
--- a/deps/v8/src/compiler/allocation-builder.h
+++ b/deps/v8/src/compiler/allocation-builder.h
@@ -49,17 +49,13 @@ class AllocationBuilder final {
   }
 
   // Compound allocation of a context.
-  inline void AllocateContext(int variadic_part_length, Handle<Map> map);
+  inline void AllocateContext(int variadic_part_length, MapRef map);
 
   // Compound allocation of a FixedArray.
-  inline void AllocateArray(int length, Handle<Map> map,
+  inline void AllocateArray(int length, MapRef map,
                             AllocationType allocation = AllocationType::kYoung);
 
   // Compound store of a constant into a field.
-  void Store(const FieldAccess& access, Handle<Object> value) {
-    Store(access, jsgraph()->Constant(value));
-  }
-
-  // Compound store of a constant into a field.
   void Store(const FieldAccess& access, const ObjectRef& value) {
     Store(access, jsgraph()->Constant(value));
   }
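The backend hunks below (and their arm64 counterparts further down) retire kArchStackPointer/LoadStackPointer in favor of a dedicated kArchStackPointerGreaterThan operation: rather than materializing sp into a general register and comparing it in the instruction selector, the code generator emits the compare itself and the result is consumed through kStackPointerGreaterThanCondition. The lowered stack check reduces to roughly this shape (registers, mnemonic flavor, and branch direction illustrative):

    // Sketch of the lowered stack check on arm:
    //   cmp  sp, r1          ; r1 = stack limit input of StackPointerGreaterThan
    //   bhi  continue        ; condition holds: sp > limit
    //   b    stack_overflow  ; otherwise take the overflow path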
diff --git a/deps/v8/src/compiler/backend/arm/code-generator-arm.cc b/deps/v8/src/compiler/backend/arm/code-generator-arm.cc
index 88a9c52a33..65a569d755 100644
--- a/deps/v8/src/compiler/backend/arm/code-generator-arm.cc
+++ b/deps/v8/src/compiler/backend/arm/code-generator-arm.cc
@@ -909,10 +909,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       DCHECK_EQ(LeaveCC, i.OutputSBit());
       break;
     case kArchDeoptimize: {
-      int deopt_state_id =
+      DeoptimizationExit* exit =
           BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
-      CodeGenResult result =
-          AssembleDeoptimizerCall(deopt_state_id, current_source_position_);
+      CodeGenResult result = AssembleDeoptimizerCall(exit);
       if (result != kSuccess) return result;
       unwinding_info_writer_.MarkBlockWillExit();
       break;
@@ -921,10 +920,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       AssembleReturn(instr->InputAt(0));
       DCHECK_EQ(LeaveCC, i.OutputSBit());
       break;
-    case kArchStackPointer:
-      __ mov(i.OutputRegister(), sp);
-      DCHECK_EQ(LeaveCC, i.OutputSBit());
-      break;
     case kArchFramePointer:
       __ mov(i.OutputRegister(), fp);
       DCHECK_EQ(LeaveCC, i.OutputSBit());
@@ -936,6 +931,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
         __ mov(i.OutputRegister(), fp);
       }
       break;
+    case kArchStackPointerGreaterThan: {
+      constexpr size_t kValueIndex = 0;
+      DCHECK(instr->InputAt(kValueIndex)->IsRegister());
+      __ cmp(sp, i.InputRegister(kValueIndex));
+      break;
+    }
     case kArchTruncateDoubleToI:
       __ TruncateDoubleToI(isolate(), zone(), i.OutputRegister(),
                            i.InputDoubleRegister(0), DetermineStubCallMode());
@@ -1838,6 +1839,21 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
               i.InputSimd128Register(1));
       break;
     }
+    case kArmF32x4Div: {
+      QwNeonRegister dst = i.OutputSimd128Register();
+      QwNeonRegister src1 = i.InputSimd128Register(0);
+      QwNeonRegister src2 = i.InputSimd128Register(1);
+      DCHECK_EQ(dst, q0);
+      DCHECK_EQ(src1, q0);
+      DCHECK_EQ(src2, q1);
+#define S_FROM_Q(reg, lane) SwVfpRegister::from_code(reg.code() * 4 + lane)
+      __ vdiv(S_FROM_Q(dst, 0), S_FROM_Q(src1, 0), S_FROM_Q(src2, 0));
+      __ vdiv(S_FROM_Q(dst, 1), S_FROM_Q(src1, 1), S_FROM_Q(src2, 1));
+      __ vdiv(S_FROM_Q(dst, 2), S_FROM_Q(src1, 2), S_FROM_Q(src2, 2));
+      __ vdiv(S_FROM_Q(dst, 3), S_FROM_Q(src1, 3), S_FROM_Q(src2, 3));
+#undef S_FROM_Q
+      break;
+    }
     case kArmF32x4Min: {
       __ vmin(i.OutputSimd128Register(), i.InputSimd128Register(0),
               i.InputSimd128Register(1));
@@ -1902,13 +1918,18 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       break;
     }
     case kArmI32x4Shl: {
+      QwNeonRegister tmp = i.TempSimd128Register(0);
+      __ vdup(Neon32, tmp, i.InputRegister(1));
       __ vshl(NeonS32, i.OutputSimd128Register(), i.InputSimd128Register(0),
-              i.InputInt5(1));
+              tmp);
       break;
     }
     case kArmI32x4ShrS: {
-      __ vshr(NeonS32, i.OutputSimd128Register(), i.InputSimd128Register(0),
-              i.InputInt5(1));
+      QwNeonRegister tmp = i.TempSimd128Register(0);
+      __ vdup(Neon32, tmp, i.InputRegister(1));
+      __ vneg(Neon32, tmp, tmp);
+      __ vshl(NeonS32, i.OutputSimd128Register(), i.InputSimd128Register(0),
+              tmp);
       break;
     }
     case kArmI32x4Add: {
@@ -1976,8 +1997,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       break;
     }
     case kArmI32x4ShrU: {
-      __ vshr(NeonU32, i.OutputSimd128Register(), i.InputSimd128Register(0),
-              i.InputInt5(1));
+      QwNeonRegister tmp = i.TempSimd128Register(0);
+      __ vdup(Neon32, tmp, i.InputRegister(1));
+      __ vneg(Neon32, tmp, tmp);
+      __ vshl(NeonU32, i.OutputSimd128Register(), i.InputSimd128Register(0),
+              tmp);
       break;
     }
     case kArmI32x4MinU: {
@@ -2029,13 +2053,18 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       break;
     }
     case kArmI16x8Shl: {
+      QwNeonRegister tmp = i.TempSimd128Register(0);
+      __ vdup(Neon16, tmp, i.InputRegister(1));
       __ vshl(NeonS16, i.OutputSimd128Register(), i.InputSimd128Register(0),
-              i.InputInt4(1));
+              tmp);
       break;
     }
     case kArmI16x8ShrS: {
-      __ vshr(NeonS16, i.OutputSimd128Register(), i.InputSimd128Register(0),
-              i.InputInt4(1));
+      QwNeonRegister tmp = i.TempSimd128Register(0);
+      __ vdup(Neon16, tmp, i.InputRegister(1));
+      __ vneg(Neon16, tmp, tmp);
+      __ vshl(NeonS16, i.OutputSimd128Register(), i.InputSimd128Register(0),
+              tmp);
       break;
     }
     case kArmI16x8SConvertI32x4:
@@ -2112,8 +2141,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       break;
     }
     case kArmI16x8ShrU: {
-      __ vshr(NeonU16, i.OutputSimd128Register(), i.InputSimd128Register(0),
-              i.InputInt4(1));
+      QwNeonRegister tmp = i.TempSimd128Register(0);
+      __ vdup(Neon16, tmp, i.InputRegister(1));
+      __ vneg(Neon16, tmp, tmp);
+      __ vshl(NeonU16, i.OutputSimd128Register(), i.InputSimd128Register(0),
+              tmp);
       break;
     }
     case kArmI16x8UConvertI32x4:
@@ -2168,13 +2200,18 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       break;
     }
     case kArmI8x16Shl: {
+      QwNeonRegister tmp = i.TempSimd128Register(0);
+      __ vdup(Neon8, tmp, i.InputRegister(1));
       __ vshl(NeonS8, i.OutputSimd128Register(), i.InputSimd128Register(0),
-              i.InputInt3(1));
+              tmp);
       break;
     }
     case kArmI8x16ShrS: {
-      __ vshr(NeonS8, i.OutputSimd128Register(), i.InputSimd128Register(0),
-              i.InputInt3(1));
+      QwNeonRegister tmp = i.TempSimd128Register(0);
+      __ vdup(Neon8, tmp, i.InputRegister(1));
+      __ vneg(Neon8, tmp, tmp);
+      __ vshl(NeonS8, i.OutputSimd128Register(), i.InputSimd128Register(0),
+              tmp);
       break;
     }
     case kArmI8x16SConvertI16x8:
@@ -2237,8 +2274,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       break;
     }
     case kArmI8x16ShrU: {
-      __ vshr(NeonU8, i.OutputSimd128Register(), i.InputSimd128Register(0),
-              i.InputInt3(1));
+      QwNeonRegister tmp = i.TempSimd128Register(0);
+      __ vdup(Neon8, tmp, i.InputRegister(1));
+      __ vneg(Neon8, tmp, tmp);
+      __ vshl(NeonU8, i.OutputSimd128Register(), i.InputSimd128Register(0),
+              tmp);
       break;
     }
     case kArmI8x16UConvertI16x8:
@@ -3192,6 +3232,8 @@ void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
 
 void CodeGenerator::FinishCode() { __ CheckConstPool(true, false); }
 
+void CodeGenerator::PrepareForDeoptimizationExits(int deopt_count) {}
+
 void CodeGenerator::AssembleMove(InstructionOperand* source,
                                  InstructionOperand* destination) {
   ArmOperandConverter g(this, nullptr);
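All of the kArmI32x4/I16x8/I8x16 shift cases above switch from immediate shifts to register shifts because Wasm SIMD shift amounts are no longer compile-time constants at this point. NEON's only by-register shift is vshl, so a right shift becomes a left shift by a negated amount, first broadcast to every lane through the new Simd128 temp; the emitted pattern is roughly (registers illustrative):

    // Per-lane variable right shift as emitted above:
    //   vdup.32  q8, r1        ; broadcast the scalar shift amount to all lanes
    //   vneg.s32 q8, q8        ; shifting left by -n == shifting right by n
    //   vshl.s32 q0, q0, q8    ; signed variant; the NeonU* cases use the
    //                          ; unsigned vshl flavor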
diff --git a/deps/v8/src/compiler/backend/arm/instruction-codes-arm.h b/deps/v8/src/compiler/backend/arm/instruction-codes-arm.h
index 165ca39f9d..3551e26aea 100644
--- a/deps/v8/src/compiler/backend/arm/instruction-codes-arm.h
+++ b/deps/v8/src/compiler/backend/arm/instruction-codes-arm.h
@@ -141,6 +141,7 @@ namespace compiler {
   V(ArmF32x4AddHoriz)          \
   V(ArmF32x4Sub)               \
   V(ArmF32x4Mul)               \
+  V(ArmF32x4Div)               \
   V(ArmF32x4Min)               \
   V(ArmF32x4Max)               \
   V(ArmF32x4Eq)                \
diff --git a/deps/v8/src/compiler/backend/arm/instruction-scheduler-arm.cc b/deps/v8/src/compiler/backend/arm/instruction-scheduler-arm.cc
index 41d7b4055f..1d7cf61dfe 100644
--- a/deps/v8/src/compiler/backend/arm/instruction-scheduler-arm.cc
+++ b/deps/v8/src/compiler/backend/arm/instruction-scheduler-arm.cc
@@ -121,6 +121,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
     case kArmF32x4AddHoriz:
     case kArmF32x4Sub:
     case kArmF32x4Mul:
+    case kArmF32x4Div:
    case kArmF32x4Min:
     case kArmF32x4Max:
     case kArmF32x4Eq:
diff --git a/deps/v8/src/compiler/backend/arm/instruction-selector-arm.cc b/deps/v8/src/compiler/backend/arm/instruction-selector-arm.cc
index 06aba4491a..ce74faa4a6 100644
--- a/deps/v8/src/compiler/backend/arm/instruction-selector-arm.cc
+++ b/deps/v8/src/compiler/backend/arm/instruction-selector-arm.cc
@@ -74,17 +74,6 @@ class ArmOperandGenerator : public OperandGenerator {
     }
     return false;
   }
-
-  // Use the stack pointer if the node is LoadStackPointer, otherwise assign a
-  // register.
-  InstructionOperand UseRegisterOrStackPointer(Node* node) {
-    if (node->opcode() == IrOpcode::kLoadStackPointer) {
-      return LocationOperand(LocationOperand::EXPLICIT,
-                             LocationOperand::REGISTER,
-                             MachineRepresentation::kWord32, sp.code());
-    }
-    return UseRegister(node);
-  }
 };
 
 namespace {
@@ -102,6 +91,15 @@ void VisitRRR(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
                  g.UseRegister(node->InputAt(1)));
 }
 
+void VisitSimdShiftRRR(InstructionSelector* selector, ArchOpcode opcode,
+                       Node* node) {
+  ArmOperandGenerator g(selector);
+  InstructionOperand temps[] = {g.TempSimd128Register()};
+  selector->Emit(opcode, g.DefineAsRegister(node),
+                 g.UseRegister(node->InputAt(0)),
+                 g.UseRegister(node->InputAt(1)), arraysize(temps), temps);
+}
+
 void VisitRRRShuffle(InstructionSelector* selector, ArchOpcode opcode,
                      Node* node) {
   ArmOperandGenerator g(selector);
@@ -509,7 +507,8 @@ void InstructionSelector::VisitStore(Node* node) {
   WriteBarrierKind write_barrier_kind = store_rep.write_barrier_kind();
   MachineRepresentation rep = store_rep.representation();
 
-  if (write_barrier_kind != kNoWriteBarrier) {
+  if (write_barrier_kind != kNoWriteBarrier &&
+      V8_LIKELY(!FLAG_disable_write_barriers)) {
     DCHECK(CanBeTaggedPointer(rep));
     AddressingMode addressing_mode;
     InstructionOperand inputs[3];
@@ -887,6 +886,15 @@ void InstructionSelector::VisitWord32Xor(Node* node) {
   VisitBinop(this, node, kArmEor, kArmEor);
 }
 
+void InstructionSelector::VisitStackPointerGreaterThan(
+    Node* node, FlagsContinuation* cont) {
+  Node* const value = node->InputAt(0);
+  InstructionCode opcode = kArchStackPointerGreaterThan;
+
+  ArmOperandGenerator g(this);
+  EmitWithContinuation(opcode, g.UseRegister(value), cont);
+}
+
 namespace {
 
 template <typename TryMatchShift>
@@ -1686,17 +1694,17 @@ void VisitWordCompare(InstructionSelector* selector, Node* node,
 
   if (TryMatchImmediateOrShift(selector, &opcode, m.right().node(),
                                &input_count, &inputs[1])) {
-    inputs[0] = g.UseRegisterOrStackPointer(m.left().node());
+    inputs[0] = g.UseRegister(m.left().node());
     input_count++;
   } else if (TryMatchImmediateOrShift(selector, &opcode, m.left().node(),
                                       &input_count, &inputs[1])) {
     if (!node->op()->HasProperty(Operator::kCommutative)) cont->Commute();
-    inputs[0] = g.UseRegisterOrStackPointer(m.right().node());
+    inputs[0] = g.UseRegister(m.right().node());
     input_count++;
   } else {
     opcode |= AddressingModeField::encode(kMode_Operand2_R);
-    inputs[input_count++] = g.UseRegisterOrStackPointer(m.left().node());
-    inputs[input_count++] = g.UseRegisterOrStackPointer(m.right().node());
+    inputs[input_count++] = g.UseRegister(m.left().node());
+    inputs[input_count++] = g.UseRegister(m.right().node());
   }
 
   if (has_result) {
@@ -1848,6 +1856,9 @@ void InstructionSelector::VisitWordCompareZero(Node* user, Node* value,
         return VisitShift(this, value, TryMatchLSR, cont);
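VisitF32x4Div (added below in this file) pins its operands to q0/q1 and the result to q0 because ARM NEON has no packed float division: the code generator's kArmF32x4Div case above divides lane by lane with scalar vdiv on S registers. The S_FROM_Q mapping (reg.code() * 4 + lane) relies on each low Q register aliasing four consecutive S registers, which holds only for q0-q7; fixing the registers keeps that aliasing arithmetic valid:

    // Aliasing assumed by S_FROM_Q: qN (N < 8) overlays s(4N)..s(4N+3),
    // e.g. q0 = {s0, s1, s2, s3} and q1 = {s4, s5, s6, s7}.
    int s_from_q(int q_code, int lane) { return q_code * 4 + lane; }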
       case IrOpcode::kWord32Ror:
         return VisitShift(this, value, TryMatchROR, cont);
+      case IrOpcode::kStackPointerGreaterThan:
+        cont->OverwriteAndNegateIfEqual(kStackPointerGreaterThanCondition);
+        return VisitStackPointerGreaterThan(value, cont);
       default:
         break;
     }
@@ -2488,7 +2499,7 @@ SIMD_UNOP_LIST(SIMD_VISIT_UNOP)
 
 #define SIMD_VISIT_SHIFT_OP(Name)                \
   void InstructionSelector::Visit##Name(Node* node) { \
-    VisitRRI(this, kArm##Name, node);            \
+    VisitSimdShiftRRR(this, kArm##Name, node);   \
   }
 SIMD_SHIFT_OP_LIST(SIMD_VISIT_SHIFT_OP)
 #undef SIMD_VISIT_SHIFT_OP
@@ -2502,6 +2513,14 @@ SIMD_BINOP_LIST(SIMD_VISIT_BINOP)
 #undef SIMD_VISIT_BINOP
 #undef SIMD_BINOP_LIST
 
+void InstructionSelector::VisitF32x4Div(Node* node) {
+  ArmOperandGenerator g(this);
+  // Use fixed registers in the lower 8 Q-registers so we can directly access
+  // mapped registers S0-S31.
+  Emit(kArmF32x4Div, g.DefineAsFixed(node, q0),
+       g.UseFixed(node->InputAt(0), q0), g.UseFixed(node->InputAt(1), q1));
+}
+
 void InstructionSelector::VisitS128Select(Node* node) {
   ArmOperandGenerator g(this);
   Emit(kArmS128Select, g.DefineSameAsFirst(node),
diff --git a/deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc b/deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc
index c71a63cc3d..66ca7f6cf0 100644
--- a/deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc
+++ b/deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc
@@ -820,20 +820,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       // don't emit code for nops.
       break;
     case kArchDeoptimize: {
-      int deopt_state_id =
+      DeoptimizationExit* exit =
           BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
-      CodeGenResult result =
-          AssembleDeoptimizerCall(deopt_state_id, current_source_position_);
-      if (result != kSuccess) return result;
-      unwinding_info_writer_.MarkBlockWillExit();
+      __ B(exit->label());
       break;
     }
     case kArchRet:
       AssembleReturn(instr->InputAt(0));
       break;
-    case kArchStackPointer:
-      __ mov(i.OutputRegister(), sp);
-      break;
     case kArchFramePointer:
       __ mov(i.OutputRegister(), fp);
       break;
@@ -844,6 +838,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
         __ mov(i.OutputRegister(), fp);
       }
       break;
+    case kArchStackPointerGreaterThan: {
+      constexpr size_t kValueIndex = 0;
+      DCHECK(instr->InputAt(kValueIndex)->IsRegister());
+      __ Cmp(sp, i.InputRegister(kValueIndex));
+      break;
+    }
     case kArchTruncateDoubleToI:
       __ TruncateDoubleToI(isolate(), zone(), i.OutputRegister(),
                            i.InputDoubleRegister(0), DetermineStubCallMode());
@@ -1598,12 +1598,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       __ DecompressAnyTagged(i.OutputRegister(), i.InputRegister(0));
       break;
     }
-    case kArm64CompressSigned:   // Fall through.
-    case kArm64CompressPointer:  // Fall through.
-    case kArm64CompressAny: {
-      __ Uxtw(i.OutputRegister(), i.InputRegister(0));
-      break;
-    }
     case kArm64LdrS:
       __ Ldr(i.OutputDoubleRegister().S(), i.MemoryOperand());
       break;
@@ -1780,6 +1774,50 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
                i.InputSimd128Register(1).V##FORMAT()); \
     break;
 
+    case kArm64F64x2Splat: {
+      __ Dup(i.OutputSimd128Register().V2D(), i.InputSimd128Register(0).D(), 0);
+      break;
+    }
+    case kArm64F64x2ExtractLane: {
+      __ Mov(i.OutputSimd128Register().D(), i.InputSimd128Register(0).V2D(),
+             i.InputInt8(1));
+      break;
+    }
+    case kArm64F64x2ReplaceLane: {
+      VRegister dst = i.OutputSimd128Register().V2D(),
+                src1 = i.InputSimd128Register(0).V2D();
+      if (!dst.is(src1)) {
+        __ Mov(dst, src1);
+      }
+      __ Mov(dst, i.InputInt8(1), i.InputSimd128Register(2).V2D(), 0);
+      break;
+    }
+      SIMD_UNOP_CASE(kArm64F64x2Abs, Fabs, 2D);
+      SIMD_UNOP_CASE(kArm64F64x2Neg, Fneg, 2D);
+      SIMD_BINOP_CASE(kArm64F64x2Add, Fadd, 2D);
+      SIMD_BINOP_CASE(kArm64F64x2Sub, Fsub, 2D);
+      SIMD_BINOP_CASE(kArm64F64x2Mul, Fmul, 2D);
+      SIMD_BINOP_CASE(kArm64F64x2Div, Fdiv, 2D);
+      SIMD_BINOP_CASE(kArm64F64x2Min, Fmin, 2D);
+      SIMD_BINOP_CASE(kArm64F64x2Max, Fmax, 2D);
+      SIMD_BINOP_CASE(kArm64F64x2Eq, Fcmeq, 2D);
+    case kArm64F64x2Ne: {
+      VRegister dst = i.OutputSimd128Register().V2D();
+      __ Fcmeq(dst, i.InputSimd128Register(0).V2D(),
+               i.InputSimd128Register(1).V2D());
+      __ Mvn(dst, dst);
+      break;
+    }
+    case kArm64F64x2Lt: {
+      __ Fcmgt(i.OutputSimd128Register().V2D(), i.InputSimd128Register(1).V2D(),
+               i.InputSimd128Register(0).V2D());
+      break;
+    }
+    case kArm64F64x2Le: {
+      __ Fcmge(i.OutputSimd128Register().V2D(), i.InputSimd128Register(1).V2D(),
+               i.InputSimd128Register(0).V2D());
+      break;
+    }
     case kArm64F32x4Splat: {
       __ Dup(i.OutputSimd128Register().V4S(), i.InputSimd128Register(0).S(), 0);
       break;
@@ -1808,6 +1846,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       SIMD_BINOP_CASE(kArm64F32x4AddHoriz, Faddp, 4S);
       SIMD_BINOP_CASE(kArm64F32x4Sub, Fsub, 4S);
       SIMD_BINOP_CASE(kArm64F32x4Mul, Fmul, 4S);
+      SIMD_BINOP_CASE(kArm64F32x4Div, Fdiv, 4S);
      SIMD_BINOP_CASE(kArm64F32x4Min, Fmin, 4S);
       SIMD_BINOP_CASE(kArm64F32x4Max, Fmax, 4S);
       SIMD_BINOP_CASE(kArm64F32x4Eq, Fcmeq, 4S);
@@ -1828,6 +1867,62 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
                i.InputSimd128Register(0).V4S());
       break;
     }
+    case kArm64I64x2Splat: {
+      __ Dup(i.OutputSimd128Register().V2D(), i.InputRegister64(0));
+      break;
+    }
+    case kArm64I64x2ExtractLane: {
+      __ Mov(i.OutputRegister64(), i.InputSimd128Register(0).V2D(),
+             i.InputInt8(1));
+      break;
+    }
+    case kArm64I64x2ReplaceLane: {
+      VRegister dst = i.OutputSimd128Register().V2D(),
+                src1 = i.InputSimd128Register(0).V2D();
+      if (!dst.is(src1)) {
+        __ Mov(dst, src1);
+      }
+      __ Mov(dst, i.InputInt8(1), i.InputRegister64(2));
+      break;
+    }
+      SIMD_UNOP_CASE(kArm64I64x2Neg, Neg, 2D);
+    case kArm64I64x2Shl: {
+      VRegister tmp = i.TempSimd128Register(0);
+      __ Dup(tmp.V2D(), i.InputRegister64(1));
+      __ Sshl(i.OutputSimd128Register().V2D(), i.InputSimd128Register(0).V2D(),
+              tmp.V2D());
+      break;
+    }
+    case kArm64I64x2ShrS: {
+      VRegister tmp = i.TempSimd128Register(0);
+      __ Dup(tmp.V2D(), i.InputRegister64(1));
+      __ Neg(tmp.V2D(), tmp.V2D());
+      __ Sshl(i.OutputSimd128Register().V2D(), i.InputSimd128Register(0).V2D(),
+              tmp.V2D());
+      break;
+    }
+      SIMD_BINOP_CASE(kArm64I64x2Add, Add, 2D);
+      SIMD_BINOP_CASE(kArm64I64x2Sub, Sub, 2D);
+      SIMD_BINOP_CASE(kArm64I64x2Eq, Cmeq, 2D);
+    case kArm64I64x2Ne: {
+      VRegister dst = i.OutputSimd128Register().V2D();
+      __ Cmeq(dst, i.InputSimd128Register(0).V2D(),
+              i.InputSimd128Register(1).V2D());
+      __ Mvn(dst, dst);
+      break;
+    }
+      SIMD_BINOP_CASE(kArm64I64x2GtS, Cmgt, 2D);
+      SIMD_BINOP_CASE(kArm64I64x2GeS, Cmge, 2D);
+    case kArm64I64x2ShrU: {
+      VRegister tmp = i.TempSimd128Register(0);
+      __ Dup(tmp.V2D(), i.InputRegister64(1));
+      __ Neg(tmp.V2D(), tmp.V2D());
+      __ Ushl(i.OutputSimd128Register().V2D(), i.InputSimd128Register(0).V2D(),
+              tmp.V2D());
+      break;
+    }
+      SIMD_BINOP_CASE(kArm64I64x2GtU, Cmhi, 2D);
+      SIMD_BINOP_CASE(kArm64I64x2GeU, Cmhs, 2D);
     case kArm64I32x4Splat: {
       __ Dup(i.OutputSimd128Register().V4S(), i.InputRegister32(0));
       break;
@@ -1851,13 +1946,18 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       SIMD_WIDENING_UNOP_CASE(kArm64I32x4SConvertI16x8High, Sxtl2, 4S, 8H);
       SIMD_UNOP_CASE(kArm64I32x4Neg, Neg, 4S);
     case kArm64I32x4Shl: {
-      __ Shl(i.OutputSimd128Register().V4S(), i.InputSimd128Register(0).V4S(),
-             i.InputInt5(1));
+      VRegister tmp = i.TempSimd128Register(0);
+      __ Dup(tmp.V4S(), i.InputRegister32(1));
+      __ Sshl(i.OutputSimd128Register().V4S(), i.InputSimd128Register(0).V4S(),
+              tmp.V4S());
       break;
     }
     case kArm64I32x4ShrS: {
-      __ Sshr(i.OutputSimd128Register().V4S(), i.InputSimd128Register(0).V4S(),
-              i.InputInt5(1));
+      VRegister tmp = i.TempSimd128Register(0);
+      __ Dup(tmp.V4S(), i.InputRegister32(1));
+      __ Neg(tmp.V4S(), tmp.V4S());
+      __ Sshl(i.OutputSimd128Register().V4S(), i.InputSimd128Register(0).V4S(),
+              tmp.V4S());
       break;
     }
       SIMD_BINOP_CASE(kArm64I32x4Add, Add, 4S);
@@ -1880,8 +1980,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       SIMD_WIDENING_UNOP_CASE(kArm64I32x4UConvertI16x8Low, Uxtl, 4S, 4H);
       SIMD_WIDENING_UNOP_CASE(kArm64I32x4UConvertI16x8High, Uxtl2, 4S, 8H);
     case kArm64I32x4ShrU: {
-      __ Ushr(i.OutputSimd128Register().V4S(), i.InputSimd128Register(0).V4S(),
-              i.InputInt5(1));
+      VRegister tmp = i.TempSimd128Register(0);
+      __ Dup(tmp.V4S(), i.InputRegister32(1));
+      __ Neg(tmp.V4S(), tmp.V4S());
+      __ Ushl(i.OutputSimd128Register().V4S(), i.InputSimd128Register(0).V4S(),
+              tmp.V4S());
       break;
     }
       SIMD_BINOP_CASE(kArm64I32x4MinU, Umin, 4S);
@@ -1910,13 +2013,18 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       SIMD_WIDENING_UNOP_CASE(kArm64I16x8SConvertI8x16High, Sxtl2, 8H, 16B);
      SIMD_UNOP_CASE(kArm64I16x8Neg, Neg, 8H);
     case kArm64I16x8Shl: {
-      __ Shl(i.OutputSimd128Register().V8H(), i.InputSimd128Register(0).V8H(),
-             i.InputInt5(1));
+      VRegister tmp = i.TempSimd128Register(0);
+      __ Dup(tmp.V8H(), i.InputRegister32(1));
+      __ Sshl(i.OutputSimd128Register().V8H(), i.InputSimd128Register(0).V8H(),
+              tmp.V8H());
       break;
     }
     case kArm64I16x8ShrS: {
-      __ Sshr(i.OutputSimd128Register().V8H(), i.InputSimd128Register(0).V8H(),
-              i.InputInt5(1));
+      VRegister tmp = i.TempSimd128Register(0);
+      __ Dup(tmp.V8H(), i.InputRegister32(1));
+      __ Neg(tmp.V8H(), tmp.V8H());
+      __ Sshl(i.OutputSimd128Register().V8H(), i.InputSimd128Register(0).V8H(),
+              tmp.V8H());
       break;
     }
     case kArm64I16x8SConvertI32x4: {
@@ -1961,8 +2069,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       break;
     }
     case kArm64I16x8ShrU: {
-      __ Ushr(i.OutputSimd128Register().V8H(), i.InputSimd128Register(0).V8H(),
-              i.InputInt5(1));
+      VRegister tmp = i.TempSimd128Register(0);
+      __ Dup(tmp.V8H(), i.InputRegister32(1));
+      __ Neg(tmp.V8H(), tmp.V8H());
+      __ Ushl(i.OutputSimd128Register().V8H(), i.InputSimd128Register(0).V8H(),
+              tmp.V8H());
       break;
     }
     case kArm64I16x8UConvertI32x4: {
@@ -2005,13 +2116,18 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
     }
       SIMD_UNOP_CASE(kArm64I8x16Neg, Neg, 16B);
     case kArm64I8x16Shl: {
-      __ Shl(i.OutputSimd128Register().V16B(), i.InputSimd128Register(0).V16B(),
-             i.InputInt5(1));
+      VRegister tmp = i.TempSimd128Register(0);
+      __ Dup(tmp.V16B(), i.InputRegister32(1));
+      __ Sshl(i.OutputSimd128Register().V16B(),
+              i.InputSimd128Register(0).V16B(), tmp.V16B());
       break;
     }
     case kArm64I8x16ShrS: {
-      __ Sshr(i.OutputSimd128Register().V16B(),
-              i.InputSimd128Register(0).V16B(), i.InputInt5(1));
+      VRegister tmp = i.TempSimd128Register(0);
+      __ Dup(tmp.V16B(), i.InputRegister32(1));
+      __ Neg(tmp.V16B(), tmp.V16B());
+      __ Sshl(i.OutputSimd128Register().V16B(),
+              i.InputSimd128Register(0).V16B(), tmp.V16B());
       break;
     }
     case kArm64I8x16SConvertI16x8: {
@@ -2046,8 +2162,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       SIMD_BINOP_CASE(kArm64I8x16GtS, Cmgt, 16B);
       SIMD_BINOP_CASE(kArm64I8x16GeS, Cmge, 16B);
     case kArm64I8x16ShrU: {
-      __ Ushr(i.OutputSimd128Register().V16B(),
-              i.InputSimd128Register(0).V16B(), i.InputInt5(1));
+      VRegister tmp = i.TempSimd128Register(0);
+      __ Dup(tmp.V16B(), i.InputRegister32(1));
+      __ Neg(tmp.V16B(), tmp.V16B());
+      __ Ushl(i.OutputSimd128Register().V16B(),
+              i.InputSimd128Register(0).V16B(), tmp.V16B());
       break;
     }
     case kArm64I8x16UConvertI16x8: {
@@ -2192,7 +2311,17 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       SIMD_UNOP_CASE(kArm64S8x8Reverse, Rev64, 16B);
       SIMD_UNOP_CASE(kArm64S8x4Reverse, Rev32, 16B);
       SIMD_UNOP_CASE(kArm64S8x2Reverse, Rev16, 16B);
+    case kArm64S1x2AllTrue: {
+      UseScratchRegisterScope scope(tasm());
+      VRegister temp1 = scope.AcquireV(kFormat2D);
+      VRegister temp2 = scope.AcquireV(kFormatS);
+      __ Cmeq(temp1, i.InputSimd128Register(0).V2D(), 0);
+      __ Umaxv(temp2, temp1.V4S());
+      __ Umov(i.OutputRegister32(), temp2, 0);
+      __ Add(i.OutputRegister32(), i.OutputRegister32(), 1);
+      break;
+    }
 
 #define SIMD_REDUCE_OP_CASE(Op, Instr, format, FORMAT)     \
   case Op: {                                               \
     UseScratchRegisterScope scope(tasm());                 \
@@ -2203,6 +2332,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
     __ Cset(i.OutputRegister32(), ne);                     \
     break;                                                 \
   }
+      // for AnyTrue, the format does not matter, umaxv does not support 2D
+      SIMD_REDUCE_OP_CASE(kArm64S1x2AnyTrue, Umaxv, kFormatS, 4S);
       SIMD_REDUCE_OP_CASE(kArm64S1x4AnyTrue, Umaxv, kFormatS, 4S);
       SIMD_REDUCE_OP_CASE(kArm64S1x4AllTrue, Uminv, kFormatS, 4S);
       SIMD_REDUCE_OP_CASE(kArm64S1x8AnyTrue, Umaxv, kFormatH, 8H);
@@ -2669,6 +2800,11 @@ void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
 
 void CodeGenerator::FinishCode() { __ ForceConstantPoolEmissionWithoutJump(); }
 
+void CodeGenerator::PrepareForDeoptimizationExits(int deopt_count) {
+  __ ForceConstantPoolEmissionWithoutJump();
+  __ CheckVeneerPool(false, false, deopt_count * Deoptimizer::kDeoptExitSize);
+}
+
 void CodeGenerator::AssembleMove(InstructionOperand* source,
                                  InstructionOperand* destination) {
   Arm64OperandConverter g(this, nullptr);
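The kArm64S1x2AllTrue sequence above deserves a gloss, since Umaxv cannot reduce two 64-bit lanes directly (the same limitation the new S1x2AnyTrue comment notes). Cmeq against zero marks each all-zero input lane with all ones; viewing the result as four 32-bit lanes, Umaxv yields 0xFFFFFFFF exactly when some input lane was zero; adding 1 then wraps that to 0 and turns 0 into 1:

    // all_true(v) over 2 x 64-bit lanes, as emitted:
    //   t = (v == 0) per 64-bit lane   // all-ones exactly where a lane is 0
    //   m = umaxv(t as 4 x 32-bit)     // 0xFFFFFFFF if any lane was 0, else 0
    //   result = m + 1                 // 0xFFFFFFFF + 1 wraps to 0; 0 + 1 == 1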
diff --git a/deps/v8/src/compiler/backend/arm64/instruction-codes-arm64.h b/deps/v8/src/compiler/backend/arm64/instruction-codes-arm64.h
index 1c4c0e3335..4b56e402c1 100644
--- a/deps/v8/src/compiler/backend/arm64/instruction-codes-arm64.h
+++ b/deps/v8/src/compiler/backend/arm64/instruction-codes-arm64.h
@@ -168,11 +168,23 @@ namespace compiler {
   V(Arm64DecompressSigned)      \
   V(Arm64DecompressPointer)     \
   V(Arm64DecompressAny)         \
-  V(Arm64CompressSigned)        \
-  V(Arm64CompressPointer)       \
-  V(Arm64CompressAny)           \
   V(Arm64DmbIsh)                \
   V(Arm64DsbIsb)                \
+  V(Arm64F64x2Splat)            \
+  V(Arm64F64x2ExtractLane)      \
+  V(Arm64F64x2ReplaceLane)      \
+  V(Arm64F64x2Abs)              \
+  V(Arm64F64x2Neg)              \
+  V(Arm64F64x2Add)              \
+  V(Arm64F64x2Sub)              \
+  V(Arm64F64x2Mul)              \
+  V(Arm64F64x2Div)              \
+  V(Arm64F64x2Min)              \
+  V(Arm64F64x2Max)              \
+  V(Arm64F64x2Eq)               \
+  V(Arm64F64x2Ne)               \
+  V(Arm64F64x2Lt)               \
+  V(Arm64F64x2Le)               \
   V(Arm64F32x4Splat)            \
   V(Arm64F32x4ExtractLane)      \
   V(Arm64F32x4ReplaceLane)      \
@@ -186,12 +198,28 @@ namespace compiler {
   V(Arm64F32x4AddHoriz)         \
   V(Arm64F32x4Sub)              \
   V(Arm64F32x4Mul)              \
+  V(Arm64F32x4Div)              \
   V(Arm64F32x4Min)              \
   V(Arm64F32x4Max)              \
   V(Arm64F32x4Eq)               \
   V(Arm64F32x4Ne)               \
   V(Arm64F32x4Lt)               \
   V(Arm64F32x4Le)               \
+  V(Arm64I64x2Splat)            \
+  V(Arm64I64x2ExtractLane)      \
+  V(Arm64I64x2ReplaceLane)      \
+  V(Arm64I64x2Neg)              \
+  V(Arm64I64x2Shl)              \
+  V(Arm64I64x2ShrS)             \
+  V(Arm64I64x2Add)              \
+  V(Arm64I64x2Sub)              \
+  V(Arm64I64x2Eq)               \
+  V(Arm64I64x2Ne)               \
+  V(Arm64I64x2GtS)              \
+  V(Arm64I64x2GeS)              \
+  V(Arm64I64x2ShrU)             \
+  V(Arm64I64x2GtU)              \
+  V(Arm64I64x2GeU)              \
   V(Arm64I32x4Splat)            \
   V(Arm64I32x4ExtractLane)      \
   V(Arm64I32x4ReplaceLane)      \
@@ -310,6 +338,8 @@ namespace compiler {
   V(Arm64S8x8Reverse)           \
   V(Arm64S8x4Reverse)           \
   V(Arm64S8x2Reverse)           \
+  V(Arm64S1x2AnyTrue)           \
+  V(Arm64S1x2AllTrue)           \
   V(Arm64S1x4AnyTrue)           \
   V(Arm64S1x4AllTrue)           \
   V(Arm64S1x8AnyTrue)           \
diff --git a/deps/v8/src/compiler/backend/arm64/instruction-scheduler-arm64.cc b/deps/v8/src/compiler/backend/arm64/instruction-scheduler-arm64.cc
index 8344887ec2..7cba2d50ea 100644
--- a/deps/v8/src/compiler/backend/arm64/instruction-scheduler-arm64.cc
+++ b/deps/v8/src/compiler/backend/arm64/instruction-scheduler-arm64.cc
@@ -137,6 +137,21 @@ int InstructionScheduler::GetTargetInstructionFlags(
     case kArm64Float64MoveU64:
     case kArm64U64MoveFloat64:
     case kArm64Float64SilenceNaN:
+    case kArm64F64x2Splat:
+    case kArm64F64x2ExtractLane:
+    case kArm64F64x2ReplaceLane:
+    case kArm64F64x2Abs:
+    case kArm64F64x2Neg:
+    case kArm64F64x2Add:
+    case kArm64F64x2Sub:
+    case kArm64F64x2Mul:
+    case kArm64F64x2Div:
+    case kArm64F64x2Min:
+    case kArm64F64x2Max:
+    case kArm64F64x2Eq:
+    case kArm64F64x2Ne:
+    case kArm64F64x2Lt:
+    case kArm64F64x2Le:
     case kArm64F32x4Splat:
     case kArm64F32x4ExtractLane:
     case kArm64F32x4ReplaceLane:
@@ -150,12 +165,28 @@ int InstructionScheduler::GetTargetInstructionFlags(
     case kArm64F32x4AddHoriz:
     case kArm64F32x4Sub:
    case kArm64F32x4Mul:
+    case kArm64F32x4Div:
     case kArm64F32x4Min:
     case kArm64F32x4Max:
     case kArm64F32x4Eq:
     case kArm64F32x4Ne:
     case kArm64F32x4Lt:
     case kArm64F32x4Le:
+    case kArm64I64x2Splat:
+    case kArm64I64x2ExtractLane:
+    case kArm64I64x2ReplaceLane:
+    case kArm64I64x2Neg:
+    case kArm64I64x2Shl:
+    case kArm64I64x2ShrS:
+    case kArm64I64x2Add:
+    case kArm64I64x2Sub:
+    case kArm64I64x2Eq:
+    case kArm64I64x2Ne:
+    case kArm64I64x2GtS:
+    case kArm64I64x2GeS:
+    case kArm64I64x2ShrU:
+    case kArm64I64x2GtU:
+    case kArm64I64x2GeU:
     case kArm64I32x4Splat:
     case kArm64I32x4ExtractLane:
     case kArm64I32x4ReplaceLane:
@@ -274,6 +305,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
     case kArm64S8x8Reverse:
     case kArm64S8x4Reverse:
     case kArm64S8x2Reverse:
+    case kArm64S1x2AnyTrue:
+    case kArm64S1x2AllTrue:
     case kArm64S1x4AnyTrue:
     case kArm64S1x4AllTrue:
     case kArm64S1x8AnyTrue:
@@ -287,9 +320,6 @@ int InstructionScheduler::GetTargetInstructionFlags(
     case kArm64DecompressSigned:
     case kArm64DecompressPointer:
     case kArm64DecompressAny:
-    case kArm64CompressSigned:
-    case kArm64CompressPointer:
-    case kArm64CompressAny:
       return kNoOpcodeFlags;
 
     case kArm64LdrS:
diff --git a/deps/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc b/deps/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc
index a953e35a66..4abbd68c49 100644
--- a/deps/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc
+++ b/deps/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc
@@ -48,16 +48,6 @@ class Arm64OperandGenerator final : public OperandGenerator {
     return UseRegister(node);
   }
 
-  // Use the stack pointer if the node is LoadStackPointer, otherwise assign a
-  // register.
-  InstructionOperand UseRegisterOrStackPointer(Node* node, bool sp_allowed) {
-    if (sp_allowed && node->opcode() == IrOpcode::kLoadStackPointer)
-      return LocationOperand(LocationOperand::EXPLICIT,
-                             LocationOperand::REGISTER,
-                             MachineRepresentation::kWord64, sp.code());
-    return UseRegister(node);
-  }
-
   // Use the provided node if it has the required value, or create a
   // TempImmediate otherwise.
   InstructionOperand UseImmediateOrTemp(Node* node, int32_t value) {
@@ -160,6 +150,15 @@ void VisitRRR(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
                  g.UseRegister(node->InputAt(1)));
 }
 
+void VisitSimdShiftRRR(InstructionSelector* selector, ArchOpcode opcode,
+                       Node* node) {
+  Arm64OperandGenerator g(selector);
+  InstructionOperand temps[] = {g.TempSimd128Register()};
+  selector->Emit(opcode, g.DefineAsRegister(node),
+                 g.UseRegister(node->InputAt(0)),
+                 g.UseRegister(node->InputAt(1)), arraysize(temps), temps);
+}
+
 void VisitRRI(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
   Arm64OperandGenerator g(selector);
   int32_t imm = OpParameter<int32_t>(node->op());
@@ -554,23 +553,21 @@ void EmitLoad(InstructionSelector* selector, Node* node, InstructionCode opcode,
   // is used when we merge a conversion into the load.
   outputs[0] = g.DefineAsRegister(output == nullptr ? node : output);
 
-  if (selector->CanAddressRelativeToRootsRegister()) {
-    ExternalReferenceMatcher m(base);
-    if (m.HasValue() && g.IsIntegerConstant(index)) {
-      ptrdiff_t const delta =
-          g.GetIntegerConstantValue(index) +
-          TurboAssemblerBase::RootRegisterOffsetForExternalReference(
-              selector->isolate(), m.Value());
-      input_count = 1;
-      // Check that the delta is a 32-bit integer due to the limitations of
-      // immediate operands.
-      if (is_int32(delta)) {
-        inputs[0] = g.UseImmediate(static_cast<int32_t>(delta));
-        opcode |= AddressingModeField::encode(kMode_Root);
-        selector->Emit(opcode, arraysize(outputs), outputs, input_count,
-                       inputs);
-        return;
-      }
+  ExternalReferenceMatcher m(base);
+  if (m.HasValue() && g.IsIntegerConstant(index) &&
+      selector->CanAddressRelativeToRootsRegister(m.Value())) {
+    ptrdiff_t const delta =
+        g.GetIntegerConstantValue(index) +
+        TurboAssemblerBase::RootRegisterOffsetForExternalReference(
+            selector->isolate(), m.Value());
+    input_count = 1;
+    // Check that the delta is a 32-bit integer due to the limitations of
+    // immediate operands.
+    if (is_int32(delta)) {
+      inputs[0] = g.UseImmediate(static_cast<int32_t>(delta));
+      opcode |= AddressingModeField::encode(kMode_Root);
+      selector->Emit(opcode, arraysize(outputs), outputs, input_count, inputs);
+      return;
     }
   }
 
@@ -670,7 +667,8 @@ void InstructionSelector::VisitStore(Node* node) {
   MachineRepresentation rep = store_rep.representation();
 
   // TODO(arm64): I guess this could be done in a better way.
- if (write_barrier_kind != kNoWriteBarrier) { + if (write_barrier_kind != kNoWriteBarrier && + V8_LIKELY(!FLAG_disable_write_barriers)) { DCHECK(CanBeTaggedOrCompressedPointer(rep)); AddressingMode addressing_mode; InstructionOperand inputs[3]; @@ -1004,6 +1002,15 @@ void InstructionSelector::VisitWord64Shl(Node* node) { VisitRRO(this, kArm64Lsl, node, kShift64Imm); } +void InstructionSelector::VisitStackPointerGreaterThan( + Node* node, FlagsContinuation* cont) { + Node* const value = node->InputAt(0); + InstructionCode opcode = kArchStackPointerGreaterThan; + + Arm64OperandGenerator g(this); + EmitWithContinuation(opcode, g.UseRegister(value), cont); +} + namespace { bool TryEmitBitfieldExtract32(InstructionSelector* selector, Node* node) { @@ -1625,23 +1632,23 @@ void InstructionSelector::VisitChangeUint32ToUint64(Node* node) { } void InstructionSelector::VisitChangeTaggedToCompressed(Node* node) { - Arm64OperandGenerator g(this); - Node* value = node->InputAt(0); - Emit(kArm64CompressAny, g.DefineAsRegister(node), g.UseRegister(value)); + // The top 32 bits in the 64-bit register will be undefined, and + // must not be used by a dependent node. + EmitIdentity(node); } void InstructionSelector::VisitChangeTaggedPointerToCompressedPointer( Node* node) { - Arm64OperandGenerator g(this); - Node* value = node->InputAt(0); - Emit(kArm64CompressPointer, g.DefineAsRegister(node), g.UseRegister(value)); + // The top 32 bits in the 64-bit register will be undefined, and + // must not be used by a dependent node. + EmitIdentity(node); } void InstructionSelector::VisitChangeTaggedSignedToCompressedSigned( Node* node) { - Arm64OperandGenerator g(this); - Node* value = node->InputAt(0); - Emit(kArm64CompressSigned, g.DefineAsRegister(node), g.UseRegister(value)); + // The top 32 bits in the 64-bit register will be undefined, and + // must not be used by a dependent node. + EmitIdentity(node); } void InstructionSelector::VisitChangeCompressedToTagged(Node* node) { @@ -1826,26 +1833,25 @@ void VisitCompare(InstructionSelector* selector, InstructionCode opcode, // Shared routine for multiple word compare operations. void VisitWordCompare(InstructionSelector* selector, Node* node, InstructionCode opcode, FlagsContinuation* cont, - bool commutative, ImmediateMode immediate_mode) { + ImmediateMode immediate_mode) { Arm64OperandGenerator g(selector); + Node* left = node->InputAt(0); Node* right = node->InputAt(1); - if (right->opcode() == IrOpcode::kLoadStackPointer || + // If one of the two inputs is an immediate, make sure it's on the right. + if (!g.CanBeImmediate(right, immediate_mode) && g.CanBeImmediate(left, immediate_mode)) { - if (!commutative) cont->Commute(); + cont->Commute(); std::swap(left, right); } - // Match immediates on left or right side of comparison. 
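// The 'commutative' parameter is gone from VisitWordCompare: the routine now
// always canonicalizes an immediate onto the right-hand side and commutes the
// continuation's condition to compensate. A standalone model of that
// canonicalization; Cond and Commuted are simplified stand-ins for V8's
// FlagsCondition machinery.
#include <utility>

enum class Cond { kLessThan, kGreaterThan };

// Swapping comparison operands mirrors the predicate rather than negating it:
// a < b holds exactly when b > a.
Cond Commuted(Cond c) {
  return c == Cond::kLessThan ? Cond::kGreaterThan : Cond::kLessThan;
}

void PutImmediateOnRight(int& left, int& right, bool left_is_imm,
                         bool right_is_imm, Cond& cond) {
  if (!right_is_imm && left_is_imm) {
    std::swap(left, right);
    cond = Commuted(cond);
  }
}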
if (g.CanBeImmediate(right, immediate_mode)) { - VisitCompare(selector, opcode, - g.UseRegisterOrStackPointer(left, opcode == kArm64Cmp), - g.UseImmediate(right), cont); + VisitCompare(selector, opcode, g.UseRegister(left), g.UseImmediate(right), + cont); } else { - VisitCompare(selector, opcode, - g.UseRegisterOrStackPointer(left, opcode == kArm64Cmp), - g.UseRegister(right), cont); + VisitCompare(selector, opcode, g.UseRegister(left), g.UseRegister(right), + cont); } } @@ -2370,8 +2376,7 @@ void InstructionSelector::VisitWordCompareZero(Node* user, Node* value, if (m.right().Is(0)) { Node* const left = m.left().node(); if (CanCover(value, left) && left->opcode() == IrOpcode::kWord64And) { - return VisitWordCompare(this, left, kArm64Tst, cont, true, - kLogical64Imm); + return VisitWordCompare(this, left, kArm64Tst, cont, kLogical64Imm); } // Merge the Word64Equal(x, 0) comparison into a cbz instruction. if ((cont->IsBranch() || cont->IsDeoptimize()) && @@ -2381,25 +2386,20 @@ void InstructionSelector::VisitWordCompareZero(Node* user, Node* value, return; } } - return VisitWordCompare(this, value, kArm64Cmp, cont, false, - kArithmeticImm); + return VisitWordCompare(this, value, kArm64Cmp, cont, kArithmeticImm); } case IrOpcode::kInt64LessThan: cont->OverwriteAndNegateIfEqual(kSignedLessThan); - return VisitWordCompare(this, value, kArm64Cmp, cont, false, - kArithmeticImm); + return VisitWordCompare(this, value, kArm64Cmp, cont, kArithmeticImm); case IrOpcode::kInt64LessThanOrEqual: cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual); - return VisitWordCompare(this, value, kArm64Cmp, cont, false, - kArithmeticImm); + return VisitWordCompare(this, value, kArm64Cmp, cont, kArithmeticImm); case IrOpcode::kUint64LessThan: cont->OverwriteAndNegateIfEqual(kUnsignedLessThan); - return VisitWordCompare(this, value, kArm64Cmp, cont, false, - kArithmeticImm); + return VisitWordCompare(this, value, kArm64Cmp, cont, kArithmeticImm); case IrOpcode::kUint64LessThanOrEqual: cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual); - return VisitWordCompare(this, value, kArm64Cmp, cont, false, - kArithmeticImm); + return VisitWordCompare(this, value, kArm64Cmp, cont, kArithmeticImm); case IrOpcode::kFloat32Equal: cont->OverwriteAndNegateIfEqual(kEqual); return VisitFloat32Compare(this, value, cont); @@ -2461,16 +2461,16 @@ void InstructionSelector::VisitWordCompareZero(Node* user, Node* value, } break; case IrOpcode::kInt32Add: - return VisitWordCompare(this, value, kArm64Cmn32, cont, true, - kArithmeticImm); + return VisitWordCompare(this, value, kArm64Cmn32, cont, kArithmeticImm); case IrOpcode::kInt32Sub: return VisitWord32Compare(this, value, cont); case IrOpcode::kWord32And: - return VisitWordCompare(this, value, kArm64Tst32, cont, true, - kLogical32Imm); + return VisitWordCompare(this, value, kArm64Tst32, cont, kLogical32Imm); case IrOpcode::kWord64And: - return VisitWordCompare(this, value, kArm64Tst, cont, true, - kLogical64Imm); + return VisitWordCompare(this, value, kArm64Tst, cont, kLogical64Imm); + case IrOpcode::kStackPointerGreaterThan: + cont->OverwriteAndNegateIfEqual(kStackPointerGreaterThanCondition); + return VisitStackPointerGreaterThan(value, cont); default: break; } @@ -2530,7 +2530,7 @@ void InstructionSelector::VisitWord32Equal(Node* const node) { case IrOpcode::kWord32And: return VisitWord32Compare(this, node, &cont); case IrOpcode::kInt32Sub: - return VisitWordCompare(this, value, kArm64Cmp32, &cont, false, + return VisitWordCompare(this, value, kArm64Cmp32, &cont, 
kArithmeticImm); case IrOpcode::kWord32Equal: { // Word32Equal(Word32Equal(x, y), 0) => Word32Compare(x, y, ne). @@ -2587,15 +2587,14 @@ void InstructionSelector::VisitWord64Equal(Node* const node) { if (CanCover(user, value)) { switch (value->opcode()) { case IrOpcode::kWord64And: - return VisitWordCompare(this, value, kArm64Tst, &cont, true, - kLogical64Imm); + return VisitWordCompare(this, value, kArm64Tst, &cont, kLogical64Imm); default: break; } return VisitWord64Test(this, value, &cont); } } - VisitWordCompare(this, node, kArm64Cmp, &cont, false, kArithmeticImm); + VisitWordCompare(this, node, kArm64Cmp, &cont, kArithmeticImm); } void InstructionSelector::VisitInt32AddWithOverflow(Node* node) { @@ -2653,24 +2652,24 @@ void InstructionSelector::VisitInt64SubWithOverflow(Node* node) { void InstructionSelector::VisitInt64LessThan(Node* node) { FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node); - VisitWordCompare(this, node, kArm64Cmp, &cont, false, kArithmeticImm); + VisitWordCompare(this, node, kArm64Cmp, &cont, kArithmeticImm); } void InstructionSelector::VisitInt64LessThanOrEqual(Node* node) { FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThanOrEqual, node); - VisitWordCompare(this, node, kArm64Cmp, &cont, false, kArithmeticImm); + VisitWordCompare(this, node, kArm64Cmp, &cont, kArithmeticImm); } void InstructionSelector::VisitUint64LessThan(Node* node) { FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node); - VisitWordCompare(this, node, kArm64Cmp, &cont, false, kArithmeticImm); + VisitWordCompare(this, node, kArm64Cmp, &cont, kArithmeticImm); } void InstructionSelector::VisitUint64LessThanOrEqual(Node* node) { FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node); - VisitWordCompare(this, node, kArm64Cmp, &cont, false, kArithmeticImm); + VisitWordCompare(this, node, kArm64Cmp, &cont, kArithmeticImm); } void InstructionSelector::VisitFloat32Neg(Node* node) { @@ -3045,18 +3044,23 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) { } #define SIMD_TYPE_LIST(V) \ + V(F64x2) \ V(F32x4) \ + V(I64x2) \ V(I32x4) \ V(I16x8) \ V(I8x16) #define SIMD_UNOP_LIST(V) \ + V(F64x2Abs, kArm64F64x2Abs) \ + V(F64x2Neg, kArm64F64x2Neg) \ V(F32x4SConvertI32x4, kArm64F32x4SConvertI32x4) \ V(F32x4UConvertI32x4, kArm64F32x4UConvertI32x4) \ V(F32x4Abs, kArm64F32x4Abs) \ V(F32x4Neg, kArm64F32x4Neg) \ V(F32x4RecipApprox, kArm64F32x4RecipApprox) \ V(F32x4RecipSqrtApprox, kArm64F32x4RecipSqrtApprox) \ + V(I64x2Neg, kArm64I64x2Neg) \ V(I32x4SConvertF32x4, kArm64I32x4SConvertF32x4) \ V(I32x4SConvertI16x8Low, kArm64I32x4SConvertI16x8Low) \ V(I32x4SConvertI16x8High, kArm64I32x4SConvertI16x8High) \ @@ -3071,6 +3075,8 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) { V(I16x8UConvertI8x16High, kArm64I16x8UConvertI8x16High) \ V(I8x16Neg, kArm64I8x16Neg) \ V(S128Not, kArm64S128Not) \ + V(S1x2AnyTrue, kArm64S1x2AnyTrue) \ + V(S1x2AllTrue, kArm64S1x2AllTrue) \ V(S1x4AnyTrue, kArm64S1x4AnyTrue) \ V(S1x4AllTrue, kArm64S1x4AllTrue) \ V(S1x8AnyTrue, kArm64S1x8AnyTrue) \ @@ -3079,6 +3085,9 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) { V(S1x16AllTrue, kArm64S1x16AllTrue) #define SIMD_SHIFT_OP_LIST(V) \ + V(I64x2Shl) \ + V(I64x2ShrS) \ + V(I64x2ShrU) \ V(I32x4Shl) \ V(I32x4ShrS) \ V(I32x4ShrU) \ @@ -3090,16 +3099,35 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) { V(I8x16ShrU) #define SIMD_BINOP_LIST(V) \ + V(F64x2Add, kArm64F64x2Add) \ + V(F64x2Sub, 
kArm64F64x2Sub) \ + V(F64x2Mul, kArm64F64x2Mul) \ + V(F64x2Div, kArm64F64x2Div) \ + V(F64x2Min, kArm64F64x2Min) \ + V(F64x2Max, kArm64F64x2Max) \ + V(F64x2Eq, kArm64F64x2Eq) \ + V(F64x2Ne, kArm64F64x2Ne) \ + V(F64x2Lt, kArm64F64x2Lt) \ + V(F64x2Le, kArm64F64x2Le) \ V(F32x4Add, kArm64F32x4Add) \ V(F32x4AddHoriz, kArm64F32x4AddHoriz) \ V(F32x4Sub, kArm64F32x4Sub) \ V(F32x4Mul, kArm64F32x4Mul) \ + V(F32x4Div, kArm64F32x4Div) \ V(F32x4Min, kArm64F32x4Min) \ V(F32x4Max, kArm64F32x4Max) \ V(F32x4Eq, kArm64F32x4Eq) \ V(F32x4Ne, kArm64F32x4Ne) \ V(F32x4Lt, kArm64F32x4Lt) \ V(F32x4Le, kArm64F32x4Le) \ + V(I64x2Add, kArm64I64x2Add) \ + V(I64x2Sub, kArm64I64x2Sub) \ + V(I64x2Eq, kArm64I64x2Eq) \ + V(I64x2Ne, kArm64I64x2Ne) \ + V(I64x2GtS, kArm64I64x2GtS) \ + V(I64x2GeS, kArm64I64x2GeS) \ + V(I64x2GtU, kArm64I64x2GtU) \ + V(I64x2GeU, kArm64I64x2GeU) \ V(I32x4Add, kArm64I32x4Add) \ V(I32x4AddHoriz, kArm64I32x4AddHoriz) \ V(I32x4Sub, kArm64I32x4Sub) \ @@ -3194,7 +3222,7 @@ SIMD_UNOP_LIST(SIMD_VISIT_UNOP) #define SIMD_VISIT_SHIFT_OP(Name) \ void InstructionSelector::Visit##Name(Node* node) { \ - VisitRRI(this, kArm64##Name, node); \ + VisitSimdShiftRRR(this, kArm64##Name, node); \ } SIMD_SHIFT_OP_LIST(SIMD_VISIT_SHIFT_OP) #undef SIMD_VISIT_SHIFT_OP diff --git a/deps/v8/src/compiler/backend/code-generator-impl.h b/deps/v8/src/compiler/backend/code-generator-impl.h index 75f8e70203..2bfb009980 100644 --- a/deps/v8/src/compiler/backend/code-generator-impl.h +++ b/deps/v8/src/compiler/backend/code-generator-impl.h @@ -116,6 +116,10 @@ class InstructionOperandConverter { return ToSimd128Register(instr_->Output()); } + Simd128Register TempSimd128Register(size_t index) { + return ToSimd128Register(instr_->TempAt(index)); + } + // -- Conversions for operands ----------------------------------------------- Label* ToLabel(InstructionOperand* op) { @@ -176,20 +180,55 @@ class InstructionOperandConverter { Instruction* instr_; }; -// Eager deoptimization exit. +// Deoptimization exit. class DeoptimizationExit : public ZoneObject { public: - explicit DeoptimizationExit(int deoptimization_id, SourcePosition pos) - : deoptimization_id_(deoptimization_id), pos_(pos) {} - - int deoptimization_id() const { return deoptimization_id_; } - Label* label() { return &label_; } + explicit DeoptimizationExit(SourcePosition pos, BailoutId bailout_id, + int translation_id, int pc_offset, + DeoptimizeKind kind, DeoptimizeReason reason) + : deoptimization_id_(kNoDeoptIndex), + pos_(pos), + bailout_id_(bailout_id), + translation_id_(translation_id), + pc_offset_(pc_offset), + kind_(kind), + reason_(reason), + emitted_(false) {} + + bool has_deoptimization_id() const { + return deoptimization_id_ != kNoDeoptIndex; + } + int deoptimization_id() const { + DCHECK(has_deoptimization_id()); + return deoptimization_id_; + } + void set_deoptimization_id(int deoptimization_id) { + deoptimization_id_ = deoptimization_id; + } SourcePosition pos() const { return pos_; } + Label* label() { return &label_; } + BailoutId bailout_id() const { return bailout_id_; } + int translation_id() const { return translation_id_; } + int pc_offset() const { return pc_offset_; } + DeoptimizeKind kind() const { return kind_; } + DeoptimizeReason reason() const { return reason_; } + // Returns whether the deopt exit has already been emitted. Most deopt exits + // are emitted contiguously at the end of the code, but unconditional deopt + // exits (kArchDeoptimize) may be inlined where they are encountered. 
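// DeoptimizationExit now subsumes the DeoptimizationState class removed from
// code-generator.h below: it owns the bailout id, translation index, pc
// offset, kind and reason, and its deoptimization id can be assigned after
// construction. A standalone model of the deferred numbering; kNoId plays the
// role of kNoDeoptIndex and the types are simplified.
struct ExitModel {
  static constexpr int kNoId = -1;
  int id = kNoId;
  bool has_id() const { return id != kNoId; }
};

// With fixed-size deopt exits the ids are handed out in emission order at the
// end of code generation; otherwise BuildTranslation numbers each exit as it
// is created. Either way an exit is numbered exactly once.
void NumberExit(ExitModel& exit, int& next_id) {
  if (!exit.has_id()) exit.id = next_id++;
}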
+ bool emitted() const { return emitted_; } + void set_emitted() { emitted_ = true; } private: - int const deoptimization_id_; + static const int kNoDeoptIndex = kMaxInt16 + 1; + int deoptimization_id_; + const SourcePosition pos_; Label label_; - SourcePosition const pos_; + const BailoutId bailout_id_; + const int translation_id_; + const int pc_offset_; + const DeoptimizeKind kind_; + const DeoptimizeReason reason_; + bool emitted_; }; // Generator for out-of-line code that is emitted after the main code is done. diff --git a/deps/v8/src/compiler/backend/code-generator.cc b/deps/v8/src/compiler/backend/code-generator.cc index 9ce92dadaa..e7702bcdf6 100644 --- a/deps/v8/src/compiler/backend/code-generator.cc +++ b/deps/v8/src/compiler/backend/code-generator.cc @@ -47,7 +47,8 @@ CodeGenerator::CodeGenerator( Isolate* isolate, base::Optional<OsrHelper> osr_helper, int start_source_position, JumpOptimizationInfo* jump_opt, PoisoningMitigationLevel poisoning_level, const AssemblerOptions& options, - int32_t builtin_index, std::unique_ptr<AssemblerBuffer> buffer) + int32_t builtin_index, size_t max_unoptimized_frame_height, + std::unique_ptr<AssemblerBuffer> buffer) : zone_(codegen_zone), isolate_(isolate), frame_access_state_(nullptr), @@ -64,9 +65,9 @@ CodeGenerator::CodeGenerator( safepoints_(zone()), handlers_(zone()), deoptimization_exits_(zone()), - deoptimization_states_(zone()), deoptimization_literals_(zone()), translations_(zone()), + max_unoptimized_frame_height_(max_unoptimized_frame_height), caller_registers_saved_(false), jump_tables_(nullptr), ools_(nullptr), @@ -91,6 +92,7 @@ CodeGenerator::CodeGenerator( code_kind == Code::WASM_TO_CAPI_FUNCTION || code_kind == Code::WASM_TO_JS_FUNCTION || code_kind == Code::WASM_INTERPRETER_ENTRY || + code_kind == Code::JS_TO_WASM_FUNCTION || (Builtins::IsBuiltinId(builtin_index) && Builtins::IsWasmRuntimeStub(builtin_index))) { tasm_.set_abort_hard(true); @@ -114,20 +116,22 @@ void CodeGenerator::CreateFrameAccessState(Frame* frame) { } CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall( - int deoptimization_id, SourcePosition pos) { + DeoptimizationExit* exit) { + int deoptimization_id = exit->deoptimization_id(); if (deoptimization_id > Deoptimizer::kMaxNumberOfEntries) { return kTooManyDeoptimizationBailouts; } - DeoptimizeKind deopt_kind = GetDeoptimizationKind(deoptimization_id); - DeoptimizeReason deoptimization_reason = - GetDeoptimizationReason(deoptimization_id); + DeoptimizeKind deopt_kind = exit->kind(); + DeoptimizeReason deoptimization_reason = exit->reason(); Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(tasm()->isolate(), deopt_kind); if (info()->is_source_positions_enabled()) { - tasm()->RecordDeoptReason(deoptimization_reason, pos, deoptimization_id); + tasm()->RecordDeoptReason(deoptimization_reason, exit->pos(), + deoptimization_id); } tasm()->CallForDeoptimization(deopt_entry, deoptimization_id); + exit->set_emitted(); return kSuccess; } @@ -146,7 +150,7 @@ void CodeGenerator::AssembleCode() { if (info->is_source_positions_enabled()) { AssembleSourcePosition(start_source_position()); } - + offsets_info_.code_start_register_check = tasm()->pc_offset(); // Check that {kJavaScriptCallCodeStartRegister} has been set correctly. 
if (FLAG_debug_code && (info->code_kind() == Code::OPTIMIZED_FUNCTION || info->code_kind() == Code::BYTECODE_HANDLER)) { @@ -154,6 +158,7 @@ void CodeGenerator::AssembleCode() { AssembleCodeStartRegisterCheck(); } + offsets_info_.deopt_check = tasm()->pc_offset(); // We want to bailout only from JS functions, which are the only ones // that are optimized. if (info->IsOptimizing()) { @@ -162,6 +167,7 @@ void CodeGenerator::AssembleCode() { BailoutIfDeoptimized(); } + offsets_info_.init_poison = tasm()->pc_offset(); InitializeSpeculationPoison(); // Define deoptimization literals for all inlined functions. @@ -191,10 +197,10 @@ void CodeGenerator::AssembleCode() { if (info->trace_turbo_json_enabled()) { block_starts_.assign(instructions()->instruction_blocks().size(), -1); - instr_starts_.assign(instructions()->instructions().size(), -1); + instr_starts_.assign(instructions()->instructions().size(), {}); } - // Assemble instructions in assembly order. + offsets_info_.blocks_start = tasm()->pc_offset(); for (const InstructionBlock* block : instructions()->ao_blocks()) { // Align loop headers on vendor recommended boundaries. if (block->ShouldAlign() && !tasm()->jump_optimization_info()) { @@ -252,6 +258,7 @@ void CodeGenerator::AssembleCode() { } // Assemble all out-of-line code. + offsets_info_.out_of_line_code = tasm()->pc_offset(); if (ools_) { tasm()->RecordComment("-- Out of line code --"); for (OutOfLineCode* ool = ools_; ool; ool = ool->next()) { @@ -266,28 +273,45 @@ void CodeGenerator::AssembleCode() { // The test regress/regress-259 is an example of where we need it. tasm()->nop(); + // For some targets, we must make sure that constant and veneer pools are + // emitted before emitting the deoptimization exits. + PrepareForDeoptimizationExits(static_cast<int>(deoptimization_exits_.size())); + + if (Deoptimizer::kSupportsFixedDeoptExitSize) { + deopt_exit_start_offset_ = tasm()->pc_offset(); + } + // Assemble deoptimization exits. + offsets_info_.deoptimization_exits = tasm()->pc_offset(); int last_updated = 0; for (DeoptimizationExit* exit : deoptimization_exits_) { + if (exit->emitted()) continue; + if (Deoptimizer::kSupportsFixedDeoptExitSize) { + exit->set_deoptimization_id(next_deoptimization_id_++); + } tasm()->bind(exit->label()); - int trampoline_pc = tasm()->pc_offset(); - int deoptimization_id = exit->deoptimization_id(); - DeoptimizationState* ds = deoptimization_states_[deoptimization_id]; - if (ds->kind() == DeoptimizeKind::kLazy) { + // UpdateDeoptimizationInfo expects lazy deopts to be visited in pc_offset + // order, which is always the case since they are added to + // deoptimization_exits_ in that order. + if (exit->kind() == DeoptimizeKind::kLazy) { + int trampoline_pc = tasm()->pc_offset(); last_updated = safepoints()->UpdateDeoptimizationInfo( - ds->pc_offset(), trampoline_pc, last_updated); + exit->pc_offset(), trampoline_pc, last_updated, + exit->deoptimization_id()); } - result_ = AssembleDeoptimizerCall(deoptimization_id, exit->pos()); + result_ = AssembleDeoptimizerCall(exit); if (result_ != kSuccess) return; } + offsets_info_.pools = tasm()->pc_offset(); // TODO(jgruber): Move all inlined metadata generation into a new, // architecture-independent version of FinishCode. Currently, this includes // the safepoint table, handler table, constant pool, and code comments, in // that order. FinishCode(); + offsets_info_.jump_tables = tasm()->pc_offset(); // Emit the jump tables. 
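// The deopt-exit loop above depends on deoptimization_exits_ being sorted by
// pc_offset so that UpdateDeoptimizationInfo can patch safepoints in a single
// forward pass via 'last_updated'. A standalone model of that linear merge;
// the data layout is illustrative, not the real safepoint table.
#include <vector>

struct LazyExit {
  int pc_offset;      // pc of the lazy deopt site
  int trampoline_pc;  // pc of the exit stub bound above
};

void PatchTrampolines(const std::vector<int>& safepoint_pcs,
                      std::vector<int>& trampolines,
                      const std::vector<LazyExit>& exits) {
  size_t last_updated = 0;
  for (const LazyExit& exit : exits) {
    // Valid only because both sequences are ordered by pc offset; an
    // out-of-order visit would make this scan skip entries.
    while (last_updated < safepoint_pcs.size() &&
           safepoint_pcs[last_updated] < exit.pc_offset) {
      ++last_updated;
    }
    if (last_updated < safepoint_pcs.size() &&
        safepoint_pcs[last_updated] == exit.pc_offset) {
      trampolines[last_updated] = exit.trampoline_pc;
    }
  }
}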
if (jump_tables_) { tasm()->Align(kSystemPointerSize); @@ -396,12 +420,12 @@ MaybeHandle<Code> CodeGenerator::FinalizeCode() { CodeDesc desc; tasm()->GetCode(isolate(), &desc, safepoints(), handler_table_offset_); -#if defined(V8_OS_WIN_X64) +#if defined(V8_OS_WIN64) if (Builtins::IsBuiltinId(info_->builtin_index())) { isolate_->SetBuiltinUnwindData(info_->builtin_index(), tasm()->GetUnwindInfo()); } -#endif +#endif // V8_OS_WIN64 if (unwinding_info_writer_.eh_frame_writer()) { unwinding_info_writer_.eh_frame_writer()->GetEhFrame(&desc); @@ -473,11 +497,7 @@ bool CodeGenerator::IsMaterializableFromRoot(Handle<HeapObject> object, CodeGenerator::CodeGenResult CodeGenerator::AssembleBlock( const InstructionBlock* block) { for (int i = block->code_start(); i < block->code_end(); ++i) { - if (info()->trace_turbo_json_enabled()) { - instr_starts_[i] = tasm()->pc_offset(); - } - Instruction* instr = instructions()->InstructionAt(i); - CodeGenResult result = AssembleInstruction(instr, block); + CodeGenResult result = AssembleInstruction(i, block); if (result != kSuccess) return result; } return kSuccess; @@ -631,7 +651,11 @@ RpoNumber CodeGenerator::ComputeBranchInfo(BranchInfo* branch, } CodeGenerator::CodeGenResult CodeGenerator::AssembleInstruction( - Instruction* instr, const InstructionBlock* block) { + int instruction_index, const InstructionBlock* block) { + Instruction* instr = instructions()->InstructionAt(instruction_index); + if (info()->trace_turbo_json_enabled()) { + instr_starts_[instruction_index].gap_pc_offset = tasm()->pc_offset(); + } int first_unused_stack_slot; FlagsMode mode = FlagsModeField::decode(instr->opcode()); if (mode != kFlags_trap) { @@ -649,10 +673,17 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleInstruction( if (instr->IsJump() && block->must_deconstruct_frame()) { AssembleDeconstructFrame(); } + if (info()->trace_turbo_json_enabled()) { + instr_starts_[instruction_index].arch_instr_pc_offset = tasm()->pc_offset(); + } // Assemble architecture-specific code for the instruction. CodeGenResult result = AssembleArchInstruction(instr); if (result != kSuccess) return result; + if (info()->trace_turbo_json_enabled()) { + instr_starts_[instruction_index].condition_pc_offset = tasm()->pc_offset(); + } + FlagsCondition condition = FlagsConditionField::decode(instr->opcode()); switch (mode) { case kFlags_branch: @@ -801,7 +832,7 @@ Handle<PodArray<InliningPosition>> CreateInliningPositions( Handle<DeoptimizationData> CodeGenerator::GenerateDeoptimizationData() { OptimizedCompilationInfo* info = this->info(); - int deopt_count = static_cast<int>(deoptimization_states_.size()); + int deopt_count = static_cast<int>(deoptimization_exits_.size()); if (deopt_count == 0 && !info->is_osr()) { return DeoptimizationData::Empty(isolate()); } @@ -816,6 +847,8 @@ Handle<DeoptimizationData> CodeGenerator::GenerateDeoptimizationData() { Smi::FromInt(static_cast<int>(inlined_function_count_))); data->SetOptimizationId(Smi::FromInt(info->optimization_id())); + data->SetDeoptExitStart(Smi::FromInt(deopt_exit_start_offset_)); + if (info->has_shared_info()) { data->SetSharedFunctionInfo(*info->shared_info()); } else { @@ -846,12 +879,13 @@ Handle<DeoptimizationData> CodeGenerator::GenerateDeoptimizationData() { // Populate deoptimization entries. 
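// GenerateDeoptimizationData now reads bailout id, translation index and pc
// straight off the DeoptimizationExit list, relying on the invariant that
// entry i received deoptimization id i during emission (the DCHECK_EQ in the
// loop that follows). A minimal standalone model of that consistency check:
#include <cassert>
#include <vector>

struct ExitRecord {
  int id;
  int bailout_id;
  int translation_id;
  int pc;
};

void CheckEmissionOrder(const std::vector<ExitRecord>& exits) {
  for (size_t i = 0; i < exits.size(); ++i) {
    // Mirrors DCHECK_EQ(i, deoptimization_exit->deoptimization_id()).
    assert(static_cast<int>(i) == exits[i].id);
  }
}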
for (int i = 0; i < deopt_count; i++) { - DeoptimizationState* deoptimization_state = deoptimization_states_[i]; - data->SetBytecodeOffset(i, deoptimization_state->bailout_id()); - CHECK(deoptimization_state); + DeoptimizationExit* deoptimization_exit = deoptimization_exits_[i]; + CHECK_NOT_NULL(deoptimization_exit); + DCHECK_EQ(i, deoptimization_exit->deoptimization_id()); + data->SetBytecodeOffset(i, deoptimization_exit->bailout_id()); data->SetTranslationIndex( - i, Smi::FromInt(deoptimization_state->translation_id())); - data->SetPc(i, Smi::FromInt(deoptimization_state->pc_offset())); + i, Smi::FromInt(deoptimization_exit->translation_id())); + data->SetPc(i, Smi::FromInt(deoptimization_exit->pc_offset())); } return data; @@ -885,13 +919,8 @@ void CodeGenerator::RecordCallPosition(Instruction* instr) { FrameStateDescriptor* descriptor = GetDeoptimizationEntry(instr, frame_state_offset).descriptor(); int pc_offset = tasm()->pc_offset(); - int deopt_state_id = BuildTranslation(instr, pc_offset, frame_state_offset, - descriptor->state_combine()); - - DeoptimizationExit* const exit = new (zone()) - DeoptimizationExit(deopt_state_id, current_source_position_); - deoptimization_exits_.push_back(exit); - safepoints()->RecordLazyDeoptimizationIndex(deopt_state_id); + BuildTranslation(instr, pc_offset, frame_state_offset, + descriptor->state_combine()); } } @@ -911,20 +940,6 @@ DeoptimizationEntry const& CodeGenerator::GetDeoptimizationEntry( return instructions()->GetDeoptimizationEntry(state_id); } -DeoptimizeKind CodeGenerator::GetDeoptimizationKind( - int deoptimization_id) const { - size_t const index = static_cast<size_t>(deoptimization_id); - DCHECK_LT(index, deoptimization_states_.size()); - return deoptimization_states_[index]->kind(); -} - -DeoptimizeReason CodeGenerator::GetDeoptimizationReason( - int deoptimization_id) const { - size_t const index = static_cast<size_t>(deoptimization_id); - DCHECK_LT(index, deoptimization_states_.size()); - return deoptimization_states_[index]->reason(); -} - void CodeGenerator::TranslateStateValueDescriptor( StateValueDescriptor* desc, StateValueList* nested, Translation* translation, InstructionOperandIterator* iter) { @@ -996,8 +1011,12 @@ void CodeGenerator::BuildTranslationForFrameStateDescriptor( } shared_info = info()->shared_info(); } - int shared_info_id = + + const BailoutId bailout_id = descriptor->bailout_id(); + const int shared_info_id = DefineDeoptimizationLiteral(DeoptimizationLiteral(shared_info)); + const unsigned int height = + static_cast<unsigned int>(descriptor->GetHeight()); switch (descriptor->type()) { case FrameStateType::kInterpretedFunction: { @@ -1007,45 +1026,30 @@ void CodeGenerator::BuildTranslationForFrameStateDescriptor( return_offset = static_cast<int>(state_combine.GetOffsetToPokeAt()); return_count = static_cast<int>(iter->instruction()->OutputCount()); } - translation->BeginInterpretedFrame( - descriptor->bailout_id(), shared_info_id, - static_cast<unsigned int>(descriptor->locals_count() + 1), - return_offset, return_count); + translation->BeginInterpretedFrame(bailout_id, shared_info_id, height, + return_offset, return_count); break; } case FrameStateType::kArgumentsAdaptor: - translation->BeginArgumentsAdaptorFrame( - shared_info_id, - static_cast<unsigned int>(descriptor->parameters_count())); + translation->BeginArgumentsAdaptorFrame(shared_info_id, height); break; case FrameStateType::kConstructStub: - DCHECK(descriptor->bailout_id().IsValidForConstructStub()); - translation->BeginConstructStubFrame( - 
descriptor->bailout_id(), shared_info_id, - static_cast<unsigned int>(descriptor->parameters_count() + 1)); + DCHECK(bailout_id.IsValidForConstructStub()); + translation->BeginConstructStubFrame(bailout_id, shared_info_id, height); break; case FrameStateType::kBuiltinContinuation: { - BailoutId bailout_id = descriptor->bailout_id(); - int parameter_count = - static_cast<unsigned int>(descriptor->parameters_count()); translation->BeginBuiltinContinuationFrame(bailout_id, shared_info_id, - parameter_count); + height); break; } case FrameStateType::kJavaScriptBuiltinContinuation: { - BailoutId bailout_id = descriptor->bailout_id(); - int parameter_count = - static_cast<unsigned int>(descriptor->parameters_count()); translation->BeginJavaScriptBuiltinContinuationFrame( - bailout_id, shared_info_id, parameter_count); + bailout_id, shared_info_id, height); break; } case FrameStateType::kJavaScriptBuiltinContinuationWithCatch: { - BailoutId bailout_id = descriptor->bailout_id(); - int parameter_count = - static_cast<unsigned int>(descriptor->parameters_count()); translation->BeginJavaScriptBuiltinContinuationWithCatchFrame( - bailout_id, shared_info_id, parameter_count); + bailout_id, shared_info_id, height); break; } } @@ -1053,9 +1057,9 @@ void CodeGenerator::BuildTranslationForFrameStateDescriptor( TranslateFrameStateDescriptorOperands(descriptor, iter, translation); } -int CodeGenerator::BuildTranslation(Instruction* instr, int pc_offset, - size_t frame_state_offset, - OutputFrameStateCombine state_combine) { +DeoptimizationExit* CodeGenerator::BuildTranslation( + Instruction* instr, int pc_offset, size_t frame_state_offset, + OutputFrameStateCombine state_combine) { DeoptimizationEntry const& entry = GetDeoptimizationEntry(instr, frame_state_offset); FrameStateDescriptor* const descriptor = entry.descriptor(); @@ -1068,21 +1072,24 @@ int CodeGenerator::BuildTranslation(Instruction* instr, int pc_offset, update_feedback_count, zone()); if (entry.feedback().IsValid()) { DeoptimizationLiteral literal = - DeoptimizationLiteral(entry.feedback().vector()); + DeoptimizationLiteral(entry.feedback().vector); int literal_id = DefineDeoptimizationLiteral(literal); - translation.AddUpdateFeedback(literal_id, entry.feedback().slot().ToInt()); + translation.AddUpdateFeedback(literal_id, entry.feedback().slot.ToInt()); } InstructionOperandIterator iter(instr, frame_state_offset); BuildTranslationForFrameStateDescriptor(descriptor, &iter, &translation, state_combine); - int deoptimization_id = static_cast<int>(deoptimization_states_.size()); + DeoptimizationExit* const exit = new (zone()) DeoptimizationExit( + current_source_position_, descriptor->bailout_id(), translation.index(), + pc_offset, entry.kind(), entry.reason()); - deoptimization_states_.push_back(new (zone()) DeoptimizationState( - descriptor->bailout_id(), translation.index(), pc_offset, entry.kind(), - entry.reason())); + if (!Deoptimizer::kSupportsFixedDeoptExitSize) { + exit->set_deoptimization_id(next_deoptimization_id_++); + } - return deoptimization_id; + deoptimization_exits_.push_back(exit); + return exit; } void CodeGenerator::AddTranslationForOperand(Translation* translation, @@ -1236,13 +1243,8 @@ void CodeGenerator::MarkLazyDeoptSite() { DeoptimizationExit* CodeGenerator::AddDeoptimizationExit( Instruction* instr, size_t frame_state_offset) { - int const deoptimization_id = BuildTranslation( - instr, -1, frame_state_offset, OutputFrameStateCombine::Ignore()); - - DeoptimizationExit* const exit = new (zone()) - 
DeoptimizationExit(deoptimization_id, current_source_position_); - deoptimization_exits_.push_back(exit); - return exit; + return BuildTranslation(instr, -1, frame_state_offset, + OutputFrameStateCombine::Ignore()); } void CodeGenerator::InitializeSpeculationPoison() { diff --git a/deps/v8/src/compiler/backend/code-generator.h b/deps/v8/src/compiler/backend/code-generator.h index 74dd90c5de..e9ebf67590 100644 --- a/deps/v8/src/compiler/backend/code-generator.h +++ b/deps/v8/src/compiler/backend/code-generator.h @@ -85,6 +85,25 @@ class DeoptimizationLiteral { const StringConstantBase* string_ = nullptr; }; +// These structs hold pc offsets for generated instructions and is only used +// when tracing for turbolizer is enabled. +struct TurbolizerCodeOffsetsInfo { + int code_start_register_check = -1; + int deopt_check = -1; + int init_poison = -1; + int blocks_start = -1; + int out_of_line_code = -1; + int deoptimization_exits = -1; + int pools = -1; + int jump_tables = -1; +}; + +struct TurbolizerInstructionStartInfo { + int gap_pc_offset = -1; + int arch_instr_pc_offset = -1; + int condition_pc_offset = -1; +}; + // Generates native code for a sequence of instructions. class V8_EXPORT_PRIVATE CodeGenerator final : public GapResolver::Assembler { public: @@ -96,6 +115,7 @@ class V8_EXPORT_PRIVATE CodeGenerator final : public GapResolver::Assembler { JumpOptimizationInfo* jump_opt, PoisoningMitigationLevel poisoning_level, const AssemblerOptions& options, int32_t builtin_index, + size_t max_unoptimized_frame_height, std::unique_ptr<AssemblerBuffer> = {}); // Generate native code. After calling AssembleCode, call FinalizeCode to @@ -139,7 +159,13 @@ class V8_EXPORT_PRIVATE CodeGenerator final : public GapResolver::Assembler { size_t GetHandlerTableOffset() const { return handler_table_offset_; } const ZoneVector<int>& block_starts() const { return block_starts_; } - const ZoneVector<int>& instr_starts() const { return instr_starts_; } + const ZoneVector<TurbolizerInstructionStartInfo>& instr_starts() const { + return instr_starts_; + } + + const TurbolizerCodeOffsetsInfo& offsets_info() const { + return offsets_info_; + } static constexpr int kBinarySearchSwitchMinimalCases = 4; @@ -182,7 +208,7 @@ class V8_EXPORT_PRIVATE CodeGenerator final : public GapResolver::Assembler { void GenerateSpeculationPoisonFromCodeStartRegister(); // Assemble code for the specified instruction. - CodeGenResult AssembleInstruction(Instruction* instr, + CodeGenResult AssembleInstruction(int instruction_index, const InstructionBlock* block); void AssembleGaps(Instruction* instr); @@ -199,8 +225,7 @@ class V8_EXPORT_PRIVATE CodeGenerator final : public GapResolver::Assembler { // Determines how to call helper stubs depending on the code kind. StubCallMode DetermineStubCallMode() const; - CodeGenResult AssembleDeoptimizerCall(int deoptimization_id, - SourcePosition pos); + CodeGenResult AssembleDeoptimizerCall(DeoptimizationExit* exit); // =========================================================================== // ============= Architecture-specific code generation methods. 
============== @@ -342,11 +367,9 @@ class V8_EXPORT_PRIVATE CodeGenerator final : public GapResolver::Assembler { int DefineDeoptimizationLiteral(DeoptimizationLiteral literal); DeoptimizationEntry const& GetDeoptimizationEntry(Instruction* instr, size_t frame_state_offset); - DeoptimizeKind GetDeoptimizationKind(int deoptimization_id) const; - DeoptimizeReason GetDeoptimizationReason(int deoptimization_id) const; - int BuildTranslation(Instruction* instr, int pc_offset, - size_t frame_state_offset, - OutputFrameStateCombine state_combine); + DeoptimizationExit* BuildTranslation(Instruction* instr, int pc_offset, + size_t frame_state_offset, + OutputFrameStateCombine state_combine); void BuildTranslationForFrameStateDescriptor( FrameStateDescriptor* descriptor, InstructionOperandIterator* iter, Translation* translation, OutputFrameStateCombine state_combine); @@ -361,35 +384,12 @@ class V8_EXPORT_PRIVATE CodeGenerator final : public GapResolver::Assembler { InstructionOperand* op, MachineType type); void MarkLazyDeoptSite(); + void PrepareForDeoptimizationExits(int deopt_count); DeoptimizationExit* AddDeoptimizationExit(Instruction* instr, size_t frame_state_offset); // =========================================================================== - class DeoptimizationState final : public ZoneObject { - public: - DeoptimizationState(BailoutId bailout_id, int translation_id, int pc_offset, - DeoptimizeKind kind, DeoptimizeReason reason) - : bailout_id_(bailout_id), - translation_id_(translation_id), - pc_offset_(pc_offset), - kind_(kind), - reason_(reason) {} - - BailoutId bailout_id() const { return bailout_id_; } - int translation_id() const { return translation_id_; } - int pc_offset() const { return pc_offset_; } - DeoptimizeKind kind() const { return kind_; } - DeoptimizeReason reason() const { return reason_; } - - private: - BailoutId bailout_id_; - int translation_id_; - int pc_offset_; - DeoptimizeKind kind_; - DeoptimizeReason reason_; - }; - struct HandlerInfo { Label* handler; int pc_offset; @@ -414,14 +414,19 @@ class V8_EXPORT_PRIVATE CodeGenerator final : public GapResolver::Assembler { GapResolver resolver_; SafepointTableBuilder safepoints_; ZoneVector<HandlerInfo> handlers_; + int next_deoptimization_id_ = 0; + int deopt_exit_start_offset_ = 0; ZoneDeque<DeoptimizationExit*> deoptimization_exits_; - ZoneDeque<DeoptimizationState*> deoptimization_states_; ZoneDeque<DeoptimizationLiteral> deoptimization_literals_; size_t inlined_function_count_ = 0; TranslationBuffer translations_; int handler_table_offset_ = 0; int last_lazy_deopt_pc_ = 0; + // The maximal combined height of all frames produced upon deoptimization. + // Applied as an offset to the first stack check of an optimized function. 
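// In effect the function's first stack check becomes
//   sp > limit + max_unoptimized_frame_height
// so that a deoptimization taken at that check still has room to build the
// unoptimized frames it replaces. A scalar model of the guard; the exact
// operand encoding is target-specific and the names here are illustrative.
#include <cstddef>
#include <cstdint>

bool EntryStackCheckPasses(uintptr_t sp, uintptr_t stack_limit,
                           size_t max_unoptimized_frame_height) {
  // Reserving the maximal combined frame height makes the entry check
  // conservative for every deopt point dominated by it.
  return sp > stack_limit + max_unoptimized_frame_height;
}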
+ const size_t max_unoptimized_frame_height_; + // kArchCallCFunction could be reached either: // kArchCallCFunction; // or: @@ -444,7 +449,8 @@ class V8_EXPORT_PRIVATE CodeGenerator final : public GapResolver::Assembler { CodeGenResult result_; PoisoningMitigationLevel poisoning_level_; ZoneVector<int> block_starts_; - ZoneVector<int> instr_starts_; + TurbolizerCodeOffsetsInfo offsets_info_; + ZoneVector<TurbolizerInstructionStartInfo> instr_starts_; }; } // namespace compiler diff --git a/deps/v8/src/compiler/backend/frame-elider.cc b/deps/v8/src/compiler/backend/frame-elider.cc index 2167d0abaa..064501b097 100644 --- a/deps/v8/src/compiler/backend/frame-elider.cc +++ b/deps/v8/src/compiler/backend/frame-elider.cc @@ -24,7 +24,7 @@ void FrameElider::MarkBlocks() { for (int i = block->code_start(); i < block->code_end(); ++i) { const Instruction* instr = InstructionAt(i); if (instr->IsCall() || instr->IsDeoptimizeCall() || - instr->arch_opcode() == ArchOpcode::kArchStackPointer || + instr->arch_opcode() == ArchOpcode::kArchStackPointerGreaterThan || instr->arch_opcode() == ArchOpcode::kArchFramePointer) { block->mark_needs_frame(); break; diff --git a/deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc b/deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc index ed4be7a47c..4542da643b 100644 --- a/deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc +++ b/deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc @@ -165,6 +165,11 @@ class IA32OperandConverter : public InstructionOperandConverter { Constant ctant = ToConstant(instr_->InputAt(NextOffset(offset))); return Operand(ctant.ToInt32(), ctant.rmode()); } + case kMode_Root: { + Register base = kRootRegister; + int32_t disp = InputInt32(NextOffset(offset)); + return Operand(base, disp); + } case kMode_None: UNREACHABLE(); } @@ -205,10 +210,18 @@ class IA32OperandConverter : public InstructionOperandConverter { namespace { +bool HasAddressingMode(Instruction* instr) { + return instr->addressing_mode() != kMode_None; +} + bool HasImmediateInput(Instruction* instr, size_t index) { return instr->InputAt(index)->IsImmediate(); } +bool HasRegisterInput(Instruction* instr, size_t index) { + return instr->InputAt(index)->IsRegister(); +} + class OutOfLineLoadFloat32NaN final : public OutOfLineCode { public: OutOfLineLoadFloat32NaN(CodeGenerator* gen, XMMRegister result) @@ -256,6 +269,8 @@ class OutOfLineTruncateDoubleToI final : public OutOfLineCode { // Just encode the stub index. This will be patched when the code // is added to the native module and copied into wasm code space. 
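// The out-of-line DoubleToI call now has three dispatch tiers, tried in the
// order shown in the branch below. A standalone model of the selection; the
// enum is illustrative, and the predicates correspond to stub_mode_ and
// tasm()->options().inline_offheap_trampolines.
enum class DoubleToICallTier {
  kWasmStubCall,             // relocatable stub id, patched at module copy
  kInlineOffHeapTrampoline,  // direct CallBuiltin, no embedded Code object
  kCodeObject                // classic BUILTIN_CODE call through the isolate
};

DoubleToICallTier SelectDoubleToITier(bool is_wasm_stub_mode,
                                      bool inline_offheap_trampolines) {
  if (is_wasm_stub_mode) return DoubleToICallTier::kWasmStubCall;
  if (inline_offheap_trampolines) {
    return DoubleToICallTier::kInlineOffHeapTrampoline;
  }
  return DoubleToICallTier::kCodeObject;
}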
__ wasm_call(wasm::WasmCode::kDoubleToI, RelocInfo::WASM_STUB_CALL); + } else if (tasm()->options().inline_offheap_trampolines) { + __ CallBuiltin(Builtins::kDoubleToI); } else { __ Call(BUILTIN_CODE(isolate_, DoubleToI), RelocInfo::CODE_TARGET); } @@ -326,31 +341,31 @@ class OutOfLineRecordWrite final : public OutOfLineCode { } // namespace -#define ASSEMBLE_COMPARE(asm_instr) \ - do { \ - if (AddressingModeField::decode(instr->opcode()) != kMode_None) { \ - size_t index = 0; \ - Operand left = i.MemoryOperand(&index); \ - if (HasImmediateInput(instr, index)) { \ - __ asm_instr(left, i.InputImmediate(index)); \ - } else { \ - __ asm_instr(left, i.InputRegister(index)); \ - } \ - } else { \ - if (HasImmediateInput(instr, 1)) { \ - if (instr->InputAt(0)->IsRegister()) { \ - __ asm_instr(i.InputRegister(0), i.InputImmediate(1)); \ - } else { \ - __ asm_instr(i.InputOperand(0), i.InputImmediate(1)); \ - } \ - } else { \ - if (instr->InputAt(1)->IsRegister()) { \ - __ asm_instr(i.InputRegister(0), i.InputRegister(1)); \ - } else { \ - __ asm_instr(i.InputRegister(0), i.InputOperand(1)); \ - } \ - } \ - } \ +#define ASSEMBLE_COMPARE(asm_instr) \ + do { \ + if (HasAddressingMode(instr)) { \ + size_t index = 0; \ + Operand left = i.MemoryOperand(&index); \ + if (HasImmediateInput(instr, index)) { \ + __ asm_instr(left, i.InputImmediate(index)); \ + } else { \ + __ asm_instr(left, i.InputRegister(index)); \ + } \ + } else { \ + if (HasImmediateInput(instr, 1)) { \ + if (HasRegisterInput(instr, 0)) { \ + __ asm_instr(i.InputRegister(0), i.InputImmediate(1)); \ + } else { \ + __ asm_instr(i.InputOperand(0), i.InputImmediate(1)); \ + } \ + } else { \ + if (HasRegisterInput(instr, 1)) { \ + __ asm_instr(i.InputRegister(0), i.InputRegister(1)); \ + } else { \ + __ asm_instr(i.InputRegister(0), i.InputOperand(1)); \ + } \ + } \ + } \ } while (0) #define ASSEMBLE_IEEE754_BINOP(name) \ @@ -382,19 +397,19 @@ class OutOfLineRecordWrite final : public OutOfLineCode { __ add(esp, Immediate(kDoubleSize)); \ } while (false) -#define ASSEMBLE_BINOP(asm_instr) \ - do { \ - if (AddressingModeField::decode(instr->opcode()) != kMode_None) { \ - size_t index = 1; \ - Operand right = i.MemoryOperand(&index); \ - __ asm_instr(i.InputRegister(0), right); \ - } else { \ - if (HasImmediateInput(instr, 1)) { \ - __ asm_instr(i.InputOperand(0), i.InputImmediate(1)); \ - } else { \ - __ asm_instr(i.InputRegister(0), i.InputOperand(1)); \ - } \ - } \ +#define ASSEMBLE_BINOP(asm_instr) \ + do { \ + if (HasAddressingMode(instr)) { \ + size_t index = 1; \ + Operand right = i.MemoryOperand(&index); \ + __ asm_instr(i.InputRegister(0), right); \ + } else { \ + if (HasImmediateInput(instr, 1)) { \ + __ asm_instr(i.InputOperand(0), i.InputImmediate(1)); \ + } else { \ + __ asm_instr(i.InputRegister(0), i.InputOperand(1)); \ + } \ + } \ } while (0) #define ASSEMBLE_ATOMIC_BINOP(bin_inst, mov_inst, cmpxchg_inst) \ @@ -431,9 +446,9 @@ class OutOfLineRecordWrite final : public OutOfLineCode { #define ASSEMBLE_MOVX(mov_instr) \ do { \ - if (instr->addressing_mode() != kMode_None) { \ + if (HasAddressingMode(instr)) { \ __ mov_instr(i.OutputRegister(), i.MemoryOperand()); \ - } else if (instr->InputAt(0)->IsRegister()) { \ + } else if (HasRegisterInput(instr, 0)) { \ __ mov_instr(i.OutputRegister(), i.InputRegister(0)); \ } else { \ __ mov_instr(i.OutputRegister(), i.InputOperand(0)); \ @@ -905,19 +920,15 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( // don't emit code for nops. 
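// The reworked ASSEMBLE_COMPARE above routes every operand-form decision
// through the new HasAddressingMode/HasRegisterInput helpers instead of
// decoding AddressingModeField inline at each use. A standalone model of the
// dispatch order the macro implements; the enum is illustrative.
enum class CompareForm {
  kMemoryLeft,      // memory operand folded into the compare
  kImmediateRight,  // reg/mem vs immediate
  kRegisterRight,   // reg vs reg
  kOperandRight     // reg vs spill slot
};

CompareForm ClassifyCompare(bool has_addressing_mode, bool input1_is_imm,
                            bool input1_is_reg) {
  if (has_addressing_mode) return CompareForm::kMemoryLeft;
  if (input1_is_imm) return CompareForm::kImmediateRight;
  return input1_is_reg ? CompareForm::kRegisterRight
                       : CompareForm::kOperandRight;
}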
break; case kArchDeoptimize: { - int deopt_state_id = + DeoptimizationExit* exit = BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore()); - CodeGenResult result = - AssembleDeoptimizerCall(deopt_state_id, current_source_position_); + CodeGenResult result = AssembleDeoptimizerCall(exit); if (result != kSuccess) return result; break; } case kArchRet: AssembleReturn(instr->InputAt(0)); break; - case kArchStackPointer: - __ mov(i.OutputRegister(), esp); - break; case kArchFramePointer: __ mov(i.OutputRegister(), ebp); break; @@ -928,6 +939,15 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( __ mov(i.OutputRegister(), ebp); } break; + case kArchStackPointerGreaterThan: { + constexpr size_t kValueIndex = 0; + if (HasAddressingMode(instr)) { + __ cmp(esp, i.MemoryOperand(kValueIndex)); + } else { + __ cmp(esp, i.InputRegister(kValueIndex)); + } + break; + } case kArchTruncateDoubleToI: { auto result = i.OutputRegister(); auto input = i.InputDoubleRegister(0); @@ -1115,7 +1135,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( // i.InputRegister(2) ... right low word. // i.InputRegister(3) ... right high word. bool use_temp = false; - if ((instr->InputAt(1)->IsRegister() && + if ((HasRegisterInput(instr, 1) && i.OutputRegister(0).code() == i.InputRegister(1).code()) || i.OutputRegister(0).code() == i.InputRegister(3).code()) { // We cannot write to the output register directly, because it would @@ -1140,7 +1160,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( // i.InputRegister(2) ... right low word. // i.InputRegister(3) ... right high word. bool use_temp = false; - if ((instr->InputAt(1)->IsRegister() && + if ((HasRegisterInput(instr, 1) && i.OutputRegister(0).code() == i.InputRegister(1).code()) || i.OutputRegister(0).code() == i.InputRegister(3).code()) { // We cannot write to the output register directly, because it would @@ -1671,7 +1691,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( } break; case kIA32BitcastIF: - if (instr->InputAt(0)->IsRegister()) { + if (HasRegisterInput(instr, 0)) { __ movd(i.OutputDoubleRegister(), i.InputRegister(0)); } else { __ movss(i.OutputDoubleRegister(), i.InputOperand(0)); @@ -1762,7 +1782,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( frame_access_state()->IncreaseSPDelta(kSimd128Size / kSystemPointerSize); break; case kIA32Push: - if (AddressingModeField::decode(instr->opcode()) != kMode_None) { + if (HasAddressingMode(instr)) { size_t index = 0; Operand operand = i.MemoryOperand(&index); __ push(operand); @@ -1984,6 +2004,17 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( i.InputOperand(1)); break; } + case kSSEF32x4Div: { + DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0)); + __ divps(i.OutputSimd128Register(), i.InputOperand(1)); + break; + } + case kAVXF32x4Div: { + CpuFeatureScope avx_scope(tasm(), AVX); + __ vdivps(i.OutputSimd128Register(), i.InputSimd128Register(0), + i.InputOperand(1)); + break; + } case kSSEF32x4Min: { XMMRegister src1 = i.InputSimd128Register(1), dst = i.OutputSimd128Register(); @@ -2180,24 +2211,30 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( } case kSSEI32x4Shl: { DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0)); - __ pslld(i.OutputSimd128Register(), i.InputInt8(1)); + XMMRegister tmp = i.TempSimd128Register(0); + __ movd(tmp, i.InputRegister(1)); + __ pslld(i.OutputSimd128Register(), tmp); break; } case 
kAVXI32x4Shl: { CpuFeatureScope avx_scope(tasm(), AVX); - __ vpslld(i.OutputSimd128Register(), i.InputSimd128Register(0), - i.InputInt8(1)); + XMMRegister tmp = i.TempSimd128Register(0); + __ movd(tmp, i.InputRegister(1)); + __ vpslld(i.OutputSimd128Register(), i.InputSimd128Register(0), tmp); break; } case kSSEI32x4ShrS: { DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0)); - __ psrad(i.OutputSimd128Register(), i.InputInt8(1)); + XMMRegister tmp = i.TempSimd128Register(0); + __ movd(tmp, i.InputRegister(1)); + __ psrad(i.OutputSimd128Register(), tmp); break; } case kAVXI32x4ShrS: { CpuFeatureScope avx_scope(tasm(), AVX); - __ vpsrad(i.OutputSimd128Register(), i.InputSimd128Register(0), - i.InputInt8(1)); + XMMRegister tmp = i.TempSimd128Register(0); + __ movd(tmp, i.InputRegister(1)); + __ vpsrad(i.OutputSimd128Register(), i.InputSimd128Register(0), tmp); break; } case kSSEI32x4Add: { @@ -2329,7 +2366,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0)); CpuFeatureScope sse_scope(tasm(), SSE4_1); XMMRegister dst = i.OutputSimd128Register(); - XMMRegister tmp = i.ToSimd128Register(instr->TempAt(0)); + XMMRegister tmp = i.TempSimd128Register(0); // NAN->0, negative->0 __ pxor(kScratchDoubleReg, kScratchDoubleReg); __ maxps(dst, kScratchDoubleReg); @@ -2357,7 +2394,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0)); CpuFeatureScope avx_scope(tasm(), AVX); XMMRegister dst = i.OutputSimd128Register(); - XMMRegister tmp = i.ToSimd128Register(instr->TempAt(0)); + XMMRegister tmp = i.TempSimd128Register(0); // NAN->0, negative->0 __ vpxor(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg); __ vmaxps(dst, dst, kScratchDoubleReg); @@ -2392,13 +2429,16 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( } case kSSEI32x4ShrU: { DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0)); - __ psrld(i.OutputSimd128Register(), i.InputInt8(1)); + XMMRegister tmp = i.TempSimd128Register(0); + __ movd(tmp, i.InputRegister(1)); + __ psrld(i.OutputSimd128Register(), tmp); break; } case kAVXI32x4ShrU: { CpuFeatureScope avx_scope(tasm(), AVX); - __ vpsrld(i.OutputSimd128Register(), i.InputSimd128Register(0), - i.InputInt8(1)); + XMMRegister tmp = i.TempSimd128Register(0); + __ movd(tmp, i.InputRegister(1)); + __ vpsrld(i.OutputSimd128Register(), i.InputSimd128Register(0), tmp); break; } case kSSEI32x4MinU: { @@ -2512,24 +2552,30 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( } case kSSEI16x8Shl: { DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0)); - __ psllw(i.OutputSimd128Register(), i.InputInt8(1)); + XMMRegister tmp = i.TempSimd128Register(0); + __ movd(tmp, i.InputRegister(1)); + __ psllw(i.OutputSimd128Register(), tmp); break; } case kAVXI16x8Shl: { CpuFeatureScope avx_scope(tasm(), AVX); - __ vpsllw(i.OutputSimd128Register(), i.InputSimd128Register(0), - i.InputInt8(1)); + XMMRegister tmp = i.TempSimd128Register(0); + __ movd(tmp, i.InputRegister(1)); + __ vpsllw(i.OutputSimd128Register(), i.InputSimd128Register(0), tmp); break; } case kSSEI16x8ShrS: { DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0)); - __ psraw(i.OutputSimd128Register(), i.InputInt8(1)); + XMMRegister tmp = i.TempSimd128Register(0); + __ movd(tmp, i.InputRegister(1)); + __ psraw(i.OutputSimd128Register(), tmp); break; } case kAVXI16x8ShrS: { CpuFeatureScope 
avx_scope(tasm(), AVX); - __ vpsraw(i.OutputSimd128Register(), i.InputSimd128Register(0), - i.InputInt8(1)); + XMMRegister tmp = i.TempSimd128Register(0); + __ movd(tmp, i.InputRegister(1)); + __ vpsraw(i.OutputSimd128Register(), i.InputSimd128Register(0), tmp); break; } case kSSEI16x8SConvertI32x4: { @@ -2698,13 +2744,16 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( } case kSSEI16x8ShrU: { DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0)); - __ psrlw(i.OutputSimd128Register(), i.InputInt8(1)); + XMMRegister tmp = i.TempSimd128Register(0); + __ movd(tmp, i.InputRegister(1)); + __ psrlw(i.OutputSimd128Register(), tmp); break; } case kAVXI16x8ShrU: { CpuFeatureScope avx_scope(tasm(), AVX); - __ vpsrlw(i.OutputSimd128Register(), i.InputSimd128Register(0), - i.InputInt8(1)); + XMMRegister tmp = i.TempSimd128Register(0); + __ movd(tmp, i.InputRegister(1)); + __ vpsrlw(i.OutputSimd128Register(), i.InputSimd128Register(0), tmp); break; } case kSSEI16x8UConvertI32x4: { @@ -2867,53 +2916,54 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( case kSSEI8x16Shl: { XMMRegister dst = i.OutputSimd128Register(); DCHECK_EQ(dst, i.InputSimd128Register(0)); - int8_t shift = i.InputInt8(1) & 0x7; - if (shift < 4) { - // For small shifts, doubling is faster. - for (int i = 0; i < shift; ++i) { - __ paddb(dst, dst); - } - } else { - // Mask off the unwanted bits before word-shifting. - __ pcmpeqw(kScratchDoubleReg, kScratchDoubleReg); - __ psrlw(kScratchDoubleReg, 8 + shift); - __ packuswb(kScratchDoubleReg, kScratchDoubleReg); - __ pand(dst, kScratchDoubleReg); - __ psllw(dst, shift); - } + Register shift = i.InputRegister(1); + Register tmp = i.ToRegister(instr->TempAt(0)); + XMMRegister tmp_simd = i.TempSimd128Register(1); + // Mask off the unwanted bits before word-shifting. + __ pcmpeqw(kScratchDoubleReg, kScratchDoubleReg); + __ mov(tmp, shift); + __ add(tmp, Immediate(8)); + __ movd(tmp_simd, tmp); + __ psrlw(kScratchDoubleReg, tmp_simd); + __ packuswb(kScratchDoubleReg, kScratchDoubleReg); + __ pand(dst, kScratchDoubleReg); + __ movd(tmp_simd, shift); + __ psllw(dst, tmp_simd); break; } case kAVXI8x16Shl: { CpuFeatureScope avx_scope(tasm(), AVX); XMMRegister dst = i.OutputSimd128Register(); XMMRegister src = i.InputSimd128Register(0); - int8_t shift = i.InputInt8(1) & 0x7; - if (shift < 4) { - // For small shifts, doubling is faster. - for (int i = 0; i < shift; ++i) { - __ vpaddb(dst, src, src); - src = dst; - } - } else { - // Mask off the unwanted bits before word-shifting. - __ vpcmpeqw(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg); - __ vpsrlw(kScratchDoubleReg, kScratchDoubleReg, 8 + shift); - __ vpackuswb(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg); - __ vpand(dst, src, kScratchDoubleReg); - __ vpsllw(dst, dst, shift); - } + Register shift = i.InputRegister(1); + Register tmp = i.ToRegister(instr->TempAt(0)); + XMMRegister tmp_simd = i.TempSimd128Register(1); + // Mask off the unwanted bits before word-shifting. 
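// The SSE/AVX word shifts above now take a variable count, and such counts
// can only come from the low 64 bits of an XMM register, hence the movd of
// the shift register into tmp_simd. A scalar model of one psrlw lane; per the
// SSE semantics, counts of 16 or more zero the lane entirely.
#include <cstdint>

uint16_t PsrlwLane(uint16_t lane, uint64_t count) {
  return count >= 16 ? 0 : static_cast<uint16_t>(lane >> count);
}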
+ __ vpcmpeqw(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg); + __ mov(tmp, shift); + __ add(tmp, Immediate(8)); + __ movd(tmp_simd, tmp); + __ vpsrlw(kScratchDoubleReg, kScratchDoubleReg, tmp_simd); + __ vpackuswb(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg); + __ vpand(dst, src, kScratchDoubleReg); + __ movd(tmp_simd, shift); + __ vpsllw(dst, dst, tmp_simd); break; } case kIA32I8x16ShrS: { XMMRegister dst = i.OutputSimd128Register(); - XMMRegister src = i.InputSimd128Register(0); - int8_t shift = i.InputInt8(1) & 0x7; + DCHECK_EQ(dst, i.InputSimd128Register(0)); + Register tmp = i.ToRegister(instr->TempAt(0)); + XMMRegister tmp_simd = i.TempSimd128Register(1); // Unpack the bytes into words, do arithmetic shifts, and repack. - __ Punpckhbw(kScratchDoubleReg, src); - __ Punpcklbw(dst, src); - __ Psraw(kScratchDoubleReg, 8 + shift); - __ Psraw(dst, 8 + shift); - __ Packsswb(dst, kScratchDoubleReg); + __ punpckhbw(kScratchDoubleReg, dst); + __ punpcklbw(dst, dst); + __ mov(tmp, i.InputRegister(1)); + __ add(tmp, Immediate(8)); + __ movd(tmp_simd, tmp); + __ psraw(kScratchDoubleReg, tmp_simd); + __ psraw(dst, tmp_simd); + __ packsswb(dst, kScratchDoubleReg); break; } case kSSEI8x16Add: { @@ -2964,7 +3014,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( XMMRegister dst = i.OutputSimd128Register(); DCHECK_EQ(dst, i.InputSimd128Register(0)); XMMRegister right = i.InputSimd128Register(1); - XMMRegister tmp = i.ToSimd128Register(instr->TempAt(0)); + XMMRegister tmp = i.TempSimd128Register(0); // I16x8 view of I8x16 // left = AAaa AAaa ... AAaa AAaa @@ -3004,7 +3054,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( XMMRegister dst = i.OutputSimd128Register(); XMMRegister left = i.InputSimd128Register(0); XMMRegister right = i.InputSimd128Register(1); - XMMRegister tmp = i.ToSimd128Register(instr->TempAt(0)); + XMMRegister tmp = i.TempSimd128Register(0); // I16x8 view of I8x16 // left = AAaa AAaa ... AAaa AAaa @@ -3165,15 +3215,19 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kIA32I8x16ShrU: { + DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0)); XMMRegister dst = i.OutputSimd128Register(); - XMMRegister src = i.InputSimd128Register(0); - int8_t shift = i.InputInt8(1) & 0x7; + Register tmp = i.ToRegister(instr->TempAt(0)); + XMMRegister tmp_simd = i.TempSimd128Register(1); // Unpack the bytes into words, do logical shifts, and repack. 
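// x86 has no 8-bit SIMD shifts, so I8x16 shifts are synthesized from 16-bit
// ones: the left shift masks each byte with 0xFF >> s so no bit crosses into
// its neighbour under psllw, and the right shifts widen bytes to words with
// punpck*bw before shifting by 8 + s. A scalar model of both tricks, assuming
// the shift amount s is already reduced to [0, 7]:
#include <cstdint>

uint8_t I8ShlViaWordShift(uint8_t byte, unsigned s) {
  // The mask is what pcmpeqw + psrlw(8 + s) + packuswb builds per byte.
  uint8_t mask = static_cast<uint8_t>(0xFF >> s);
  return static_cast<uint8_t>((byte & mask) << s);
}

int8_t I8ShrSViaWordShift(int8_t byte, unsigned s) {
  // punpck*bw places the byte in a word's high half, so one arithmetic word
  // shift by 8 + s lands the sign-correct result in the low byte.
  int16_t widened = static_cast<int16_t>(byte * 256);
  return static_cast<int8_t>(widened >> (8 + s));
}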
- __ Punpckhbw(kScratchDoubleReg, src); - __ Punpcklbw(dst, src); - __ Psrlw(kScratchDoubleReg, 8 + shift); - __ Psrlw(dst, 8 + shift); - __ Packuswb(dst, kScratchDoubleReg); + __ punpckhbw(kScratchDoubleReg, dst); + __ punpcklbw(dst, dst); + __ mov(tmp, i.InputRegister(1)); + __ add(tmp, Immediate(8)); + __ movd(tmp_simd, tmp); + __ psrlw(kScratchDoubleReg, tmp_simd); + __ psrlw(dst, tmp_simd); + __ packuswb(dst, kScratchDoubleReg); break; } case kSSEI8x16MinU: { @@ -3693,10 +3747,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( ASSEMBLE_SIMD_ALL_TRUE(pcmpeqb); break; } - case kIA32StackCheck: { - __ CompareStackLimit(esp); - break; - } case kIA32Word32AtomicPairLoad: { XMMRegister tmp = i.ToDoubleRegister(instr->TempAt(0)); __ movq(tmp, i.MemoryOperand()); @@ -4402,6 +4452,8 @@ void CodeGenerator::AssembleReturn(InstructionOperand* pop) { void CodeGenerator::FinishCode() {} +void CodeGenerator::PrepareForDeoptimizationExits(int deopt_count) {} + void CodeGenerator::AssembleMove(InstructionOperand* source, InstructionOperand* destination) { IA32OperandConverter g(this, nullptr); diff --git a/deps/v8/src/compiler/backend/ia32/instruction-codes-ia32.h b/deps/v8/src/compiler/backend/ia32/instruction-codes-ia32.h index 56dea82fe2..7530c716b8 100644 --- a/deps/v8/src/compiler/backend/ia32/instruction-codes-ia32.h +++ b/deps/v8/src/compiler/backend/ia32/instruction-codes-ia32.h @@ -116,7 +116,6 @@ namespace compiler { V(IA32PushSimd128) \ V(IA32Poke) \ V(IA32Peek) \ - V(IA32StackCheck) \ V(SSEF32x4Splat) \ V(AVXF32x4Splat) \ V(SSEF32x4ExtractLane) \ @@ -140,6 +139,8 @@ namespace compiler { V(AVXF32x4Sub) \ V(SSEF32x4Mul) \ V(AVXF32x4Mul) \ + V(SSEF32x4Div) \ + V(AVXF32x4Div) \ V(SSEF32x4Min) \ V(AVXF32x4Min) \ V(SSEF32x4Max) \ @@ -394,7 +395,8 @@ namespace compiler { V(M2I) /* [ %r2*2 + K] */ \ V(M4I) /* [ %r2*4 + K] */ \ V(M8I) /* [ %r2*8 + K] */ \ - V(MI) /* [ K] */ + V(MI) /* [ K] */ \ + V(Root) /* [%root + K] */ } // namespace compiler } // namespace internal diff --git a/deps/v8/src/compiler/backend/ia32/instruction-scheduler-ia32.cc b/deps/v8/src/compiler/backend/ia32/instruction-scheduler-ia32.cc index 15f69b991c..c2097a6691 100644 --- a/deps/v8/src/compiler/backend/ia32/instruction-scheduler-ia32.cc +++ b/deps/v8/src/compiler/backend/ia32/instruction-scheduler-ia32.cc @@ -120,6 +120,8 @@ int InstructionScheduler::GetTargetInstructionFlags( case kAVXF32x4Sub: case kSSEF32x4Mul: case kAVXF32x4Mul: + case kSSEF32x4Div: + case kAVXF32x4Div: case kSSEF32x4Min: case kAVXF32x4Min: case kSSEF32x4Max: @@ -356,7 +358,6 @@ int InstructionScheduler::GetTargetInstructionFlags( // Moves are used for memory load/store operations. return instr->HasOutput() ? 
kIsLoadOperation : kHasSideEffect; - case kIA32StackCheck: case kIA32Peek: return kIsLoadOperation; diff --git a/deps/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc b/deps/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc index e1fc66b4ba..ebef39a93a 100644 --- a/deps/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc +++ b/deps/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc @@ -152,6 +152,21 @@ class IA32OperandGenerator final : public OperandGenerator { AddressingMode GetEffectiveAddressMemoryOperand(Node* node, InstructionOperand inputs[], size_t* input_count) { + { + LoadMatcher<ExternalReferenceMatcher> m(node); + if (m.index().HasValue() && m.object().HasValue() && + selector()->CanAddressRelativeToRootsRegister(m.object().Value())) { + ptrdiff_t const delta = + m.index().Value() + + TurboAssemblerBase::RootRegisterOffsetForExternalReference( + selector()->isolate(), m.object().Value()); + if (is_int32(delta)) { + inputs[(*input_count)++] = TempImmediate(static_cast<int32_t>(delta)); + return kMode_Root; + } + } + } + BaseWithIndexAndDisplacement32Matcher m(node, AddressOption::kAllowAll); DCHECK(m.matches()); if ((m.displacement() == nullptr || CanBeImmediate(m.displacement()))) { @@ -261,6 +276,31 @@ void VisitRRISimd(InstructionSelector* selector, Node* node, } } +void VisitRROSimdShift(InstructionSelector* selector, Node* node, + ArchOpcode avx_opcode, ArchOpcode sse_opcode) { + IA32OperandGenerator g(selector); + InstructionOperand operand0 = g.UseUniqueRegister(node->InputAt(0)); + InstructionOperand operand1 = g.UseUniqueRegister(node->InputAt(1)); + InstructionOperand temps[] = {g.TempSimd128Register()}; + if (selector->IsSupported(AVX)) { + selector->Emit(avx_opcode, g.DefineAsRegister(node), operand0, operand1, + arraysize(temps), temps); + } else { + selector->Emit(sse_opcode, g.DefineSameAsFirst(node), operand0, operand1, + arraysize(temps), temps); + } +} + +void VisitRROI8x16SimdRightShift(InstructionSelector* selector, Node* node, + ArchOpcode opcode) { + IA32OperandGenerator g(selector); + InstructionOperand operand0 = g.UseUniqueRegister(node->InputAt(0)); + InstructionOperand operand1 = g.UseUniqueRegister(node->InputAt(1)); + InstructionOperand temps[] = {g.TempRegister(), g.TempSimd128Register()}; + selector->Emit(opcode, g.DefineSameAsFirst(node), operand0, operand1, + arraysize(temps), temps); +} + } // namespace void InstructionSelector::VisitStackSlot(Node* node) { @@ -344,7 +384,8 @@ void InstructionSelector::VisitStore(Node* node) { WriteBarrierKind write_barrier_kind = store_rep.write_barrier_kind(); MachineRepresentation rep = store_rep.representation(); - if (write_barrier_kind != kNoWriteBarrier) { + if (write_barrier_kind != kNoWriteBarrier && + V8_LIKELY(!FLAG_disable_write_barriers)) { DCHECK(CanBeTaggedPointer(rep)); AddressingMode addressing_mode; InstructionOperand inputs[] = { @@ -516,6 +557,35 @@ void InstructionSelector::VisitWord32Xor(Node* node) { } } +void InstructionSelector::VisitStackPointerGreaterThan( + Node* node, FlagsContinuation* cont) { + Node* const value = node->InputAt(0); + InstructionCode opcode = kArchStackPointerGreaterThan; + + DCHECK(cont->IsBranch()); + const int effect_level = + GetEffectLevel(cont->true_block()->PredecessorAt(0)->control_input()); + + IA32OperandGenerator g(this); + if (g.CanBeMemoryOperand(kIA32Cmp, node, value, effect_level)) { + DCHECK_EQ(IrOpcode::kLoad, value->opcode()); + + // GetEffectiveAddressMemoryOperand can create at most 3 inputs. 
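+      // (base register, index register, and displacement immediate; the
+      // kMode_Root form added above needs only the single immediate input).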
+ static constexpr int kMaxInputCount = 3; + + size_t input_count = 0; + InstructionOperand inputs[kMaxInputCount]; + AddressingMode addressing_mode = + g.GetEffectiveAddressMemoryOperand(value, inputs, &input_count); + opcode |= AddressingModeField::encode(addressing_mode); + DCHECK_LE(input_count, kMaxInputCount); + + EmitWithContinuation(opcode, 0, nullptr, input_count, inputs, cont); + } else { + EmitWithContinuation(opcode, g.UseRegister(value), cont); + } +} + // Shared routine for multiple shift operations. static inline void VisitShift(InstructionSelector* selector, Node* node, ArchOpcode opcode) { @@ -1243,30 +1313,6 @@ void VisitWordCompare(InstructionSelector* selector, Node* node, void VisitWordCompare(InstructionSelector* selector, Node* node, FlagsContinuation* cont) { - if (selector->isolate() != nullptr) { - StackCheckMatcher<Int32BinopMatcher, IrOpcode::kUint32LessThan> m( - selector->isolate(), node); - if (m.Matched()) { - // Compare(Load(js_stack_limit), LoadStackPointer) - if (!node->op()->HasProperty(Operator::kCommutative)) cont->Commute(); - InstructionCode opcode = cont->Encode(kIA32StackCheck); - CHECK(cont->IsBranch()); - selector->EmitWithContinuation(opcode, cont); - return; - } - } - WasmStackCheckMatcher<Int32BinopMatcher, IrOpcode::kUint32LessThan> wasm_m( - node); - if (wasm_m.Matched()) { - // This is a wasm stack check. By structure, we know that we can use the - // stack pointer directly, as wasm code does not modify the stack at points - // where stack checks are performed. - Node* left = node->InputAt(0); - LocationOperand esp(InstructionOperand::EXPLICIT, LocationOperand::REGISTER, - InstructionSequence::DefaultRepresentation(), - RegisterCode::kRegCode_esp); - return VisitCompareWithMemoryOperand(selector, kIA32Cmp, left, esp, cont); - } VisitWordCompare(selector, node, kIA32Cmp, cont); } @@ -1433,6 +1479,9 @@ void InstructionSelector::VisitWordCompareZero(Node* user, Node* value, return VisitWordCompare(this, value, cont); case IrOpcode::kWord32And: return VisitWordCompare(this, value, kIA32Test, cont); + case IrOpcode::kStackPointerGreaterThan: + cont->OverwriteAndNegateIfEqual(kStackPointerGreaterThanCondition); + return VisitStackPointerGreaterThan(value, cont); default: break; } @@ -1842,6 +1891,7 @@ void InstructionSelector::VisitWord32AtomicPairCompareExchange(Node* node) { V(F32x4AddHoriz) \ V(F32x4Sub) \ V(F32x4Mul) \ + V(F32x4Div) \ V(F32x4Min) \ V(F32x4Max) \ V(F32x4Eq) \ @@ -1939,8 +1989,7 @@ void InstructionSelector::VisitWord32AtomicPairCompareExchange(Node* node) { V(I32x4ShrU) \ V(I16x8Shl) \ V(I16x8ShrS) \ - V(I16x8ShrU) \ - V(I8x16Shl) + V(I16x8ShrU) #define SIMD_I8X16_RIGHT_SHIFT_OPCODES(V) \ V(I8x16ShrS) \ @@ -2037,22 +2086,21 @@ VISIT_SIMD_REPLACE_LANE(F32x4) #undef VISIT_SIMD_REPLACE_LANE #undef SIMD_INT_TYPES -#define VISIT_SIMD_SHIFT(Opcode) \ - void InstructionSelector::Visit##Opcode(Node* node) { \ - VisitRRISimd(this, node, kAVX##Opcode, kSSE##Opcode); \ +#define VISIT_SIMD_SHIFT(Opcode) \ + void InstructionSelector::Visit##Opcode(Node* node) { \ + VisitRROSimdShift(this, node, kAVX##Opcode, kSSE##Opcode); \ } SIMD_SHIFT_OPCODES(VISIT_SIMD_SHIFT) #undef VISIT_SIMD_SHIFT #undef SIMD_SHIFT_OPCODES -#define VISIT_SIMD_I8X16_RIGHT_SHIFT(Op) \ - void InstructionSelector::Visit##Op(Node* node) { \ - VisitRRISimd(this, node, kIA32##Op); \ +#define VISIT_SIMD_I8x16_RIGHT_SHIFT(Opcode) \ + void InstructionSelector::Visit##Opcode(Node* node) { \ + VisitRROI8x16SimdRightShift(this, node, kIA32##Opcode); \ } - 
-SIMD_I8X16_RIGHT_SHIFT_OPCODES(VISIT_SIMD_I8X16_RIGHT_SHIFT) +SIMD_I8X16_RIGHT_SHIFT_OPCODES(VISIT_SIMD_I8x16_RIGHT_SHIFT) #undef SIMD_I8X16_RIGHT_SHIFT_OPCODES -#undef VISIT_SIMD_I8X16_RIGHT_SHIFT +#undef VISIT_SIMD_I8x16_RIGHT_SHIFT #define VISIT_SIMD_UNOP(Opcode) \ void InstructionSelector::Visit##Opcode(Node* node) { \ @@ -2123,6 +2171,20 @@ void InstructionSelector::VisitI8x16UConvertI16x8(Node* node) { VisitPack(this, node, kAVXI8x16UConvertI16x8, kSSEI8x16UConvertI16x8); } +void InstructionSelector::VisitI8x16Shl(Node* node) { + IA32OperandGenerator g(this); + InstructionOperand operand0 = g.UseUniqueRegister(node->InputAt(0)); + InstructionOperand operand1 = g.UseUniqueRegister(node->InputAt(1)); + InstructionOperand temps[] = {g.TempRegister(), g.TempSimd128Register()}; + if (IsSupported(AVX)) { + Emit(kAVXI8x16Shl, g.DefineAsRegister(node), operand0, operand1, + arraysize(temps), temps); + } else { + Emit(kSSEI8x16Shl, g.DefineSameAsFirst(node), operand0, operand1, + arraysize(temps), temps); + } +} + void InstructionSelector::VisitInt32AbsWithOverflow(Node* node) { UNREACHABLE(); } @@ -2259,13 +2321,13 @@ static const ShuffleEntry arch_shuffles[] = { {{7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8}, kSSES8x8Reverse, kAVXS8x8Reverse, - false, - false}, + true, + true}, {{3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12}, kSSES8x4Reverse, kAVXS8x4Reverse, - false, - false}, + true, + true}, {{1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14}, kSSES8x2Reverse, kAVXS8x2Reverse, diff --git a/deps/v8/src/compiler/backend/instruction-codes.h b/deps/v8/src/compiler/backend/instruction-codes.h index 1085de2196..589c1bda3b 100644 --- a/deps/v8/src/compiler/backend/instruction-codes.h +++ b/deps/v8/src/compiler/backend/instruction-codes.h @@ -88,13 +88,13 @@ inline RecordWriteMode WriteBarrierKindToRecordWriteMode( V(ArchThrowTerminator) \ V(ArchDeoptimize) \ V(ArchRet) \ - V(ArchStackPointer) \ V(ArchFramePointer) \ V(ArchParentFramePointer) \ V(ArchTruncateDoubleToI) \ V(ArchStoreWithWriteBarrier) \ V(ArchStackSlot) \ V(ArchWordPoisonOnSpeculation) \ + V(ArchStackPointerGreaterThan) \ V(Word32AtomicLoadInt8) \ V(Word32AtomicLoadUint8) \ V(Word32AtomicLoadInt16) \ @@ -238,6 +238,9 @@ enum FlagsCondition { kNegative }; +static constexpr FlagsCondition kStackPointerGreaterThanCondition = + kUnsignedGreaterThan; + inline FlagsCondition NegateFlagsCondition(FlagsCondition condition) { return static_cast<FlagsCondition>(condition ^ 1); } diff --git a/deps/v8/src/compiler/backend/instruction-scheduler.cc b/deps/v8/src/compiler/backend/instruction-scheduler.cc index 538af71bb4..dc66813740 100644 --- a/deps/v8/src/compiler/backend/instruction-scheduler.cc +++ b/deps/v8/src/compiler/backend/instruction-scheduler.cc @@ -275,9 +275,10 @@ int InstructionScheduler::GetInstructionFlags(const Instruction* instr) const { case kIeee754Float64Tanh: return kNoOpcodeFlags; - case kArchStackPointer: - // ArchStackPointer instruction loads the current stack pointer value and - // must not be reordered with instruction with side effects. + case kArchStackPointerGreaterThan: + // The ArchStackPointerGreaterThan instruction loads the current stack + // pointer value and must not be reordered with instructions with side + // effects. 
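+      // Classifying it as a load (below) gives exactly that ordering
+      // guarantee without treating it as a full side effect.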
return kIsLoadOperation; case kArchWordPoisonOnSpeculation: diff --git a/deps/v8/src/compiler/backend/instruction-selector.cc b/deps/v8/src/compiler/backend/instruction-selector.cc index 11ba910405..43193ec2b1 100644 --- a/deps/v8/src/compiler/backend/instruction-selector.cc +++ b/deps/v8/src/compiler/backend/instruction-selector.cc @@ -26,6 +26,7 @@ InstructionSelector::InstructionSelector( InstructionSequence* sequence, Schedule* schedule, SourcePositionTable* source_positions, Frame* frame, EnableSwitchJumpTable enable_switch_jump_table, TickCounter* tick_counter, + size_t* max_unoptimized_frame_height, SourcePositionMode source_position_mode, Features features, EnableScheduling enable_scheduling, EnableRootsRelativeAddressing enable_roots_relative_addressing, @@ -56,7 +57,10 @@ InstructionSelector::InstructionSelector( instruction_selection_failed_(false), instr_origins_(sequence->zone()), trace_turbo_(trace_turbo), - tick_counter_(tick_counter) { + tick_counter_(tick_counter), + max_unoptimized_frame_height_(max_unoptimized_frame_height) { + DCHECK_EQ(*max_unoptimized_frame_height, 0); // Caller-initialized. + instructions_.reserve(node_count); continuation_inputs_.reserve(5); continuation_outputs_.reserve(2); @@ -421,9 +425,27 @@ void InstructionSelector::SetEffectLevel(Node* node, int effect_level) { effect_level_[id] = effect_level; } -bool InstructionSelector::CanAddressRelativeToRootsRegister() const { - return enable_roots_relative_addressing_ == kEnableRootsRelativeAddressing && - CanUseRootsRegister(); +bool InstructionSelector::CanAddressRelativeToRootsRegister( + const ExternalReference& reference) const { + // There are three things to consider here: + // 1. CanUseRootsRegister: Is kRootRegister initialized? + const bool root_register_is_available_and_initialized = CanUseRootsRegister(); + if (!root_register_is_available_and_initialized) return false; + + // 2. enable_roots_relative_addressing_: Can we address everything on the heap + // through the root register, i.e. are root-relative addresses to arbitrary + // addresses guaranteed not to change between code generation and + // execution? + const bool all_root_relative_offsets_are_constant = + (enable_roots_relative_addressing_ == kEnableRootsRelativeAddressing); + if (all_root_relative_offsets_are_constant) return true; + + // 3. IsAddressableThroughRootRegister: Is the target address guaranteed to + // have a fixed root-relative offset? If so, we can ignore 2. 
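+  //    (This holds, for example, for external references into the Isolate's
+  //    root-addressable region, whose offset from kRootRegister stays fixed
+  //    for the lifetime of the isolate.)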
+ const bool this_root_relative_offset_is_constant = + TurboAssemblerBase::IsAddressableThroughRootRegister(isolate(), + reference); + return this_root_relative_offset_is_constant; } bool InstructionSelector::CanUseRootsRegister() const { @@ -744,7 +766,7 @@ Instruction* InstructionSelector::EmitWithContinuation( void InstructionSelector::AppendDeoptimizeArguments( InstructionOperandVector* args, DeoptimizeKind kind, - DeoptimizeReason reason, VectorSlotPair const& feedback, + DeoptimizeReason reason, FeedbackSource const& feedback, Node* frame_state) { OperandGenerator g(this); FrameStateDescriptor* const descriptor = GetFrameStateDescriptor(frame_state); @@ -761,7 +783,7 @@ void InstructionSelector::AppendDeoptimizeArguments( Instruction* InstructionSelector::EmitDeoptimize( InstructionCode opcode, size_t output_count, InstructionOperand* outputs, size_t input_count, InstructionOperand* inputs, DeoptimizeKind kind, - DeoptimizeReason reason, VectorSlotPair const& feedback, + DeoptimizeReason reason, FeedbackSource const& feedback, Node* frame_state) { InstructionOperandVector args(instruction_zone()); for (size_t i = 0; i < input_count; ++i) { @@ -972,7 +994,7 @@ void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer, int const state_id = sequence()->AddDeoptimizationEntry( buffer->frame_state_descriptor, DeoptimizeKind::kLazy, - DeoptimizeReason::kUnknown, VectorSlotPair()); + DeoptimizeReason::kUnknown, FeedbackSource()); buffer->instruction_args.push_back(g.TempImmediate(state_id)); StateObjectDeduplicator deduplicator(instruction_zone()); @@ -1056,7 +1078,6 @@ void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer, bool InstructionSelector::IsSourcePositionUsed(Node* node) { return (source_position_mode_ == kAllSourcePositions || node->opcode() == IrOpcode::kCall || - node->opcode() == IrOpcode::kCallWithCallerSavedRegisters || node->opcode() == IrOpcode::kTrapIf || node->opcode() == IrOpcode::kTrapUnless || node->opcode() == IrOpcode::kProtectedLoad || @@ -1078,10 +1099,13 @@ void InstructionSelector::VisitBlock(BasicBlock* block) { if (node->opcode() == IrOpcode::kStore || node->opcode() == IrOpcode::kUnalignedStore || node->opcode() == IrOpcode::kCall || - node->opcode() == IrOpcode::kCallWithCallerSavedRegisters || node->opcode() == IrOpcode::kProtectedLoad || node->opcode() == IrOpcode::kProtectedStore || - node->opcode() == IrOpcode::kMemoryBarrier) { +#define ADD_EFFECT_FOR_ATOMIC_OP(Opcode) \ + node->opcode() == IrOpcode::k##Opcode || + MACHINE_ATOMIC_OP_LIST(ADD_EFFECT_FOR_ATOMIC_OP) +#undef ADD_EFFECT_FOR_ATOMIC_OP + node->opcode() == IrOpcode::kMemoryBarrier) { ++effect_level; } } @@ -1274,9 +1298,9 @@ void InstructionSelector::VisitNode(Node* node) { // No code needed for these graph artifacts. 
return; case IrOpcode::kIfException: - return MarkAsReference(node), VisitIfException(node); + return MarkAsTagged(node), VisitIfException(node); case IrOpcode::kFinishRegion: - return MarkAsReference(node), VisitFinishRegion(node); + return MarkAsTagged(node), VisitFinishRegion(node); case IrOpcode::kParameter: { MachineType type = linkage()->GetParameterType(ParameterIndexOf(node->op())); @@ -1284,7 +1308,7 @@ void InstructionSelector::VisitNode(Node* node) { return VisitParameter(node); } case IrOpcode::kOsrValue: - return MarkAsReference(node), VisitOsrValue(node); + return MarkAsTagged(node), VisitOsrValue(node); case IrOpcode::kPhi: { MachineRepresentation rep = PhiRepresentationOf(node->op()); if (rep == MachineRepresentation::kNone) return; @@ -1304,20 +1328,18 @@ void InstructionSelector::VisitNode(Node* node) { case IrOpcode::kFloat64Constant: return MarkAsFloat64(node), VisitConstant(node); case IrOpcode::kHeapConstant: - return MarkAsReference(node), VisitConstant(node); + return MarkAsTagged(node), VisitConstant(node); case IrOpcode::kCompressedHeapConstant: return MarkAsCompressed(node), VisitConstant(node); case IrOpcode::kNumberConstant: { double value = OpParameter<double>(node->op()); - if (!IsSmiDouble(value)) MarkAsReference(node); + if (!IsSmiDouble(value)) MarkAsTagged(node); return VisitConstant(node); } case IrOpcode::kDelayedStringConstant: - return MarkAsReference(node), VisitConstant(node); + return MarkAsTagged(node), VisitConstant(node); case IrOpcode::kCall: return VisitCall(node); - case IrOpcode::kCallWithCallerSavedRegisters: - return VisitCallWithCallerSavedRegisters(node); case IrOpcode::kDeoptimizeIf: return VisitDeoptimizeIf(node); case IrOpcode::kDeoptimizeUnless: @@ -1484,10 +1506,16 @@ void InstructionSelector::VisitNode(Node* node) { return MarkAsRepresentation(MachineType::PointerRepresentation(), node), VisitBitcastTaggedToWord(node); case IrOpcode::kBitcastWordToTagged: - return MarkAsReference(node), VisitBitcastWordToTagged(node); + return MarkAsTagged(node), VisitBitcastWordToTagged(node); case IrOpcode::kBitcastWordToTaggedSigned: return MarkAsRepresentation(MachineRepresentation::kTaggedSigned, node), EmitIdentity(node); + case IrOpcode::kBitcastWord32ToCompressedSigned: + return MarkAsRepresentation(MachineRepresentation::kCompressedSigned, + node), + EmitIdentity(node); + case IrOpcode::kBitcastCompressedSignedToWord32: + return MarkAsWord32(node), EmitIdentity(node); case IrOpcode::kChangeFloat32ToFloat64: return MarkAsFloat64(node), VisitChangeFloat32ToFloat64(node); case IrOpcode::kChangeInt32ToFloat64: @@ -1536,18 +1564,20 @@ void InstructionSelector::VisitNode(Node* node) { case IrOpcode::kChangeTaggedToCompressed: return MarkAsCompressed(node), VisitChangeTaggedToCompressed(node); case IrOpcode::kChangeTaggedPointerToCompressedPointer: - return MarkAsCompressed(node), + return MarkAsRepresentation(MachineRepresentation::kCompressedPointer, + node), VisitChangeTaggedPointerToCompressedPointer(node); case IrOpcode::kChangeTaggedSignedToCompressedSigned: - return MarkAsWord32(node), + return MarkAsRepresentation(MachineRepresentation::kCompressedSigned, + node), VisitChangeTaggedSignedToCompressedSigned(node); case IrOpcode::kChangeCompressedToTagged: - return MarkAsReference(node), VisitChangeCompressedToTagged(node); + return MarkAsTagged(node), VisitChangeCompressedToTagged(node); case IrOpcode::kChangeCompressedPointerToTaggedPointer: - return MarkAsReference(node), + return 
MarkAsRepresentation(MachineRepresentation::kTaggedPointer, node), VisitChangeCompressedPointerToTaggedPointer(node); case IrOpcode::kChangeCompressedSignedToTaggedSigned: - return MarkAsWord64(node), + return MarkAsRepresentation(MachineRepresentation::kTaggedSigned, node), VisitChangeCompressedSignedToTaggedSigned(node); #endif case IrOpcode::kTruncateFloat64ToFloat32: @@ -1697,15 +1727,15 @@ void InstructionSelector::VisitNode(Node* node) { case IrOpcode::kFloat64InsertHighWord32: return MarkAsFloat64(node), VisitFloat64InsertHighWord32(node); case IrOpcode::kTaggedPoisonOnSpeculation: - return MarkAsReference(node), VisitTaggedPoisonOnSpeculation(node); + return MarkAsTagged(node), VisitTaggedPoisonOnSpeculation(node); case IrOpcode::kWord32PoisonOnSpeculation: return MarkAsWord32(node), VisitWord32PoisonOnSpeculation(node); case IrOpcode::kWord64PoisonOnSpeculation: return MarkAsWord64(node), VisitWord64PoisonOnSpeculation(node); case IrOpcode::kStackSlot: return VisitStackSlot(node); - case IrOpcode::kLoadStackPointer: - return VisitLoadStackPointer(node); + case IrOpcode::kStackPointerGreaterThan: + return VisitStackPointerGreaterThan(node); case IrOpcode::kLoadFramePointer: return VisitLoadFramePointer(node); case IrOpcode::kLoadParentFramePointer: @@ -1827,6 +1857,18 @@ void InstructionSelector::VisitNode(Node* node) { return MarkAsSimd128(node), VisitF64x2Abs(node); case IrOpcode::kF64x2Neg: return MarkAsSimd128(node), VisitF64x2Neg(node); + case IrOpcode::kF64x2Add: + return MarkAsSimd128(node), VisitF64x2Add(node); + case IrOpcode::kF64x2Sub: + return MarkAsSimd128(node), VisitF64x2Sub(node); + case IrOpcode::kF64x2Mul: + return MarkAsSimd128(node), VisitF64x2Mul(node); + case IrOpcode::kF64x2Div: + return MarkAsSimd128(node), VisitF64x2Div(node); + case IrOpcode::kF64x2Min: + return MarkAsSimd128(node), VisitF64x2Min(node); + case IrOpcode::kF64x2Max: + return MarkAsSimd128(node), VisitF64x2Max(node); case IrOpcode::kF64x2Eq: return MarkAsSimd128(node), VisitF64x2Eq(node); case IrOpcode::kF64x2Ne: @@ -1861,6 +1903,8 @@ void InstructionSelector::VisitNode(Node* node) { return MarkAsSimd128(node), VisitF32x4Sub(node); case IrOpcode::kF32x4Mul: return MarkAsSimd128(node), VisitF32x4Mul(node); + case IrOpcode::kF32x4Div: + return MarkAsSimd128(node), VisitF32x4Div(node); case IrOpcode::kF32x4Min: return MarkAsSimd128(node), VisitF32x4Min(node); case IrOpcode::kF32x4Max: @@ -1891,6 +1935,10 @@ void InstructionSelector::VisitNode(Node* node) { return MarkAsSimd128(node), VisitI64x2Sub(node); case IrOpcode::kI64x2Mul: return MarkAsSimd128(node), VisitI64x2Mul(node); + case IrOpcode::kI64x2MinS: + return MarkAsSimd128(node), VisitI64x2MinS(node); + case IrOpcode::kI64x2MaxS: + return MarkAsSimd128(node), VisitI64x2MaxS(node); case IrOpcode::kI64x2Eq: return MarkAsSimd128(node), VisitI64x2Eq(node); case IrOpcode::kI64x2Ne: @@ -1901,6 +1949,10 @@ void InstructionSelector::VisitNode(Node* node) { return MarkAsSimd128(node), VisitI64x2GeS(node); case IrOpcode::kI64x2ShrU: return MarkAsSimd128(node), VisitI64x2ShrU(node); + case IrOpcode::kI64x2MinU: + return MarkAsSimd128(node), VisitI64x2MinU(node); + case IrOpcode::kI64x2MaxU: + return MarkAsSimd128(node), VisitI64x2MaxU(node); case IrOpcode::kI64x2GtU: return MarkAsSimd128(node), VisitI64x2GtU(node); case IrOpcode::kI64x2GeU: @@ -2134,9 +2186,10 @@ void InstructionSelector::VisitTaggedPoisonOnSpeculation(Node* node) { EmitWordPoisonOnSpeculation(node); } -void InstructionSelector::VisitLoadStackPointer(Node* node) { - OperandGenerator 
g(this); - Emit(kArchStackPointer, g.DefineAsRegister(node)); +void InstructionSelector::VisitStackPointerGreaterThan(Node* node) { + FlagsContinuation cont = + FlagsContinuation::ForSet(kStackPointerGreaterThanCondition, node); + VisitStackPointerGreaterThan(node, &cont); } void InstructionSelector::VisitLoadFramePointer(Node* node) { @@ -2553,11 +2606,18 @@ void InstructionSelector::VisitWord64AtomicCompareExchange(Node* node) { // !V8_TARGET_ARCH_MIPS64 && !V8_TARGET_ARCH_S390 #if !V8_TARGET_ARCH_X64 +#if !V8_TARGET_ARCH_ARM64 void InstructionSelector::VisitF64x2Splat(Node* node) { UNIMPLEMENTED(); } void InstructionSelector::VisitF64x2ExtractLane(Node* node) { UNIMPLEMENTED(); } void InstructionSelector::VisitF64x2ReplaceLane(Node* node) { UNIMPLEMENTED(); } void InstructionSelector::VisitF64x2Abs(Node* node) { UNIMPLEMENTED(); } void InstructionSelector::VisitF64x2Neg(Node* node) { UNIMPLEMENTED(); } +void InstructionSelector::VisitF64x2Add(Node* node) { UNIMPLEMENTED(); } +void InstructionSelector::VisitF64x2Sub(Node* node) { UNIMPLEMENTED(); } +void InstructionSelector::VisitF64x2Mul(Node* node) { UNIMPLEMENTED(); } +void InstructionSelector::VisitF64x2Div(Node* node) { UNIMPLEMENTED(); } +void InstructionSelector::VisitF64x2Min(Node* node) { UNIMPLEMENTED(); } +void InstructionSelector::VisitF64x2Max(Node* node) { UNIMPLEMENTED(); } void InstructionSelector::VisitF64x2Eq(Node* node) { UNIMPLEMENTED(); } void InstructionSelector::VisitF64x2Ne(Node* node) { UNIMPLEMENTED(); } void InstructionSelector::VisitF64x2Lt(Node* node) { UNIMPLEMENTED(); } @@ -2566,20 +2626,25 @@ void InstructionSelector::VisitI64x2Splat(Node* node) { UNIMPLEMENTED(); } void InstructionSelector::VisitI64x2ExtractLane(Node* node) { UNIMPLEMENTED(); } void InstructionSelector::VisitI64x2ReplaceLane(Node* node) { UNIMPLEMENTED(); } void InstructionSelector::VisitI64x2Neg(Node* node) { UNIMPLEMENTED(); } -void InstructionSelector::VisitS1x2AnyTrue(Node* node) { UNIMPLEMENTED(); } -void InstructionSelector::VisitS1x2AllTrue(Node* node) { UNIMPLEMENTED(); } void InstructionSelector::VisitI64x2Shl(Node* node) { UNIMPLEMENTED(); } void InstructionSelector::VisitI64x2ShrS(Node* node) { UNIMPLEMENTED(); } void InstructionSelector::VisitI64x2Add(Node* node) { UNIMPLEMENTED(); } void InstructionSelector::VisitI64x2Sub(Node* node) { UNIMPLEMENTED(); } -void InstructionSelector::VisitI64x2Mul(Node* node) { UNIMPLEMENTED(); } void InstructionSelector::VisitI64x2Eq(Node* node) { UNIMPLEMENTED(); } void InstructionSelector::VisitI64x2Ne(Node* node) { UNIMPLEMENTED(); } +void InstructionSelector::VisitI64x2ShrU(Node* node) { UNIMPLEMENTED(); } void InstructionSelector::VisitI64x2GtS(Node* node) { UNIMPLEMENTED(); } void InstructionSelector::VisitI64x2GeS(Node* node) { UNIMPLEMENTED(); } -void InstructionSelector::VisitI64x2ShrU(Node* node) { UNIMPLEMENTED(); } void InstructionSelector::VisitI64x2GtU(Node* node) { UNIMPLEMENTED(); } void InstructionSelector::VisitI64x2GeU(Node* node) { UNIMPLEMENTED(); } +void InstructionSelector::VisitS1x2AnyTrue(Node* node) { UNIMPLEMENTED(); } +void InstructionSelector::VisitS1x2AllTrue(Node* node) { UNIMPLEMENTED(); } +#endif // !V8_TARGET_ARCH_ARM64 +void InstructionSelector::VisitI64x2Mul(Node* node) { UNIMPLEMENTED(); } +void InstructionSelector::VisitI64x2MinS(Node* node) { UNIMPLEMENTED(); } +void InstructionSelector::VisitI64x2MaxS(Node* node) { UNIMPLEMENTED(); } +void InstructionSelector::VisitI64x2MinU(Node* node) { UNIMPLEMENTED(); } +void InstructionSelector::VisitI64x2MaxU(Node* 
node) { UNIMPLEMENTED(); } #endif // !V8_TARGET_ARCH_X64 void InstructionSelector::VisitFinishRegion(Node* node) { EmitIdentity(node); } @@ -2677,6 +2742,12 @@ void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) { OperandGenerator g(this); auto call_descriptor = CallDescriptorOf(node->op()); + if (call_descriptor->NeedsCallerSavedRegisters()) { + Emit(kArchSaveCallerRegisters | MiscField::encode(static_cast<int>( + call_descriptor->get_save_fp_mode())), + g.NoOutput()); + } + FrameStateDescriptor* frame_state_descriptor = nullptr; if (call_descriptor->NeedsFrameState()) { frame_state_descriptor = GetFrameStateDescriptor( @@ -2745,18 +2816,13 @@ void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) { call_instr->MarkAsCall(); EmitPrepareResults(&(buffer.output_nodes), call_descriptor, node); -} -void InstructionSelector::VisitCallWithCallerSavedRegisters( - Node* node, BasicBlock* handler) { - OperandGenerator g(this); - const auto fp_mode = CallDescriptorOf(node->op())->get_save_fp_mode(); - Emit(kArchSaveCallerRegisters | MiscField::encode(static_cast<int>(fp_mode)), - g.NoOutput()); - VisitCall(node, handler); - Emit(kArchRestoreCallerRegisters | - MiscField::encode(static_cast<int>(fp_mode)), - g.NoOutput()); + if (call_descriptor->NeedsCallerSavedRegisters()) { + Emit(kArchRestoreCallerRegisters | + MiscField::encode( + static_cast<int>(call_descriptor->get_save_fp_mode())), + g.NoOutput()); + } } void InstructionSelector::VisitTailCall(Node* node) { @@ -2764,7 +2830,7 @@ void InstructionSelector::VisitTailCall(Node* node) { auto call_descriptor = CallDescriptorOf(node->op()); CallDescriptor* caller = linkage()->GetIncomingDescriptor(); - DCHECK(caller->CanTailCall(node)); + DCHECK(caller->CanTailCall(CallDescriptorOf(node->op()))); const CallDescriptor* callee = CallDescriptorOf(node->op()); int stack_param_delta = callee->GetStackParameterDelta(caller); CallBuffer buffer(zone(), call_descriptor, nullptr); @@ -2912,14 +2978,13 @@ void InstructionSelector::VisitTrapUnless(Node* node, TrapId trap_id) { } void InstructionSelector::EmitIdentity(Node* node) { - OperandGenerator g(this); MarkAsUsed(node->InputAt(0)); SetRename(node, node->InputAt(0)); } void InstructionSelector::VisitDeoptimize(DeoptimizeKind kind, DeoptimizeReason reason, - VectorSlotPair const& feedback, + FeedbackSource const& feedback, Node* value) { EmitDeoptimize(kArchDeoptimize, 0, nullptr, 0, nullptr, kind, reason, feedback, value); @@ -2980,8 +3045,9 @@ bool InstructionSelector::CanProduceSignalingNaN(Node* node) { return true; } -FrameStateDescriptor* InstructionSelector::GetFrameStateDescriptor( - Node* state) { +namespace { + +FrameStateDescriptor* GetFrameStateDescriptorInternal(Zone* zone, Node* state) { DCHECK_EQ(IrOpcode::kFrameState, state->opcode()); DCHECK_EQ(kFrameStateInputCount, state->InputCount()); FrameStateInfo state_info = FrameStateInfoOf(state->op()); @@ -2999,13 +3065,24 @@ FrameStateDescriptor* InstructionSelector::GetFrameStateDescriptor( FrameStateDescriptor* outer_state = nullptr; Node* outer_node = state->InputAt(kFrameStateOuterStateInput); if (outer_node->opcode() == IrOpcode::kFrameState) { - outer_state = GetFrameStateDescriptor(outer_node); + outer_state = GetFrameStateDescriptorInternal(zone, outer_node); } - return new (instruction_zone()) FrameStateDescriptor( - instruction_zone(), state_info.type(), state_info.bailout_id(), - state_info.state_combine(), parameters, locals, stack, - state_info.shared_info(), outer_state); + return new (zone) + 
FrameStateDescriptor(zone, state_info.type(), state_info.bailout_id(), + state_info.state_combine(), parameters, locals, + stack, state_info.shared_info(), outer_state); +} + +} // namespace + +FrameStateDescriptor* InstructionSelector::GetFrameStateDescriptor( + Node* state) { + auto* desc = GetFrameStateDescriptorInternal(instruction_zone(), state); + *max_unoptimized_frame_height_ = + std::max(*max_unoptimized_frame_height_, + desc->total_conservative_frame_size_in_bytes()); + return desc; } // static diff --git a/deps/v8/src/compiler/backend/instruction-selector.h b/deps/v8/src/compiler/backend/instruction-selector.h index 16f88bb516..eb3e098427 100644 --- a/deps/v8/src/compiler/backend/instruction-selector.h +++ b/deps/v8/src/compiler/backend/instruction-selector.h @@ -12,6 +12,7 @@ #include "src/compiler/backend/instruction-scheduler.h" #include "src/compiler/backend/instruction.h" #include "src/compiler/common-operator.h" +#include "src/compiler/feedback-source.h" #include "src/compiler/linkage.h" #include "src/compiler/machine-operator.h" #include "src/compiler/node.h" @@ -60,7 +61,7 @@ class FlagsContinuation final { static FlagsContinuation ForDeoptimize(FlagsCondition condition, DeoptimizeKind kind, DeoptimizeReason reason, - VectorSlotPair const& feedback, + FeedbackSource const& feedback, Node* frame_state) { return FlagsContinuation(kFlags_deoptimize, condition, kind, reason, feedback, frame_state); @@ -69,7 +70,7 @@ class FlagsContinuation final { // Creates a new flags continuation for an eager deoptimization exit. static FlagsContinuation ForDeoptimizeAndPoison( FlagsCondition condition, DeoptimizeKind kind, DeoptimizeReason reason, - VectorSlotPair const& feedback, Node* frame_state) { + FeedbackSource const& feedback, Node* frame_state) { return FlagsContinuation(kFlags_deoptimize_and_poison, condition, kind, reason, feedback, frame_state); } @@ -110,7 +111,7 @@ class FlagsContinuation final { DCHECK(IsDeoptimize()); return reason_; } - VectorSlotPair const& feedback() const { + FeedbackSource const& feedback() const { DCHECK(IsDeoptimize()); return feedback_; } @@ -196,7 +197,7 @@ class FlagsContinuation final { FlagsContinuation(FlagsMode mode, FlagsCondition condition, DeoptimizeKind kind, DeoptimizeReason reason, - VectorSlotPair const& feedback, Node* frame_state) + FeedbackSource const& feedback, Node* frame_state) : mode_(mode), condition_(condition), kind_(kind), @@ -226,7 +227,7 @@ class FlagsContinuation final { FlagsCondition condition_; DeoptimizeKind kind_; // Only valid if mode_ == kFlags_deoptimize* DeoptimizeReason reason_; // Only valid if mode_ == kFlags_deoptimize* - VectorSlotPair feedback_; // Only valid if mode_ == kFlags_deoptimize* + FeedbackSource feedback_; // Only valid if mode_ == kFlags_deoptimize* Node* frame_state_or_result_; // Only valid if mode_ == kFlags_deoptimize* // or mode_ == kFlags_set. BasicBlock* true_block_; // Only valid if mode_ == kFlags_branch*. 
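The running maximum recorded above is the whole interface between instruction selection and the later stack-check padding, so a compact model is useful. A minimal self-contained sketch of the composition, under the assumption that the per-frame estimates come from the *FrameInfo::Conservative helpers shown later in instruction.cc; FrameStateSketch and its fields are illustrative names, not the V8 API:

  #include <algorithm>
  #include <cstddef>

  struct FrameStateSketch {
    size_t own_conservative_bytes;    // per-frame overapproximation
    const FrameStateSketch* outer;    // nullptr for the outermost frame
  };

  // Mirrors GetTotalConservativeFrameSizeInBytes: own size plus everything
  // further out in the inlining chain.
  size_t TotalConservativeBytes(const FrameStateSketch* s) {
    size_t total = 0;
    for (; s != nullptr; s = s->outer) total += s->own_conservative_bytes;
    return total;
  }

  // Mirrors GetFrameStateDescriptor: every frame state seen during selection
  // folds into one caller-owned maximum, later applied to stack checks.
  void RecordFrameState(const FrameStateSketch* s, size_t* max_height) {
    *max_height = std::max(*max_height, TotalConservativeBytes(s));
  }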
@@ -270,6 +271,7 @@ class V8_EXPORT_PRIVATE InstructionSelector final { InstructionSequence* sequence, Schedule* schedule, SourcePositionTable* source_positions, Frame* frame, EnableSwitchJumpTable enable_switch_jump_table, TickCounter* tick_counter, + size_t* max_unoptimized_frame_height, SourcePositionMode source_position_mode = kCallSourcePositions, Features features = SupportedFeatures(), EnableScheduling enable_scheduling = FLAG_turbo_instruction_scheduling @@ -352,7 +354,7 @@ class V8_EXPORT_PRIVATE InstructionSelector final { InstructionOperand* outputs, size_t input_count, InstructionOperand* inputs, DeoptimizeKind kind, DeoptimizeReason reason, - VectorSlotPair const& feedback, + FeedbackSource const& feedback, Node* frame_state); // =========================================================================== @@ -446,7 +448,8 @@ class V8_EXPORT_PRIVATE InstructionSelector final { // Check if we can generate loads and stores of ExternalConstants relative // to the roots register. - bool CanAddressRelativeToRootsRegister() const; + bool CanAddressRelativeToRootsRegister( + const ExternalReference& reference) const; // Check if we can use the roots register to access GC roots. bool CanUseRootsRegister() const; @@ -496,7 +499,7 @@ class V8_EXPORT_PRIVATE InstructionSelector final { void AppendDeoptimizeArguments(InstructionOperandVector* args, DeoptimizeKind kind, DeoptimizeReason reason, - VectorSlotPair const& feedback, + FeedbackSource const& feedback, Node* frame_state); void EmitTableSwitch( @@ -543,7 +546,7 @@ class V8_EXPORT_PRIVATE InstructionSelector final { void MarkAsSimd128(Node* node) { MarkAsRepresentation(MachineRepresentation::kSimd128, node); } - void MarkAsReference(Node* node) { + void MarkAsTagged(Node* node) { MarkAsRepresentation(MachineRepresentation::kTagged, node); } void MarkAsCompressed(Node* node) { @@ -621,8 +624,6 @@ class V8_EXPORT_PRIVATE InstructionSelector final { void VisitProjection(Node* node); void VisitConstant(Node* node); void VisitCall(Node* call, BasicBlock* handler = nullptr); - void VisitCallWithCallerSavedRegisters(Node* call, - BasicBlock* handler = nullptr); void VisitDeoptimizeIf(Node* node); void VisitDeoptimizeUnless(Node* node); void VisitTrapIf(Node* node, TrapId trap_id); @@ -632,7 +633,7 @@ class V8_EXPORT_PRIVATE InstructionSelector final { void VisitBranch(Node* input, BasicBlock* tbranch, BasicBlock* fbranch); void VisitSwitch(Node* node, const SwitchInfo& sw); void VisitDeoptimize(DeoptimizeKind kind, DeoptimizeReason reason, - VectorSlotPair const& feedback, Node* value); + FeedbackSource const& feedback, Node* value); void VisitReturn(Node* ret); void VisitThrow(Node* node); void VisitRetain(Node* node); @@ -640,6 +641,8 @@ class V8_EXPORT_PRIVATE InstructionSelector final { void VisitStaticAssert(Node* node); void VisitDeadValue(Node* node); + void VisitStackPointerGreaterThan(Node* node, FlagsContinuation* cont); + void VisitWordCompareZero(Node* user, Node* value, FlagsContinuation* cont); void EmitWordPoisonOnSpeculation(Node* node); @@ -782,6 +785,10 @@ class V8_EXPORT_PRIVATE InstructionSelector final { ZoneVector<std::pair<int, int>> instr_origins_; EnableTraceTurboJson trace_turbo_; TickCounter* const tick_counter_; + + // Store the maximal unoptimized frame height. Later used to apply an offset + // to stack checks. 
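+  // The pointer is caller-owned and caller-initialized to zero;
+  // GetFrameStateDescriptor raises it to the running maximum over all frame
+  // states encountered during selection.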
+ size_t* max_unoptimized_frame_height_; }; } // namespace compiler diff --git a/deps/v8/src/compiler/backend/instruction.cc b/deps/v8/src/compiler/backend/instruction.cc index 09c7fe22c5..06158b0c72 100644 --- a/deps/v8/src/compiler/backend/instruction.cc +++ b/deps/v8/src/compiler/backend/instruction.cc @@ -6,12 +6,14 @@ #include <iomanip> +#include "src/codegen/interface-descriptors.h" #include "src/codegen/register-configuration.h" #include "src/codegen/source-position.h" #include "src/compiler/common-operator.h" #include "src/compiler/graph.h" #include "src/compiler/schedule.h" #include "src/compiler/state-values-utils.h" +#include "src/execution/frames.h" namespace v8 { namespace internal { @@ -942,7 +944,7 @@ void InstructionSequence::MarkAsRepresentation(MachineRepresentation rep, int InstructionSequence::AddDeoptimizationEntry( FrameStateDescriptor* descriptor, DeoptimizeKind kind, - DeoptimizeReason reason, VectorSlotPair const& feedback) { + DeoptimizeReason reason, FeedbackSource const& feedback) { int deoptimization_id = static_cast<int>(deoptimization_entries_.size()); deoptimization_entries_.push_back( DeoptimizationEntry(descriptor, kind, reason, feedback)); @@ -1002,6 +1004,59 @@ void InstructionSequence::SetRegisterConfigurationForTesting( GetRegConfig = InstructionSequence::RegisterConfigurationForTesting; } +namespace { + +size_t GetConservativeFrameSizeInBytes(FrameStateType type, + size_t parameters_count, + size_t locals_count, + BailoutId bailout_id) { + switch (type) { + case FrameStateType::kInterpretedFunction: { + auto info = InterpretedFrameInfo::Conservative( + static_cast<int>(parameters_count), static_cast<int>(locals_count)); + return info.frame_size_in_bytes(); + } + case FrameStateType::kArgumentsAdaptor: { + auto info = ArgumentsAdaptorFrameInfo::Conservative( + static_cast<int>(parameters_count)); + return info.frame_size_in_bytes(); + } + case FrameStateType::kConstructStub: { + auto info = ConstructStubFrameInfo::Conservative( + static_cast<int>(parameters_count)); + return info.frame_size_in_bytes(); + } + case FrameStateType::kBuiltinContinuation: + case FrameStateType::kJavaScriptBuiltinContinuation: + case FrameStateType::kJavaScriptBuiltinContinuationWithCatch: { + const RegisterConfiguration* config = RegisterConfiguration::Default(); + auto info = BuiltinContinuationFrameInfo::Conservative( + static_cast<int>(parameters_count), + Builtins::CallInterfaceDescriptorFor( + Builtins::GetBuiltinFromBailoutId(bailout_id)), + config); + return info.frame_size_in_bytes(); + } + } + UNREACHABLE(); +} + +size_t GetTotalConservativeFrameSizeInBytes(FrameStateType type, + size_t parameters_count, + size_t locals_count, + BailoutId bailout_id, + FrameStateDescriptor* outer_state) { + size_t outer_total_conservative_frame_size_in_bytes = + (outer_state == nullptr) + ? 
0 + : outer_state->total_conservative_frame_size_in_bytes(); + return GetConservativeFrameSizeInBytes(type, parameters_count, locals_count, + bailout_id) + + outer_total_conservative_frame_size_in_bytes; +} + +} // namespace + FrameStateDescriptor::FrameStateDescriptor( Zone* zone, FrameStateType type, BailoutId bailout_id, OutputFrameStateCombine state_combine, size_t parameters_count, @@ -1014,10 +1069,35 @@ FrameStateDescriptor::FrameStateDescriptor( parameters_count_(parameters_count), locals_count_(locals_count), stack_count_(stack_count), + total_conservative_frame_size_in_bytes_( + GetTotalConservativeFrameSizeInBytes( + type, parameters_count, locals_count, bailout_id, outer_state)), values_(zone), shared_info_(shared_info), outer_state_(outer_state) {} +size_t FrameStateDescriptor::GetHeight() const { + switch (type()) { + case FrameStateType::kInterpretedFunction: + return locals_count(); // The accumulator is *not* included. + case FrameStateType::kBuiltinContinuation: + // Custom, non-JS calling convention (that does not have a notion of + // a receiver or context). + return parameters_count(); + case FrameStateType::kArgumentsAdaptor: + case FrameStateType::kConstructStub: + case FrameStateType::kJavaScriptBuiltinContinuation: + case FrameStateType::kJavaScriptBuiltinContinuationWithCatch: + // JS linkage. The parameters count + // - includes the receiver (input 1 in CreateArtificialFrameState, and + // passed as part of stack parameters to + // CreateJavaScriptBuiltinContinuationFrameState), and + // - does *not* include the context. + return parameters_count(); + } + UNREACHABLE(); +} + size_t FrameStateDescriptor::GetSize() const { return 1 + parameters_count() + locals_count() + stack_count() + (HasContext() ? 1 : 0); diff --git a/deps/v8/src/compiler/backend/instruction.h b/deps/v8/src/compiler/backend/instruction.h index 9b32204055..f5f7f64c51 100644 --- a/deps/v8/src/compiler/backend/instruction.h +++ b/deps/v8/src/compiler/backend/instruction.h @@ -17,6 +17,7 @@ #include "src/common/globals.h" #include "src/compiler/backend/instruction-codes.h" #include "src/compiler/common-operator.h" +#include "src/compiler/feedback-source.h" #include "src/compiler/frame.h" #include "src/compiler/opcodes.h" #include "src/numbers/double.h" @@ -130,7 +131,7 @@ class V8_EXPORT_PRIVATE InstructionOperand { inline uint64_t GetCanonicalizedValue() const; - class KindField : public BitField64<Kind, 0, 3> {}; + using KindField = BitField64<Kind, 0, 3>; uint64_t value_; }; @@ -331,20 +332,20 @@ class UnallocatedOperand final : public InstructionOperand { STATIC_ASSERT(KindField::kSize == 3); - class VirtualRegisterField : public BitField64<uint32_t, 3, 32> {}; + using VirtualRegisterField = BitField64<uint32_t, 3, 32>; // BitFields for all unallocated operands. - class BasicPolicyField : public BitField64<BasicPolicy, 35, 1> {}; + using BasicPolicyField = BitField64<BasicPolicy, 35, 1>; // BitFields specific to BasicPolicy::FIXED_SLOT. - class FixedSlotIndexField : public BitField64<int, 36, 28> {}; + using FixedSlotIndexField = BitField64<int, 36, 28>; // BitFields specific to BasicPolicy::EXTENDED_POLICY. 
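  // (Each field packs into the 64-bit value_ payload via the aliases' static
  //  encode()/decode(); the using-declarations below are behaviorally
  //  identical to the old empty-subclass form.)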
- class ExtendedPolicyField : public BitField64<ExtendedPolicy, 36, 3> {}; - class LifetimeField : public BitField64<Lifetime, 39, 1> {}; - class HasSecondaryStorageField : public BitField64<bool, 40, 1> {}; - class FixedRegisterField : public BitField64<int, 41, 6> {}; - class SecondaryStorageField : public BitField64<int, 47, 3> {}; + using ExtendedPolicyField = BitField64<ExtendedPolicy, 36, 3>; + using LifetimeField = BitField64<Lifetime, 39, 1>; + using HasSecondaryStorageField = BitField64<bool, 40, 1>; + using FixedRegisterField = BitField64<int, 41, 6>; + using SecondaryStorageField = BitField64<int, 47, 3>; private: explicit UnallocatedOperand(int virtual_register) @@ -373,7 +374,7 @@ class ConstantOperand : public InstructionOperand { INSTRUCTION_OPERAND_CASTS(ConstantOperand, CONSTANT) STATIC_ASSERT(KindField::kSize == 3); - class VirtualRegisterField : public BitField64<uint32_t, 3, 32> {}; + using VirtualRegisterField = BitField64<uint32_t, 3, 32>; }; class ImmediateOperand : public InstructionOperand { @@ -406,8 +407,8 @@ class ImmediateOperand : public InstructionOperand { INSTRUCTION_OPERAND_CASTS(ImmediateOperand, IMMEDIATE) STATIC_ASSERT(KindField::kSize == 3); - class TypeField : public BitField64<ImmediateType, 3, 1> {}; - class ValueField : public BitField64<int32_t, 32, 32> {}; + using TypeField = BitField64<ImmediateType, 3, 1>; + using ValueField = BitField64<int32_t, 32, 32>; }; class LocationOperand : public InstructionOperand { @@ -509,9 +510,9 @@ class LocationOperand : public InstructionOperand { } STATIC_ASSERT(KindField::kSize == 3); - class LocationKindField : public BitField64<LocationKind, 3, 2> {}; - class RepresentationField : public BitField64<MachineRepresentation, 5, 8> {}; - class IndexField : public BitField64<int32_t, 35, 29> {}; + using LocationKindField = BitField64<LocationKind, 3, 2>; + using RepresentationField = BitField64<MachineRepresentation, 5, 8>; + using IndexField = BitField64<int32_t, 35, 29>; }; class V8_EXPORT_PRIVATE ExplicitOperand @@ -1270,6 +1271,20 @@ class FrameStateDescriptor : public ZoneObject { type_ == FrameStateType::kConstructStub; } + // The frame height on the stack, in number of slots, as serialized into a + // Translation and later used by the deoptimizer. Does *not* include + // information from the chain of outer states. Unlike |GetSize| this does not + // always include parameters, locals, and stack slots; instead, the returned + // slot kinds depend on the frame type. + size_t GetHeight() const; + + // Returns an overapproximation of the unoptimized stack frame size in bytes, + // as later produced by the deoptimizer. Considers both this and the chain of + // outer states. 
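+  // For an inlined chain f -> g -> h, for example, this is the sum of the
+  // three frames' conservative sizes.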
+ size_t total_conservative_frame_size_in_bytes() const { + return total_conservative_frame_size_in_bytes_; + } + size_t GetSize() const; size_t GetTotalSize() const; size_t GetFrameCount() const; @@ -1283,12 +1298,13 @@ class FrameStateDescriptor : public ZoneObject { FrameStateType type_; BailoutId bailout_id_; OutputFrameStateCombine frame_state_combine_; - size_t parameters_count_; - size_t locals_count_; - size_t stack_count_; + const size_t parameters_count_; + const size_t locals_count_; + const size_t stack_count_; + const size_t total_conservative_frame_size_in_bytes_; StateValueList values_; MaybeHandle<SharedFunctionInfo> const shared_info_; - FrameStateDescriptor* outer_state_; + FrameStateDescriptor* const outer_state_; }; // A deoptimization entry is a pair of the reason why we deoptimize and the @@ -1297,7 +1313,7 @@ class DeoptimizationEntry final { public: DeoptimizationEntry() = default; DeoptimizationEntry(FrameStateDescriptor* descriptor, DeoptimizeKind kind, - DeoptimizeReason reason, VectorSlotPair const& feedback) + DeoptimizeReason reason, FeedbackSource const& feedback) : descriptor_(descriptor), kind_(kind), reason_(reason), @@ -1306,13 +1322,13 @@ class DeoptimizationEntry final { FrameStateDescriptor* descriptor() const { return descriptor_; } DeoptimizeKind kind() const { return kind_; } DeoptimizeReason reason() const { return reason_; } - VectorSlotPair const& feedback() const { return feedback_; } + FeedbackSource const& feedback() const { return feedback_; } private: FrameStateDescriptor* descriptor_ = nullptr; DeoptimizeKind kind_ = DeoptimizeKind::kEager; DeoptimizeReason reason_ = DeoptimizeReason::kUnknown; - VectorSlotPair feedback_ = VectorSlotPair(); + FeedbackSource feedback_ = FeedbackSource(); }; using DeoptimizationVector = ZoneVector<DeoptimizationEntry>; @@ -1577,7 +1593,7 @@ class V8_EXPORT_PRIVATE InstructionSequence final int AddDeoptimizationEntry(FrameStateDescriptor* descriptor, DeoptimizeKind kind, DeoptimizeReason reason, - VectorSlotPair const& feedback); + FeedbackSource const& feedback); DeoptimizationEntry const& GetDeoptimizationEntry(int deoptimization_id); int GetDeoptimizationEntryCount() const { return static_cast<int>(deoptimization_entries_.size()); diff --git a/deps/v8/src/compiler/backend/mips/code-generator-mips.cc b/deps/v8/src/compiler/backend/mips/code-generator-mips.cc index 5cec4a8a16..239075392a 100644 --- a/deps/v8/src/compiler/backend/mips/code-generator-mips.cc +++ b/deps/v8/src/compiler/backend/mips/code-generator-mips.cc @@ -850,18 +850,17 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( // don't emit code for nops. break; case kArchDeoptimize: { - int deopt_state_id = + DeoptimizationExit* exit = BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore()); - CodeGenResult result = - AssembleDeoptimizerCall(deopt_state_id, current_source_position_); + CodeGenResult result = AssembleDeoptimizerCall(exit); if (result != kSuccess) return result; break; } case kArchRet: AssembleReturn(instr->InputAt(0)); break; - case kArchStackPointer: - __ mov(i.OutputRegister(), sp); + case kArchStackPointerGreaterThan: + // Pseudo-instruction used for cmp/branch. No opcode emitted here. 
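+      // The actual sp comparison is emitted by AssembleBranchToLabels once
+      // the flags continuation materializes as a branch.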
break; case kArchFramePointer: __ mov(i.OutputRegister(), fp); @@ -2067,6 +2066,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( i.InputSimd128Register(1)); break; } + case kMipsF32x4Div: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + __ fdiv_w(i.OutputSimd128Register(), i.InputSimd128Register(0), + i.InputSimd128Register(1)); + break; + } case kMipsF32x4Max: { CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); __ fmax_w(i.OutputSimd128Register(), i.InputSimd128Register(0), @@ -3015,6 +3020,9 @@ void AssembleBranchToLabels(CodeGenerator* gen, TurboAssembler* tasm, } else if (instr->arch_opcode() == kMipsCmp) { cc = FlagsConditionToConditionCmp(condition); __ Branch(tlabel, cc, i.InputRegister(0), i.InputOperand(1)); + } else if (instr->arch_opcode() == kArchStackPointerGreaterThan) { + cc = FlagsConditionToConditionCmp(condition); + __ Branch(tlabel, cc, sp, Operand(i.InputRegister(0))); } else if (instr->arch_opcode() == kMipsCmpS || instr->arch_opcode() == kMipsCmpD) { bool predicate; @@ -3444,6 +3452,42 @@ void CodeGenerator::AssembleConstructFrame() { const RegList saves = call_descriptor->CalleeSavedRegisters(); const RegList saves_fpu = call_descriptor->CalleeSavedFPRegisters(); + + if (required_slots > 0) { + DCHECK(frame_access_state()->has_frame()); + if (info()->IsWasm() && required_slots > 128) { + // For WebAssembly functions with big frames we have to do the stack + // overflow check before we construct the frame. Otherwise we may not + // have enough space on the stack to call the runtime for the stack + // overflow. + Label done; + + // If the frame is bigger than the stack, we throw the stack overflow + // exception unconditionally. Thereby we can avoid the integer overflow + // check in the condition code. + if ((required_slots * kSystemPointerSize) < (FLAG_stack_size * 1024)) { + __ Lw( + kScratchReg, + FieldMemOperand(kWasmInstanceRegister, + WasmInstanceObject::kRealStackLimitAddressOffset)); + __ Lw(kScratchReg, MemOperand(kScratchReg)); + __ Addu(kScratchReg, kScratchReg, + Operand(required_slots * kSystemPointerSize)); + __ Branch(&done, uge, sp, Operand(kScratchReg)); + } + + __ Call(wasm::WasmCode::kWasmStackOverflow, RelocInfo::WASM_STUB_CALL); + // We come from WebAssembly, there are no references for the GC. + ReferenceMap* reference_map = new (zone()) ReferenceMap(zone()); + RecordSafepoint(reference_map, Safepoint::kNoLazyDeopt); + if (FLAG_debug_code) { + __ stop(); + } + + __ bind(&done); + } + } + const int returns = frame()->GetReturnSlotCount(); // Skip callee-saved and return slots, which are pushed below. 
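The guarded check above has the same shape in both MIPS ports; a sketch of the arithmetic it encodes, where real_stack_limit stands for the value loaded from the wasm instance and the 32-bit pointer size matches this file (names are illustrative):

  #include <cstdint>

  constexpr uint32_t kSystemPointerSize = 4;  // mips32

  // Returns true when the frame fits (the generated code branches to `done`
  // with condition uge); false means the unconditional kWasmStackOverflow
  // runtime call is reached.
  bool BigFrameFits(uintptr_t sp, uintptr_t real_stack_limit,
                    uint32_t required_slots, uint32_t stack_size_kib) {
    uint32_t frame_bytes = required_slots * kSystemPointerSize;
    // A frame at least as large as the whole stack overflows by definition;
    // skipping the compare also avoids wrap-around in limit + frame_bytes.
    if (frame_bytes >= stack_size_kib * 1024) return false;
    return sp >= real_stack_limit + frame_bytes;  // unsigned, like uge
  }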
@@ -3527,6 +3571,8 @@ void CodeGenerator::AssembleReturn(InstructionOperand* pop) { void CodeGenerator::FinishCode() {} +void CodeGenerator::PrepareForDeoptimizationExits(int deopt_count) {} + void CodeGenerator::AssembleMove(InstructionOperand* source, InstructionOperand* destination) { MipsOperandConverter g(this, nullptr); diff --git a/deps/v8/src/compiler/backend/mips/instruction-codes-mips.h b/deps/v8/src/compiler/backend/mips/instruction-codes-mips.h index 44e53ac044..e8020d9e89 100644 --- a/deps/v8/src/compiler/backend/mips/instruction-codes-mips.h +++ b/deps/v8/src/compiler/backend/mips/instruction-codes-mips.h @@ -165,6 +165,7 @@ namespace compiler { V(MipsF32x4AddHoriz) \ V(MipsF32x4Sub) \ V(MipsF32x4Mul) \ + V(MipsF32x4Div) \ V(MipsF32x4Max) \ V(MipsF32x4Min) \ V(MipsF32x4Eq) \ diff --git a/deps/v8/src/compiler/backend/mips/instruction-scheduler-mips.cc b/deps/v8/src/compiler/backend/mips/instruction-scheduler-mips.cc index 92ab3f9344..4e6aef52f4 100644 --- a/deps/v8/src/compiler/backend/mips/instruction-scheduler-mips.cc +++ b/deps/v8/src/compiler/backend/mips/instruction-scheduler-mips.cc @@ -51,6 +51,7 @@ int InstructionScheduler::GetTargetInstructionFlags( case kMipsF32x4Max: case kMipsF32x4Min: case kMipsF32x4Mul: + case kMipsF32x4Div: case kMipsF32x4Ne: case kMipsF32x4Neg: case kMipsF32x4RecipApprox: @@ -1673,7 +1674,6 @@ int InstructionScheduler::GetInstructionLatency(const Instruction* instr) { case kMipsCmp: return 0; case kArchDebugBreak: - case kArchStackPointer: case kArchFramePointer: case kArchParentFramePointer: case kMipsShl: diff --git a/deps/v8/src/compiler/backend/mips/instruction-selector-mips.cc b/deps/v8/src/compiler/backend/mips/instruction-selector-mips.cc index 452e92a174..bb47262c6c 100644 --- a/deps/v8/src/compiler/backend/mips/instruction-selector-mips.cc +++ b/deps/v8/src/compiler/backend/mips/instruction-selector-mips.cc @@ -352,7 +352,8 @@ void InstructionSelector::VisitStore(Node* node) { MachineRepresentation rep = store_rep.representation(); // TODO(mips): I guess this could be done in a better way. - if (write_barrier_kind != kNoWriteBarrier) { + if (write_barrier_kind != kNoWriteBarrier && + V8_LIKELY(!FLAG_disable_write_barriers)) { DCHECK(CanBeTaggedPointer(rep)); InstructionOperand inputs[3]; size_t input_count = 0; @@ -1529,6 +1530,15 @@ void VisitWordCompare(InstructionSelector* selector, Node* node, } // namespace +void InstructionSelector::VisitStackPointerGreaterThan( + Node* node, FlagsContinuation* cont) { + Node* const value = node->InputAt(0); + InstructionCode opcode = kArchStackPointerGreaterThan; + + MipsOperandGenerator g(this); + EmitWithContinuation(opcode, g.UseRegister(value), cont); +} + // Shared routine for word comparisons against zero. 
void InstructionSelector::VisitWordCompareZero(Node* user, Node* value, FlagsContinuation* cont) { @@ -1607,6 +1617,9 @@ void InstructionSelector::VisitWordCompareZero(Node* user, Node* value, break; case IrOpcode::kWord32And: return VisitWordCompare(this, value, kMipsTst, cont, true); + case IrOpcode::kStackPointerGreaterThan: + cont->OverwriteAndNegateIfEqual(kStackPointerGreaterThanCondition); + return VisitStackPointerGreaterThan(value, cont); default: break; } @@ -2041,6 +2054,7 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) { V(F32x4AddHoriz, kMipsF32x4AddHoriz) \ V(F32x4Sub, kMipsF32x4Sub) \ V(F32x4Mul, kMipsF32x4Mul) \ + V(F32x4Div, kMipsF32x4Div) \ V(F32x4Max, kMipsF32x4Max) \ V(F32x4Min, kMipsF32x4Min) \ V(F32x4Eq, kMipsF32x4Eq) \ diff --git a/deps/v8/src/compiler/backend/mips64/code-generator-mips64.cc b/deps/v8/src/compiler/backend/mips64/code-generator-mips64.cc index f746b52df6..5682bed71a 100644 --- a/deps/v8/src/compiler/backend/mips64/code-generator-mips64.cc +++ b/deps/v8/src/compiler/backend/mips64/code-generator-mips64.cc @@ -828,18 +828,17 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( // don't emit code for nops. break; case kArchDeoptimize: { - int deopt_state_id = + DeoptimizationExit* exit = BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore()); - CodeGenResult result = - AssembleDeoptimizerCall(deopt_state_id, current_source_position_); + CodeGenResult result = AssembleDeoptimizerCall(exit); if (result != kSuccess) return result; break; } case kArchRet: AssembleReturn(instr->InputAt(0)); break; - case kArchStackPointer: - __ mov(i.OutputRegister(), sp); + case kArchStackPointerGreaterThan: + // Pseudo-instruction used for cmp/branch. No opcode emitted here. break; case kArchFramePointer: __ mov(i.OutputRegister(), fp); @@ -2182,6 +2181,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( i.InputSimd128Register(1)); break; } + case kMips64F32x4Div: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + __ fdiv_w(i.OutputSimd128Register(), i.InputSimd128Register(0), + i.InputSimd128Register(1)); + break; + } case kMips64F32x4Max: { CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); __ fmax_w(i.OutputSimd128Register(), i.InputSimd128Register(0), @@ -3140,6 +3145,9 @@ void AssembleBranchToLabels(CodeGenerator* gen, TurboAssembler* tasm, } else if (instr->arch_opcode() == kMips64Cmp) { cc = FlagsConditionToConditionCmp(condition); __ Branch(tlabel, cc, i.InputRegister(0), i.InputOperand(1)); + } else if (instr->arch_opcode() == kArchStackPointerGreaterThan) { + cc = FlagsConditionToConditionCmp(condition); + __ Branch(tlabel, cc, sp, Operand(i.InputRegister(0))); } else if (instr->arch_opcode() == kMips64CmpS || instr->arch_opcode() == kMips64CmpD) { bool predicate; @@ -3603,6 +3611,42 @@ void CodeGenerator::AssembleConstructFrame() { const RegList saves = call_descriptor->CalleeSavedRegisters(); const RegList saves_fpu = call_descriptor->CalleeSavedFPRegisters(); + + if (required_slots > 0) { + DCHECK(frame_access_state()->has_frame()); + if (info()->IsWasm() && required_slots > 128) { + // For WebAssembly functions with big frames we have to do the stack + // overflow check before we construct the frame. Otherwise we may not + // have enough space on the stack to call the runtime for the stack + // overflow. + Label done; + + // If the frame is bigger than the stack, we throw the stack overflow + // exception unconditionally. 
Thereby we can avoid the integer overflow + // check in the condition code. + if ((required_slots * kSystemPointerSize) < (FLAG_stack_size * 1024)) { + __ Ld( + kScratchReg, + FieldMemOperand(kWasmInstanceRegister, + WasmInstanceObject::kRealStackLimitAddressOffset)); + __ Ld(kScratchReg, MemOperand(kScratchReg)); + __ Daddu(kScratchReg, kScratchReg, + Operand(required_slots * kSystemPointerSize)); + __ Branch(&done, uge, sp, Operand(kScratchReg)); + } + + __ Call(wasm::WasmCode::kWasmStackOverflow, RelocInfo::WASM_STUB_CALL); + // We come from WebAssembly, there are no references for the GC. + ReferenceMap* reference_map = new (zone()) ReferenceMap(zone()); + RecordSafepoint(reference_map, Safepoint::kNoLazyDeopt); + if (FLAG_debug_code) { + __ stop(); + } + + __ bind(&done); + } + } + const int returns = frame()->GetReturnSlotCount(); // Skip callee-saved and return slots, which are pushed below. @@ -3686,6 +3730,8 @@ void CodeGenerator::AssembleReturn(InstructionOperand* pop) { void CodeGenerator::FinishCode() {} +void CodeGenerator::PrepareForDeoptimizationExits(int deopt_count) {} + void CodeGenerator::AssembleMove(InstructionOperand* source, InstructionOperand* destination) { MipsOperandConverter g(this, nullptr); diff --git a/deps/v8/src/compiler/backend/mips64/instruction-codes-mips64.h b/deps/v8/src/compiler/backend/mips64/instruction-codes-mips64.h index e375ee8d07..edc8924757 100644 --- a/deps/v8/src/compiler/backend/mips64/instruction-codes-mips64.h +++ b/deps/v8/src/compiler/backend/mips64/instruction-codes-mips64.h @@ -195,6 +195,7 @@ namespace compiler { V(Mips64F32x4AddHoriz) \ V(Mips64F32x4Sub) \ V(Mips64F32x4Mul) \ + V(Mips64F32x4Div) \ V(Mips64F32x4Max) \ V(Mips64F32x4Min) \ V(Mips64F32x4Eq) \ diff --git a/deps/v8/src/compiler/backend/mips64/instruction-scheduler-mips64.cc b/deps/v8/src/compiler/backend/mips64/instruction-scheduler-mips64.cc index 4dcafe4197..880b424c41 100644 --- a/deps/v8/src/compiler/backend/mips64/instruction-scheduler-mips64.cc +++ b/deps/v8/src/compiler/backend/mips64/instruction-scheduler-mips64.cc @@ -79,6 +79,7 @@ int InstructionScheduler::GetTargetInstructionFlags( case kMips64F32x4Max: case kMips64F32x4Min: case kMips64F32x4Mul: + case kMips64F32x4Div: case kMips64F32x4Ne: case kMips64F32x4Neg: case kMips64F32x4RecipApprox: @@ -1275,7 +1276,6 @@ int InstructionScheduler::GetInstructionLatency(const Instruction* instr) { return 0; case kArchRet: return AssemblerReturnLatency(); - case kArchStackPointer: case kArchFramePointer: return 1; case kArchParentFramePointer: diff --git a/deps/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc b/deps/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc index 95f11ebed1..9c717ab1e9 100644 --- a/deps/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc +++ b/deps/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc @@ -422,7 +422,8 @@ void InstructionSelector::VisitStore(Node* node) { MachineRepresentation rep = store_rep.representation(); // TODO(mips): I guess this could be done in a better way. 
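// The hunk below repeats a pattern this change applies to store selection in
// every backend: write-barrier emission is additionally gated on the new
// FLAG_disable_write_barriers flag, so no barrier code is generated when
// write barriers are turned off. A minimal sketch of the gating predicate,
// using the names from the diff (the helper itself is illustrative, not part
// of V8):
static bool ShouldEmitWriteBarrier(WriteBarrierKind write_barrier_kind) {
  return write_barrier_kind != kNoWriteBarrier &&
         V8_LIKELY(!FLAG_disable_write_barriers);
}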
- if (write_barrier_kind != kNoWriteBarrier) { + if (write_barrier_kind != kNoWriteBarrier && + V8_LIKELY(!FLAG_disable_write_barriers)) { DCHECK(CanBeTaggedPointer(rep)); InstructionOperand inputs[3]; size_t input_count = 0; @@ -2090,6 +2091,15 @@ void VisitAtomicBinop(InstructionSelector* selector, Node* node, } // namespace +void InstructionSelector::VisitStackPointerGreaterThan( + Node* node, FlagsContinuation* cont) { + Node* const value = node->InputAt(0); + InstructionCode opcode = kArchStackPointerGreaterThan; + + Mips64OperandGenerator g(this); + EmitWithContinuation(opcode, g.UseRegister(value), cont); +} + // Shared routine for word comparisons against zero. void InstructionSelector::VisitWordCompareZero(Node* user, Node* value, FlagsContinuation* cont) { @@ -2199,6 +2209,9 @@ void InstructionSelector::VisitWordCompareZero(Node* user, Node* value, case IrOpcode::kWord32And: case IrOpcode::kWord64And: return VisitWordCompare(this, value, kMips64Tst, cont, true); + case IrOpcode::kStackPointerGreaterThan: + cont->OverwriteAndNegateIfEqual(kStackPointerGreaterThanCondition); + return VisitStackPointerGreaterThan(value, cont); default: break; } @@ -2704,6 +2717,7 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) { V(F32x4AddHoriz, kMips64F32x4AddHoriz) \ V(F32x4Sub, kMips64F32x4Sub) \ V(F32x4Mul, kMips64F32x4Mul) \ + V(F32x4Div, kMips64F32x4Div) \ V(F32x4Max, kMips64F32x4Max) \ V(F32x4Min, kMips64F32x4Min) \ V(F32x4Eq, kMips64F32x4Eq) \ diff --git a/deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc b/deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc index 5289812cb5..5c69bc34a1 100644 --- a/deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc +++ b/deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc @@ -1024,13 +1024,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( Label start_call; bool isWasmCapiFunction = linkage()->GetIncomingDescriptor()->IsWasmCapiFunction(); - constexpr int offset = 12; + constexpr int offset = 9 * kInstrSize; if (isWasmCapiFunction) { - __ mflr(kScratchReg); + __ mflr(r0); __ bind(&start_call); - __ LoadPC(r0); - __ addi(r0, r0, Operand(offset)); - __ StoreP(r0, MemOperand(fp, WasmExitFrameConstants::kCallingPCOffset)); + __ LoadPC(kScratchReg); + __ addi(kScratchReg, kScratchReg, Operand(offset)); + __ StoreP(kScratchReg, + MemOperand(fp, WasmExitFrameConstants::kCallingPCOffset)); __ mtlr(r0); } if (instr->InputAt(0)->IsImmediate()) { @@ -1040,11 +1041,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( Register func = i.InputRegister(0); __ CallCFunction(func, num_parameters); } - // TODO(miladfar): In the above block, r0 must be populated with the - // strictly-correct PC, which is the return address at this spot. The - // offset is set to 12 right now, which is counted from where we are - // binding to the label and ends at this spot. If failed, replace it it - // with the correct offset suggested. More info on f5ab7d3. + // TODO(miladfar): In the above block, kScratchReg must be populated with + // the strictly-correct PC, which is the return address at this spot. The + // offset is set to 36 (9 * kInstrSize) right now, which is counted from + // where we are binding to the label and ends at this spot. If failed, + // replace it with the correct offset suggested. More info on f5ab7d3. 
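// Worked arithmetic behind the updated TODO above, assuming PPC's fixed
// 4-byte instruction encoding (kInstrSize == 4): nine instructions are
// emitted between binding start_call and the return point, so
//   offset = 9 * kInstrSize = 9 * 4 = 36 bytes,
// which is exactly the value the CHECK below compares against
// SizeOfCodeGeneratedSince(&start_call).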
if (isWasmCapiFunction) CHECK_EQ(offset, __ SizeOfCodeGeneratedSince(&start_call)); @@ -1104,10 +1105,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( DCHECK_EQ(LeaveRC, i.OutputRCBit()); break; case kArchDeoptimize: { - int deopt_state_id = + DeoptimizationExit* exit = BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore()); - CodeGenResult result = - AssembleDeoptimizerCall(deopt_state_id, current_source_position_); + CodeGenResult result = AssembleDeoptimizerCall(exit); if (result != kSuccess) return result; break; } @@ -1115,10 +1115,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( AssembleReturn(instr->InputAt(0)); DCHECK_EQ(LeaveRC, i.OutputRCBit()); break; - case kArchStackPointer: - __ mr(i.OutputRegister(), sp); - DCHECK_EQ(LeaveRC, i.OutputRCBit()); - break; case kArchFramePointer: __ mr(i.OutputRegister(), fp); DCHECK_EQ(LeaveRC, i.OutputRCBit()); @@ -1130,6 +1126,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( __ mr(i.OutputRegister(), fp); } break; + case kArchStackPointerGreaterThan: { + constexpr size_t kValueIndex = 0; + DCHECK(instr->InputAt(kValueIndex)->IsRegister()); + __ cmpl(sp, i.InputRegister(kValueIndex), cr0); + break; + } case kArchTruncateDoubleToI: __ TruncateDoubleToI(isolate(), zone(), i.OutputRegister(), i.InputDoubleRegister(0), DetermineStubCallMode()); @@ -2516,6 +2518,8 @@ void CodeGenerator::AssembleReturn(InstructionOperand* pop) { void CodeGenerator::FinishCode() {} +void CodeGenerator::PrepareForDeoptimizationExits(int deopt_count) {} + void CodeGenerator::AssembleMove(InstructionOperand* source, InstructionOperand* destination) { PPCOperandConverter g(this, nullptr); diff --git a/deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc b/deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc index bfc77b9412..ef8490a726 100644 --- a/deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc +++ b/deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc @@ -65,17 +65,6 @@ class PPCOperandGenerator final : public OperandGenerator { } return false; } - - // Use the stack pointer if the node is LoadStackPointer, otherwise assign a - // register. - InstructionOperand UseRegisterOrStackPointer(Node* node) { - if (node->opcode() == IrOpcode::kLoadStackPointer) { - return LocationOperand(LocationOperand::EXPLICIT, - LocationOperand::REGISTER, - MachineRepresentation::kWord32, sp.code()); - } - return UseRegister(node); - } }; namespace { @@ -267,7 +256,8 @@ void InstructionSelector::VisitStore(Node* node) { rep = store_rep.representation(); } - if (write_barrier_kind != kNoWriteBarrier) { + if (write_barrier_kind != kNoWriteBarrier && + V8_LIKELY(!FLAG_disable_write_barriers)) { DCHECK(CanBeTaggedPointer(rep)); AddressingMode addressing_mode; InstructionOperand inputs[3]; @@ -558,6 +548,15 @@ void InstructionSelector::VisitWord32Xor(Node* node) { } } +void InstructionSelector::VisitStackPointerGreaterThan( + Node* node, FlagsContinuation* cont) { + Node* const value = node->InputAt(0); + InstructionCode opcode = kArchStackPointerGreaterThan; + + PPCOperandGenerator g(this); + EmitWithContinuation(opcode, g.UseRegister(value), cont); +} + #if V8_TARGET_ARCH_PPC64 void InstructionSelector::VisitWord64Xor(Node* node) { PPCOperandGenerator g(this); @@ -1456,15 +1455,15 @@ void VisitWordCompare(InstructionSelector* selector, Node* node, // Match immediates on left or right side of comparison. 
if (g.CanBeImmediate(right, immediate_mode)) { - VisitCompare(selector, opcode, g.UseRegisterOrStackPointer(left), - g.UseImmediate(right), cont); + VisitCompare(selector, opcode, g.UseRegister(left), g.UseImmediate(right), + cont); } else if (g.CanBeImmediate(left, immediate_mode)) { if (!commutative) cont->Commute(); - VisitCompare(selector, opcode, g.UseRegisterOrStackPointer(right), - g.UseImmediate(left), cont); + VisitCompare(selector, opcode, g.UseRegister(right), g.UseImmediate(left), + cont); } else { - VisitCompare(selector, opcode, g.UseRegisterOrStackPointer(left), - g.UseRegisterOrStackPointer(right), cont); + VisitCompare(selector, opcode, g.UseRegister(left), g.UseRegister(right), + cont); } } @@ -1639,6 +1638,9 @@ void InstructionSelector::VisitWordCompareZero(Node* user, Node* value, // case IrOpcode::kWord64Shr: // case IrOpcode::kWord64Ror: #endif + case IrOpcode::kStackPointerGreaterThan: + cont->OverwriteAndNegateIfEqual(kStackPointerGreaterThanCondition); + return VisitStackPointerGreaterThan(value, cont); default: break; } @@ -2281,6 +2283,8 @@ void InstructionSelector::VisitF32x4Sub(Node* node) { UNIMPLEMENTED(); } void InstructionSelector::VisitF32x4Mul(Node* node) { UNIMPLEMENTED(); } +void InstructionSelector::VisitF32x4Div(Node* node) { UNIMPLEMENTED(); } + void InstructionSelector::VisitF32x4Min(Node* node) { UNIMPLEMENTED(); } void InstructionSelector::VisitF32x4Max(Node* node) { UNIMPLEMENTED(); } diff --git a/deps/v8/src/compiler/backend/register-allocator.cc b/deps/v8/src/compiler/backend/register-allocator.cc index 44701f8159..21eef0485c 100644 --- a/deps/v8/src/compiler/backend/register-allocator.cc +++ b/deps/v8/src/compiler/backend/register-allocator.cc @@ -2989,34 +2989,72 @@ LifetimePosition RegisterAllocator::FindOptimalSplitPos(LifetimePosition start, } LifetimePosition RegisterAllocator::FindOptimalSpillingPos( - LiveRange* range, LifetimePosition pos) { + LiveRange* range, LifetimePosition pos, SpillMode spill_mode, + LiveRange** begin_spill_out) { + *begin_spill_out = range; + // TODO(herhut): Be more clever here as long as we do not move pos out of + // deferred code. + if (spill_mode == SpillMode::kSpillDeferred) return pos; const InstructionBlock* block = GetInstructionBlock(code(), pos.Start()); const InstructionBlock* loop_header = block->IsLoopHeader() ? block : GetContainingLoop(code(), block); - if (loop_header == nullptr) return pos; - const UsePosition* prev_use = - range->PreviousUsePositionRegisterIsBeneficial(pos); - - while (loop_header != nullptr) { - // We are going to spill live range inside the loop. - // If possible try to move spilling position backwards to loop header. - // This will reduce number of memory moves on the back edge. - LifetimePosition loop_start = LifetimePosition::GapFromInstructionIndex( - loop_header->first_instruction_index()); - - if (range->Covers(loop_start)) { - if (prev_use == nullptr || prev_use->pos() < loop_start) { + if (data()->is_turbo_control_flow_aware_allocation()) { + while (loop_header != nullptr) { + // We are going to spill live range inside the loop. + // If possible try to move spilling position backwards to loop header. + // This will reduce number of memory moves on the back edge. 
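// In outline, this control-flow-aware walk asks, for each enclosing loop from
// innermost to outermost: is the same virtual register still live and
// unspilled at the loop header, with no register-beneficial use between the
// header and the current position? If so, the spill can start at the header,
// trading one spill store outside the loop for one per back edge. A hedged
// pseudocode sketch (simplified helper names, not the V8 API):
//   while (loop_header != nullptr) {
//     if (RegisterBeneficialUseBefore(range, HeaderStart(loop_header), pos))
//       break;  // spilling earlier would skip a use that wants a register
//     if (LiveUnspilledAtHeader(range, loop_header)) {
//       *begin_spill_out = RangeAtHeader(range, loop_header);
//       pos = HeaderStart(loop_header);  // hoist the spill to this header
//     }
//     loop_header = GetContainingLoop(code(), loop_header);  // try outer loop
//   }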
+ LifetimePosition loop_start = LifetimePosition::GapFromInstructionIndex( + loop_header->first_instruction_index()); + auto& loop_header_state = + data()->GetSpillState(loop_header->rpo_number()); + for (LiveRange* live_at_header : loop_header_state) { + if (live_at_header->TopLevel() != range->TopLevel() || + !live_at_header->Covers(loop_start) || live_at_header->spilled()) { + continue; + } + LiveRange* check_use = live_at_header; + for (; check_use != nullptr && check_use->Start() < pos; + check_use = check_use->next()) { + UsePosition* next_use = + check_use->NextUsePositionRegisterIsBeneficial(loop_start); + if (next_use != nullptr && next_use->pos() < pos) { + return pos; + } + } // No register beneficial use inside the loop before the pos. + *begin_spill_out = live_at_header; pos = loop_start; + break; } + + // Try hoisting out to an outer loop. + loop_header = GetContainingLoop(code(), loop_header); } + } else { + const UsePosition* prev_use = + range->PreviousUsePositionRegisterIsBeneficial(pos); + + while (loop_header != nullptr) { + // We are going to spill live range inside the loop. + // If possible try to move spilling position backwards to loop header + // inside the current range. This will reduce number of memory moves on + // the back edge. + LifetimePosition loop_start = LifetimePosition::GapFromInstructionIndex( + loop_header->first_instruction_index()); + + if (range->Covers(loop_start)) { + if (prev_use == nullptr || prev_use->pos() < loop_start) { + // No register beneficial use inside the loop before the pos. + pos = loop_start; + } + } - // Try hoisting out to an outer loop. - loop_header = GetContainingLoop(code(), loop_header); + // Try hoisting out to an outer loop. + loop_header = GetContainingLoop(code(), loop_header); + } } - return pos; } @@ -3064,6 +3102,28 @@ LinearScanAllocator::LinearScanAllocator(RegisterAllocationData* data, inactive_live_ranges().reserve(8); } +void LinearScanAllocator::MaybeSpillPreviousRanges(LiveRange* begin_range, + LifetimePosition begin_pos, + LiveRange* end_range) { + // Spill begin_range after begin_pos, then spill every live range of this + // virtual register until but excluding end_range. + DCHECK(begin_range->Covers(begin_pos)); + DCHECK_EQ(begin_range->TopLevel(), end_range->TopLevel()); + + if (begin_range != end_range) { + DCHECK_LE(begin_range->End(), end_range->Start()); + if (!begin_range->spilled()) { + SpillAfter(begin_range, begin_pos, SpillMode::kSpillAtDefinition); + } + for (LiveRange* range = begin_range->next(); range != end_range; + range = range->next()) { + if (!range->spilled()) { + range->Spill(); + } + } + } +} + void LinearScanAllocator::MaybeUndoPreviousSplit(LiveRange* range) { if (range->next() != nullptr && range->next()->ShouldRecombine()) { LiveRange* to_remove = range->next(); @@ -4407,11 +4467,10 @@ void LinearScanAllocator::SplitAndSpillIntersecting(LiveRange* current, } UsePosition* next_pos = range->NextRegisterPosition(current->Start()); - // TODO(herhut): Be more clever here as long as we do not move split_pos - // out of deferred code. - LifetimePosition spill_pos = spill_mode == SpillMode::kSpillDeferred - ? 
split_pos - : FindOptimalSpillingPos(range, split_pos); + LiveRange* begin_spill = nullptr; + LifetimePosition spill_pos = + FindOptimalSpillingPos(range, split_pos, spill_mode, &begin_spill); + MaybeSpillPreviousRanges(begin_spill, spill_pos, range); if (next_pos == nullptr) { SpillAfter(range, spill_pos, spill_mode); } else { diff --git a/deps/v8/src/compiler/backend/register-allocator.h b/deps/v8/src/compiler/backend/register-allocator.h index 55f8a8dd1f..bc7b09d147 100644 --- a/deps/v8/src/compiler/backend/register-allocator.h +++ b/deps/v8/src/compiler/backend/register-allocator.h @@ -1238,7 +1238,9 @@ class RegisterAllocator : public ZoneObject { // If we are trying to spill a range inside the loop try to // hoist spill position out to the point just before the loop. LifetimePosition FindOptimalSpillingPos(LiveRange* range, - LifetimePosition pos); + LifetimePosition pos, + SpillMode spill_mode, + LiveRange** begin_spill_out); const ZoneVector<TopLevelLiveRange*>& GetFixedRegisters() const; const char* RegisterName(int allocation_index) const; @@ -1292,6 +1294,9 @@ class LinearScanAllocator final : public RegisterAllocator { ZoneUnorderedSet<RangeWithRegister, RangeWithRegister::Hash, RangeWithRegister::Equals>; + void MaybeSpillPreviousRanges(LiveRange* begin_range, + LifetimePosition begin_pos, + LiveRange* end_range); void MaybeUndoPreviousSplit(LiveRange* range); void SpillNotLiveRanges( RangeWithRegisterSet& to_be_live, // NOLINT(runtime/references) diff --git a/deps/v8/src/compiler/backend/s390/code-generator-s390.cc b/deps/v8/src/compiler/backend/s390/code-generator-s390.cc index 6457b7c8b4..4c2d862fc4 100644 --- a/deps/v8/src/compiler/backend/s390/code-generator-s390.cc +++ b/deps/v8/src/compiler/backend/s390/code-generator-s390.cc @@ -1578,19 +1578,15 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( // don't emit code for nops. 
break; case kArchDeoptimize: { - int deopt_state_id = + DeoptimizationExit* exit = BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore()); - CodeGenResult result = - AssembleDeoptimizerCall(deopt_state_id, current_source_position_); + CodeGenResult result = AssembleDeoptimizerCall(exit); if (result != kSuccess) return result; break; } case kArchRet: AssembleReturn(instr->InputAt(0)); break; - case kArchStackPointer: - __ LoadRR(i.OutputRegister(), sp); - break; case kArchFramePointer: __ LoadRR(i.OutputRegister(), fp); break; @@ -1601,6 +1597,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( __ LoadRR(i.OutputRegister(), fp); } break; + case kArchStackPointerGreaterThan: { + constexpr size_t kValueIndex = 0; + DCHECK(instr->InputAt(kValueIndex)->IsRegister()); + __ CmpLogicalP(sp, i.InputRegister(kValueIndex)); + break; + } case kArchTruncateDoubleToI: __ TruncateDoubleToI(isolate(), zone(), i.OutputRegister(), i.InputDoubleRegister(0), DetermineStubCallMode()); @@ -3193,6 +3195,8 @@ void CodeGenerator::AssembleReturn(InstructionOperand* pop) { void CodeGenerator::FinishCode() {} +void CodeGenerator::PrepareForDeoptimizationExits(int deopt_count) {} + void CodeGenerator::AssembleMove(InstructionOperand* source, InstructionOperand* destination) { S390OperandConverter g(this, nullptr); diff --git a/deps/v8/src/compiler/backend/s390/instruction-selector-s390.cc b/deps/v8/src/compiler/backend/s390/instruction-selector-s390.cc index 99d3b0fa0f..7f3277fc68 100644 --- a/deps/v8/src/compiler/backend/s390/instruction-selector-s390.cc +++ b/deps/v8/src/compiler/backend/s390/instruction-selector-s390.cc @@ -243,17 +243,6 @@ class S390OperandGenerator final : public OperandGenerator { bool Is64BitOperand(Node* node) { return MachineRepresentation::kWord64 == GetRepresentation(node); } - - // Use the stack pointer if the node is LoadStackPointer, otherwise assign a - // register. 
- InstructionOperand UseRegisterOrStackPointer(Node* node) { - if (node->opcode() == IrOpcode::kLoadStackPointer) { - return LocationOperand(LocationOperand::EXPLICIT, - LocationOperand::REGISTER, - MachineRepresentation::kWord32, sp.code()); - } - return UseRegister(node); - } }; namespace { @@ -727,7 +716,8 @@ static void VisitGeneralStore( Node* base = node->InputAt(0); Node* offset = node->InputAt(1); Node* value = node->InputAt(2); - if (write_barrier_kind != kNoWriteBarrier) { + if (write_barrier_kind != kNoWriteBarrier && + V8_LIKELY(!FLAG_disable_write_barriers)) { DCHECK(CanBeTaggedPointer(rep)); AddressingMode addressing_mode; InstructionOperand inputs[3]; @@ -837,6 +827,15 @@ void InstructionSelector::VisitUnalignedLoad(Node* node) { UNREACHABLE(); } // Architecture supports unaligned access, therefore VisitStore is used instead void InstructionSelector::VisitUnalignedStore(Node* node) { UNREACHABLE(); } +void InstructionSelector::VisitStackPointerGreaterThan( + Node* node, FlagsContinuation* cont) { + Node* const value = node->InputAt(0); + InstructionCode opcode = kArchStackPointerGreaterThan; + + S390OperandGenerator g(this); + EmitWithContinuation(opcode, g.UseRegister(value), cont); +} + #if 0 static inline bool IsContiguousMask32(uint32_t value, int* mb, int* me) { int mask_width = base::bits::CountPopulation(value); @@ -1681,7 +1680,7 @@ void VisitWordCompare(InstructionSelector* selector, Node* node, return VisitLoadAndTest(selector, load_and_test, node, left, cont, true); } - inputs[input_count++] = g.UseRegisterOrStackPointer(left); + inputs[input_count++] = g.UseRegister(left); if (g.CanBeMemoryOperand(opcode, node, right, effect_level)) { // generate memory operand AddressingMode addressing_mode = g.GetEffectiveAddressMemoryOperand( @@ -2008,6 +2007,9 @@ void InstructionSelector::VisitWordCompareZero(Node* user, Node* value, // doesn't generate cc, so ignore break; #endif + case IrOpcode::kStackPointerGreaterThan: + cont->OverwriteAndNegateIfEqual(kStackPointerGreaterThanCondition); + return VisitStackPointerGreaterThan(value, cont); default: break; } @@ -2689,6 +2691,8 @@ void InstructionSelector::VisitF32x4Sub(Node* node) { UNIMPLEMENTED(); } void InstructionSelector::VisitF32x4Mul(Node* node) { UNIMPLEMENTED(); } +void InstructionSelector::VisitF32x4Div(Node* node) { UNIMPLEMENTED(); } + void InstructionSelector::VisitF32x4Min(Node* node) { UNIMPLEMENTED(); } void InstructionSelector::VisitF32x4Max(Node* node) { UNIMPLEMENTED(); } diff --git a/deps/v8/src/compiler/backend/x64/code-generator-x64.cc b/deps/v8/src/compiler/backend/x64/code-generator-x64.cc index a108edeff0..a4f82b153b 100644 --- a/deps/v8/src/compiler/backend/x64/code-generator-x64.cc +++ b/deps/v8/src/compiler/backend/x64/code-generator-x64.cc @@ -155,10 +155,18 @@ class X64OperandConverter : public InstructionOperandConverter { namespace { +bool HasAddressingMode(Instruction* instr) { + return instr->addressing_mode() != kMode_None; +} + bool HasImmediateInput(Instruction* instr, size_t index) { return instr->InputAt(index)->IsImmediate(); } +bool HasRegisterInput(Instruction* instr, size_t index) { + return instr->InputAt(index)->IsRegister(); +} + class OutOfLineLoadFloat32NaN final : public OutOfLineCode { public: OutOfLineLoadFloat32NaN(CodeGenerator* gen, XMMRegister result) @@ -210,6 +218,10 @@ class OutOfLineTruncateDoubleToI final : public OutOfLineCode { // Just encode the stub index. This will be patched when the code // is added to the native module and copied into wasm code space. 
__ near_call(wasm::WasmCode::kDoubleToI, RelocInfo::WASM_STUB_CALL); + } else if (tasm()->options().inline_offheap_trampolines) { + // With embedded builtins we do not need the isolate here. This allows + // the call to be generated asynchronously. + __ CallBuiltin(Builtins::kDoubleToI); } else { __ Call(BUILTIN_CODE(isolate_, DoubleToI), RelocInfo::CODE_TARGET); } @@ -380,60 +392,60 @@ void EmitWordLoadPoisoningIfNeeded( } \ } while (false) -#define ASSEMBLE_BINOP(asm_instr) \ - do { \ - if (AddressingModeField::decode(instr->opcode()) != kMode_None) { \ - size_t index = 1; \ - Operand right = i.MemoryOperand(&index); \ - __ asm_instr(i.InputRegister(0), right); \ - } else { \ - if (HasImmediateInput(instr, 1)) { \ - if (instr->InputAt(0)->IsRegister()) { \ - __ asm_instr(i.InputRegister(0), i.InputImmediate(1)); \ - } else { \ - __ asm_instr(i.InputOperand(0), i.InputImmediate(1)); \ - } \ - } else { \ - if (instr->InputAt(1)->IsRegister()) { \ - __ asm_instr(i.InputRegister(0), i.InputRegister(1)); \ - } else { \ - __ asm_instr(i.InputRegister(0), i.InputOperand(1)); \ - } \ - } \ - } \ +#define ASSEMBLE_BINOP(asm_instr) \ + do { \ + if (HasAddressingMode(instr)) { \ + size_t index = 1; \ + Operand right = i.MemoryOperand(&index); \ + __ asm_instr(i.InputRegister(0), right); \ + } else { \ + if (HasImmediateInput(instr, 1)) { \ + if (HasRegisterInput(instr, 0)) { \ + __ asm_instr(i.InputRegister(0), i.InputImmediate(1)); \ + } else { \ + __ asm_instr(i.InputOperand(0), i.InputImmediate(1)); \ + } \ + } else { \ + if (HasRegisterInput(instr, 1)) { \ + __ asm_instr(i.InputRegister(0), i.InputRegister(1)); \ + } else { \ + __ asm_instr(i.InputRegister(0), i.InputOperand(1)); \ + } \ + } \ + } \ } while (false) -#define ASSEMBLE_COMPARE(asm_instr) \ - do { \ - if (AddressingModeField::decode(instr->opcode()) != kMode_None) { \ - size_t index = 0; \ - Operand left = i.MemoryOperand(&index); \ - if (HasImmediateInput(instr, index)) { \ - __ asm_instr(left, i.InputImmediate(index)); \ - } else { \ - __ asm_instr(left, i.InputRegister(index)); \ - } \ - } else { \ - if (HasImmediateInput(instr, 1)) { \ - if (instr->InputAt(0)->IsRegister()) { \ - __ asm_instr(i.InputRegister(0), i.InputImmediate(1)); \ - } else { \ - __ asm_instr(i.InputOperand(0), i.InputImmediate(1)); \ - } \ - } else { \ - if (instr->InputAt(1)->IsRegister()) { \ - __ asm_instr(i.InputRegister(0), i.InputRegister(1)); \ - } else { \ - __ asm_instr(i.InputRegister(0), i.InputOperand(1)); \ - } \ - } \ - } \ +#define ASSEMBLE_COMPARE(asm_instr) \ + do { \ + if (HasAddressingMode(instr)) { \ + size_t index = 0; \ + Operand left = i.MemoryOperand(&index); \ + if (HasImmediateInput(instr, index)) { \ + __ asm_instr(left, i.InputImmediate(index)); \ + } else { \ + __ asm_instr(left, i.InputRegister(index)); \ + } \ + } else { \ + if (HasImmediateInput(instr, 1)) { \ + if (HasRegisterInput(instr, 0)) { \ + __ asm_instr(i.InputRegister(0), i.InputImmediate(1)); \ + } else { \ + __ asm_instr(i.InputOperand(0), i.InputImmediate(1)); \ + } \ + } else { \ + if (HasRegisterInput(instr, 1)) { \ + __ asm_instr(i.InputRegister(0), i.InputRegister(1)); \ + } else { \ + __ asm_instr(i.InputRegister(0), i.InputOperand(1)); \ + } \ + } \ + } \ } while (false) #define ASSEMBLE_MULT(asm_instr) \ do { \ if (HasImmediateInput(instr, 1)) { \ - if (instr->InputAt(0)->IsRegister()) { \ + if (HasRegisterInput(instr, 0)) { \ __ asm_instr(i.OutputRegister(), i.InputRegister(0), \ i.InputImmediate(1)); \ } else { \ @@ -441,7 +453,7 @@ void 
EmitWordLoadPoisoningIfNeeded( i.InputImmediate(1)); \ } \ } else { \ - if (instr->InputAt(1)->IsRegister()) { \ + if (HasRegisterInput(instr, 1)) { \ __ asm_instr(i.OutputRegister(), i.InputRegister(1)); \ } else { \ __ asm_instr(i.OutputRegister(), i.InputOperand(1)); \ @@ -468,9 +480,9 @@ void EmitWordLoadPoisoningIfNeeded( #define ASSEMBLE_MOVX(asm_instr) \ do { \ - if (instr->addressing_mode() != kMode_None) { \ + if (HasAddressingMode(instr)) { \ __ asm_instr(i.OutputRegister(), i.MemoryOperand()); \ - } else if (instr->InputAt(0)->IsRegister()) { \ + } else if (HasRegisterInput(instr, 0)) { \ __ asm_instr(i.OutputRegister(), i.InputRegister(0)); \ } else { \ __ asm_instr(i.OutputRegister(), i.InputOperand(0)); \ @@ -576,17 +588,18 @@ void EmitWordLoadPoisoningIfNeeded( __ opcode(i.OutputSimd128Register(), i.InputSimd128Register(1), imm); \ } while (false) -#define ASSEMBLE_SIMD_ALL_TRUE(opcode) \ - do { \ - CpuFeatureScope sse_scope(tasm(), SSE4_1); \ - Register dst = i.OutputRegister(); \ - Register tmp = i.TempRegister(0); \ - __ movq(tmp, Immediate(1)); \ - __ xorq(dst, dst); \ - __ pxor(kScratchDoubleReg, kScratchDoubleReg); \ - __ opcode(kScratchDoubleReg, i.InputSimd128Register(0)); \ - __ ptest(kScratchDoubleReg, kScratchDoubleReg); \ - __ cmovq(zero, dst, tmp); \ +#define ASSEMBLE_SIMD_ALL_TRUE(opcode) \ + do { \ + CpuFeatureScope sse_scope(tasm(), SSE4_1); \ + Register dst = i.OutputRegister(); \ + Register tmp1 = i.TempRegister(0); \ + XMMRegister tmp2 = i.TempSimd128Register(1); \ + __ movq(tmp1, Immediate(1)); \ + __ xorq(dst, dst); \ + __ pxor(tmp2, tmp2); \ + __ opcode(tmp2, i.InputSimd128Register(0)); \ + __ ptest(tmp2, tmp2); \ + __ cmovq(zero, dst, tmp1); \ } while (false) void CodeGenerator::AssembleDeconstructFrame() { @@ -989,10 +1002,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( // don't emit code for nops. 
break; case kArchDeoptimize: { - int deopt_state_id = + DeoptimizationExit* exit = BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore()); - CodeGenResult result = - AssembleDeoptimizerCall(deopt_state_id, current_source_position_); + CodeGenResult result = AssembleDeoptimizerCall(exit); if (result != kSuccess) return result; unwinding_info_writer_.MarkBlockWillExit(); break; @@ -1000,9 +1012,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( case kArchRet: AssembleReturn(instr->InputAt(0)); break; - case kArchStackPointer: - __ movq(i.OutputRegister(), rsp); - break; case kArchFramePointer: __ movq(i.OutputRegister(), rbp); break; @@ -1013,6 +1022,15 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( __ movq(i.OutputRegister(), rbp); } break; + case kArchStackPointerGreaterThan: { + constexpr size_t kValueIndex = 0; + if (HasAddressingMode(instr)) { + __ cmpq(rsp, i.MemoryOperand(kValueIndex)); + } else { + __ cmpq(rsp, i.InputRegister(kValueIndex)); + } + break; + } case kArchTruncateDoubleToI: { auto result = i.OutputRegister(); auto input = i.InputDoubleRegister(0); @@ -1176,14 +1194,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( ASSEMBLE_MULT(imulq); break; case kX64ImulHigh32: - if (instr->InputAt(1)->IsRegister()) { + if (HasRegisterInput(instr, 1)) { __ imull(i.InputRegister(1)); } else { __ imull(i.InputOperand(1)); } break; case kX64UmulHigh32: - if (instr->InputAt(1)->IsRegister()) { + if (HasRegisterInput(instr, 1)) { __ mull(i.InputRegister(1)); } else { __ mull(i.InputOperand(1)); @@ -1254,42 +1272,42 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( ASSEMBLE_SHIFT(rorq, 6); break; case kX64Lzcnt: - if (instr->InputAt(0)->IsRegister()) { + if (HasRegisterInput(instr, 0)) { __ Lzcntq(i.OutputRegister(), i.InputRegister(0)); } else { __ Lzcntq(i.OutputRegister(), i.InputOperand(0)); } break; case kX64Lzcnt32: - if (instr->InputAt(0)->IsRegister()) { + if (HasRegisterInput(instr, 0)) { __ Lzcntl(i.OutputRegister(), i.InputRegister(0)); } else { __ Lzcntl(i.OutputRegister(), i.InputOperand(0)); } break; case kX64Tzcnt: - if (instr->InputAt(0)->IsRegister()) { + if (HasRegisterInput(instr, 0)) { __ Tzcntq(i.OutputRegister(), i.InputRegister(0)); } else { __ Tzcntq(i.OutputRegister(), i.InputOperand(0)); } break; case kX64Tzcnt32: - if (instr->InputAt(0)->IsRegister()) { + if (HasRegisterInput(instr, 0)) { __ Tzcntl(i.OutputRegister(), i.InputRegister(0)); } else { __ Tzcntl(i.OutputRegister(), i.InputOperand(0)); } break; case kX64Popcnt: - if (instr->InputAt(0)->IsRegister()) { + if (HasRegisterInput(instr, 0)) { __ Popcntq(i.OutputRegister(), i.InputRegister(0)); } else { __ Popcntq(i.OutputRegister(), i.InputOperand(0)); } break; case kX64Popcnt32: - if (instr->InputAt(0)->IsRegister()) { + if (HasRegisterInput(instr, 0)) { __ Popcntl(i.OutputRegister(), i.InputRegister(0)); } else { __ Popcntl(i.OutputRegister(), i.InputOperand(0)); @@ -1321,16 +1339,18 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; case kSSEFloat32Abs: { // TODO(bmeurer): Use RIP relative 128-bit constants. 
- __ Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg); - __ Psrlq(kScratchDoubleReg, 33); - __ Andps(i.OutputDoubleRegister(), kScratchDoubleReg); + XMMRegister tmp = i.ToDoubleRegister(instr->TempAt(0)); + __ Pcmpeqd(tmp, tmp); + __ Psrlq(tmp, 33); + __ Andps(i.OutputDoubleRegister(), tmp); break; } case kSSEFloat32Neg: { // TODO(bmeurer): Use RIP relative 128-bit constants. - __ Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg); - __ Psllq(kScratchDoubleReg, 31); - __ Xorps(i.OutputDoubleRegister(), kScratchDoubleReg); + XMMRegister tmp = i.ToDoubleRegister(instr->TempAt(0)); + __ Pcmpeqd(tmp, tmp); + __ Psllq(tmp, 31); + __ Xorps(i.OutputDoubleRegister(), tmp); break; } case kSSEFloat32Sqrt: @@ -1532,17 +1552,19 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( case kX64F64x2Abs: case kSSEFloat64Abs: { // TODO(bmeurer): Use RIP relative 128-bit constants. - __ Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg); - __ Psrlq(kScratchDoubleReg, 1); - __ Andpd(i.OutputDoubleRegister(), kScratchDoubleReg); + XMMRegister tmp = i.ToDoubleRegister(instr->TempAt(0)); + __ Pcmpeqd(tmp, tmp); + __ Psrlq(tmp, 1); + __ Andpd(i.OutputDoubleRegister(), tmp); break; } case kX64F64x2Neg: case kSSEFloat64Neg: { // TODO(bmeurer): Use RIP relative 128-bit constants. - __ Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg); - __ Psllq(kScratchDoubleReg, 63); - __ Xorpd(i.OutputDoubleRegister(), kScratchDoubleReg); + XMMRegister tmp = i.ToDoubleRegister(instr->TempAt(0)); + __ Pcmpeqd(tmp, tmp); + __ Psllq(tmp, 63); + __ Xorpd(i.OutputDoubleRegister(), tmp); break; } case kSSEFloat64Sqrt: @@ -1659,56 +1681,56 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kSSEInt32ToFloat64: - if (instr->InputAt(0)->IsRegister()) { + if (HasRegisterInput(instr, 0)) { __ Cvtlsi2sd(i.OutputDoubleRegister(), i.InputRegister(0)); } else { __ Cvtlsi2sd(i.OutputDoubleRegister(), i.InputOperand(0)); } break; case kSSEInt32ToFloat32: - if (instr->InputAt(0)->IsRegister()) { + if (HasRegisterInput(instr, 0)) { __ Cvtlsi2ss(i.OutputDoubleRegister(), i.InputRegister(0)); } else { __ Cvtlsi2ss(i.OutputDoubleRegister(), i.InputOperand(0)); } break; case kSSEInt64ToFloat32: - if (instr->InputAt(0)->IsRegister()) { + if (HasRegisterInput(instr, 0)) { __ Cvtqsi2ss(i.OutputDoubleRegister(), i.InputRegister(0)); } else { __ Cvtqsi2ss(i.OutputDoubleRegister(), i.InputOperand(0)); } break; case kSSEInt64ToFloat64: - if (instr->InputAt(0)->IsRegister()) { + if (HasRegisterInput(instr, 0)) { __ Cvtqsi2sd(i.OutputDoubleRegister(), i.InputRegister(0)); } else { __ Cvtqsi2sd(i.OutputDoubleRegister(), i.InputOperand(0)); } break; case kSSEUint64ToFloat32: - if (instr->InputAt(0)->IsRegister()) { + if (HasRegisterInput(instr, 0)) { __ Cvtqui2ss(i.OutputDoubleRegister(), i.InputRegister(0)); } else { __ Cvtqui2ss(i.OutputDoubleRegister(), i.InputOperand(0)); } break; case kSSEUint64ToFloat64: - if (instr->InputAt(0)->IsRegister()) { + if (HasRegisterInput(instr, 0)) { __ Cvtqui2sd(i.OutputDoubleRegister(), i.InputRegister(0)); } else { __ Cvtqui2sd(i.OutputDoubleRegister(), i.InputOperand(0)); } break; case kSSEUint32ToFloat64: - if (instr->InputAt(0)->IsRegister()) { + if (HasRegisterInput(instr, 0)) { __ Cvtlui2sd(i.OutputDoubleRegister(), i.InputRegister(0)); } else { __ Cvtlui2sd(i.OutputDoubleRegister(), i.InputOperand(0)); } break; case kSSEUint32ToFloat32: - if (instr->InputAt(0)->IsRegister()) { + if (HasRegisterInput(instr, 0)) { __ Cvtlui2ss(i.OutputDoubleRegister(), i.InputRegister(0)); } else { __ 
Cvtlui2ss(i.OutputDoubleRegister(), i.InputOperand(0)); @@ -1729,21 +1751,21 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( } break; case kSSEFloat64InsertLowWord32: - if (instr->InputAt(1)->IsRegister()) { + if (HasRegisterInput(instr, 1)) { __ Pinsrd(i.OutputDoubleRegister(), i.InputRegister(1), 0); } else { __ Pinsrd(i.OutputDoubleRegister(), i.InputOperand(1), 0); } break; case kSSEFloat64InsertHighWord32: - if (instr->InputAt(1)->IsRegister()) { + if (HasRegisterInput(instr, 1)) { __ Pinsrd(i.OutputDoubleRegister(), i.InputRegister(1), 1); } else { __ Pinsrd(i.OutputDoubleRegister(), i.InputOperand(1), 1); } break; case kSSEFloat64LoadLowWord32: - if (instr->InputAt(0)->IsRegister()) { + if (HasRegisterInput(instr, 0)) { __ Movd(i.OutputDoubleRegister(), i.InputRegister(0)); } else { __ Movd(i.OutputDoubleRegister(), i.InputOperand(0)); @@ -1800,56 +1822,52 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( case kAVXFloat32Abs: { // TODO(bmeurer): Use RIP relative 128-bit constants. CpuFeatureScope avx_scope(tasm(), AVX); - __ vpcmpeqd(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg); - __ vpsrlq(kScratchDoubleReg, kScratchDoubleReg, 33); + XMMRegister tmp = i.ToDoubleRegister(instr->TempAt(0)); + __ vpcmpeqd(tmp, tmp, tmp); + __ vpsrlq(tmp, tmp, 33); if (instr->InputAt(0)->IsFPRegister()) { - __ vandps(i.OutputDoubleRegister(), kScratchDoubleReg, - i.InputDoubleRegister(0)); + __ vandps(i.OutputDoubleRegister(), tmp, i.InputDoubleRegister(0)); } else { - __ vandps(i.OutputDoubleRegister(), kScratchDoubleReg, - i.InputOperand(0)); + __ vandps(i.OutputDoubleRegister(), tmp, i.InputOperand(0)); } break; } case kAVXFloat32Neg: { // TODO(bmeurer): Use RIP relative 128-bit constants. CpuFeatureScope avx_scope(tasm(), AVX); - __ vpcmpeqd(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg); - __ vpsllq(kScratchDoubleReg, kScratchDoubleReg, 31); + XMMRegister tmp = i.ToDoubleRegister(instr->TempAt(0)); + __ vpcmpeqd(tmp, tmp, tmp); + __ vpsllq(tmp, tmp, 31); if (instr->InputAt(0)->IsFPRegister()) { - __ vxorps(i.OutputDoubleRegister(), kScratchDoubleReg, - i.InputDoubleRegister(0)); + __ vxorps(i.OutputDoubleRegister(), tmp, i.InputDoubleRegister(0)); } else { - __ vxorps(i.OutputDoubleRegister(), kScratchDoubleReg, - i.InputOperand(0)); + __ vxorps(i.OutputDoubleRegister(), tmp, i.InputOperand(0)); } break; } case kAVXFloat64Abs: { // TODO(bmeurer): Use RIP relative 128-bit constants. CpuFeatureScope avx_scope(tasm(), AVX); - __ vpcmpeqd(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg); - __ vpsrlq(kScratchDoubleReg, kScratchDoubleReg, 1); + XMMRegister tmp = i.ToDoubleRegister(instr->TempAt(0)); + __ vpcmpeqd(tmp, tmp, tmp); + __ vpsrlq(tmp, tmp, 1); if (instr->InputAt(0)->IsFPRegister()) { - __ vandpd(i.OutputDoubleRegister(), kScratchDoubleReg, - i.InputDoubleRegister(0)); + __ vandpd(i.OutputDoubleRegister(), tmp, i.InputDoubleRegister(0)); } else { - __ vandpd(i.OutputDoubleRegister(), kScratchDoubleReg, - i.InputOperand(0)); + __ vandpd(i.OutputDoubleRegister(), tmp, i.InputOperand(0)); } break; } case kAVXFloat64Neg: { // TODO(bmeurer): Use RIP relative 128-bit constants. 
CpuFeatureScope avx_scope(tasm(), AVX); - __ vpcmpeqd(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg); - __ vpsllq(kScratchDoubleReg, kScratchDoubleReg, 63); + XMMRegister tmp = i.ToDoubleRegister(instr->TempAt(0)); + __ vpcmpeqd(tmp, tmp, tmp); + __ vpsllq(tmp, tmp, 63); if (instr->InputAt(0)->IsFPRegister()) { - __ vxorpd(i.OutputDoubleRegister(), kScratchDoubleReg, - i.InputDoubleRegister(0)); + __ vxorpd(i.OutputDoubleRegister(), tmp, i.InputDoubleRegister(0)); } else { - __ vxorpd(i.OutputDoubleRegister(), kScratchDoubleReg, - i.InputOperand(0)); + __ vxorpd(i.OutputDoubleRegister(), tmp, i.InputOperand(0)); } break; } @@ -1929,14 +1947,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( case kX64Movl: EmitOOLTrapIfNeeded(zone(), this, opcode, instr, i, __ pc_offset()); if (instr->HasOutput()) { - if (instr->addressing_mode() == kMode_None) { - if (instr->InputAt(0)->IsRegister()) { + if (HasAddressingMode(instr)) { + __ movl(i.OutputRegister(), i.MemoryOperand()); + } else { + if (HasRegisterInput(instr, 0)) { __ movl(i.OutputRegister(), i.InputRegister(0)); } else { __ movl(i.OutputRegister(), i.InputOperand(0)); } - } else { - __ movl(i.OutputRegister(), i.MemoryOperand()); } __ AssertZeroExtended(i.OutputRegister()); } else { @@ -2002,12 +2020,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i); break; } - case kX64CompressSigned: // Fall through. - case kX64CompressPointer: // Fall through. - case kX64CompressAny: { - ASSEMBLE_MOVX(movl); - break; - } case kX64Movq: EmitOOLTrapIfNeeded(zone(), this, opcode, instr, i, __ pc_offset()); if (instr->HasOutput()) { @@ -2082,14 +2094,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( } break; case kX64BitcastIF: - if (instr->InputAt(0)->IsRegister()) { + if (HasRegisterInput(instr, 0)) { __ Movd(i.OutputDoubleRegister(), i.InputRegister(0)); } else { __ Movss(i.OutputDoubleRegister(), i.InputOperand(0)); } break; case kX64BitcastLD: - if (instr->InputAt(0)->IsRegister()) { + if (HasRegisterInput(instr, 0)) { __ Movq(i.OutputDoubleRegister(), i.InputRegister(0)); } else { __ Movsd(i.OutputDoubleRegister(), i.InputOperand(0)); @@ -2177,7 +2189,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( __ incl(i.OutputRegister()); break; case kX64Push: - if (AddressingModeField::decode(instr->opcode()) != kMode_None) { + if (HasAddressingMode(instr)) { size_t index = 0; Operand operand = i.MemoryOperand(&index); __ pushq(operand); @@ -2189,7 +2201,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( frame_access_state()->IncreaseSPDelta(1); unwinding_info_writer_.MaybeIncreaseBaseOffsetAt(__ pc_offset(), kSystemPointerSize); - } else if (instr->InputAt(0)->IsRegister()) { + } else if (HasRegisterInput(instr, 0)) { __ pushq(i.InputRegister(0)); frame_access_state()->IncreaseSPDelta(1); unwinding_info_writer_.MaybeIncreaseBaseOffsetAt(__ pc_offset(), @@ -2256,11 +2268,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kX64F64x2Splat: { + CpuFeatureScope sse_scope(tasm(), SSE3); XMMRegister dst = i.OutputSimd128Register(); if (instr->InputAt(0)->IsFPRegister()) { - __ pshufd(dst, i.InputDoubleRegister(0), 0x44); + __ movddup(dst, i.InputDoubleRegister(0)); } else { - __ pshufd(dst, i.InputOperand(0), 0x44); + __ movddup(dst, i.InputOperand(0)); } break; } @@ -2280,6 +2293,61 @@ CodeGenerator::CodeGenResult 
CodeGenerator::AssembleArchInstruction( __ movq(i.OutputDoubleRegister(), kScratchRegister); break; } + case kX64F64x2Add: { + ASSEMBLE_SSE_BINOP(addpd); + break; + } + case kX64F64x2Sub: { + ASSEMBLE_SSE_BINOP(subpd); + break; + } + case kX64F64x2Mul: { + ASSEMBLE_SSE_BINOP(mulpd); + break; + } + case kX64F64x2Div: { + ASSEMBLE_SSE_BINOP(divpd); + break; + } + case kX64F64x2Min: { + XMMRegister src1 = i.InputSimd128Register(1), + dst = i.OutputSimd128Register(); + DCHECK_EQ(dst, i.InputSimd128Register(0)); + // The minpd instruction doesn't propagate NaNs and +0's in its first + // operand. Perform minpd in both orders, merge the results, and adjust. + __ movapd(kScratchDoubleReg, src1); + __ minpd(kScratchDoubleReg, dst); + __ minpd(dst, src1); + // Propagate -0's and NaNs, which may be non-canonical. + __ orpd(kScratchDoubleReg, dst); + // Canonicalize NaNs by quieting and clearing the payload. + __ cmppd(dst, kScratchDoubleReg, 3); + __ orpd(kScratchDoubleReg, dst); + __ psrlq(dst, 13); + __ andnpd(dst, kScratchDoubleReg); + break; + } + case kX64F64x2Max: { + XMMRegister src1 = i.InputSimd128Register(1), + dst = i.OutputSimd128Register(); + DCHECK_EQ(dst, i.InputSimd128Register(0)); + // The maxpd instruction doesn't propagate NaNs and +0's in its first + // operand. Perform maxpd in both orders, merge the results, and adjust. + __ movapd(kScratchDoubleReg, src1); + __ maxpd(kScratchDoubleReg, dst); + __ maxpd(dst, src1); + // Find discrepancies. + __ xorpd(dst, kScratchDoubleReg); + // Propagate NaNs, which may be non-canonical. + __ orpd(kScratchDoubleReg, dst); + // Propagate sign discrepancy and (subtle) quiet NaNs. + __ subpd(kScratchDoubleReg, dst); + // Canonicalize NaNs by clearing the payload. Sign is non-deterministic. + __ cmppd(dst, kScratchDoubleReg, 3); + __ psrlq(dst, 13); + __ andnpd(dst, kScratchDoubleReg); + break; + } case kX64F64x2Eq: { DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0)); __ cmpeqpd(i.OutputSimd128Register(), i.InputSimd128Register(1)); @@ -2406,6 +2474,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( __ mulps(i.OutputSimd128Register(), i.InputSimd128Register(1)); break; } + case kX64F32x4Div: { + DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0)); + __ divps(i.OutputSimd128Register(), i.InputSimd128Register(1)); + break; + } case kX64F32x4Min: { XMMRegister src1 = i.InputSimd128Register(1), dst = i.OutputSimd128Register(); @@ -2466,13 +2539,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kX64I64x2Splat: { + CpuFeatureScope sse_scope(tasm(), SSE3); XMMRegister dst = i.OutputSimd128Register(); - if (instr->InputAt(0)->IsRegister()) { + if (HasRegisterInput(instr, 0)) { __ movq(dst, i.InputRegister(0)); } else { __ movq(dst, i.InputOperand(0)); } - __ pshufd(dst, dst, 0x44); + __ movddup(dst, dst); break; } case kX64I64x2ExtractLane: { @@ -2482,7 +2556,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( } case kX64I64x2ReplaceLane: { CpuFeatureScope sse_scope(tasm(), SSE4_1); - if (instr->InputAt(2)->IsRegister()) { + if (HasRegisterInput(instr, 2)) { __ pinsrq(i.OutputSimd128Register(), i.InputRegister(2), i.InputInt8(1)); } else { @@ -2502,7 +2576,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kX64I64x2Shl: { - __ psllq(i.OutputSimd128Register(), i.InputInt8(1)); + XMMRegister tmp = i.TempSimd128Register(0); + __ movq(tmp, i.InputRegister(1)); + __ psllq(i.OutputSimd128Register(), tmp); break; }
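// The I64x2 shifts here (and the I32x4/I16x8/I8x16 shifts further down)
// switch from an immediate shift count to a count supplied in a general
// register. The count is first moved into an XMM temp because the variable
// forms of psllq/psrlq take their count from the low quadword of an XMM
// operand. A minimal SSE2 intrinsics sketch of the same pattern
// (illustrative only, not V8 code):
#include <emmintrin.h>
static __m128i ShiftLeftI64x2(__m128i v, int64_t count) {
  __m128i cnt = _mm_cvtsi64_si128(count);  // movq: count -> low XMM quadword
  return _mm_sll_epi64(v, cnt);            // psllq xmm, xmm (variable count)
}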
case kX64I64x2ShrS: { @@ -2511,16 +2587,17 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( // ShrS on each quadword one at a time XMMRegister dst = i.OutputSimd128Register(); XMMRegister src = i.InputSimd128Register(0); + Register tmp = i.ToRegister(instr->TempAt(0)); // lower quadword - __ pextrq(kScratchRegister, src, 0x0); - __ sarq(kScratchRegister, Immediate(i.InputInt8(1))); - __ pinsrq(dst, kScratchRegister, 0x0); + __ pextrq(tmp, src, 0x0); + __ sarq_cl(tmp); + __ pinsrq(dst, tmp, 0x0); // upper quadword - __ pextrq(kScratchRegister, src, 0x1); - __ sarq(kScratchRegister, Immediate(i.InputInt8(1))); - __ pinsrq(dst, kScratchRegister, 0x1); + __ pextrq(tmp, src, 0x1); + __ sarq_cl(tmp); + __ pinsrq(dst, tmp, 0x1); break; } case kX64I64x2Add: { @@ -2538,8 +2615,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( CpuFeatureScope sse_scope(tasm(), SSE4_1); XMMRegister left = i.InputSimd128Register(0); XMMRegister right = i.InputSimd128Register(1); - XMMRegister tmp1 = i.ToSimd128Register(instr->TempAt(0)); - XMMRegister tmp2 = i.ToSimd128Register(instr->TempAt(1)); + XMMRegister tmp1 = i.TempSimd128Register(0); + XMMRegister tmp2 = i.TempSimd128Register(1); __ movaps(tmp1, left); __ movaps(tmp2, right); @@ -2559,6 +2636,66 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( __ paddq(left, tmp2); // left == dst break; } + case kX64I64x2MinS: { + if (CpuFeatures::IsSupported(SSE4_2)) { + CpuFeatureScope sse_scope_4_2(tasm(), SSE4_2); + XMMRegister dst = i.OutputSimd128Register(); + XMMRegister src = i.InputSimd128Register(1); + XMMRegister tmp = i.TempSimd128Register(0); + DCHECK_EQ(dst, i.InputSimd128Register(0)); + DCHECK_EQ(src, xmm0); + + __ movaps(tmp, src); + __ pcmpgtq(src, dst); + __ blendvpd(tmp, dst); // implicit use of xmm0 as mask + __ movaps(dst, tmp); + } else { + CpuFeatureScope sse_scope_4_1(tasm(), SSE4_1); + XMMRegister dst = i.OutputSimd128Register(); + XMMRegister src = i.InputSimd128Register(1); + XMMRegister tmp = i.TempSimd128Register(0); + Register tmp1 = i.TempRegister(1); + Register tmp2 = i.TempRegister(2); + DCHECK_EQ(dst, i.InputSimd128Register(0)); + // back up src since we cannot change it + __ movaps(tmp, src); + + // compare the lower quadwords + __ movq(tmp1, dst); + __ movq(tmp2, tmp); + __ cmpq(tmp1, tmp2); + // tmp2 now has the min of lower quadwords + __ cmovq(less_equal, tmp2, tmp1); + // tmp1 now has the higher quadword + // must do this before movq, movq clears top quadword + __ pextrq(tmp1, dst, 1); + // save tmp2 into dst + __ movq(dst, tmp2); + // tmp2 now has the higher quadword + __ pextrq(tmp2, tmp, 1); + // compare higher quadwords + __ cmpq(tmp1, tmp2); + // tmp2 now has the min of higher quadwords + __ cmovq(less_equal, tmp2, tmp1); + __ movq(tmp, tmp2); + // dst = [tmp[0], dst[0]] + __ punpcklqdq(dst, tmp); + } + break; + } + case kX64I64x2MaxS: { + CpuFeatureScope sse_scope_4_2(tasm(), SSE4_2); + XMMRegister dst = i.OutputSimd128Register(); + XMMRegister src = i.InputSimd128Register(1); + XMMRegister tmp = i.TempSimd128Register(0); + DCHECK_EQ(dst, i.InputSimd128Register(0)); + DCHECK_EQ(src, xmm0); + + __ movaps(tmp, src); + __ pcmpgtq(src, dst); + __ blendvpd(dst, tmp); // implicit use of xmm0 as mask + break; + } case kX64I64x2Eq: { DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0)); CpuFeatureScope sse_scope(tasm(), SSE4_1); @@ -2568,9 +2705,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( case kX64I64x2Ne: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0)); CpuFeatureScope sse_scope(tasm(), SSE4_1); + XMMRegister tmp = i.TempSimd128Register(0); __ pcmpeqq(i.OutputSimd128Register(), i.InputSimd128Register(1)); - __ pcmpeqq(kScratchDoubleReg, kScratchDoubleReg); - __ pxor(i.OutputSimd128Register(), kScratchDoubleReg); + __ pcmpeqq(tmp, tmp); + __ pxor(i.OutputSimd128Register(), tmp); break; } case kX64I64x2GtS: { @@ -2584,7 +2722,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( CpuFeatureScope sse_scope(tasm(), SSE4_2); XMMRegister dst = i.OutputSimd128Register(); XMMRegister src = i.InputSimd128Register(1); - XMMRegister tmp = i.ToSimd128Register(instr->TempAt(0)); + XMMRegister tmp = i.TempSimd128Register(0); __ movaps(tmp, src); __ pcmpgtq(tmp, dst); @@ -2593,7 +2731,56 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kX64I64x2ShrU: { - __ psrlq(i.OutputSimd128Register(), i.InputInt8(1)); + XMMRegister tmp = i.TempSimd128Register(0); + __ movq(tmp, i.InputRegister(1)); + __ psrlq(i.OutputSimd128Register(), tmp); + break; + } + case kX64I64x2MinU: { + CpuFeatureScope sse_scope_4_2(tasm(), SSE4_2); + CpuFeatureScope sse_scope_4_1(tasm(), SSE4_1); + XMMRegister dst = i.OutputSimd128Register(); + XMMRegister src = i.InputSimd128Register(1); + XMMRegister src_tmp = i.TempSimd128Register(0); + XMMRegister dst_tmp = i.TempSimd128Register(1); + DCHECK_EQ(dst, i.InputSimd128Register(0)); + DCHECK_EQ(src, xmm0); + + __ movaps(src_tmp, src); + __ movaps(dst_tmp, dst); + + __ pcmpeqd(src, src); + __ psllq(src, 63); + + __ pxor(dst_tmp, src); + __ pxor(src, src_tmp); + + __ pcmpgtq(src, dst_tmp); + __ blendvpd(src_tmp, dst); // implicit use of xmm0 as mask + __ movaps(dst, src_tmp); + break; + } + case kX64I64x2MaxU: { + CpuFeatureScope sse_scope_4_2(tasm(), SSE4_2); + CpuFeatureScope sse_scope_4_1(tasm(), SSE4_1); + XMMRegister dst = i.OutputSimd128Register(); + XMMRegister src = i.InputSimd128Register(1); + XMMRegister src_tmp = i.TempSimd128Register(0); + XMMRegister dst_tmp = i.TempSimd128Register(1); + DCHECK_EQ(dst, i.InputSimd128Register(0)); + DCHECK_EQ(src, xmm0); + + __ movaps(src_tmp, src); + __ movaps(dst_tmp, dst); + + __ pcmpeqd(src, src); + __ psllq(src, 63); + + __ pxor(dst_tmp, src); + __ pxor(src, src_tmp); + + __ pcmpgtq(src, dst_tmp); + __ blendvpd(dst, src_tmp); // implicit use of xmm0 as mask break; } case kX64I64x2GtU: { @@ -2601,7 +2788,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( CpuFeatureScope sse_scope(tasm(), SSE4_2); XMMRegister dst = i.OutputSimd128Register(); XMMRegister src = i.InputSimd128Register(1); - XMMRegister tmp = i.ToSimd128Register(instr->TempAt(0)); + XMMRegister tmp = i.TempSimd128Register(0); __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg); __ psllq(kScratchDoubleReg, 63); @@ -2617,7 +2804,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( CpuFeatureScope sse_scope(tasm(), SSE4_2); XMMRegister dst = i.OutputSimd128Register(); XMMRegister src = i.InputSimd128Register(1); - XMMRegister tmp = i.ToSimd128Register(instr->TempAt(0)); + XMMRegister tmp = i.TempSimd128Register(0); __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg); __ psllq(kScratchDoubleReg, 63); @@ -2632,7 +2819,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( } case kX64I32x4Splat: { XMMRegister dst = i.OutputSimd128Register(); - if (instr->InputAt(0)->IsRegister()) { + if (HasRegisterInput(instr, 0)) { __ movd(dst, i.InputRegister(0)); } else { __ 
movd(dst, i.InputOperand(0)); @@ -2647,7 +2834,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( } case kX64I32x4ReplaceLane: { CpuFeatureScope sse_scope(tasm(), SSE4_1); - if (instr->InputAt(2)->IsRegister()) { + if (HasRegisterInput(instr, 2)) { __ Pinsrd(i.OutputSimd128Register(), i.InputRegister(2), i.InputInt8(1)); } else { @@ -2658,19 +2845,20 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( case kX64I32x4SConvertF32x4: { DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0)); XMMRegister dst = i.OutputSimd128Register(); + XMMRegister tmp = i.TempSimd128Register(0); // NAN->0 - __ movaps(kScratchDoubleReg, dst); - __ cmpeqps(kScratchDoubleReg, kScratchDoubleReg); - __ pand(dst, kScratchDoubleReg); + __ movaps(tmp, dst); + __ cmpeqps(tmp, tmp); + __ pand(dst, tmp); // Set top bit if >= 0 (but not -0.0!) - __ pxor(kScratchDoubleReg, dst); + __ pxor(tmp, dst); // Convert __ cvttps2dq(dst, dst); // Set top bit if >=0 is now < 0 - __ pand(kScratchDoubleReg, dst); - __ psrad(kScratchDoubleReg, 31); + __ pand(tmp, dst); + __ psrad(tmp, 31); // Set positive overflow lanes to 0x7FFFFFFF - __ pxor(dst, kScratchDoubleReg); + __ pxor(dst, tmp); break; } case kX64I32x4SConvertI16x8Low: { @@ -2699,11 +2887,15 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kX64I32x4Shl: { - __ pslld(i.OutputSimd128Register(), i.InputInt8(1)); + XMMRegister tmp = i.TempSimd128Register(0); + __ movq(tmp, i.InputRegister(1)); + __ pslld(i.OutputSimd128Register(), tmp); break; } case kX64I32x4ShrS: { - __ psrad(i.OutputSimd128Register(), i.InputInt8(1)); + XMMRegister tmp = i.TempSimd128Register(0); + __ movq(tmp, i.InputRegister(1)); + __ psrad(i.OutputSimd128Register(), tmp); break; } case kX64I32x4Add: { @@ -2739,9 +2931,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kX64I32x4Ne: { + XMMRegister tmp = i.TempSimd128Register(0); __ pcmpeqd(i.OutputSimd128Register(), i.InputSimd128Register(1)); - __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg); - __ pxor(i.OutputSimd128Register(), kScratchDoubleReg); + __ pcmpeqd(tmp, tmp); + __ pxor(i.OutputSimd128Register(), tmp); break; } case kX64I32x4GtS: { @@ -2760,24 +2953,25 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0)); CpuFeatureScope sse_scope(tasm(), SSE4_1); XMMRegister dst = i.OutputSimd128Register(); - XMMRegister tmp = i.ToSimd128Register(instr->TempAt(0)); + XMMRegister tmp = i.TempSimd128Register(0); + XMMRegister tmp2 = i.TempSimd128Register(1); // NAN->0, negative->0 - __ pxor(kScratchDoubleReg, kScratchDoubleReg); - __ maxps(dst, kScratchDoubleReg); + __ pxor(tmp2, tmp2); + __ maxps(dst, tmp2); // scratch: float representation of max_signed - __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg); - __ psrld(kScratchDoubleReg, 1); // 0x7fffffff - __ cvtdq2ps(kScratchDoubleReg, kScratchDoubleReg); // 0x4f000000 + __ pcmpeqd(tmp2, tmp2); + __ psrld(tmp2, 1); // 0x7fffffff + __ cvtdq2ps(tmp2, tmp2); // 0x4f000000 // tmp: convert (src-max_signed). 
// Positive overflow lanes -> 0x7FFFFFFF // Negative lanes -> 0 __ movaps(tmp, dst); - __ subps(tmp, kScratchDoubleReg); - __ cmpleps(kScratchDoubleReg, tmp); + __ subps(tmp, tmp2); + __ cmpleps(tmp2, tmp); __ cvttps2dq(tmp, tmp); - __ pxor(tmp, kScratchDoubleReg); - __ pxor(kScratchDoubleReg, kScratchDoubleReg); - __ pmaxsd(tmp, kScratchDoubleReg); + __ pxor(tmp, tmp2); + __ pxor(tmp2, tmp2); + __ pmaxsd(tmp, tmp2); // convert. Overflow lanes above max_signed will be 0x80000000 __ cvttps2dq(dst, dst); // Add (src-max_signed) for overflow lanes. @@ -2797,7 +2991,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kX64I32x4ShrU: { - __ psrld(i.OutputSimd128Register(), i.InputInt8(1)); + XMMRegister tmp = i.TempSimd128Register(0); + __ movq(tmp, i.InputRegister(1)); + __ psrld(i.OutputSimd128Register(), tmp); break; } case kX64I32x4MinU: { @@ -2814,10 +3010,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( CpuFeatureScope sse_scope(tasm(), SSE4_1); XMMRegister dst = i.OutputSimd128Register(); XMMRegister src = i.InputSimd128Register(1); + XMMRegister tmp = i.TempSimd128Register(0); __ pmaxud(dst, src); __ pcmpeqd(dst, src); - __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg); - __ pxor(dst, kScratchDoubleReg); + __ pcmpeqd(tmp, tmp); + __ pxor(dst, tmp); break; } case kX64I32x4GeU: { @@ -2835,7 +3032,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( } case kX64I16x8Splat: { XMMRegister dst = i.OutputSimd128Register(); - if (instr->InputAt(0)->IsRegister()) { + if (HasRegisterInput(instr, 0)) { __ movd(dst, i.InputRegister(0)); } else { __ movd(dst, i.InputOperand(0)); @@ -2853,7 +3050,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( } case kX64I16x8ReplaceLane: { CpuFeatureScope sse_scope(tasm(), SSE4_1); - if (instr->InputAt(2)->IsRegister()) { + if (HasRegisterInput(instr, 2)) { __ pinsrw(i.OutputSimd128Register(), i.InputRegister(2), i.InputInt8(1)); } else { @@ -2887,11 +3084,15 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kX64I16x8Shl: { - __ psllw(i.OutputSimd128Register(), i.InputInt8(1)); + XMMRegister tmp = i.TempSimd128Register(0); + __ movq(tmp, i.InputRegister(1)); + __ psllw(i.OutputSimd128Register(), tmp); break; } case kX64I16x8ShrS: { - __ psraw(i.OutputSimd128Register(), i.InputInt8(1)); + XMMRegister tmp = i.TempSimd128Register(0); + __ movq(tmp, i.InputRegister(1)); + __ psraw(i.OutputSimd128Register(), tmp); break; } case kX64I16x8SConvertI32x4: { @@ -2940,9 +3141,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kX64I16x8Ne: { + XMMRegister tmp = i.TempSimd128Register(0); __ pcmpeqw(i.OutputSimd128Register(), i.InputSimd128Register(1)); - __ pcmpeqw(kScratchDoubleReg, kScratchDoubleReg); - __ pxor(i.OutputSimd128Register(), kScratchDoubleReg); + __ pcmpeqw(tmp, tmp); + __ pxor(i.OutputSimd128Register(), tmp); break; } case kX64I16x8GtS: { @@ -2970,7 +3172,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kX64I16x8ShrU: { - __ psrlw(i.OutputSimd128Register(), i.InputInt8(1)); + XMMRegister tmp = i.TempSimd128Register(0); + __ movq(tmp, i.InputRegister(1)); + __ psrlw(i.OutputSimd128Register(), tmp); break; } case kX64I16x8UConvertI32x4: { @@ -3007,10 +3211,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( CpuFeatureScope sse_scope(tasm(), SSE4_1); XMMRegister dst = i.OutputSimd128Register(); XMMRegister src = 
i.InputSimd128Register(1); + XMMRegister tmp = i.TempSimd128Register(0); __ pmaxuw(dst, src); __ pcmpeqw(dst, src); - __ pcmpeqw(kScratchDoubleReg, kScratchDoubleReg); - __ pxor(dst, kScratchDoubleReg); + __ pcmpeqw(tmp, tmp); + __ pxor(dst, tmp); break; } case kX64I16x8GeU: { @@ -3024,7 +3229,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( case kX64I8x16Splat: { CpuFeatureScope sse_scope(tasm(), SSSE3); XMMRegister dst = i.OutputSimd128Register(); - if (instr->InputAt(0)->IsRegister()) { + if (HasRegisterInput(instr, 0)) { __ movd(dst, i.InputRegister(0)); } else { __ movd(dst, i.InputOperand(0)); @@ -3042,7 +3247,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( } case kX64I8x16ReplaceLane: { CpuFeatureScope sse_scope(tasm(), SSE4_1); - if (instr->InputAt(2)->IsRegister()) { + if (HasRegisterInput(instr, 2)) { __ pinsrb(i.OutputSimd128Register(), i.InputRegister(2), i.InputInt8(1)); } else { @@ -3071,31 +3276,36 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( case kX64I8x16Shl: { XMMRegister dst = i.OutputSimd128Register(); DCHECK_EQ(dst, i.InputSimd128Register(0)); - int8_t shift = i.InputInt8(1) & 0x7; - if (shift < 4) { - // For small shifts, doubling is faster. - for (int i = 0; i < shift; ++i) { - __ paddb(dst, dst); - } - } else { - // Mask off the unwanted bits before word-shifting. - __ pcmpeqw(kScratchDoubleReg, kScratchDoubleReg); - __ psrlw(kScratchDoubleReg, 8 + shift); - __ packuswb(kScratchDoubleReg, kScratchDoubleReg); - __ pand(dst, kScratchDoubleReg); - __ psllw(dst, shift); - } + // Temp registers for shift mask and additional moves to XMM registers. + Register tmp = i.ToRegister(instr->TempAt(0)); + XMMRegister tmp_simd = i.TempSimd128Register(1); + // Mask off the unwanted bits before word-shifting. + __ pcmpeqw(kScratchDoubleReg, kScratchDoubleReg); + __ movq(tmp, i.InputRegister(1)); + __ addq(tmp, Immediate(8)); + __ movq(tmp_simd, tmp); + __ psrlw(kScratchDoubleReg, tmp_simd); + __ packuswb(kScratchDoubleReg, kScratchDoubleReg); + __ pand(dst, kScratchDoubleReg); + __ movq(tmp_simd, i.InputRegister(1)); + __ psllw(dst, tmp_simd); break; } case kX64I8x16ShrS: { XMMRegister dst = i.OutputSimd128Register(); - XMMRegister src = i.InputSimd128Register(0); - int8_t shift = i.InputInt8(1) & 0x7; + DCHECK_EQ(dst, i.InputSimd128Register(0)); + // Temp registers for shift mask and additional moves to XMM registers. + Register tmp = i.ToRegister(instr->TempAt(0)); + XMMRegister tmp_simd = i.TempSimd128Register(1); // Unpack the bytes into words, do arithmetic shifts, and repack. - __ punpckhbw(kScratchDoubleReg, src); - __ punpcklbw(dst, src); - __ psraw(kScratchDoubleReg, 8 + shift); - __ psraw(dst, 8 + shift); + __ punpckhbw(kScratchDoubleReg, dst); + __ punpcklbw(dst, dst); + // Prepare shift value + __ movq(tmp, i.InputRegister(1)); + __ addq(tmp, Immediate(8)); + __ movq(tmp_simd, tmp); + __ psraw(kScratchDoubleReg, tmp_simd); + __ psraw(dst, tmp_simd); __ packsswb(dst, kScratchDoubleReg); break; } @@ -3119,7 +3329,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( XMMRegister dst = i.OutputSimd128Register(); DCHECK_EQ(dst, i.InputSimd128Register(0)); XMMRegister right = i.InputSimd128Register(1); - XMMRegister tmp = i.ToSimd128Register(instr->TempAt(0)); + XMMRegister tmp = i.TempSimd128Register(0); // I16x8 view of I8x16 // left = AAaa AAaa ... AAaa AAaa // right= BBbb BBbb ...
BBbb BBbb @@ -3163,9 +3373,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kX64I8x16Ne: { + XMMRegister tmp = i.TempSimd128Register(0); __ pcmpeqb(i.OutputSimd128Register(), i.InputSimd128Register(1)); - __ pcmpeqb(kScratchDoubleReg, kScratchDoubleReg); - __ pxor(i.OutputSimd128Register(), kScratchDoubleReg); + __ pcmpeqb(tmp, tmp); + __ pxor(i.OutputSimd128Register(), tmp); break; } case kX64I8x16GtS: { @@ -3194,13 +3405,19 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( } case kX64I8x16ShrU: { XMMRegister dst = i.OutputSimd128Register(); - XMMRegister src = i.InputSimd128Register(0); - int8_t shift = i.InputInt8(1) & 0x7; // Unpack the bytes into words, do logical shifts, and repack. - __ punpckhbw(kScratchDoubleReg, src); - __ punpcklbw(dst, src); - __ psrlw(kScratchDoubleReg, 8 + shift); - __ psrlw(dst, 8 + shift); + DCHECK_EQ(dst, i.InputSimd128Register(0)); + // Temp registers for shift mask and additional moves to XMM registers. + Register tmp = i.ToRegister(instr->TempAt(0)); + XMMRegister tmp_simd = i.TempSimd128Register(1); + __ punpckhbw(kScratchDoubleReg, dst); + __ punpcklbw(dst, dst); + // Prepare shift value + __ movq(tmp, i.InputRegister(1)); + __ addq(tmp, Immediate(8)); + __ movq(tmp_simd, tmp); + __ psrlw(kScratchDoubleReg, tmp_simd); + __ psrlw(dst, tmp_simd); __ packuswb(dst, kScratchDoubleReg); break; } @@ -3226,10 +3443,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( CpuFeatureScope sse_scope(tasm(), SSE4_1); XMMRegister dst = i.OutputSimd128Register(); XMMRegister src = i.InputSimd128Register(1); + XMMRegister tmp = i.TempSimd128Register(0); __ pmaxub(dst, src); __ pcmpeqb(dst, src); - __ pcmpeqb(kScratchDoubleReg, kScratchDoubleReg); - __ pxor(dst, kScratchDoubleReg); + __ pcmpeqb(tmp, tmp); + __ pxor(dst, tmp); break; } case kX64I8x16GeU: { @@ -3561,9 +3779,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( ASSEMBLE_SIMD_ALL_TRUE(pcmpeqb); break; } - case kX64StackCheck: - __ CompareRoot(rsp, RootIndex::kStackLimit); - break; case kWord32AtomicExchangeInt8: { __ xchgb(i.InputRegister(0), i.MemoryOperand(1)); __ movsxbl(i.InputRegister(0), i.InputRegister(0)); @@ -4167,6 +4382,8 @@ void CodeGenerator::AssembleReturn(InstructionOperand* pop) { void CodeGenerator::FinishCode() { tasm()->PatchConstPool(); } +void CodeGenerator::PrepareForDeoptimizationExits(int deopt_count) {} + void CodeGenerator::AssembleMove(InstructionOperand* source, InstructionOperand* destination) { X64OperandConverter g(this, nullptr); diff --git a/deps/v8/src/compiler/backend/x64/instruction-codes-x64.h b/deps/v8/src/compiler/backend/x64/instruction-codes-x64.h index d6ac3f43df..8a0a45a916 100644 --- a/deps/v8/src/compiler/backend/x64/instruction-codes-x64.h +++ b/deps/v8/src/compiler/backend/x64/instruction-codes-x64.h @@ -140,9 +140,6 @@ namespace compiler { V(X64DecompressSigned) \ V(X64DecompressPointer) \ V(X64DecompressAny) \ - V(X64CompressSigned) \ - V(X64CompressPointer) \ - V(X64CompressAny) \ V(X64Movq) \ V(X64Movsd) \ V(X64Movss) \ @@ -158,12 +155,17 @@ namespace compiler { V(X64Push) \ V(X64Poke) \ V(X64Peek) \ - V(X64StackCheck) \ V(X64F64x2Splat) \ V(X64F64x2ExtractLane) \ V(X64F64x2ReplaceLane) \ V(X64F64x2Abs) \ V(X64F64x2Neg) \ + V(X64F64x2Add) \ + V(X64F64x2Sub) \ + V(X64F64x2Mul) \ + V(X64F64x2Div) \ + V(X64F64x2Min) \ + V(X64F64x2Max) \ V(X64F64x2Eq) \ V(X64F64x2Ne) \ V(X64F64x2Lt) \ @@ -181,6 +183,7 @@ namespace compiler { V(X64F32x4AddHoriz) \ V(X64F32x4Sub)
\ V(X64F32x4Mul) \ + V(X64F32x4Div) \ V(X64F32x4Min) \ V(X64F32x4Max) \ V(X64F32x4Eq) \ @@ -196,11 +199,15 @@ namespace compiler { V(X64I64x2Add) \ V(X64I64x2Sub) \ V(X64I64x2Mul) \ + V(X64I64x2MinS) \ + V(X64I64x2MaxS) \ V(X64I64x2Eq) \ V(X64I64x2Ne) \ V(X64I64x2GtS) \ V(X64I64x2GeS) \ V(X64I64x2ShrU) \ + V(X64I64x2MinU) \ + V(X64I64x2MaxU) \ V(X64I64x2GtU) \ V(X64I64x2GeU) \ V(X64I32x4Splat) \ diff --git a/deps/v8/src/compiler/backend/x64/instruction-scheduler-x64.cc b/deps/v8/src/compiler/backend/x64/instruction-scheduler-x64.cc index 6389ef2e50..e9fa450c38 100644 --- a/deps/v8/src/compiler/backend/x64/instruction-scheduler-x64.cc +++ b/deps/v8/src/compiler/backend/x64/instruction-scheduler-x64.cc @@ -129,6 +129,12 @@ int InstructionScheduler::GetTargetInstructionFlags( case kX64F64x2ReplaceLane: case kX64F64x2Abs: case kX64F64x2Neg: + case kX64F64x2Add: + case kX64F64x2Sub: + case kX64F64x2Mul: + case kX64F64x2Div: + case kX64F64x2Min: + case kX64F64x2Max: case kX64F64x2Eq: case kX64F64x2Ne: case kX64F64x2Lt: @@ -146,6 +152,7 @@ int InstructionScheduler::GetTargetInstructionFlags( case kX64F32x4AddHoriz: case kX64F32x4Sub: case kX64F32x4Mul: + case kX64F32x4Div: case kX64F32x4Min: case kX64F32x4Max: case kX64F32x4Eq: @@ -161,11 +168,15 @@ int InstructionScheduler::GetTargetInstructionFlags( case kX64I64x2Add: case kX64I64x2Sub: case kX64I64x2Mul: + case kX64I64x2MinS: + case kX64I64x2MaxS: case kX64I64x2Eq: case kX64I64x2Ne: case kX64I64x2GtS: case kX64I64x2GeS: case kX64I64x2ShrU: + case kX64I64x2MinU: + case kX64I64x2MaxU: case kX64I64x2GtU: case kX64I64x2GeU: case kX64I32x4Splat: @@ -295,9 +306,6 @@ int InstructionScheduler::GetTargetInstructionFlags( case kX64DecompressSigned: case kX64DecompressPointer: case kX64DecompressAny: - case kX64CompressSigned: - case kX64CompressPointer: - case kX64CompressAny: return (instr->addressing_mode() == kMode_None) ? kNoOpcodeFlags : kIsLoadOperation | kHasSideEffect; @@ -346,7 +354,6 @@ int InstructionScheduler::GetTargetInstructionFlags( case kX64Movdqu: return instr->HasOutput() ? 
kIsLoadOperation : kHasSideEffect; - case kX64StackCheck: case kX64Peek: return kIsLoadOperation; diff --git a/deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc b/deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc index a4908fb846..5379074bac 100644 --- a/deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc +++ b/deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc @@ -170,9 +170,10 @@ class X64OperandGenerator final : public OperandGenerator { AddressingMode GetEffectiveAddressMemoryOperand(Node* operand, InstructionOperand inputs[], size_t* input_count) { - if (selector()->CanAddressRelativeToRootsRegister()) { + { LoadMatcher<ExternalReferenceMatcher> m(operand); - if (m.index().HasValue() && m.object().HasValue()) { + if (m.index().HasValue() && m.object().HasValue() && + selector()->CanAddressRelativeToRootsRegister(m.object().Value())) { ptrdiff_t const delta = m.index().Value() + TurboAssemblerBase::RootRegisterOffsetForExternalReference( @@ -350,7 +351,8 @@ void InstructionSelector::VisitStore(Node* node) { StoreRepresentation store_rep = StoreRepresentationOf(node->op()); WriteBarrierKind write_barrier_kind = store_rep.write_barrier_kind(); - if (write_barrier_kind != kNoWriteBarrier) { + if (write_barrier_kind != kNoWriteBarrier && + V8_LIKELY(!FLAG_disable_write_barriers)) { DCHECK(CanBeTaggedOrCompressedPointer(store_rep.representation())); AddressingMode addressing_mode; InstructionOperand inputs[] = { @@ -528,6 +530,35 @@ void InstructionSelector::VisitWord64Xor(Node* node) { } } +void InstructionSelector::VisitStackPointerGreaterThan( + Node* node, FlagsContinuation* cont) { + Node* const value = node->InputAt(0); + InstructionCode opcode = kArchStackPointerGreaterThan; + + DCHECK(cont->IsBranch()); + const int effect_level = + GetEffectLevel(cont->true_block()->PredecessorAt(0)->control_input()); + + X64OperandGenerator g(this); + if (g.CanBeMemoryOperand(kX64Cmp, node, value, effect_level)) { + DCHECK_EQ(IrOpcode::kLoad, value->opcode()); + + // GetEffectiveAddressMemoryOperand can create at most 3 inputs. + static constexpr int kMaxInputCount = 3; + + size_t input_count = 0; + InstructionOperand inputs[kMaxInputCount]; + AddressingMode addressing_mode = + g.GetEffectiveAddressMemoryOperand(value, inputs, &input_count); + opcode |= AddressingModeField::encode(addressing_mode); + DCHECK_LE(input_count, kMaxInputCount); + + EmitWithContinuation(opcode, 0, nullptr, input_count, inputs, cont); + } else { + EmitWithContinuation(opcode, g.UseRegister(value), cont); + } +} + namespace { bool TryMergeTruncateInt64ToInt32IntoLoad(InstructionSelector* selector, @@ -1238,23 +1269,23 @@ void InstructionSelector::VisitChangeUint32ToUint64(Node* node) { } void InstructionSelector::VisitChangeTaggedToCompressed(Node* node) { - X64OperandGenerator g(this); - Node* value = node->InputAt(0); - Emit(kX64CompressAny, g.DefineAsRegister(node), g.Use(value)); + // The top 32 bits in the 64-bit register will be undefined, and + // must not be used by a dependent node. + return EmitIdentity(node); } void InstructionSelector::VisitChangeTaggedPointerToCompressedPointer( Node* node) { - X64OperandGenerator g(this); - Node* value = node->InputAt(0); - Emit(kX64CompressPointer, g.DefineAsRegister(node), g.Use(value)); + // The top 32 bits in the 64-bit register will be undefined, and + // must not be used by a dependent node. 
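+ // (EmitIdentity emits no instruction at all; it simply renames the node to + // its input, so no move is generated for the compression.)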
+ return EmitIdentity(node); } void InstructionSelector::VisitChangeTaggedSignedToCompressedSigned( Node* node) { - X64OperandGenerator g(this); - Node* value = node->InputAt(0); - Emit(kX64CompressSigned, g.DefineAsRegister(node), g.Use(value)); + // The top 32 bits in the 64-bit register will be undefined, and + // must not be used by a dependent node. + return EmitIdentity(node); } void InstructionSelector::VisitChangeCompressedToTagged(Node* node) { @@ -1338,10 +1369,13 @@ void VisitFloatBinop(InstructionSelector* selector, Node* node, void VisitFloatUnop(InstructionSelector* selector, Node* node, Node* input, ArchOpcode avx_opcode, ArchOpcode sse_opcode) { X64OperandGenerator g(selector); + InstructionOperand temps[] = {g.TempDoubleRegister()}; if (selector->IsSupported(AVX)) { - selector->Emit(avx_opcode, g.DefineAsRegister(node), g.Use(input)); + selector->Emit(avx_opcode, g.DefineAsRegister(node), g.UseUnique(input), + arraysize(temps), temps); } else { - selector->Emit(sse_opcode, g.DefineSameAsFirst(node), g.UseRegister(input)); + selector->Emit(sse_opcode, g.DefineSameAsFirst(node), g.UseRegister(input), + arraysize(temps), temps); } } @@ -1838,30 +1872,6 @@ void VisitWord64Compare(InstructionSelector* selector, Node* node, g.UseRegister(m.right().node()), cont); } } - if (selector->isolate() != nullptr) { - StackCheckMatcher<Int64BinopMatcher, IrOpcode::kUint64LessThan> m( - selector->isolate(), node); - if (m.Matched()) { - // Compare(Load(js_stack_limit), LoadStackPointer) - if (!node->op()->HasProperty(Operator::kCommutative)) cont->Commute(); - InstructionCode opcode = cont->Encode(kX64StackCheck); - CHECK(cont->IsBranch()); - selector->EmitWithContinuation(opcode, cont); - return; - } - } - WasmStackCheckMatcher<Int64BinopMatcher, IrOpcode::kUint64LessThan> wasm_m( - node); - if (wasm_m.Matched()) { - // This is a wasm stack check. By structure, we know that we can use the - // stack pointer directly, as wasm code does not modify the stack at points - // where stack checks are performed. 
- Node* left = node->InputAt(0); - LocationOperand rsp(InstructionOperand::EXPLICIT, LocationOperand::REGISTER, - InstructionSequence::DefaultRepresentation(), - RegisterCode::kRegCode_rsp); - return VisitCompareWithMemoryOperand(selector, kX64Cmp, left, rsp, cont); - } VisitWordCompare(selector, node, kX64Cmp, cont); } @@ -2157,6 +2167,9 @@ void InstructionSelector::VisitWordCompareZero(Node* user, Node* value, return VisitWordCompare(this, value, kX64Cmp32, cont); case IrOpcode::kWord32And: return VisitWordCompare(this, value, kX64Test32, cont); + case IrOpcode::kStackPointerGreaterThan: + cont->OverwriteAndNegateIfEqual(kStackPointerGreaterThanCondition); + return VisitStackPointerGreaterThan(value, cont); default: break; } @@ -2586,6 +2599,12 @@ VISIT_ATOMIC_BINOP(Xor) V(I8x16) #define SIMD_BINOP_LIST(V) \ + V(F64x2Add) \ + V(F64x2Sub) \ + V(F64x2Mul) \ + V(F64x2Div) \ + V(F64x2Min) \ + V(F64x2Max) \ V(F64x2Eq) \ V(F64x2Ne) \ V(F64x2Lt) \ @@ -2594,6 +2613,7 @@ VISIT_ATOMIC_BINOP(Xor) V(F32x4AddHoriz) \ V(F32x4Sub) \ V(F32x4Mul) \ + V(F32x4Div) \ V(F32x4Min) \ V(F32x4Max) \ V(F32x4Eq) \ @@ -2603,7 +2623,6 @@ VISIT_ATOMIC_BINOP(Xor) V(I64x2Add) \ V(I64x2Sub) \ V(I64x2Eq) \ - V(I64x2Ne) \ V(I64x2GtS) \ V(I32x4Add) \ V(I32x4AddHoriz) \ @@ -2612,12 +2631,10 @@ VISIT_ATOMIC_BINOP(Xor) V(I32x4MinS) \ V(I32x4MaxS) \ V(I32x4Eq) \ - V(I32x4Ne) \ V(I32x4GtS) \ V(I32x4GeS) \ V(I32x4MinU) \ V(I32x4MaxU) \ - V(I32x4GtU) \ V(I32x4GeU) \ V(I16x8SConvertI32x4) \ V(I16x8Add) \ @@ -2629,14 +2646,12 @@ VISIT_ATOMIC_BINOP(Xor) V(I16x8MinS) \ V(I16x8MaxS) \ V(I16x8Eq) \ - V(I16x8Ne) \ V(I16x8GtS) \ V(I16x8GeS) \ V(I16x8AddSaturateU) \ V(I16x8SubSaturateU) \ V(I16x8MinU) \ V(I16x8MaxU) \ - V(I16x8GtU) \ V(I16x8GeU) \ V(I8x16SConvertI16x8) \ V(I8x16Add) \ @@ -2646,23 +2661,28 @@ VISIT_ATOMIC_BINOP(Xor) V(I8x16MinS) \ V(I8x16MaxS) \ V(I8x16Eq) \ - V(I8x16Ne) \ V(I8x16GtS) \ V(I8x16GeS) \ V(I8x16AddSaturateU) \ V(I8x16SubSaturateU) \ V(I8x16MinU) \ V(I8x16MaxU) \ - V(I8x16GtU) \ V(I8x16GeU) \ V(S128And) \ V(S128Or) \ V(S128Xor) #define SIMD_BINOP_ONE_TEMP_LIST(V) \ + V(I64x2Ne) \ V(I64x2GeS) \ V(I64x2GtU) \ - V(I64x2GeU) + V(I64x2GeU) \ + V(I32x4Ne) \ + V(I32x4GtU) \ + V(I16x8Ne) \ + V(I16x8GtU) \ + V(I8x16Ne) \ + V(I8x16GtU) #define SIMD_UNOP_LIST(V) \ V(F32x4SConvertI32x4) \ @@ -2686,16 +2706,17 @@ VISIT_ATOMIC_BINOP(Xor) #define SIMD_SHIFT_OPCODES(V) \ V(I64x2Shl) \ - V(I64x2ShrS) \ V(I64x2ShrU) \ V(I32x4Shl) \ V(I32x4ShrS) \ V(I32x4ShrU) \ V(I16x8Shl) \ V(I16x8ShrS) \ - V(I16x8ShrU) \ - V(I8x16Shl) \ - V(I8x16ShrS) \ + V(I16x8ShrU) + +#define SIMD_NARROW_SHIFT_OPCODES(V) \ + V(I8x16Shl) \ + V(I8x16ShrS) \ V(I8x16ShrU) #define SIMD_ANYTRUE_LIST(V) \ @@ -2745,17 +2766,30 @@ SIMD_TYPES(VISIT_SIMD_EXTRACT_LANE) SIMD_TYPES(VISIT_SIMD_REPLACE_LANE) #undef VISIT_SIMD_REPLACE_LANE -#define VISIT_SIMD_SHIFT(Opcode) \ - void InstructionSelector::Visit##Opcode(Node* node) { \ - X64OperandGenerator g(this); \ - int32_t value = OpParameter<int32_t>(node->op()); \ - Emit(kX64##Opcode, g.DefineSameAsFirst(node), \ - g.UseRegister(node->InputAt(0)), g.UseImmediate(value)); \ +#define VISIT_SIMD_SHIFT(Opcode) \ + void InstructionSelector::Visit##Opcode(Node* node) { \ + X64OperandGenerator g(this); \ + InstructionOperand temps[] = {g.TempSimd128Register()}; \ + Emit(kX64##Opcode, g.DefineSameAsFirst(node), \ + g.UseUniqueRegister(node->InputAt(0)), \ + g.UseUniqueRegister(node->InputAt(1)), arraysize(temps), temps); \ } SIMD_SHIFT_OPCODES(VISIT_SIMD_SHIFT) #undef VISIT_SIMD_SHIFT #undef SIMD_SHIFT_OPCODES +#define 
VISIT_SIMD_NARROW_SHIFT(Opcode) \ + void InstructionSelector::Visit##Opcode(Node* node) { \ + X64OperandGenerator g(this); \ + InstructionOperand temps[] = {g.TempRegister(), g.TempSimd128Register()}; \ + Emit(kX64##Opcode, g.DefineSameAsFirst(node), \ + g.UseUniqueRegister(node->InputAt(0)), \ + g.UseUniqueRegister(node->InputAt(1)), arraysize(temps), temps); \ + } +SIMD_NARROW_SHIFT_OPCODES(VISIT_SIMD_NARROW_SHIFT) +#undef VISIT_SIMD_NARROW_SHIFT +#undef SIMD_NARROW_SHIFT_OPCODES + #define VISIT_SIMD_UNOP(Opcode) \ void InstructionSelector::Visit##Opcode(Node* node) { \ X64OperandGenerator g(this); \ @@ -2799,12 +2833,12 @@ SIMD_ANYTRUE_LIST(VISIT_SIMD_ANYTRUE) #undef VISIT_SIMD_ANYTRUE #undef SIMD_ANYTRUE_LIST -#define VISIT_SIMD_ALLTRUE(Opcode) \ - void InstructionSelector::Visit##Opcode(Node* node) { \ - X64OperandGenerator g(this); \ - InstructionOperand temps[] = {g.TempRegister()}; \ - Emit(kX64##Opcode, g.DefineAsRegister(node), \ - g.UseUniqueRegister(node->InputAt(0)), arraysize(temps), temps); \ +#define VISIT_SIMD_ALLTRUE(Opcode) \ + void InstructionSelector::Visit##Opcode(Node* node) { \ + X64OperandGenerator g(this); \ + InstructionOperand temps[] = {g.TempRegister(), g.TempSimd128Register()}; \ + Emit(kX64##Opcode, g.DefineAsRegister(node), \ + g.UseUniqueRegister(node->InputAt(0)), arraysize(temps), temps); \ } SIMD_ALLTRUE_LIST(VISIT_SIMD_ALLTRUE) #undef VISIT_SIMD_ALLTRUE @@ -2820,14 +2854,16 @@ void InstructionSelector::VisitS128Select(Node* node) { void InstructionSelector::VisitF64x2Abs(Node* node) { X64OperandGenerator g(this); - Emit(kX64F64x2Abs, g.DefineSameAsFirst(node), - g.UseRegister(node->InputAt(0))); + InstructionOperand temps[] = {g.TempDoubleRegister()}; + Emit(kX64F64x2Abs, g.DefineSameAsFirst(node), g.UseRegister(node->InputAt(0)), + arraysize(temps), temps); } void InstructionSelector::VisitF64x2Neg(Node* node) { X64OperandGenerator g(this); - Emit(kX64F64x2Neg, g.DefineSameAsFirst(node), - g.UseRegister(node->InputAt(0))); + InstructionOperand temps[] = {g.TempDoubleRegister()}; + Emit(kX64F64x2Neg, g.DefineSameAsFirst(node), g.UseRegister(node->InputAt(0)), + arraysize(temps), temps); } void InstructionSelector::VisitF32x4UConvertI32x4(Node* node) { @@ -2836,6 +2872,15 @@ void InstructionSelector::VisitF32x4UConvertI32x4(Node* node) { g.UseRegister(node->InputAt(0))); } +void InstructionSelector::VisitI64x2ShrS(Node* node) { + X64OperandGenerator g(this); + InstructionOperand temps[] = {g.TempRegister()}; + // Use fixed to rcx, to use sarq_cl in codegen. 
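+ // (x86 variable shifts take their count in cl, so pinning input 1 to rcx + // lets the code generator emit the shift-by-cl form of sarq directly.)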
+ Emit(kX64I64x2ShrS, g.DefineSameAsFirst(node), + g.UseUniqueRegister(node->InputAt(0)), g.UseFixed(node->InputAt(1), rcx), + arraysize(temps), temps); +} + void InstructionSelector::VisitI64x2Mul(Node* node) { X64OperandGenerator g(this); InstructionOperand temps[] = {g.TempSimd128Register(), @@ -2845,15 +2890,59 @@ void InstructionSelector::VisitI64x2Mul(Node* node) { g.UseUniqueRegister(node->InputAt(1)), arraysize(temps), temps); } +void InstructionSelector::VisitI64x2MinS(Node* node) { + X64OperandGenerator g(this); + if (this->IsSupported(SSE4_2)) { + InstructionOperand temps[] = {g.TempSimd128Register()}; + Emit(kX64I64x2MinS, g.DefineSameAsFirst(node), + g.UseRegister(node->InputAt(0)), g.UseFixed(node->InputAt(1), xmm0), + arraysize(temps), temps); + } else { + InstructionOperand temps[] = {g.TempSimd128Register(), g.TempRegister(), + g.TempRegister()}; + Emit(kX64I64x2MinS, g.DefineSameAsFirst(node), + g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)), + arraysize(temps), temps); + } +} + +void InstructionSelector::VisitI64x2MaxS(Node* node) { + X64OperandGenerator g(this); + InstructionOperand temps[] = {g.TempSimd128Register()}; + Emit(kX64I64x2MaxS, g.DefineSameAsFirst(node), + g.UseRegister(node->InputAt(0)), g.UseFixed(node->InputAt(1), xmm0), + arraysize(temps), temps); +} + +void InstructionSelector::VisitI64x2MinU(Node* node) { + X64OperandGenerator g(this); + InstructionOperand temps[] = {g.TempSimd128Register(), + g.TempSimd128Register()}; + Emit(kX64I64x2MinU, g.DefineSameAsFirst(node), + g.UseRegister(node->InputAt(0)), g.UseFixed(node->InputAt(1), xmm0), + arraysize(temps), temps); +} + +void InstructionSelector::VisitI64x2MaxU(Node* node) { + X64OperandGenerator g(this); + InstructionOperand temps[] = {g.TempSimd128Register(), + g.TempSimd128Register()}; + Emit(kX64I64x2MaxU, g.DefineSameAsFirst(node), + g.UseRegister(node->InputAt(0)), g.UseFixed(node->InputAt(1), xmm0), + arraysize(temps), temps); +} + void InstructionSelector::VisitI32x4SConvertF32x4(Node* node) { X64OperandGenerator g(this); + InstructionOperand temps[] = {g.TempSimd128Register()}; Emit(kX64I32x4SConvertF32x4, g.DefineSameAsFirst(node), - g.UseRegister(node->InputAt(0))); + g.UseRegister(node->InputAt(0)), arraysize(temps), temps); } void InstructionSelector::VisitI32x4UConvertF32x4(Node* node) { X64OperandGenerator g(this); - InstructionOperand temps[] = {g.TempSimd128Register()}; + InstructionOperand temps[] = {g.TempSimd128Register(), + g.TempSimd128Register()}; Emit(kX64I32x4UConvertF32x4, g.DefineSameAsFirst(node), g.UseRegister(node->InputAt(0)), arraysize(temps), temps); } @@ -2997,12 +3086,12 @@ static const ShuffleEntry arch_shuffles[] = { true}, {{7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8}, kX64S8x8Reverse, - false, - false}, + true, + true}, {{3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12}, kX64S8x4Reverse, - false, - false}, + true, + true}, {{1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14}, kX64S8x2Reverse, true, @@ -3060,6 +3149,8 @@ void InstructionSelector::VisitS8x16Shuffle(Node* node) { SwapShuffleInputs(node); is_swizzle = false; // It's simpler to just handle the general case. no_same_as_first = false; // SSE requires same-as-first. + // TODO(v8:9608): also see v8:9083 + src1_needs_reg = true; opcode = kX64S8x16Alignr; // palignr takes a single imm8 offset. 
imms[imm_count++] = offset; diff --git a/deps/v8/src/compiler/branch-elimination.cc b/deps/v8/src/compiler/branch-elimination.cc index 2583262c07..ffc149ea5d 100644 --- a/deps/v8/src/compiler/branch-elimination.cc +++ b/deps/v8/src/compiler/branch-elimination.cc @@ -4,6 +4,7 @@ #include "src/compiler/branch-elimination.h" +#include "src/base/small-vector.h" #include "src/compiler/js-graph.h" #include "src/compiler/node-properties.h" #include "src/compiler/simplified-operator.h" @@ -13,17 +14,17 @@ namespace internal { namespace compiler { BranchElimination::BranchElimination(Editor* editor, JSGraph* js_graph, - Zone* zone) + Zone* zone, Phase phase) : AdvancedReducer(editor), jsgraph_(js_graph), node_conditions_(js_graph->graph()->NodeCount(), zone), reduced_(js_graph->graph()->NodeCount(), zone), zone_(zone), - dead_(js_graph->Dead()) {} + dead_(js_graph->Dead()), + phase_(phase) {} BranchElimination::~BranchElimination() = default; - Reduction BranchElimination::Reduce(Node* node) { switch (node->opcode()) { case IrOpcode::kDead: @@ -52,6 +53,74 @@ Reduction BranchElimination::Reduce(Node* node) { return NoChange(); } +void BranchElimination::SimplifyBranchCondition(Node* branch) { + // Try to use a phi as a branch condition if the control flow from the branch + // is known from previous branches. For example, in the graph below, the + // control flow of the second_branch is predictable because the first_branch + // uses the same branch condition. In such a case, create a new phi with + // constant inputs and let the second branch use the phi as its branch + // condition. This transformation exposes more branch folding opportunities + // to later passes through branch cloning in effect-control-linearizer. + // + // condition condition + // | \ | + // | first_branch first_branch + // | / \ / \ + // | / \ / \ + // |first_true first_false first_true first_false + // | \ / \ / + // | \ / \ / + // | first_merge ==> first_merge + // | | | + // second_branch 1 0 | + // / \ \ / | + // / \ phi | + // second_true second_false \ | + // second_branch + // / \ + // / \ + // second_true second_false + // + + DCHECK_EQ(IrOpcode::kBranch, branch->opcode()); + Node* merge = NodeProperties::GetControlInput(branch); + if (merge->opcode() != IrOpcode::kMerge) return; + + Node* branch_condition = branch->InputAt(0); + Node* previous_branch; + bool condition_value; + Graph* graph = jsgraph()->graph(); + base::SmallVector<Node*, 2> phi_inputs; + + Node::Inputs inputs = merge->inputs(); + int input_count = inputs.count(); + for (int i = 0; i != input_count; ++i) { + Node* input = inputs[i]; + ControlPathConditions from_input = node_conditions_.Get(input); + if (!from_input.LookupCondition(branch_condition, &previous_branch, + &condition_value)) + return; + + if (phase_ == kEARLY) { + phi_inputs.emplace_back(condition_value ? jsgraph()->TrueConstant() + : jsgraph()->FalseConstant()); + } else { + phi_inputs.emplace_back( + condition_value + ? graph->NewNode(jsgraph()->common()->Int32Constant(1)) + : graph->NewNode(jsgraph()->common()->Int32Constant(0))); + } + } + phi_inputs.emplace_back(merge); + Node* new_phi = graph->NewNode( + common()->Phi(phase_ == kEARLY ? MachineRepresentation::kTagged + : MachineRepresentation::kWord32, + input_count), + input_count + 1, &phi_inputs.at(0)); + + // Replace the branch condition with the new phi.
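+ // (At this point new_phi has one constant value input per predecessor of + // the merge, each holding branch_condition's known value on that incoming + // path, plus the merge itself as its final control input.)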
+ NodeProperties::ReplaceValueInput(branch, new_phi, 0); +} Reduction BranchElimination::ReduceBranch(Node* node) { Node* condition = node->InputAt(0); @@ -87,6 +156,7 @@ Reduction BranchElimination::ReduceBranch(Node* node) { } return Replace(dead()); } + SimplifyBranchCondition(node); return TakeConditionsFromFirstControl(node); } @@ -151,7 +221,6 @@ Reduction BranchElimination::ReduceIf(Node* node, bool is_true_branch) { return UpdateConditions(node, from_branch, condition, branch, is_true_branch); } - Reduction BranchElimination::ReduceLoop(Node* node) { // Here we rely on having only reducible loops: // The loop entry edge always dominates the header, so we can just use @@ -159,7 +228,6 @@ Reduction BranchElimination::ReduceLoop(Node* node) { return TakeConditionsFromFirstControl(node); } - Reduction BranchElimination::ReduceMerge(Node* node) { // Shortcut for the case when we do not know anything about some // input. @@ -188,18 +256,15 @@ Reduction BranchElimination::ReduceMerge(Node* node) { return UpdateConditions(node, conditions); } - Reduction BranchElimination::ReduceStart(Node* node) { return UpdateConditions(node, {}); } - Reduction BranchElimination::ReduceOtherControl(Node* node) { DCHECK_EQ(1, node->op()->ControlInputCount()); return TakeConditionsFromFirstControl(node); } - Reduction BranchElimination::TakeConditionsFromFirstControl(Node* node) { // We just propagate the information from the control input (ideally, // we would only revisit control uses if there is change). diff --git a/deps/v8/src/compiler/branch-elimination.h b/deps/v8/src/compiler/branch-elimination.h index 2730da9c75..b3d9ef7752 100644 --- a/deps/v8/src/compiler/branch-elimination.h +++ b/deps/v8/src/compiler/branch-elimination.h @@ -22,7 +22,12 @@ class JSGraph; class V8_EXPORT_PRIVATE BranchElimination final : public NON_EXPORTED_BASE(AdvancedReducer) { public: - BranchElimination(Editor* editor, JSGraph* js_graph, Zone* zone); + enum Phase { + kEARLY, + kLATE, + }; + BranchElimination(Editor* editor, JSGraph* js_graph, Zone* zone, + Phase phase = kLATE); ~BranchElimination() final; const char* reducer_name() const override { return "BranchElimination"; } @@ -62,6 +67,7 @@ class V8_EXPORT_PRIVATE BranchElimination final Reduction ReduceMerge(Node* node); Reduction ReduceStart(Node* node); Reduction ReduceOtherControl(Node* node); + void SimplifyBranchCondition(Node* branch); Reduction TakeConditionsFromFirstControl(Node* node); Reduction UpdateConditions(Node* node, ControlPathConditions conditions); @@ -84,6 +90,7 @@ class V8_EXPORT_PRIVATE BranchElimination final NodeAuxData<bool> reduced_; Zone* zone_; Node* dead_; + Phase phase_; }; } // namespace compiler diff --git a/deps/v8/src/compiler/bytecode-analysis.cc b/deps/v8/src/compiler/bytecode-analysis.cc index b44bec5fc8..f1d43fc1a6 100644 --- a/deps/v8/src/compiler/bytecode-analysis.cc +++ b/deps/v8/src/compiler/bytecode-analysis.cc @@ -97,37 +97,35 @@ BytecodeAnalysis::BytecodeAnalysis(Handle<BytecodeArray> bytecode_array, namespace { -void UpdateInLiveness( - Bytecode bytecode, - BytecodeLivenessState& in_liveness, // NOLINT(runtime/references) - const interpreter::BytecodeArrayAccessor& accessor) { +void UpdateInLiveness(Bytecode bytecode, BytecodeLivenessState* in_liveness, + const interpreter::BytecodeArrayAccessor& accessor) { int num_operands = Bytecodes::NumberOfOperands(bytecode); const OperandType* operand_types = Bytecodes::GetOperandTypes(bytecode); // Special case Suspend and Resume to just pass through liveness. 
if (bytecode == Bytecode::kSuspendGenerator) { // The generator object has to be live. - in_liveness.MarkRegisterLive(accessor.GetRegisterOperand(0).index()); + in_liveness->MarkRegisterLive(accessor.GetRegisterOperand(0).index()); // Suspend additionally reads and returns the accumulator DCHECK(Bytecodes::ReadsAccumulator(bytecode)); - in_liveness.MarkAccumulatorLive(); + in_liveness->MarkAccumulatorLive(); return; } if (bytecode == Bytecode::kResumeGenerator) { // The generator object has to be live. - in_liveness.MarkRegisterLive(accessor.GetRegisterOperand(0).index()); + in_liveness->MarkRegisterLive(accessor.GetRegisterOperand(0).index()); return; } if (Bytecodes::WritesAccumulator(bytecode)) { - in_liveness.MarkAccumulatorDead(); + in_liveness->MarkAccumulatorDead(); } for (int i = 0; i < num_operands; ++i) { switch (operand_types[i]) { case OperandType::kRegOut: { interpreter::Register r = accessor.GetRegisterOperand(i); if (!r.is_parameter()) { - in_liveness.MarkRegisterDead(r.index()); + in_liveness->MarkRegisterDead(r.index()); } break; } @@ -137,7 +135,7 @@ void UpdateInLiveness( if (!r.is_parameter()) { for (uint32_t j = 0; j < reg_count; ++j) { DCHECK(!interpreter::Register(r.index() + j).is_parameter()); - in_liveness.MarkRegisterDead(r.index() + j); + in_liveness->MarkRegisterDead(r.index() + j); } } break; @@ -146,8 +144,8 @@ void UpdateInLiveness( interpreter::Register r = accessor.GetRegisterOperand(i); if (!r.is_parameter()) { DCHECK(!interpreter::Register(r.index() + 1).is_parameter()); - in_liveness.MarkRegisterDead(r.index()); - in_liveness.MarkRegisterDead(r.index() + 1); + in_liveness->MarkRegisterDead(r.index()); + in_liveness->MarkRegisterDead(r.index() + 1); } break; } @@ -156,9 +154,9 @@ void UpdateInLiveness( if (!r.is_parameter()) { DCHECK(!interpreter::Register(r.index() + 1).is_parameter()); DCHECK(!interpreter::Register(r.index() + 2).is_parameter()); - in_liveness.MarkRegisterDead(r.index()); - in_liveness.MarkRegisterDead(r.index() + 1); - in_liveness.MarkRegisterDead(r.index() + 2); + in_liveness->MarkRegisterDead(r.index()); + in_liveness->MarkRegisterDead(r.index() + 1); + in_liveness->MarkRegisterDead(r.index() + 2); } break; } @@ -169,14 +167,14 @@ void UpdateInLiveness( } if (Bytecodes::ReadsAccumulator(bytecode)) { - in_liveness.MarkAccumulatorLive(); + in_liveness->MarkAccumulatorLive(); } for (int i = 0; i < num_operands; ++i) { switch (operand_types[i]) { case OperandType::kReg: { interpreter::Register r = accessor.GetRegisterOperand(i); if (!r.is_parameter()) { - in_liveness.MarkRegisterLive(r.index()); + in_liveness->MarkRegisterLive(r.index()); } break; } @@ -184,8 +182,8 @@ void UpdateInLiveness( interpreter::Register r = accessor.GetRegisterOperand(i); if (!r.is_parameter()) { DCHECK(!interpreter::Register(r.index() + 1).is_parameter()); - in_liveness.MarkRegisterLive(r.index()); - in_liveness.MarkRegisterLive(r.index() + 1); + in_liveness->MarkRegisterLive(r.index()); + in_liveness->MarkRegisterLive(r.index() + 1); } break; } @@ -195,7 +193,7 @@ void UpdateInLiveness( if (!r.is_parameter()) { for (uint32_t j = 0; j < reg_count; ++j) { DCHECK(!interpreter::Register(r.index() + j).is_parameter()); - in_liveness.MarkRegisterLive(r.index() + j); + in_liveness->MarkRegisterLive(r.index() + j); } } break; @@ -207,19 +205,17 @@ void UpdateInLiveness( } } -void UpdateOutLiveness( - Bytecode bytecode, - BytecodeLivenessState& out_liveness, // NOLINT(runtime/references) - BytecodeLivenessState* next_bytecode_in_liveness, - const 
interpreter::BytecodeArrayAccessor& accessor, - Handle<BytecodeArray> bytecode_array, - const BytecodeLivenessMap& liveness_map) { +void UpdateOutLiveness(Bytecode bytecode, BytecodeLivenessState* out_liveness, + BytecodeLivenessState* next_bytecode_in_liveness, + const interpreter::BytecodeArrayAccessor& accessor, + Handle<BytecodeArray> bytecode_array, + const BytecodeLivenessMap& liveness_map) { int current_offset = accessor.current_offset(); // Special case Suspend and Resume to just pass through liveness. if (bytecode == Bytecode::kSuspendGenerator || bytecode == Bytecode::kResumeGenerator) { - out_liveness.Union(*next_bytecode_in_liveness); + out_liveness->Union(*next_bytecode_in_liveness); return; } @@ -227,10 +223,10 @@ void UpdateOutLiveness( // the liveness iterations. if (Bytecodes::IsForwardJump(bytecode)) { int target_offset = accessor.GetJumpTargetOffset(); - out_liveness.Union(*liveness_map.GetInLiveness(target_offset)); + out_liveness->Union(*liveness_map.GetInLiveness(target_offset)); } else if (Bytecodes::IsSwitch(bytecode)) { for (const auto& entry : accessor.GetJumpTableTargetOffsets()) { - out_liveness.Union(*liveness_map.GetInLiveness(entry.target_offset)); + out_liveness->Union(*liveness_map.GetInLiveness(entry.target_offset)); } } @@ -238,7 +234,7 @@ void UpdateOutLiveness( // unconditional jump). if (next_bytecode_in_liveness != nullptr && !Bytecodes::IsUnconditionalJump(bytecode)) { - out_liveness.Union(*next_bytecode_in_liveness); + out_liveness->Union(*next_bytecode_in_liveness); } // Update from exception handler (if any). @@ -250,15 +246,15 @@ void UpdateOutLiveness( table.LookupRange(current_offset, &handler_context, nullptr); if (handler_offset != -1) { - bool was_accumulator_live = out_liveness.AccumulatorIsLive(); - out_liveness.Union(*liveness_map.GetInLiveness(handler_offset)); - out_liveness.MarkRegisterLive(handler_context); + bool was_accumulator_live = out_liveness->AccumulatorIsLive(); + out_liveness->Union(*liveness_map.GetInLiveness(handler_offset)); + out_liveness->MarkRegisterLive(handler_context); if (!was_accumulator_live) { // The accumulator is reset to the exception on entry into a handler, // and so shouldn't be considered live coming out of this bytecode just // because it's live coming into the handler. So, kill the accumulator // if the handler is the only thing that made it live. 
- out_liveness.MarkAccumulatorDead(); + out_liveness->MarkAccumulatorDead(); // TODO(leszeks): Ideally the accumulator wouldn't be considered live at // the start of the handler, but looking up if the current bytecode is @@ -269,45 +265,42 @@ } } -void UpdateLiveness(Bytecode bytecode, - BytecodeLiveness& liveness, // NOLINT(runtime/references) +void UpdateLiveness(Bytecode bytecode, BytecodeLiveness const& liveness, BytecodeLivenessState** next_bytecode_in_liveness, const interpreter::BytecodeArrayAccessor& accessor, Handle<BytecodeArray> bytecode_array, const BytecodeLivenessMap& liveness_map) { - UpdateOutLiveness(bytecode, *liveness.out, *next_bytecode_in_liveness, + UpdateOutLiveness(bytecode, liveness.out, *next_bytecode_in_liveness, accessor, bytecode_array, liveness_map); liveness.in->CopyFrom(*liveness.out); - UpdateInLiveness(bytecode, *liveness.in, accessor); + UpdateInLiveness(bytecode, liveness.in, accessor); *next_bytecode_in_liveness = liveness.in; } -void UpdateAssignments( - Bytecode bytecode, - BytecodeLoopAssignments& assignments, // NOLINT(runtime/references) - const interpreter::BytecodeArrayAccessor& accessor) { +void UpdateAssignments(Bytecode bytecode, BytecodeLoopAssignments* assignments, + const interpreter::BytecodeArrayAccessor& accessor) { int num_operands = Bytecodes::NumberOfOperands(bytecode); const OperandType* operand_types = Bytecodes::GetOperandTypes(bytecode); for (int i = 0; i < num_operands; ++i) { switch (operand_types[i]) { case OperandType::kRegOut: { - assignments.Add(accessor.GetRegisterOperand(i)); + assignments->Add(accessor.GetRegisterOperand(i)); break; } case OperandType::kRegOutList: { interpreter::Register r = accessor.GetRegisterOperand(i++); uint32_t reg_count = accessor.GetRegisterCountOperand(i); - assignments.AddList(r, reg_count); + assignments->AddList(r, reg_count); break; } case OperandType::kRegOutPair: { - assignments.AddList(accessor.GetRegisterOperand(i), 2); + assignments->AddList(accessor.GetRegisterOperand(i), 2); break; } case OperandType::kRegOutTriple: { - assignments.AddList(accessor.GetRegisterOperand(i), 3); + assignments->AddList(accessor.GetRegisterOperand(i), 3); break; } default: @@ -365,7 +358,7 @@ void BytecodeAnalysis::Analyze() { // the loop *and* are live when the loop exits. However, this requires // tracking the out-liveness of *all* loop exits, which is not // information we currently have. - UpdateAssignments(bytecode, current_loop_info->assignments(), iterator); + UpdateAssignments(bytecode, &current_loop_info->assignments(), iterator); // Update suspend counts for this loop.
if (bytecode == Bytecode::kSuspendGenerator) { @@ -433,7 +426,7 @@ void BytecodeAnalysis::Analyze() { } if (analyze_liveness_) { - BytecodeLiveness& liveness = liveness_map_.InitializeLiveness( + BytecodeLiveness const& liveness = liveness_map_.InitializeLiveness( current_offset, bytecode_array()->register_count(), zone()); UpdateLiveness(bytecode, liveness, &next_bytecode_in_liveness, iterator, bytecode_array(), liveness_map_); @@ -496,14 +489,14 @@ void BytecodeAnalysis::Analyze() { for (; iterator.current_offset() > header_offset; --iterator) { Bytecode bytecode = iterator.current_bytecode(); int current_offset = iterator.current_offset(); - BytecodeLiveness& liveness = liveness_map_.GetLiveness(current_offset); - + BytecodeLiveness const& liveness = + liveness_map_.GetLiveness(current_offset); UpdateLiveness(bytecode, liveness, &next_bytecode_in_liveness, iterator, bytecode_array(), liveness_map_); } // Now we are at the loop header. Since the in-liveness of the header // can't change, we need only to update the out-liveness. - UpdateOutLiveness(iterator.current_bytecode(), *header_liveness.out, + UpdateOutLiveness(iterator.current_bytecode(), header_liveness.out, next_bytecode_in_liveness, iterator, bytecode_array(), liveness_map_); } @@ -532,13 +525,14 @@ void BytecodeAnalysis::Analyze() { // bytecodes before it. if (any_changed) { switch_liveness.in->CopyFrom(*switch_liveness.out); - UpdateInLiveness(Bytecode::kSwitchOnGeneratorState, *switch_liveness.in, + UpdateInLiveness(Bytecode::kSwitchOnGeneratorState, switch_liveness.in, iterator); next_bytecode_in_liveness = switch_liveness.in; for (--iterator; iterator.IsValid(); --iterator) { Bytecode bytecode = iterator.current_bytecode(); int current_offset = iterator.current_offset(); - BytecodeLiveness& liveness = liveness_map_.GetLiveness(current_offset); + BytecodeLiveness const& liveness = + liveness_map_.GetLiveness(current_offset); // There shouldn't be any more loops. DCHECK_NE(bytecode, Bytecode::kJumpLoop); @@ -829,7 +823,7 @@ bool BytecodeAnalysis::LivenessIsValid() { previous_liveness.CopyFrom(*liveness.out); - UpdateOutLiveness(bytecode, *liveness.out, next_bytecode_in_liveness, + UpdateOutLiveness(bytecode, liveness.out, next_bytecode_in_liveness, iterator, bytecode_array(), liveness_map_); // UpdateOutLiveness skips kJumpLoop, so we update it manually. if (bytecode == Bytecode::kJumpLoop) { @@ -848,7 +842,7 @@ bool BytecodeAnalysis::LivenessIsValid() { previous_liveness.CopyFrom(*liveness.in); liveness.in->CopyFrom(*liveness.out); - UpdateInLiveness(bytecode, *liveness.in, iterator); + UpdateInLiveness(bytecode, liveness.in, iterator); if (!liveness.in->Equals(previous_liveness)) { // Reset the invalid liveness. 
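The bytecode-analysis.cc hunks above are one mechanical refactor: liveness and assignment state that used to be passed by mutable reference (suppressed with NOLINT(runtime/references)) is now passed by pointer, following the Google C++ style rule that output parameters be pointers so that mutation is visible at the call site. A minimal sketch of the pattern, using a hypothetical LivenessState stand-in rather than V8's real BytecodeLivenessState:

    #include <bitset>

    struct LivenessState {
      std::bitset<64> live;  // one bit per interpreter register
      void MarkRegisterLive(int r) { live.set(r); }
    };

    // Before: call sites read MarkLive(state, r), hiding the mutation.
    // void MarkLive(LivenessState& state, int r);  // NOLINT(runtime/references)

    // After: call sites read MarkLive(&state, r), making the write explicit.
    void MarkLive(LivenessState* state, int r) { state->MarkRegisterLive(r); }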
diff --git a/deps/v8/src/compiler/bytecode-graph-builder.cc b/deps/v8/src/compiler/bytecode-graph-builder.cc index 7c71446320..b1051be571 100644 --- a/deps/v8/src/compiler/bytecode-graph-builder.cc +++ b/deps/v8/src/compiler/bytecode-graph-builder.cc @@ -16,7 +16,6 @@ #include "src/compiler/operator-properties.h" #include "src/compiler/simplified-operator.h" #include "src/compiler/state-values-utils.h" -#include "src/compiler/vector-slot-pair.h" #include "src/interpreter/bytecode-array-iterator.h" #include "src/interpreter/bytecode-flags.h" #include "src/interpreter/bytecodes.h" @@ -34,13 +33,12 @@ namespace compiler { class BytecodeGraphBuilder { public: BytecodeGraphBuilder(JSHeapBroker* broker, Zone* local_zone, - BytecodeArrayRef bytecode_array, - SharedFunctionInfoRef shared, - FeedbackVectorRef feedback_vector, BailoutId osr_offset, - JSGraph* jsgraph, + NativeContextRef const& native_context, + SharedFunctionInfoRef const& shared_info, + FeedbackVectorRef const& feedback_vector, + BailoutId osr_offset, JSGraph* jsgraph, CallFrequency const& invocation_frequency, - SourcePositionTable* source_positions, - NativeContextRef native_context, int inlining_id, + SourcePositionTable* source_positions, int inlining_id, BytecodeGraphBuilderFlags flags, TickCounter* tick_counter); @@ -68,9 +66,9 @@ class BytecodeGraphBuilder { // Builder for loading a native context field. Node* BuildLoadNativeContextField(int index); - // Helper function for creating a pair containing type feedback vector and - // a feedback slot. - VectorSlotPair CreateVectorSlotPair(int slot_id); + // Helper function for creating a feedback source containing the type + // feedback vector and a feedback slot. + FeedbackSource CreateFeedbackSource(int slot_id); void set_environment(Environment* env) { environment_ = env; } const Environment* environment() const { return environment_; } @@ -168,7 +166,7 @@ class BytecodeGraphBuilder { void PrepareFrameState(Node* node, OutputFrameStateCombine combine); void BuildCreateArguments(CreateArgumentsType type); - Node* BuildLoadGlobal(Handle<Name> name, uint32_t feedback_slot_index, + Node* BuildLoadGlobal(NameRef name, uint32_t feedback_slot_index, TypeofMode typeof_mode); enum class StoreMode { @@ -245,11 +243,12 @@ class BytecodeGraphBuilder { ForInMode GetForInMode(int operand_index); // Helper function to compute call frequency from the recorded type - // feedback. + // feedback. Returns unknown if invocation count is unknown. Returns 0 if + // feedback is insufficient. CallFrequency ComputeCallFrequency(int slot_id) const; // Helper function to extract the speculation mode from the recorded type - // feedback. + // feedback. Returns kDisallowSpeculation if feedback is insufficient. SpeculationMode GetSpeculationMode(int slot_id) const; // Control flow plumbing. @@ -310,7 +309,6 @@ class BytecodeGraphBuilder { int context_register_; // Index of register holding handler context.
}; - // Field accessors Graph* graph() const { return jsgraph_->graph(); } CommonOperatorBuilder* common() const { return jsgraph_->common(); } Zone* graph_zone() const { return graph()->zone(); } @@ -321,55 +319,44 @@ class BytecodeGraphBuilder { return jsgraph_->simplified(); } Zone* local_zone() const { return local_zone_; } - const BytecodeArrayRef bytecode_array() const { return bytecode_array_; } - FeedbackVectorRef feedback_vector() const { return feedback_vector_; } + BytecodeArrayRef bytecode_array() const { + return shared_info().GetBytecodeArray(); + } + FeedbackVectorRef const& feedback_vector() const { return feedback_vector_; } const JSTypeHintLowering& type_hint_lowering() const { return type_hint_lowering_; } const FrameStateFunctionInfo* frame_state_function_info() const { return frame_state_function_info_; } - SourcePositionTableIterator& source_position_iterator() { return *source_position_iterator_.get(); } - interpreter::BytecodeArrayIterator& bytecode_iterator() { return bytecode_iterator_; } - BytecodeAnalysis const& bytecode_analysis() const { return bytecode_analysis_; } - int currently_peeled_loop_offset() const { return currently_peeled_loop_offset_; } - void set_currently_peeled_loop_offset(int offset) { currently_peeled_loop_offset_ = offset; } - bool skip_next_stack_check() const { return skip_next_stack_check_; } - void unset_skip_next_stack_check() { skip_next_stack_check_ = false; } - - int current_exception_handler() { return current_exception_handler_; } - + int current_exception_handler() const { return current_exception_handler_; } void set_current_exception_handler(int index) { current_exception_handler_ = index; } - bool needs_eager_checkpoint() const { return needs_eager_checkpoint_; } void mark_as_needing_eager_checkpoint(bool value) { needs_eager_checkpoint_ = value; } - - SharedFunctionInfoRef shared_info() const { return shared_info_; } - - NativeContextRef native_context() const { return native_context_; } - JSHeapBroker* broker() const { return broker_; } + NativeContextRef native_context() const { return native_context_; } + SharedFunctionInfoRef shared_info() const { return shared_info_; } #define DECLARE_VISIT_BYTECODE(name, ...) void Visit##name(); BYTECODE_LIST(DECLARE_VISIT_BYTECODE) @@ -378,9 +365,11 @@ class BytecodeGraphBuilder { JSHeapBroker* const broker_; Zone* const local_zone_; JSGraph* const jsgraph_; + // The native context for which we optimize. + NativeContextRef const native_context_; + SharedFunctionInfoRef const shared_info_; + FeedbackVectorRef const feedback_vector_; CallFrequency const invocation_frequency_; - BytecodeArrayRef const bytecode_array_; - FeedbackVectorRef feedback_vector_; JSTypeHintLowering const type_hint_lowering_; const FrameStateFunctionInfo* const frame_state_function_info_; std::unique_ptr<SourcePositionTableIterator> source_position_iterator_; @@ -431,11 +420,6 @@ class BytecodeGraphBuilder { SourcePosition const start_position_; - SharedFunctionInfoRef const shared_info_; - - // The native context for which we optimize. 
- NativeContextRef const native_context_; - TickCounter* const tick_counter_; static int const kBinaryOperationHintIndex = 1; @@ -937,33 +921,36 @@ Node* BytecodeGraphBuilder::Environment::Checkpoint( } BytecodeGraphBuilder::BytecodeGraphBuilder( - JSHeapBroker* broker, Zone* local_zone, BytecodeArrayRef bytecode_array, - SharedFunctionInfoRef shared_info, FeedbackVectorRef feedback_vector, - BailoutId osr_offset, JSGraph* jsgraph, - CallFrequency const& invocation_frequency, - SourcePositionTable* source_positions, NativeContextRef native_context, - int inlining_id, BytecodeGraphBuilderFlags flags, TickCounter* tick_counter) + JSHeapBroker* broker, Zone* local_zone, + NativeContextRef const& native_context, + SharedFunctionInfoRef const& shared_info, + FeedbackVectorRef const& feedback_vector, BailoutId osr_offset, + JSGraph* jsgraph, CallFrequency const& invocation_frequency, + SourcePositionTable* source_positions, int inlining_id, + BytecodeGraphBuilderFlags flags, TickCounter* tick_counter) : broker_(broker), local_zone_(local_zone), jsgraph_(jsgraph), - invocation_frequency_(invocation_frequency), - bytecode_array_(bytecode_array), + native_context_(native_context), + shared_info_(shared_info), feedback_vector_(feedback_vector), + invocation_frequency_(invocation_frequency), type_hint_lowering_( - jsgraph, feedback_vector.object(), + broker, jsgraph, feedback_vector, (flags & BytecodeGraphBuilderFlag::kBailoutOnUninitialized) ? JSTypeHintLowering::kBailoutOnUninitialized : JSTypeHintLowering::kNoFlags), frame_state_function_info_(common()->CreateFrameStateFunctionInfo( FrameStateType::kInterpretedFunction, - bytecode_array.parameter_count(), bytecode_array.register_count(), + bytecode_array().parameter_count(), bytecode_array().register_count(), shared_info.object())), bytecode_iterator_( - base::make_unique<OffHeapBytecodeArray>(bytecode_array)), + base::make_unique<OffHeapBytecodeArray>(bytecode_array())), bytecode_analysis_(broker_->GetBytecodeAnalysis( - bytecode_array.object(), osr_offset, + bytecode_array().object(), osr_offset, flags & BytecodeGraphBuilderFlag::kAnalyzeEnvironmentLiveness, - !FLAG_concurrent_inlining)), + FLAG_concurrent_inlining ? SerializationPolicy::kAssumeSerialized + : SerializationPolicy::kSerializeIfNeeded)), environment_(nullptr), osr_(!osr_offset.IsNone()), currently_peeled_loop_offset_(-1), @@ -980,19 +967,17 @@ BytecodeGraphBuilder::BytecodeGraphBuilder( state_values_cache_(jsgraph), source_positions_(source_positions), start_position_(shared_info.StartPosition(), inlining_id), - shared_info_(shared_info), - native_context_(native_context), tick_counter_(tick_counter) { if (FLAG_concurrent_inlining) { // With concurrent inlining on, the source position address doesn't change // because it's been copied from the heap. source_position_iterator_ = base::make_unique<SourcePositionTableIterator>( - Vector<const byte>(bytecode_array.source_positions_address(), - bytecode_array.source_positions_size())); + Vector<const byte>(bytecode_array().source_positions_address(), + bytecode_array().source_positions_size())); } else { // Otherwise, we need to access the table through a handle. 
source_position_iterator_ = base::make_unique<SourcePositionTableIterator>( - handle(bytecode_array.object()->SourcePositionTableIfCollected(), + handle(bytecode_array().object()->SourcePositionTableIfCollected(), isolate())); } } @@ -1014,13 +999,13 @@ Node* BytecodeGraphBuilder::BuildLoadNativeContextField(int index) { return result; } -VectorSlotPair BytecodeGraphBuilder::CreateVectorSlotPair(int slot_id) { +FeedbackSource BytecodeGraphBuilder::CreateFeedbackSource(int slot_id) { FeedbackSlot slot = FeedbackVector::ToSlot(slot_id); - FeedbackNexus nexus(feedback_vector().object(), slot); - return VectorSlotPair(feedback_vector().object(), slot, nexus.ic_state()); + return FeedbackSource(feedback_vector(), slot); } void BytecodeGraphBuilder::CreateGraph() { + DisallowHeapAccessIf disallow_heap_access(FLAG_concurrent_inlining); SourcePositionTable::Scope pos_scope(source_positions_, start_position_); // Set up the basic structure of the graph. Outputs for {Start} are the formal @@ -1321,7 +1306,8 @@ void BytecodeGraphBuilder::VisitBytecodes() { VisitSingleBytecode(); } - if (has_one_shot_bytecode) { + if (!FLAG_concurrent_inlining && has_one_shot_bytecode) { + // (For concurrent inlining this is done in the serializer instead.) isolate()->CountUsage( v8::Isolate::UseCounterFeature::kOptimizedFunctionWithOneShotBytecode); } @@ -1340,8 +1326,9 @@ void BytecodeGraphBuilder::VisitLdaSmi() { } void BytecodeGraphBuilder::VisitLdaConstant() { - Node* node = jsgraph()->Constant( - bytecode_iterator().GetConstantForIndexOperand(0, isolate())); + ObjectRef object( + broker(), bytecode_iterator().GetConstantForIndexOperand(0, isolate())); + Node* node = jsgraph()->Constant(object); environment()->BindAccumulator(node); } @@ -1387,20 +1374,20 @@ void BytecodeGraphBuilder::VisitMov() { environment()->BindRegister(bytecode_iterator().GetRegisterOperand(1), value); } -Node* BytecodeGraphBuilder::BuildLoadGlobal(Handle<Name> name, +Node* BytecodeGraphBuilder::BuildLoadGlobal(NameRef name, uint32_t feedback_slot_index, TypeofMode typeof_mode) { - VectorSlotPair feedback = CreateVectorSlotPair(feedback_slot_index); - DCHECK( - IsLoadGlobalICKind(feedback_vector().object()->GetKind(feedback.slot()))); - const Operator* op = javascript()->LoadGlobal(name, feedback, typeof_mode); + FeedbackSource feedback = CreateFeedbackSource(feedback_slot_index); + DCHECK(IsLoadGlobalICKind(broker()->GetFeedbackSlotKind(feedback))); + const Operator* op = + javascript()->LoadGlobal(name.object(), feedback, typeof_mode); return NewNode(op); } void BytecodeGraphBuilder::VisitLdaGlobal() { PrepareEagerCheckpoint(); - Handle<Name> name = Handle<Name>::cast( - bytecode_iterator().GetConstantForIndexOperand(0, isolate())); + NameRef name(broker(), + bytecode_iterator().GetConstantForIndexOperand(0, isolate())); uint32_t feedback_slot_index = bytecode_iterator().GetIndexOperand(1); Node* node = BuildLoadGlobal(name, feedback_slot_index, TypeofMode::NOT_INSIDE_TYPEOF); @@ -1409,8 +1396,8 @@ void BytecodeGraphBuilder::VisitLdaGlobal() { void BytecodeGraphBuilder::VisitLdaGlobalInsideTypeof() { PrepareEagerCheckpoint(); - Handle<Name> name = Handle<Name>::cast( - bytecode_iterator().GetConstantForIndexOperand(0, isolate())); + NameRef name(broker(), + bytecode_iterator().GetConstantForIndexOperand(0, isolate())); uint32_t feedback_slot_index = bytecode_iterator().GetIndexOperand(1); Node* node = BuildLoadGlobal(name, feedback_slot_index, TypeofMode::INSIDE_TYPEOF); @@ -1419,15 +1406,16 @@ void 
BytecodeGraphBuilder::VisitLdaGlobalInsideTypeof() { void BytecodeGraphBuilder::VisitStaGlobal() { PrepareEagerCheckpoint(); - Handle<Name> name = Handle<Name>::cast( - bytecode_iterator().GetConstantForIndexOperand(0, isolate())); - VectorSlotPair feedback = - CreateVectorSlotPair(bytecode_iterator().GetIndexOperand(1)); + NameRef name(broker(), + bytecode_iterator().GetConstantForIndexOperand(0, isolate())); + FeedbackSource feedback = + CreateFeedbackSource(bytecode_iterator().GetIndexOperand(1)); Node* value = environment()->LookupAccumulator(); LanguageMode language_mode = - feedback.vector()->GetLanguageMode(feedback.slot()); - const Operator* op = javascript()->StoreGlobal(language_mode, name, feedback); + GetLanguageModeFromSlotKind(broker()->GetFeedbackSlotKind(feedback)); + const Operator* op = + javascript()->StoreGlobal(language_mode, name.object(), feedback); Node* node = NewNode(op, value); environment()->RecordAfterState(node, Environment::kAttachFrameState); } @@ -1439,12 +1427,12 @@ void BytecodeGraphBuilder::VisitStaInArrayLiteral() { environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0)); Node* index = environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(1)); - VectorSlotPair feedback = - CreateVectorSlotPair(bytecode_iterator().GetIndexOperand(2)); + FeedbackSource feedback = + CreateFeedbackSource(bytecode_iterator().GetIndexOperand(2)); const Operator* op = javascript()->StoreInArrayLiteral(feedback); JSTypeHintLowering::LoweringResult lowering = - TryBuildSimplifiedStoreKeyed(op, array, index, value, feedback.slot()); + TryBuildSimplifiedStoreKeyed(op, array, index, value, feedback.slot); if (lowering.IsExit()) return; Node* node = nullptr; @@ -1467,11 +1455,22 @@ void BytecodeGraphBuilder::VisitStaDataPropertyInLiteral() { environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(1)); Node* value = environment()->LookupAccumulator(); int flags = bytecode_iterator().GetFlagOperand(2); - VectorSlotPair feedback = - CreateVectorSlotPair(bytecode_iterator().GetIndexOperand(3)); - + FeedbackSource feedback = + CreateFeedbackSource(bytecode_iterator().GetIndexOperand(3)); const Operator* op = javascript()->StoreDataPropertyInLiteral(feedback); - Node* node = NewNode(op, object, name, value, jsgraph()->Constant(flags)); + + JSTypeHintLowering::LoweringResult lowering = + TryBuildSimplifiedStoreKeyed(op, object, name, value, feedback.slot); + if (lowering.IsExit()) return; + + Node* node = nullptr; + if (lowering.IsSideEffectFree()) { + node = lowering.value(); + } else { + DCHECK(!lowering.Changed()); + node = NewNode(op, object, name, value, jsgraph()->Constant(flags)); + } + environment()->RecordAfterState(node, Environment::kAttachFrameState); } @@ -1545,8 +1544,8 @@ void BytecodeGraphBuilder::VisitStaCurrentContextSlot() { void BytecodeGraphBuilder::BuildLdaLookupSlot(TypeofMode typeof_mode) { PrepareEagerCheckpoint(); - Node* name = jsgraph()->Constant( - bytecode_iterator().GetConstantForIndexOperand(0, isolate())); + Node* name = jsgraph()->Constant(ObjectRef( + broker(), bytecode_iterator().GetConstantForIndexOperand(0, isolate()))); const Operator* op = javascript()->CallRuntime(typeof_mode == TypeofMode::NOT_INSIDE_TYPEOF ? Runtime::kLoadLookupSlot @@ -1630,8 +1629,9 @@ void BytecodeGraphBuilder::BuildLdaLookupContextSlot(TypeofMode typeof_mode) { // Slow path, do a runtime load lookup. 
set_environment(slow_environment); { - Node* name = jsgraph()->Constant( - bytecode_iterator().GetConstantForIndexOperand(0, isolate())); + Node* name = jsgraph()->Constant(ObjectRef( + broker(), + bytecode_iterator().GetConstantForIndexOperand(0, isolate()))); const Operator* op = javascript()->CallRuntime(typeof_mode == TypeofMode::NOT_INSIDE_TYPEOF @@ -1666,8 +1666,8 @@ void BytecodeGraphBuilder::BuildLdaLookupGlobalSlot(TypeofMode typeof_mode) { // Fast path, do a global load. { PrepareEagerCheckpoint(); - Handle<Name> name = Handle<Name>::cast( - bytecode_iterator().GetConstantForIndexOperand(0, isolate())); + NameRef name(broker(), + bytecode_iterator().GetConstantForIndexOperand(0, isolate())); uint32_t feedback_slot_index = bytecode_iterator().GetIndexOperand(1); Node* node = BuildLoadGlobal(name, feedback_slot_index, typeof_mode); environment()->BindAccumulator(node, Environment::kAttachFrameState); @@ -1682,8 +1682,9 @@ void BytecodeGraphBuilder::BuildLdaLookupGlobalSlot(TypeofMode typeof_mode) { // Slow path, do a runtime load lookup. set_environment(slow_environment); { - Node* name = jsgraph()->Constant( - bytecode_iterator().GetConstantForIndexOperand(0, isolate())); + Node* name = jsgraph()->Constant(NameRef( + broker(), + bytecode_iterator().GetConstantForIndexOperand(0, isolate()))); const Operator* op = javascript()->CallRuntime(typeof_mode == TypeofMode::NOT_INSIDE_TYPEOF @@ -1712,8 +1713,8 @@ void BytecodeGraphBuilder::VisitLdaLookupGlobalSlotInsideTypeof() { void BytecodeGraphBuilder::VisitStaLookupSlot() { PrepareEagerCheckpoint(); Node* value = environment()->LookupAccumulator(); - Node* name = jsgraph()->Constant( - bytecode_iterator().GetConstantForIndexOperand(0, isolate())); + Node* name = jsgraph()->Constant(ObjectRef( + broker(), bytecode_iterator().GetConstantForIndexOperand(0, isolate()))); int bytecode_flags = bytecode_iterator().GetFlagOperand(1); LanguageMode language_mode = static_cast<LanguageMode>( interpreter::StoreLookupSlotFlags::LanguageModeBit::decode( @@ -1737,14 +1738,14 @@ void BytecodeGraphBuilder::VisitLdaNamedProperty() { PrepareEagerCheckpoint(); Node* object = environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0)); - Handle<Name> name = Handle<Name>::cast( - bytecode_iterator().GetConstantForIndexOperand(1, isolate())); - VectorSlotPair feedback = - CreateVectorSlotPair(bytecode_iterator().GetIndexOperand(2)); - const Operator* op = javascript()->LoadNamed(name, feedback); + NameRef name(broker(), + bytecode_iterator().GetConstantForIndexOperand(1, isolate())); + FeedbackSource feedback = + CreateFeedbackSource(bytecode_iterator().GetIndexOperand(2)); + const Operator* op = javascript()->LoadNamed(name.object(), feedback); JSTypeHintLowering::LoweringResult lowering = - TryBuildSimplifiedLoadNamed(op, object, feedback.slot()); + TryBuildSimplifiedLoadNamed(op, object, feedback.slot); if (lowering.IsExit()) return; Node* node = nullptr; @@ -1761,9 +1762,9 @@ void BytecodeGraphBuilder::VisitLdaNamedPropertyNoFeedback() { PrepareEagerCheckpoint(); Node* object = environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0)); - Handle<Name> name = Handle<Name>::cast( - bytecode_iterator().GetConstantForIndexOperand(1, isolate())); - const Operator* op = javascript()->LoadNamed(name, VectorSlotPair()); + NameRef name(broker(), + bytecode_iterator().GetConstantForIndexOperand(1, isolate())); + const Operator* op = javascript()->LoadNamed(name.object(), FeedbackSource()); Node* node = NewNode(op, object); 
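// The TryBuildSimplified* calls above and below all pattern-match on the
// same result contract. A simplified sketch of that contract (the real
// class is JSTypeHintLowering::LoweringResult in
// src/compiler/js-type-hint-lowering.h, which additionally threads effect
// and control edges):
class LoweringResult {
 public:
  // A deopt exit was wired in; the visitor abandons this bytecode.
  bool IsExit() const { return kind_ == Kind::kExit; }
  // A replacement value exists and no frame state needs attaching.
  bool IsSideEffectFree() const { return kind_ == Kind::kSideEffectFree; }
  bool Changed() const { return kind_ != Kind::kNoChange; }
  Node* value() const { return value_; }  // Valid when IsSideEffectFree().

 private:
  enum class Kind { kNoChange, kSideEffectFree, kExit };
  Kind kind_ = Kind::kNoChange;
  Node* value_ = nullptr;
};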
environment()->BindAccumulator(node, Environment::kAttachFrameState); } @@ -1773,12 +1774,12 @@ void BytecodeGraphBuilder::VisitLdaKeyedProperty() { Node* key = environment()->LookupAccumulator(); Node* object = environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0)); - VectorSlotPair feedback = - CreateVectorSlotPair(bytecode_iterator().GetIndexOperand(1)); + FeedbackSource feedback = + CreateFeedbackSource(bytecode_iterator().GetIndexOperand(1)); const Operator* op = javascript()->LoadProperty(feedback); JSTypeHintLowering::LoweringResult lowering = - TryBuildSimplifiedLoadKeyed(op, object, key, feedback.slot()); + TryBuildSimplifiedLoadKeyed(op, object, key, feedback.slot); if (lowering.IsExit()) return; Node* node = nullptr; @@ -1796,25 +1797,26 @@ void BytecodeGraphBuilder::BuildNamedStore(StoreMode store_mode) { Node* value = environment()->LookupAccumulator(); Node* object = environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0)); - Handle<Name> name = Handle<Name>::cast( - bytecode_iterator().GetConstantForIndexOperand(1, isolate())); - VectorSlotPair feedback = - CreateVectorSlotPair(bytecode_iterator().GetIndexOperand(2)); + NameRef name(broker(), + bytecode_iterator().GetConstantForIndexOperand(1, isolate())); + FeedbackSource feedback = + CreateFeedbackSource(bytecode_iterator().GetIndexOperand(2)); const Operator* op; if (store_mode == StoreMode::kOwn) { DCHECK_EQ(FeedbackSlotKind::kStoreOwnNamed, - feedback.vector()->GetKind(feedback.slot())); - op = javascript()->StoreNamedOwn(name, feedback); + broker()->GetFeedbackSlotKind(feedback)); + + op = javascript()->StoreNamedOwn(name.object(), feedback); } else { DCHECK_EQ(StoreMode::kNormal, store_mode); LanguageMode language_mode = - feedback.vector()->GetLanguageMode(feedback.slot()); - op = javascript()->StoreNamed(language_mode, name, feedback); + GetLanguageModeFromSlotKind(broker()->GetFeedbackSlotKind(feedback)); + op = javascript()->StoreNamed(language_mode, name.object(), feedback); } JSTypeHintLowering::LoweringResult lowering = - TryBuildSimplifiedStoreNamed(op, object, value, feedback.slot()); + TryBuildSimplifiedStoreNamed(op, object, value, feedback.slot); if (lowering.IsExit()) return; Node* node = nullptr; @@ -1836,12 +1838,12 @@ void BytecodeGraphBuilder::VisitStaNamedPropertyNoFeedback() { Node* value = environment()->LookupAccumulator(); Node* object = environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0)); - Handle<Name> name = Handle<Name>::cast( - bytecode_iterator().GetConstantForIndexOperand(1, isolate())); + NameRef name(broker(), + bytecode_iterator().GetConstantForIndexOperand(1, isolate())); LanguageMode language_mode = static_cast<LanguageMode>(bytecode_iterator().GetFlagOperand(2)); const Operator* op = - javascript()->StoreNamed(language_mode, name, VectorSlotPair()); + javascript()->StoreNamed(language_mode, name.object(), FeedbackSource()); Node* node = NewNode(op, object, value); environment()->RecordAfterState(node, Environment::kAttachFrameState); } @@ -1857,14 +1859,14 @@ void BytecodeGraphBuilder::VisitStaKeyedProperty() { environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0)); Node* key = environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(1)); - VectorSlotPair feedback = - CreateVectorSlotPair(bytecode_iterator().GetIndexOperand(2)); + FeedbackSource source = + CreateFeedbackSource(bytecode_iterator().GetIndexOperand(2)); LanguageMode language_mode = - feedback.vector()->GetLanguageMode(feedback.slot()); 
- const Operator* op = javascript()->StoreProperty(language_mode, feedback); + GetLanguageModeFromSlotKind(broker()->GetFeedbackSlotKind(source)); + const Operator* op = javascript()->StoreProperty(language_mode, source); JSTypeHintLowering::LoweringResult lowering = - TryBuildSimplifiedStoreKeyed(op, object, key, value, feedback.slot()); + TryBuildSimplifiedStoreKeyed(op, object, key, value, source.slot); if (lowering.IsExit()) return; Node* node = nullptr; @@ -1910,71 +1912,76 @@ void BytecodeGraphBuilder::VisitPopContext() { } void BytecodeGraphBuilder::VisitCreateClosure() { - Handle<SharedFunctionInfo> shared_info = Handle<SharedFunctionInfo>::cast( - bytecode_iterator().GetConstantForIndexOperand(0, isolate())); + SharedFunctionInfoRef shared_info( + broker(), bytecode_iterator().GetConstantForIndexOperand(0, isolate())); AllocationType allocation = interpreter::CreateClosureFlags::PretenuredBit::decode( bytecode_iterator().GetFlagOperand(2)) ? AllocationType::kOld : AllocationType::kYoung; + const Operator* op = javascript()->CreateClosure( - shared_info, - feedback_vector().object()->GetClosureFeedbackCell( - bytecode_iterator().GetIndexOperand(1)), - handle(jsgraph()->isolate()->builtins()->builtin(Builtins::kCompileLazy), - isolate()), + shared_info.object(), + feedback_vector() + .GetClosureFeedbackCell(bytecode_iterator().GetIndexOperand(1)) + .object(), + jsgraph()->isolate()->builtins()->builtin_handle(Builtins::kCompileLazy), allocation); Node* closure = NewNode(op); environment()->BindAccumulator(closure); } void BytecodeGraphBuilder::VisitCreateBlockContext() { - Handle<ScopeInfo> scope_info = Handle<ScopeInfo>::cast( - bytecode_iterator().GetConstantForIndexOperand(0, isolate())); - - const Operator* op = javascript()->CreateBlockContext(scope_info); + DisallowHeapAccessIf no_heap_access(FLAG_concurrent_inlining); + ScopeInfoRef scope_info( + broker(), bytecode_iterator().GetConstantForIndexOperand(0, isolate())); + const Operator* op = javascript()->CreateBlockContext(scope_info.object()); Node* context = NewNode(op); environment()->BindAccumulator(context); } void BytecodeGraphBuilder::VisitCreateFunctionContext() { - Handle<ScopeInfo> scope_info = Handle<ScopeInfo>::cast( - bytecode_iterator().GetConstantForIndexOperand(0, isolate())); + DisallowHeapAccessIf no_heap_access(FLAG_concurrent_inlining); + ScopeInfoRef scope_info( + broker(), bytecode_iterator().GetConstantForIndexOperand(0, isolate())); uint32_t slots = bytecode_iterator().GetUnsignedImmediateOperand(1); - const Operator* op = - javascript()->CreateFunctionContext(scope_info, slots, FUNCTION_SCOPE); + const Operator* op = javascript()->CreateFunctionContext( + scope_info.object(), slots, FUNCTION_SCOPE); Node* context = NewNode(op); environment()->BindAccumulator(context); } void BytecodeGraphBuilder::VisitCreateEvalContext() { - Handle<ScopeInfo> scope_info = Handle<ScopeInfo>::cast( - bytecode_iterator().GetConstantForIndexOperand(0, isolate())); + DisallowHeapAccessIf no_heap_access(FLAG_concurrent_inlining); + ScopeInfoRef scope_info( + broker(), bytecode_iterator().GetConstantForIndexOperand(0, isolate())); uint32_t slots = bytecode_iterator().GetUnsignedImmediateOperand(1); - const Operator* op = - javascript()->CreateFunctionContext(scope_info, slots, EVAL_SCOPE); + const Operator* op = javascript()->CreateFunctionContext(scope_info.object(), + slots, EVAL_SCOPE); Node* context = NewNode(op); environment()->BindAccumulator(context); } void BytecodeGraphBuilder::VisitCreateCatchContext() { + 
DisallowHeapAccessIf no_heap_access(FLAG_concurrent_inlining); interpreter::Register reg = bytecode_iterator().GetRegisterOperand(0); Node* exception = environment()->LookupRegister(reg); - Handle<ScopeInfo> scope_info = Handle<ScopeInfo>::cast( - bytecode_iterator().GetConstantForIndexOperand(1, isolate())); + ScopeInfoRef scope_info( + broker(), bytecode_iterator().GetConstantForIndexOperand(1, isolate())); - const Operator* op = javascript()->CreateCatchContext(scope_info); + const Operator* op = javascript()->CreateCatchContext(scope_info.object()); Node* context = NewNode(op, exception); environment()->BindAccumulator(context); } void BytecodeGraphBuilder::VisitCreateWithContext() { + DisallowHeapAccessIf no_heap_access(FLAG_concurrent_inlining); Node* object = environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0)); - Handle<ScopeInfo> scope_info = Handle<ScopeInfo>::cast( - bytecode_iterator().GetConstantForIndexOperand(1, isolate())); + ScopeInfoRef scope_info( + broker(), bytecode_iterator().GetConstantForIndexOperand(1, isolate())); - const Operator* op = javascript()->CreateWithContext(scope_info); + const Operator* op = javascript()->CreateWithContext(scope_info.object()); Node* context = NewNode(op, object); environment()->BindAccumulator(context); } @@ -1998,22 +2005,21 @@ void BytecodeGraphBuilder::VisitCreateRestParameter() { } void BytecodeGraphBuilder::VisitCreateRegExpLiteral() { - Handle<String> constant_pattern = Handle<String>::cast( - bytecode_iterator().GetConstantForIndexOperand(0, isolate())); + StringRef constant_pattern( + broker(), bytecode_iterator().GetConstantForIndexOperand(0, isolate())); int const slot_id = bytecode_iterator().GetIndexOperand(1); - VectorSlotPair pair = CreateVectorSlotPair(slot_id); + FeedbackSource pair = CreateFeedbackSource(slot_id); int literal_flags = bytecode_iterator().GetFlagOperand(2); - Node* literal = NewNode( - javascript()->CreateLiteralRegExp(constant_pattern, pair, literal_flags)); + Node* literal = NewNode(javascript()->CreateLiteralRegExp( + constant_pattern.object(), pair, literal_flags)); environment()->BindAccumulator(literal, Environment::kAttachFrameState); } void BytecodeGraphBuilder::VisitCreateArrayLiteral() { - Handle<ArrayBoilerplateDescription> array_boilerplate_description = - Handle<ArrayBoilerplateDescription>::cast( - bytecode_iterator().GetConstantForIndexOperand(0, isolate())); + ArrayBoilerplateDescriptionRef array_boilerplate_description( + broker(), bytecode_iterator().GetConstantForIndexOperand(0, isolate())); int const slot_id = bytecode_iterator().GetIndexOperand(1); - VectorSlotPair pair = CreateVectorSlotPair(slot_id); + FeedbackSource pair = CreateFeedbackSource(slot_id); int bytecode_flags = bytecode_iterator().GetFlagOperand(2); int literal_flags = interpreter::CreateArrayLiteralFlags::FlagsBits::decode(bytecode_flags); @@ -2025,15 +2031,16 @@ void BytecodeGraphBuilder::VisitCreateArrayLiteral() { // TODO(mstarzinger): Thread through number of elements. The below number is // only an estimate and does not match {ArrayLiteral::values::length}. 
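// The DisallowHeapAccessIf guards introduced in the VisitCreate*Context
// methods above have roughly this shape (assumption: modeled on the scopes
// in src/common/assert-scope.h). When --concurrent-inlining is off, the
// guard is a no-op; when it is on, any handle dereference or heap
// allocation inside the scope trips a DCHECK, which is how these visitors
// prove they operate on broker data only.
class DisallowHeapAccessIf {
 public:
  explicit DisallowHeapAccessIf(bool condition) {
    if (condition) maybe_disallow_.emplace();
  }

 private:
  base::Optional<DisallowHeapAccess> maybe_disallow_;
};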
int number_of_elements = - array_boilerplate_description->constant_elements().length(); + array_boilerplate_description.constants_elements_length(); Node* literal = NewNode(javascript()->CreateLiteralArray( - array_boilerplate_description, pair, literal_flags, number_of_elements)); + array_boilerplate_description.object(), pair, literal_flags, + number_of_elements)); environment()->BindAccumulator(literal, Environment::kAttachFrameState); } void BytecodeGraphBuilder::VisitCreateEmptyArrayLiteral() { int const slot_id = bytecode_iterator().GetIndexOperand(0); - VectorSlotPair pair = CreateVectorSlotPair(slot_id); + FeedbackSource pair = CreateFeedbackSource(slot_id); Node* literal = NewNode(javascript()->CreateEmptyLiteralArray(pair)); environment()->BindAccumulator(literal); } @@ -2045,19 +2052,18 @@ void BytecodeGraphBuilder::VisitCreateArrayFromIterable() { } void BytecodeGraphBuilder::VisitCreateObjectLiteral() { - Handle<ObjectBoilerplateDescription> constant_properties = - Handle<ObjectBoilerplateDescription>::cast( - bytecode_iterator().GetConstantForIndexOperand(0, isolate())); + ObjectBoilerplateDescriptionRef constant_properties( + broker(), bytecode_iterator().GetConstantForIndexOperand(0, isolate())); int const slot_id = bytecode_iterator().GetIndexOperand(1); - VectorSlotPair pair = CreateVectorSlotPair(slot_id); + FeedbackSource pair = CreateFeedbackSource(slot_id); int bytecode_flags = bytecode_iterator().GetFlagOperand(2); int literal_flags = interpreter::CreateObjectLiteralFlags::FlagsBits::decode(bytecode_flags); // TODO(mstarzinger): Thread through number of properties. The below number is // only an estimate and does not match {ObjectLiteral::properties_count}. - int number_of_properties = constant_properties->size(); + int number_of_properties = constant_properties.size(); Node* literal = NewNode(javascript()->CreateLiteralObject( - constant_properties, pair, literal_flags, number_of_properties)); + constant_properties.object(), pair, literal_flags, number_of_properties)); environment()->BindAccumulator(literal, Environment::kAttachFrameState); } @@ -2074,7 +2080,7 @@ void BytecodeGraphBuilder::VisitCloneObject() { int flags = bytecode_iterator().GetFlagOperand(1); int slot = bytecode_iterator().GetIndexOperand(2); const Operator* op = - javascript()->CloneObject(CreateVectorSlotPair(slot), flags); + javascript()->CloneObject(CreateFeedbackSource(slot), flags); Node* value = NewNode(op, source); environment()->BindAccumulator(value, Environment::kAttachFrameState); } @@ -2140,14 +2146,14 @@ void BytecodeGraphBuilder::BuildCall(ConvertReceiverMode receiver_mode, receiver_mode); PrepareEagerCheckpoint(); - VectorSlotPair feedback = CreateVectorSlotPair(slot_id); - + FeedbackSource feedback = CreateFeedbackSource(slot_id); CallFrequency frequency = ComputeCallFrequency(slot_id); - const Operator* op = - javascript()->Call(arg_count, frequency, feedback, receiver_mode, - GetSpeculationMode(slot_id)); + SpeculationMode speculation_mode = GetSpeculationMode(slot_id); + const Operator* op = javascript()->Call(arg_count, frequency, feedback, + receiver_mode, speculation_mode); + JSTypeHintLowering::LoweringResult lowering = TryBuildSimplifiedCall( - op, args, static_cast<int>(arg_count), feedback.slot()); + op, args, static_cast<int>(arg_count), feedback.slot); if (lowering.IsExit()) return; Node* node = nullptr; @@ -2325,14 +2331,13 @@ void BytecodeGraphBuilder::VisitCallWithSpread() { Node* const* args = GetCallArgumentsFromRegisters(callee, receiver_node, first_arg, 
arg_count); int const slot_id = bytecode_iterator().GetIndexOperand(3); - VectorSlotPair feedback = CreateVectorSlotPair(slot_id); - + FeedbackSource feedback = CreateFeedbackSource(slot_id); CallFrequency frequency = ComputeCallFrequency(slot_id); const Operator* op = javascript()->CallWithSpread( static_cast<int>(reg_count + 1), frequency, feedback); JSTypeHintLowering::LoweringResult lowering = TryBuildSimplifiedCall( - op, args, static_cast<int>(arg_count), feedback.slot()); + op, args, static_cast<int>(arg_count), feedback.slot); if (lowering.IsExit()) return; Node* node = nullptr; @@ -2438,7 +2443,7 @@ void BytecodeGraphBuilder::VisitConstruct() { interpreter::Register first_reg = bytecode_iterator().GetRegisterOperand(1); size_t reg_count = bytecode_iterator().GetRegisterCountOperand(2); int const slot_id = bytecode_iterator().GetIndexOperand(3); - VectorSlotPair feedback = CreateVectorSlotPair(slot_id); + FeedbackSource feedback = CreateFeedbackSource(slot_id); Node* new_target = environment()->LookupAccumulator(); Node* callee = environment()->LookupRegister(callee_reg); @@ -2450,7 +2455,7 @@ void BytecodeGraphBuilder::VisitConstruct() { Node* const* args = GetConstructArgumentsFromRegister(callee, new_target, first_reg, arg_count); JSTypeHintLowering::LoweringResult lowering = TryBuildSimplifiedConstruct( - op, args, static_cast<int>(arg_count), feedback.slot()); + op, args, static_cast<int>(arg_count), feedback.slot); if (lowering.IsExit()) return; Node* node = nullptr; @@ -2469,7 +2474,7 @@ void BytecodeGraphBuilder::VisitConstructWithSpread() { interpreter::Register first_reg = bytecode_iterator().GetRegisterOperand(1); size_t reg_count = bytecode_iterator().GetRegisterCountOperand(2); int const slot_id = bytecode_iterator().GetIndexOperand(3); - VectorSlotPair feedback = CreateVectorSlotPair(slot_id); + FeedbackSource feedback = CreateFeedbackSource(slot_id); Node* new_target = environment()->LookupAccumulator(); Node* callee = environment()->LookupRegister(callee_reg); @@ -2481,7 +2486,7 @@ void BytecodeGraphBuilder::VisitConstructWithSpread() { Node* const* args = GetConstructArgumentsFromRegister(callee, new_target, first_reg, arg_count); JSTypeHintLowering::LoweringResult lowering = TryBuildSimplifiedConstruct( - op, args, static_cast<int>(arg_count), feedback.slot()); + op, args, static_cast<int>(arg_count), feedback.slot); if (lowering.IsExit()) return; Node* node = nullptr; @@ -2568,8 +2573,8 @@ void BytecodeGraphBuilder::VisitThrowReferenceErrorIfHole() { Node* accumulator = environment()->LookupAccumulator(); Node* check_for_hole = NewNode(simplified()->ReferenceEqual(), accumulator, jsgraph()->TheHoleConstant()); - Node* name = jsgraph()->Constant( - bytecode_iterator().GetConstantForIndexOperand(0, isolate())); + Node* name = jsgraph()->Constant(ObjectRef( + broker(), bytecode_iterator().GetConstantForIndexOperand(0, isolate()))); BuildHoleCheckAndThrow(check_for_hole, Runtime::kThrowAccessedUninitializedVariable, name); } @@ -2640,23 +2645,23 @@ void BytecodeGraphBuilder::BuildBinaryOp(const Operator* op) { BinaryOperationHint BytecodeGraphBuilder::GetBinaryOperationHint( int operand_index) { FeedbackSlot slot = bytecode_iterator().GetSlotOperand(operand_index); - FeedbackNexus nexus(feedback_vector().object(), slot); - return nexus.GetBinaryOperationFeedback(); + FeedbackSource source(feedback_vector(), slot); + return broker()->GetFeedbackForBinaryOperation(source); } // Helper function to create compare operation hint from the recorded type // feedback. 
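// The hint getters in this region all share one broker-mediated pattern:
// build a FeedbackSource, query the broker once, and refine only when the
// feedback is sufficient. A sketch of that pattern (the helper name is
// hypothetical; GetFeedbackForCall, IsInsufficient and AsCall().frequency()
// are taken from the ComputeCallFrequency hunk below):
float CallFrequencyFromBroker(JSHeapBroker* broker,
                              FeedbackSource const& source) {
  ProcessedFeedback const& feedback = broker->GetFeedbackForCall(source);
  // Insufficient feedback means nothing useful was recorded yet, so fall
  // back to the conservative default rather than speculating.
  if (feedback.IsInsufficient()) return 0.0f;
  return feedback.AsCall().frequency();
}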
CompareOperationHint BytecodeGraphBuilder::GetCompareOperationHint() { FeedbackSlot slot = bytecode_iterator().GetSlotOperand(1); - FeedbackNexus nexus(feedback_vector().object(), slot); - return nexus.GetCompareOperationFeedback(); + FeedbackSource source(feedback_vector(), slot); + return broker()->GetFeedbackForCompareOperation(source); } // Helper function to create for-in mode from the recorded type feedback. ForInMode BytecodeGraphBuilder::GetForInMode(int operand_index) { FeedbackSlot slot = bytecode_iterator().GetSlotOperand(operand_index); - FeedbackNexus nexus(feedback_vector().object(), slot); - switch (nexus.GetForInFeedback()) { + FeedbackSource source(feedback_vector(), slot); + switch (broker()->GetFeedbackForForIn(source)) { case ForInHint::kNone: case ForInHint::kEnumCacheKeysAndIndices: return ForInMode::kUseEnumCacheKeysAndIndices; @@ -2670,11 +2675,12 @@ ForInMode BytecodeGraphBuilder::GetForInMode(int operand_index) { CallFrequency BytecodeGraphBuilder::ComputeCallFrequency(int slot_id) const { if (invocation_frequency_.IsUnknown()) return CallFrequency(); - FeedbackNexus nexus(feedback_vector().object(), - FeedbackVector::ToSlot(slot_id)); - float feedback_frequency = nexus.ComputeCallFrequency(); - if (feedback_frequency == 0.0f) { - // This is to prevent multiplying zero and infinity. + FeedbackSlot slot = FeedbackVector::ToSlot(slot_id); + FeedbackSource source(feedback_vector(), slot); + ProcessedFeedback const& feedback = broker()->GetFeedbackForCall(source); + float feedback_frequency = + feedback.IsInsufficient() ? 0.0f : feedback.AsCall().frequency(); + if (feedback_frequency == 0.0f) { // Prevent multiplying zero and infinity. return CallFrequency(0.0f); } else { return CallFrequency(feedback_frequency * invocation_frequency_.value()); @@ -2682,9 +2688,11 @@ CallFrequency BytecodeGraphBuilder::ComputeCallFrequency(int slot_id) const { } SpeculationMode BytecodeGraphBuilder::GetSpeculationMode(int slot_id) const { - FeedbackNexus nexus(feedback_vector().object(), - FeedbackVector::ToSlot(slot_id)); - return nexus.GetSpeculationMode(); + FeedbackSlot slot = FeedbackVector::ToSlot(slot_id); + FeedbackSource source(feedback_vector(), slot); + ProcessedFeedback const& feedback = broker()->GetFeedbackForCall(source); + return feedback.IsInsufficient() ? 
SpeculationMode::kDisallowSpeculation + : feedback.AsCall().speculation_mode(); } void BytecodeGraphBuilder::VisitBitwiseNot() { @@ -2922,15 +2930,15 @@ void BytecodeGraphBuilder::VisitTestIn() { Node* object = environment()->LookupAccumulator(); Node* key = environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0)); - VectorSlotPair feedback = - CreateVectorSlotPair(bytecode_iterator().GetIndexOperand(1)); + FeedbackSource feedback = + CreateFeedbackSource(bytecode_iterator().GetIndexOperand(1)); Node* node = NewNode(javascript()->HasProperty(feedback), object, key); environment()->BindAccumulator(node, Environment::kAttachFrameState); } void BytecodeGraphBuilder::VisitTestInstanceOf() { int const slot_index = bytecode_iterator().GetIndexOperand(1); - BuildCompareOp(javascript()->InstanceOf(CreateVectorSlotPair(slot_index))); + BuildCompareOp(javascript()->InstanceOf(CreateFeedbackSource(slot_index))); } void BytecodeGraphBuilder::VisitTestUndetectable() { @@ -3132,6 +3140,16 @@ void BytecodeGraphBuilder::VisitJumpIfNotUndefinedConstant() { BuildJumpIfNotEqual(jsgraph()->UndefinedConstant()); } +void BytecodeGraphBuilder::VisitJumpIfUndefinedOrNull() { + BuildJumpIfEqual(jsgraph()->UndefinedConstant()); + BuildJumpIfEqual(jsgraph()->NullConstant()); +} + +void BytecodeGraphBuilder::VisitJumpIfUndefinedOrNullConstant() { + BuildJumpIfEqual(jsgraph()->UndefinedConstant()); + BuildJumpIfEqual(jsgraph()->NullConstant()); +} + void BytecodeGraphBuilder::VisitJumpLoop() { BuildJump(); } void BytecodeGraphBuilder::BuildSwitchOnSmi(Node* condition) { @@ -3151,7 +3169,7 @@ void BytecodeGraphBuilder::VisitSwitchOnSmiNoFeedback() { PrepareEagerCheckpoint(); Node* acc = environment()->LookupAccumulator(); - Node* acc_smi = NewNode(simplified()->CheckSmi(VectorSlotPair()), acc); + Node* acc_smi = NewNode(simplified()->CheckSmi(FeedbackSource()), acc); BuildSwitchOnSmi(acc_smi); } @@ -3277,6 +3295,23 @@ void BytecodeGraphBuilder::VisitForInStep() { environment()->BindAccumulator(index, Environment::kAttachFrameState); } +void BytecodeGraphBuilder::VisitGetIterator() { + PrepareEagerCheckpoint(); + Node* object = + environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0)); + FeedbackSource feedback = + CreateFeedbackSource(bytecode_iterator().GetIndexOperand(1)); + const Operator* op = javascript()->GetIterator(feedback); + + JSTypeHintLowering::LoweringResult lowering = + TryBuildSimplifiedLoadNamed(op, object, feedback.slot); + if (lowering.IsExit()) return; + + DCHECK(!lowering.Changed()); + Node* node = NewNode(op, object); + environment()->BindAccumulator(node, Environment::kAttachFrameState); +} + void BytecodeGraphBuilder::VisitSuspendGenerator() { Node* generator = environment()->LookupRegister( bytecode_iterator().GetRegisterOperand(0)); @@ -4016,25 +4051,18 @@ void BytecodeGraphBuilder::UpdateSourcePosition(int offset) { } void BuildGraphFromBytecode(JSHeapBroker* broker, Zone* local_zone, - Handle<BytecodeArray> bytecode_array, - Handle<SharedFunctionInfo> shared, - Handle<FeedbackVector> feedback_vector, + SharedFunctionInfoRef const& shared_info, + FeedbackVectorRef const& feedback_vector, BailoutId osr_offset, JSGraph* jsgraph, CallFrequency const& invocation_frequency, SourcePositionTable* source_positions, - Handle<NativeContext> native_context, int inlining_id, BytecodeGraphBuilderFlags flags, TickCounter* tick_counter) { - BytecodeArrayRef bytecode_array_ref(broker, bytecode_array); - DCHECK(bytecode_array_ref.IsSerializedForCompilation()); - 
FeedbackVectorRef feedback_vector_ref(broker, feedback_vector); - SharedFunctionInfoRef shared_ref(broker, shared); - DCHECK(shared_ref.IsSerializedForCompilation(feedback_vector_ref)); - NativeContextRef native_context_ref(broker, native_context); + DCHECK(shared_info.IsSerializedForCompilation(feedback_vector)); BytecodeGraphBuilder builder( - broker, local_zone, bytecode_array_ref, shared_ref, feedback_vector_ref, - osr_offset, jsgraph, invocation_frequency, source_positions, - native_context_ref, inlining_id, flags, tick_counter); + broker, local_zone, broker->target_native_context(), shared_info, + feedback_vector, osr_offset, jsgraph, invocation_frequency, + source_positions, inlining_id, flags, tick_counter); builder.CreateGraph(); } diff --git a/deps/v8/src/compiler/bytecode-graph-builder.h b/deps/v8/src/compiler/bytecode-graph-builder.h index 682569778f..03e900c214 100644 --- a/deps/v8/src/compiler/bytecode-graph-builder.h +++ b/deps/v8/src/compiler/bytecode-graph-builder.h @@ -39,13 +39,11 @@ using BytecodeGraphBuilderFlags = base::Flags<BytecodeGraphBuilderFlag>; // Note: {invocation_frequency} is taken by reference to work around a GCC bug // on AIX (v8:8193). void BuildGraphFromBytecode(JSHeapBroker* broker, Zone* local_zone, - Handle<BytecodeArray> bytecode_array, - Handle<SharedFunctionInfo> shared, - Handle<FeedbackVector> feedback_vector, + SharedFunctionInfoRef const& shared_info, + FeedbackVectorRef const& feedback_vector, BailoutId osr_offset, JSGraph* jsgraph, CallFrequency const& invocation_frequency, SourcePositionTable* source_positions, - Handle<NativeContext> native_context, int inlining_id, BytecodeGraphBuilderFlags flags, TickCounter* tick_counter); diff --git a/deps/v8/src/compiler/c-linkage.cc b/deps/v8/src/compiler/c-linkage.cc index e472a6a72c..428ba058a7 100644 --- a/deps/v8/src/compiler/c-linkage.cc +++ b/deps/v8/src/compiler/c-linkage.cc @@ -140,8 +140,9 @@ namespace { // General code uses the above configuration data. -CallDescriptor* Linkage::GetSimplifiedCDescriptor( - Zone* zone, const MachineSignature* msig, bool set_initialize_root_flag) { +CallDescriptor* Linkage::GetSimplifiedCDescriptor(Zone* zone, + const MachineSignature* msig, + CallDescriptor::Flags flags) { DCHECK_LE(msig->parameter_count(), static_cast<size_t>(kMaxCParameters)); LocationSignature::Builder locations(zone, msig->return_count(), @@ -220,10 +221,7 @@ CallDescriptor* Linkage::GetSimplifiedCDescriptor( // The target for C calls is always an address (i.e. machine pointer). 
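// Caller-visible effect of the GetSimplifiedCDescriptor change in this hunk:
// the bool that used to toggle kInitializeRootRegister becomes a general
// flags argument, and kNoAllocate is now OR-ed in unconditionally inside the
// function. A hypothetical before/after call site:
//
//   // Before:
//   Linkage::GetSimplifiedCDescriptor(zone, sig,
//                                     /*set_initialize_root_flag=*/true);
//   // After:
//   Linkage::GetSimplifiedCDescriptor(
//       zone, sig, CallDescriptor::kInitializeRootRegister);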
MachineType target_type = MachineType::Pointer(); LinkageLocation target_loc = LinkageLocation::ForAnyRegister(target_type); - CallDescriptor::Flags flags = CallDescriptor::kNoAllocate; - if (set_initialize_root_flag) { - flags |= CallDescriptor::kInitializeRootRegister; - } + flags |= CallDescriptor::kNoAllocate; return new (zone) CallDescriptor( // -- CallDescriptor::kCallAddress, // kind diff --git a/deps/v8/src/compiler/code-assembler.cc b/deps/v8/src/compiler/code-assembler.cc index af0ba98ffd..4f18011463 100644 --- a/deps/v8/src/compiler/code-assembler.cc +++ b/deps/v8/src/compiler/code-assembler.cc @@ -34,9 +34,9 @@ namespace compiler { static_assert(std::is_convertible<TNode<Number>, TNode<Object>>::value, "test subtyping"); -static_assert(std::is_convertible<TNode<UnionT<Smi, HeapNumber>>, - TNode<UnionT<Smi, HeapObject>>>::value, - "test subtyping"); +static_assert( + std::is_convertible<TNode<Number>, TNode<UnionT<Smi, HeapObject>>>::value, + "test subtyping"); static_assert( !std::is_convertible<TNode<UnionT<Smi, HeapObject>>, TNode<Number>>::value, "test subtyping"); @@ -188,6 +188,7 @@ Handle<Code> CodeAssembler::GenerateCode(CodeAssemblerState* state, } bool CodeAssembler::Is64() const { return raw_assembler()->machine()->Is64(); } +bool CodeAssembler::Is32() const { return raw_assembler()->machine()->Is32(); } bool CodeAssembler::IsFloat64RoundUpSupported() const { return raw_assembler()->machine()->Float64RoundUp().IsSupported(); @@ -228,7 +229,7 @@ void CodeAssembler::GenerateCheckMaybeObjectIsObject(Node* node, &ok); EmbeddedVector<char, 1024> message; SNPrintF(message, "no Object: %s", location); - Node* message_node = StringConstant(message.begin()); + TNode<String> message_node = StringConstant(message.begin()); // This somewhat misuses the AbortCSAAssert runtime function. This will print // "abort: CSA_ASSERT failed: <message>", which is good enough. AbortCSAAssert(message_node); @@ -259,7 +260,7 @@ TNode<Number> CodeAssembler::NumberConstant(double value) { // (see AllocateAndInstallRequestedHeapObjects) since that makes it easier // to generate constant lookups for embedded builtins. 
return UncheckedCast<Number>(HeapConstant( - isolate()->factory()->NewHeapNumber(value, AllocationType::kOld))); + isolate()->factory()->NewHeapNumberForCodeAssembler(value))); } } @@ -299,16 +300,12 @@ TNode<Float64T> CodeAssembler::Float64Constant(double value) { return UncheckedCast<Float64T>(raw_assembler()->Float64Constant(value)); } -TNode<HeapNumber> CodeAssembler::NaNConstant() { - return UncheckedCast<HeapNumber>(LoadRoot(RootIndex::kNanValue)); -} - -bool CodeAssembler::ToInt32Constant(Node* node, int32_t& out_value) { +bool CodeAssembler::ToInt32Constant(Node* node, int32_t* out_value) { { Int64Matcher m(node); if (m.HasValue() && m.IsInRange(std::numeric_limits<int32_t>::min(), std::numeric_limits<int32_t>::max())) { - out_value = static_cast<int32_t>(m.Value()); + *out_value = static_cast<int32_t>(m.Value()); return true; } } @@ -316,7 +313,7 @@ bool CodeAssembler::ToInt32Constant(Node* node, int32_t& out_value) { { Int32Matcher m(node); if (m.HasValue()) { - out_value = m.Value(); + *out_value = m.Value(); return true; } } @@ -324,9 +321,9 @@ bool CodeAssembler::ToInt32Constant(Node* node, int32_t& out_value) { return false; } -bool CodeAssembler::ToInt64Constant(Node* node, int64_t& out_value) { +bool CodeAssembler::ToInt64Constant(Node* node, int64_t* out_value) { Int64Matcher m(node); - if (m.HasValue()) out_value = m.Value(); + if (m.HasValue()) *out_value = m.Value(); return m.HasValue(); } @@ -345,13 +342,13 @@ bool CodeAssembler::ToSmiConstant(Node* node, Smi* out_value) { return false; } -bool CodeAssembler::ToIntPtrConstant(Node* node, intptr_t& out_value) { +bool CodeAssembler::ToIntPtrConstant(Node* node, intptr_t* out_value) { if (node->opcode() == IrOpcode::kBitcastWordToTaggedSigned || node->opcode() == IrOpcode::kBitcastWordToTagged) { node = node->InputAt(0); } IntPtrMatcher m(node); - if (m.HasValue()) out_value = m.Value(); + if (m.HasValue()) *out_value = m.Value(); return m.HasValue(); } @@ -383,6 +380,9 @@ TNode<Context> CodeAssembler::GetJSContextParameter() { } void CodeAssembler::Return(SloppyTNode<Object> value) { + // TODO(leszeks): This could also return a non-object, depending on the call + // descriptor. We should probably have multiple return overloads with + // different TNode types which DCHECK the call descriptor. 
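// The constant probes (ToInt32Constant, ToInt64Constant, ToIntPtrConstant)
// now take pointer out-parameters per the style guide. A hypothetical helper
// showing the call pattern every folding routine below follows
// (TryGetShiftCount is not real API):
bool TryGetShiftCount(CodeAssembler* assembler, Node* node, int32_t* shift) {
  int32_t constant;
  // The probe returns false when |node| is not a compile-time constant.
  if (!assembler->ToInt32Constant(node, &constant)) return false;
  if (constant < 0 || constant > 31) return false;  // Not a valid shift.
  *shift = constant;
  return true;
}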
return raw_assembler()->Return(value); } @@ -453,10 +453,6 @@ TNode<RawPtrT> CodeAssembler::LoadParentFramePointer() { return UncheckedCast<RawPtrT>(raw_assembler()->LoadParentFramePointer()); } -TNode<RawPtrT> CodeAssembler::LoadStackPointer() { - return UncheckedCast<RawPtrT>(raw_assembler()->LoadStackPointer()); -} - TNode<Object> CodeAssembler::TaggedPoisonOnSpeculation( SloppyTNode<Object> value) { return UncheckedCast<Object>( @@ -478,9 +474,9 @@ CODE_ASSEMBLER_BINARY_OP_LIST(DEFINE_CODE_ASSEMBLER_BINARY_OP) TNode<WordT> CodeAssembler::IntPtrAdd(SloppyTNode<WordT> left, SloppyTNode<WordT> right) { intptr_t left_constant; - bool is_left_constant = ToIntPtrConstant(left, left_constant); + bool is_left_constant = ToIntPtrConstant(left, &left_constant); intptr_t right_constant; - bool is_right_constant = ToIntPtrConstant(right, right_constant); + bool is_right_constant = ToIntPtrConstant(right, &right_constant); if (is_left_constant) { if (is_right_constant) { return IntPtrConstant(left_constant + right_constant); @@ -499,9 +495,9 @@ TNode<WordT> CodeAssembler::IntPtrAdd(SloppyTNode<WordT> left, TNode<IntPtrT> CodeAssembler::IntPtrDiv(TNode<IntPtrT> left, TNode<IntPtrT> right) { intptr_t left_constant; - bool is_left_constant = ToIntPtrConstant(left, left_constant); + bool is_left_constant = ToIntPtrConstant(left, &left_constant); intptr_t right_constant; - bool is_right_constant = ToIntPtrConstant(right, right_constant); + bool is_right_constant = ToIntPtrConstant(right, &right_constant); if (is_right_constant) { if (is_left_constant) { return IntPtrConstant(left_constant / right_constant); @@ -516,9 +512,9 @@ TNode<IntPtrT> CodeAssembler::IntPtrDiv(TNode<IntPtrT> left, TNode<WordT> CodeAssembler::IntPtrSub(SloppyTNode<WordT> left, SloppyTNode<WordT> right) { intptr_t left_constant; - bool is_left_constant = ToIntPtrConstant(left, left_constant); + bool is_left_constant = ToIntPtrConstant(left, &left_constant); intptr_t right_constant; - bool is_right_constant = ToIntPtrConstant(right, right_constant); + bool is_right_constant = ToIntPtrConstant(right, &right_constant); if (is_left_constant) { if (is_right_constant) { return IntPtrConstant(left_constant - right_constant); @@ -534,9 +530,9 @@ TNode<WordT> CodeAssembler::IntPtrSub(SloppyTNode<WordT> left, TNode<WordT> CodeAssembler::IntPtrMul(SloppyTNode<WordT> left, SloppyTNode<WordT> right) { intptr_t left_constant; - bool is_left_constant = ToIntPtrConstant(left, left_constant); + bool is_left_constant = ToIntPtrConstant(left, &left_constant); intptr_t right_constant; - bool is_right_constant = ToIntPtrConstant(right, right_constant); + bool is_right_constant = ToIntPtrConstant(right, &right_constant); if (is_left_constant) { if (is_right_constant) { return IntPtrConstant(left_constant * right_constant); @@ -568,12 +564,16 @@ TNode<Word32T> CodeAssembler::Word32Shr(SloppyTNode<Word32T> value, int shift) { return (shift != 0) ? Word32Shr(value, Int32Constant(shift)) : value; } +TNode<Word32T> CodeAssembler::Word32Sar(SloppyTNode<Word32T> value, int shift) { + return (shift != 0) ? 
Word32Sar(value, Int32Constant(shift)) : value; +} + TNode<WordT> CodeAssembler::WordOr(SloppyTNode<WordT> left, SloppyTNode<WordT> right) { intptr_t left_constant; - bool is_left_constant = ToIntPtrConstant(left, left_constant); + bool is_left_constant = ToIntPtrConstant(left, &left_constant); intptr_t right_constant; - bool is_right_constant = ToIntPtrConstant(right, right_constant); + bool is_right_constant = ToIntPtrConstant(right, &right_constant); if (is_left_constant) { if (is_right_constant) { return IntPtrConstant(left_constant | right_constant); @@ -592,9 +592,9 @@ TNode<WordT> CodeAssembler::WordOr(SloppyTNode<WordT> left, TNode<WordT> CodeAssembler::WordAnd(SloppyTNode<WordT> left, SloppyTNode<WordT> right) { intptr_t left_constant; - bool is_left_constant = ToIntPtrConstant(left, left_constant); + bool is_left_constant = ToIntPtrConstant(left, &left_constant); intptr_t right_constant; - bool is_right_constant = ToIntPtrConstant(right, right_constant); + bool is_right_constant = ToIntPtrConstant(right, &right_constant); if (is_left_constant) { if (is_right_constant) { return IntPtrConstant(left_constant & right_constant); @@ -606,9 +606,9 @@ TNode<WordT> CodeAssembler::WordAnd(SloppyTNode<WordT> left, TNode<WordT> CodeAssembler::WordXor(SloppyTNode<WordT> left, SloppyTNode<WordT> right) { intptr_t left_constant; - bool is_left_constant = ToIntPtrConstant(left, left_constant); + bool is_left_constant = ToIntPtrConstant(left, &left_constant); intptr_t right_constant; - bool is_right_constant = ToIntPtrConstant(right, right_constant); + bool is_right_constant = ToIntPtrConstant(right, &right_constant); if (is_left_constant) { if (is_right_constant) { return IntPtrConstant(left_constant ^ right_constant); @@ -620,9 +620,9 @@ TNode<WordT> CodeAssembler::WordXor(SloppyTNode<WordT> left, TNode<WordT> CodeAssembler::WordShl(SloppyTNode<WordT> left, SloppyTNode<IntegralT> right) { intptr_t left_constant; - bool is_left_constant = ToIntPtrConstant(left, left_constant); + bool is_left_constant = ToIntPtrConstant(left, &left_constant); intptr_t right_constant; - bool is_right_constant = ToIntPtrConstant(right, right_constant); + bool is_right_constant = ToIntPtrConstant(right, &right_constant); if (is_left_constant) { if (is_right_constant) { return IntPtrConstant(left_constant << right_constant); @@ -638,9 +638,9 @@ TNode<WordT> CodeAssembler::WordShl(SloppyTNode<WordT> left, TNode<WordT> CodeAssembler::WordShr(SloppyTNode<WordT> left, SloppyTNode<IntegralT> right) { intptr_t left_constant; - bool is_left_constant = ToIntPtrConstant(left, left_constant); + bool is_left_constant = ToIntPtrConstant(left, &left_constant); intptr_t right_constant; - bool is_right_constant = ToIntPtrConstant(right, right_constant); + bool is_right_constant = ToIntPtrConstant(right, &right_constant); if (is_left_constant) { if (is_right_constant) { return IntPtrConstant(static_cast<uintptr_t>(left_constant) >> @@ -657,9 +657,9 @@ TNode<WordT> CodeAssembler::WordShr(SloppyTNode<WordT> left, TNode<WordT> CodeAssembler::WordSar(SloppyTNode<WordT> left, SloppyTNode<IntegralT> right) { intptr_t left_constant; - bool is_left_constant = ToIntPtrConstant(left, left_constant); + bool is_left_constant = ToIntPtrConstant(left, &left_constant); intptr_t right_constant; - bool is_right_constant = ToIntPtrConstant(right, right_constant); + bool is_right_constant = ToIntPtrConstant(right, &right_constant); if (is_left_constant) { if (is_right_constant) { return IntPtrConstant(left_constant >> right_constant); @@ -675,9 +675,9 
@@ TNode<WordT> CodeAssembler::WordSar(SloppyTNode<WordT> left, TNode<Word32T> CodeAssembler::Word32Or(SloppyTNode<Word32T> left, SloppyTNode<Word32T> right) { int32_t left_constant; - bool is_left_constant = ToInt32Constant(left, left_constant); + bool is_left_constant = ToInt32Constant(left, &left_constant); int32_t right_constant; - bool is_right_constant = ToInt32Constant(right, right_constant); + bool is_right_constant = ToInt32Constant(right, &right_constant); if (is_left_constant) { if (is_right_constant) { return Int32Constant(left_constant | right_constant); @@ -696,9 +696,9 @@ TNode<Word32T> CodeAssembler::Word32Or(SloppyTNode<Word32T> left, TNode<Word32T> CodeAssembler::Word32And(SloppyTNode<Word32T> left, SloppyTNode<Word32T> right) { int32_t left_constant; - bool is_left_constant = ToInt32Constant(left, left_constant); + bool is_left_constant = ToInt32Constant(left, &left_constant); int32_t right_constant; - bool is_right_constant = ToInt32Constant(right, right_constant); + bool is_right_constant = ToInt32Constant(right, &right_constant); if (is_left_constant) { if (is_right_constant) { return Int32Constant(left_constant & right_constant); @@ -710,9 +710,9 @@ TNode<Word32T> CodeAssembler::Word32And(SloppyTNode<Word32T> left, TNode<Word32T> CodeAssembler::Word32Xor(SloppyTNode<Word32T> left, SloppyTNode<Word32T> right) { int32_t left_constant; - bool is_left_constant = ToInt32Constant(left, left_constant); + bool is_left_constant = ToInt32Constant(left, &left_constant); int32_t right_constant; - bool is_right_constant = ToInt32Constant(right, right_constant); + bool is_right_constant = ToInt32Constant(right, &right_constant); if (is_left_constant) { if (is_right_constant) { return Int32Constant(left_constant ^ right_constant); @@ -724,9 +724,9 @@ TNode<Word32T> CodeAssembler::Word32Xor(SloppyTNode<Word32T> left, TNode<Word32T> CodeAssembler::Word32Shl(SloppyTNode<Word32T> left, SloppyTNode<Word32T> right) { int32_t left_constant; - bool is_left_constant = ToInt32Constant(left, left_constant); + bool is_left_constant = ToInt32Constant(left, &left_constant); int32_t right_constant; - bool is_right_constant = ToInt32Constant(right, right_constant); + bool is_right_constant = ToInt32Constant(right, &right_constant); if (is_left_constant) { if (is_right_constant) { return Int32Constant(left_constant << right_constant); @@ -742,9 +742,9 @@ TNode<Word32T> CodeAssembler::Word32Shl(SloppyTNode<Word32T> left, TNode<Word32T> CodeAssembler::Word32Shr(SloppyTNode<Word32T> left, SloppyTNode<Word32T> right) { int32_t left_constant; - bool is_left_constant = ToInt32Constant(left, left_constant); + bool is_left_constant = ToInt32Constant(left, &left_constant); int32_t right_constant; - bool is_right_constant = ToInt32Constant(right, right_constant); + bool is_right_constant = ToInt32Constant(right, &right_constant); if (is_left_constant) { if (is_right_constant) { return Int32Constant(static_cast<uint32_t>(left_constant) >> @@ -761,9 +761,9 @@ TNode<Word32T> CodeAssembler::Word32Shr(SloppyTNode<Word32T> left, TNode<Word32T> CodeAssembler::Word32Sar(SloppyTNode<Word32T> left, SloppyTNode<Word32T> right) { int32_t left_constant; - bool is_left_constant = ToInt32Constant(left, left_constant); + bool is_left_constant = ToInt32Constant(left, &left_constant); int32_t right_constant; - bool is_right_constant = ToInt32Constant(right, right_constant); + bool is_right_constant = ToInt32Constant(right, &right_constant); if (is_left_constant) { if (is_right_constant) { return Int32Constant(left_constant >> 
right_constant); @@ -779,9 +779,9 @@ TNode<Word32T> CodeAssembler::Word32Sar(SloppyTNode<Word32T> left, TNode<Word64T> CodeAssembler::Word64Or(SloppyTNode<Word64T> left, SloppyTNode<Word64T> right) { int64_t left_constant; - bool is_left_constant = ToInt64Constant(left, left_constant); + bool is_left_constant = ToInt64Constant(left, &left_constant); int64_t right_constant; - bool is_right_constant = ToInt64Constant(right, right_constant); + bool is_right_constant = ToInt64Constant(right, &right_constant); if (is_left_constant) { if (is_right_constant) { return Int64Constant(left_constant | right_constant); @@ -800,9 +800,9 @@ TNode<Word64T> CodeAssembler::Word64Or(SloppyTNode<Word64T> left, TNode<Word64T> CodeAssembler::Word64And(SloppyTNode<Word64T> left, SloppyTNode<Word64T> right) { int64_t left_constant; - bool is_left_constant = ToInt64Constant(left, left_constant); + bool is_left_constant = ToInt64Constant(left, &left_constant); int64_t right_constant; - bool is_right_constant = ToInt64Constant(right, right_constant); + bool is_right_constant = ToInt64Constant(right, &right_constant); if (is_left_constant) { if (is_right_constant) { return Int64Constant(left_constant & right_constant); @@ -814,9 +814,9 @@ TNode<Word64T> CodeAssembler::Word64And(SloppyTNode<Word64T> left, TNode<Word64T> CodeAssembler::Word64Xor(SloppyTNode<Word64T> left, SloppyTNode<Word64T> right) { int64_t left_constant; - bool is_left_constant = ToInt64Constant(left, left_constant); + bool is_left_constant = ToInt64Constant(left, &left_constant); int64_t right_constant; - bool is_right_constant = ToInt64Constant(right, right_constant); + bool is_right_constant = ToInt64Constant(right, &right_constant); if (is_left_constant) { if (is_right_constant) { return Int64Constant(left_constant ^ right_constant); @@ -828,9 +828,9 @@ TNode<Word64T> CodeAssembler::Word64Xor(SloppyTNode<Word64T> left, TNode<Word64T> CodeAssembler::Word64Shl(SloppyTNode<Word64T> left, SloppyTNode<Word64T> right) { int64_t left_constant; - bool is_left_constant = ToInt64Constant(left, left_constant); + bool is_left_constant = ToInt64Constant(left, &left_constant); int64_t right_constant; - bool is_right_constant = ToInt64Constant(right, right_constant); + bool is_right_constant = ToInt64Constant(right, &right_constant); if (is_left_constant) { if (is_right_constant) { return Int64Constant(left_constant << right_constant); @@ -846,9 +846,9 @@ TNode<Word64T> CodeAssembler::Word64Shl(SloppyTNode<Word64T> left, TNode<Word64T> CodeAssembler::Word64Shr(SloppyTNode<Word64T> left, SloppyTNode<Word64T> right) { int64_t left_constant; - bool is_left_constant = ToInt64Constant(left, left_constant); + bool is_left_constant = ToInt64Constant(left, &left_constant); int64_t right_constant; - bool is_right_constant = ToInt64Constant(right, right_constant); + bool is_right_constant = ToInt64Constant(right, &right_constant); if (is_left_constant) { if (is_right_constant) { return Int64Constant(static_cast<uint64_t>(left_constant) >> @@ -865,9 +865,9 @@ TNode<Word64T> CodeAssembler::Word64Shr(SloppyTNode<Word64T> left, TNode<Word64T> CodeAssembler::Word64Sar(SloppyTNode<Word64T> left, SloppyTNode<Word64T> right) { int64_t left_constant; - bool is_left_constant = ToInt64Constant(left, left_constant); + bool is_left_constant = ToInt64Constant(left, &left_constant); int64_t right_constant; - bool is_right_constant = ToInt64Constant(right, right_constant); + bool is_right_constant = ToInt64Constant(right, &right_constant); if (is_left_constant) { if (is_right_constant) 
{ return Int64Constant(left_constant >> right_constant); @@ -880,14 +880,13 @@ TNode<Word64T> CodeAssembler::Word64Sar(SloppyTNode<Word64T> left, return UncheckedCast<Word64T>(raw_assembler()->Word64Sar(left, right)); } -#define CODE_ASSEMBLER_COMPARE(Name, ArgT, VarT, ToConstant, op) \ - TNode<BoolT> CodeAssembler::Name(SloppyTNode<ArgT> left, \ - SloppyTNode<ArgT> right) { \ - VarT lhs, rhs; \ - if (ToConstant(left, lhs) && ToConstant(right, rhs)) { \ - return BoolConstant(lhs op rhs); \ - } \ - return UncheckedCast<BoolT>(raw_assembler()->Name(left, right)); \ +#define CODE_ASSEMBLER_COMPARE(Name, ArgT, VarT, ToConstant, op) \ + TNode<BoolT> CodeAssembler::Name(TNode<ArgT> left, TNode<ArgT> right) { \ + VarT lhs, rhs; \ + if (ToConstant(left, &lhs) && ToConstant(right, &rhs)) { \ + return BoolConstant(lhs op rhs); \ + } \ + return UncheckedCast<BoolT>(raw_assembler()->Name(left, right)); \ } CODE_ASSEMBLER_COMPARE(IntPtrEqual, WordT, intptr_t, ToIntPtrConstant, ==) @@ -959,14 +958,14 @@ Node* CodeAssembler::Load(MachineType type, Node* base, Node* offset, return raw_assembler()->Load(type, base, offset, needs_poisoning); } -Node* CodeAssembler::LoadFullTagged(Node* base, - LoadSensitivity needs_poisoning) { +TNode<Object> CodeAssembler::LoadFullTagged(Node* base, + LoadSensitivity needs_poisoning) { return BitcastWordToTagged( Load(MachineType::Pointer(), base, needs_poisoning)); } -Node* CodeAssembler::LoadFullTagged(Node* base, Node* offset, - LoadSensitivity needs_poisoning) { +TNode<Object> CodeAssembler::LoadFullTagged(Node* base, Node* offset, + LoadSensitivity needs_poisoning) { return BitcastWordToTagged( Load(MachineType::Pointer(), base, offset, needs_poisoning)); } @@ -993,7 +992,7 @@ TNode<Object> CodeAssembler::LoadRoot(RootIndex root_index) { // TODO(jgruber): In theory we could generate better code for this by // letting the macro assembler decide how to load from the roots list. In most // cases, it would boil down to loading from a fixed kRootRegister offset. - Node* isolate_root = + TNode<ExternalReference> isolate_root = ExternalConstant(ExternalReference::isolate_root(isolate())); int offset = IsolateData::root_slot_offset(root_index); return UncheckedCast<Object>( @@ -1133,7 +1132,7 @@ Node* CodeAssembler::AtomicCompareExchange(MachineType type, Node* base, Node* CodeAssembler::StoreRoot(RootIndex root_index, Node* value) { DCHECK(!RootsTable::IsImmortalImmovable(root_index)); - Node* isolate_root = + TNode<ExternalReference> isolate_root = ExternalConstant(ExternalReference::isolate_root(isolate())); int offset = IsolateData::root_slot_offset(root_index); return StoreFullTaggedNoWriteBarrier(isolate_root, IntPtrConstant(offset), @@ -1248,8 +1247,9 @@ TNode<Object> CodeAssembler::CallRuntimeWithCEntryImpl( Runtime::MayAllocate(function) ? 
CallDescriptor::kNoFlags : CallDescriptor::kNoAllocate); - Node* ref = ExternalConstant(ExternalReference::Create(function)); - Node* arity = Int32Constant(argc); + TNode<ExternalReference> ref = + ExternalConstant(ExternalReference::Create(function)); + TNode<Int32T> arity = Int32Constant(argc); NodeArray<kMaxNumArgs + 4> inputs; inputs.Add(centry); @@ -1285,7 +1285,8 @@ void CodeAssembler::TailCallRuntimeWithCEntryImpl( zone(), function, argc, Operator::kNoProperties, CallDescriptor::kNoFlags); - Node* ref = ExternalConstant(ExternalReference::Create(function)); + TNode<ExternalReference> ref = + ExternalConstant(ExternalReference::Create(function)); NodeArray<kMaxNumArgs + 4> inputs; inputs.Add(centry); @@ -1468,7 +1469,7 @@ void CodeAssembler::GotoIfNot(SloppyTNode<IntegralT> condition, void CodeAssembler::Branch(SloppyTNode<IntegralT> condition, Label* true_label, Label* false_label) { int32_t constant; - if (ToInt32Constant(condition, constant)) { + if (ToInt32Constant(condition, &constant)) { if ((true_label->is_used() || true_label->is_bound()) && (false_label->is_used() || false_label->is_bound())) { return Goto(constant ? true_label : false_label); @@ -1484,7 +1485,7 @@ void CodeAssembler::Branch(TNode<BoolT> condition, const std::function<void()>& true_body, const std::function<void()>& false_body) { int32_t constant; - if (ToInt32Constant(condition, constant)) { + if (ToInt32Constant(condition, &constant)) { return constant ? true_body() : false_body(); } @@ -1501,7 +1502,7 @@ void CodeAssembler::Branch(TNode<BoolT> condition, void CodeAssembler::Branch(TNode<BoolT> condition, Label* true_label, const std::function<void()>& false_body) { int32_t constant; - if (ToInt32Constant(condition, constant)) { + if (ToInt32Constant(condition, &constant)) { return constant ? Goto(true_label) : false_body(); } @@ -1515,7 +1516,7 @@ void CodeAssembler::Branch(TNode<BoolT> condition, const std::function<void()>& true_body, Label* false_label) { int32_t constant; - if (ToInt32Constant(condition, constant)) { + if (ToInt32Constant(condition, &constant)) { return constant ? true_body() : Goto(false_label); } diff --git a/deps/v8/src/compiler/code-assembler.h b/deps/v8/src/compiler/code-assembler.h index cc432214aa..c9adb1601d 100644 --- a/deps/v8/src/compiler/code-assembler.h +++ b/deps/v8/src/compiler/code-assembler.h @@ -5,9 +5,9 @@ #ifndef V8_COMPILER_CODE_ASSEMBLER_H_ #define V8_COMPILER_CODE_ASSEMBLER_H_ +#include <initializer_list> #include <map> #include <memory> -#include <initializer_list> // Clients of this interface shouldn't depend on lots of compiler internals. // Do not include anything from src/compiler here! 
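// Illustration of the TNode-ification running through these code-assembler
// hunks (a fragment; the types come from this header). Precisely typed
// results let the C++ compiler reject tag confusion that raw Node* silently
// allowed:
//
//   TNode<ExternalReference> ref =
//       ExternalConstant(ExternalReference::Create(function));
//   TNode<Int32T> arity = Int32Constant(argc);
//   TNode<Object> receiver = LoadRoot(RootIndex::kUndefinedValue);
//   // Passing |ref| where a TNode<Object> is expected now fails to compile
//   // instead of building an ill-typed graph that misbehaves at runtime.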
@@ -43,7 +43,6 @@ class BigInt; class CallInterfaceDescriptor; class Callable; class Factory; -class FinalizationGroupCleanupJobTask; class InterpreterData; class Isolate; class JSAsyncFunctionObject; @@ -317,6 +316,7 @@ class CompilationCacheTable; class Constructor; class Filler; class FunctionTemplateRareData; +class HeapNumber; class InternalizedString; class JSArgumentsObject; class JSArrayBufferView; @@ -324,7 +324,6 @@ class JSContextExtensionObject; class JSError; class JSSloppyArgumentsObject; class MapCache; -class MutableHeapNumber; class NativeContext; class NumberWrapper; class ScriptWrapper; @@ -645,7 +644,7 @@ TNode<Float64T> Float64Add(TNode<Float64T> a, TNode<Float64T> b); V(BitcastInt32ToFloat32, Float32T, Word32T) \ V(BitcastFloat32ToInt32, Uint32T, Float32T) \ V(RoundFloat64ToInt32, Int32T, Float64T) \ - V(RoundInt32ToFloat32, Int32T, Float32T) \ + V(RoundInt32ToFloat32, Float32T, Int32T) \ V(Float64SilenceNaN, Float64T, Float64T) \ V(Float64RoundDown, Float64T, Float64T) \ V(Float64RoundUp, Float64T, Float64T) \ @@ -657,7 +656,8 @@ TNode<Float64T> Float64Add(TNode<Float64T> a, TNode<Float64T> b); V(Int32AbsWithOverflow, PAIR_TYPE(Int32T, BoolT), Int32T) \ V(Int64AbsWithOverflow, PAIR_TYPE(Int64T, BoolT), Int64T) \ V(IntPtrAbsWithOverflow, PAIR_TYPE(IntPtrT, BoolT), IntPtrT) \ - V(Word32BinaryNot, BoolT, Word32T) + V(Word32BinaryNot, BoolT, Word32T) \ + V(StackPointerGreaterThan, BoolT, WordT) // A "public" interface used by components outside of compiler directory to // create code objects with TurboFan's backend. This class is mostly a thin @@ -688,6 +688,7 @@ class V8_EXPORT_PRIVATE CodeAssembler { const AssemblerOptions& options); bool Is64() const; + bool Is32() const; bool IsFloat64RoundUpSupported() const; bool IsFloat64RoundDownSupported() const; bool IsFloat64RoundTiesEvenSupported() const; @@ -738,7 +739,7 @@ class V8_EXPORT_PRIVATE CodeAssembler { if (std::is_same<PreviousType, MaybeObject>::value) { code_assembler_->GenerateCheckMaybeObjectIsObject(node_, location_); } - Node* function = code_assembler_->ExternalConstant( + TNode<ExternalReference> function = code_assembler_->ExternalConstant( ExternalReference::check_object_type()); code_assembler_->CallCFunction( function, MachineType::AnyTagged(), @@ -842,7 +843,6 @@ class V8_EXPORT_PRIVATE CodeAssembler { TNode<Oddball> BooleanConstant(bool value); TNode<ExternalReference> ExternalConstant(ExternalReference address); TNode<Float64T> Float64Constant(double value); - TNode<HeapNumber> NaNConstant(); TNode<BoolT> Int32TrueConstant() { return ReinterpretCast<BoolT>(Int32Constant(1)); } @@ -853,15 +853,10 @@ class V8_EXPORT_PRIVATE CodeAssembler { return value ? Int32TrueConstant() : Int32FalseConstant(); } - // TODO(jkummerow): The style guide wants pointers for output parameters. 
- // https://google.github.io/styleguide/cppguide.html#Output_Parameters - bool ToInt32Constant(Node* node, - int32_t& out_value); // NOLINT(runtime/references) - bool ToInt64Constant(Node* node, - int64_t& out_value); // NOLINT(runtime/references) + bool ToInt32Constant(Node* node, int32_t* out_value); + bool ToInt64Constant(Node* node, int64_t* out_value); + bool ToIntPtrConstant(Node* node, intptr_t* out_value); bool ToSmiConstant(Node* node, Smi* out_value); - bool ToIntPtrConstant(Node* node, - intptr_t& out_value); // NOLINT(runtime/references) bool IsUndefinedConstant(TNode<Object> node); bool IsNullConstant(TNode<Object> node); @@ -959,9 +954,6 @@ class V8_EXPORT_PRIVATE CodeAssembler { TNode<RawPtrT> LoadFramePointer(); TNode<RawPtrT> LoadParentFramePointer(); - // Access to the stack pointer - TNode<RawPtrT> LoadStackPointer(); - // Poison |value| on speculative paths. TNode<Object> TaggedPoisonOnSpeculation(SloppyTNode<Object> value); TNode<WordT> WordPoisonOnSpeculation(SloppyTNode<WordT> value); @@ -977,12 +969,24 @@ class V8_EXPORT_PRIVATE CodeAssembler { } Node* Load(MachineType type, Node* base, Node* offset, LoadSensitivity needs_poisoning = LoadSensitivity::kSafe); + template <class Type> + TNode<Type> Load(Node* base, + LoadSensitivity needs_poisoning = LoadSensitivity::kSafe) { + return UncheckedCast<Type>( + Load(MachineTypeOf<Type>::value, base, needs_poisoning)); + } + template <class Type> + TNode<Type> Load(Node* base, SloppyTNode<WordT> offset, + LoadSensitivity needs_poisoning = LoadSensitivity::kSafe) { + return UncheckedCast<Type>( + Load(MachineTypeOf<Type>::value, base, offset, needs_poisoning)); + } Node* AtomicLoad(MachineType type, Node* base, Node* offset); // Load uncompressed tagged value from (most likely off JS heap) memory // location. 
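// Usage of the typed Load overloads added above: MachineTypeOf<Type> derives
// the machine type from the requested TNode type, so the two can no longer
// drift apart as with the untyped Load(MachineType, ...) form. (A fragment;
// |counter_address|, |table_base| and kOffset are hypothetical.)
//
//   TNode<IntPtrT> count = Load<IntPtrT>(counter_address);
//   TNode<Object> element =
//       Load<Object>(table_base, IntPtrConstant(kOffset));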
- Node* LoadFullTagged( + TNode<Object> LoadFullTagged( Node* base, LoadSensitivity needs_poisoning = LoadSensitivity::kSafe); - Node* LoadFullTagged( + TNode<Object> LoadFullTagged( Node* base, Node* offset, LoadSensitivity needs_poisoning = LoadSensitivity::kSafe); @@ -1119,50 +1123,13 @@ class V8_EXPORT_PRIVATE CodeAssembler { Word32Or(static_cast<Node*>(left), static_cast<Node*>(right))); } - template <class Left, class Right, - class = typename std::enable_if< - std::is_base_of<Object, Left>::value && - std::is_base_of<Object, Right>::value>::type> - TNode<BoolT> WordEqual(TNode<Left> left, TNode<Right> right) { - return WordEqual(ReinterpretCast<WordT>(left), - ReinterpretCast<WordT>(right)); - } - TNode<BoolT> WordEqual(TNode<Object> left, Node* right) { - return WordEqual(ReinterpretCast<WordT>(left), - ReinterpretCast<WordT>(right)); - } - TNode<BoolT> WordEqual(Node* left, TNode<Object> right) { - return WordEqual(ReinterpretCast<WordT>(left), - ReinterpretCast<WordT>(right)); - } - template <class Left, class Right, - class = typename std::enable_if< - std::is_base_of<Object, Left>::value && - std::is_base_of<Object, Right>::value>::type> - TNode<BoolT> WordNotEqual(TNode<Left> left, TNode<Right> right) { - return WordNotEqual(ReinterpretCast<WordT>(left), - ReinterpretCast<WordT>(right)); - } - TNode<BoolT> WordNotEqual(TNode<Object> left, Node* right) { - return WordNotEqual(ReinterpretCast<WordT>(left), - ReinterpretCast<WordT>(right)); - } - TNode<BoolT> WordNotEqual(Node* left, TNode<Object> right) { - return WordNotEqual(ReinterpretCast<WordT>(left), - ReinterpretCast<WordT>(right)); - } - - TNode<BoolT> IntPtrEqual(SloppyTNode<WordT> left, SloppyTNode<WordT> right); - TNode<BoolT> WordEqual(SloppyTNode<WordT> left, SloppyTNode<WordT> right); - TNode<BoolT> WordNotEqual(SloppyTNode<WordT> left, SloppyTNode<WordT> right); - TNode<BoolT> Word32Equal(SloppyTNode<Word32T> left, - SloppyTNode<Word32T> right); - TNode<BoolT> Word32NotEqual(SloppyTNode<Word32T> left, - SloppyTNode<Word32T> right); - TNode<BoolT> Word64Equal(SloppyTNode<Word64T> left, - SloppyTNode<Word64T> right); - TNode<BoolT> Word64NotEqual(SloppyTNode<Word64T> left, - SloppyTNode<Word64T> right); + TNode<BoolT> IntPtrEqual(TNode<WordT> left, TNode<WordT> right); + TNode<BoolT> WordEqual(TNode<WordT> left, TNode<WordT> right); + TNode<BoolT> WordNotEqual(TNode<WordT> left, TNode<WordT> right); + TNode<BoolT> Word32Equal(TNode<Word32T> left, TNode<Word32T> right); + TNode<BoolT> Word32NotEqual(TNode<Word32T> left, TNode<Word32T> right); + TNode<BoolT> Word64Equal(TNode<Word64T> left, TNode<Word64T> right); + TNode<BoolT> Word64NotEqual(TNode<Word64T> left, TNode<Word64T> right); TNode<BoolT> Word32Or(TNode<BoolT> left, TNode<BoolT> right) { return UncheckedCast<BoolT>( @@ -1234,6 +1201,7 @@ class V8_EXPORT_PRIVATE CodeAssembler { return UncheckedCast<IntPtrT>(WordSar(static_cast<Node*>(value), shift)); } TNode<Word32T> Word32Shr(SloppyTNode<Word32T> value, int shift); + TNode<Word32T> Word32Sar(SloppyTNode<Word32T> value, int shift); TNode<WordT> WordOr(SloppyTNode<WordT> left, SloppyTNode<WordT> right); TNode<WordT> WordAnd(SloppyTNode<WordT> left, SloppyTNode<WordT> right); @@ -1433,7 +1401,7 @@ class V8_EXPORT_PRIVATE CodeAssembler { Node* CallJS(Callable const& callable, Node* context, Node* function, Node* receiver, TArgs... 
args) { int argc = static_cast<int>(sizeof...(args)); - Node* arity = Int32Constant(argc); + TNode<Int32T> arity = Int32Constant(argc); return CallStub(callable, context, function, arity, receiver, args...); } @@ -1441,8 +1409,8 @@ Node* ConstructJSWithTarget(Callable const& callable, Node* context, Node* target, Node* new_target, TArgs... args) { int argc = static_cast<int>(sizeof...(args)); - Node* arity = Int32Constant(argc); - Node* receiver = LoadRoot(RootIndex::kUndefinedValue); + TNode<Int32T> arity = Int32Constant(argc); + TNode<Object> receiver = LoadRoot(RootIndex::kUndefinedValue); // Construct(target, new_target, arity, receiver, arguments...) return CallStub(callable, context, target, new_target, arity, receiver, @@ -1842,9 +1810,11 @@ class V8_EXPORT_PRIVATE CodeAssemblerScopedExceptionHandler { } // namespace compiler -#if defined(V8_HOST_ARCH_32_BIT) +#if defined(V8_HOST_ARCH_32_BIT) || defined(V8_COMPRESS_POINTERS) +#define BINT_IS_SMI using BInt = Smi; #elif defined(V8_HOST_ARCH_64_BIT) +#define BINT_IS_INTPTR using BInt = IntPtrT; #else #error Unknown architecture. diff --git a/deps/v8/src/compiler/common-operator.cc b/deps/v8/src/compiler/common-operator.cc index 0ef6402264..c2d8cf4469 100644 --- a/deps/v8/src/compiler/common-operator.cc +++ b/deps/v8/src/compiler/common-operator.cc @@ -99,7 +99,8 @@ bool operator!=(DeoptimizeParameters lhs, DeoptimizeParameters rhs) { } size_t hash_value(DeoptimizeParameters p) { - return base::hash_combine(p.kind(), p.reason(), p.feedback(), + FeedbackSource::Hash feedback_hash; + return base::hash_combine(p.kind(), p.reason(), feedback_hash(p.feedback()), p.is_safety_check()); } @@ -179,7 +180,6 @@ SelectParameters const& SelectParametersOf(const Operator* const op) { CallDescriptor const* CallDescriptorOf(const Operator* const op) { DCHECK(op->opcode() == IrOpcode::kCall || - op->opcode() == IrOpcode::kCallWithCallerSavedRegisters || op->opcode() == IrOpcode::kTailCall); return OpParameter<CallDescriptor const*>(op); } @@ -729,7 +729,7 @@ struct CommonOperatorGlobalCache final { Operator::kFoldable | Operator::kNoThrow, // properties "Deoptimize", // name 1, 1, 1, 0, 0, 1, // counts - DeoptimizeParameters(kKind, kReason, VectorSlotPair(), + DeoptimizeParameters(kKind, kReason, FeedbackSource(), IsSafetyCheck::kNoSafetyCheck)) {} }; #define CACHED_DEOPTIMIZE(Kind, Reason) \ @@ -747,7 +747,7 @@ struct CommonOperatorGlobalCache final { Operator::kFoldable | Operator::kNoThrow, // properties "DeoptimizeIf", // name 2, 1, 1, 0, 1, 1, // counts - DeoptimizeParameters(kKind, kReason, VectorSlotPair(), + DeoptimizeParameters(kKind, kReason, FeedbackSource(), is_safety_check)) {} }; #define CACHED_DEOPTIMIZE_IF(Kind, Reason, IsCheck) \ @@ -767,7 +767,7 @@ struct CommonOperatorGlobalCache final { Operator::kFoldable | Operator::kNoThrow, // properties "DeoptimizeUnless", // name 2, 1, 1, 0, 1, 1, // counts - DeoptimizeParameters(kKind, kReason, VectorSlotPair(), + DeoptimizeParameters(kKind, kReason, FeedbackSource(), is_safety_check)) {} }; #define CACHED_DEOPTIMIZE_UNLESS(Kind, Reason, IsCheck) \ @@ -948,7 +948,7 @@ const Operator* CommonOperatorBuilder::Branch(BranchHint hint, const Operator* CommonOperatorBuilder::Deoptimize( DeoptimizeKind kind, DeoptimizeReason reason, - VectorSlotPair const& feedback) { + FeedbackSource const& feedback) { #define CACHED_DEOPTIMIZE(Kind, Reason) \ if (kind == DeoptimizeKind::k##Kind && \ reason == DeoptimizeReason::k##Reason && !feedback.IsValid()) { \ @@ -969,7
+969,7 @@ const Operator* CommonOperatorBuilder::Deoptimize( const Operator* CommonOperatorBuilder::DeoptimizeIf( DeoptimizeKind kind, DeoptimizeReason reason, - VectorSlotPair const& feedback, IsSafetyCheck is_safety_check) { + FeedbackSource const& feedback, IsSafetyCheck is_safety_check) { #define CACHED_DEOPTIMIZE_IF(Kind, Reason, IsCheck) \ if (kind == DeoptimizeKind::k##Kind && \ reason == DeoptimizeReason::k##Reason && \ @@ -990,7 +990,7 @@ const Operator* CommonOperatorBuilder::DeoptimizeIf( const Operator* CommonOperatorBuilder::DeoptimizeUnless( DeoptimizeKind kind, DeoptimizeReason reason, - VectorSlotPair const& feedback, IsSafetyCheck is_safety_check) { + FeedbackSource const& feedback, IsSafetyCheck is_safety_check) { #define CACHED_DEOPTIMIZE_UNLESS(Kind, Reason, IsCheck) \ if (kind == DeoptimizeKind::k##Kind && \ reason == DeoptimizeReason::k##Reason && \ @@ -1481,31 +1481,6 @@ const Operator* CommonOperatorBuilder::Call( return new (zone()) CallOperator(call_descriptor); } -const Operator* CommonOperatorBuilder::CallWithCallerSavedRegisters( - const CallDescriptor* call_descriptor) { - class CallOperator final : public Operator1<const CallDescriptor*> { - public: - explicit CallOperator(const CallDescriptor* call_descriptor) - : Operator1<const CallDescriptor*>( - IrOpcode::kCallWithCallerSavedRegisters, - call_descriptor->properties(), "CallWithCallerSavedRegisters", - call_descriptor->InputCount() + - call_descriptor->FrameStateCount(), - Operator::ZeroIfPure(call_descriptor->properties()), - Operator::ZeroIfEliminatable(call_descriptor->properties()), - call_descriptor->ReturnCount(), - Operator::ZeroIfPure(call_descriptor->properties()), - Operator::ZeroIfNoThrow(call_descriptor->properties()), - call_descriptor) {} - - void PrintParameter(std::ostream& os, - PrintVerbosity verbose) const override { - os << "[" << *parameter() << "]"; - } - }; - return new (zone()) CallOperator(call_descriptor); -} - const Operator* CommonOperatorBuilder::TailCall( const CallDescriptor* call_descriptor) { class TailCallOperator final : public Operator1<const CallDescriptor*> { diff --git a/deps/v8/src/compiler/common-operator.h b/deps/v8/src/compiler/common-operator.h index 9f634e72ec..2b0dcc7db9 100644 --- a/deps/v8/src/compiler/common-operator.h +++ b/deps/v8/src/compiler/common-operator.h @@ -10,8 +10,8 @@ #include "src/codegen/reloc-info.h" #include "src/codegen/string-constants.h" #include "src/common/globals.h" +#include "src/compiler/feedback-source.h" #include "src/compiler/frame-states.h" -#include "src/compiler/vector-slot-pair.h" #include "src/deoptimizer/deoptimize-reason.h" #include "src/zone/zone-containers.h" #include "src/zone/zone-handle-set.h" @@ -104,7 +104,7 @@ int ValueInputCountOfReturn(Operator const* const op); class DeoptimizeParameters final { public: DeoptimizeParameters(DeoptimizeKind kind, DeoptimizeReason reason, - VectorSlotPair const& feedback, + FeedbackSource const& feedback, IsSafetyCheck is_safety_check) : kind_(kind), reason_(reason), @@ -113,13 +113,13 @@ class DeoptimizeParameters final { DeoptimizeKind kind() const { return kind_; } DeoptimizeReason reason() const { return reason_; } - const VectorSlotPair& feedback() const { return feedback_; } + const FeedbackSource& feedback() const { return feedback_; } IsSafetyCheck is_safety_check() const { return is_safety_check_; } private: DeoptimizeKind const kind_; DeoptimizeReason const reason_; - VectorSlotPair const feedback_; + FeedbackSource const feedback_; IsSafetyCheck is_safety_check_; }; @@ 
-468,14 +468,14 @@ class V8_EXPORT_PRIVATE CommonOperatorBuilder final const Operator* IfDefault(BranchHint hint = BranchHint::kNone); const Operator* Throw(); const Operator* Deoptimize(DeoptimizeKind kind, DeoptimizeReason reason, - VectorSlotPair const& feedback); + FeedbackSource const& feedback); const Operator* DeoptimizeIf( DeoptimizeKind kind, DeoptimizeReason reason, - VectorSlotPair const& feedback, + FeedbackSource const& feedback, IsSafetyCheck is_safety_check = IsSafetyCheck::kSafetyCheck); const Operator* DeoptimizeUnless( DeoptimizeKind kind, DeoptimizeReason reason, - VectorSlotPair const& feedback, + FeedbackSource const& feedback, IsSafetyCheck is_safety_check = IsSafetyCheck::kSafetyCheck); const Operator* TrapIf(TrapId trap_id); const Operator* TrapUnless(TrapId trap_id); @@ -530,8 +530,6 @@ class V8_EXPORT_PRIVATE CommonOperatorBuilder final OutputFrameStateCombine state_combine, const FrameStateFunctionInfo* function_info); const Operator* Call(const CallDescriptor* call_descriptor); - const Operator* CallWithCallerSavedRegisters( - const CallDescriptor* call_descriptor); const Operator* TailCall(const CallDescriptor* call_descriptor); const Operator* Projection(size_t index); const Operator* Retain(); diff --git a/deps/v8/src/compiler/compilation-dependencies.cc b/deps/v8/src/compiler/compilation-dependencies.cc index 673f4a341b..592d85440c 100644 --- a/deps/v8/src/compiler/compilation-dependencies.cc +++ b/deps/v8/src/compiler/compilation-dependencies.cc @@ -550,13 +550,7 @@ namespace { // This function expects to never see a JSProxy. void DependOnStablePrototypeChain(CompilationDependencies* deps, MapRef map, base::Optional<JSObjectRef> last_prototype) { - // TODO(neis): Remove heap access (SerializePrototype call). - AllowCodeDependencyChange dependency_change_; - AllowHandleAllocation handle_allocation_; - AllowHandleDereference handle_dereference_; - AllowHeapAllocation heap_allocation_; while (true) { - map.SerializePrototype(); HeapObjectRef proto = map.prototype(); if (!proto.IsJSObject()) { CHECK_EQ(proto.map().oddball_type(), OddballType::kNull); @@ -580,7 +574,7 @@ void CompilationDependencies::DependOnStablePrototypeChains( // Perform the implicit ToObject for primitives here. // Implemented according to ES6 section 7.3.2 GetV (V, P). 
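// [Editor's sketch, not part of the patch] A concrete case of the implicit
// ToObject above: for a named load on a string primitive, {receiver_map} is
// a String map, GetConstructorFunction() below returns the String function,
// and that function's initial map is the wrapper map GetV (ES6 7.3.2) would
// produce -- so the stable-prototype walk starts from the wrapper map.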
base::Optional<JSFunctionRef> constructor = - broker_->native_context().GetConstructorFunction(receiver_map); + broker_->target_native_context().GetConstructorFunction(receiver_map); if (constructor.has_value()) receiver_map = constructor->initial_map(); } DependOnStablePrototypeChain(this, receiver_map, last_prototype); diff --git a/deps/v8/src/compiler/effect-control-linearizer.cc b/deps/v8/src/compiler/effect-control-linearizer.cc index 788638fe68..8dfe356c34 100644 --- a/deps/v8/src/compiler/effect-control-linearizer.cc +++ b/deps/v8/src/compiler/effect-control-linearizer.cc @@ -9,6 +9,7 @@ #include "src/common/ptr-compr-inl.h" #include "src/compiler/access-builder.h" #include "src/compiler/compiler-source-position-table.h" +#include "src/compiler/feedback-source.h" #include "src/compiler/graph-assembler.h" #include "src/compiler/js-graph.h" #include "src/compiler/linkage.h" @@ -209,13 +210,13 @@ class EffectControlLinearizer { Node* AllocateHeapNumberWithValue(Node* node); Node* BuildCheckedFloat64ToInt32(CheckForMinusZeroMode mode, - const VectorSlotPair& feedback, Node* value, + const FeedbackSource& feedback, Node* value, Node* frame_state); Node* BuildCheckedFloat64ToInt64(CheckForMinusZeroMode mode, - const VectorSlotPair& feedback, Node* value, + const FeedbackSource& feedback, Node* value, Node* frame_state); Node* BuildCheckedHeapNumberOrOddballToFloat64(CheckTaggedInputMode mode, - const VectorSlotPair& feedback, + const FeedbackSource& feedback, Node* value, Node* frame_state); Node* BuildReverseBytes(ExternalArrayType type, Node* value); @@ -239,6 +240,7 @@ class EffectControlLinearizer { Node* ChangeSmiToInt32(Node* value); Node* ChangeSmiToInt64(Node* value); Node* ObjectIsSmi(Node* value); + Node* CompressedObjectIsSmi(Node* value); Node* LoadFromSeqString(Node* receiver, Node* position, Node* is_one_byte); Node* SmiMaxValueConstant(); @@ -1525,7 +1527,7 @@ Node* EffectControlLinearizer::LowerChangeTaggedSignedToInt64(Node* node) { Node* EffectControlLinearizer::LowerChangeTaggedToBit(Node* node) { Node* value = node->InputAt(0); - return __ WordEqual(value, __ TrueConstant()); + return __ TaggedEqual(value, __ TrueConstant()); } void EffectControlLinearizer::TruncateTaggedPointerToBit( @@ -1539,10 +1541,10 @@ void EffectControlLinearizer::TruncateTaggedPointerToBit( Node* fzero = __ Float64Constant(0.0); // Check if {value} is false. - __ GotoIf(__ WordEqual(value, __ FalseConstant()), done, zero); + __ GotoIf(__ TaggedEqual(value, __ FalseConstant()), done, zero); // Check if {value} is the empty string. - __ GotoIf(__ WordEqual(value, __ EmptyStringConstant()), done, zero); + __ GotoIf(__ TaggedEqual(value, __ EmptyStringConstant()), done, zero); // Load the map of {value}. Node* value_map = __ LoadField(AccessBuilder::ForMap(), value); @@ -1559,11 +1561,11 @@ void EffectControlLinearizer::TruncateTaggedPointerToBit( done, zero); // Check if {value} is a HeapNumber. - __ GotoIf(__ WordEqual(value_map, __ HeapNumberMapConstant()), + __ GotoIf(__ TaggedEqual(value_map, __ HeapNumberMapConstant()), &if_heapnumber); // Check if {value} is a BigInt. - __ GotoIf(__ WordEqual(value_map, __ BigIntMapConstant()), &if_bigint); + __ GotoIf(__ TaggedEqual(value_map, __ BigIntMapConstant()), &if_bigint); // All other values that reach here are true. __ Goto(done, __ Int32Constant(1)); @@ -1599,7 +1601,7 @@ Node* EffectControlLinearizer::LowerTruncateTaggedToBit(Node* node) { __ Bind(&if_smi); { // If {value} is a Smi, then we only need to check that it's not zero. 
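// [Editor's note, illustrative; assumes kSmiTag == 0] A Smi keeps its
// payload above the tag bits, so Smi zero is the all-zero machine word.
// The IntPtrEqual against zero below therefore tests "Smi is zero" without
// untagging, and the enclosing Word32Equal(..., 0) inverts that bit into
// the required "not zero" result.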
- __ Goto(&done, __ Word32Equal(__ WordEqual(value, __ IntPtrConstant(0)), + __ Goto(&done, __ Word32Equal(__ IntPtrEqual(value, __ IntPtrConstant(0)), __ Int32Constant(0))); } @@ -1711,7 +1713,7 @@ Node* EffectControlLinearizer::LowerChangeCompressedToTaggedSigned(Node* node) { auto if_not_smi = __ MakeDeferredLabel(); auto done = __ MakeLabel(MachineRepresentation::kWord32); - Node* check = ObjectIsSmi(value); + Node* check = CompressedObjectIsSmi(value); __ GotoIfNot(check, &if_not_smi); __ Goto(&done, __ ChangeCompressedSignedToTaggedSigned(value)); @@ -1795,7 +1797,7 @@ void EffectControlLinearizer::LowerCheckMaps(Node* node, Node* frame_state) { // Perform the map checks. for (size_t i = 0; i < map_count; ++i) { Node* map = __ HeapConstant(maps[i]); - Node* check = __ WordEqual(value_map, map); + Node* check = __ TaggedEqual(value_map, map); if (i == map_count - 1) { __ Branch(check, &done, &migrate, IsSafetyCheck::kCriticalSafetyCheck); } else { @@ -1811,7 +1813,7 @@ void EffectControlLinearizer::LowerCheckMaps(Node* node, Node* frame_state) { // If map is not deprecated the migration attempt does not make sense. Node* bitfield3 = __ LoadField(AccessBuilder::ForMapBitField3(), value_map); - Node* if_not_deprecated = __ WordEqual( + Node* if_not_deprecated = __ Word32Equal( __ Word32And(bitfield3, __ Int32Constant(Map::IsDeprecatedBit::kMask)), __ Int32Constant(0)); @@ -1837,7 +1839,7 @@ void EffectControlLinearizer::LowerCheckMaps(Node* node, Node* frame_state) { // Perform the map checks again. for (size_t i = 0; i < map_count; ++i) { Node* map = __ HeapConstant(maps[i]); - Node* check = __ WordEqual(value_map, map); + Node* check = __ TaggedEqual(value_map, map); if (i == map_count - 1) { __ DeoptimizeIfNot(DeoptimizeReason::kWrongMap, p.feedback(), check, frame_state, IsSafetyCheck::kCriticalSafetyCheck); @@ -1858,7 +1860,7 @@ void EffectControlLinearizer::LowerCheckMaps(Node* node, Node* frame_state) { for (size_t i = 0; i < map_count; ++i) { Node* map = __ HeapConstant(maps[i]); - Node* check = __ WordEqual(value_map, map); + Node* check = __ TaggedEqual(value_map, map); if (i == map_count - 1) { __ DeoptimizeIfNot(DeoptimizeReason::kWrongMap, p.feedback(), check, @@ -1886,7 +1888,7 @@ Node* EffectControlLinearizer::LowerCompareMaps(Node* node) { for (size_t i = 0; i < map_count; ++i) { Node* map = __ HeapConstant(maps[i]); - Node* check = __ WordEqual(value_map, map); + Node* check = __ TaggedEqual(value_map, map); auto next_map = __ MakeLabel(); auto passed = __ MakeLabel(); @@ -1916,7 +1918,7 @@ Node* EffectControlLinearizer::LowerCheckNumber(Node* node, Node* frame_state) { __ Bind(&if_not_smi); Node* value_map = __ LoadField(AccessBuilder::ForMap(), value); - Node* check1 = __ WordEqual(value_map, __ HeapNumberMapConstant()); + Node* check1 = __ TaggedEqual(value_map, __ HeapNumberMapConstant()); __ DeoptimizeIfNot(DeoptimizeReason::kNotAHeapNumber, params.feedback(), check1, frame_state); __ Goto(&done); @@ -1936,7 +1938,7 @@ Node* EffectControlLinearizer::LowerCheckReceiver(Node* node, STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE); Node* check = __ Uint32LessThanOrEqual( __ Uint32Constant(FIRST_JS_RECEIVER_TYPE), value_instance_type); - __ DeoptimizeIfNot(DeoptimizeReason::kNotAJavaScriptObject, VectorSlotPair(), + __ DeoptimizeIfNot(DeoptimizeReason::kNotAJavaScriptObject, FeedbackSource(), check, frame_state); return value; } @@ -1955,12 +1957,12 @@ Node* EffectControlLinearizer::LowerCheckReceiverOrNullOrUndefined( Node* check0 = __ Uint32LessThanOrEqual(__ 
Uint32Constant(ODDBALL_TYPE), value_instance_type); __ DeoptimizeIfNot(DeoptimizeReason::kNotAJavaScriptObjectOrNullOrUndefined, - VectorSlotPair(), check0, frame_state); + FeedbackSource(), check0, frame_state); // Rule out booleans. - Node* check1 = __ WordEqual(value_map, __ BooleanMapConstant()); + Node* check1 = __ TaggedEqual(value_map, __ BooleanMapConstant()); __ DeoptimizeIf(DeoptimizeReason::kNotAJavaScriptObjectOrNullOrUndefined, - VectorSlotPair(), check1, frame_state); + FeedbackSource(), check1, frame_state); return value; } @@ -1970,8 +1972,8 @@ Node* EffectControlLinearizer::LowerCheckSymbol(Node* node, Node* frame_state) { Node* value_map = __ LoadField(AccessBuilder::ForMap(), value); Node* check = - __ WordEqual(value_map, __ HeapConstant(factory()->symbol_map())); - __ DeoptimizeIfNot(DeoptimizeReason::kNotASymbol, VectorSlotPair(), check, + __ TaggedEqual(value_map, __ HeapConstant(factory()->symbol_map())); + __ DeoptimizeIfNot(DeoptimizeReason::kNotASymbol, FeedbackSource(), check, frame_state); return value; } @@ -2003,7 +2005,7 @@ Node* EffectControlLinearizer::LowerCheckInternalizedString(Node* node, __ Word32And(value_instance_type, __ Int32Constant(kIsNotStringMask | kIsNotInternalizedMask)), __ Int32Constant(kInternalizedTag)); - __ DeoptimizeIfNot(DeoptimizeReason::kWrongInstanceType, VectorSlotPair(), + __ DeoptimizeIfNot(DeoptimizeReason::kWrongInstanceType, FeedbackSource(), check, frame_state); return value; @@ -2040,7 +2042,7 @@ Node* EffectControlLinearizer::LowerCheckedInt32Add(Node* node, Node* value = __ Int32AddWithOverflow(lhs, rhs); Node* check = __ Projection(1, value); - __ DeoptimizeIf(DeoptimizeReason::kOverflow, VectorSlotPair(), check, + __ DeoptimizeIf(DeoptimizeReason::kOverflow, FeedbackSource(), check, frame_state); return __ Projection(0, value); } @@ -2052,7 +2054,7 @@ Node* EffectControlLinearizer::LowerCheckedInt32Sub(Node* node, Node* value = __ Int32SubWithOverflow(lhs, rhs); Node* check = __ Projection(1, value); - __ DeoptimizeIf(DeoptimizeReason::kOverflow, VectorSlotPair(), check, + __ DeoptimizeIf(DeoptimizeReason::kOverflow, FeedbackSource(), check, frame_state); return __ Projection(0, value); } @@ -2075,7 +2077,7 @@ Node* EffectControlLinearizer::LowerCheckedInt32Div(Node* node, Node* mask = __ Int32Constant(divisor - 1); Node* shift = __ Int32Constant(WhichPowerOf2(divisor)); Node* check = __ Word32Equal(__ Word32And(lhs, mask), zero); - __ DeoptimizeIfNot(DeoptimizeReason::kLostPrecision, VectorSlotPair(), + __ DeoptimizeIfNot(DeoptimizeReason::kLostPrecision, FeedbackSource(), check, frame_state); return __ Word32Sar(lhs, shift); } else { @@ -2100,12 +2102,12 @@ Node* EffectControlLinearizer::LowerCheckedInt32Div(Node* node, // Check if {rhs} is zero. Node* check_rhs_zero = __ Word32Equal(rhs, zero); - __ DeoptimizeIf(DeoptimizeReason::kDivisionByZero, VectorSlotPair(), + __ DeoptimizeIf(DeoptimizeReason::kDivisionByZero, FeedbackSource(), check_rhs_zero, frame_state); // Check if {lhs} is zero, as that would produce minus zero. Node* check_lhs_zero = __ Word32Equal(lhs, zero); - __ DeoptimizeIf(DeoptimizeReason::kMinusZero, VectorSlotPair(), + __ DeoptimizeIf(DeoptimizeReason::kMinusZero, FeedbackSource(), check_lhs_zero, frame_state); // Check if {lhs} is kMinInt and {rhs} is -1, in which case we'd have @@ -2118,7 +2120,7 @@ Node* EffectControlLinearizer::LowerCheckedInt32Div(Node* node, { // Check that {rhs} is not -1, otherwise result would be -kMinInt. 
Node* check_rhs_minusone = __ Word32Equal(rhs, __ Int32Constant(-1)); - __ DeoptimizeIf(DeoptimizeReason::kOverflow, VectorSlotPair(), + __ DeoptimizeIf(DeoptimizeReason::kOverflow, FeedbackSource(), check_rhs_minusone, frame_state); // Perform the actual integer division. @@ -2137,7 +2139,7 @@ Node* EffectControlLinearizer::LowerCheckedInt32Div(Node* node, // Check if the remainder is non-zero. Node* check = __ Word32Equal(lhs, __ Int32Mul(value, rhs)); - __ DeoptimizeIfNot(DeoptimizeReason::kLostPrecision, VectorSlotPair(), + __ DeoptimizeIfNot(DeoptimizeReason::kLostPrecision, FeedbackSource(), check, frame_state); return value; @@ -2219,7 +2221,7 @@ Node* EffectControlLinearizer::LowerCheckedInt32Mod(Node* node, Node* vtrue0 = __ Int32Sub(zero, rhs); // Ensure that {rhs} is not zero, otherwise we'd have to return NaN. - __ DeoptimizeIf(DeoptimizeReason::kDivisionByZero, VectorSlotPair(), + __ DeoptimizeIf(DeoptimizeReason::kDivisionByZero, FeedbackSource(), __ Word32Equal(vtrue0, zero), frame_state); __ Goto(&rhs_checked, vtrue0); } @@ -2242,7 +2244,7 @@ Node* EffectControlLinearizer::LowerCheckedInt32Mod(Node* node, Node* res = __ Uint32Mod(__ Int32Sub(zero, lhs), rhs); // Check if we would have to return -0. - __ DeoptimizeIf(DeoptimizeReason::kMinusZero, VectorSlotPair(), + __ DeoptimizeIf(DeoptimizeReason::kMinusZero, FeedbackSource(), __ Word32Equal(res, zero), frame_state); __ Goto(&done, __ Int32Sub(zero, res)); } @@ -2269,13 +2271,13 @@ Node* EffectControlLinearizer::LowerCheckedUint32Div(Node* node, Node* mask = __ Uint32Constant(divisor - 1); Node* shift = __ Uint32Constant(WhichPowerOf2(divisor)); Node* check = __ Word32Equal(__ Word32And(lhs, mask), zero); - __ DeoptimizeIfNot(DeoptimizeReason::kLostPrecision, VectorSlotPair(), + __ DeoptimizeIfNot(DeoptimizeReason::kLostPrecision, FeedbackSource(), check, frame_state); return __ Word32Shr(lhs, shift); } else { // Ensure that {rhs} is not zero, otherwise we'd have to return NaN. Node* check = __ Word32Equal(rhs, zero); - __ DeoptimizeIf(DeoptimizeReason::kDivisionByZero, VectorSlotPair(), check, + __ DeoptimizeIf(DeoptimizeReason::kDivisionByZero, FeedbackSource(), check, frame_state); // Perform the actual unsigned integer division. @@ -2283,7 +2285,7 @@ Node* EffectControlLinearizer::LowerCheckedUint32Div(Node* node, // Check if the remainder is non-zero. check = __ Word32Equal(lhs, __ Int32Mul(rhs, value)); - __ DeoptimizeIfNot(DeoptimizeReason::kLostPrecision, VectorSlotPair(), + __ DeoptimizeIfNot(DeoptimizeReason::kLostPrecision, FeedbackSource(), check, frame_state); return value; } @@ -2298,7 +2300,7 @@ Node* EffectControlLinearizer::LowerCheckedUint32Mod(Node* node, // Ensure that {rhs} is not zero, otherwise we'd have to return NaN. Node* check = __ Word32Equal(rhs, zero); - __ DeoptimizeIf(DeoptimizeReason::kDivisionByZero, VectorSlotPair(), check, + __ DeoptimizeIf(DeoptimizeReason::kDivisionByZero, FeedbackSource(), check, frame_state); // Perform the actual unsigned integer modulus. @@ -2313,7 +2315,7 @@ Node* EffectControlLinearizer::LowerCheckedInt32Mul(Node* node, Node* projection = __ Int32MulWithOverflow(lhs, rhs); Node* check = __ Projection(1, projection); - __ DeoptimizeIf(DeoptimizeReason::kOverflow, VectorSlotPair(), check, + __ DeoptimizeIf(DeoptimizeReason::kOverflow, FeedbackSource(), check, frame_state); Node* value = __ Projection(0, projection); @@ -2329,7 +2331,7 @@ Node* EffectControlLinearizer::LowerCheckedInt32Mul(Node* node, __ Bind(&if_zero); // We may need to return negative zero. 
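// [Editor's note, illustrative] If the 32-bit product is zero, at least one
// operand is zero, and the true result is -0 exactly when the other operand
// is negative. Word32Or(lhs, rhs) below has its sign bit set iff either
// input is negative, so a single Int32LessThan(..., zero) detects the -0
// case.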
Node* check_or = __ Int32LessThan(__ Word32Or(lhs, rhs), zero); - __ DeoptimizeIf(DeoptimizeReason::kMinusZero, VectorSlotPair(), check_or, + __ DeoptimizeIf(DeoptimizeReason::kMinusZero, FeedbackSource(), check_or, frame_state); __ Goto(&check_done); @@ -2489,7 +2491,7 @@ Node* EffectControlLinearizer::LowerCheckedUint64ToTaggedSigned( } Node* EffectControlLinearizer::BuildCheckedFloat64ToInt32( - CheckForMinusZeroMode mode, const VectorSlotPair& feedback, Node* value, + CheckForMinusZeroMode mode, const FeedbackSource& feedback, Node* value, Node* frame_state) { Node* value32 = __ RoundFloat64ToInt32(value); Node* check_same = __ Float64Equal(value, __ ChangeInt32ToFloat64(value32)); @@ -2528,7 +2530,7 @@ Node* EffectControlLinearizer::LowerCheckedFloat64ToInt32(Node* node, } Node* EffectControlLinearizer::BuildCheckedFloat64ToInt64( - CheckForMinusZeroMode mode, const VectorSlotPair& feedback, Node* value, + CheckForMinusZeroMode mode, const FeedbackSource& feedback, Node* value, Node* frame_state) { Node* value64 = __ TruncateFloat64ToInt64(value); Node* check_same = __ Float64Equal(value, __ ChangeInt64ToFloat64(value64)); @@ -2594,7 +2596,7 @@ Node* EffectControlLinearizer::LowerCheckedTaggedToInt32(Node* node, // to int32. __ Bind(&if_not_smi); Node* value_map = __ LoadField(AccessBuilder::ForMap(), value); - Node* check_map = __ WordEqual(value_map, __ HeapNumberMapConstant()); + Node* check_map = __ TaggedEqual(value_map, __ HeapNumberMapConstant()); __ DeoptimizeIfNot(DeoptimizeReason::kNotAHeapNumber, params.feedback(), check_map, frame_state); Node* vfalse = __ LoadField(AccessBuilder::ForHeapNumberValue(), value); @@ -2624,7 +2626,7 @@ Node* EffectControlLinearizer::LowerCheckedTaggedToInt64(Node* node, // to int64. __ Bind(&if_not_smi); Node* value_map = __ LoadField(AccessBuilder::ForMap(), value); - Node* check_map = __ WordEqual(value_map, __ HeapNumberMapConstant()); + Node* check_map = __ TaggedEqual(value_map, __ HeapNumberMapConstant()); __ DeoptimizeIfNot(DeoptimizeReason::kNotAHeapNumber, params.feedback(), check_map, frame_state); Node* vfalse = __ LoadField(AccessBuilder::ForHeapNumberValue(), value); @@ -2637,10 +2639,10 @@ Node* EffectControlLinearizer::LowerCheckedTaggedToInt64(Node* node, } Node* EffectControlLinearizer::BuildCheckedHeapNumberOrOddballToFloat64( - CheckTaggedInputMode mode, const VectorSlotPair& feedback, Node* value, + CheckTaggedInputMode mode, const FeedbackSource& feedback, Node* value, Node* frame_state) { Node* value_map = __ LoadField(AccessBuilder::ForMap(), value); - Node* check_number = __ WordEqual(value_map, __ HeapNumberMapConstant()); + Node* check_number = __ TaggedEqual(value_map, __ HeapNumberMapConstant()); switch (mode) { case CheckTaggedInputMode::kNumber: { __ DeoptimizeIfNot(DeoptimizeReason::kNotAHeapNumber, feedback, @@ -2731,7 +2733,7 @@ Node* EffectControlLinearizer::LowerCheckBigInt(Node* node, Node* frame_state) { // Check for BigInt. 
Node* value_map = __ LoadField(AccessBuilder::ForMap(), value); - Node* bi_check = __ WordEqual(value_map, __ BigIntMapConstant()); + Node* bi_check = __ TaggedEqual(value_map, __ BigIntMapConstant()); __ DeoptimizeIfNot(DeoptimizeReason::kWrongInstanceType, params.feedback(), bi_check, frame_state); @@ -2840,7 +2842,7 @@ Node* EffectControlLinearizer::LowerCheckedCompressedToTaggedSigned( Node* value = node->InputAt(0); const CheckParameters& params = CheckParametersOf(node->op()); - Node* check = ObjectIsSmi(value); + Node* check = CompressedObjectIsSmi(value); __ DeoptimizeIfNot(DeoptimizeReason::kNotASmi, params.feedback(), check, frame_state); @@ -2852,7 +2854,7 @@ Node* EffectControlLinearizer::LowerCheckedCompressedToTaggedPointer( Node* value = node->InputAt(0); const CheckParameters& params = CheckParametersOf(node->op()); - Node* check = ObjectIsSmi(value); + Node* check = CompressedObjectIsSmi(value); __ DeoptimizeIf(DeoptimizeReason::kSmi, params.feedback(), check, frame_state); return __ ChangeCompressedPointerToTaggedPointer(value); @@ -2983,7 +2985,7 @@ Node* EffectControlLinearizer::LowerObjectIsBigInt(Node* node) { Node* check = ObjectIsSmi(value); __ GotoIf(check, &if_smi); Node* value_map = __ LoadField(AccessBuilder::ForMap(), value); - Node* vfalse = __ WordEqual(value_map, __ BigIntMapConstant()); + Node* vfalse = __ TaggedEqual(value_map, __ BigIntMapConstant()); __ Goto(&done, vfalse); __ Bind(&if_smi); @@ -3095,7 +3097,7 @@ Node* EffectControlLinearizer::LowerObjectIsFiniteNumber(Node* node) { // Check if {object} is a HeapNumber. Node* value_map = __ LoadField(AccessBuilder::ForMap(), object); - __ GotoIfNot(__ WordEqual(value_map, __ HeapNumberMapConstant()), &done, + __ GotoIfNot(__ TaggedEqual(value_map, __ HeapNumberMapConstant()), &done, zero); // {object} is a HeapNumber. @@ -3128,7 +3130,7 @@ Node* EffectControlLinearizer::LowerObjectIsInteger(Node* node) { // Check if {object} is a HeapNumber. Node* value_map = __ LoadField(AccessBuilder::ForMap(), object); - __ GotoIfNot(__ WordEqual(value_map, __ HeapNumberMapConstant()), &done, + __ GotoIfNot(__ TaggedEqual(value_map, __ HeapNumberMapConstant()), &done, zero); // {object} is a HeapNumber. @@ -3171,7 +3173,7 @@ Node* EffectControlLinearizer::LowerObjectIsSafeInteger(Node* node) { // Check if {object} is a HeapNumber. Node* value_map = __ LoadField(AccessBuilder::ForMap(), object); - __ GotoIfNot(__ WordEqual(value_map, __ HeapNumberMapConstant()), &done, + __ GotoIfNot(__ TaggedEqual(value_map, __ HeapNumberMapConstant()), &done, zero); // {object} is a HeapNumber. @@ -3190,9 +3192,14 @@ Node* EffectControlLinearizer::LowerObjectIsSafeInteger(Node* node) { namespace { -const int64_t kMinusZeroBits = bit_cast<int64_t>(-0.0); -const int32_t kMinusZeroLoBits = static_cast<int32_t>(kMinusZeroBits); -const int32_t kMinusZeroHiBits = static_cast<int32_t>(kMinusZeroBits >> 32); +// There is no (currently) available constexpr version of bit_cast, so we have +// to make do with constructing the -0.0 bits manually (by setting the sign bit +// to 1 and everything else to 0). +// TODO(leszeks): Revisit when upgrading to C++20. +constexpr int32_t kMinusZeroLoBits = static_cast<int32_t>(0); +constexpr int32_t kMinusZeroHiBits = static_cast<int32_t>(1) << 31; +constexpr int64_t kMinusZeroBits = + (static_cast<uint64_t>(kMinusZeroHiBits) << 32) | kMinusZeroLoBits; } // namespace @@ -3207,7 +3214,7 @@ Node* EffectControlLinearizer::LowerObjectIsMinusZero(Node* node) { // Check if {value} is a HeapNumber. 
Node* value_map = __ LoadField(AccessBuilder::ForMap(), value); - __ GotoIfNot(__ WordEqual(value_map, __ HeapNumberMapConstant()), &done, + __ GotoIfNot(__ TaggedEqual(value_map, __ HeapNumberMapConstant()), &done, zero); // Check if {value} contains -0. @@ -3260,7 +3267,7 @@ Node* EffectControlLinearizer::LowerObjectIsNaN(Node* node) { // Check if {value} is a HeapNumber. Node* value_map = __ LoadField(AccessBuilder::ForMap(), value); - __ GotoIfNot(__ WordEqual(value_map, __ HeapNumberMapConstant()), &done, + __ GotoIfNot(__ TaggedEqual(value_map, __ HeapNumberMapConstant()), &done, zero); // Check if {value} contains a NaN. @@ -3319,7 +3326,7 @@ Node* EffectControlLinearizer::LowerObjectIsNumber(Node* node) { __ GotoIf(ObjectIsSmi(value), &if_smi); Node* value_map = __ LoadField(AccessBuilder::ForMap(), value); - __ Goto(&done, __ WordEqual(value_map, __ HeapNumberMapConstant())); + __ Goto(&done, __ TaggedEqual(value_map, __ HeapNumberMapConstant())); __ Bind(&if_smi); __ Goto(&done, __ Int32Constant(1)); @@ -3467,7 +3474,7 @@ Node* EffectControlLinearizer::LowerArgumentsLength(Node* node) { auto done = __ MakeLabel(MachineRepresentation::kTaggedSigned); Node* frame = __ LoadFramePointer(); - __ GotoIf(__ WordEqual(arguments_frame, frame), &done, __ SmiConstant(0)); + __ GotoIf(__ TaggedEqual(arguments_frame, frame), &done, __ SmiConstant(0)); __ Goto(&if_adaptor_frame); __ Bind(&if_adaptor_frame); @@ -3491,7 +3498,7 @@ Node* EffectControlLinearizer::LowerArgumentsLength(Node* node) { auto done = __ MakeLabel(MachineRepresentation::kTaggedSigned); Node* frame = __ LoadFramePointer(); - __ GotoIf(__ WordEqual(arguments_frame, frame), &done, + __ GotoIf(__ TaggedEqual(arguments_frame, frame), &done, __ SmiConstant(formal_parameter_count)); __ Goto(&if_adaptor_frame); @@ -3517,9 +3524,9 @@ Node* EffectControlLinearizer::LowerArgumentsFrame(Node* node) { MachineType::TypeCompressedTagged(), parent_frame, __ IntPtrConstant(CommonFrameConstants::kContextOrFrameTypeOffset)); - __ GotoIf(__ WordEqual(parent_frame_type, - __ IntPtrConstant(StackFrame::TypeToMarker( - StackFrame::ARGUMENTS_ADAPTOR))), + __ GotoIf(__ IntPtrEqual(parent_frame_type, + __ IntPtrConstant(StackFrame::TypeToMarker( + StackFrame::ARGUMENTS_ADAPTOR))), &done, parent_frame); __ Goto(&done, frame); @@ -3532,7 +3539,7 @@ Node* EffectControlLinearizer::LowerNewDoubleElements(Node* node) { Node* length = node->InputAt(0); auto done = __ MakeLabel(MachineRepresentation::kTaggedPointer); - Node* zero_length = __ WordEqual(length, __ IntPtrConstant(0)); + Node* zero_length = __ IntPtrEqual(length, __ IntPtrConstant(0)); __ GotoIf(zero_length, &done, jsgraph()->HeapConstant(factory()->empty_fixed_array())); @@ -3580,7 +3587,7 @@ Node* EffectControlLinearizer::LowerNewSmiOrObjectElements(Node* node) { Node* length = node->InputAt(0); auto done = __ MakeLabel(MachineRepresentation::kTaggedPointer); - Node* zero_length = __ WordEqual(length, __ IntPtrConstant(0)); + Node* zero_length = __ IntPtrEqual(length, __ IntPtrConstant(0)); __ GotoIf(zero_length, &done, jsgraph()->HeapConstant(factory()->empty_fixed_array())); @@ -3832,7 +3839,7 @@ Node* EffectControlLinearizer::LowerStringCharCodeAt(Node* node) { { Node* receiver_second = __ LoadField(AccessBuilder::ForConsStringSecond(), receiver); - __ GotoIfNot(__ WordEqual(receiver_second, __ EmptyStringConstant()), + __ GotoIfNot(__ TaggedEqual(receiver_second, __ EmptyStringConstant()), &if_runtime); Node* receiver_first = __ LoadField(AccessBuilder::ForConsStringFirst(), receiver); 
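The recurring WordEqual-to-TaggedEqual (and IntPtrEqual) rewrites throughout this file are behavior-preserving today: GraphAssembler lowers both helpers to WordEqual (see graph-assembler.cc further down). The split records whether a comparison operates on tagged values or on raw machine words, so that a pointer-compression build can later compare only the 32-bit compressed representation. A minimal sketch of that eventual lowering, reusing the COMPRESS_POINTERS_BOOL predicate this patch uses elsewhere (hypothetical, not part of the patch):

// Hypothetical: under pointer compression, tagged values can be compared
// as their 32-bit compressed representations; raw words stay full-width.
Node* LowerTaggedEqual(GraphAssembler* gasm, Node* left, Node* right) {
  if (COMPRESS_POINTERS_BOOL) {
    return gasm->Word32Equal(left, right);  // compressed halves suffice
  }
  return gasm->WordEqual(left, right);  // full machine word
}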
@@ -3967,7 +3974,7 @@ Node* EffectControlLinearizer::LowerStringFromSingleCharCode(Node* node) { Node* entry = __ LoadElement(AccessBuilder::ForFixedArrayElement(), cache, index); - Node* check2 = __ WordEqual(entry, __ UndefinedConstant()); + Node* check2 = __ TaggedEqual(entry, __ UndefinedConstant()); __ GotoIf(check2, &cache_miss); // Use the {entry} from the {cache}. @@ -4093,7 +4100,7 @@ Node* EffectControlLinearizer::LowerStringFromSingleCodePoint(Node* node) { Node* entry = __ LoadElement(AccessBuilder::ForFixedArrayElement(), cache, index); - Node* check2 = __ WordEqual(entry, __ UndefinedConstant()); + Node* check2 = __ TaggedEqual(entry, __ UndefinedConstant()); __ GotoIf(check2, &cache_miss); // Use the {entry} from the {cache}. @@ -4285,7 +4292,7 @@ Node* EffectControlLinearizer::LowerBigIntAdd(Node* node, Node* frame_state) { rhs, __ NoContextConstant()); // Check for exception sentinel: Smi is returned to signal BigIntTooBig. - __ DeoptimizeIf(DeoptimizeReason::kBigIntTooBig, VectorSlotPair{}, + __ DeoptimizeIf(DeoptimizeReason::kBigIntTooBig, FeedbackSource{}, ObjectIsSmi(value), frame_state); return value; @@ -4338,8 +4345,8 @@ Node* EffectControlLinearizer::LowerCheckFloat64Hole(Node* node, Node* EffectControlLinearizer::LowerCheckNotTaggedHole(Node* node, Node* frame_state) { Node* value = node->InputAt(0); - Node* check = __ WordEqual(value, __ TheHoleConstant()); - __ DeoptimizeIf(DeoptimizeReason::kHole, VectorSlotPair(), check, + Node* check = __ TaggedEqual(value, __ TheHoleConstant()); + __ DeoptimizeIf(DeoptimizeReason::kHole, FeedbackSource(), check, frame_state); return value; } @@ -4350,7 +4357,7 @@ Node* EffectControlLinearizer::LowerConvertTaggedHoleToUndefined(Node* node) { auto if_is_hole = __ MakeDeferredLabel(); auto done = __ MakeLabel(MachineRepresentation::kTagged); - Node* check = __ WordEqual(value, __ TheHoleConstant()); + Node* check = __ TaggedEqual(value, __ TheHoleConstant()); __ GotoIf(check, &if_is_hole); __ Goto(&done, value); @@ -4372,12 +4379,12 @@ void EffectControlLinearizer::LowerCheckEqualsInternalizedString( auto if_notthinstring = __ MakeLabel(); // Check if {exp} and {val} are the same, which is the likely case. - __ Branch(__ WordEqual(exp, val), &if_same, &if_notsame); + __ Branch(__ TaggedEqual(exp, val), &if_same, &if_notsame); __ Bind(&if_notsame); { // Now {val} could still be a non-internalized String that matches {exp}. - __ DeoptimizeIf(DeoptimizeReason::kWrongName, VectorSlotPair(), + __ DeoptimizeIf(DeoptimizeReason::kWrongName, FeedbackSource(), ObjectIsSmi(val), frame_state); Node* val_map = __ LoadField(AccessBuilder::ForMap(), val); Node* val_instance_type = @@ -4396,7 +4403,7 @@ void EffectControlLinearizer::LowerCheckEqualsInternalizedString( // Check that the {val} is a non-internalized String, if it's anything // else it cannot match the recorded feedback {exp} anyways. __ DeoptimizeIfNot( - DeoptimizeReason::kWrongName, VectorSlotPair(), + DeoptimizeReason::kWrongName, FeedbackSource(), __ Word32Equal(__ Word32And(val_instance_type, __ Int32Constant(kIsNotStringMask | kIsNotInternalizedMask)), @@ -4419,8 +4426,8 @@ void EffectControlLinearizer::LowerCheckEqualsInternalizedString( try_internalize_string_function, isolate_ptr, val); // Now see if the results match. 
- __ DeoptimizeIfNot(DeoptimizeReason::kWrongName, VectorSlotPair(), - __ WordEqual(exp, val_internalized), frame_state); + __ DeoptimizeIfNot(DeoptimizeReason::kWrongName, FeedbackSource(), + __ TaggedEqual(exp, val_internalized), frame_state); __ Goto(&if_same); } @@ -4429,8 +4436,8 @@ void EffectControlLinearizer::LowerCheckEqualsInternalizedString( // The {val} is a ThinString, let's check the actual value. Node* val_actual = __ LoadField(AccessBuilder::ForThinStringActual(), val); - __ DeoptimizeIfNot(DeoptimizeReason::kWrongName, VectorSlotPair(), - __ WordEqual(exp, val_actual), frame_state); + __ DeoptimizeIfNot(DeoptimizeReason::kWrongName, FeedbackSource(), + __ TaggedEqual(exp, val_actual), frame_state); __ Goto(&if_same); } } @@ -4442,8 +4449,8 @@ void EffectControlLinearizer::LowerCheckEqualsSymbol(Node* node, Node* frame_state) { Node* exp = node->InputAt(0); Node* val = node->InputAt(1); - Node* check = __ WordEqual(exp, val); - __ DeoptimizeIfNot(DeoptimizeReason::kWrongName, VectorSlotPair(), check, + Node* check = __ TaggedEqual(exp, val); + __ DeoptimizeIfNot(DeoptimizeReason::kWrongName, FeedbackSource(), check, frame_state); } @@ -4543,8 +4550,13 @@ Node* EffectControlLinearizer::ChangeSmiToInt64(Node* value) { } Node* EffectControlLinearizer::ObjectIsSmi(Node* value) { - return __ WordEqual(__ WordAnd(value, __ IntPtrConstant(kSmiTagMask)), - __ IntPtrConstant(kSmiTag)); + return __ IntPtrEqual(__ WordAnd(value, __ IntPtrConstant(kSmiTagMask)), + __ IntPtrConstant(kSmiTag)); +} + +Node* EffectControlLinearizer::CompressedObjectIsSmi(Node* value) { + return __ Word32Equal(__ Word32And(value, __ Int32Constant(kSmiTagMask)), + __ Int32Constant(kSmiTag)); } Node* EffectControlLinearizer::SmiMaxValueConstant() { @@ -4629,7 +4641,7 @@ Node* EffectControlLinearizer::LowerEnsureWritableFastElements(Node* node) { Node* elements_map = __ LoadField(AccessBuilder::ForMap(), elements); // Check if {elements} is not a copy-on-write FixedArray. - Node* check = __ WordEqual(elements_map, __ FixedArrayMapConstant()); + Node* check = __ TaggedEqual(elements_map, __ FixedArrayMapConstant()); __ GotoIfNot(check, &if_not_fixed_array); // Nothing to do if the {elements} are not copy-on-write. __ Goto(&done, elements); @@ -4707,7 +4719,7 @@ void EffectControlLinearizer::LowerTransitionElementsKind(Node* node) { Node* object_map = __ LoadField(AccessBuilder::ForMap(), object); // Check if {object_map} is the same as {source_map}. - Node* check = __ WordEqual(object_map, source_map); + Node* check = __ TaggedEqual(object_map, source_map); __ GotoIf(check, &if_map_same); __ Goto(&done); @@ -4749,7 +4761,7 @@ Node* EffectControlLinearizer::LowerLoadFieldByIndex(Node* node) { auto done = __ MakeLabel(MachineRepresentation::kTagged); // Check if field is a mutable double field. - __ GotoIfNot(__ WordEqual(__ WordAnd(index, one), zero), &if_double); + __ GotoIfNot(__ IntPtrEqual(__ WordAnd(index, one), zero), &if_double); // The field is a proper Tagged field on {object}. The {index} is shifted // to the left by one in the code below. @@ -4772,8 +4784,8 @@ Node* EffectControlLinearizer::LowerLoadFieldByIndex(Node* node) { // The {index} is equal to the negated out of property index plus 1. 
__ Bind(&if_outofobject); { - Node* properties = - __ LoadField(AccessBuilder::ForJSObjectPropertiesOrHash(), object); + Node* properties = __ LoadField( + AccessBuilder::ForJSObjectPropertiesOrHashKnownPointer(), object); Node* offset = __ IntAdd(__ WordShl(__ IntSub(zero, index), __ IntPtrConstant(kTaggedSizeLog2 - 1)), @@ -4786,7 +4798,7 @@ Node* EffectControlLinearizer::LowerLoadFieldByIndex(Node* node) { } // The field is a Double field, either unboxed in the object on 64-bit - // architectures, or as MutableHeapNumber. + // architectures, or a mutable HeapNumber. __ Bind(&if_double); { auto done_double = __ MakeLabel(MachineRepresentation::kFloat64); @@ -4815,8 +4827,8 @@ Node* EffectControlLinearizer::LowerLoadFieldByIndex(Node* node) { __ Bind(&if_outofobject); { - Node* properties = - __ LoadField(AccessBuilder::ForJSObjectPropertiesOrHash(), object); + Node* properties = __ LoadField( + AccessBuilder::ForJSObjectPropertiesOrHashKnownPointer(), object); Node* offset = __ IntAdd(__ WordShl(__ IntSub(zero, index), __ IntPtrConstant(kTaggedSizeLog2)), @@ -5123,7 +5135,7 @@ void EffectControlLinearizer::LowerTransitionAndStoreElement(Node* node) { // without effecting a transition. Node* value_map = __ LoadField(AccessBuilder::ForMap(), value); Node* heap_number_map = __ HeapNumberMapConstant(); - Node* check = __ WordEqual(value_map, heap_number_map); + Node* check = __ TaggedEqual(value_map, heap_number_map); __ GotoIfNot(check, &transition_double_to_fast); __ Goto(&do_store, kind); } @@ -5135,7 +5147,7 @@ void EffectControlLinearizer::LowerTransitionAndStoreElement(Node* node) { auto if_value_not_heap_number = __ MakeLabel(); Node* value_map = __ LoadField(AccessBuilder::ForMap(), value); Node* heap_number_map = __ HeapNumberMapConstant(); - Node* check = __ WordEqual(value_map, heap_number_map); + Node* check = __ TaggedEqual(value_map, heap_number_map); __ GotoIfNot(check, &if_value_not_heap_number); { // {value} is a HeapNumber. @@ -5478,9 +5490,10 @@ Node* EffectControlLinearizer::LowerConvertReceiver(Node* node) { // Wrap the primitive {value} into a JSPrimitiveWrapper. 
__ Bind(&convert_to_object); - __ GotoIf(__ WordEqual(value, __ UndefinedConstant()), + __ GotoIf(__ TaggedEqual(value, __ UndefinedConstant()), + &convert_global_proxy); + __ GotoIf(__ TaggedEqual(value, __ NullConstant()), &convert_global_proxy); - __ GotoIf(__ WordEqual(value, __ NullConstant()), &convert_global_proxy); Operator::Properties properties = Operator::kEliminatable; Callable callable = Builtins::CallableFor(isolate(), Builtins::kToObject); CallDescriptor::Flags flags = CallDescriptor::kNoFlags; @@ -5891,7 +5904,7 @@ Node* EffectControlLinearizer::LowerFindOrderedHashMapEntryForInt32Key( { Node* entry = loop.PhiAt(0); Node* check = - __ WordEqual(entry, __ IntPtrConstant(OrderedHashMap::kNotFound)); + __ IntPtrEqual(entry, __ IntPtrConstant(OrderedHashMap::kNotFound)); __ GotoIf(check, &done, entry); entry = __ IntAdd( __ IntMul(entry, __ IntPtrConstant(OrderedHashMap::kEntrySize)), @@ -5906,14 +5919,20 @@ Node* EffectControlLinearizer::LowerFindOrderedHashMapEntryForInt32Key( auto if_match = __ MakeLabel(); auto if_notmatch = __ MakeLabel(); auto if_notsmi = __ MakeDeferredLabel(); - __ GotoIfNot(ObjectIsSmi(candidate_key), &if_notsmi); - __ Branch(__ Word32Equal(ChangeSmiToInt32(candidate_key), key), &if_match, - &if_notmatch); + if (COMPRESS_POINTERS_BOOL) { + __ GotoIfNot(CompressedObjectIsSmi(candidate_key), &if_notsmi); + __ Branch(__ Word32Equal(ChangeCompressedSmiToInt32(candidate_key), key), + &if_match, &if_notmatch); + } else { + __ GotoIfNot(ObjectIsSmi(candidate_key), &if_notsmi); + __ Branch(__ Word32Equal(ChangeSmiToInt32(candidate_key), key), &if_match, + &if_notmatch); + } __ Bind(&if_notsmi); __ GotoIfNot( - __ WordEqual(__ LoadField(AccessBuilder::ForMap(), candidate_key), - __ HeapNumberMapConstant()), + __ TaggedEqual(__ LoadField(AccessBuilder::ForMap(), candidate_key), + __ HeapNumberMapConstant()), &if_notmatch); __ Branch(__ Float64Equal(__ LoadField(AccessBuilder::ForHeapNumberValue(), candidate_key), diff --git a/deps/v8/src/compiler/escape-analysis.cc b/deps/v8/src/compiler/escape-analysis.cc index aee0121384..b3f684ea61 100644 --- a/deps/v8/src/compiler/escape-analysis.cc +++ b/deps/v8/src/compiler/escape-analysis.cc @@ -736,10 +736,9 @@ void ReduceNode(const Operator* op, EscapeAnalysisTracker::Scope* current, current->Get(map_field).To(&map)) { if (map) { Type const map_type = NodeProperties::GetType(map); - AllowHandleDereference handle_dereference; if (map_type.IsHeapConstant() && params.maps().contains( - Handle<Map>::cast(map_type.AsHeapConstant()->Value()))) { + map_type.AsHeapConstant()->Ref().AsMap().object())) { current->MarkForDeletion(); break; } diff --git a/deps/v8/src/compiler/feedback-source.cc b/deps/v8/src/compiler/feedback-source.cc new file mode 100644 index 0000000000..8c3d175c28 --- /dev/null +++ b/deps/v8/src/compiler/feedback-source.cc @@ -0,0 +1,45 @@ +// Copyright 2019 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#include "src/compiler/feedback-source.h" + +namespace v8 { +namespace internal { +namespace compiler { + +FeedbackSource::FeedbackSource(Handle<FeedbackVector> vector_, + FeedbackSlot slot_) + : vector(vector_), slot(slot_) { + DCHECK(!slot.IsInvalid()); +} + +FeedbackSource::FeedbackSource(FeedbackVectorRef vector_, FeedbackSlot slot_) + : FeedbackSource(vector_.object(), slot_) {} + +FeedbackSource::FeedbackSource(FeedbackNexus const& nexus) + : FeedbackSource(nexus.vector_handle(), nexus.slot()) {} + +int FeedbackSource::index() const { + CHECK(IsValid()); + return FeedbackVector::GetIndex(slot); +} + +bool operator==(FeedbackSource const& lhs, FeedbackSource const& rhs) { + return FeedbackSource::Equal()(lhs, rhs); +} + +bool operator!=(FeedbackSource const& lhs, FeedbackSource const& rhs) { + return !(lhs == rhs); +} + +std::ostream& operator<<(std::ostream& os, const FeedbackSource& p) { + if (p.IsValid()) { + return os << "FeedbackSource(" << p.slot << ")"; + } + return os << "FeedbackSource(INVALID)"; +} + +} // namespace compiler +} // namespace internal +} // namespace v8 diff --git a/deps/v8/src/compiler/feedback-source.h b/deps/v8/src/compiler/feedback-source.h new file mode 100644 index 0000000000..8484acb455 --- /dev/null +++ b/deps/v8/src/compiler/feedback-source.h @@ -0,0 +1,52 @@ +// Copyright 2019 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef V8_COMPILER_FEEDBACK_SOURCE_H_ +#define V8_COMPILER_FEEDBACK_SOURCE_H_ + +#include "src/compiler/heap-refs.h" +#include "src/objects/feedback-vector.h" + +namespace v8 { +namespace internal { +namespace compiler { + +struct FeedbackSource { + FeedbackSource() { DCHECK(!IsValid()); } + V8_EXPORT_PRIVATE FeedbackSource(Handle<FeedbackVector> vector_, + FeedbackSlot slot_); + FeedbackSource(FeedbackVectorRef vector_, FeedbackSlot slot_); + explicit FeedbackSource(FeedbackNexus const& nexus); + + bool IsValid() const { return !vector.is_null() && !slot.IsInvalid(); } + int index() const; + + Handle<FeedbackVector> vector; + FeedbackSlot slot; + + struct Hash { + size_t operator()(FeedbackSource const& source) const { + return base::hash_combine(source.vector.address(), source.slot); + } + }; + + struct Equal { + bool operator()(FeedbackSource const& lhs, + FeedbackSource const& rhs) const { + return lhs.vector.equals(rhs.vector) && lhs.slot == rhs.slot; + } + }; +}; + +bool operator==(FeedbackSource const&, FeedbackSource const&); +bool operator!=(FeedbackSource const&, FeedbackSource const&); + +V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os, + FeedbackSource const&); + +} // namespace compiler +} // namespace internal +} // namespace v8 + +#endif // V8_COMPILER_FEEDBACK_SOURCE_H_ diff --git a/deps/v8/src/compiler/frame-states.cc b/deps/v8/src/compiler/frame-states.cc index 5fbf11cdbc..9478c08c6c 100644 --- a/deps/v8/src/compiler/frame-states.cc +++ b/deps/v8/src/compiler/frame-states.cc @@ -106,28 +106,22 @@ Node* CreateBuiltinContinuationFrameStateCommon( Node* closure, Node* context, Node** parameters, int parameter_count, Node* outer_frame_state, Handle<SharedFunctionInfo> shared = Handle<SharedFunctionInfo>()) { - Isolate* const isolate = jsgraph->isolate(); Graph* const graph = jsgraph->graph(); CommonOperatorBuilder* const common = jsgraph->common(); - BailoutId bailout_id = Builtins::GetContinuationBailoutId(name); - Callable callable = Builtins::CallableFor(isolate, name); - const Operator* 
op_param = common->StateValues(parameter_count, SparseInputMask::Dense()); Node* params_node = graph->NewNode(op_param, parameter_count, parameters); + BailoutId bailout_id = Builtins::GetContinuationBailoutId(name); const FrameStateFunctionInfo* state_info = common->CreateFrameStateFunctionInfo(frame_type, parameter_count, 0, shared); const Operator* op = common->FrameState( bailout_id, OutputFrameStateCombine::Ignore(), state_info); - - Node* frame_state = graph->NewNode( - op, params_node, jsgraph->EmptyStateValues(), jsgraph->EmptyStateValues(), - context, closure, outer_frame_state); - - return frame_state; + return graph->NewNode(op, params_node, jsgraph->EmptyStateValues(), + jsgraph->EmptyStateValues(), context, closure, + outer_frame_state); } } // namespace @@ -136,8 +130,7 @@ Node* CreateStubBuiltinContinuationFrameState( JSGraph* jsgraph, Builtins::Name name, Node* context, Node* const* parameters, int parameter_count, Node* outer_frame_state, ContinuationFrameStateMode mode) { - Isolate* isolate = jsgraph->isolate(); - Callable callable = Builtins::CallableFor(isolate, name); + Callable callable = Builtins::CallableFor(jsgraph->isolate(), name); CallInterfaceDescriptor descriptor = callable.descriptor(); std::vector<Node*> actual_parameters; @@ -172,9 +165,6 @@ Node* CreateJavaScriptBuiltinContinuationFrameState( Node* target, Node* context, Node* const* stack_parameters, int stack_parameter_count, Node* outer_frame_state, ContinuationFrameStateMode mode) { - Isolate* const isolate = jsgraph->isolate(); - Callable const callable = Builtins::CallableFor(isolate, name); - // Depending on {mode}, final parameters are added by the deoptimizer // and aren't explicitly passed in the frame state. DCHECK_EQ(Builtins::GetStackParameterCount(name) + 1, // add receiver @@ -190,11 +180,13 @@ Node* CreateJavaScriptBuiltinContinuationFrameState( actual_parameters.push_back(stack_parameters[i]); } - // Register parameters follow stack paraemters. The context will be added by + Node* new_target = jsgraph->UndefinedConstant(); + + // Register parameters follow stack parameters. The context will be added by // instruction selector during FrameState translation. 
- actual_parameters.push_back(target); - actual_parameters.push_back(jsgraph->UndefinedConstant()); - actual_parameters.push_back(argc); + actual_parameters.push_back(target); // kJavaScriptCallTargetRegister + actual_parameters.push_back(new_target); // kJavaScriptCallNewTargetRegister + actual_parameters.push_back(argc); // kJavaScriptCallArgCountRegister return CreateBuiltinContinuationFrameStateCommon( jsgraph, diff --git a/deps/v8/src/compiler/graph-assembler.cc b/deps/v8/src/compiler/graph-assembler.cc index 50f29d968b..b4ad81ecda 100644 --- a/deps/v8/src/compiler/graph-assembler.cc +++ b/deps/v8/src/compiler/graph-assembler.cc @@ -94,6 +94,14 @@ PURE_ASSEMBLER_MACH_BINOP_LIST(PURE_BINOP_DEF) CHECKED_ASSEMBLER_MACH_BINOP_LIST(CHECKED_BINOP_DEF) #undef CHECKED_BINOP_DEF +Node* GraphAssembler::IntPtrEqual(Node* left, Node* right) { + return WordEqual(left, right); +} + +Node* GraphAssembler::TaggedEqual(Node* left, Node* right) { + return WordEqual(left, right); +} + Node* GraphAssembler::Float64RoundDown(Node* value) { CHECK(machine()->Float64RoundDown().IsSupported()); return graph()->NewNode(machine()->Float64RoundDown().op(), value); @@ -237,7 +245,7 @@ Node* GraphAssembler::Word32PoisonOnSpeculation(Node* value) { } Node* GraphAssembler::DeoptimizeIf(DeoptimizeReason reason, - VectorSlotPair const& feedback, + FeedbackSource const& feedback, Node* condition, Node* frame_state, IsSafetyCheck is_safety_check) { return current_control_ = current_effect_ = graph()->NewNode( @@ -247,7 +255,7 @@ Node* GraphAssembler::DeoptimizeIf(DeoptimizeReason reason, } Node* GraphAssembler::DeoptimizeIfNot(DeoptimizeReason reason, - VectorSlotPair const& feedback, + FeedbackSource const& feedback, Node* condition, Node* frame_state, IsSafetyCheck is_safety_check) { return current_control_ = current_effect_ = graph()->NewNode( diff --git a/deps/v8/src/compiler/graph-assembler.h b/deps/v8/src/compiler/graph-assembler.h index e2c0005d15..0088f867c5 100644 --- a/deps/v8/src/compiler/graph-assembler.h +++ b/deps/v8/src/compiler/graph-assembler.h @@ -5,10 +5,10 @@ #ifndef V8_COMPILER_GRAPH_ASSEMBLER_H_ #define V8_COMPILER_GRAPH_ASSEMBLER_H_ +#include "src/compiler/feedback-source.h" #include "src/compiler/js-graph.h" #include "src/compiler/node.h" #include "src/compiler/simplified-operator.h" -#include "src/compiler/vector-slot-pair.h" namespace v8 { namespace internal { @@ -224,6 +224,9 @@ class GraphAssembler { Node* Unreachable(); + Node* IntPtrEqual(Node* left, Node* right); + Node* TaggedEqual(Node* left, Node* right); + Node* Float64RoundDown(Node* value); Node* Float64RoundTruncate(Node* value); @@ -251,11 +254,11 @@ class GraphAssembler { Node* Word32PoisonOnSpeculation(Node* value); Node* DeoptimizeIf( - DeoptimizeReason reason, VectorSlotPair const& feedback, Node* condition, + DeoptimizeReason reason, FeedbackSource const& feedback, Node* condition, Node* frame_state, IsSafetyCheck is_safety_check = IsSafetyCheck::kSafetyCheck); Node* DeoptimizeIfNot( - DeoptimizeReason reason, VectorSlotPair const& feedback, Node* condition, + DeoptimizeReason reason, FeedbackSource const& feedback, Node* condition, Node* frame_state, IsSafetyCheck is_safety_check = IsSafetyCheck::kSafetyCheck); template <typename... 
Args> diff --git a/deps/v8/src/compiler/graph.cc b/deps/v8/src/compiler/graph.cc index fea76bff81..99e9d5ffdb 100644 --- a/deps/v8/src/compiler/graph.cc +++ b/deps/v8/src/compiler/graph.cc @@ -68,9 +68,10 @@ Node* Graph::CloneNode(const Node* node) { NodeId Graph::NextNodeId() { - NodeId const id = next_node_id_; - CHECK(!base::bits::UnsignedAddOverflow32(id, 1, &next_node_id_)); - return id; + // A node's id is internally stored in a bit field using fewer bits than + // NodeId (see Node::IdField). Hence the addition below won't ever overflow. + DCHECK_LT(next_node_id_, std::numeric_limits<NodeId>::max()); + return next_node_id_++; } void Graph::Print() const { StdoutStream{} << AsRPO(*this); } diff --git a/deps/v8/src/compiler/heap-refs.h b/deps/v8/src/compiler/heap-refs.h index 5547039fa6..9b1aa53eb9 100644 --- a/deps/v8/src/compiler/heap-refs.h +++ b/deps/v8/src/compiler/heap-refs.h @@ -27,7 +27,6 @@ class JSRegExp; class JSTypedArray; class NativeContext; class ScriptContextTable; -class VectorSlotPair; namespace compiler { @@ -35,6 +34,8 @@ namespace compiler { // For a store during literal creation, do not walk up the prototype chain. enum class AccessMode { kLoad, kStore, kStoreInLiteral, kHas }; +enum class SerializationPolicy { kAssumeSerialized, kSerializeIfNeeded }; + enum class OddballType : uint8_t { kNone, // Not an Oddball. kBoolean, // True or False. @@ -53,6 +54,7 @@ enum class OddballType : uint8_t { V(JSBoundFunction) \ V(JSDataView) \ V(JSFunction) \ + V(JSGlobalObject) \ V(JSGlobalProxy) \ V(JSRegExp) \ V(JSTypedArray) \ @@ -70,8 +72,12 @@ enum class OddballType : uint8_t { V(InternalizedString) \ V(String) \ V(Symbol) \ + /* Subtypes of JSReceiver */ \ + V(JSObject) \ /* Subtypes of HeapObject */ \ + V(AccessorInfo) \ V(AllocationSite) \ + V(ArrayBoilerplateDescription) \ V(BigInt) \ V(CallHandlerInfo) \ V(Cell) \ @@ -82,10 +88,10 @@ enum class OddballType : uint8_t { V(FixedArrayBase) \ V(FunctionTemplateInfo) \ V(HeapNumber) \ - V(JSObject) \ + V(JSReceiver) \ V(Map) \ - V(MutableHeapNumber) \ V(Name) \ + V(ObjectBoilerplateDescription) \ V(PropertyCell) \ V(SharedFunctionInfo) \ V(SourceTextModule) \ @@ -103,8 +109,9 @@ HEAP_BROKER_OBJECT_LIST(FORWARD_DECL) class V8_EXPORT_PRIVATE ObjectRef { public: - ObjectRef(JSHeapBroker* broker, Handle<Object> object); - ObjectRef(JSHeapBroker* broker, ObjectData* data) + ObjectRef(JSHeapBroker* broker, Handle<Object> object, + bool check_type = true); + ObjectRef(JSHeapBroker* broker, ObjectData* data, bool check_type = true) : data_(data), broker_(broker) { CHECK_NOT_NULL(data_); } @@ -131,8 +138,9 @@ class V8_EXPORT_PRIVATE ObjectRef { // Return the element at key {index} if {index} is known to be an own data // property of the object that is non-writable and non-configurable. - base::Optional<ObjectRef> GetOwnConstantElement(uint32_t index, - bool serialize = false) const; + base::Optional<ObjectRef> GetOwnConstantElement( + uint32_t index, SerializationPolicy policy = + SerializationPolicy::kAssumeSerialized) const; Isolate* isolate() const; @@ -157,6 +165,7 @@ class V8_EXPORT_PRIVATE ObjectRef { friend class JSArrayData; friend class JSGlobalProxyRef; friend class JSGlobalProxyData; + friend class JSHeapBroker; friend class JSObjectData; friend class StringData; @@ -200,9 +209,27 @@ class HeapObjectType { Flags const flags_; }; +// Constructors are carefully defined such that we do a type check on +// the outermost Ref class in the inheritance chain only. 
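// As an illustration (not part of the patch itself), the macro below expands
// for DEFINE_REF_CONSTRUCTOR(HeapObject, ObjectRef) to roughly:
//
//   HeapObjectRef(JSHeapBroker* broker, Handle<Object> object,
//                 bool check_type = true)
//       : ObjectRef(broker, object, false) {
//     if (check_type) {
//       CHECK(IsHeapObject());
//     }
//   }
//
// Every constructor forwards check_type == false to its base class, so no
// matter how deep the inheritance chain, only the outermost Ref class runs
// its CHECK.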
+#define DEFINE_REF_CONSTRUCTOR(name, base) \ + name##Ref(JSHeapBroker* broker, Handle<Object> object, \ + bool check_type = true) \ + : base(broker, object, false) { \ + if (check_type) { \ + CHECK(Is##name()); \ + } \ + } \ + name##Ref(JSHeapBroker* broker, ObjectData* data, bool check_type = true) \ + : base(broker, data, false) { \ + if (check_type) { \ + CHECK(Is##name()); \ + } \ + } + class HeapObjectRef : public ObjectRef { public: - using ObjectRef::ObjectRef; + DEFINE_REF_CONSTRUCTOR(HeapObject, ObjectRef) + Handle<HeapObject> object() const; MapRef map() const; @@ -213,7 +240,8 @@ class HeapObjectRef : public ObjectRef { class PropertyCellRef : public HeapObjectRef { public: - using HeapObjectRef::HeapObjectRef; + DEFINE_REF_CONSTRUCTOR(PropertyCell, HeapObjectRef) + Handle<PropertyCell> object() const; PropertyDetails property_details() const; @@ -222,9 +250,17 @@ class PropertyCellRef : public HeapObjectRef { ObjectRef value() const; }; -class JSObjectRef : public HeapObjectRef { +class JSReceiverRef : public HeapObjectRef { public: - using HeapObjectRef::HeapObjectRef; + DEFINE_REF_CONSTRUCTOR(JSReceiver, HeapObjectRef) + + Handle<JSReceiver> object() const; +}; + +class JSObjectRef : public JSReceiverRef { + public: + DEFINE_REF_CONSTRUCTOR(JSObject, JSReceiverRef) + Handle<JSObject> object() const; uint64_t RawFastDoublePropertyAsBitsAt(FieldIndex index) const; @@ -233,10 +269,10 @@ class JSObjectRef : public HeapObjectRef { // Return the value of the property identified by the field {index} // if {index} is known to be an own data property of the object. - base::Optional<ObjectRef> GetOwnProperty(Representation field_representation, - FieldIndex index, - bool serialize = false) const; - + base::Optional<ObjectRef> GetOwnDataProperty( + Representation field_representation, FieldIndex index, + SerializationPolicy policy = + SerializationPolicy::kAssumeSerialized) const; FixedArrayBaseRef elements() const; void SerializeElements(); void EnsureElementsTenured(); @@ -248,7 +284,8 @@ class JSObjectRef : public HeapObjectRef { class JSDataViewRef : public JSObjectRef { public: - using JSObjectRef::JSObjectRef; + DEFINE_REF_CONSTRUCTOR(JSDataView, JSObjectRef) + Handle<JSDataView> object() const; size_t byte_length() const; @@ -257,20 +294,23 @@ class JSDataViewRef : public JSObjectRef { class JSBoundFunctionRef : public JSObjectRef { public: - using JSObjectRef::JSObjectRef; + DEFINE_REF_CONSTRUCTOR(JSBoundFunction, JSObjectRef) + Handle<JSBoundFunction> object() const; void Serialize(); + bool serialized() const; // The following are available only after calling Serialize(). 
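// (Sketch of the protocol implied by the rest of this patch: under
// concurrent inlining the serializer calls Serialize() ahead of time, and
// reducers gate on serialized() before touching the fields below, e.g.
//
//   if (FLAG_concurrent_inlining && !function.serialized()) {
//     TRACE_BROKER_MISSING(broker(), "data for function " << function);
//     return NoChange();
//   }
//
// as js-call-reducer.cc now does.)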
- ObjectRef bound_target_function() const; + JSReceiverRef bound_target_function() const; ObjectRef bound_this() const; FixedArrayRef bound_arguments() const; }; class V8_EXPORT_PRIVATE JSFunctionRef : public JSObjectRef { public: - using JSObjectRef::JSObjectRef; + DEFINE_REF_CONSTRUCTOR(JSFunction, JSObjectRef) + Handle<JSFunction> object() const; bool has_feedback_vector() const; @@ -295,7 +335,8 @@ class V8_EXPORT_PRIVATE JSFunctionRef : public JSObjectRef { class JSRegExpRef : public JSObjectRef { public: - using JSObjectRef::JSObjectRef; + DEFINE_REF_CONSTRUCTOR(JSRegExp, JSObjectRef) + Handle<JSRegExp> object() const; ObjectRef raw_properties_or_hash() const; @@ -307,33 +348,31 @@ class JSRegExpRef : public JSObjectRef { class HeapNumberRef : public HeapObjectRef { public: - using HeapObjectRef::HeapObjectRef; - Handle<HeapNumber> object() const; + DEFINE_REF_CONSTRUCTOR(HeapNumber, HeapObjectRef) - double value() const; -}; - -class MutableHeapNumberRef : public HeapObjectRef { - public: - using HeapObjectRef::HeapObjectRef; - Handle<MutableHeapNumber> object() const; + Handle<HeapNumber> object() const; double value() const; }; class ContextRef : public HeapObjectRef { public: - using HeapObjectRef::HeapObjectRef; + DEFINE_REF_CONSTRUCTOR(Context, HeapObjectRef) + Handle<Context> object() const; // {previous} decrements {depth} by 1 for each previous link successfully // followed. If {depth} != 0 on function return, then it only got // partway to the desired depth. If {serialize} is true, then // {previous} will cache its findings. - ContextRef previous(size_t* depth, bool serialize = false) const; + ContextRef previous(size_t* depth, + SerializationPolicy policy = + SerializationPolicy::kAssumeSerialized) const; // Only returns a value if the index is valid for this ContextRef. - base::Optional<ObjectRef> get(int index, bool serialize = false) const; + base::Optional<ObjectRef> get( + int index, SerializationPolicy policy = + SerializationPolicy::kAssumeSerialized) const; // We only serialize the ScopeInfo if certain Promise // builtins are called. 
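A minimal usage sketch of the new SerializationPolicy parameter (illustrative only; `context` is an assumed ContextRef local, and only declarations from this header are used). The serializer, which runs on the main thread, passes kSerializeIfNeeded so the broker caches whatever it walks; reducers keep the default kAssumeSerialized and must tolerate a partial result:

    // Serializer side: may touch the heap and cache its findings.
    size_t depth = 2;
    ContextRef prev = context.previous(
        &depth, SerializationPolicy::kSerializeIfNeeded);

    // Reducer side: heap access may be disallowed, so rely on cached data.
    size_t want = 2;
    ContextRef found = context.previous(&want);  // kAssumeSerialized
    if (want != 0) {
      // Only got partway to the desired depth; bail out of the optimization.
    }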
@@ -351,6 +390,7 @@ class ContextRef : public HeapObjectRef { V(JSFunction, promise_then) \ V(JSFunction, string_function) \ V(JSFunction, symbol_function) \ + V(JSGlobalObject, global_object) \ V(JSGlobalProxy, global_proxy_object) \ V(JSObject, promise_prototype) \ V(Map, bound_function_with_constructor_map) \ @@ -391,7 +431,8 @@ class ContextRef : public HeapObjectRef { class NativeContextRef : public ContextRef { public: - using ContextRef::ContextRef; + DEFINE_REF_CONSTRUCTOR(NativeContext, ContextRef) + Handle<NativeContext> object() const; void Serialize(); @@ -408,7 +449,8 @@ class NativeContextRef : public ContextRef { class NameRef : public HeapObjectRef { public: - using HeapObjectRef::HeapObjectRef; + DEFINE_REF_CONSTRUCTOR(Name, HeapObjectRef) + Handle<Name> object() const; bool IsUniqueName() const; @@ -416,7 +458,8 @@ class NameRef : public HeapObjectRef { class ScriptContextTableRef : public HeapObjectRef { public: - using HeapObjectRef::HeapObjectRef; + DEFINE_REF_CONSTRUCTOR(ScriptContextTable, HeapObjectRef) + Handle<ScriptContextTable> object() const; struct LookupResult { @@ -430,13 +473,15 @@ class ScriptContextTableRef : public HeapObjectRef { class DescriptorArrayRef : public HeapObjectRef { public: - using HeapObjectRef::HeapObjectRef; + DEFINE_REF_CONSTRUCTOR(DescriptorArray, HeapObjectRef) + Handle<DescriptorArray> object() const; }; class FeedbackCellRef : public HeapObjectRef { public: - using HeapObjectRef::HeapObjectRef; + DEFINE_REF_CONSTRUCTOR(FeedbackCell, HeapObjectRef) + Handle<FeedbackCell> object() const; HeapObjectRef value() const; @@ -444,17 +489,21 @@ class FeedbackCellRef : public HeapObjectRef { class FeedbackVectorRef : public HeapObjectRef { public: - using HeapObjectRef::HeapObjectRef; + DEFINE_REF_CONSTRUCTOR(FeedbackVector, HeapObjectRef) + Handle<FeedbackVector> object() const; - ObjectRef get(FeedbackSlot slot) const; + double invocation_count() const; - void SerializeSlots(); + void Serialize(); + ObjectRef get(FeedbackSlot slot) const; + FeedbackCellRef GetClosureFeedbackCell(int index) const; }; class CallHandlerInfoRef : public HeapObjectRef { public: - using HeapObjectRef::HeapObjectRef; + DEFINE_REF_CONSTRUCTOR(CallHandlerInfo, HeapObjectRef) + Handle<CallHandlerInfo> object() const; Address callback() const; @@ -463,9 +512,17 @@ class CallHandlerInfoRef : public HeapObjectRef { ObjectRef data() const; }; +class AccessorInfoRef : public HeapObjectRef { + public: + DEFINE_REF_CONSTRUCTOR(AccessorInfo, HeapObjectRef) + + Handle<AccessorInfo> object() const; +}; + class AllocationSiteRef : public HeapObjectRef { public: - using HeapObjectRef::HeapObjectRef; + DEFINE_REF_CONSTRUCTOR(AllocationSite, HeapObjectRef) + Handle<AllocationSite> object() const; bool PointsToLiteral() const; @@ -487,7 +544,8 @@ class AllocationSiteRef : public HeapObjectRef { class BigIntRef : public HeapObjectRef { public: - using HeapObjectRef::HeapObjectRef; + DEFINE_REF_CONSTRUCTOR(BigInt, HeapObjectRef) + Handle<BigInt> object() const; uint64_t AsUint64() const; @@ -495,7 +553,8 @@ class BigIntRef : public HeapObjectRef { class V8_EXPORT_PRIVATE MapRef : public HeapObjectRef { public: - using HeapObjectRef::HeapObjectRef; + DEFINE_REF_CONSTRUCTOR(Map, HeapObjectRef) + Handle<Map> object() const; int instance_size() const; @@ -526,7 +585,8 @@ class V8_EXPORT_PRIVATE MapRef : public HeapObjectRef { bool is_migration_target() const; bool supports_fast_array_iteration() const; bool supports_fast_array_resize() const; - bool IsMapOfCurrentGlobalProxy() const; + 
bool IsMapOfTargetGlobalProxy() const; + bool is_abandoned_prototype_map() const; OddballType oddball_type() const; @@ -550,12 +610,17 @@ class V8_EXPORT_PRIVATE MapRef : public HeapObjectRef { // Concerning the underlying instance_descriptors: void SerializeOwnDescriptors(); void SerializeOwnDescriptor(int descriptor_index); + bool serialized_own_descriptor(int descriptor_index) const; MapRef FindFieldOwner(int descriptor_index) const; PropertyDetails GetPropertyDetails(int descriptor_index) const; NameRef GetPropertyKey(int descriptor_index) const; FieldIndex GetFieldIndexFor(int descriptor_index) const; ObjectRef GetFieldType(int descriptor_index) const; bool IsUnboxedDoubleField(int descriptor_index) const; + ObjectRef GetStrongValue(int descriptor_number) const; + + void SerializeRootMap(); + base::Optional<MapRef> FindRootMap() const; // Available after calling JSFunctionRef::Serialize on a function that has // this map as initial map. @@ -574,7 +639,8 @@ struct HolderLookupResult { class FunctionTemplateInfoRef : public HeapObjectRef { public: - using HeapObjectRef::HeapObjectRef; + DEFINE_REF_CONSTRUCTOR(FunctionTemplateInfo, HeapObjectRef) + Handle<FunctionTemplateInfo> object() const; bool is_signature_undefined() const; @@ -585,21 +651,40 @@ class FunctionTemplateInfoRef : public HeapObjectRef { void SerializeCallCode(); base::Optional<CallHandlerInfoRef> call_code() const; - HolderLookupResult LookupHolderOfExpectedType(MapRef receiver_map, - bool serialize); + HolderLookupResult LookupHolderOfExpectedType( + MapRef receiver_map, + SerializationPolicy policy = SerializationPolicy::kAssumeSerialized); }; class FixedArrayBaseRef : public HeapObjectRef { public: - using HeapObjectRef::HeapObjectRef; + DEFINE_REF_CONSTRUCTOR(FixedArrayBase, HeapObjectRef) + Handle<FixedArrayBase> object() const; int length() const; }; +class ArrayBoilerplateDescriptionRef : public HeapObjectRef { + public: + using HeapObjectRef::HeapObjectRef; + Handle<ArrayBoilerplateDescription> object() const; + + int constants_elements_length() const; +}; + +class ObjectBoilerplateDescriptionRef : public HeapObjectRef { + public: + using HeapObjectRef::HeapObjectRef; + Handle<ObjectBoilerplateDescription> object() const; + + int size() const; +}; + class FixedArrayRef : public FixedArrayBaseRef { public: - using FixedArrayBaseRef::FixedArrayBaseRef; + DEFINE_REF_CONSTRUCTOR(FixedArray, FixedArrayBaseRef) + Handle<FixedArray> object() const; ObjectRef get(int i) const; @@ -607,7 +692,8 @@ class FixedArrayRef : public FixedArrayBaseRef { class FixedDoubleArrayRef : public FixedArrayBaseRef { public: - using FixedArrayBaseRef::FixedArrayBaseRef; + DEFINE_REF_CONSTRUCTOR(FixedDoubleArray, FixedArrayBaseRef) + Handle<FixedDoubleArray> object() const; double get_scalar(int i) const; @@ -616,7 +702,8 @@ class FixedDoubleArrayRef : public FixedArrayBaseRef { class BytecodeArrayRef : public FixedArrayBaseRef { public: - using FixedArrayBaseRef::FixedArrayBaseRef; + DEFINE_REF_CONSTRUCTOR(BytecodeArray, FixedArrayBaseRef) + Handle<BytecodeArray> object() const; int register_count() const; @@ -646,20 +733,23 @@ class BytecodeArrayRef : public FixedArrayBaseRef { class JSArrayRef : public JSObjectRef { public: - using JSObjectRef::JSObjectRef; + DEFINE_REF_CONSTRUCTOR(JSArray, JSObjectRef) + Handle<JSArray> object() const; ObjectRef length() const; // Return the element at key {index} if the array has a copy-on-write elements // storage and {index} is known to be an own data property. 
- base::Optional<ObjectRef> GetOwnCowElement(uint32_t index, - bool serialize = false) const; + base::Optional<ObjectRef> GetOwnCowElement( + uint32_t index, SerializationPolicy policy = + SerializationPolicy::kAssumeSerialized) const; }; class ScopeInfoRef : public HeapObjectRef { public: - using HeapObjectRef::HeapObjectRef; + DEFINE_REF_CONSTRUCTOR(ScopeInfo, HeapObjectRef) + Handle<ScopeInfo> object() const; int ContextLength() const; @@ -683,7 +773,8 @@ class ScopeInfoRef : public HeapObjectRef { class V8_EXPORT_PRIVATE SharedFunctionInfoRef : public HeapObjectRef { public: - using HeapObjectRef::HeapObjectRef; + DEFINE_REF_CONSTRUCTOR(SharedFunctionInfo, HeapObjectRef) + Handle<SharedFunctionInfo> object() const; int builtin_id() const; @@ -699,8 +790,9 @@ class V8_EXPORT_PRIVATE SharedFunctionInfoRef : public HeapObjectRef { // Template objects may not be created at compilation time. This method // wraps the retrieval of the template object and creates it if // necessary. - JSArrayRef GetTemplateObject(ObjectRef description, FeedbackVectorRef vector, - FeedbackSlot slot, bool serialize = false); + JSArrayRef GetTemplateObject( + ObjectRef description, FeedbackVectorRef vector, FeedbackSlot slot, + SerializationPolicy policy = SerializationPolicy::kAssumeSerialized); void SerializeFunctionTemplateInfo(); base::Optional<FunctionTemplateInfoRef> function_template_info() const; @@ -708,7 +800,8 @@ class V8_EXPORT_PRIVATE SharedFunctionInfoRef : public HeapObjectRef { class StringRef : public NameRef { public: - using NameRef::NameRef; + DEFINE_REF_CONSTRUCTOR(String, NameRef) + Handle<String> object() const; int length() const; @@ -720,13 +813,15 @@ class StringRef : public NameRef { class SymbolRef : public NameRef { public: - using NameRef::NameRef; + DEFINE_REF_CONSTRUCTOR(Symbol, NameRef) + Handle<Symbol> object() const; }; class JSTypedArrayRef : public JSObjectRef { public: - using JSObjectRef::JSObjectRef; + DEFINE_REF_CONSTRUCTOR(JSTypedArray, JSObjectRef) + Handle<JSTypedArray> object() const; bool is_on_heap() const; @@ -741,25 +836,35 @@ class JSTypedArrayRef : public JSObjectRef { class SourceTextModuleRef : public HeapObjectRef { public: - using HeapObjectRef::HeapObjectRef; + DEFINE_REF_CONSTRUCTOR(SourceTextModule, HeapObjectRef) + Handle<SourceTextModule> object() const; void Serialize(); - CellRef GetCell(int cell_index) const; + base::Optional<CellRef> GetCell(int cell_index) const; }; class CellRef : public HeapObjectRef { public: - using HeapObjectRef::HeapObjectRef; + DEFINE_REF_CONSTRUCTOR(Cell, HeapObjectRef) + Handle<Cell> object() const; ObjectRef value() const; }; +class JSGlobalObjectRef : public JSObjectRef { + public: + DEFINE_REF_CONSTRUCTOR(JSGlobalObject, JSObjectRef) + + Handle<JSGlobalObject> object() const; +}; + class JSGlobalProxyRef : public JSObjectRef { public: - using JSObjectRef::JSObjectRef; + DEFINE_REF_CONSTRUCTOR(JSGlobalProxy, JSObjectRef) + Handle<JSGlobalProxy> object() const; // If {serialize} is false: @@ -769,135 +874,26 @@ class JSGlobalProxyRef : public JSObjectRef { // If {serialize} is true: // Like above but potentially access the heap and serialize the necessary // information. 
- base::Optional<PropertyCellRef> GetPropertyCell(NameRef const& name, - bool serialize = false) const; + base::Optional<PropertyCellRef> GetPropertyCell( + NameRef const& name, SerializationPolicy policy = + SerializationPolicy::kAssumeSerialized) const; }; class CodeRef : public HeapObjectRef { public: - using HeapObjectRef::HeapObjectRef; + DEFINE_REF_CONSTRUCTOR(Code, HeapObjectRef) + Handle<Code> object() const; }; class InternalizedStringRef : public StringRef { public: - using StringRef::StringRef; - Handle<InternalizedString> object() const; -}; - -class ElementAccessFeedback; -class NamedAccessFeedback; - -class ProcessedFeedback : public ZoneObject { - public: - enum Kind { kInsufficient, kGlobalAccess, kNamedAccess, kElementAccess }; - Kind kind() const { return kind_; } - - ElementAccessFeedback const* AsElementAccess() const; - NamedAccessFeedback const* AsNamedAccess() const; - - protected: - explicit ProcessedFeedback(Kind kind) : kind_(kind) {} - - private: - Kind const kind_; -}; + DEFINE_REF_CONSTRUCTOR(InternalizedString, StringRef) -class InsufficientFeedback final : public ProcessedFeedback { - public: - InsufficientFeedback(); -}; - -class GlobalAccessFeedback : public ProcessedFeedback { - public: - explicit GlobalAccessFeedback(PropertyCellRef cell); - GlobalAccessFeedback(ContextRef script_context, int slot_index, - bool immutable); - - bool IsPropertyCell() const; - PropertyCellRef property_cell() const; - - bool IsScriptContextSlot() const { return !IsPropertyCell(); } - ContextRef script_context() const; - int slot_index() const; - bool immutable() const; - - base::Optional<ObjectRef> GetConstantHint() const; - - private: - ObjectRef const cell_or_context_; - int const index_and_immutable_; -}; - -class KeyedAccessMode { - public: - static KeyedAccessMode FromNexus(FeedbackNexus const& nexus); - - AccessMode access_mode() const; - bool IsLoad() const; - bool IsStore() const; - KeyedAccessLoadMode load_mode() const; - KeyedAccessStoreMode store_mode() const; - - private: - AccessMode const access_mode_; - union LoadStoreMode { - LoadStoreMode(KeyedAccessLoadMode load_mode); - LoadStoreMode(KeyedAccessStoreMode store_mode); - KeyedAccessLoadMode load_mode; - KeyedAccessStoreMode store_mode; - } const load_store_mode_; - - KeyedAccessMode(AccessMode access_mode, KeyedAccessLoadMode load_mode); - KeyedAccessMode(AccessMode access_mode, KeyedAccessStoreMode store_mode); -}; - -class ElementAccessFeedback : public ProcessedFeedback { - public: - ElementAccessFeedback(Zone* zone, KeyedAccessMode const& keyed_mode); - - // No transition sources appear in {receiver_maps}. - // All transition targets appear in {receiver_maps}. - ZoneVector<Handle<Map>> receiver_maps; - ZoneVector<std::pair<Handle<Map>, Handle<Map>>> transitions; - - KeyedAccessMode const keyed_mode; - - class MapIterator { - public: - bool done() const; - void advance(); - MapRef current() const; - - private: - friend class ElementAccessFeedback; - - explicit MapIterator(ElementAccessFeedback const& processed, - JSHeapBroker* broker); - - ElementAccessFeedback const& processed_; - JSHeapBroker* const broker_; - size_t index_ = 0; - }; - - // Iterator over all maps: first {receiver_maps}, then transition sources. 
- MapIterator all_maps(JSHeapBroker* broker) const; + Handle<InternalizedString> object() const; }; -class NamedAccessFeedback : public ProcessedFeedback { - public: - NamedAccessFeedback(NameRef const& name, - ZoneVector<PropertyAccessInfo> const& access_infos); - - NameRef const& name() const { return name_; } - ZoneVector<PropertyAccessInfo> const& access_infos() const { - return access_infos_; - } - - private: - NameRef const name_; - ZoneVector<PropertyAccessInfo> const access_infos_; -}; +#undef DEFINE_REF_CONSTRUCTOR } // namespace compiler } // namespace internal diff --git a/deps/v8/src/compiler/int64-lowering.cc b/deps/v8/src/compiler/int64-lowering.cc index eda866e5f2..45b49757fb 100644 --- a/deps/v8/src/compiler/int64-lowering.cc +++ b/deps/v8/src/compiler/int64-lowering.cc @@ -21,9 +21,11 @@ namespace v8 { namespace internal { namespace compiler { -Int64Lowering::Int64Lowering(Graph* graph, MachineOperatorBuilder* machine, - CommonOperatorBuilder* common, Zone* zone, - Signature<MachineRepresentation>* signature) +Int64Lowering::Int64Lowering( + Graph* graph, MachineOperatorBuilder* machine, + CommonOperatorBuilder* common, Zone* zone, + Signature<MachineRepresentation>* signature, + std::unique_ptr<Int64LoweringSpecialCase> special_case) : zone_(zone), graph_(graph), machine_(machine), @@ -32,8 +34,9 @@ Int64Lowering::Int64Lowering(Graph* graph, MachineOperatorBuilder* machine, stack_(zone), replacements_(nullptr), signature_(signature), - placeholder_(graph->NewNode(common->Parameter(-2, "placeholder"), - graph->start())) { + placeholder_( + graph->NewNode(common->Parameter(-2, "placeholder"), graph->start())), + special_case_(std::move(special_case)) { DCHECK_NOT_NULL(graph); DCHECK_NOT_NULL(graph->end()); replacements_ = zone->NewArray<Replacement>(graph->NodeCount()); @@ -77,7 +80,7 @@ void Int64Lowering::LowerGraph() { namespace { -int GetReturnIndexAfterLowering(CallDescriptor* call_descriptor, +int GetReturnIndexAfterLowering(const CallDescriptor* call_descriptor, int old_index) { int result = old_index; for (int i = 0; i < old_index; i++) { @@ -89,7 +92,7 @@ int GetReturnIndexAfterLowering(CallDescriptor* call_descriptor, return result; } -int GetReturnCountAfterLowering(CallDescriptor* call_descriptor) { +int GetReturnCountAfterLowering(const CallDescriptor* call_descriptor) { return GetReturnIndexAfterLowering( call_descriptor, static_cast<int>(call_descriptor->ReturnCount())); } @@ -336,21 +339,21 @@ void Int64Lowering::LowerNode(Node* node) { if (DefaultLowering(node) || returns_require_lowering) { // Tail calls do not have return values, so adjusting the call // descriptor is enough. - auto new_descriptor = GetI32WasmCallDescriptor(zone(), call_descriptor); - NodeProperties::ChangeOp(node, common()->TailCall(new_descriptor)); + NodeProperties::ChangeOp( + node, common()->TailCall(LowerCallDescriptor(call_descriptor))); } break; } case IrOpcode::kCall: { - auto call_descriptor = - const_cast<CallDescriptor*>(CallDescriptorOf(node->op())); + auto call_descriptor = CallDescriptorOf(node->op()); + bool returns_require_lowering = GetReturnCountAfterLowering(call_descriptor) != static_cast<int>(call_descriptor->ReturnCount()); if (DefaultLowering(node) || returns_require_lowering) { // We have to adjust the call descriptor. 
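// LowerCallDescriptor (defined further down in this file) first consults the
// optional Int64LoweringSpecialCase: the BigInt <-> i64 descriptors are
// swapped for their i32-pair equivalents, and any other descriptor falls
// through to the generic GetI32WasmCallDescriptor lowering.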
- NodeProperties::ChangeOp(node, common()->Call(GetI32WasmCallDescriptor( - zone(), call_descriptor))); + NodeProperties::ChangeOp( + node, common()->Call(LowerCallDescriptor(call_descriptor))); } if (returns_require_lowering) { size_t return_arity = call_descriptor->ReturnCount(); @@ -994,6 +997,19 @@ bool Int64Lowering::DefaultLowering(Node* node, bool low_word_only) { return something_changed; } +CallDescriptor* Int64Lowering::LowerCallDescriptor( + const CallDescriptor* call_descriptor) { + if (special_case_) { + if (call_descriptor == special_case_->bigint_to_i64_call_descriptor) { + return special_case_->bigint_to_i32_pair_call_descriptor; + } + if (call_descriptor == special_case_->i64_to_bigint_call_descriptor) { + return special_case_->i32_pair_to_bigint_call_descriptor; + } + } + return GetI32WasmCallDescriptor(zone(), call_descriptor); +} + void Int64Lowering::ReplaceNode(Node* old, Node* new_low, Node* new_high) { // if new_low == nullptr, then also new_high == nullptr. DCHECK(new_low != nullptr || new_high == nullptr); diff --git a/deps/v8/src/compiler/int64-lowering.h b/deps/v8/src/compiler/int64-lowering.h index 9c77cf41a3..1e2a36089b 100644 --- a/deps/v8/src/compiler/int64-lowering.h +++ b/deps/v8/src/compiler/int64-lowering.h @@ -20,11 +20,30 @@ class Signature; namespace compiler { +// Struct for CallDescriptors that need special lowering. +struct V8_EXPORT_PRIVATE Int64LoweringSpecialCase { + Int64LoweringSpecialCase() + : bigint_to_i64_call_descriptor(nullptr), + i64_to_bigint_call_descriptor(nullptr), + bigint_to_i32_pair_call_descriptor(nullptr), + i32_pair_to_bigint_call_descriptor(nullptr) {} + + // CallDescriptors that need special lowering. + CallDescriptor* bigint_to_i64_call_descriptor; + CallDescriptor* i64_to_bigint_call_descriptor; + + // The replacement CallDescriptors. 
+ CallDescriptor* bigint_to_i32_pair_call_descriptor; + CallDescriptor* i32_pair_to_bigint_call_descriptor; +}; + class V8_EXPORT_PRIVATE Int64Lowering { public: - Int64Lowering(Graph* graph, MachineOperatorBuilder* machine, - CommonOperatorBuilder* common, Zone* zone, - Signature<MachineRepresentation>* signature); + Int64Lowering( + Graph* graph, MachineOperatorBuilder* machine, + CommonOperatorBuilder* common, Zone* zone, + Signature<MachineRepresentation>* signature, + std::unique_ptr<Int64LoweringSpecialCase> special_case = nullptr); void LowerGraph(); @@ -53,6 +72,8 @@ class V8_EXPORT_PRIVATE Int64Lowering { void LowerWord64AtomicBinop(Node* node, const Operator* op); void LowerWord64AtomicNarrowOp(Node* node, const Operator* op); + CallDescriptor* LowerCallDescriptor(const CallDescriptor* call_descriptor); + void ReplaceNode(Node* old, Node* new_low, Node* new_high); bool HasReplacementLow(Node* node); Node* GetReplacementLow(Node* node); @@ -77,6 +98,7 @@ class V8_EXPORT_PRIVATE Int64Lowering { Replacement* replacements_; Signature<MachineRepresentation>* signature_; Node* placeholder_; + std::unique_ptr<Int64LoweringSpecialCase> special_case_; }; } // namespace compiler diff --git a/deps/v8/src/compiler/js-call-reducer.cc b/deps/v8/src/compiler/js-call-reducer.cc index 8128f89949..0b7b4a65f4 100644 --- a/deps/v8/src/compiler/js-call-reducer.cc +++ b/deps/v8/src/compiler/js-call-reducer.cc @@ -14,6 +14,7 @@ #include "src/compiler/access-info.h" #include "src/compiler/allocation-builder.h" #include "src/compiler/compilation-dependencies.h" +#include "src/compiler/feedback-source.h" #include "src/compiler/js-graph.h" #include "src/compiler/linkage.h" #include "src/compiler/map-inference.h" @@ -21,7 +22,6 @@ #include "src/compiler/property-access-builder.h" #include "src/compiler/simplified-operator.h" #include "src/compiler/type-cache.h" -#include "src/compiler/vector-slot-pair.h" #include "src/ic/call-optimization.h" #include "src/logging/counters.h" #include "src/objects/arguments-inl.h" @@ -179,101 +179,9 @@ Reduction JSCallReducer::ReduceMathMinMax(Node* node, const Operator* op, return Replace(value); } -// ES section #sec-math.hypot Math.hypot ( value1, value2, ...values ) -Reduction JSCallReducer::ReduceMathHypot(Node* node) { - CallParameters const& p = CallParametersOf(node->op()); - if (p.speculation_mode() == SpeculationMode::kDisallowSpeculation) { - return NoChange(); - } - if (node->op()->ValueInputCount() < 3) { - Node* value = jsgraph()->ZeroConstant(); - ReplaceWithValue(node, value); - return Replace(value); - } - Node* effect = NodeProperties::GetEffectInput(node); - Node* control = NodeProperties::GetControlInput(node); - NodeVector values(graph()->zone()); - - Node* max = effect = - graph()->NewNode(simplified()->SpeculativeToNumber( - NumberOperationHint::kNumberOrOddball, p.feedback()), - NodeProperties::GetValueInput(node, 2), effect, control); - max = graph()->NewNode(simplified()->NumberAbs(), max); - values.push_back(max); - for (int i = 3; i < node->op()->ValueInputCount(); ++i) { - Node* input = effect = graph()->NewNode( - simplified()->SpeculativeToNumber(NumberOperationHint::kNumberOrOddball, - p.feedback()), - NodeProperties::GetValueInput(node, i), effect, control); - input = graph()->NewNode(simplified()->NumberAbs(), input); - values.push_back(input); - - // Make sure {max} is NaN in the end in case any argument was NaN. 
- max = graph()->NewNode( - common()->Select(MachineRepresentation::kTagged), - graph()->NewNode(simplified()->NumberLessThanOrEqual(), input, max), - max, input); - } - - Node* check0 = graph()->NewNode(simplified()->NumberEqual(), max, - jsgraph()->ZeroConstant()); - Node* branch0 = - graph()->NewNode(common()->Branch(BranchHint::kFalse), check0, control); - - Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0); - Node* vtrue0 = jsgraph()->ZeroConstant(); - - Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0); - Node* vfalse0; - { - Node* check1 = graph()->NewNode(simplified()->NumberEqual(), max, - jsgraph()->Constant(V8_INFINITY)); - Node* branch1 = graph()->NewNode(common()->Branch(BranchHint::kFalse), - check1, if_false0); - - Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1); - Node* vtrue1 = jsgraph()->Constant(V8_INFINITY); - - Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1); - Node* vfalse1; - { - // Kahan summation to avoid rounding errors. - // Normalize the numbers to the largest one to avoid overflow. - Node* sum = jsgraph()->ZeroConstant(); - Node* compensation = jsgraph()->ZeroConstant(); - for (Node* value : values) { - Node* n = graph()->NewNode(simplified()->NumberDivide(), value, max); - Node* summand = graph()->NewNode( - simplified()->NumberSubtract(), - graph()->NewNode(simplified()->NumberMultiply(), n, n), - compensation); - Node* preliminary = - graph()->NewNode(simplified()->NumberAdd(), sum, summand); - compensation = graph()->NewNode( - simplified()->NumberSubtract(), - graph()->NewNode(simplified()->NumberSubtract(), preliminary, sum), - summand); - sum = preliminary; - } - vfalse1 = graph()->NewNode( - simplified()->NumberMultiply(), - graph()->NewNode(simplified()->NumberSqrt(), sum), max); - } - - if_false0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1); - vfalse0 = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2), - vtrue1, vfalse1, if_false0); - } - - control = graph()->NewNode(common()->Merge(2), if_true0, if_false0); - Node* value = - graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2), vtrue0, - vfalse0, control); - ReplaceWithValue(node, value, effect, control); - return Replace(value); -} - Reduction JSCallReducer::Reduce(Node* node) { + DisallowHeapAccessIf disallow_heap_access(FLAG_concurrent_inlining); + switch (node->opcode()) { case IrOpcode::kJSConstruct: return ReduceJSConstruct(node); @@ -313,6 +221,8 @@ void JSCallReducer::Finalize() { // ES6 section 22.1.1 The Array Constructor Reduction JSCallReducer::ReduceArrayConstructor(Node* node) { + DisallowHeapAccessIf disallow_heap_access(FLAG_concurrent_inlining); + DCHECK_EQ(IrOpcode::kJSCall, node->opcode()); Node* target = NodeProperties::GetValueInput(node, 0); CallParameters const& p = CallParametersOf(node->op()); @@ -480,14 +390,11 @@ Reduction JSCallReducer::ReduceFunctionPrototypeApply(Node* node) { // TODO(mslekova): Since this introduces a Call that will get optimized by // the JSCallReducer, we basically might have to do all the serialization // that we do for that here as well. The only difference is that here we - // disable speculation (cf. the empty VectorSlotPair above), causing the + // disable speculation (cf. the empty FeedbackSource above), causing the // JSCallReducer to do much less work. We should revisit this later. 
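// (FeedbackSource plays the role the empty VectorSlotPair used to: a
// default-constructed FeedbackSource is invalid,
//
//   FeedbackSource no_feedback;  // !no_feedback.IsValid()
//
// and reducers treat an invalid source as "no speculation allowed"; cf. the
// `if (!p.feedback().IsValid()) return NoChange();` check in ReduceJSCall
// further down.)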
NodeProperties::ChangeOp( node, - javascript()->Call(arity, p.frequency(), VectorSlotPair(), convert_mode)); - // TODO(mslekova): Remove once ReduceJSCall is brokerized. - AllowHandleDereference allow_handle_dereference; - AllowHandleAllocation allow_handle_allocation; + javascript()->Call(arity, p.frequency(), FeedbackSource(), convert_mode)); // Try to further reduce the JSCall {node}. Reduction const reduction = ReduceJSCall(node); return reduction.Changed() ? reduction : Changed(node); @@ -495,6 +402,8 @@ Reduction JSCallReducer::ReduceFunctionPrototypeApply(Node* node) { // ES section #sec-function.prototype.bind Reduction JSCallReducer::ReduceFunctionPrototypeBind(Node* node) { + DisallowHeapAccessIf no_heap_acess(FLAG_concurrent_inlining); + DCHECK_EQ(IrOpcode::kJSCall, node->opcode()); CallParameters const& p = CallParametersOf(node->op()); if (p.speculation_mode() == SpeculationMode::kDisallowSpeculation) { @@ -506,7 +415,7 @@ Reduction JSCallReducer::ReduceFunctionPrototypeBind(Node* node) { // - target, which is Function.prototype.bind JSFunction // - receiver, which is the [[BoundTargetFunction]] // - bound_this (optional), which is the [[BoundThis]] - // - and all the remaining value inouts are [[BoundArguments]] + // - and all the remaining value inputs are [[BoundArguments]] Node* receiver = NodeProperties::GetValueInput(node, 1); Node* bound_this = (node->op()->ValueInputCount() < 3) ? jsgraph()->UndefinedConstant() @@ -525,14 +434,24 @@ Reduction JSCallReducer::ReduceFunctionPrototypeBind(Node* node) { MapRef first_receiver_map(broker(), receiver_maps[0]); bool const is_constructor = first_receiver_map.is_constructor(); - first_receiver_map.SerializePrototype(); + + if (FLAG_concurrent_inlining && !first_receiver_map.serialized_prototype()) { + TRACE_BROKER_MISSING(broker(), + "serialized prototype on map " << first_receiver_map); + return inference.NoChange(); + } ObjectRef const prototype = first_receiver_map.prototype(); for (Handle<Map> const map : receiver_maps) { MapRef receiver_map(broker(), map); + if (FLAG_concurrent_inlining && !receiver_map.serialized_prototype()) { + TRACE_BROKER_MISSING(broker(), + "serialized prototype on map " << receiver_map); + return inference.NoChange(); + } + // Check for consistency among the {receiver_maps}. STATIC_ASSERT(LAST_TYPE == LAST_FUNCTION_TYPE); - receiver_map.SerializePrototype(); if (!receiver_map.prototype().equals(prototype) || receiver_map.is_constructor() != is_constructor || receiver_map.instance_type() < FIRST_FUNCTION_TYPE) { @@ -548,22 +467,31 @@ Reduction JSCallReducer::ReduceFunctionPrototypeBind(Node* node) { // recomputed even if the actual value of the object changes. // This mirrors the checks done in builtins-function-gen.cc at // runtime otherwise. 
- Handle<DescriptorArray> descriptors( - receiver_map.object()->instance_descriptors(), isolate()); - if (descriptors->number_of_descriptors() < 2) return inference.NoChange(); - if (descriptors->GetKey(JSFunction::kLengthDescriptorIndex) != - ReadOnlyRoots(isolate()).length_string()) { - return inference.NoChange(); - } - if (!descriptors->GetStrongValue(JSFunction::kLengthDescriptorIndex) - .IsAccessorInfo()) { + int minimum_nof_descriptors = i::Max(JSFunction::kLengthDescriptorIndex, + JSFunction::kNameDescriptorIndex) + + 1; + if (receiver_map.NumberOfOwnDescriptors() < minimum_nof_descriptors) { return inference.NoChange(); } - if (descriptors->GetKey(JSFunction::kNameDescriptorIndex) != - ReadOnlyRoots(isolate()).name_string()) { + if (!receiver_map.serialized_own_descriptor( + JSFunction::kLengthDescriptorIndex) || + !receiver_map.serialized_own_descriptor( + JSFunction::kNameDescriptorIndex)) { + TRACE_BROKER_MISSING(broker(), + "serialized descriptors on map " << receiver_map); return inference.NoChange(); } - if (!descriptors->GetStrongValue(JSFunction::kNameDescriptorIndex) + ReadOnlyRoots roots(isolate()); + StringRef length_string(broker(), roots.length_string_handle()); + StringRef name_string(broker(), roots.name_string_handle()); + + if (!receiver_map.GetPropertyKey(JSFunction::kLengthDescriptorIndex) + .equals(length_string) || + !receiver_map.GetStrongValue(JSFunction::kLengthDescriptorIndex) + .IsAccessorInfo() || + !receiver_map.GetPropertyKey(JSFunction::kNameDescriptorIndex) + .equals(name_string) || + !receiver_map.GetStrongValue(JSFunction::kNameDescriptorIndex) .IsAccessorInfo()) { return inference.NoChange(); } @@ -646,10 +574,7 @@ Reduction JSCallReducer::ReduceFunctionPrototypeCall(Node* node) { } NodeProperties::ChangeOp( node, - javascript()->Call(arity, p.frequency(), VectorSlotPair(), convert_mode)); - // TODO(mslekova): Remove once ReduceJSCall is brokerized. - AllowHandleDereference allow_handle_dereference; - AllowHandleAllocation allow_handle_allocation; + javascript()->Call(arity, p.frequency(), FeedbackSource(), convert_mode)); // Try to further reduce the JSCall {node}. Reduction const reduction = ReduceJSCall(node); return reduction.Changed() ? reduction : Changed(node); @@ -693,13 +618,19 @@ Reduction JSCallReducer::ReduceObjectGetPrototype(Node* node, Node* object) { MapHandles const& object_maps = inference.GetMaps(); MapRef candidate_map(broker(), object_maps[0]); - candidate_map.SerializePrototype(); + if (FLAG_concurrent_inlining && !candidate_map.serialized_prototype()) { + TRACE_BROKER_MISSING(broker(), "prototype for map " << candidate_map); + return inference.NoChange(); + } ObjectRef candidate_prototype = candidate_map.prototype(); // Check if we can constant-fold the {candidate_prototype}. 
for (size_t i = 0; i < object_maps.size(); ++i) { MapRef object_map(broker(), object_maps[i]); - object_map.SerializePrototype(); + if (FLAG_concurrent_inlining && !object_map.serialized_prototype()) { + TRACE_BROKER_MISSING(broker(), "prototype for map " << object_map); + return inference.NoChange(); + } if (IsSpecialReceiverInstanceType(object_map.instance_type()) || !object_map.prototype().equals(candidate_prototype)) { // We exclude special receivers, like JSProxy or API objects that @@ -830,6 +761,8 @@ Reduction JSCallReducer::ReduceObjectPrototypeHasOwnProperty(Node* node) { // ES #sec-object.prototype.isprototypeof Reduction JSCallReducer::ReduceObjectPrototypeIsPrototypeOf(Node* node) { + DisallowHeapAccessIf no_heap_access(FLAG_concurrent_inlining); + DCHECK_EQ(IrOpcode::kJSCall, node->opcode()); Node* receiver = NodeProperties::GetValueInput(node, 1); Node* value = node->op()->ValueInputCount() > 2 @@ -1048,7 +981,7 @@ Reduction JSCallReducer::ReduceReflectHas(Node* node) { { // TODO(magardn): collect feedback so this can be optimized vtrue = etrue = if_true = - graph()->NewNode(javascript()->HasProperty(VectorSlotPair()), target, + graph()->NewNode(javascript()->HasProperty(FeedbackSource()), target, key, context, frame_state, etrue, if_true); } @@ -1114,10 +1047,10 @@ bool CanInlineArrayIteratingBuiltin(JSHeapBroker* broker, return true; } -bool CanInlineArrayResizingBuiltin( - JSHeapBroker* broker, MapHandles const& receiver_maps, - std::vector<ElementsKind>& kinds, // NOLINT(runtime/references) - bool builtin_is_push = false) { +bool CanInlineArrayResizingBuiltin(JSHeapBroker* broker, + MapHandles const& receiver_maps, + std::vector<ElementsKind>* kinds, + bool builtin_is_push = false) { DCHECK_NE(0, receiver_maps.size()); for (auto receiver_map : receiver_maps) { MapRef map(broker, receiver_map); @@ -1128,14 +1061,14 @@ bool CanInlineArrayResizingBuiltin( return false; } ElementsKind current_kind = map.elements_kind(); - auto kind_ptr = kinds.data(); + auto kind_ptr = kinds->data(); size_t i; - for (i = 0; i < kinds.size(); i++, kind_ptr++) { + for (i = 0; i < kinds->size(); i++, kind_ptr++) { if (UnionElementsKindUptoPackedness(kind_ptr, current_kind)) { break; } } - if (i == kinds.size()) kinds.push_back(current_kind); + if (i == kinds->size()) kinds->push_back(current_kind); } return true; } @@ -1143,6 +1076,8 @@ bool CanInlineArrayResizingBuiltin( Reduction JSCallReducer::ReduceArrayForEach( Node* node, const SharedFunctionInfoRef& shared) { + DisallowHeapAccessIf disallow_heap_access(FLAG_concurrent_inlining); + if (!FLAG_turbo_inline_array_builtins) return NoChange(); DCHECK_EQ(IrOpcode::kJSCall, node->opcode()); CallParameters const& p = CallParametersOf(node->op()); @@ -1309,6 +1244,8 @@ Reduction JSCallReducer::ReduceArrayForEach( Reduction JSCallReducer::ReduceArrayReduce( Node* node, ArrayReduceDirection direction, const SharedFunctionInfoRef& shared) { + DisallowHeapAccessIf disallow_heap_access(FLAG_concurrent_inlining); + if (!FLAG_turbo_inline_array_builtins) return NoChange(); DCHECK_EQ(IrOpcode::kJSCall, node->opcode()); CallParameters const& p = CallParametersOf(node->op()); @@ -1567,6 +1504,8 @@ Reduction JSCallReducer::ReduceArrayReduce( Reduction JSCallReducer::ReduceArrayMap(Node* node, const SharedFunctionInfoRef& shared) { + DisallowHeapAccessIf disallow_heap_access(FLAG_concurrent_inlining); + if (!FLAG_turbo_inline_array_builtins) return NoChange(); DCHECK_EQ(IrOpcode::kJSCall, node->opcode()); CallParameters const& p = CallParametersOf(node->op()); 
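Every array builtin reducer in this file now opens with the same guard. A sketch of the overall pattern these hunks follow, using only constructs that appear in this patch (the reducer name is hypothetical, and `receiver_maps` stands for the maps a MapInference would provide, as in the surrounding functions):

    Reduction JSCallReducer::ReduceSomeArrayBuiltin(Node* node) {
      // Forbid heap reads for the whole scope when inlining concurrently.
      DisallowHeapAccessIf disallow_heap_access(FLAG_concurrent_inlining);
      if (!FLAG_turbo_inline_array_builtins) return NoChange();

      MapRef receiver_map(broker(), receiver_maps[0]);
      if (FLAG_concurrent_inlining && !receiver_map.serialized_prototype()) {
        // Not serialized ahead of time: record the miss and give up.
        TRACE_BROKER_MISSING(broker(), "prototype for map " << receiver_map);
        return NoChange();
      }
      // Served from the broker's cache; the real reducers go on to build
      // the replacement subgraph from here.
      ObjectRef prototype = receiver_map.prototype();
      return NoChange();
    }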
@@ -1759,6 +1698,8 @@ Reduction JSCallReducer::ReduceArrayMap(Node* node, Reduction JSCallReducer::ReduceArrayFilter( Node* node, const SharedFunctionInfoRef& shared) { + DisallowHeapAccessIf disallow_heap_access(FLAG_concurrent_inlining); + if (!FLAG_turbo_inline_array_builtins) return NoChange(); DCHECK_EQ(IrOpcode::kJSCall, node->opcode()); CallParameters const& p = CallParametersOf(node->op()); @@ -1809,7 +1750,8 @@ Reduction JSCallReducer::ReduceArrayFilter( Type::Array()); ab.Store(AccessBuilder::ForMap(), initial_map); Node* empty_fixed_array = jsgraph()->EmptyFixedArrayConstant(); - ab.Store(AccessBuilder::ForJSObjectPropertiesOrHash(), empty_fixed_array); + ab.Store(AccessBuilder::ForJSObjectPropertiesOrHashKnownPointer(), + empty_fixed_array); ab.Store(AccessBuilder::ForJSObjectElements(), empty_fixed_array); ab.Store(AccessBuilder::ForJSArrayLength(packed_kind), jsgraph()->ZeroConstant()); @@ -1998,6 +1940,8 @@ Reduction JSCallReducer::ReduceArrayFilter( Reduction JSCallReducer::ReduceArrayFind(Node* node, ArrayFindVariant variant, const SharedFunctionInfoRef& shared) { + DisallowHeapAccessIf disallow_heap_access(FLAG_concurrent_inlining); + if (!FLAG_turbo_inline_array_builtins) return NoChange(); DCHECK_EQ(IrOpcode::kJSCall, node->opcode()); CallParameters const& p = CallParametersOf(node->op()); @@ -2218,7 +2162,7 @@ Node* JSCallReducer::DoFilterPostCallbackWork(ElementsKind kind, Node** control, IsDoubleElementsKind(kind) ? GrowFastElementsMode::kDoubleElements : GrowFastElementsMode::kSmiOrObjectElements; elements = etrue = graph()->NewNode( - simplified()->MaybeGrowFastElements(mode, VectorSlotPair()), a, + simplified()->MaybeGrowFastElements(mode, FeedbackSource()), a, elements, checked_to, elements_length, etrue, if_true); // Update the length of {a}. @@ -2288,7 +2232,7 @@ void JSCallReducer::RewirePostCallbackExceptionEdges(Node* check_throw, Node* JSCallReducer::SafeLoadElement(ElementsKind kind, Node* receiver, Node* control, Node** effect, Node** k, - const VectorSlotPair& feedback) { + const FeedbackSource& feedback) { // Make sure that the access is still in bounds, since the callback could // have changed the array's size. 
Node* length = *effect = graph()->NewNode( @@ -2313,6 +2257,8 @@ Node* JSCallReducer::SafeLoadElement(ElementsKind kind, Node* receiver, Reduction JSCallReducer::ReduceArrayEvery(Node* node, const SharedFunctionInfoRef& shared) { + DisallowHeapAccessIf disallow_heap_access(FLAG_concurrent_inlining); + if (!FLAG_turbo_inline_array_builtins) return NoChange(); DCHECK_EQ(IrOpcode::kJSCall, node->opcode()); CallParameters const& p = CallParametersOf(node->op()); @@ -2567,6 +2513,8 @@ Callable GetCallableForArrayIncludes(ElementsKind elements_kind, // #sec-array.prototype.includes Reduction JSCallReducer::ReduceArrayIndexOfIncludes( SearchVariant search_variant, Node* node) { + DisallowHeapAccessIf disallow_heap_access(FLAG_concurrent_inlining); + CallParameters const& p = CallParametersOf(node->op()); if (p.speculation_mode() == SpeculationMode::kDisallowSpeculation) { return NoChange(); @@ -2638,6 +2586,8 @@ Reduction JSCallReducer::ReduceArrayIndexOfIncludes( Reduction JSCallReducer::ReduceArraySome(Node* node, const SharedFunctionInfoRef& shared) { + DisallowHeapAccessIf disallow_heap_access(FLAG_concurrent_inlining); + if (!FLAG_turbo_inline_array_builtins) return NoChange(); DCHECK_EQ(IrOpcode::kJSCall, node->opcode()); CallParameters const& p = CallParametersOf(node->op()); @@ -2906,8 +2856,7 @@ Reduction JSCallReducer::ReduceCallApiFunction( // See if we can constant-fold the compatible receiver checks. HolderLookupResult api_holder = - function_template_info.LookupHolderOfExpectedType(first_receiver_map, - false); + function_template_info.LookupHolderOfExpectedType(first_receiver_map); if (api_holder.lookup == CallOptimization::kHolderNotFound) return inference.NoChange(); @@ -2937,8 +2886,7 @@ Reduction JSCallReducer::ReduceCallApiFunction( for (size_t i = 1; i < receiver_maps.size(); ++i) { MapRef receiver_map(broker(), receiver_maps[i]); HolderLookupResult holder_i = - function_template_info.LookupHolderOfExpectedType(receiver_map, - false); + function_template_info.LookupHolderOfExpectedType(receiver_map); if (api_holder.lookup != holder_i.lookup) return inference.NoChange(); if (!(api_holder.holder.has_value() && holder_i.holder.has_value())) @@ -3059,7 +3007,7 @@ bool IsSafeArgumentsElements(Node* node) { Reduction JSCallReducer::ReduceCallOrConstructWithArrayLikeOrSpread( Node* node, int arity, CallFrequency const& frequency, - VectorSlotPair const& feedback) { + FeedbackSource const& feedback) { DCHECK(node->opcode() == IrOpcode::kJSCallWithArrayLike || node->opcode() == IrOpcode::kJSCallWithSpread || node->opcode() == IrOpcode::kJSConstructWithArrayLike || @@ -3285,13 +3233,6 @@ bool ShouldUseCallICFeedback(Node* node) { return true; } -base::Optional<HeapObjectRef> GetHeapObjectFeedback( - JSHeapBroker* broker, const FeedbackNexus& nexus) { - HeapObject object; - if (!nexus.GetFeedback()->GetHeapObject(&object)) return base::nullopt; - return HeapObjectRef(broker, handle(object, broker->isolate())); -} - } // namespace Reduction JSCallReducer::ReduceJSCall(Node* node) { @@ -3309,7 +3250,10 @@ Reduction JSCallReducer::ReduceJSCall(Node* node) { ObjectRef target_ref = m.Ref(broker()); if (target_ref.IsJSFunction()) { JSFunctionRef function = target_ref.AsJSFunction(); - function.Serialize(); + if (FLAG_concurrent_inlining && !function.serialized()) { + TRACE_BROKER_MISSING(broker(), "data for function " << function); + return NoChange(); + } // Don't inline cross native context. 
if (!function.native_context().equals(native_context())) { @@ -3319,7 +3263,10 @@ Reduction JSCallReducer::ReduceJSCall(Node* node) { return ReduceJSCall(node, function.shared()); } else if (target_ref.IsJSBoundFunction()) { JSBoundFunctionRef function = target_ref.AsJSBoundFunction(); - function.Serialize(); + if (FLAG_concurrent_inlining && !function.serialized()) { + TRACE_BROKER_MISSING(broker(), "data for function " << function); + return NoChange(); + } ObjectRef bound_this = function.bound_this(); ConvertReceiverMode const convert_mode = @@ -3342,7 +3289,7 @@ Reduction JSCallReducer::ReduceJSCall(Node* node) { } NodeProperties::ChangeOp( - node, javascript()->Call(arity, p.frequency(), VectorSlotPair(), + node, javascript()->Call(arity, p.frequency(), FeedbackSource(), convert_mode)); // Try to further reduce the JSCall {node}. @@ -3390,7 +3337,7 @@ Reduction JSCallReducer::ReduceJSCall(Node* node) { ? ConvertReceiverMode::kAny : ConvertReceiverMode::kNotNullOrUndefined; NodeProperties::ChangeOp( - node, javascript()->Call(arity, p.frequency(), VectorSlotPair(), + node, javascript()->Call(arity, p.frequency(), FeedbackSource(), convert_mode)); // Try to further reduce the JSCall {node}. @@ -3398,19 +3345,18 @@ Reduction JSCallReducer::ReduceJSCall(Node* node) { return reduction.Changed() ? reduction : Changed(node); } - // Extract feedback from the {node} using the FeedbackNexus. if (!p.feedback().IsValid()) return NoChange(); - FeedbackNexus nexus(p.feedback().vector(), p.feedback().slot()); - if (nexus.IsUninitialized()) { + ProcessedFeedback const& feedback = + broker()->GetFeedbackForCall(FeedbackSource(p.feedback())); + if (feedback.IsInsufficient()) { return ReduceSoftDeoptimize( node, DeoptimizeReason::kInsufficientTypeFeedbackForCall); } - base::Optional<HeapObjectRef> feedback = - GetHeapObjectFeedback(broker(), nexus); - if (feedback.has_value() && ShouldUseCallICFeedback(target) && - feedback->map().is_callable()) { - Node* target_function = jsgraph()->Constant(*feedback); + base::Optional<HeapObjectRef> feedback_target = feedback.AsCall().target(); + if (feedback_target.has_value() && ShouldUseCallICFeedback(target) && + feedback_target->map().is_callable()) { + Node* target_function = jsgraph()->Constant(*feedback_target); // Check that the {target} is still the {target_function}. Node* check = graph()->NewNode(simplified()->ReferenceEqual(), target, @@ -3630,8 +3576,6 @@ Reduction JSCallReducer::ReduceJSCall(Node* node, return ReduceMathUnary(node, simplified()->NumberFloor()); case Builtins::kMathFround: return ReduceMathUnary(node, simplified()->NumberFround()); - case Builtins::kMathHypot: - return ReduceMathHypot(node); case Builtins::kMathLog: return ReduceMathUnary(node, simplified()->NumberLog()); case Builtins::kMathLog1p: @@ -3785,22 +3729,17 @@ Reduction JSCallReducer::ReduceJSCall(Node* node, break; } - if (shared.object()->IsApiFunction()) { + if (shared.function_template_info().has_value()) { return ReduceCallApiFunction(node, shared); } return NoChange(); } Reduction JSCallReducer::ReduceJSCallWithArrayLike(Node* node) { - // TODO(mslekova): Remove once ReduceJSCallWithArrayLike is brokerized. 
- AllowHandleDereference allow_handle_dereference; - AllowHandleAllocation allow_handle_allocation; - DCHECK_EQ(IrOpcode::kJSCallWithArrayLike, node->opcode()); CallFrequency frequency = CallFrequencyOf(node->op()); - VectorSlotPair feedback; return ReduceCallOrConstructWithArrayLikeOrSpread(node, 2, frequency, - feedback); + FeedbackSource()); } Reduction JSCallReducer::ReduceJSCallWithSpread(Node* node) { @@ -3809,7 +3748,7 @@ Reduction JSCallReducer::ReduceJSCallWithSpread(Node* node) { DCHECK_LE(3u, p.arity()); int arity = static_cast<int>(p.arity() - 1); CallFrequency frequency = p.frequency(); - VectorSlotPair feedback = p.feedback(); + FeedbackSource feedback = p.feedback(); return ReduceCallOrConstructWithArrayLikeOrSpread(node, arity, frequency, feedback); } @@ -3824,17 +3763,16 @@ Reduction JSCallReducer::ReduceJSConstruct(Node* node) { Node* effect = NodeProperties::GetEffectInput(node); Node* control = NodeProperties::GetControlInput(node); - // Extract feedback from the {node} using the FeedbackNexus. if (p.feedback().IsValid()) { - FeedbackNexus nexus(p.feedback().vector(), p.feedback().slot()); - if (nexus.IsUninitialized()) { + ProcessedFeedback const& feedback = + broker()->GetFeedbackForCall(p.feedback()); + if (feedback.IsInsufficient()) { return ReduceSoftDeoptimize( node, DeoptimizeReason::kInsufficientTypeFeedbackForConstruct); } - base::Optional<HeapObjectRef> feedback = - GetHeapObjectFeedback(broker(), nexus); - if (feedback.has_value() && feedback->IsAllocationSite()) { + base::Optional<HeapObjectRef> feedback_target = feedback.AsCall().target(); + if (feedback_target.has_value() && feedback_target->IsAllocationSite()) { // The feedback is an AllocationSite, which means we have called the // Array function and collected transition (and pretenuring) feedback // for the resulting arrays. This has to be kept in sync with the @@ -3859,12 +3797,12 @@ Reduction JSCallReducer::ReduceJSConstruct(Node* node) { NodeProperties::ReplaceValueInput(node, array_function, 1); NodeProperties::ChangeOp( node, javascript()->CreateArray( - arity, feedback->AsAllocationSite().object())); + arity, feedback_target->AsAllocationSite().object())); return Changed(node); - } else if (feedback.has_value() && + } else if (feedback_target.has_value() && !HeapObjectMatcher(new_target).HasValue() && - feedback->map().is_constructor()) { - Node* new_target_feedback = jsgraph()->Constant(*feedback); + feedback_target->map().is_constructor()) { + Node* new_target_feedback = jsgraph()->Constant(*feedback_target); // Check that the {new_target} is still the {new_target_feedback}. Node* check = graph()->NewNode(simplified()->ReferenceEqual(), new_target, @@ -3902,7 +3840,11 @@ Reduction JSCallReducer::ReduceJSConstruct(Node* node) { if (target_ref.IsJSFunction()) { JSFunctionRef function = target_ref.AsJSFunction(); - function.Serialize(); + if (FLAG_concurrent_inlining && !function.serialized()) { + TRACE_BROKER_MISSING(broker(), + "function, not serialized: " << function); + return NoChange(); + } // Do not reduce constructors with break points. 
if (function.shared().HasBreakInfo()) return NoChange(); @@ -3959,7 +3901,11 @@ Reduction JSCallReducer::ReduceJSConstruct(Node* node) { } } else if (target_ref.IsJSBoundFunction()) { JSBoundFunctionRef function = target_ref.AsJSBoundFunction(); - function.Serialize(); + if (FLAG_concurrent_inlining && !function.serialized()) { + TRACE_BROKER_MISSING(broker(), + "function, not serialized: " << function); + return NoChange(); + } ObjectRef bound_target_function = function.bound_target_function(); FixedArrayRef bound_arguments = function.bound_arguments(); @@ -3989,7 +3935,7 @@ Reduction JSCallReducer::ReduceJSConstruct(Node* node) { // Update the JSConstruct operator on {node}. NodeProperties::ChangeOp( node, - javascript()->Construct(arity + 2, p.frequency(), VectorSlotPair())); + javascript()->Construct(arity + 2, p.frequency(), FeedbackSource())); // Try to further reduce the JSConstruct {node}. Reduction const reduction = ReduceJSConstruct(node); @@ -4030,7 +3976,7 @@ Reduction JSCallReducer::ReduceJSConstruct(Node* node) { // Update the JSConstruct operator on {node}. NodeProperties::ChangeOp( node, - javascript()->Construct(arity + 2, p.frequency(), VectorSlotPair())); + javascript()->Construct(arity + 2, p.frequency(), FeedbackSource())); // Try to further reduce the JSConstruct {node}. Reduction const reduction = ReduceJSConstruct(node); @@ -4350,9 +4296,8 @@ Reduction JSCallReducer::ReduceStringPrototypeSubstr(Node* node) { Reduction JSCallReducer::ReduceJSConstructWithArrayLike(Node* node) { DCHECK_EQ(IrOpcode::kJSConstructWithArrayLike, node->opcode()); CallFrequency frequency = CallFrequencyOf(node->op()); - VectorSlotPair feedback; return ReduceCallOrConstructWithArrayLikeOrSpread(node, 1, frequency, - feedback); + FeedbackSource()); } Reduction JSCallReducer::ReduceJSConstructWithSpread(Node* node) { @@ -4361,7 +4306,7 @@ Reduction JSCallReducer::ReduceJSConstructWithSpread(Node* node) { DCHECK_LE(3u, p.arity()); int arity = static_cast<int>(p.arity() - 2); CallFrequency frequency = p.frequency(); - VectorSlotPair feedback = p.feedback(); + FeedbackSource feedback = p.feedback(); return ReduceCallOrConstructWithArrayLikeOrSpread(node, arity, frequency, feedback); } @@ -4382,7 +4327,7 @@ Reduction JSCallReducer::ReduceSoftDeoptimize(Node* node, Node* frame_state = NodeProperties::FindFrameStateBefore(node, jsgraph()->Dead()); Node* deoptimize = graph()->NewNode( - common()->Deoptimize(DeoptimizeKind::kSoft, reason, VectorSlotPair()), + common()->Deoptimize(DeoptimizeKind::kSoft, reason, FeedbackSource()), frame_state, effect, control); // TODO(bmeurer): This should be on the AdvancedReducer somehow. 
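// (Context, not asserted by the patch: NodeProperties::MergeControlToEnd
// appends {deoptimize} as an additional control input to the graph's End
// node, so the soft deopt unconditionally terminates this control path.)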
NodeProperties::MergeControlToEnd(graph(), common(), deoptimize); @@ -4440,6 +4385,8 @@ void JSCallReducer::CheckIfElementsKind(Node* receiver_elements_kind, // ES6 section 22.1.3.18 Array.prototype.push ( ) Reduction JSCallReducer::ReduceArrayPrototypePush(Node* node) { + DisallowHeapAccessIf disallow_heap_access(FLAG_concurrent_inlining); + DCHECK_EQ(IrOpcode::kJSCall, node->opcode()); CallParameters const& p = CallParametersOf(node->op()); if (p.speculation_mode() == SpeculationMode::kDisallowSpeculation) { @@ -4456,7 +4403,7 @@ Reduction JSCallReducer::ReduceArrayPrototypePush(Node* node) { MapHandles const& receiver_maps = inference.GetMaps(); std::vector<ElementsKind> kinds; - if (!CanInlineArrayResizingBuiltin(broker(), receiver_maps, kinds, true)) { + if (!CanInlineArrayResizingBuiltin(broker(), receiver_maps, &kinds, true)) { return inference.NoChange(); } if (!dependencies()->DependOnNoElementsProtector()) UNREACHABLE(); @@ -4574,6 +4521,8 @@ Reduction JSCallReducer::ReduceArrayPrototypePush(Node* node) { // ES6 section 22.1.3.17 Array.prototype.pop ( ) Reduction JSCallReducer::ReduceArrayPrototypePop(Node* node) { + DisallowHeapAccessIf disallow_heap_access(FLAG_concurrent_inlining); + DCHECK_EQ(IrOpcode::kJSCall, node->opcode()); CallParameters const& p = CallParametersOf(node->op()); if (p.speculation_mode() == SpeculationMode::kDisallowSpeculation) { @@ -4589,7 +4538,7 @@ Reduction JSCallReducer::ReduceArrayPrototypePop(Node* node) { MapHandles const& receiver_maps = inference.GetMaps(); std::vector<ElementsKind> kinds; - if (!CanInlineArrayResizingBuiltin(broker(), receiver_maps, kinds)) { + if (!CanInlineArrayResizingBuiltin(broker(), receiver_maps, &kinds)) { return inference.NoChange(); } if (!dependencies()->DependOnNoElementsProtector()) UNREACHABLE(); @@ -4707,6 +4656,8 @@ Reduction JSCallReducer::ReduceArrayPrototypePop(Node* node) { // ES6 section 22.1.3.22 Array.prototype.shift ( ) Reduction JSCallReducer::ReduceArrayPrototypeShift(Node* node) { + DisallowHeapAccessIf disallow_heap_access(FLAG_concurrent_inlining); + DCHECK_EQ(IrOpcode::kJSCall, node->opcode()); CallParameters const& p = CallParametersOf(node->op()); if (p.speculation_mode() == SpeculationMode::kDisallowSpeculation) { @@ -4725,7 +4676,7 @@ Reduction JSCallReducer::ReduceArrayPrototypeShift(Node* node) { MapHandles const& receiver_maps = inference.GetMaps(); std::vector<ElementsKind> kinds; - if (!CanInlineArrayResizingBuiltin(broker(), receiver_maps, kinds)) { + if (!CanInlineArrayResizingBuiltin(broker(), receiver_maps, &kinds)) { return inference.NoChange(); } if (!dependencies()->DependOnNoElementsProtector()) UNREACHABLE(); @@ -4923,6 +4874,8 @@ Reduction JSCallReducer::ReduceArrayPrototypeShift(Node* node) { // ES6 section 22.1.3.23 Array.prototype.slice ( ) Reduction JSCallReducer::ReduceArrayPrototypeSlice(Node* node) { + DisallowHeapAccessIf disallow_heap_access(FLAG_concurrent_inlining); + if (!FLAG_turbo_inline_array_builtins) return NoChange(); DCHECK_EQ(IrOpcode::kJSCall, node->opcode()); CallParameters const& p = CallParametersOf(node->op()); @@ -4999,6 +4952,8 @@ Reduction JSCallReducer::ReduceArrayPrototypeSlice(Node* node) { // ES6 section 22.1.2.2 Array.isArray ( arg ) Reduction JSCallReducer::ReduceArrayIsArray(Node* node) { + DisallowHeapAccessIf disallow_heap_access(FLAG_concurrent_inlining); + // We certainly know that undefined is not an array. 
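
The DisallowHeapAccessIf guards added to the array builtins below make the no-heap-access discipline conditional on --concurrent-inlining: with the flag off these reducers may still touch the heap, with it on any heap access inside the scope should trip a debug check. A sketch of how such a conditional RAII guard can be built from an unconditional one (DisallowHeapAccess here is a mock counter, not V8's real assert scope):

#include <cassert>
#include <optional>

// Mock of an unconditional scope: while alive, heap access is forbidden.
class DisallowHeapAccess {
 public:
  DisallowHeapAccess() { ++depth_; }
  ~DisallowHeapAccess() { --depth_; }
  static bool IsAllowed() { return depth_ == 0; }

 private:
  static inline int depth_ = 0;
};

// Conditional variant: engages the scope only when |condition| holds.
class DisallowHeapAccessIf {
 public:
  explicit DisallowHeapAccessIf(bool condition) {
    if (condition) maybe_scope_.emplace();
  }

 private:
  std::optional<DisallowHeapAccess> maybe_scope_;
};

void ReduceSomething(bool concurrent_inlining) {
  DisallowHeapAccessIf disallow_heap_access(concurrent_inlining);
  assert(!concurrent_inlining || !DisallowHeapAccess::IsAllowed());
}
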
if (node->op()->ValueInputCount() < 3) { Node* value = jsgraph()->FalseConstant(); @@ -5022,6 +4977,8 @@ Reduction JSCallReducer::ReduceArrayIsArray(Node* node) { } Reduction JSCallReducer::ReduceArrayIterator(Node* node, IterationKind kind) { + DisallowHeapAccessIf disallow_heap_access(FLAG_concurrent_inlining); + DCHECK_EQ(IrOpcode::kJSCall, node->opcode()); Node* receiver = NodeProperties::GetValueInput(node, 1); Node* context = NodeProperties::GetContextInput(node); @@ -5047,6 +5004,8 @@ Reduction JSCallReducer::ReduceArrayIterator(Node* node, IterationKind kind) { // ES #sec-%arrayiteratorprototype%.next Reduction JSCallReducer::ReduceArrayIteratorPrototypeNext(Node* node) { + DisallowHeapAccessIf disallow_heap_access(FLAG_concurrent_inlining); + DCHECK_EQ(IrOpcode::kJSCall, node->opcode()); CallParameters const& p = CallParametersOf(node->op()); Node* iterator = NodeProperties::GetValueInput(node, 1); @@ -5681,10 +5640,14 @@ Node* JSCallReducer::CreateArtificialFrameState( bailout_id, OutputFrameStateCombine::Ignore(), state_info); const Operator* op0 = common()->StateValues(0, SparseInputMask::Dense()); Node* node0 = graph()->NewNode(op0); + + static constexpr int kTargetInputIndex = 0; + static constexpr int kReceiverInputIndex = 1; + const int parameter_count_with_receiver = parameter_count + 1; std::vector<Node*> params; - params.reserve(parameter_count + 1); - for (int parameter = 0; parameter < parameter_count + 1; ++parameter) { - params.push_back(node->InputAt(1 + parameter)); + params.reserve(parameter_count_with_receiver); + for (int i = 0; i < parameter_count_with_receiver; i++) { + params.push_back(node->InputAt(kReceiverInputIndex + i)); } const Operator* op_param = common()->StateValues( static_cast<int>(params.size()), SparseInputMask::Dense()); @@ -5694,7 +5657,7 @@ Node* JSCallReducer::CreateArtificialFrameState( context = jsgraph()->UndefinedConstant(); } return graph()->NewNode(op, params_node, node0, node0, context, - node->InputAt(0), outer_frame_state); + node->InputAt(kTargetInputIndex), outer_frame_state); } Reduction JSCallReducer::ReducePromiseConstructor(Node* node) { @@ -5804,7 +5767,7 @@ Reduction JSCallReducer::ReducePromiseConstructor(Node* node) { // 9. Call executor with both resolving functions effect = control = graph()->NewNode( - javascript()->Call(4, p.frequency(), VectorSlotPair(), + javascript()->Call(4, p.frequency(), FeedbackSource(), ConvertReceiverMode::kNullOrUndefined, SpeculationMode::kDisallowSpeculation), executor, jsgraph()->UndefinedConstant(), resolve, reject, context, @@ -5817,7 +5780,7 @@ Reduction JSCallReducer::ReducePromiseConstructor(Node* node) { common()->IfException(), exception_control, exception_effect); // 10a. Call reject if the call to executor threw. 
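
The CreateArtificialFrameState change above is a pure readability refactor: the magic input offsets 0 and 1 become kTargetInputIndex and kReceiverInputIndex, and the loop bound gets a name. The shape of that transformation, on a toy node type (Node and InputAt are stand-ins):

#include <utility>
#include <vector>

struct Node {
  std::vector<Node*> inputs;
  Node* InputAt(int i) const { return inputs[i]; }
};

// Value inputs of a call-like node are laid out as: target, receiver, args...
std::pair<Node*, std::vector<Node*>> SplitCallInputs(const Node* node,
                                                     int parameter_count) {
  static constexpr int kTargetInputIndex = 0;
  static constexpr int kReceiverInputIndex = 1;
  const int parameter_count_with_receiver = parameter_count + 1;
  std::vector<Node*> params;
  params.reserve(parameter_count_with_receiver);
  for (int i = 0; i < parameter_count_with_receiver; i++) {
    params.push_back(node->InputAt(kReceiverInputIndex + i));
  }
  return {node->InputAt(kTargetInputIndex), params};
}
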
exception_effect = exception_control = graph()->NewNode( - javascript()->Call(3, p.frequency(), VectorSlotPair(), + javascript()->Call(3, p.frequency(), FeedbackSource(), ConvertReceiverMode::kNullOrUndefined, SpeculationMode::kDisallowSpeculation), reject, jsgraph()->UndefinedConstant(), reason, context, frame_state, @@ -5928,9 +5891,7 @@ bool JSCallReducer::DoPromiseChecks(MapInference* inference) { for (Handle<Map> map : receiver_maps) { MapRef receiver_map(broker(), map); if (!receiver_map.IsJSPromiseMap()) return false; - if (!FLAG_concurrent_inlining) { - receiver_map.SerializePrototype(); - } else if (!receiver_map.serialized_prototype()) { + if (FLAG_concurrent_inlining && !receiver_map.serialized_prototype()) { TRACE_BROKER_MISSING(broker(), "prototype for map " << receiver_map); return false; } @@ -6109,7 +6070,7 @@ Reduction JSCallReducer::ReducePromisePrototypeFinally(Node* node) { } Reduction JSCallReducer::ReducePromisePrototypeThen(Node* node) { - DisallowHeapAccessIf no_heap_acess(FLAG_concurrent_inlining); + DisallowHeapAccessIf no_heap_access(FLAG_concurrent_inlining); DCHECK_EQ(IrOpcode::kJSCall, node->opcode()); CallParameters const& p = CallParametersOf(node->op()); @@ -6177,7 +6138,7 @@ Reduction JSCallReducer::ReducePromisePrototypeThen(Node* node) { // ES section #sec-promise.resolve Reduction JSCallReducer::ReducePromiseResolveTrampoline(Node* node) { - DisallowHeapAccessIf no_heap_acess(FLAG_concurrent_inlining); + DisallowHeapAccessIf no_heap_access(FLAG_concurrent_inlining); DCHECK_EQ(IrOpcode::kJSCall, node->opcode()); Node* receiver = NodeProperties::GetValueInput(node, 1); @@ -6293,8 +6254,13 @@ Reduction JSCallReducer::ReduceTypedArrayPrototypeToStringTag(Node* node) { jsgraph()->Constant(TYPE##_ELEMENTS - \ FIRST_FIXED_TYPED_ARRAY_ELEMENTS_KIND)); \ control = graph()->NewNode(common()->Branch(), check, control); \ - values.push_back(jsgraph()->HeapConstant( \ - factory()->InternalizeUtf8String(#Type "Array"))); \ + if (FLAG_concurrent_inlining) { \ + values.push_back(jsgraph()->Constant( \ + broker()->GetTypedArrayStringTag(TYPE##_ELEMENTS))); \ + } else { \ + values.push_back(jsgraph()->HeapConstant( \ + factory()->InternalizeUtf8String(#Type "Array"))); \ + } \ effects.push_back(effect); \ controls.push_back(graph()->NewNode(common()->IfTrue(), control)); \ control = graph()->NewNode(common()->IfFalse(), control); \ @@ -6536,9 +6502,10 @@ Reduction JSCallReducer::ReduceCollectionIteratorPrototypeNext( MapInference inference(broker(), receiver, effect); if (!inference.HaveMaps()) return NoChange(); MapHandles const& receiver_maps = inference.GetMaps(); - receiver_instance_type = receiver_maps[0]->instance_type(); + receiver_instance_type = MapRef(broker(), receiver_maps[0]).instance_type(); for (size_t i = 1; i < receiver_maps.size(); ++i) { - if (receiver_maps[i]->instance_type() != receiver_instance_type) { + if (MapRef(broker(), receiver_maps[i]).instance_type() != + receiver_instance_type) { return inference.NoChange(); } } @@ -6799,6 +6766,8 @@ Reduction JSCallReducer::ReduceCollectionIteratorPrototypeNext( } Reduction JSCallReducer::ReduceArrayBufferIsView(Node* node) { + DisallowHeapAccessIf disallow_heap_access(FLAG_concurrent_inlining); + Node* value = node->op()->ValueInputCount() >= 3 ? 
NodeProperties::GetValueInput(node, 2) : jsgraph()->UndefinedConstant(); @@ -6811,6 +6780,8 @@ Reduction JSCallReducer::ReduceArrayBufferIsView(Node* node) { Reduction JSCallReducer::ReduceArrayBufferViewAccessor( Node* node, InstanceType instance_type, FieldAccess const& access) { + DisallowHeapAccessIf disallow_heap_access(FLAG_concurrent_inlining); + Node* receiver = NodeProperties::GetValueInput(node, 1); Node* effect = NodeProperties::GetEffectInput(node); Node* control = NodeProperties::GetControlInput(node); @@ -7142,19 +7113,20 @@ Reduction JSCallReducer::ReduceRegExpPrototypeTest(Node* node) { ZoneVector<PropertyAccessInfo> access_infos(graph()->zone()); AccessInfoFactory access_info_factory(broker(), dependencies(), graph()->zone()); - if (!FLAG_concurrent_inlining) { - // Compute property access info for "exec" on {resolution}. - access_info_factory.ComputePropertyAccessInfos( - MapHandles(regexp_maps.begin(), regexp_maps.end()), - factory()->exec_string(), AccessMode::kLoad, &access_infos); - } else { + if (FLAG_concurrent_inlining) { // Obtain precomputed access infos from the broker. for (auto map : regexp_maps) { MapRef map_ref(broker(), map); - PropertyAccessInfo access_info = - broker()->GetAccessInfoForLoadingExec(map_ref); + PropertyAccessInfo access_info = broker()->GetPropertyAccessInfo( + map_ref, NameRef(broker(), isolate()->factory()->exec_string()), + AccessMode::kLoad); access_infos.push_back(access_info); } + } else { + // Compute property access info for "exec" on {resolution}. + access_info_factory.ComputePropertyAccessInfos( + MapHandles(regexp_maps.begin(), regexp_maps.end()), + factory()->exec_string(), AccessMode::kLoad, &access_infos); } PropertyAccessInfo ai_exec = @@ -7171,7 +7143,7 @@ Reduction JSCallReducer::ReduceRegExpPrototypeTest(Node* node) { JSObjectRef holder_ref(broker(), holder); // Bail out if the exec method is not the original one. - base::Optional<ObjectRef> constant = holder_ref.GetOwnProperty( + base::Optional<ObjectRef> constant = holder_ref.GetOwnDataProperty( ai_exec.field_representation(), ai_exec.field_index()); if (!constant.has_value() || !constant->equals(native_context().regexp_exec_function())) { @@ -7287,7 +7259,7 @@ Isolate* JSCallReducer::isolate() const { return jsgraph()->isolate(); } Factory* JSCallReducer::factory() const { return isolate()->factory(); } NativeContextRef JSCallReducer::native_context() const { - return broker()->native_context(); + return broker()->target_native_context(); } CommonOperatorBuilder* JSCallReducer::common() const { diff --git a/deps/v8/src/compiler/js-call-reducer.h b/deps/v8/src/compiler/js-call-reducer.h index bf3676c5b2..66c42cfb63 100644 --- a/deps/v8/src/compiler/js-call-reducer.h +++ b/deps/v8/src/compiler/js-call-reducer.h @@ -17,7 +17,6 @@ namespace internal { // Forward declarations. 
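
A pattern recurs across the hunks above: data that used to be read straight off the heap (receiver_maps[0]->instance_type(), function.Serialize(), holder_ref.GetOwnProperty(...)) is now read through broker refs that may only use previously serialized data, and reducers bail out with TRACE_BROKER_MISSING when that data was not gathered ahead of time. A sketch of the guard shape under assumed mock types (serialized() and NoChange() mirror the diff; everything else is illustrative):

#include <iostream>

struct Reduction { bool changed; };
Reduction NoChange() { return {false}; }

struct JSFunctionRef {
  bool serialized_ = false;
  bool serialized() const { return serialized_; }
};

// With --concurrent-inlining the reducer runs off the main thread, so a
// ref whose backing data was never copied out of the heap is unusable;
// record the miss (to improve the serializer later) and give up.
Reduction ReduceWithGuard(const JSFunctionRef& function,
                          bool concurrent_inlining) {
  if (concurrent_inlining && !function.serialized()) {
    std::cerr << "broker missing: function not serialized\n";
    return NoChange();
  }
  // ... safe to consult the function's serialized data here ...
  return {true};
}
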
class Factory; class JSGlobalProxy; -class VectorSlotPair; namespace compiler { @@ -25,6 +24,7 @@ namespace compiler { class CallFrequency; class CommonOperatorBuilder; class CompilationDependencies; +struct FeedbackSource; struct FieldAccess; class JSGraph; class JSHeapBroker; @@ -106,7 +106,7 @@ class V8_EXPORT_PRIVATE JSCallReducer final : public AdvancedReducer { Reduction ReduceCallOrConstructWithArrayLikeOrSpread( Node* node, int arity, CallFrequency const& frequency, - VectorSlotPair const& feedback); + FeedbackSource const& feedback); Reduction ReduceJSConstruct(Node* node); Reduction ReduceJSConstructWithArrayLike(Node* node); Reduction ReduceJSConstructWithSpread(Node* node); @@ -156,7 +156,6 @@ class V8_EXPORT_PRIVATE JSCallReducer final : public AdvancedReducer { Reduction ReduceMathImul(Node* node); Reduction ReduceMathClz32(Node* node); Reduction ReduceMathMinMax(Node* node, const Operator* op, Node* empty_value); - Reduction ReduceMathHypot(Node* node); Reduction ReduceNumberIsFinite(Node* node); Reduction ReduceNumberIsInteger(Node* node); @@ -234,7 +233,7 @@ class V8_EXPORT_PRIVATE JSCallReducer final : public AdvancedReducer { // k is thusly changed, and the effect is changed as well. Node* SafeLoadElement(ElementsKind kind, Node* receiver, Node* control, Node** effect, Node** k, - const VectorSlotPair& feedback); + const FeedbackSource& feedback); Node* CreateArtificialFrameState(Node* node, Node* outer_frame_state, int parameter_count, BailoutId bailout_id, diff --git a/deps/v8/src/compiler/js-create-lowering.cc b/deps/v8/src/compiler/js-create-lowering.cc index 4e69db6b9b..cb52ccaccb 100644 --- a/deps/v8/src/compiler/js-create-lowering.cc +++ b/deps/v8/src/compiler/js-create-lowering.cc @@ -127,7 +127,7 @@ Reduction JSCreateLowering::ReduceJSCreate(Node* node) { AllocationBuilder a(jsgraph(), effect, control); a.Allocate(slack_tracking_prediction.instance_size()); a.Store(AccessBuilder::ForMap(), *initial_map); - a.Store(AccessBuilder::ForJSObjectPropertiesOrHash(), + a.Store(AccessBuilder::ForJSObjectPropertiesOrHashKnownPointer(), jsgraph()->EmptyFixedArrayConstant()); a.Store(AccessBuilder::ForJSObjectElements(), jsgraph()->EmptyFixedArrayConstant()); @@ -180,11 +180,11 @@ Reduction JSCreateLowering::ReduceJSCreateArguments(Node* node) { : native_context().sloppy_arguments_map()); // Actually allocate and initialize the arguments object. AllocationBuilder a(jsgraph(), effect, control); - Node* properties = jsgraph()->EmptyFixedArrayConstant(); STATIC_ASSERT(JSSloppyArgumentsObject::kSize == 5 * kTaggedSize); a.Allocate(JSSloppyArgumentsObject::kSize); a.Store(AccessBuilder::ForMap(), arguments_map); - a.Store(AccessBuilder::ForJSObjectPropertiesOrHash(), properties); + a.Store(AccessBuilder::ForJSObjectPropertiesOrHashKnownPointer(), + jsgraph()->EmptyFixedArrayConstant()); a.Store(AccessBuilder::ForJSObjectElements(), elements); a.Store(AccessBuilder::ForArgumentsLength(), arguments_length); a.Store(AccessBuilder::ForArgumentsCallee(), callee); @@ -209,11 +209,11 @@ Reduction JSCreateLowering::ReduceJSCreateArguments(Node* node) { jsgraph()->Constant(native_context().strict_arguments_map()); // Actually allocate and initialize the arguments object. 
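
The new ForJSObjectPropertiesOrHashKnownPointer accessor used below exists because the properties-or-hash slot is a union: it may hold a Smi hash code or a pointer to a property backing store. The generic accessor therefore needs TypeCompressedTagged and a full write barrier (which first checks for Smis), while all of these allocation sites store the empty fixed array — a statically known HeapObject — so the KnownPointer variant can use TypeCompressedTaggedPointer and the cheaper pointer write barrier. A sketch of the distinction, with FieldAccess reduced to the two members that differ:

#include <cassert>

enum class MachineRep { kTagged, kTaggedPointer, kTaggedSigned };
enum class WriteBarrierKind {
  kNoWriteBarrier, kPointerWriteBarrier, kFullWriteBarrier
};

struct FieldAccessSketch {
  MachineRep machine_type;
  WriteBarrierKind write_barrier_kind;
};

// Stored value may be a Smi hash OR a pointer: handle both at store time.
constexpr FieldAccessSketch kPropertiesOrHash = {
    MachineRep::kTagged, WriteBarrierKind::kFullWriteBarrier};

// Caller guarantees the stored value is a HeapObject (e.g. the empty
// fixed array), so the Smi check in the barrier can be skipped entirely.
constexpr FieldAccessSketch kPropertiesOrHashKnownPointer = {
    MachineRep::kTaggedPointer, WriteBarrierKind::kPointerWriteBarrier};

int main() {
  assert(kPropertiesOrHash.write_barrier_kind ==
         WriteBarrierKind::kFullWriteBarrier);
  assert(kPropertiesOrHashKnownPointer.machine_type ==
         MachineRep::kTaggedPointer);
}
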
AllocationBuilder a(jsgraph(), effect, control); - Node* properties = jsgraph()->EmptyFixedArrayConstant(); STATIC_ASSERT(JSStrictArgumentsObject::kSize == 4 * kTaggedSize); a.Allocate(JSStrictArgumentsObject::kSize); a.Store(AccessBuilder::ForMap(), arguments_map); - a.Store(AccessBuilder::ForJSObjectPropertiesOrHash(), properties); + a.Store(AccessBuilder::ForJSObjectPropertiesOrHashKnownPointer(), + jsgraph()->EmptyFixedArrayConstant()); a.Store(AccessBuilder::ForJSObjectElements(), elements); a.Store(AccessBuilder::ForArgumentsLength(), arguments_length); RelaxControls(node); @@ -239,11 +239,11 @@ Reduction JSCreateLowering::ReduceJSCreateArguments(Node* node) { native_context().js_array_packed_elements_map()); // Actually allocate and initialize the jsarray. AllocationBuilder a(jsgraph(), effect, control); - Node* properties = jsgraph()->EmptyFixedArrayConstant(); STATIC_ASSERT(JSArray::kSize == 4 * kTaggedSize); a.Allocate(JSArray::kSize); a.Store(AccessBuilder::ForMap(), jsarray_map); - a.Store(AccessBuilder::ForJSObjectPropertiesOrHash(), properties); + a.Store(AccessBuilder::ForJSObjectPropertiesOrHashKnownPointer(), + jsgraph()->EmptyFixedArrayConstant()); a.Store(AccessBuilder::ForJSObjectElements(), elements); a.Store(AccessBuilder::ForJSArrayLength(PACKED_ELEMENTS), rest_length); RelaxControls(node); @@ -284,12 +284,12 @@ Reduction JSCreateLowering::ReduceJSCreateArguments(Node* node) { : native_context().sloppy_arguments_map()); // Actually allocate and initialize the arguments object. AllocationBuilder a(jsgraph(), effect, control); - Node* properties = jsgraph()->EmptyFixedArrayConstant(); int length = args_state_info.parameter_count() - 1; // Minus receiver. STATIC_ASSERT(JSSloppyArgumentsObject::kSize == 5 * kTaggedSize); a.Allocate(JSSloppyArgumentsObject::kSize); a.Store(AccessBuilder::ForMap(), arguments_map); - a.Store(AccessBuilder::ForJSObjectPropertiesOrHash(), properties); + a.Store(AccessBuilder::ForJSObjectPropertiesOrHashKnownPointer(), + jsgraph()->EmptyFixedArrayConstant()); a.Store(AccessBuilder::ForJSObjectElements(), elements); a.Store(AccessBuilder::ForArgumentsLength(), jsgraph()->Constant(length)); a.Store(AccessBuilder::ForArgumentsCallee(), callee); @@ -320,12 +320,12 @@ Reduction JSCreateLowering::ReduceJSCreateArguments(Node* node) { jsgraph()->Constant(native_context().strict_arguments_map()); // Actually allocate and initialize the arguments object. AllocationBuilder a(jsgraph(), effect, control); - Node* properties = jsgraph()->EmptyFixedArrayConstant(); int length = args_state_info.parameter_count() - 1; // Minus receiver. STATIC_ASSERT(JSStrictArgumentsObject::kSize == 4 * kTaggedSize); a.Allocate(JSStrictArgumentsObject::kSize); a.Store(AccessBuilder::ForMap(), arguments_map); - a.Store(AccessBuilder::ForJSObjectPropertiesOrHash(), properties); + a.Store(AccessBuilder::ForJSObjectPropertiesOrHashKnownPointer(), + jsgraph()->EmptyFixedArrayConstant()); a.Store(AccessBuilder::ForJSObjectElements(), elements); a.Store(AccessBuilder::ForArgumentsLength(), jsgraph()->Constant(length)); RelaxControls(node); @@ -357,7 +357,6 @@ Reduction JSCreateLowering::ReduceJSCreateArguments(Node* node) { jsgraph()->Constant(native_context().js_array_packed_elements_map()); // Actually allocate and initialize the jsarray. 
AllocationBuilder a(jsgraph(), effect, control); - Node* properties = jsgraph()->EmptyFixedArrayConstant(); // -1 to minus receiver int argument_count = args_state_info.parameter_count() - 1; @@ -365,7 +364,8 @@ Reduction JSCreateLowering::ReduceJSCreateArguments(Node* node) { STATIC_ASSERT(JSArray::kSize == 4 * kTaggedSize); a.Allocate(JSArray::kSize); a.Store(AccessBuilder::ForMap(), jsarray_map); - a.Store(AccessBuilder::ForJSObjectPropertiesOrHash(), properties); + a.Store(AccessBuilder::ForJSObjectPropertiesOrHashKnownPointer(), + jsgraph()->EmptyFixedArrayConstant()); a.Store(AccessBuilder::ForJSObjectElements(), elements); a.Store(AccessBuilder::ForJSArrayLength(PACKED_ELEMENTS), jsgraph()->Constant(length)); @@ -406,7 +406,7 @@ Reduction JSCreateLowering::ReduceJSCreateGeneratorObject(Node* node) { int size = parameter_count_no_receiver + shared.GetBytecodeArray().register_count(); AllocationBuilder ab(jsgraph(), effect, control); - ab.AllocateArray(size, factory()->fixed_array_map()); + ab.AllocateArray(size, MapRef(broker(), factory()->fixed_array_map())); for (int i = 0; i < size; ++i) { ab.Store(AccessBuilder::ForFixedArraySlot(i), jsgraph()->UndefinedConstant()); @@ -416,11 +416,12 @@ Reduction JSCreateLowering::ReduceJSCreateGeneratorObject(Node* node) { // Emit code to allocate the JS[Async]GeneratorObject instance. AllocationBuilder a(jsgraph(), effect, control); a.Allocate(slack_tracking_prediction.instance_size()); - Node* empty_fixed_array = jsgraph()->EmptyFixedArrayConstant(); Node* undefined = jsgraph()->UndefinedConstant(); a.Store(AccessBuilder::ForMap(), initial_map); - a.Store(AccessBuilder::ForJSObjectPropertiesOrHash(), empty_fixed_array); - a.Store(AccessBuilder::ForJSObjectElements(), empty_fixed_array); + a.Store(AccessBuilder::ForJSObjectPropertiesOrHashKnownPointer(), + jsgraph()->EmptyFixedArrayConstant()); + a.Store(AccessBuilder::ForJSObjectElements(), + jsgraph()->EmptyFixedArrayConstant()); a.Store(AccessBuilder::ForJSGeneratorObjectContext(), context); a.Store(AccessBuilder::ForJSGeneratorObjectFunction(), closure); a.Store(AccessBuilder::ForJSGeneratorObjectReceiver(), receiver); @@ -470,7 +471,7 @@ Reduction JSCreateLowering::ReduceNewArray( // This has to be kept in sync with src/runtime/runtime-array.cc, // where this limit is protected. length = effect = graph()->NewNode( - simplified()->CheckBounds(VectorSlotPair()), length, + simplified()->CheckBounds(FeedbackSource()), length, jsgraph()->Constant(JSArray::kInitialMaxFastElementArray), effect, control); @@ -480,13 +481,13 @@ Reduction JSCreateLowering::ReduceNewArray( ? simplified()->NewDoubleElements(allocation) : simplified()->NewSmiOrObjectElements(allocation), length, effect, control); - Node* properties = jsgraph()->EmptyFixedArrayConstant(); // Perform the allocation of the actual JSArray object. 
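
Every hunk above follows the same inlined-allocation recipe: construct an AllocationBuilder over the current effect/control, Allocate() the object, Store() each field (map first), then finish to obtain the resulting value/effect. The repeated edit is only that the properties store now goes through the KnownPointer access with the constant inlined. A schematic of the builder protocol — all types are mocks that record stores instead of emitting Turbofan graph nodes:

#include <cstdio>
#include <string>
#include <vector>

class AllocationBuilderSketch {
 public:
  void Allocate(int size) { std::printf("allocate %d bytes\n", size); }
  void Store(const std::string& field, const std::string& value) {
    stores_.push_back(field + " <- " + value);
  }
  void Finish() {
    for (const auto& s : stores_) std::printf("store %s\n", s.c_str());
  }

 private:
  std::vector<std::string> stores_;
};

int main() {
  AllocationBuilderSketch a;
  a.Allocate(4 * 8);  // JSArray::kSize == 4 * kTaggedSize on 64-bit
  a.Store("map", "jsarray_map");
  a.Store("properties_or_hash", "empty_fixed_array");  // known pointer
  a.Store("elements", "elements");
  a.Store("length", "length");
  a.Finish();
}
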
AllocationBuilder a(jsgraph(), effect, control); a.Allocate(slack_tracking_prediction.instance_size(), allocation); a.Store(AccessBuilder::ForMap(), initial_map); - a.Store(AccessBuilder::ForJSObjectPropertiesOrHash(), properties); + a.Store(AccessBuilder::ForJSObjectPropertiesOrHashKnownPointer(), + jsgraph()->EmptyFixedArrayConstant()); a.Store(AccessBuilder::ForJSObjectElements(), elements); a.Store(AccessBuilder::ForJSArrayLength(initial_map.elements_kind()), length); for (int i = 0; i < slack_tracking_prediction.inobject_property_count(); @@ -526,13 +527,13 @@ Reduction JSCreateLowering::ReduceNewArray( elements = effect = AllocateElements(effect, control, elements_kind, capacity, allocation); } - Node* properties = jsgraph()->EmptyFixedArrayConstant(); // Perform the allocation of the actual JSArray object. AllocationBuilder a(jsgraph(), effect, control); a.Allocate(slack_tracking_prediction.instance_size(), allocation); a.Store(AccessBuilder::ForMap(), initial_map); - a.Store(AccessBuilder::ForJSObjectPropertiesOrHash(), properties); + a.Store(AccessBuilder::ForJSObjectPropertiesOrHashKnownPointer(), + jsgraph()->EmptyFixedArrayConstant()); a.Store(AccessBuilder::ForJSObjectElements(), elements); a.Store(AccessBuilder::ForJSArrayLength(elements_kind), length); for (int i = 0; i < slack_tracking_prediction.inobject_property_count(); @@ -565,14 +566,14 @@ Reduction JSCreateLowering::ReduceNewArray( for (auto& value : values) { if (!NodeProperties::GetType(value).Is(Type::SignedSmall())) { value = effect = graph()->NewNode( - simplified()->CheckSmi(VectorSlotPair()), value, effect, control); + simplified()->CheckSmi(FeedbackSource()), value, effect, control); } } } else if (IsDoubleElementsKind(elements_kind)) { for (auto& value : values) { if (!NodeProperties::GetType(value).Is(Type::Number())) { value = effect = - graph()->NewNode(simplified()->CheckNumber(VectorSlotPair()), value, + graph()->NewNode(simplified()->CheckNumber(FeedbackSource()), value, effect, control); } // Make sure we do not store signaling NaNs into double arrays. @@ -583,14 +584,14 @@ Reduction JSCreateLowering::ReduceNewArray( // Setup elements, properties and length. Node* elements = effect = AllocateElements(effect, control, elements_kind, values, allocation); - Node* properties = jsgraph()->EmptyFixedArrayConstant(); Node* length = jsgraph()->Constant(static_cast<int>(values.size())); // Perform the allocation of the actual JSArray object. 
AllocationBuilder a(jsgraph(), effect, control); a.Allocate(slack_tracking_prediction.instance_size(), allocation); a.Store(AccessBuilder::ForMap(), initial_map); - a.Store(AccessBuilder::ForJSObjectPropertiesOrHash(), properties); + a.Store(AccessBuilder::ForJSObjectPropertiesOrHashKnownPointer(), + jsgraph()->EmptyFixedArrayConstant()); a.Store(AccessBuilder::ForJSObjectElements(), elements); a.Store(AccessBuilder::ForJSArrayLength(elements_kind), length); for (int i = 0; i < slack_tracking_prediction.inobject_property_count(); @@ -735,7 +736,7 @@ Reduction JSCreateLowering::ReduceJSCreateArrayIterator(Node* node) { Type::OtherObject()); a.Store(AccessBuilder::ForMap(), native_context().initial_array_iterator_map()); - a.Store(AccessBuilder::ForJSObjectPropertiesOrHash(), + a.Store(AccessBuilder::ForJSObjectPropertiesOrHashKnownPointer(), jsgraph()->EmptyFixedArrayConstant()); a.Store(AccessBuilder::ForJSObjectElements(), jsgraph()->EmptyFixedArrayConstant()); @@ -761,7 +762,8 @@ Reduction JSCreateLowering::ReduceJSCreateAsyncFunctionObject(Node* node) { // Create the register file. AllocationBuilder ab(jsgraph(), effect, control); - ab.AllocateArray(register_count, factory()->fixed_array_map()); + ab.AllocateArray(register_count, + MapRef(broker(), factory()->fixed_array_map())); for (int i = 0; i < register_count; ++i) { ab.Store(AccessBuilder::ForFixedArraySlot(i), jsgraph()->UndefinedConstant()); @@ -771,11 +773,12 @@ Reduction JSCreateLowering::ReduceJSCreateAsyncFunctionObject(Node* node) { // Create the JSAsyncFunctionObject result. AllocationBuilder a(jsgraph(), effect, control); a.Allocate(JSAsyncFunctionObject::kSize); - Node* empty_fixed_array = jsgraph()->EmptyFixedArrayConstant(); a.Store(AccessBuilder::ForMap(), native_context().async_function_object_map()); - a.Store(AccessBuilder::ForJSObjectPropertiesOrHash(), empty_fixed_array); - a.Store(AccessBuilder::ForJSObjectElements(), empty_fixed_array); + a.Store(AccessBuilder::ForJSObjectPropertiesOrHashKnownPointer(), + jsgraph()->EmptyFixedArrayConstant()); + a.Store(AccessBuilder::ForJSObjectElements(), + jsgraph()->EmptyFixedArrayConstant()); a.Store(AccessBuilder::ForJSGeneratorObjectContext(), context); a.Store(AccessBuilder::ForJSGeneratorObjectFunction(), closure); a.Store(AccessBuilder::ForJSGeneratorObjectReceiver(), receiver); @@ -844,7 +847,7 @@ Reduction JSCreateLowering::ReduceJSCreateCollectionIterator(Node* node) { a.Store(AccessBuilder::ForMap(), MapForCollectionIterationKind(native_context(), p.collection_kind(), p.iteration_kind())); - a.Store(AccessBuilder::ForJSObjectPropertiesOrHash(), + a.Store(AccessBuilder::ForJSObjectPropertiesOrHashKnownPointer(), jsgraph()->EmptyFixedArrayConstant()); a.Store(AccessBuilder::ForJSObjectElements(), jsgraph()->EmptyFixedArrayConstant()); @@ -871,7 +874,7 @@ Reduction JSCreateLowering::ReduceJSCreateBoundFunction(Node* node) { Node* bound_arguments = jsgraph()->EmptyFixedArrayConstant(); if (arity > 0) { AllocationBuilder a(jsgraph(), effect, control); - a.AllocateArray(arity, factory()->fixed_array_map()); + a.AllocateArray(arity, MapRef(broker(), factory()->fixed_array_map())); for (int i = 0; i < arity; ++i) { a.Store(AccessBuilder::ForFixedArraySlot(i), NodeProperties::GetValueInput(node, 2 + i)); @@ -884,7 +887,7 @@ Reduction JSCreateLowering::ReduceJSCreateBoundFunction(Node* node) { a.Allocate(JSBoundFunction::kSize, AllocationType::kYoung, Type::BoundFunction()); a.Store(AccessBuilder::ForMap(), map); - a.Store(AccessBuilder::ForJSObjectPropertiesOrHash(), + 
a.Store(AccessBuilder::ForJSObjectPropertiesOrHashKnownPointer(), jsgraph()->EmptyFixedArrayConstant()); a.Store(AccessBuilder::ForJSObjectElements(), jsgraph()->EmptyFixedArrayConstant()); @@ -936,7 +939,7 @@ Reduction JSCreateLowering::ReduceJSCreateClosure(Node* node) { AllocationBuilder a(jsgraph(), effect, control); a.Allocate(function_map.instance_size(), allocation, Type::Function()); a.Store(AccessBuilder::ForMap(), function_map); - a.Store(AccessBuilder::ForJSObjectPropertiesOrHash(), + a.Store(AccessBuilder::ForJSObjectPropertiesOrHashKnownPointer(), jsgraph()->EmptyFixedArrayConstant()); a.Store(AccessBuilder::ForJSObjectElements(), jsgraph()->EmptyFixedArrayConstant()); @@ -972,7 +975,7 @@ Reduction JSCreateLowering::ReduceJSCreateIterResultObject(Node* node) { AllocationBuilder a(jsgraph(), effect, graph()->start()); a.Allocate(JSIteratorResult::kSize); a.Store(AccessBuilder::ForMap(), iterator_result_map); - a.Store(AccessBuilder::ForJSObjectPropertiesOrHash(), + a.Store(AccessBuilder::ForJSObjectPropertiesOrHashKnownPointer(), jsgraph()->EmptyFixedArrayConstant()); a.Store(AccessBuilder::ForJSObjectElements(), jsgraph()->EmptyFixedArrayConstant()); @@ -995,7 +998,7 @@ Reduction JSCreateLowering::ReduceJSCreateStringIterator(Node* node) { a.Allocate(JSStringIterator::kSize, AllocationType::kYoung, Type::OtherObject()); a.Store(AccessBuilder::ForMap(), map); - a.Store(AccessBuilder::ForJSObjectPropertiesOrHash(), + a.Store(AccessBuilder::ForJSObjectPropertiesOrHashKnownPointer(), jsgraph()->EmptyFixedArrayConstant()); a.Store(AccessBuilder::ForJSObjectElements(), jsgraph()->EmptyFixedArrayConstant()); @@ -1014,11 +1017,10 @@ Reduction JSCreateLowering::ReduceJSCreateKeyValueArray(Node* node) { Node* array_map = jsgraph()->Constant(native_context().js_array_packed_elements_map()); - Node* properties = jsgraph()->EmptyFixedArrayConstant(); Node* length = jsgraph()->Constant(2); AllocationBuilder aa(jsgraph(), effect, graph()->start()); - aa.AllocateArray(2, factory()->fixed_array_map()); + aa.AllocateArray(2, MapRef(broker(), factory()->fixed_array_map())); aa.Store(AccessBuilder::ForFixedArrayElement(PACKED_ELEMENTS), jsgraph()->ZeroConstant(), key); aa.Store(AccessBuilder::ForFixedArrayElement(PACKED_ELEMENTS), @@ -1028,7 +1030,8 @@ Reduction JSCreateLowering::ReduceJSCreateKeyValueArray(Node* node) { AllocationBuilder a(jsgraph(), elements, graph()->start()); a.Allocate(JSArray::kSize); a.Store(AccessBuilder::ForMap(), array_map); - a.Store(AccessBuilder::ForJSObjectPropertiesOrHash(), properties); + a.Store(AccessBuilder::ForJSObjectPropertiesOrHashKnownPointer(), + jsgraph()->EmptyFixedArrayConstant()); a.Store(AccessBuilder::ForJSObjectElements(), elements); a.Store(AccessBuilder::ForJSArrayLength(PACKED_ELEMENTS), length); STATIC_ASSERT(JSArray::kSize == 4 * kTaggedSize); @@ -1045,7 +1048,7 @@ Reduction JSCreateLowering::ReduceJSCreatePromise(Node* node) { AllocationBuilder a(jsgraph(), effect, graph()->start()); a.Allocate(promise_map.instance_size()); a.Store(AccessBuilder::ForMap(), promise_map); - a.Store(AccessBuilder::ForJSObjectPropertiesOrHash(), + a.Store(AccessBuilder::ForJSObjectPropertiesOrHashKnownPointer(), jsgraph()->EmptyFixedArrayConstant()); a.Store(AccessBuilder::ForJSObjectElements(), jsgraph()->EmptyFixedArrayConstant()); @@ -1071,8 +1074,12 @@ Reduction JSCreateLowering::ReduceJSCreateLiteralArrayOrObject(Node* node) { Node* effect = NodeProperties::GetEffectInput(node); Node* control = NodeProperties::GetControlInput(node); - FeedbackVectorRef 
feedback_vector(broker(), p.feedback().vector());
-  ObjectRef feedback = feedback_vector.get(p.feedback().slot());
+  FeedbackVectorRef feedback_vector(broker(), p.feedback().vector);
+  ObjectRef feedback = feedback_vector.get(p.feedback().slot);
+  // TODO(turbofan): we should consider creating a ProcessedFeedback for
+  // allocation sites/boilerplates so that we can use GetFeedback here. Then
+  // we can eventually get rid of the additional copy of feedback slots that
+  // we currently have in FeedbackVectorData.
   if (feedback.IsAllocationSite()) {
     AllocationSiteRef site = feedback.AsAllocationSite();
     if (site.IsFastLiteral()) {
@@ -1094,8 +1101,12 @@ Reduction JSCreateLowering::ReduceJSCreateLiteralArrayOrObject(Node* node) {
 Reduction JSCreateLowering::ReduceJSCreateEmptyLiteralArray(Node* node) {
   DCHECK_EQ(IrOpcode::kJSCreateEmptyLiteralArray, node->opcode());
   FeedbackParameter const& p = FeedbackParameterOf(node->op());
-  FeedbackVectorRef fv(broker(), p.feedback().vector());
-  ObjectRef feedback = fv.get(p.feedback().slot());
+  FeedbackVectorRef fv(broker(), p.feedback().vector);
+  ObjectRef feedback = fv.get(p.feedback().slot);
+  // TODO(turbofan): we should consider creating a ProcessedFeedback for
+  // allocation sites/boilerplates so that we can use GetFeedback here. Then
+  // we can eventually get rid of the additional copy of feedback slots that
+  // we currently have in FeedbackVectorData.
   if (feedback.IsAllocationSite()) {
     AllocationSiteRef site = feedback.AsAllocationSite();
     DCHECK(!site.PointsToLiteral());
@@ -1128,13 +1139,13 @@ Reduction JSCreateLowering::ReduceJSCreateEmptyLiteralObject(Node* node) {
   // Setup elements and properties.
   Node* elements = jsgraph()->EmptyFixedArrayConstant();
-  Node* properties = jsgraph()->EmptyFixedArrayConstant();
   // Perform the allocation of the actual JSArray object.
   AllocationBuilder a(jsgraph(), effect, control);
   a.Allocate(map.instance_size());
   a.Store(AccessBuilder::ForMap(), js_object_map);
-  a.Store(AccessBuilder::ForJSObjectPropertiesOrHash(), properties);
+  a.Store(AccessBuilder::ForJSObjectPropertiesOrHashKnownPointer(),
+          jsgraph()->EmptyFixedArrayConstant());
   a.Store(AccessBuilder::ForJSObjectElements(), elements);
   for (int i = 0; i < map.GetInObjectProperties(); i++) {
     a.Store(AccessBuilder::ForJSObjectInObjectProperty(map, i),
@@ -1152,8 +1163,12 @@ Reduction JSCreateLowering::ReduceJSCreateLiteralRegExp(Node* node) {
   Node* effect = NodeProperties::GetEffectInput(node);
   Node* control = NodeProperties::GetControlInput(node);
-  FeedbackVectorRef feedback_vector(broker(), p.feedback().vector());
-  ObjectRef feedback = feedback_vector.get(p.feedback().slot());
+  FeedbackVectorRef feedback_vector(broker(), p.feedback().vector);
+  ObjectRef feedback = feedback_vector.get(p.feedback().slot);
+  // TODO(turbofan): we should consider creating a ProcessedFeedback for
+  // allocation sites/boilerplates so that we can use GetFeedback here. Then
+  // we can eventually get rid of the additional copy of feedback slots that
+  // we currently have in FeedbackVectorData.
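
Note the mechanical change from p.feedback().vector()/slot() to p.feedback().vector/.slot: VectorSlotPair exposed accessors, while FeedbackSource is a plain struct with public members. The lookups themselves now go through FeedbackVectorRef, i.e. through serialized feedback data rather than the live heap, and the TODO above sketches the next step: have the broker digest each slot once into a typed ProcessedFeedback. An illustrative model of that direction under assumed names (nothing here is the actual V8 API):

#include <variant>

struct AllocationSiteSketch { bool fast_literal; };
struct RegExpBoilerplateSketch {};
struct InsufficientSketch {};

// Instead of every reducer re-classifying the raw slot contents, the
// broker would classify once and hand out a typed result.
using ProcessedLiteralFeedback =
    std::variant<InsufficientSketch, AllocationSiteSketch,
                 RegExpBoilerplateSketch>;

ProcessedLiteralFeedback Classify(bool is_site, bool is_regexp) {
  if (is_site) return AllocationSiteSketch{true};
  if (is_regexp) return RegExpBoilerplateSketch{};
  return InsufficientSketch{};
}
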
if (feedback.IsJSRegExp()) { JSRegExpRef boilerplate = feedback.AsJSRegExp(); Node* value = effect = AllocateLiteralRegExp(effect, control, boilerplate); @@ -1192,7 +1207,7 @@ Reduction JSCreateLowering::ReduceJSCreateFunctionContext(Node* node) { default: UNREACHABLE(); } - a.AllocateContext(context_length, map); + a.AllocateContext(context_length, MapRef(broker(), map)); a.Store(AccessBuilder::ForContextSlot(Context::SCOPE_INFO_INDEX), scope_info); a.Store(AccessBuilder::ForContextSlot(Context::PREVIOUS_INDEX), context); @@ -1220,7 +1235,8 @@ Reduction JSCreateLowering::ReduceJSCreateWithContext(Node* node) { AllocationBuilder a(jsgraph(), effect, control); STATIC_ASSERT(Context::MIN_CONTEXT_SLOTS == 4); // Ensure fully covered. - a.AllocateContext(Context::MIN_CONTEXT_SLOTS, factory()->with_context_map()); + a.AllocateContext(Context::MIN_CONTEXT_SLOTS, + MapRef(broker(), factory()->with_context_map())); a.Store(AccessBuilder::ForContextSlot(Context::SCOPE_INFO_INDEX), scope_info); a.Store(AccessBuilder::ForContextSlot(Context::PREVIOUS_INDEX), context); a.Store(AccessBuilder::ForContextSlot(Context::EXTENSION_INDEX), extension); @@ -1243,7 +1259,7 @@ Reduction JSCreateLowering::ReduceJSCreateCatchContext(Node* node) { AllocationBuilder a(jsgraph(), effect, control); STATIC_ASSERT(Context::MIN_CONTEXT_SLOTS == 4); // Ensure fully covered. a.AllocateContext(Context::MIN_CONTEXT_SLOTS + 1, - factory()->catch_context_map()); + MapRef(broker(), factory()->catch_context_map())); a.Store(AccessBuilder::ForContextSlot(Context::SCOPE_INFO_INDEX), scope_info); a.Store(AccessBuilder::ForContextSlot(Context::PREVIOUS_INDEX), context); a.Store(AccessBuilder::ForContextSlot(Context::EXTENSION_INDEX), extension); @@ -1271,7 +1287,8 @@ Reduction JSCreateLowering::ReduceJSCreateBlockContext(Node* node) { AllocationBuilder a(jsgraph(), effect, control); STATIC_ASSERT(Context::MIN_CONTEXT_SLOTS == 4); // Ensure fully covered. - a.AllocateContext(context_length, factory()->block_context_map()); + a.AllocateContext(context_length, + MapRef(broker(), factory()->block_context_map())); a.Store(AccessBuilder::ForContextSlot(Context::SCOPE_INFO_INDEX), scope_info); a.Store(AccessBuilder::ForContextSlot(Context::PREVIOUS_INDEX), context); @@ -1293,12 +1310,13 @@ namespace { base::Optional<MapRef> GetObjectCreateMap(JSHeapBroker* broker, HeapObjectRef prototype) { MapRef standard_map = - broker->native_context().object_function().initial_map(); + broker->target_native_context().object_function().initial_map(); if (prototype.equals(standard_map.prototype())) { return standard_map; } if (prototype.map().oddball_type() == OddballType::kNull) { - return broker->native_context().slow_object_with_null_prototype_map(); + return broker->target_native_context() + .slow_object_with_null_prototype_map(); } if (prototype.IsJSObject()) { return prototype.AsJSObject().GetObjectCreateMap(); @@ -1401,7 +1419,8 @@ Node* JSCreateLowering::AllocateArguments(Node* effect, Node* control, // Actually allocate the backing store. AllocationBuilder a(jsgraph(), effect, control); - a.AllocateArray(argument_count, factory()->fixed_array_map()); + a.AllocateArray(argument_count, + MapRef(broker(), factory()->fixed_array_map())); for (int i = 0; i < argument_count; ++i, ++parameters_it) { DCHECK_NOT_NULL((*parameters_it).node); a.Store(AccessBuilder::ForFixedArrayElement(), jsgraph()->Constant(i), @@ -1432,7 +1451,7 @@ Node* JSCreateLowering::AllocateRestArguments(Node* effect, Node* control, // Actually allocate the backing store. 
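
AllocateContext and AllocateArray now take a MapRef instead of a raw Handle<Map>, so call sites wrap factory maps as MapRef(broker(), factory()->...). Pushing the broker ref into the signature keeps bare heap handles from leaking into code that must stay heap-access-free under concurrent inlining. The shape of that migration, with mock types:

template <typename T>
struct Handle { T* location = nullptr; };
struct Map {};
struct JSHeapBroker {};

// Ref wrapper: pairs the handle with the broker that owns its serialized
// data, so later reads need not touch the heap directly.
class MapRef {
 public:
  MapRef(JSHeapBroker* broker, Handle<Map> map)
      : broker_(broker), map_(map) {}

 private:
  JSHeapBroker* broker_;
  Handle<Map> map_;
};

// Before: void AllocateArray(int capacity, Handle<Map> map);
// After (as in the diff): the ref-based signature.
void AllocateArray(int capacity, const MapRef& map) { /* emit nodes */ }

void CallSite(JSHeapBroker* broker, Handle<Map> fixed_array_map) {
  AllocateArray(2, MapRef(broker, fixed_array_map));
}
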
AllocationBuilder a(jsgraph(), effect, control); - a.AllocateArray(num_elements, factory()->fixed_array_map()); + a.AllocateArray(num_elements, MapRef(broker(), factory()->fixed_array_map())); for (int i = 0; i < num_elements; ++i, ++parameters_it) { DCHECK_NOT_NULL((*parameters_it).node); a.Store(AccessBuilder::ForFixedArrayElement(), jsgraph()->Constant(i), @@ -1471,7 +1490,8 @@ Node* JSCreateLowering::AllocateAliasedArguments( // another indirection away and then linked into the parameter map below, // whereas mapped argument values are replaced with a hole instead. AllocationBuilder aa(jsgraph(), effect, control); - aa.AllocateArray(argument_count, factory()->fixed_array_map()); + aa.AllocateArray(argument_count, + MapRef(broker(), factory()->fixed_array_map())); for (int i = 0; i < mapped_count; ++i, ++parameters_it) { aa.Store(AccessBuilder::ForFixedArrayElement(), jsgraph()->Constant(i), jsgraph()->TheHoleConstant()); @@ -1485,7 +1505,8 @@ Node* JSCreateLowering::AllocateAliasedArguments( // Actually allocate the backing store. AllocationBuilder a(jsgraph(), arguments, control); - a.AllocateArray(mapped_count + 2, factory()->sloppy_arguments_elements_map()); + a.AllocateArray(mapped_count + 2, + MapRef(broker(), factory()->sloppy_arguments_elements_map())); a.Store(AccessBuilder::ForFixedArrayElement(), jsgraph()->Constant(0), context); a.Store(AccessBuilder::ForFixedArrayElement(), jsgraph()->Constant(1), @@ -1530,7 +1551,8 @@ Node* JSCreateLowering::AllocateAliasedArguments( // Actually allocate the backing store. AllocationBuilder a(jsgraph(), arguments, control); - a.AllocateArray(mapped_count + 2, factory()->sloppy_arguments_elements_map()); + a.AllocateArray(mapped_count + 2, + MapRef(broker(), factory()->sloppy_arguments_elements_map())); a.Store(AccessBuilder::ForFixedArrayElement(), jsgraph()->Constant(0), context); a.Store(AccessBuilder::ForFixedArrayElement(), jsgraph()->Constant(1), @@ -1565,7 +1587,7 @@ Node* JSCreateLowering::AllocateElements(Node* effect, Node* control, // Actually allocate the backing store. AllocationBuilder a(jsgraph(), effect, control); - a.AllocateArray(capacity, elements_map, allocation); + a.AllocateArray(capacity, MapRef(broker(), elements_map), allocation); for (int i = 0; i < capacity; ++i) { Node* index = jsgraph()->Constant(i); a.Store(access, index, value); @@ -1590,7 +1612,7 @@ Node* JSCreateLowering::AllocateElements(Node* effect, Node* control, // Actually allocate the backing store. AllocationBuilder a(jsgraph(), effect, control); - a.AllocateArray(capacity, elements_map, allocation); + a.AllocateArray(capacity, MapRef(broker(), elements_map), allocation); for (int i = 0; i < capacity; ++i) { Node* index = jsgraph()->Constant(i); a.Store(access, index, values[i]); @@ -1601,9 +1623,6 @@ Node* JSCreateLowering::AllocateElements(Node* effect, Node* control, Node* JSCreateLowering::AllocateFastLiteral(Node* effect, Node* control, JSObjectRef boilerplate, AllocationType allocation) { - // Setup the properties backing store. - Node* properties = jsgraph()->EmptyFixedArrayConstant(); - // Compute the in-object properties to store first (might have effects). 
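
The hunk that follows swaps PropertyConstness for ConstFieldInfo in FieldAccess. A bare constness bit only says "this field is const"; load elimination additionally needs to know which map owns the const field, so that const stores to fields of different maps are never confused with one another. A sketch of such a structure — the member name follows the diff, the rest is assumption:

struct Map;

// nullptr owner <=> the field is mutable (ConstFieldInfo::None()).
struct ConstFieldInfo {
  const Map* owner_map = nullptr;

  bool IsConst() const { return owner_map != nullptr; }
  static ConstFieldInfo None() { return ConstFieldInfo{}; }
};

// As in AllocateFastLiteral below: start from "const, owned by the
// boilerplate map" and downgrade to None() for stores that load
// elimination must treat as mutable (hole NaNs, uninitialized fields).
ConstFieldInfo ForBoilerplateField(const Map* boilerplate_map,
                                   bool observable) {
  return observable ? ConstFieldInfo{boilerplate_map}
                    : ConstFieldInfo::None();
}
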
MapRef boilerplate_map = boilerplate.map(); ZoneVector<std::pair<FieldAccess, Node*>> inobject_fields(zone()); @@ -1616,6 +1635,7 @@ Node* JSCreateLowering::AllocateFastLiteral(Node* effect, Node* control, DCHECK_EQ(kData, property_details.kind()); NameRef property_name = boilerplate_map.GetPropertyKey(i); FieldIndex index = boilerplate_map.GetFieldIndexFor(i); + ConstFieldInfo const_field_info(boilerplate_map.object()); FieldAccess access = {kTaggedBase, index.offset(), property_name.object(), @@ -1624,7 +1644,7 @@ Node* JSCreateLowering::AllocateFastLiteral(Node* effect, Node* control, MachineType::TypeCompressedTagged(), kFullWriteBarrier, LoadSensitivity::kUnsafe, - property_details.constness()}; + const_field_info}; Node* value; if (boilerplate_map.IsUnboxedDoubleField(i)) { access.machine_type = MachineType::Float64(); @@ -1637,7 +1657,7 @@ Node* JSCreateLowering::AllocateFastLiteral(Node* effect, Node* control, // the field. The hole NaN should therefore be unobservable. // Load elimination expects there to be at most one const store to any // given field, so we always mark the unobservable ones as mutable. - access.constness = PropertyConstness::kMutable; + access.const_field_info = ConstFieldInfo::None(); } value = jsgraph()->Constant(bit_cast<double>(value_bits)); } else { @@ -1647,19 +1667,19 @@ Node* JSCreateLowering::AllocateFastLiteral(Node* effect, Node* control, boilerplate_value.AsHeapObject().map().oddball_type() == OddballType::kUninitialized; if (is_uninitialized) { - access.constness = PropertyConstness::kMutable; + access.const_field_info = ConstFieldInfo::None(); } if (boilerplate_value.IsJSObject()) { JSObjectRef boilerplate_object = boilerplate_value.AsJSObject(); value = effect = AllocateFastLiteral(effect, control, boilerplate_object, allocation); } else if (property_details.representation().IsDouble()) { - double number = boilerplate_value.AsMutableHeapNumber().value(); + double number = boilerplate_value.AsHeapNumber().value(); // Allocate a mutable HeapNumber box and store the value into it. AllocationBuilder builder(jsgraph(), effect, control); builder.Allocate(HeapNumber::kSize, allocation); builder.Store(AccessBuilder::ForMap(), - factory()->mutable_heap_number_map()); + MapRef(broker(), factory()->heap_number_map())); builder.Store(AccessBuilder::ForHeapNumberValue(), jsgraph()->Constant(number)); value = effect = builder.Finish(); @@ -1695,7 +1715,8 @@ Node* JSCreateLowering::AllocateFastLiteral(Node* effect, Node* control, builder.Allocate(boilerplate_map.instance_size(), allocation, Type::For(boilerplate_map)); builder.Store(AccessBuilder::ForMap(), boilerplate_map); - builder.Store(AccessBuilder::ForJSObjectPropertiesOrHash(), properties); + builder.Store(AccessBuilder::ForJSObjectPropertiesOrHashKnownPointer(), + jsgraph()->EmptyFixedArrayConstant()); builder.Store(AccessBuilder::ForJSObjectElements(), elements); if (boilerplate.IsJSArray()) { JSArrayRef boilerplate_array = boilerplate.AsJSArray(); @@ -1751,7 +1772,7 @@ Node* JSCreateLowering::AllocateFastLiteralElements(Node* effect, Node* control, // Allocate the backing store array and store the elements. AllocationBuilder builder(jsgraph(), effect, control); - builder.AllocateArray(elements_length, elements_map.object(), allocation); + builder.AllocateArray(elements_length, elements_map, allocation); ElementAccess const access = (elements_map.instance_type() == FIXED_DOUBLE_ARRAY_TYPE) ? 
AccessBuilder::ForFixedDoubleArrayElement() @@ -1811,7 +1832,7 @@ SimplifiedOperatorBuilder* JSCreateLowering::simplified() const { } NativeContextRef JSCreateLowering::native_context() const { - return broker()->native_context(); + return broker()->target_native_context(); } } // namespace compiler diff --git a/deps/v8/src/compiler/js-generic-lowering.cc b/deps/v8/src/compiler/js-generic-lowering.cc index 0a6f90975f..d2a9b675f9 100644 --- a/deps/v8/src/compiler/js-generic-lowering.cc +++ b/deps/v8/src/compiler/js-generic-lowering.cc @@ -9,10 +9,12 @@ #include "src/codegen/code-factory.h" #include "src/compiler/common-operator.h" #include "src/compiler/js-graph.h" +#include "src/compiler/js-heap-broker.h" #include "src/compiler/machine-operator.h" #include "src/compiler/node-matchers.h" #include "src/compiler/node-properties.h" #include "src/compiler/operator-properties.h" +#include "src/compiler/processed-feedback.h" #include "src/objects/feedback-cell.h" #include "src/objects/feedback-vector.h" #include "src/objects/scope-info.h" @@ -31,8 +33,9 @@ CallDescriptor::Flags FrameStateFlagForCall(Node* node) { } // namespace -JSGenericLowering::JSGenericLowering(JSGraph* jsgraph, Editor* editor) - : AdvancedReducer(editor), jsgraph_(jsgraph) {} +JSGenericLowering::JSGenericLowering(JSGraph* jsgraph, Editor* editor, + JSHeapBroker* broker) + : AdvancedReducer(editor), jsgraph_(jsgraph), broker_(broker) {} JSGenericLowering::~JSGenericLowering() = default; @@ -144,6 +147,22 @@ void JSGenericLowering::LowerJSStrictEqual(Node* node) { Operator::kEliminatable); } +namespace { +bool ShouldUseMegamorphicLoadBuiltin(FeedbackSource const& source, + JSHeapBroker* broker) { + ProcessedFeedback const& feedback = broker->GetFeedback(source); + + if (feedback.kind() == ProcessedFeedback::kElementAccess) { + return feedback.AsElementAccess().transition_groups().empty(); + } else if (feedback.kind() == ProcessedFeedback::kNamedAccess) { + return feedback.AsNamedAccess().maps().empty(); + } else if (feedback.kind() == ProcessedFeedback::kInsufficient) { + return false; + } + UNREACHABLE(); +} +} // namespace + void JSGenericLowering::LowerJSLoadProperty(Node* node) { CallDescriptor::Flags flags = FrameStateFlagForCall(node); const PropertyAccess& p = PropertyAccessOf(node->op()); @@ -152,16 +171,16 @@ void JSGenericLowering::LowerJSLoadProperty(Node* node) { node->InsertInput(zone(), 2, jsgraph()->SmiConstant(p.feedback().index())); if (outer_state->opcode() != IrOpcode::kFrameState) { Callable callable = Builtins::CallableFor( - isolate(), p.feedback().ic_state() == MEGAMORPHIC + isolate(), ShouldUseMegamorphicLoadBuiltin(p.feedback(), broker()) ? Builtins::kKeyedLoadICTrampoline_Megamorphic : Builtins::kKeyedLoadICTrampoline); ReplaceWithStubCall(node, callable, flags); } else { Callable callable = Builtins::CallableFor( - isolate(), p.feedback().ic_state() == MEGAMORPHIC + isolate(), ShouldUseMegamorphicLoadBuiltin(p.feedback(), broker()) ? 
Builtins::kKeyedLoadIC_Megamorphic : Builtins::kKeyedLoadIC); - Node* vector = jsgraph()->HeapConstant(p.feedback().vector()); + Node* vector = jsgraph()->HeapConstant(p.feedback().vector); node->InsertInput(zone(), 3, vector); ReplaceWithStubCall(node, callable, flags); } @@ -182,16 +201,16 @@ void JSGenericLowering::LowerJSLoadNamed(Node* node) { node->InsertInput(zone(), 2, jsgraph()->SmiConstant(p.feedback().index())); if (outer_state->opcode() != IrOpcode::kFrameState) { Callable callable = Builtins::CallableFor( - isolate(), p.feedback().ic_state() == MEGAMORPHIC + isolate(), ShouldUseMegamorphicLoadBuiltin(p.feedback(), broker()) ? Builtins::kLoadICTrampoline_Megamorphic : Builtins::kLoadICTrampoline); ReplaceWithStubCall(node, callable, flags); } else { - Callable callable = - Builtins::CallableFor(isolate(), p.feedback().ic_state() == MEGAMORPHIC - ? Builtins::kLoadIC_Megamorphic - : Builtins::kLoadIC); - Node* vector = jsgraph()->HeapConstant(p.feedback().vector()); + Callable callable = Builtins::CallableFor( + isolate(), ShouldUseMegamorphicLoadBuiltin(p.feedback(), broker()) + ? Builtins::kLoadIC_Megamorphic + : Builtins::kLoadIC); + Node* vector = jsgraph()->HeapConstant(p.feedback().vector); node->InsertInput(zone(), 3, vector); ReplaceWithStubCall(node, callable, flags); } @@ -210,12 +229,23 @@ void JSGenericLowering::LowerJSLoadGlobal(Node* node) { } else { Callable callable = CodeFactory::LoadGlobalICInOptimizedCode(isolate(), p.typeof_mode()); - Node* vector = jsgraph()->HeapConstant(p.feedback().vector()); + Node* vector = jsgraph()->HeapConstant(p.feedback().vector); node->InsertInput(zone(), 2, vector); ReplaceWithStubCall(node, callable, flags); } } +void JSGenericLowering::LowerJSGetIterator(Node* node) { + CallDescriptor::Flags flags = FrameStateFlagForCall(node); + const PropertyAccess& p = PropertyAccessOf(node->op()); + node->InsertInput(zone(), 1, jsgraph()->SmiConstant(p.feedback().index())); + Node* vector = jsgraph()->HeapConstant(p.feedback().vector); + node->InsertInput(zone(), 2, vector); + Callable callable = + Builtins::CallableFor(isolate(), Builtins::kGetIteratorWithFeedback); + ReplaceWithStubCall(node, callable, flags); +} + void JSGenericLowering::LowerJSStoreProperty(Node* node) { CallDescriptor::Flags flags = FrameStateFlagForCall(node); PropertyAccess const& p = PropertyAccessOf(node->op()); @@ -229,7 +259,7 @@ void JSGenericLowering::LowerJSStoreProperty(Node* node) { } else { Callable callable = Builtins::CallableFor(isolate(), Builtins::kKeyedStoreIC); - Node* vector = jsgraph()->HeapConstant(p.feedback().vector()); + Node* vector = jsgraph()->HeapConstant(p.feedback().vector); node->InsertInput(zone(), 4, vector); ReplaceWithStubCall(node, callable, flags); } @@ -252,7 +282,7 @@ void JSGenericLowering::LowerJSStoreNamed(Node* node) { ReplaceWithStubCall(node, callable, flags); } else { Callable callable = Builtins::CallableFor(isolate(), Builtins::kStoreIC); - Node* vector = jsgraph()->HeapConstant(p.feedback().vector()); + Node* vector = jsgraph()->HeapConstant(p.feedback().vector); node->InsertInput(zone(), 4, vector); ReplaceWithStubCall(node, callable, flags); } @@ -270,7 +300,7 @@ void JSGenericLowering::LowerJSStoreNamedOwn(Node* node) { ReplaceWithStubCall(node, callable, flags); } else { Callable callable = CodeFactory::StoreOwnICInOptimizedCode(isolate()); - Node* vector = jsgraph()->HeapConstant(p.feedback().vector()); + Node* vector = jsgraph()->HeapConstant(p.feedback().vector); node->InsertInput(zone(), 4, vector); 
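
ShouldUseMegamorphicLoadBuiltin, added above, replaces the old p.feedback().ic_state() == MEGAMORPHIC test. Under the broker the IC state is not read off the heap at lowering time; instead the serialized ProcessedFeedback is consulted, and "megamorphic" is recognized as feedback that exists but carries no maps or transition groups. Insufficient feedback keeps the ordinary builtin, which can still transition the IC. A standalone model of that decision (types are simplified stand-ins):

#include <cassert>
#include <vector>

enum class FeedbackKind { kInsufficient, kNamedAccess, kElementAccess };

struct ProcessedFeedbackSketch {
  FeedbackKind kind;
  std::vector<int> maps;               // named access: maps the IC saw
  std::vector<int> transition_groups;  // element-access equivalent
};

bool ShouldUseMegamorphicLoadBuiltin(const ProcessedFeedbackSketch& feedback) {
  switch (feedback.kind) {
    case FeedbackKind::kElementAccess:
      // Feedback exists but no usable groups survived serialization:
      // the site is megamorphic, skip the map-check fast path.
      return feedback.transition_groups.empty();
    case FeedbackKind::kNamedAccess:
      return feedback.maps.empty();
    case FeedbackKind::kInsufficient:
      // No feedback yet: use the normal IC so it can still learn.
      return false;
  }
  return false;  // unreachable
}

int main() {
  assert(!ShouldUseMegamorphicLoadBuiltin(
      {FeedbackKind::kInsufficient, {}, {}}));
  assert(ShouldUseMegamorphicLoadBuiltin(
      {FeedbackKind::kNamedAccess, {}, {}}));
}
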
ReplaceWithStubCall(node, callable, flags); } @@ -290,7 +320,7 @@ void JSGenericLowering::LowerJSStoreGlobal(Node* node) { } else { Callable callable = Builtins::CallableFor(isolate(), Builtins::kStoreGlobalIC); - Node* vector = jsgraph()->HeapConstant(p.feedback().vector()); + Node* vector = jsgraph()->HeapConstant(p.feedback().vector); node->InsertInput(zone(), 3, vector); ReplaceWithStubCall(node, callable, flags); } @@ -298,8 +328,9 @@ void JSGenericLowering::LowerJSStoreGlobal(Node* node) { void JSGenericLowering::LowerJSStoreDataPropertyInLiteral(Node* node) { FeedbackParameter const& p = FeedbackParameterOf(node->op()); + RelaxControls(node); node->InsertInputs(zone(), 4, 2); - node->ReplaceInput(4, jsgraph()->HeapConstant(p.feedback().vector())); + node->ReplaceInput(4, jsgraph()->HeapConstant(p.feedback().vector)); node->ReplaceInput(5, jsgraph()->SmiConstant(p.feedback().index())); ReplaceWithRuntimeCall(node, Runtime::kDefineDataPropertyInLiteral); } @@ -311,7 +342,7 @@ void JSGenericLowering::LowerJSStoreInArrayLiteral(Node* node) { FeedbackParameter const& p = FeedbackParameterOf(node->op()); RelaxControls(node); node->InsertInput(zone(), 3, jsgraph()->SmiConstant(p.feedback().index())); - node->InsertInput(zone(), 4, jsgraph()->HeapConstant(p.feedback().vector())); + node->InsertInput(zone(), 4, jsgraph()->HeapConstant(p.feedback().vector)); ReplaceWithStubCall(node, callable, flags); } @@ -513,7 +544,7 @@ void JSGenericLowering::LowerJSCreateTypedArray(Node* node) { void JSGenericLowering::LowerJSCreateLiteralArray(Node* node) { CreateLiteralParameters const& p = CreateLiteralParametersOf(node->op()); CallDescriptor::Flags flags = FrameStateFlagForCall(node); - node->InsertInput(zone(), 0, jsgraph()->HeapConstant(p.feedback().vector())); + node->InsertInput(zone(), 0, jsgraph()->HeapConstant(p.feedback().vector)); node->InsertInput(zone(), 1, jsgraph()->SmiConstant(p.feedback().index())); node->InsertInput(zone(), 2, jsgraph()->HeapConstant(p.constant())); @@ -533,7 +564,7 @@ void JSGenericLowering::LowerJSCreateLiteralArray(Node* node) { void JSGenericLowering::LowerJSCreateEmptyLiteralArray(Node* node) { CallDescriptor::Flags flags = FrameStateFlagForCall(node); FeedbackParameter const& p = FeedbackParameterOf(node->op()); - node->InsertInput(zone(), 0, jsgraph()->HeapConstant(p.feedback().vector())); + node->InsertInput(zone(), 0, jsgraph()->HeapConstant(p.feedback().vector)); node->InsertInput(zone(), 1, jsgraph()->SmiConstant(p.feedback().index())); node->RemoveInput(4); // control Callable callable = @@ -551,7 +582,7 @@ void JSGenericLowering::LowerJSCreateArrayFromIterable(Node* node) { void JSGenericLowering::LowerJSCreateLiteralObject(Node* node) { CreateLiteralParameters const& p = CreateLiteralParametersOf(node->op()); CallDescriptor::Flags flags = FrameStateFlagForCall(node); - node->InsertInput(zone(), 0, jsgraph()->HeapConstant(p.feedback().vector())); + node->InsertInput(zone(), 0, jsgraph()->HeapConstant(p.feedback().vector)); node->InsertInput(zone(), 1, jsgraph()->SmiConstant(p.feedback().index())); node->InsertInput(zone(), 2, jsgraph()->HeapConstant(p.constant())); node->InsertInput(zone(), 3, jsgraph()->SmiConstant(p.flags())); @@ -576,7 +607,7 @@ void JSGenericLowering::LowerJSCloneObject(Node* node) { Builtins::CallableFor(isolate(), Builtins::kCloneObjectIC); node->InsertInput(zone(), 1, jsgraph()->SmiConstant(p.flags())); node->InsertInput(zone(), 2, jsgraph()->SmiConstant(p.feedback().index())); - node->InsertInput(zone(), 3, 
jsgraph()->HeapConstant(p.feedback().vector())); + node->InsertInput(zone(), 3, jsgraph()->HeapConstant(p.feedback().vector)); ReplaceWithStubCall(node, callable, flags); } @@ -589,7 +620,7 @@ void JSGenericLowering::LowerJSCreateLiteralRegExp(Node* node) { CallDescriptor::Flags flags = FrameStateFlagForCall(node); Callable callable = Builtins::CallableFor(isolate(), Builtins::kCreateRegExpLiteral); - node->InsertInput(zone(), 0, jsgraph()->HeapConstant(p.feedback().vector())); + node->InsertInput(zone(), 0, jsgraph()->HeapConstant(p.feedback().vector)); node->InsertInput(zone(), 1, jsgraph()->SmiConstant(p.feedback().index())); node->InsertInput(zone(), 2, jsgraph()->HeapConstant(p.constant())); node->InsertInput(zone(), 3, jsgraph()->SmiConstant(p.flags())); @@ -812,14 +843,13 @@ void JSGenericLowering::LowerJSStackCheck(Node* node) { Node* effect = NodeProperties::GetEffectInput(node); Node* control = NodeProperties::GetControlInput(node); - Node* limit = effect = graph()->NewNode( - machine()->Load(MachineType::Pointer()), - jsgraph()->ExternalConstant( - ExternalReference::address_of_stack_limit(isolate())), - jsgraph()->IntPtrConstant(0), effect, control); - Node* pointer = graph()->NewNode(machine()->LoadStackPointer()); + Node* limit = effect = + graph()->NewNode(machine()->Load(MachineType::Pointer()), + jsgraph()->ExternalConstant( + ExternalReference::address_of_jslimit(isolate())), + jsgraph()->IntPtrConstant(0), effect, control); - Node* check = graph()->NewNode(machine()->UintLessThan(), limit, pointer); + Node* check = graph()->NewNode(machine()->StackPointerGreaterThan(), limit); Node* branch = graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control); diff --git a/deps/v8/src/compiler/js-generic-lowering.h b/deps/v8/src/compiler/js-generic-lowering.h index 2a395ca5e8..2a4ac808b1 100644 --- a/deps/v8/src/compiler/js-generic-lowering.h +++ b/deps/v8/src/compiler/js-generic-lowering.h @@ -23,7 +23,7 @@ class Linkage; // Lowers JS-level operators to runtime and IC calls in the "generic" case. class JSGenericLowering final : public AdvancedReducer { public: - JSGenericLowering(JSGraph* jsgraph, Editor* editor); + JSGenericLowering(JSGraph* jsgraph, Editor* editor, JSHeapBroker* broker); ~JSGenericLowering() final; const char* reducer_name() const override { return "JSGenericLowering"; } @@ -48,9 +48,11 @@ class JSGenericLowering final : public AdvancedReducer { Graph* graph() const; CommonOperatorBuilder* common() const; MachineOperatorBuilder* machine() const; + JSHeapBroker* broker() const { return broker_; } private: JSGraph* const jsgraph_; + JSHeapBroker* const broker_; }; } // namespace compiler diff --git a/deps/v8/src/compiler/js-graph.cc b/deps/v8/src/compiler/js-graph.cc index 43a4beadee..beed7820b4 100644 --- a/deps/v8/src/compiler/js-graph.cc +++ b/deps/v8/src/compiler/js-graph.cc @@ -46,26 +46,6 @@ Node* JSGraph::CEntryStubConstant(int result_size, SaveFPRegsMode save_doubles, argv_mode, builtin_exit_frame)); } -Node* JSGraph::Constant(Handle<Object> value) { - // Dereference the handle to determine if a number constant or other - // canonicalized node can be used. 
- if (value->IsNumber()) { - return Constant(value->Number()); - } else if (value->IsUndefined(isolate())) { - return UndefinedConstant(); - } else if (value->IsTrue(isolate())) { - return TrueConstant(); - } else if (value->IsFalse(isolate())) { - return FalseConstant(); - } else if (value->IsNull(isolate())) { - return NullConstant(); - } else if (value->IsTheHole(isolate())) { - return TheHoleConstant(); - } else { - return HeapConstant(Handle<HeapObject>::cast(value)); - } -} - Node* JSGraph::Constant(const ObjectRef& ref) { if (ref.IsSmi()) return Constant(ref.AsSmi()); OddballType oddball_type = diff --git a/deps/v8/src/compiler/js-graph.h b/deps/v8/src/compiler/js-graph.h index ec36c26034..83c81b1010 100644 --- a/deps/v8/src/compiler/js-graph.h +++ b/deps/v8/src/compiler/js-graph.h @@ -46,16 +46,12 @@ class V8_EXPORT_PRIVATE JSGraph : public MachineGraph { // Used for stubs and runtime functions with no context. (alias: SMI zero) Node* NoContextConstant() { return ZeroConstant(); } - // Creates a HeapConstant node, possibly canonicalized, and may access the - // heap to inspect the object. + // Creates a HeapConstant node, possibly canonicalized. Node* HeapConstant(Handle<HeapObject> value); // Creates a Constant node of the appropriate type for the given object. - // Accesses the heap to inspect the object and determine whether one of the + // Inspect the (serialized) object and determine whether one of the // canonicalized globals or a number constant should be returned. - Node* Constant(Handle<Object> value); - - // Like above, but doesn't access the heap directly. Node* Constant(const ObjectRef& value); // Creates a NumberConstant node, usually canonicalized. diff --git a/deps/v8/src/compiler/js-heap-broker.cc b/deps/v8/src/compiler/js-heap-broker.cc index c79c793ae6..7466a80f85 100644 --- a/deps/v8/src/compiler/js-heap-broker.cc +++ b/deps/v8/src/compiler/js-heap-broker.cc @@ -16,7 +16,6 @@ #include "src/compiler/bytecode-analysis.h" #include "src/compiler/graph-reducer.h" #include "src/compiler/per-isolate-compiler-cache.h" -#include "src/compiler/vector-slot-pair.h" #include "src/init/bootstrapper.h" #include "src/objects/allocation-site-inl.h" #include "src/objects/api-callbacks.h" @@ -26,6 +25,7 @@ #include "src/objects/js-array-buffer-inl.h" #include "src/objects/js-array-inl.h" #include "src/objects/js-regexp-inl.h" +#include "src/objects/literal-objects-inl.h" #include "src/objects/module-inl.h" #include "src/objects/objects-inl.h" #include "src/objects/template-objects-inl.h" @@ -256,13 +256,14 @@ class JSObjectField { uint64_t number_bits_ = 0; }; -struct FieldIndexHasher { - size_t operator()(FieldIndex field_index) const { - return field_index.index(); - } +class JSReceiverData : public HeapObjectData { + public: + JSReceiverData(JSHeapBroker* broker, ObjectData** storage, + Handle<JSReceiver> object) + : HeapObjectData(broker, storage, object) {} }; -class JSObjectData : public HeapObjectData { +class JSObjectData : public JSReceiverData { public: JSObjectData(JSHeapBroker* broker, ObjectData** storage, Handle<JSObject> object); @@ -277,16 +278,22 @@ class JSObjectData : public HeapObjectData { FixedArrayBaseData* elements() const; void SerializeObjectCreateMap(JSHeapBroker* broker); - MapData* object_create_map() const { // Can be nullptr. - CHECK(serialized_object_create_map_); + + MapData* object_create_map(JSHeapBroker* broker) const { // Can be nullptr. 
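// (Sketch of the new soft-fail contract, mirroring GetObjectCreateMap
// further down in this patch: callers must now tolerate a nullptr result
// instead of relying on a CHECK that serialization already happened.
//
//   MapData* map_data = js_object_data->object_create_map(broker);
//   if (map_data == nullptr) return base::nullopt;  // data not serialized
//   return MapRef(broker, map_data);
//
// js_object_data here is a hypothetical local naming a JSObjectData*.)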
+ if (!serialized_object_create_map_) { + DCHECK_NULL(object_create_map_); + TRACE_MISSING(broker, "object_create_map on " << this); + } return object_create_map_; } - ObjectData* GetOwnConstantElement(JSHeapBroker* broker, uint32_t index, - bool serialize); - ObjectData* GetOwnProperty(JSHeapBroker* broker, - Representation representation, - FieldIndex field_index, bool serialize); + ObjectData* GetOwnConstantElement( + JSHeapBroker* broker, uint32_t index, + SerializationPolicy policy = SerializationPolicy::kAssumeSerialized); + ObjectData* GetOwnDataProperty( + JSHeapBroker* broker, Representation representation, + FieldIndex field_index, + SerializationPolicy policy = SerializationPolicy::kAssumeSerialized); // This method is only used to assert our invariants. bool cow_or_empty_elements_tenured() const; @@ -316,7 +323,9 @@ class JSObjectData : public HeapObjectData { // (2) are known not to (possibly they don't exist at all). // In case (2), the second pair component is nullptr. // For simplicity, this may in theory overlap with inobject_fields_. - ZoneUnorderedMap<FieldIndex, ObjectData*, FieldIndexHasher> own_properties_; + // The keys of the map are the property_index() values of the + // respective property FieldIndex'es. + ZoneUnorderedMap<int, ObjectData*> own_properties_; }; void JSObjectData::SerializeObjectCreateMap(JSHeapBroker* broker) { @@ -353,24 +362,25 @@ base::Optional<ObjectRef> GetOwnElementFromHeap(JSHeapBroker* broker, return base::nullopt; } -ObjectRef GetOwnPropertyFromHeap(JSHeapBroker* broker, - Handle<JSObject> receiver, - Representation representation, - FieldIndex field_index) { +ObjectRef GetOwnDataPropertyFromHeap(JSHeapBroker* broker, + Handle<JSObject> receiver, + Representation representation, + FieldIndex field_index) { Handle<Object> constant = JSObject::FastPropertyAt(receiver, representation, field_index); return ObjectRef(broker, constant); } + } // namespace ObjectData* JSObjectData::GetOwnConstantElement(JSHeapBroker* broker, uint32_t index, - bool serialize) { + SerializationPolicy policy) { for (auto const& p : own_constant_elements_) { if (p.first == index) return p.second; } - if (!serialize) { + if (policy == SerializationPolicy::kAssumeSerialized) { TRACE_MISSING(broker, "knowledge about index " << index << " on " << this); return nullptr; } @@ -382,24 +392,24 @@ ObjectData* JSObjectData::GetOwnConstantElement(JSHeapBroker* broker, return result; } -ObjectData* JSObjectData::GetOwnProperty(JSHeapBroker* broker, - Representation representation, - FieldIndex field_index, - bool serialize) { - auto p = own_properties_.find(field_index); +ObjectData* JSObjectData::GetOwnDataProperty(JSHeapBroker* broker, + Representation representation, + FieldIndex field_index, + SerializationPolicy policy) { + auto p = own_properties_.find(field_index.property_index()); if (p != own_properties_.end()) return p->second; - if (!serialize) { + if (policy == SerializationPolicy::kAssumeSerialized) { TRACE_MISSING(broker, "knowledge about property with index " << field_index.property_index() << " on " << this); return nullptr; } - ObjectRef property = GetOwnPropertyFromHeap( + ObjectRef property = GetOwnDataPropertyFromHeap( broker, Handle<JSObject>::cast(object()), representation, field_index); ObjectData* result(property.data()); - own_properties_.insert(std::make_pair(field_index, result)); + own_properties_.insert(std::make_pair(field_index.property_index(), result)); return result; } @@ -446,6 +456,31 @@ void JSTypedArrayData::Serialize(JSHeapBroker* broker) 
{ } } +class ArrayBoilerplateDescriptionData : public HeapObjectData { + public: + ArrayBoilerplateDescriptionData(JSHeapBroker* broker, ObjectData** storage, + Handle<ArrayBoilerplateDescription> object) + : HeapObjectData(broker, storage, object), + constants_elements_length_(object->constant_elements().length()) {} + + int constants_elements_length() const { return constants_elements_length_; } + + private: + int const constants_elements_length_; +}; + +class ObjectBoilerplateDescriptionData : public HeapObjectData { + public: + ObjectBoilerplateDescriptionData(JSHeapBroker* broker, ObjectData** storage, + Handle<ObjectBoilerplateDescription> object) + : HeapObjectData(broker, storage, object), size_(object->size()) {} + + int size() const { return size_; } + + private: + int const size_; +}; + class JSDataViewData : public JSObjectData { public: JSDataViewData(JSHeapBroker* broker, ObjectData** storage, @@ -465,6 +500,7 @@ class JSBoundFunctionData : public JSObjectData { Handle<JSBoundFunction> object); void Serialize(JSHeapBroker* broker); + bool serialized() const { return serialized_; } ObjectData* bound_target_function() const { return bound_target_function_; } ObjectData* bound_this() const { return bound_this_; } @@ -557,18 +593,6 @@ class HeapNumberData : public HeapObjectData { double const value_; }; -class MutableHeapNumberData : public HeapObjectData { - public: - MutableHeapNumberData(JSHeapBroker* broker, ObjectData** storage, - Handle<MutableHeapNumber> object) - : HeapObjectData(broker, storage, object), value_(object->value()) {} - - double value() const { return value_; } - - private: - double const value_; -}; - class ContextData : public HeapObjectData { public: ContextData(JSHeapBroker* broker, ObjectData** storage, @@ -576,12 +600,15 @@ class ContextData : public HeapObjectData { // {previous} will return the closest valid context possible to desired // {depth}, decrementing {depth} for each previous link successfully followed. - // If {serialize} is true, it will serialize contexts along the way. - ContextData* previous(JSHeapBroker* broker, size_t* depth, bool serialize); + ContextData* previous( + JSHeapBroker* broker, size_t* depth, + SerializationPolicy policy = SerializationPolicy::kAssumeSerialized); - // Returns nullptr if the slot index isn't valid or wasn't serialized - // (unless {serialize} is true). - ObjectData* GetSlot(JSHeapBroker* broker, int index, bool serialize); + // Returns nullptr if the slot index isn't valid or wasn't serialized, + // unless {policy} is {kSerializeIfNeeded}. 
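// For example (sketch; SerializationPolicy is the enum this patch threads
// through in place of the old bool flags):
//
//   ObjectData* slot = context_data->GetSlot(
//       broker, index, SerializationPolicy::kSerializeIfNeeded);
//   if (slot == nullptr) { /* index out of range for this context */ }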
+ ObjectData* GetSlot( + JSHeapBroker* broker, int index, + SerializationPolicy policy = SerializationPolicy::kAssumeSerialized); private: ZoneMap<int, ObjectData*> slots_; @@ -593,10 +620,11 @@ ContextData::ContextData(JSHeapBroker* broker, ObjectData** storage, : HeapObjectData(broker, storage, object), slots_(broker->zone()) {} ContextData* ContextData::previous(JSHeapBroker* broker, size_t* depth, - bool serialize) { + SerializationPolicy policy) { if (*depth == 0) return this; - if (serialize && previous_ == nullptr) { + if (policy == SerializationPolicy::kSerializeIfNeeded && + previous_ == nullptr) { TraceScope tracer(broker, this, "ContextData::previous"); Handle<Context> context = Handle<Context>::cast(object()); Object prev = context->unchecked_previous(); @@ -607,20 +635,20 @@ ContextData* ContextData::previous(JSHeapBroker* broker, size_t* depth, if (previous_ != nullptr) { *depth = *depth - 1; - return previous_->previous(broker, depth, serialize); + return previous_->previous(broker, depth, policy); } return this; } ObjectData* ContextData::GetSlot(JSHeapBroker* broker, int index, - bool serialize) { + SerializationPolicy policy) { CHECK_GE(index, 0); auto search = slots_.find(index); if (search != slots_.end()) { return search->second; } - if (serialize) { + if (policy == SerializationPolicy::kSerializeIfNeeded) { Handle<Context> context = Handle<Context>::cast(object()); if (index < context->length()) { TraceScope tracer(broker, this, "ContextData::GetSlot"); @@ -680,8 +708,9 @@ class StringData : public NameData { bool is_external_string() const { return is_external_string_; } bool is_seq_string() const { return is_seq_string_; } - StringData* GetCharAsString(JSHeapBroker* broker, uint32_t index, - bool serialize); + StringData* GetCharAsString( + JSHeapBroker* broker, uint32_t index, + SerializationPolicy policy = SerializationPolicy::kAssumeSerialized); private: int const length_; @@ -730,14 +759,14 @@ class InternalizedStringData : public StringData { }; StringData* StringData::GetCharAsString(JSHeapBroker* broker, uint32_t index, - bool serialize) { + SerializationPolicy policy) { if (index >= static_cast<uint32_t>(length())) return nullptr; for (auto const& p : chars_as_strings_) { if (p.first == index) return p.second; } - if (!serialize) { + if (policy == SerializationPolicy::kAssumeSerialized) { TRACE_MISSING(broker, "knowledge about index " << index << " on " << this); return nullptr; } @@ -842,6 +871,12 @@ bool IsInlinableFastLiteral(Handle<JSObject> boilerplate) { } // namespace +class AccessorInfoData : public HeapObjectData { + public: + AccessorInfoData(JSHeapBroker* broker, ObjectData** storage, + Handle<AccessorInfo> object); +}; + class AllocationSiteData : public HeapObjectData { public: AllocationSiteData(JSHeapBroker* broker, ObjectData** storage, @@ -891,6 +926,7 @@ class ScriptContextTableData : public HeapObjectData { struct PropertyDescriptor { NameData* key = nullptr; + ObjectData* value = nullptr; PropertyDetails details = PropertyDetails::Empty(); FieldIndex field_index; MapData* field_owner = nullptr; @@ -926,8 +962,11 @@ class MapData : public HeapObjectData { bool supports_fast_array_resize() const { return supports_fast_array_resize_; } - bool IsMapOfCurrentGlobalProxy() const { - return is_map_of_current_global_proxy_; + bool IsMapOfTargetGlobalProxy() const { + return is_map_of_target_global_proxy_; + } + bool is_abandoned_prototype_map() const { + return is_abandoned_prototype_map_; } // Extra information. 
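// The recurring signature change in this file swaps boolean {serialize}
// parameters for an explicit policy enum. A minimal sketch of the enum as
// this patch uses it (the real definition lives elsewhere in the V8 tree):
//
//   enum class SerializationPolicy {
//     kAssumeSerialized,   // cache miss => TRACE_MISSING, return nullptr
//     kSerializeIfNeeded,  // cache miss => read the heap and serialize
//   };
//
// Call sites become self-documenting:
//   previous(broker, &depth, SerializationPolicy::kSerializeIfNeeded);
// rather than the old
//   previous(broker, &depth, true);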
@@ -942,10 +981,14 @@ class MapData : public HeapObjectData { // on field owner(s). void SerializeOwnDescriptor(JSHeapBroker* broker, int descriptor_index); void SerializeOwnDescriptors(JSHeapBroker* broker); + ObjectData* GetStrongValue(int descriptor_index) const; DescriptorArrayData* instance_descriptors() const { return instance_descriptors_; } + void SerializeRootMap(JSHeapBroker* broker); + MapData* FindRootMap() const; + void SerializeConstructor(JSHeapBroker* broker); ObjectData* GetConstructor() const { CHECK(serialized_constructor_); @@ -984,7 +1027,8 @@ class MapData : public HeapObjectData { int const unused_property_fields_; bool const supports_fast_array_iteration_; bool const supports_fast_array_resize_; - bool const is_map_of_current_global_proxy_; + bool const is_map_of_target_global_proxy_; + bool const is_abandoned_prototype_map_; bool serialized_elements_kind_generalizations_ = false; ZoneVector<MapData*> elements_kind_generalizations_; @@ -1001,11 +1045,18 @@ class MapData : public HeapObjectData { bool serialized_prototype_ = false; ObjectData* prototype_ = nullptr; + bool serialized_root_map_ = false; + MapData* root_map_ = nullptr; + bool serialized_for_element_load_ = false; bool serialized_for_element_store_ = false; }; +AccessorInfoData::AccessorInfoData(JSHeapBroker* broker, ObjectData** storage, + Handle<AccessorInfo> object) + : HeapObjectData(broker, storage, object) {} + AllocationSiteData::AllocationSiteData(JSHeapBroker* broker, ObjectData** storage, Handle<AllocationSite> object) @@ -1103,8 +1154,9 @@ MapData::MapData(JSHeapBroker* broker, ObjectData** storage, Handle<Map> object) SupportsFastArrayIteration(broker->isolate(), object)), supports_fast_array_resize_( SupportsFastArrayResize(broker->isolate(), object)), - is_map_of_current_global_proxy_( - object->IsMapOfGlobalProxy(broker->isolate()->native_context())), + is_map_of_target_global_proxy_( + object->IsMapOfGlobalProxy(broker->target_native_context().object())), + is_abandoned_prototype_map_(object->is_abandoned_prototype_map()), elements_kind_generalizations_(broker->zone()) {} JSFunctionData::JSFunctionData(JSHeapBroker* broker, ObjectData** storage, @@ -1210,28 +1262,52 @@ FeedbackCellData::FeedbackCellData(JSHeapBroker* broker, ObjectData** storage, class FeedbackVectorData : public HeapObjectData { public: - const ZoneVector<ObjectData*>& feedback() { return feedback_; } - FeedbackVectorData(JSHeapBroker* broker, ObjectData** storage, Handle<FeedbackVector> object); - void SerializeSlots(JSHeapBroker* broker); + double invocation_count() const { return invocation_count_; } + + void Serialize(JSHeapBroker* broker); + const ZoneVector<ObjectData*>& feedback() { return feedback_; } + FeedbackCellData* GetClosureFeedbackCell(JSHeapBroker* broker, + int index) const; private: + double const invocation_count_; + bool serialized_ = false; ZoneVector<ObjectData*> feedback_; + ZoneVector<ObjectData*> closure_feedback_cell_array_; }; FeedbackVectorData::FeedbackVectorData(JSHeapBroker* broker, ObjectData** storage, Handle<FeedbackVector> object) - : HeapObjectData(broker, storage, object), feedback_(broker->zone()) {} + : HeapObjectData(broker, storage, object), + invocation_count_(object->invocation_count()), + feedback_(broker->zone()), + closure_feedback_cell_array_(broker->zone()) {} + +FeedbackCellData* FeedbackVectorData::GetClosureFeedbackCell( + JSHeapBroker* broker, int index) const { + CHECK_GE(index, 0); + + size_t cell_array_size = closure_feedback_cell_array_.size(); + if (!serialized_) 
{ + DCHECK_EQ(cell_array_size, 0); + TRACE_BROKER_MISSING(broker, + " closure feedback cell array for vector " << this); + return nullptr; + } + CHECK_LT(index, cell_array_size); + return closure_feedback_cell_array_[index]->AsFeedbackCell(); +} -void FeedbackVectorData::SerializeSlots(JSHeapBroker* broker) { +void FeedbackVectorData::Serialize(JSHeapBroker* broker) { if (serialized_) return; serialized_ = true; - TraceScope tracer(broker, this, "FeedbackVectorData::SerializeSlots"); + TraceScope tracer(broker, this, "FeedbackVectorData::Serialize"); Handle<FeedbackVector> vector = Handle<FeedbackVector>::cast(object()); DCHECK(feedback_.empty()); feedback_.reserve(vector->length()); @@ -1252,6 +1328,16 @@ void FeedbackVectorData::SerializeSlots(JSHeapBroker* broker) { } DCHECK_EQ(vector->length(), feedback_.size()); TRACE(broker, "Copied " << feedback_.size() << " slots"); + + DCHECK(closure_feedback_cell_array_.empty()); + int length = vector->closure_feedback_cell_array().length(); + closure_feedback_cell_array_.reserve(length); + for (int i = 0; i < length; ++i) { + Handle<FeedbackCell> cell = vector->GetClosureFeedbackCell(i); + ObjectData* cell_data = broker->GetOrCreateData(cell); + closure_feedback_cell_array_.push_back(cell_data); + } + TRACE(broker, "Copied " << length << " feedback cells"); } class FixedArrayBaseData : public HeapObjectData { @@ -1300,21 +1386,26 @@ void JSBoundFunctionData::Serialize(JSHeapBroker* broker) { Handle<JSBoundFunction> function = Handle<JSBoundFunction>::cast(object()); DCHECK_NULL(bound_target_function_); - DCHECK_NULL(bound_this_); - DCHECK_NULL(bound_arguments_); - bound_target_function_ = broker->GetOrCreateData(function->bound_target_function()); - bound_this_ = broker->GetOrCreateData(function->bound_this()); + if (bound_target_function_->IsJSBoundFunction()) { + bound_target_function_->AsJSBoundFunction()->Serialize(broker); + } else if (bound_target_function_->IsJSFunction()) { + bound_target_function_->AsJSFunction()->Serialize(broker); + } + + DCHECK_NULL(bound_arguments_); bound_arguments_ = broker->GetOrCreateData(function->bound_arguments())->AsFixedArray(); - bound_arguments_->SerializeContents(broker); + + DCHECK_NULL(bound_this_); + bound_this_ = broker->GetOrCreateData(function->bound_this()); } JSObjectData::JSObjectData(JSHeapBroker* broker, ObjectData** storage, Handle<JSObject> object) - : HeapObjectData(broker, storage, object), + : JSReceiverData(broker, storage, object), inobject_fields_(broker->zone()), own_constant_elements_(broker->zone()), own_properties_(broker->zone()) {} @@ -1494,8 +1585,9 @@ class JSArrayData : public JSObjectData { void Serialize(JSHeapBroker* broker); ObjectData* length() const { return length_; } - ObjectData* GetOwnElement(JSHeapBroker* broker, uint32_t index, - bool serialize); + ObjectData* GetOwnElement( + JSHeapBroker* broker, uint32_t index, + SerializationPolicy policy = SerializationPolicy::kAssumeSerialized); private: bool serialized_ = false; @@ -1524,12 +1616,12 @@ void JSArrayData::Serialize(JSHeapBroker* broker) { } ObjectData* JSArrayData::GetOwnElement(JSHeapBroker* broker, uint32_t index, - bool serialize) { + SerializationPolicy policy) { for (auto const& p : own_elements_) { if (p.first == index) return p.second; } - if (!serialize) { + if (policy == SerializationPolicy::kAssumeSerialized) { TRACE_MISSING(broker, "knowledge about index " << index << " on " << this); return nullptr; } @@ -1654,7 +1746,7 @@ class SourceTextModuleData : public HeapObjectData { 
Handle<SourceTextModule> object); void Serialize(JSHeapBroker* broker); - CellData* GetCell(int cell_index) const; + CellData* GetCell(JSHeapBroker* broker, int cell_index) const; private: bool serialized_ = false; @@ -1669,8 +1761,14 @@ SourceTextModuleData::SourceTextModuleData(JSHeapBroker* broker, imports_(broker->zone()), exports_(broker->zone()) {} -CellData* SourceTextModuleData::GetCell(int cell_index) const { - CHECK(serialized_); +CellData* SourceTextModuleData::GetCell(JSHeapBroker* broker, + int cell_index) const { + if (!serialized_) { + DCHECK(imports_.empty()); + TRACE_BROKER_MISSING(broker, + "module cell " << cell_index << " on " << this); + return nullptr; + } CellData* cell; switch (SourceTextModuleDescriptor::GetCellIndexKind(cell_index)) { case SourceTextModuleDescriptor::kImport: @@ -1741,13 +1839,25 @@ void CellData::Serialize(JSHeapBroker* broker) { value_ = broker->GetOrCreateData(cell->value()); } +class JSGlobalObjectData : public JSObjectData { + public: + JSGlobalObjectData(JSHeapBroker* broker, ObjectData** storage, + Handle<JSGlobalObject> object); +}; + +JSGlobalObjectData::JSGlobalObjectData(JSHeapBroker* broker, + ObjectData** storage, + Handle<JSGlobalObject> object) + : JSObjectData(broker, storage, object) {} + class JSGlobalProxyData : public JSObjectData { public: JSGlobalProxyData(JSHeapBroker* broker, ObjectData** storage, Handle<JSGlobalProxy> object); - PropertyCellData* GetPropertyCell(JSHeapBroker* broker, NameData* name, - bool serialize); + PropertyCellData* GetPropertyCell( + JSHeapBroker* broker, NameData* name, + SerializationPolicy policy = SerializationPolicy::kAssumeSerialized); private: // Properties that either @@ -1764,10 +1874,11 @@ JSGlobalProxyData::JSGlobalProxyData(JSHeapBroker* broker, ObjectData** storage, namespace { base::Optional<PropertyCellRef> GetPropertyCellFromHeap(JSHeapBroker* broker, Handle<Name> name) { - LookupIterator it(broker->isolate(), - handle(broker->native_context().object()->global_object(), - broker->isolate()), - name, LookupIterator::OWN); + LookupIterator it( + broker->isolate(), + handle(broker->target_native_context().object()->global_object(), + broker->isolate()), + name, LookupIterator::OWN); it.TryLookupCachedProperty(); if (it.state() == LookupIterator::DATA && it.GetHolder<JSObject>()->IsJSGlobalObject()) { @@ -1777,15 +1888,14 @@ base::Optional<PropertyCellRef> GetPropertyCellFromHeap(JSHeapBroker* broker, } } // namespace -PropertyCellData* JSGlobalProxyData::GetPropertyCell(JSHeapBroker* broker, - NameData* name, - bool serialize) { +PropertyCellData* JSGlobalProxyData::GetPropertyCell( + JSHeapBroker* broker, NameData* name, SerializationPolicy policy) { CHECK_NOT_NULL(name); for (auto const& p : properties_) { if (p.first == name) return p.second; } - if (!serialize) { + if (policy == SerializationPolicy::kAssumeSerialized) { TRACE_MISSING(broker, "knowledge about global property " << name); return nullptr; } @@ -1896,6 +2006,13 @@ void MapData::SerializeOwnDescriptors(JSHeapBroker* broker) { } } +ObjectData* MapData::GetStrongValue(int descriptor_index) const { + auto data = instance_descriptors_->contents().find(descriptor_index); + if (data == instance_descriptors_->contents().end()) return nullptr; + + return data->second.value; +} + void MapData::SerializeOwnDescriptor(JSHeapBroker* broker, int descriptor_index) { TraceScope tracer(broker, this, "MapData::SerializeOwnDescriptor"); @@ -1907,7 +2024,7 @@ void MapData::SerializeOwnDescriptor(JSHeapBroker* broker, } ZoneMap<int, 
PropertyDescriptor>& contents = - instance_descriptors_->contents(); + instance_descriptors()->contents(); CHECK_LT(descriptor_index, map->NumberOfOwnDescriptors()); if (contents.find(descriptor_index) != contents.end()) return; @@ -1919,6 +2036,11 @@ void MapData::SerializeOwnDescriptor(JSHeapBroker* broker, PropertyDescriptor d; d.key = broker->GetOrCreateData(descriptors->GetKey(descriptor_index))->AsName(); + MaybeObject value = descriptors->GetValue(descriptor_index); + HeapObject obj; + if (value.GetHeapObjectIfStrong(&obj)) { + d.value = broker->GetOrCreateData(handle(obj, broker->isolate())); + } d.details = descriptors->GetDetails(descriptor_index); if (d.details.location() == kField) { d.field_index = FieldIndex::ForDescriptor(*map, descriptor_index); @@ -1941,6 +2063,19 @@ void MapData::SerializeOwnDescriptor(JSHeapBroker* broker, << contents.size() << " total)"); } +void MapData::SerializeRootMap(JSHeapBroker* broker) { + if (serialized_root_map_) return; + serialized_root_map_ = true; + + TraceScope tracer(broker, this, "MapData::SerializeRootMap"); + Handle<Map> map = Handle<Map>::cast(object()); + DCHECK_NULL(root_map_); + root_map_ = + broker->GetOrCreateData(map->FindRootMap(broker->isolate()))->AsMap(); +} + +MapData* MapData::FindRootMap() const { return root_map_; } + void JSObjectData::SerializeRecursiveAsBoilerplate(JSHeapBroker* broker, int depth) { if (serialized_as_boilerplate_) return; @@ -2029,15 +2164,16 @@ void JSObjectData::SerializeRecursiveAsBoilerplate(JSHeapBroker* broker, } else { Handle<Object> value(boilerplate->RawFastPropertyAt(field_index), isolate); - // In case of unboxed double fields we use a sentinel NaN value to mark + // In case of double fields we use a sentinel NaN value to mark // uninitialized fields. A boilerplate value with such a field may migrate - // from its unboxed double to a tagged representation. In the process the - // raw double is converted to a heap number. The sentinel value carries no - // special meaning when it occurs in a heap number, so we would like to - // recover the uninitialized value. - // We check for the sentinel here, specifically, since migrations might - // have been triggered as part of boilerplate serialization. - if (value->IsHeapNumber() && + // from its double to a tagged representation. If the double is unboxed, + // the raw double is converted to a heap number, otherwise the (boxed) + // double ceases to be mutable, and becomes a normal heap number. The + // sentinel value carries no special meaning when it occurs in a heap + // number, so we would like to recover the uninitialized value. We check + // for the sentinel here, specifically, since migrations might have been + // triggered as part of boilerplate serialization. 
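// (Sketch of the bit-level check described above: the uninitialized marker
// is one specific NaN bit pattern, and a double comparison cannot isolate
// it, since NaN payloads are indistinguishable at the double level and
// NaN != NaN anyway, so the raw 64 bits are compared instead.
//
//   uint64_t bits = HeapNumber::cast(*value).value_as_bits();
//   bool is_uninitialized_marker = bits == kHoleNanInt64;
//
// kHoleNanInt64 and HeapNumber are V8-internal.)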
+ if (!details.representation().IsDouble() && value->IsHeapNumber() && HeapNumber::cast(*value).value_as_bits() == kHoleNanInt64) { value = isolate->factory()->uninitialized_value(); } @@ -2079,7 +2215,8 @@ bool ObjectRef::equals(const ObjectRef& other) const { Isolate* ObjectRef::isolate() const { return broker()->isolate(); } -ContextRef ContextRef::previous(size_t* depth, bool serialize) const { +ContextRef ContextRef::previous(size_t* depth, + SerializationPolicy policy) const { DCHECK_NOT_NULL(depth); if (broker()->mode() == JSHeapBroker::kDisabled) { AllowHandleAllocation handle_allocation; @@ -2092,10 +2229,11 @@ ContextRef ContextRef::previous(size_t* depth, bool serialize) const { return ContextRef(broker(), handle(current, broker()->isolate())); } ContextData* current = this->data()->AsContext(); - return ContextRef(broker(), current->previous(broker(), depth, serialize)); + return ContextRef(broker(), current->previous(broker(), depth, policy)); } -base::Optional<ObjectRef> ContextRef::get(int index, bool serialize) const { +base::Optional<ObjectRef> ContextRef::get(int index, + SerializationPolicy policy) const { if (broker()->mode() == JSHeapBroker::kDisabled) { AllowHandleAllocation handle_allocation; AllowHandleDereference handle_dereference; @@ -2103,7 +2241,7 @@ base::Optional<ObjectRef> ContextRef::get(int index, bool serialize) const { return ObjectRef(broker(), value); } ObjectData* optional_slot = - data()->AsContext()->GetSlot(broker(), index, serialize); + data()->AsContext()->GetSlot(broker(), index, policy); if (optional_slot != nullptr) { return ObjectRef(broker(), optional_slot); } @@ -2121,13 +2259,13 @@ JSHeapBroker::JSHeapBroker(Isolate* isolate, Zone* broker_zone, tracing_enabled_(tracing_enabled), feedback_(zone()), bytecode_analyses_(zone()), - ais_for_loading_then_(zone()), - ais_for_loading_exec_(zone()) { - // Note that this initialization of the refs_ pointer with the minimal - // initial capacity is redundant in the normal use case (concurrent - // compilation enabled, standard objects to be serialized), as the map - // is going to be replaced immediatelly with a larger capacity one. - // It doesn't seem to affect the performance in a noticeable way though. + property_access_infos_(zone()), + typed_array_string_tags_(zone()) { + // Note that this initialization of {refs_} with the minimal initial capacity + // is redundant in the normal use case (concurrent compilation enabled, + // standard objects to be serialized), as the map is going to be replaced + // immediately with a larger-capacity one. It doesn't seem to affect the + // performance in a noticeable way though. TRACE(this, "Constructing heap broker"); } @@ -2136,13 +2274,6 @@ std::ostream& JSHeapBroker::Trace() { << std::string(trace_indentation_ * 2, ' '); } -void JSHeapBroker::StartSerializing() { - CHECK_EQ(mode_, kDisabled); - TRACE(this, "Starting serialization"); - mode_ = kSerializing; - refs_->Clear(); -} - void JSHeapBroker::StopSerializing() { CHECK_EQ(mode_, kSerializing); TRACE(this, "Stopping serialization"); @@ -2157,39 +2288,54 @@ void JSHeapBroker::Retire() { bool JSHeapBroker::SerializingAllowed() const { return mode() == kSerializing; } -void JSHeapBroker::SetNativeContextRef() { - native_context_ = NativeContextRef(this, isolate()->native_context()); +void JSHeapBroker::SetTargetNativeContextRef( + Handle<NativeContext> native_context) { + // The MapData constructor uses {target_native_context_}. 
This creates a + // benign cycle that we break by setting {target_native_context_} right before + // starting to serialize (thus creating dummy data), and then again properly + // right after. + DCHECK((mode() == kDisabled && !target_native_context_.has_value()) || + (mode() == kSerializing && + target_native_context_->object().equals(native_context) && + target_native_context_->data_->kind() == kUnserializedHeapObject)); + target_native_context_ = NativeContextRef(this, native_context); } bool IsShareable(Handle<Object> object, Isolate* isolate) { - Builtins* const b = isolate->builtins(); - int index; RootIndex root_index; - return (object->IsHeapObject() && - b->IsBuiltinHandle(Handle<HeapObject>::cast(object), &index)) || + bool is_builtin_handle = + object->IsHeapObject() && isolate->builtins()->IsBuiltinHandle( + Handle<HeapObject>::cast(object), &index); + return is_builtin_handle || isolate->roots_table().IsRootHandle(object, &root_index); } -void JSHeapBroker::SerializeShareableObjects() { +void JSHeapBroker::InitializeRefsMap() { + TraceScope tracer(this, "JSHeapBroker::InitializeRefsMap"); + + DCHECK_NULL(compiler_cache_); PerIsolateCompilerCache::Setup(isolate()); compiler_cache_ = isolate()->compiler_cache(); if (compiler_cache_->HasSnapshot()) { - RefsMap* snapshot = compiler_cache_->GetSnapshot(); - - refs_ = new (zone()) RefsMap(snapshot, zone()); + TRACE(this, "Importing existing RefsMap snapshot"); + DCHECK_NULL(refs_); + refs_ = new (zone()) RefsMap(compiler_cache_->GetSnapshot(), zone()); return; } - TraceScope tracer( - this, "JSHeapBroker::SerializeShareableObjects (building snapshot)"); - + TRACE(this, "Building RefsMap snapshot"); + DCHECK_NULL(refs_); refs_ = new (zone()) RefsMap(kInitialRefsBucketCount, AddressMatcher(), zone()); + // Temporarily use the "compiler zone" for serialization, such that the + // serialized data survives this compilation. + DCHECK_EQ(current_zone_, broker_zone_); current_zone_ = compiler_cache_->zone(); + // Serialize various builtins. Builtins* const b = isolate()->builtins(); { Builtins::Name builtins[] = { @@ -2199,17 +2345,28 @@ void JSHeapBroker::SerializeShareableObjects() { Builtins::kAllocateRegularInOldGeneration, Builtins::kArgumentsAdaptorTrampoline, Builtins::kArrayConstructorImpl, + Builtins::kArrayIncludesHoleyDoubles, + Builtins::kArrayIncludesPackedDoubles, + Builtins::kArrayIncludesSmiOrObject, + Builtins::kArrayIndexOfHoleyDoubles, + Builtins::kArrayIndexOfPackedDoubles, + Builtins::kArrayIndexOfSmiOrObject, + Builtins::kCallApiCallback, Builtins::kCallFunctionForwardVarargs, Builtins::kCallFunction_ReceiverIsAny, Builtins::kCallFunction_ReceiverIsNotNullOrUndefined, Builtins::kCallFunction_ReceiverIsNullOrUndefined, + Builtins::kCloneFastJSArray, + Builtins::kCompileLazy, Builtins::kConstructFunctionForwardVarargs, Builtins::kForInFilter, + Builtins::kGetProperty, + Builtins::kIncBlockCounter, Builtins::kJSBuiltinsConstructStub, Builtins::kJSConstructStubGeneric, Builtins::kStringAdd_CheckNone, - Builtins::kStringAdd_ConvertLeft, - Builtins::kStringAdd_ConvertRight, + Builtins::kStringAddConvertLeft, + Builtins::kStringAddConvertRight, Builtins::kToNumber, Builtins::kToObject, }; @@ -2223,12 +2380,13 @@ void JSHeapBroker::SerializeShareableObjects() { } } + // TODO(mslekova): Serialize root objects (from factory). + + // Verify. 
for (RefsMap::Entry* p = refs_->Start(); p != nullptr; p = refs_->Next(p)) { CHECK(IsShareable(p->value->object(), isolate())); } - // TODO(mslekova): - // Serialize root objects (from factory). compiler_cache()->SetSnapshot(refs_); current_zone_ = broker_zone_; } @@ -2252,6 +2410,25 @@ void JSHeapBroker::CollectArrayAndObjectPrototypes() { CHECK(!array_and_object_prototypes_.empty()); } +void JSHeapBroker::SerializeTypedArrayStringTags() { +#define TYPED_ARRAY_STRING_TAG(Type, type, TYPE, ctype) \ + do { \ + ObjectData* data = GetOrCreateData( \ + isolate()->factory()->InternalizeUtf8String(#Type "Array")); \ + typed_array_string_tags_.push_back(data); \ + } while (false); + + TYPED_ARRAYS(TYPED_ARRAY_STRING_TAG) +#undef TYPED_ARRAY_STRING_TAG +} + +StringRef JSHeapBroker::GetTypedArrayStringTag(ElementsKind kind) { + DCHECK(IsTypedArrayElementsKind(kind)); + size_t idx = kind - FIRST_FIXED_TYPED_ARRAY_ELEMENTS_KIND; + CHECK_LT(idx, typed_array_string_tags_.size()); + return StringRef(this, typed_array_string_tags_[idx]); +} + bool JSHeapBroker::IsArrayOrObjectPrototype(const JSObjectRef& object) const { if (mode() == kDisabled) { return isolate()->IsInAnyContext(*object.object(), @@ -2264,22 +2441,29 @@ bool JSHeapBroker::IsArrayOrObjectPrototype(const JSObjectRef& object) const { array_and_object_prototypes_.end(); } -void JSHeapBroker::SerializeStandardObjects() { - if (mode() == kDisabled) return; - CHECK_EQ(mode(), kSerializing); +void JSHeapBroker::InitializeAndStartSerializing( + Handle<NativeContext> native_context) { + TraceScope tracer(this, "JSHeapBroker::InitializeAndStartSerializing"); - SerializeShareableObjects(); + CHECK_EQ(mode_, kDisabled); + mode_ = kSerializing; - TraceScope tracer(this, "JSHeapBroker::SerializeStandardObjects"); + // Throw away the dummy data that we created while disabled. + refs_->Clear(); + refs_ = nullptr; - CollectArrayAndObjectPrototypes(); + InitializeRefsMap(); - SetNativeContextRef(); - native_context().Serialize(); + SetTargetNativeContextRef(native_context); + target_native_context().Serialize(); - Factory* const f = isolate()->factory(); + CollectArrayAndObjectPrototypes(); + SerializeTypedArrayStringTags(); - // Maps, strings, oddballs + // Serialize standard objects. 
+ // + // - Maps, strings, oddballs + Factory* const f = isolate()->factory(); GetOrCreateData(f->arguments_marker_map()); GetOrCreateData(f->bigint_string()); GetOrCreateData(f->block_context_map()); @@ -2300,7 +2484,6 @@ void JSHeapBroker::SerializeStandardObjects() { GetOrCreateData(f->length_string()); GetOrCreateData(f->many_closures_cell_map()); GetOrCreateData(f->minus_zero_value()); - GetOrCreateData(f->mutable_heap_number_map()); GetOrCreateData(f->name_dictionary_map()); GetOrCreateData(f->NaN_string()); GetOrCreateData(f->null_map()); @@ -2312,6 +2495,8 @@ void JSHeapBroker::SerializeStandardObjects() { GetOrCreateData(f->optimized_out()); GetOrCreateData(f->optimized_out_map()); GetOrCreateData(f->property_array_map()); + GetOrCreateData(f->ReflectHas_string()); + GetOrCreateData(f->ReflectGet_string()); GetOrCreateData(f->sloppy_arguments_elements_map()); GetOrCreateData(f->stale_register()); GetOrCreateData(f->stale_register_map()); @@ -2328,8 +2513,7 @@ void JSHeapBroker::SerializeStandardObjects() { GetOrCreateData(f->uninitialized_map()); GetOrCreateData(f->with_context_map()); GetOrCreateData(f->zero_string()); - - // Protector cells + // - Cells GetOrCreateData(f->array_buffer_detaching_protector()) ->AsPropertyCell() ->Serialize(this); @@ -2340,6 +2524,7 @@ void JSHeapBroker::SerializeStandardObjects() { GetOrCreateData(f->array_species_protector()) ->AsPropertyCell() ->Serialize(this); + GetOrCreateData(f->many_closures_cell())->AsFeedbackCell(); GetOrCreateData(f->no_elements_protector()) ->AsPropertyCell() ->Serialize(this); @@ -2353,8 +2538,7 @@ void JSHeapBroker::SerializeStandardObjects() { ->AsPropertyCell() ->Serialize(this); GetOrCreateData(f->string_length_protector())->AsCell()->Serialize(this); - - // CEntry stub + // - CEntry stub GetOrCreateData( CodeFactory::CEntry(isolate(), 1, kDontSaveFPRegs, kArgvOnStack, true)); @@ -2425,7 +2609,7 @@ base::Optional<MapRef> JSObjectRef::GetObjectCreateMap() const { return base::Optional<MapRef>(); } } - MapData* map_data = data()->AsJSObject()->object_create_map(); + MapData* map_data = data()->AsJSObject()->object_create_map(broker()); return map_data != nullptr ? 
MapRef(broker(), map_data) : base::Optional<MapRef>(); } @@ -2535,13 +2719,14 @@ bool MapRef::supports_fast_array_resize() const { return data()->AsMap()->supports_fast_array_resize(); } -bool MapRef::IsMapOfCurrentGlobalProxy() const { +bool MapRef::IsMapOfTargetGlobalProxy() const { if (broker()->mode() == JSHeapBroker::kDisabled) { AllowHandleDereference allow_handle_dereference; AllowHandleAllocation handle_allocation; - return object()->IsMapOfGlobalProxy(broker()->isolate()->native_context()); + return object()->IsMapOfGlobalProxy( + broker()->target_native_context().object()); } - return data()->AsMap()->IsMapOfCurrentGlobalProxy(); + return data()->AsMap()->IsMapOfTargetGlobalProxy(); } int JSFunctionRef::InitialMapInstanceSizeWithMinSlack() const { @@ -2612,6 +2797,18 @@ ObjectRef FeedbackVectorRef::get(FeedbackSlot slot) const { return ObjectRef(broker(), data()->AsFeedbackVector()->feedback().at(i)); } +FeedbackCellRef FeedbackVectorRef::GetClosureFeedbackCell(int index) const { + if (broker()->mode() == JSHeapBroker::kDisabled) { + AllowHandleAllocation handle_allocation; + AllowHandleDereference handle_dereference; + return FeedbackCellRef(broker(), object()->GetClosureFeedbackCell(index)); + } + + return FeedbackCellRef( + broker(), + data()->AsFeedbackVector()->GetClosureFeedbackCell(broker(), index)); +} + double JSObjectRef::RawFastDoublePropertyAt(FieldIndex index) const { if (broker()->mode() == JSHeapBroker::kDisabled) { AllowHandleDereference handle_dereference; @@ -2789,6 +2986,22 @@ base::Optional<double> StringRef::ToNumber() { return data()->AsString()->to_number(); } +int ArrayBoilerplateDescriptionRef::constants_elements_length() const { + if (broker()->mode() == JSHeapBroker::kDisabled) { + AllowHandleDereference allow_handle_dereference; + return object()->constant_elements().length(); + } + return data()->AsArrayBoilerplateDescription()->constants_elements_length(); +} + +int ObjectBoilerplateDescriptionRef::size() const { + if (broker()->mode() == JSHeapBroker::kDisabled) { + AllowHandleDereference allow_handle_dereference; + return object()->size(); + } + return data()->AsObjectBoilerplateDescription()->size(); +} + ObjectRef FixedArrayRef::get(int i) const { if (broker()->mode() == JSHeapBroker::kDisabled) { AllowHandleAllocation handle_allocation; @@ -2954,11 +3167,13 @@ BIMODAL_ACCESSOR_C(BytecodeArray, interpreter::Register, BIMODAL_ACCESSOR(Cell, Object, value) +BIMODAL_ACCESSOR_C(FeedbackVector, double, invocation_count) + BIMODAL_ACCESSOR(HeapObject, Map, map) BIMODAL_ACCESSOR(JSArray, Object, length) -BIMODAL_ACCESSOR(JSBoundFunction, Object, bound_target_function) +BIMODAL_ACCESSOR(JSBoundFunction, JSReceiver, bound_target_function) BIMODAL_ACCESSOR(JSBoundFunction, Object, bound_this) BIMODAL_ACCESSOR(JSBoundFunction, FixedArray, bound_arguments) @@ -3003,6 +3218,7 @@ BIMODAL_ACCESSOR(Map, HeapObject, prototype) BIMODAL_ACCESSOR_C(Map, InstanceType, instance_type) BIMODAL_ACCESSOR(Map, Object, GetConstructor) BIMODAL_ACCESSOR(Map, HeapObject, GetBackPointer) +BIMODAL_ACCESSOR_C(Map, bool, is_abandoned_prototype_map) #define DEF_NATIVE_CONTEXT_ACCESSOR(type, name) \ BIMODAL_ACCESSOR(NativeContext, type, name) @@ -3047,7 +3263,7 @@ bool FunctionTemplateInfoRef::has_call_code() const { BIMODAL_ACCESSOR_C(FunctionTemplateInfo, bool, accept_any_receiver) HolderLookupResult FunctionTemplateInfoRef::LookupHolderOfExpectedType( - MapRef receiver_map, bool serialize) { + MapRef receiver_map, SerializationPolicy policy) { const HolderLookupResult not_found; 
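// (Sketch of the bimodal accessor pattern used throughout this file, shown
// with the is_abandoned_prototype_map bit added by this patch; the scope
// types are V8-internal, and this is roughly what the BIMODAL_ACCESSOR_C
// macro above expands to:
//
//   bool MapRef::is_abandoned_prototype_map() const {
//     if (broker()->mode() == JSHeapBroker::kDisabled) {
//       AllowHandleDereference allow_handle_dereference;
//       return object()->is_abandoned_prototype_map();      // heap read
//     }
//     return data()->AsMap()->is_abandoned_prototype_map(); // serialized
//   }
// )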
if (broker()->mode() == JSHeapBroker::kDisabled) { @@ -3083,7 +3299,7 @@ HolderLookupResult FunctionTemplateInfoRef::LookupHolderOfExpectedType( if (lookup_it != fti_data->known_receivers().cend()) { return lookup_it->second; } - if (!serialize) { + if (policy == SerializationPolicy::kAssumeSerialized) { TRACE_BROKER_MISSING(broker(), "holder for receiver with map " << receiver_map); return not_found; @@ -3129,6 +3345,37 @@ BIMODAL_ACCESSOR_C(String, int, length) BIMODAL_ACCESSOR(FeedbackCell, HeapObject, value) +ObjectRef MapRef::GetStrongValue(int descriptor_index) const { + if (broker()->mode() == JSHeapBroker::kDisabled) { + AllowHandleDereference allow_handle_dereference; + return ObjectRef(broker(), + handle(object()->instance_descriptors().GetStrongValue( + descriptor_index), + broker()->isolate())); + } + return ObjectRef(broker(), data()->AsMap()->GetStrongValue(descriptor_index)); +} + +void MapRef::SerializeRootMap() { + if (broker()->mode() == JSHeapBroker::kDisabled) return; + CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing); + data()->AsMap()->SerializeRootMap(broker()); +} + +base::Optional<MapRef> MapRef::FindRootMap() const { + if (broker()->mode() == JSHeapBroker::kDisabled) { + AllowHandleDereference allow_handle_dereference; + return MapRef(broker(), handle(object()->FindRootMap(broker()->isolate()), + broker()->isolate())); + } + MapData* map_data = data()->AsMap()->FindRootMap(); + if (map_data) { + return MapRef(broker(), map_data); + } + TRACE_BROKER_MISSING(broker(), "root map for object " << *this); + return base::nullopt; +} + void* JSTypedArrayRef::external_pointer() const { if (broker()->mode() == JSHeapBroker::kDisabled) { AllowHandleDereference allow_handle_dereference; @@ -3297,7 +3544,7 @@ Maybe<double> ObjectRef::OddballToNumber() const { } base::Optional<ObjectRef> ObjectRef::GetOwnConstantElement( - uint32_t index, bool serialize) const { + uint32_t index, SerializationPolicy policy) const { if (broker()->mode() == JSHeapBroker::kDisabled) { return (IsJSObject() || IsString()) ? 
GetOwnElementFromHeap(broker(), object(), index, true) @@ -3306,35 +3553,36 @@ base::Optional<ObjectRef> ObjectRef::GetOwnConstantElement( ObjectData* element = nullptr; if (IsJSObject()) { element = - data()->AsJSObject()->GetOwnConstantElement(broker(), index, serialize); + data()->AsJSObject()->GetOwnConstantElement(broker(), index, policy); } else if (IsString()) { - element = data()->AsString()->GetCharAsString(broker(), index, serialize); + element = data()->AsString()->GetCharAsString(broker(), index, policy); } if (element == nullptr) return base::nullopt; return ObjectRef(broker(), element); } -base::Optional<ObjectRef> JSObjectRef::GetOwnProperty( +base::Optional<ObjectRef> JSObjectRef::GetOwnDataProperty( Representation field_representation, FieldIndex index, - bool serialize) const { + SerializationPolicy policy) const { if (broker()->mode() == JSHeapBroker::kDisabled) { - return GetOwnPropertyFromHeap(broker(), Handle<JSObject>::cast(object()), - field_representation, index); + return GetOwnDataPropertyFromHeap(broker(), + Handle<JSObject>::cast(object()), + field_representation, index); } - ObjectData* property = data()->AsJSObject()->GetOwnProperty( - broker(), field_representation, index, serialize); + ObjectData* property = data()->AsJSObject()->GetOwnDataProperty( + broker(), field_representation, index, policy); if (property == nullptr) return base::nullopt; return ObjectRef(broker(), property); } -base::Optional<ObjectRef> JSArrayRef::GetOwnCowElement(uint32_t index, - bool serialize) const { +base::Optional<ObjectRef> JSArrayRef::GetOwnCowElement( + uint32_t index, SerializationPolicy policy) const { if (broker()->mode() == JSHeapBroker::kDisabled) { if (!object()->elements().IsCowArray()) return base::nullopt; return GetOwnElementFromHeap(broker(), object(), index, false); } - if (serialize) { + if (policy == SerializationPolicy::kSerializeIfNeeded) { data()->AsJSObject()->SerializeElements(broker()); } else if (!data()->AsJSObject()->serialized_elements()) { TRACE(broker(), "'elements' on " << this); @@ -3343,7 +3591,7 @@ base::Optional<ObjectRef> JSArrayRef::GetOwnCowElement(uint32_t index, if (!elements().map().IsFixedCowArrayMap()) return base::nullopt; ObjectData* element = - data()->AsJSArray()->GetOwnElement(broker(), index, serialize); + data()->AsJSArray()->GetOwnElement(broker(), index, policy); if (element == nullptr) return base::nullopt; return ObjectRef(broker(), element); } @@ -3353,27 +3601,25 @@ double HeapNumberRef::value() const { return data()->AsHeapNumber()->value(); } -double MutableHeapNumberRef::value() const { - IF_BROKER_DISABLED_ACCESS_HANDLE_C(MutableHeapNumber, value); - return data()->AsMutableHeapNumber()->value(); -} - uint64_t BigIntRef::AsUint64() const { IF_BROKER_DISABLED_ACCESS_HANDLE_C(BigInt, AsUint64); return data()->AsBigInt()->AsUint64(); } -CellRef SourceTextModuleRef::GetCell(int cell_index) const { +base::Optional<CellRef> SourceTextModuleRef::GetCell(int cell_index) const { if (broker()->mode() == JSHeapBroker::kDisabled) { AllowHandleAllocation handle_allocation; AllowHandleDereference allow_handle_dereference; return CellRef(broker(), handle(object()->GetCell(cell_index), broker()->isolate())); } - return CellRef(broker(), data()->AsSourceTextModule()->GetCell(cell_index)); + CellData* cell = data()->AsSourceTextModule()->GetCell(broker(), cell_index); + if (cell == nullptr) return base::nullopt; + return CellRef(broker(), cell); } -ObjectRef::ObjectRef(JSHeapBroker* broker, Handle<Object> object) 
+ObjectRef::ObjectRef(JSHeapBroker* broker, Handle<Object> object, + bool check_type) : broker_(broker) { switch (broker->mode()) { case JSHeapBroker::kSerialized: @@ -3398,6 +3644,10 @@ ObjectRef::ObjectRef(JSHeapBroker* broker, Handle<Object> object) case JSHeapBroker::kRetired: UNREACHABLE(); } + if (!data_) { // TODO(mslekova): Remove once we're on the background thread. + AllowHandleDereference handle_dereference; + object->Print(); + } CHECK_WITH_MSG(data_ != nullptr, "Object is not known to the heap broker"); } @@ -3489,8 +3739,8 @@ Float64 FixedDoubleArrayData::Get(int i) const { return contents_[i]; } -void FeedbackVectorRef::SerializeSlots() { - data()->AsFeedbackVector()->SerializeSlots(broker()); +void FeedbackVectorRef::Serialize() { + data()->AsFeedbackVector()->Serialize(broker()); } bool NameRef::IsUniqueName() const { @@ -3597,8 +3847,13 @@ void JSFunctionRef::Serialize() { data()->AsJSFunction()->Serialize(broker()); } +bool JSBoundFunctionRef::serialized() const { + if (broker()->mode() == JSHeapBroker::kDisabled) return true; + return data()->AsJSBoundFunction()->serialized(); +} + bool JSFunctionRef::serialized() const { - CHECK_NE(broker()->mode(), JSHeapBroker::kDisabled); + if (broker()->mode() == JSHeapBroker::kDisabled) return true; return data()->AsJSFunction()->serialized(); } @@ -3614,10 +3869,9 @@ bool JSFunctionRef::IsSerializedForCompilation() const { shared().IsSerializedForCompilation(feedback_vector()); } -JSArrayRef SharedFunctionInfoRef::GetTemplateObject(ObjectRef description, - FeedbackVectorRef vector, - FeedbackSlot slot, - bool serialize) { +JSArrayRef SharedFunctionInfoRef::GetTemplateObject( + ObjectRef description, FeedbackVectorRef vector, FeedbackSlot slot, + SerializationPolicy policy) { // Look in the feedback vector for the array. A Smi indicates that it's // not yet cached here. 
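// (The caching convention, for orientation: the feedback slot starts out
// holding a Smi; once the template object has been created it is cached
// back, so finding a non-Smi HeapObject below is a cache hit that skips
// TemplateObjectDescription::GetTemplateObject entirely.)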
ObjectRef candidate = vector.get(slot); @@ -3632,22 +3886,22 @@ JSArrayRef SharedFunctionInfoRef::GetTemplateObject(ObjectRef description, Handle<TemplateObjectDescription>::cast(description.object()); Handle<JSArray> template_object = TemplateObjectDescription::GetTemplateObject( - broker()->isolate(), broker()->native_context().object(), tod, - object(), slot.ToInt()); + broker()->isolate(), broker()->target_native_context().object(), + tod, object(), slot.ToInt()); return JSArrayRef(broker(), template_object); } JSArrayData* array = data()->AsSharedFunctionInfo()->GetTemplateObject(slot); if (array != nullptr) return JSArrayRef(broker(), array); - CHECK(serialize); + CHECK_EQ(policy, SerializationPolicy::kSerializeIfNeeded); CHECK(broker()->SerializingAllowed()); Handle<TemplateObjectDescription> tod = Handle<TemplateObjectDescription>::cast(description.object()); Handle<JSArray> template_object = TemplateObjectDescription::GetTemplateObject( - broker()->isolate(), broker()->native_context().object(), tod, + broker()->isolate(), broker()->target_native_context().object(), tod, object(), slot.ToInt()); array = broker()->GetOrCreateData(template_object)->AsJSArray(); data()->AsSharedFunctionInfo()->SetTemplateObject(slot, array); @@ -3663,15 +3917,17 @@ void SharedFunctionInfoRef::SetSerializedForCompilation( void SharedFunctionInfoRef::SerializeFunctionTemplateInfo() { CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing); - data()->AsSharedFunctionInfo()->SerializeFunctionTemplateInfo(broker()); } base::Optional<FunctionTemplateInfoRef> SharedFunctionInfoRef::function_template_info() const { if (broker()->mode() == JSHeapBroker::kDisabled) { - return FunctionTemplateInfoRef( - broker(), handle(object()->function_data(), broker()->isolate())); + if (object()->IsApiFunction()) { + return FunctionTemplateInfoRef( + broker(), handle(object()->function_data(), broker()->isolate())); + } + return base::nullopt; } FunctionTemplateInfoData* function_template_info = data()->AsSharedFunctionInfo()->function_template_info(); @@ -3703,6 +3959,16 @@ void MapRef::SerializeOwnDescriptor(int descriptor_index) { data()->AsMap()->SerializeOwnDescriptor(broker(), descriptor_index); } +bool MapRef::serialized_own_descriptor(int descriptor_index) const { + CHECK_LT(descriptor_index, NumberOfOwnDescriptors()); + if (broker()->mode() == JSHeapBroker::kDisabled) return true; + DescriptorArrayData* desc_array_data = + data()->AsMap()->instance_descriptors(); + if (!desc_array_data) return false; + return desc_array_data->contents().find(descriptor_index) != + desc_array_data->contents().end(); +} + void MapRef::SerializeBackPointer() { if (broker()->mode() == JSHeapBroker::kDisabled) return; CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing); @@ -3762,13 +4028,13 @@ void FunctionTemplateInfoRef::SerializeCallCode() { } base::Optional<PropertyCellRef> JSGlobalProxyRef::GetPropertyCell( - NameRef const& name, bool serialize) const { + NameRef const& name, SerializationPolicy policy) const { if (broker()->mode() == JSHeapBroker::kDisabled) { return GetPropertyCellFromHeap(broker(), name.object()); } PropertyCellData* property_cell_data = - data()->AsJSGlobalProxy()->GetPropertyCell( - broker(), name.data()->AsName(), serialize); + data()->AsJSGlobalProxy()->GetPropertyCell(broker(), + name.data()->AsName(), policy); if (property_cell_data == nullptr) return base::nullopt; return PropertyCellRef(broker(), property_cell_data); } @@ -3787,64 +4053,133 @@ bool CanInlineElementAccess(MapRef const& map) { return 
false; } -InsufficientFeedback::InsufficientFeedback() - : ProcessedFeedback(kInsufficient) {} +ProcessedFeedback::ProcessedFeedback(Kind kind, FeedbackSlotKind slot_kind) + : kind_(kind), slot_kind_(slot_kind) {} + +KeyedAccessMode ElementAccessFeedback::keyed_mode() const { + return keyed_mode_; +} + +ZoneVector<ElementAccessFeedback::TransitionGroup> const& +ElementAccessFeedback::transition_groups() const { + return transition_groups_; +} + +ElementAccessFeedback const& ElementAccessFeedback::Refine( + ZoneVector<Handle<Map>> const& inferred_maps, Zone* zone) const { + ElementAccessFeedback& refined_feedback = + *new (zone) ElementAccessFeedback(zone, keyed_mode(), slot_kind()); + if (inferred_maps.empty()) return refined_feedback; + + ZoneUnorderedSet<Handle<Map>, Handle<Map>::hash, Handle<Map>::equal_to> + inferred(zone); + inferred.insert(inferred_maps.begin(), inferred_maps.end()); + + for (auto const& group : transition_groups()) { + DCHECK(!group.empty()); + TransitionGroup new_group(zone); + for (size_t i = 1; i < group.size(); ++i) { + Handle<Map> source = group[i]; + if (inferred.find(source) != inferred.end()) { + new_group.push_back(source); + } + } + + Handle<Map> target = group.front(); + bool const keep_target = + inferred.find(target) != inferred.end() || new_group.size() > 1; + if (keep_target) { + new_group.push_back(target); + // The target must be at the front, the order of sources doesn't matter. + std::swap(new_group[0], new_group[new_group.size() - 1]); + } + + if (!new_group.empty()) { + DCHECK(new_group.size() == 1 || new_group.front().equals(target)); + refined_feedback.transition_groups_.push_back(std::move(new_group)); + } + } + return refined_feedback; +} + +InsufficientFeedback::InsufficientFeedback(FeedbackSlotKind slot_kind) + : ProcessedFeedback(kInsufficient, slot_kind) {} -GlobalAccessFeedback::GlobalAccessFeedback(PropertyCellRef cell) - : ProcessedFeedback(kGlobalAccess), +GlobalAccessFeedback::GlobalAccessFeedback(PropertyCellRef cell, + FeedbackSlotKind slot_kind) + : ProcessedFeedback(kGlobalAccess, slot_kind), cell_or_context_(cell), - index_and_immutable_(0 /* doesn't matter */) {} + index_and_immutable_(0 /* doesn't matter */) { + DCHECK(IsGlobalICKind(slot_kind)); +} + +GlobalAccessFeedback::GlobalAccessFeedback(FeedbackSlotKind slot_kind) + : ProcessedFeedback(kGlobalAccess, slot_kind), + index_and_immutable_(0 /* doesn't matter */) { + DCHECK(IsGlobalICKind(slot_kind)); +} GlobalAccessFeedback::GlobalAccessFeedback(ContextRef script_context, - int slot_index, bool immutable) - : ProcessedFeedback(kGlobalAccess), + int slot_index, bool immutable, + FeedbackSlotKind slot_kind) + : ProcessedFeedback(kGlobalAccess, slot_kind), cell_or_context_(script_context), index_and_immutable_(FeedbackNexus::SlotIndexBits::encode(slot_index) | FeedbackNexus::ImmutabilityBit::encode(immutable)) { DCHECK_EQ(this->slot_index(), slot_index); DCHECK_EQ(this->immutable(), immutable); + DCHECK(IsGlobalICKind(slot_kind)); } +bool GlobalAccessFeedback::IsMegamorphic() const { + return !cell_or_context_.has_value(); +} bool GlobalAccessFeedback::IsPropertyCell() const { - return cell_or_context_.IsPropertyCell(); + return cell_or_context_.has_value() && cell_or_context_->IsPropertyCell(); +} +bool GlobalAccessFeedback::IsScriptContextSlot() const { + return cell_or_context_.has_value() && cell_or_context_->IsContext(); } PropertyCellRef GlobalAccessFeedback::property_cell() const { - DCHECK(IsPropertyCell()); - return cell_or_context_.AsPropertyCell(); + 
CHECK(IsPropertyCell()); + return cell_or_context_->AsPropertyCell(); } ContextRef GlobalAccessFeedback::script_context() const { - DCHECK(IsScriptContextSlot()); - return cell_or_context_.AsContext(); + CHECK(IsScriptContextSlot()); + return cell_or_context_->AsContext(); } int GlobalAccessFeedback::slot_index() const { - CHECK(IsScriptContextSlot()); + DCHECK(IsScriptContextSlot()); return FeedbackNexus::SlotIndexBits::decode(index_and_immutable_); } bool GlobalAccessFeedback::immutable() const { - CHECK(IsScriptContextSlot()); + DCHECK(IsScriptContextSlot()); return FeedbackNexus::ImmutabilityBit::decode(index_and_immutable_); } base::Optional<ObjectRef> GlobalAccessFeedback::GetConstantHint() const { - if (IsScriptContextSlot()) { - if (immutable()) return script_context().get(slot_index()); - } else { + if (IsPropertyCell()) { return property_cell().value(); + } else if (IsScriptContextSlot() && immutable()) { + return script_context().get(slot_index()); + } else { + return base::nullopt; } - return {}; } KeyedAccessMode KeyedAccessMode::FromNexus(FeedbackNexus const& nexus) { - if (IsKeyedLoadICKind(nexus.kind())) { + FeedbackSlotKind kind = nexus.kind(); + if (IsKeyedLoadICKind(kind)) { return KeyedAccessMode(AccessMode::kLoad, nexus.GetKeyedAccessLoadMode()); } - if (IsKeyedHasICKind(nexus.kind())) { + if (IsKeyedHasICKind(kind)) { return KeyedAccessMode(AccessMode::kHas, nexus.GetKeyedAccessLoadMode()); } - if (IsKeyedStoreICKind(nexus.kind())) { + if (IsKeyedStoreICKind(kind)) { return KeyedAccessMode(AccessMode::kStore, nexus.GetKeyedAccessStoreMode()); } - if (IsStoreInArrayLiteralICKind(nexus.kind())) { + if (IsStoreInArrayLiteralICKind(kind) || + IsStoreDataPropertyInLiteralKind(kind)) { return KeyedAccessMode(AccessMode::kStoreInLiteral, nexus.GetKeyedAccessStoreMode()); } @@ -3890,59 +4225,40 @@ KeyedAccessMode::KeyedAccessMode(AccessMode access_mode, } ElementAccessFeedback::ElementAccessFeedback(Zone* zone, - KeyedAccessMode const& keyed_mode) - : ProcessedFeedback(kElementAccess), - receiver_maps(zone), - transitions(zone), - keyed_mode(keyed_mode) {} - -ElementAccessFeedback::MapIterator::MapIterator( - ElementAccessFeedback const& processed, JSHeapBroker* broker) - : processed_(processed), broker_(broker) { - CHECK_LT(processed.receiver_maps.size(), - std::numeric_limits<size_t>::max() - processed.transitions.size()); -} - -bool ElementAccessFeedback::MapIterator::done() const { - return index_ >= - processed_.receiver_maps.size() + processed_.transitions.size(); -} - -void ElementAccessFeedback::MapIterator::advance() { index_++; } - -MapRef ElementAccessFeedback::MapIterator::current() const { - CHECK(!done()); - size_t receiver_maps_size = processed_.receiver_maps.size(); - Handle<Map> map; - if (index_ < receiver_maps_size) { - map = processed_.receiver_maps[index_]; - } else { - map = processed_.transitions[index_ - receiver_maps_size].first; + KeyedAccessMode const& keyed_mode, + FeedbackSlotKind slot_kind) + : ProcessedFeedback(kElementAccess, slot_kind), + keyed_mode_(keyed_mode), + transition_groups_(zone) { + DCHECK(IsKeyedLoadICKind(slot_kind) || IsKeyedHasICKind(slot_kind) || + IsStoreDataPropertyInLiteralKind(slot_kind) || + IsKeyedStoreICKind(slot_kind) || + IsStoreInArrayLiteralICKind(slot_kind)); +} + +bool ElementAccessFeedback::HasOnlyStringMaps(JSHeapBroker* broker) const { + for (auto const& group : transition_groups()) { + for (Handle<Map> map : group) { + if (!MapRef(broker, map).IsStringMap()) return false; + } } - return MapRef(broker_, map); -} 
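// (Sketch of the transition-group layout that Refine() and
// HasOnlyStringMaps() rely on, per the comment above: group.front() is the
// transition target, any further entries are sources transitioning into it.
//
//   for (auto const& group : feedback.transition_groups()) {
//     Handle<Map> target = group.front();       // always present
//     for (size_t i = 1; i < group.size(); ++i) {
//       Handle<Map> source = group[i];          // transitions to target
//     }
//   }
// )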
- -ElementAccessFeedback::MapIterator ElementAccessFeedback::all_maps( - JSHeapBroker* broker) const { - return MapIterator(*this, broker); + return true; } -NamedAccessFeedback::NamedAccessFeedback( - NameRef const& name, ZoneVector<PropertyAccessInfo> const& access_infos) - : ProcessedFeedback(kNamedAccess), - name_(name), - access_infos_(access_infos) { - CHECK(!access_infos.empty()); +NamedAccessFeedback::NamedAccessFeedback(NameRef const& name, + ZoneVector<Handle<Map>> const& maps, + FeedbackSlotKind slot_kind) + : ProcessedFeedback(kNamedAccess, slot_kind), name_(name), maps_(maps) { + DCHECK(IsLoadICKind(slot_kind) || IsStoreICKind(slot_kind) || + IsStoreOwnICKind(slot_kind) || IsKeyedLoadICKind(slot_kind) || + IsKeyedHasICKind(slot_kind) || IsKeyedStoreICKind(slot_kind) || + IsStoreInArrayLiteralICKind(slot_kind) || + IsStoreDataPropertyInLiteralKind(slot_kind)); } -FeedbackSource::FeedbackSource(FeedbackNexus const& nexus) - : vector(nexus.vector_handle()), slot(nexus.slot()) {} - -FeedbackSource::FeedbackSource(VectorSlotPair const& pair) - : vector(pair.vector()), slot(pair.slot()) {} - void JSHeapBroker::SetFeedback(FeedbackSource const& source, ProcessedFeedback const* feedback) { + CHECK(source.IsValid()); auto insertion = feedback_.insert({source, feedback}); CHECK(insertion.second); } @@ -3951,80 +4267,90 @@ bool JSHeapBroker::HasFeedback(FeedbackSource const& source) const { return feedback_.find(source) != feedback_.end(); } -ProcessedFeedback const* JSHeapBroker::GetFeedback( +ProcessedFeedback const& JSHeapBroker::GetFeedback( FeedbackSource const& source) const { + DCHECK(source.IsValid()); auto it = feedback_.find(source); CHECK_NE(it, feedback_.end()); - return it->second; + return *it->second; } -GlobalAccessFeedback const* JSHeapBroker::GetGlobalAccessFeedback( +FeedbackSlotKind JSHeapBroker::GetFeedbackSlotKind( FeedbackSource const& source) const { - ProcessedFeedback const* feedback = GetFeedback(source); - if (feedback == nullptr) return nullptr; - CHECK_EQ(feedback->kind(), ProcessedFeedback::kGlobalAccess); - return static_cast<GlobalAccessFeedback const*>(feedback); -} - -ElementAccessFeedback const* JSHeapBroker::ProcessFeedbackMapsForElementAccess( - MapHandles const& maps, KeyedAccessMode const& keyed_mode) { - DCHECK(!maps.empty()); - - // Collect possible transition targets. - MapHandles possible_transition_targets; - possible_transition_targets.reserve(maps.size()); - for (Handle<Map> map : maps) { - if (CanInlineElementAccess(MapRef(this, map)) && - IsFastElementsKind(map->elements_kind()) && - GetInitialFastElementsKind() != map->elements_kind()) { - possible_transition_targets.push_back(map); - } + if (FLAG_concurrent_inlining) { + ProcessedFeedback const& processed = GetFeedback(source); + return processed.slot_kind(); } + FeedbackNexus nexus(source.vector, source.slot); + return nexus.kind(); +} - ElementAccessFeedback* result = - new (zone()) ElementAccessFeedback(zone(), keyed_mode); +bool JSHeapBroker::FeedbackIsInsufficient(FeedbackSource const& source) const { + return FLAG_concurrent_inlining + ? GetFeedback(source).IsInsufficient() + : FeedbackNexus(source.vector, source.slot).IsUninitialized(); +} - // Separate the actual receiver maps and the possible transition sources. +namespace { +MapHandles GetRelevantReceiverMaps(Isolate* isolate, MapHandles const& maps) { + MapHandles result; for (Handle<Map> map : maps) { - // Don't generate elements kind transitions from stable maps. - Map transition_target = map->is_stable() - ? 
Map() : map->FindElementsKindTransitionedMap( - isolate(), possible_transition_targets); - if (transition_target.is_null()) { - result->receiver_maps.push_back(map); - } else { - result->transitions.emplace_back(map, - handle(transition_target, isolate())); + if (Map::TryUpdate(isolate, map).ToHandle(&map) && + !map->is_abandoned_prototype_map()) { + DCHECK(!map->is_deprecated()); + result.push_back(map); + } + } + return result; +} +}  // namespace -#ifdef ENABLE_SLOW_DCHECKS - // No transition sources appear in {receiver_maps}. - // All transition targets appear in {receiver_maps}. - for (auto& transition : result->transitions) { - CHECK(std::none_of( - result->receiver_maps.cbegin(), result->receiver_maps.cend(), - [&](Handle<Map> map) { return map.equals(transition.first); })); - CHECK(std::any_of( - result->receiver_maps.cbegin(), result->receiver_maps.cend(), - [&](Handle<Map> map) { return map.equals(transition.second); })); +ProcessedFeedback const& JSHeapBroker::ReadFeedbackForPropertyAccess( + FeedbackSource const& source, AccessMode mode, + base::Optional<NameRef> static_name) { + FeedbackNexus nexus(source.vector, source.slot); + FeedbackSlotKind kind = nexus.kind(); + if (nexus.IsUninitialized()) return *new (zone()) InsufficientFeedback(kind); + + MapHandles maps; + nexus.ExtractMaps(&maps); + DCHECK_NE(nexus.ic_state(), PREMONOMORPHIC); + if (!maps.empty()) { + maps = GetRelevantReceiverMaps(isolate(), maps); + if (maps.empty()) return *new (zone()) InsufficientFeedback(kind); + } + + base::Optional<NameRef> name = + static_name.has_value() ? static_name : GetNameFeedback(nexus); + if (name.has_value()) { + return *new (zone()) NamedAccessFeedback( + *name, ZoneVector<Handle<Map>>(maps.begin(), maps.end(), zone()), kind); + } else if (nexus.GetKeyType() == ELEMENT && !maps.empty()) { + return ProcessFeedbackMapsForElementAccess( + maps, KeyedAccessMode::FromNexus(nexus), kind); + } else { + // No actionable feedback. + DCHECK(maps.empty()); + // TODO(neis): Investigate if we really want to treat cleared the same as + // megamorphic (also for global accesses). + // TODO(neis): Using ElementAccessFeedback here is kind of an abuse.
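+    // Note: the ElementAccessFeedback constructed below carries no + // transition groups and hence no receiver maps; only the slot kind and + // the keyed mode are recorded, which is how the megamorphic case is + // represented here.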
+ return *new (zone()) + ElementAccessFeedback(zone(), KeyedAccessMode::FromNexus(nexus), kind); } -#endif - CHECK(!result->receiver_maps.empty()); - - return result; } -GlobalAccessFeedback const* JSHeapBroker::ProcessFeedbackForGlobalAccess( +ProcessedFeedback const& JSHeapBroker::ReadFeedbackForGlobalAccess( FeedbackSource const& source) { FeedbackNexus nexus(source.vector, source.slot); DCHECK(nexus.kind() == FeedbackSlotKind::kLoadGlobalInsideTypeof || nexus.kind() == FeedbackSlotKind::kLoadGlobalNotInsideTypeof || nexus.kind() == FeedbackSlotKind::kStoreGlobalSloppy || nexus.kind() == FeedbackSlotKind::kStoreGlobalStrict); + if (nexus.IsUninitialized()) { + return *new (zone()) InsufficientFeedback(nexus.kind()); + } if (nexus.ic_state() != MONOMORPHIC || nexus.GetFeedback()->IsCleared()) { - return nullptr; + return *new (zone()) GlobalAccessFeedback(nexus.kind()); } Handle<Object> feedback_value(nexus.GetFeedback()->GetHeapObjectOrSmi(), @@ -4039,7 +4365,7 @@ GlobalAccessFeedback const* JSHeapBroker::ProcessFeedbackForGlobalAccess( int const context_slot_index = FeedbackNexus::SlotIndexBits::decode(number); bool const immutable = FeedbackNexus::ImmutabilityBit::decode(number); Handle<Context> context = ScriptContextTable::GetContext( - isolate(), native_context().script_context_table().object(), + isolate(), target_native_context().script_context_table().object(), script_context_index); { ObjectRef contents(this, @@ -4049,10 +4375,11 @@ GlobalAccessFeedback const* JSHeapBroker::ProcessFeedbackForGlobalAccess( } ContextRef context_ref(this, context); if (immutable) { - context_ref.get(context_slot_index, true); + context_ref.get(context_slot_index, + SerializationPolicy::kSerializeIfNeeded); } - return new (zone()) - GlobalAccessFeedback(context_ref, context_slot_index, immutable); + return *new (zone()) GlobalAccessFeedback(context_ref, context_slot_index, + immutable, nexus.kind()); } CHECK(feedback_value->IsPropertyCell()); @@ -4060,11 +4387,275 @@ GlobalAccessFeedback const* JSHeapBroker::ProcessFeedbackForGlobalAccess( // object and the feedback is the cell holding its value. 
PropertyCellRef cell(this, Handle<PropertyCell>::cast(feedback_value)); cell.Serialize(); - return new (zone()) GlobalAccessFeedback(cell); + return *new (zone()) GlobalAccessFeedback(cell, nexus.kind()); +} + +BinaryOperationHint JSHeapBroker::ReadFeedbackForBinaryOperation( + FeedbackSource const& source) const { + return FeedbackNexus(source.vector, source.slot).GetBinaryOperationFeedback(); +} + +CompareOperationHint JSHeapBroker::ReadFeedbackForCompareOperation( + FeedbackSource const& source) const { + return FeedbackNexus(source.vector, source.slot) + .GetCompareOperationFeedback(); +} + +ForInHint JSHeapBroker::ReadFeedbackForForIn( + FeedbackSource const& source) const { + return FeedbackNexus(source.vector, source.slot).GetForInFeedback(); +} + +ProcessedFeedback const& JSHeapBroker::ReadFeedbackForInstanceOf( + FeedbackSource const& source) { + FeedbackNexus nexus(source.vector, source.slot); + if (nexus.IsUninitialized()) + return *new (zone()) InsufficientFeedback(nexus.kind()); + + base::Optional<JSObjectRef> optional_constructor; + { + MaybeHandle<JSObject> maybe_constructor = nexus.GetConstructorFeedback(); + Handle<JSObject> constructor; + if (maybe_constructor.ToHandle(&constructor)) { + optional_constructor = JSObjectRef(this, constructor); + } + } + return *new (zone()) InstanceOfFeedback(optional_constructor, nexus.kind()); +} + +ProcessedFeedback const& JSHeapBroker::ReadFeedbackForCall( + FeedbackSource const& source) { + FeedbackNexus nexus(source.vector, source.slot); + if (nexus.IsUninitialized()) + return *new (zone()) InsufficientFeedback(nexus.kind()); + + base::Optional<HeapObjectRef> target_ref; + { + MaybeObject maybe_target = nexus.GetFeedback(); + HeapObject target_object; + if (maybe_target->GetHeapObject(&target_object)) { + target_ref = HeapObjectRef(this, handle(target_object, isolate())); + } + } + float frequency = nexus.ComputeCallFrequency(); + SpeculationMode mode = nexus.GetSpeculationMode(); + return *new (zone()) CallFeedback(target_ref, frequency, mode, nexus.kind()); +} + +BinaryOperationHint JSHeapBroker::GetFeedbackForBinaryOperation( + FeedbackSource const& source) { + ProcessedFeedback const& feedback = + FLAG_concurrent_inlining ? GetFeedback(source) + : ProcessFeedbackForBinaryOperation(source); + return feedback.IsInsufficient() ? BinaryOperationHint::kNone + : feedback.AsBinaryOperation().value(); +} + +CompareOperationHint JSHeapBroker::GetFeedbackForCompareOperation( + FeedbackSource const& source) { + ProcessedFeedback const& feedback = + FLAG_concurrent_inlining ? GetFeedback(source) + : ProcessFeedbackForCompareOperation(source); + return feedback.IsInsufficient() ? CompareOperationHint::kNone + : feedback.AsCompareOperation().value(); +} + +ForInHint JSHeapBroker::GetFeedbackForForIn(FeedbackSource const& source) { + ProcessedFeedback const& feedback = FLAG_concurrent_inlining + ? GetFeedback(source) + : ProcessFeedbackForForIn(source); + return feedback.IsInsufficient() ? ForInHint::kNone + : feedback.AsForIn().value(); +} + +ProcessedFeedback const& JSHeapBroker::GetFeedbackForPropertyAccess( + FeedbackSource const& source, AccessMode mode, + base::Optional<NameRef> static_name) { + return FLAG_concurrent_inlining + ? GetFeedback(source) + : ProcessFeedbackForPropertyAccess(source, mode, static_name); +} + +ProcessedFeedback const& JSHeapBroker::GetFeedbackForInstanceOf( + FeedbackSource const& source) { + return FLAG_concurrent_inlining ? 
GetFeedback(source) + : ProcessFeedbackForInstanceOf(source); +} + +ProcessedFeedback const& JSHeapBroker::GetFeedbackForCall( + FeedbackSource const& source) { + return FLAG_concurrent_inlining ? GetFeedback(source) + : ProcessFeedbackForCall(source); +} + +ProcessedFeedback const& JSHeapBroker::GetFeedbackForGlobalAccess( + FeedbackSource const& source) { + return FLAG_concurrent_inlining ? GetFeedback(source) + : ProcessFeedbackForGlobalAccess(source); +} + +ProcessedFeedback const& JSHeapBroker::ProcessFeedbackForBinaryOperation( + FeedbackSource const& source) { + if (HasFeedback(source)) return GetFeedback(source); + BinaryOperationHint hint = ReadFeedbackForBinaryOperation(source); + ProcessedFeedback const* feedback; + if (hint == BinaryOperationHint::kNone) { + feedback = + new (zone()) InsufficientFeedback(source.vector->GetKind(source.slot)); + } else { + feedback = new (zone()) + BinaryOperationFeedback(hint, source.vector->GetKind(source.slot)); + } + SetFeedback(source, feedback); + return *feedback; +} + +ProcessedFeedback const& JSHeapBroker::ProcessFeedbackForCompareOperation( + FeedbackSource const& source) { + if (HasFeedback(source)) return GetFeedback(source); + CompareOperationHint hint = ReadFeedbackForCompareOperation(source); + ProcessedFeedback const* feedback; + if (hint == CompareOperationHint::kNone) { + feedback = + new (zone()) InsufficientFeedback(source.vector->GetKind(source.slot)); + } else { + feedback = new (zone()) + CompareOperationFeedback(hint, source.vector->GetKind(source.slot)); + } + SetFeedback(source, feedback); + return *feedback; +} + +ProcessedFeedback const& JSHeapBroker::ProcessFeedbackForForIn( + FeedbackSource const& source) { + if (HasFeedback(source)) return GetFeedback(source); + ForInHint hint = ReadFeedbackForForIn(source); + ProcessedFeedback const* feedback; + if (hint == ForInHint::kNone) { + feedback = + new (zone()) InsufficientFeedback(source.vector->GetKind(source.slot)); + } else { + feedback = + new (zone()) ForInFeedback(hint, source.vector->GetKind(source.slot)); + } + SetFeedback(source, feedback); + return *feedback; +} + +ProcessedFeedback const& JSHeapBroker::ProcessFeedbackForPropertyAccess( + FeedbackSource const& source, AccessMode mode, + base::Optional<NameRef> static_name) { + if (HasFeedback(source)) return GetFeedback(source); + ProcessedFeedback const& feedback = + ReadFeedbackForPropertyAccess(source, mode, static_name); + SetFeedback(source, &feedback); + return feedback; +} + +ProcessedFeedback const& JSHeapBroker::ProcessFeedbackForInstanceOf( + FeedbackSource const& source) { + if (HasFeedback(source)) return GetFeedback(source); + ProcessedFeedback const& feedback = ReadFeedbackForInstanceOf(source); + SetFeedback(source, &feedback); + return feedback; +} + +ProcessedFeedback const& JSHeapBroker::ProcessFeedbackForCall( + FeedbackSource const& source) { + if (HasFeedback(source)) return GetFeedback(source); + ProcessedFeedback const& feedback = ReadFeedbackForCall(source); + SetFeedback(source, &feedback); + return feedback; +} + +ProcessedFeedback const& JSHeapBroker::ProcessFeedbackForGlobalAccess( + FeedbackSource const& source) { + if (HasFeedback(source)) return GetFeedback(source); + ProcessedFeedback const& feedback = ReadFeedbackForGlobalAccess(source); + SetFeedback(source, &feedback); + return feedback; +} + +ElementAccessFeedback const& JSHeapBroker::ProcessFeedbackMapsForElementAccess( + MapHandles const& maps, KeyedAccessMode const& keyed_mode, + FeedbackSlotKind slot_kind) { + 
DCHECK(!maps.empty()); + + // Collect possible transition targets. + MapHandles possible_transition_targets; + possible_transition_targets.reserve(maps.size()); + for (Handle<Map> map : maps) { + MapRef map_ref(this, map); + map_ref.SerializeRootMap(); + + if (CanInlineElementAccess(map_ref) && + IsFastElementsKind(map->elements_kind()) && + GetInitialFastElementsKind() != map->elements_kind()) { + possible_transition_targets.push_back(map); + } + } + + using TransitionGroup = ElementAccessFeedback::TransitionGroup; + ZoneUnorderedMap<Handle<Map>, TransitionGroup, Handle<Map>::hash, + Handle<Map>::equal_to> + transition_groups(zone()); + + // Separate the actual receiver maps and the possible transition sources. + for (Handle<Map> map : maps) { + // Don't generate elements kind transitions from stable maps. + Map transition_target = map->is_stable() + ? Map() + : map->FindElementsKindTransitionedMap( + isolate(), possible_transition_targets); + if (transition_target.is_null()) { + TransitionGroup group(1, map, zone()); + transition_groups.insert({map, group}); + } else { + Handle<Map> target(transition_target, isolate()); + TransitionGroup new_group(1, target, zone()); + TransitionGroup& actual_group = + transition_groups.insert({target, new_group}).first->second; + actual_group.push_back(map); + } + } + + ElementAccessFeedback* result = + new (zone()) ElementAccessFeedback(zone(), keyed_mode, slot_kind); + for (auto entry : transition_groups) { + result->AddGroup(std::move(entry.second)); + } + + CHECK(!result->transition_groups().empty()); + return *result; +} + +void ElementAccessFeedback::AddGroup(TransitionGroup&& group) { + CHECK(!group.empty()); + transition_groups_.push_back(std::move(group)); + +#ifdef ENABLE_SLOW_DCHECKS + // Check that each of the group's maps occurs exactly once in the whole + // feedback. This implies that "a source is not a target". + for (Handle<Map> map : group) { + int count = 0; + for (TransitionGroup const& some_group : transition_groups()) { + count += std::count_if( + some_group.begin(), some_group.end(), + [&](Handle<Map> some_map) { return some_map.equals(map); }); + } + CHECK_EQ(count, 1); + } +#endif } std::ostream& operator<<(std::ostream& os, const ObjectRef& ref) { - return os << ref.data(); + if (ref.broker()->mode() == JSHeapBroker::kDisabled) { + // If the broker is disabled we cannot be in a background thread so it's + // safe to read the heap. 
+ return os << ref.data() << " {" << ref.object() << "}"; + } else { + return os << ref.data(); + } } base::Optional<NameRef> JSHeapBroker::GetNameFeedback( @@ -4074,67 +4665,77 @@ base::Optional<NameRef> JSHeapBroker::GetNameFeedback( return NameRef(this, handle(raw_name, isolate())); } -PropertyAccessInfo JSHeapBroker::GetAccessInfoForLoadingThen(MapRef map) { - auto access_info = ais_for_loading_then_.find(map); - if (access_info == ais_for_loading_then_.end()) { - TRACE_BROKER_MISSING( - this, "access info for reducing JSResolvePromise with map " << map); +PropertyAccessInfo JSHeapBroker::GetPropertyAccessInfo( + MapRef map, NameRef name, AccessMode access_mode, + CompilationDependencies* dependencies, SerializationPolicy policy) { + PropertyAccessTarget target({map, name, access_mode}); + auto it = property_access_infos_.find(target); + if (it != property_access_infos_.end()) return it->second; + + if (policy == SerializationPolicy::kAssumeSerialized) { + TRACE_BROKER_MISSING(this, "PropertyAccessInfo for " + << access_mode << " of property " << name + << " on map " << map); return PropertyAccessInfo::Invalid(zone()); } - return access_info->second; -} -void JSHeapBroker::CreateAccessInfoForLoadingThen( - MapRef map, CompilationDependencies* dependencies) { - auto access_info = ais_for_loading_then_.find(map); - if (access_info == ais_for_loading_then_.end()) { - AccessInfoFactory access_info_factory(this, dependencies, zone()); - Handle<Name> then_string = isolate()->factory()->then_string(); - ais_for_loading_then_.insert( - std::make_pair(map, access_info_factory.ComputePropertyAccessInfo( - map.object(), then_string, AccessMode::kLoad))); + CHECK_NOT_NULL(dependencies); + AccessInfoFactory factory(this, dependencies, zone()); + PropertyAccessInfo access_info = factory.ComputePropertyAccessInfo( + map.object(), name.object(), access_mode); + if (FLAG_concurrent_inlining) { + CHECK(SerializingAllowed()); + TRACE(this, "Storing PropertyAccessInfo for " + << access_mode << " of property " << name << " on map " + << map); + property_access_infos_.insert({target, access_info}); } + return access_info; } -PropertyAccessInfo JSHeapBroker::GetAccessInfoForLoadingExec(MapRef map) { - auto access_info = ais_for_loading_exec_.find(map); - if (access_info == ais_for_loading_exec_.end()) { - TRACE_BROKER_MISSING(this, - "access info for property 'exec' on map " << map); - return PropertyAccessInfo::Invalid(zone()); - } - return access_info->second; +BinaryOperationFeedback const& ProcessedFeedback::AsBinaryOperation() const { + CHECK_EQ(kBinaryOperation, kind()); + return *static_cast<BinaryOperationFeedback const*>(this); } -PropertyAccessInfo const& JSHeapBroker::CreateAccessInfoForLoadingExec( - MapRef map, CompilationDependencies* dependencies) { - auto access_info = ais_for_loading_exec_.find(map); - if (access_info != ais_for_loading_exec_.end()) { - return access_info->second; - } - - ZoneVector<PropertyAccessInfo> access_infos(zone()); - AccessInfoFactory access_info_factory(this, dependencies, zone()); - PropertyAccessInfo ai_exec = access_info_factory.ComputePropertyAccessInfo( - map.object(), isolate()->factory()->exec_string(), AccessMode::kLoad); +CallFeedback const& ProcessedFeedback::AsCall() const { + CHECK_EQ(kCall, kind()); + return *static_cast<CallFeedback const*>(this); +} - auto inserted_ai = ais_for_loading_exec_.insert(std::make_pair(map, ai_exec)); - return inserted_ai.first->second; +CompareOperationFeedback const& ProcessedFeedback::AsCompareOperation() const { + 
CHECK_EQ(kCompareOperation, kind()); + return *static_cast<CompareOperationFeedback const*>(this); } -ElementAccessFeedback const* ProcessedFeedback::AsElementAccess() const { +ElementAccessFeedback const& ProcessedFeedback::AsElementAccess() const { CHECK_EQ(kElementAccess, kind()); - return static_cast<ElementAccessFeedback const*>(this); + return *static_cast<ElementAccessFeedback const*>(this); +} + +ForInFeedback const& ProcessedFeedback::AsForIn() const { + CHECK_EQ(kForIn, kind()); + return *static_cast<ForInFeedback const*>(this); +} + +GlobalAccessFeedback const& ProcessedFeedback::AsGlobalAccess() const { + CHECK_EQ(kGlobalAccess, kind()); + return *static_cast<GlobalAccessFeedback const*>(this); +} + +InstanceOfFeedback const& ProcessedFeedback::AsInstanceOf() const { + CHECK_EQ(kInstanceOf, kind()); + return *static_cast<InstanceOfFeedback const*>(this); } -NamedAccessFeedback const* ProcessedFeedback::AsNamedAccess() const { +NamedAccessFeedback const& ProcessedFeedback::AsNamedAccess() const { CHECK_EQ(kNamedAccess, kind()); - return static_cast<NamedAccessFeedback const*>(this); + return *static_cast<NamedAccessFeedback const*>(this); } BytecodeAnalysis const& JSHeapBroker::GetBytecodeAnalysis( Handle<BytecodeArray> bytecode_array, BailoutId osr_bailout_id, - bool analyze_liveness, bool serialize) { + bool analyze_liveness, SerializationPolicy policy) { ObjectData* bytecode_array_data = GetData(bytecode_array); CHECK_NOT_NULL(bytecode_array_data); @@ -4154,7 +4755,7 @@ BytecodeAnalysis const& JSHeapBroker::GetBytecodeAnalysis( return *it->second; } - CHECK(serialize); + CHECK_EQ(policy, SerializationPolicy::kSerializeIfNeeded); BytecodeAnalysis* analysis = new (zone()) BytecodeAnalysis( bytecode_array, zone(), osr_bailout_id, analyze_liveness); DCHECK_EQ(analysis->osr_bailout_id(), osr_bailout_id); diff --git a/deps/v8/src/compiler/js-heap-broker.h b/deps/v8/src/compiler/js-heap-broker.h index ffc10d2b93..8c2622bf48 100644 --- a/deps/v8/src/compiler/js-heap-broker.h +++ b/deps/v8/src/compiler/js-heap-broker.h @@ -9,6 +9,8 @@ #include "src/base/optional.h" #include "src/common/globals.h" #include "src/compiler/access-info.h" +#include "src/compiler/feedback-source.h" +#include "src/compiler/processed-feedback.h" #include "src/compiler/refs-map.h" #include "src/handles/handles.h" #include "src/interpreter/bytecode-array-accessor.h" @@ -26,29 +28,6 @@ class BytecodeAnalysis; class ObjectRef; std::ostream& operator<<(std::ostream& os, const ObjectRef& ref); -struct FeedbackSource { - FeedbackSource(Handle<FeedbackVector> vector_, FeedbackSlot slot_) - : vector(vector_), slot(slot_) {} - explicit FeedbackSource(FeedbackNexus const& nexus); - explicit FeedbackSource(VectorSlotPair const& pair); - - Handle<FeedbackVector> const vector; - FeedbackSlot const slot; - - struct Hash { - size_t operator()(FeedbackSource const& source) const { - return base::hash_combine(source.vector.address(), source.slot); - } - }; - - struct Equal { - bool operator()(FeedbackSource const& lhs, - FeedbackSource const& rhs) const { - return lhs.vector.equals(rhs.vector) && lhs.slot == rhs.slot; - } - }; -}; - #define TRACE_BROKER(broker, x) \ do { \ if (broker->tracing_enabled() && FLAG_trace_heap_broker_verbose) \ @@ -58,25 +37,51 @@ struct FeedbackSource { #define TRACE_BROKER_MISSING(broker, x) \ do { \ if (broker->tracing_enabled()) \ - broker->Trace() << __FUNCTION__ << ": missing " << x << '\n'; \ + broker->Trace() << "Missing " << x << " (" << __FILE__ << ":" \ + << __LINE__ << ")" << std::endl; 
\ } while (false) +struct PropertyAccessTarget { + MapRef map; + NameRef name; + AccessMode mode; + + struct Hash { + size_t operator()(const PropertyAccessTarget& pair) const { + return base::hash_combine( + base::hash_combine(pair.map.object().address(), + pair.name.object().address()), + static_cast<int>(pair.mode)); + } + }; + struct Equal { + bool operator()(const PropertyAccessTarget& lhs, + const PropertyAccessTarget& rhs) const { + return lhs.map.equals(rhs.map) && lhs.name.equals(rhs.name) && + lhs.mode == rhs.mode; + } + }; +}; + class V8_EXPORT_PRIVATE JSHeapBroker { public: JSHeapBroker(Isolate* isolate, Zone* broker_zone, bool tracing_enabled); - void SetNativeContextRef(); - void SerializeStandardObjects(); + // The compilation target's native context. We need the setter because at + // broker construction time we don't yet have the canonical handle. + NativeContextRef target_native_context() const { + return target_native_context_.value(); + } + void SetTargetNativeContextRef(Handle<NativeContext> native_context); + + void InitializeAndStartSerializing(Handle<NativeContext> native_context); Isolate* isolate() const { return isolate_; } Zone* zone() const { return current_zone_; } bool tracing_enabled() const { return tracing_enabled_; } - NativeContextRef native_context() const { return native_context_.value(); } - PerIsolateCompilerCache* compiler_cache() const { return compiler_cache_; } enum BrokerMode { kDisabled, kSerializing, kSerialized, kRetired }; BrokerMode mode() const { return mode_; } - void StartSerializing(); void StopSerializing(); void Retire(); bool SerializingAllowed() const; @@ -93,36 +98,64 @@ class V8_EXPORT_PRIVATE JSHeapBroker { bool IsArrayOrObjectPrototype(const JSObjectRef& object) const; bool HasFeedback(FeedbackSource const& source) const; - // The processed {feedback} can be {nullptr}, indicating that the original - // feedback didn't contain information relevant for Turbofan. void SetFeedback(FeedbackSource const& source, ProcessedFeedback const* feedback); - ProcessedFeedback const* GetFeedback(FeedbackSource const& source) const; - - // Convenience wrappers around GetFeedback. - GlobalAccessFeedback const* GetGlobalAccessFeedback( - FeedbackSource const& source) const; + ProcessedFeedback const& GetFeedback(FeedbackSource const& source) const; + FeedbackSlotKind GetFeedbackSlotKind(FeedbackSource const& source) const; // TODO(neis): Move these into serializer when we're always in the background. - ElementAccessFeedback const* ProcessFeedbackMapsForElementAccess( - MapHandles const& maps, KeyedAccessMode const& keyed_mode); - GlobalAccessFeedback const* ProcessFeedbackForGlobalAccess( - FeedbackSource const& source); - + ElementAccessFeedback const& ProcessFeedbackMapsForElementAccess( + MapHandles const& maps, KeyedAccessMode const& keyed_mode, + FeedbackSlotKind slot_kind); BytecodeAnalysis const& GetBytecodeAnalysis( Handle<BytecodeArray> bytecode_array, BailoutId osr_offset, - bool analyze_liveness, bool serialize); + bool analyze_liveness, + SerializationPolicy policy = SerializationPolicy::kAssumeSerialized); + + // Binary, comparison and for-in hints can be fully expressed via + // an enum. Insufficient feedback is signaled by <Hint enum>::kNone. 
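+  // (For example, BinaryOperationHint::kSignedSmall indicates that the + // operation has so far only seen Smi operands.)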
+ BinaryOperationHint GetFeedbackForBinaryOperation( + FeedbackSource const& source); + CompareOperationHint GetFeedbackForCompareOperation( + FeedbackSource const& source); + ForInHint GetFeedbackForForIn(FeedbackSource const& source); + + ProcessedFeedback const& GetFeedbackForCall(FeedbackSource const& source); + ProcessedFeedback const& GetFeedbackForGlobalAccess( + FeedbackSource const& source); + ProcessedFeedback const& GetFeedbackForInstanceOf( + FeedbackSource const& source); + ProcessedFeedback const& GetFeedbackForPropertyAccess( + FeedbackSource const& source, AccessMode mode, + base::Optional<NameRef> static_name); + + ProcessedFeedback const& ProcessFeedbackForBinaryOperation( + FeedbackSource const& source); + ProcessedFeedback const& ProcessFeedbackForCall(FeedbackSource const& source); + ProcessedFeedback const& ProcessFeedbackForCompareOperation( + FeedbackSource const& source); + ProcessedFeedback const& ProcessFeedbackForForIn( + FeedbackSource const& source); + ProcessedFeedback const& ProcessFeedbackForGlobalAccess( + FeedbackSource const& source); + ProcessedFeedback const& ProcessFeedbackForInstanceOf( + FeedbackSource const& source); + ProcessedFeedback const& ProcessFeedbackForPropertyAccess( + FeedbackSource const& source, AccessMode mode, + base::Optional<NameRef> static_name); + + bool FeedbackIsInsufficient(FeedbackSource const& source) const; base::Optional<NameRef> GetNameFeedback(FeedbackNexus const& nexus); - // If there is no result stored for {map}, we return an Invalid - // PropertyAccessInfo. - PropertyAccessInfo GetAccessInfoForLoadingThen(MapRef map); - void CreateAccessInfoForLoadingThen(MapRef map, - CompilationDependencies* dependencies); - PropertyAccessInfo GetAccessInfoForLoadingExec(MapRef map); - PropertyAccessInfo const& CreateAccessInfoForLoadingExec( - MapRef map, CompilationDependencies* dependencies); + // If {policy} is {kAssumeSerialized} and the broker doesn't know about the + // combination of {map}, {name}, and {access_mode}, returns Invalid. + PropertyAccessInfo GetPropertyAccessInfo( + MapRef map, NameRef name, AccessMode access_mode, + CompilationDependencies* dependencies = nullptr, + SerializationPolicy policy = SerializationPolicy::kAssumeSerialized); + + StringRef GetTypedArrayStringTag(ElementsKind kind); std::ostream& Trace(); void IncrementTracingIndentation(); @@ -133,13 +166,33 @@ class V8_EXPORT_PRIVATE JSHeapBroker { friend class ObjectRef; friend class ObjectData; - void SerializeShareableObjects(); + // Bottleneck FeedbackNexus access here, for storage in the broker + // or on-the-fly usage elsewhere in the compiler. 
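+  // Each ReadFeedbackFor* helper below consults the FeedbackNexus directly + // and therefore requires heap access; the corresponding ProcessFeedbackFor* + // wrapper above caches the result in the broker via SetFeedback.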
+ ForInHint ReadFeedbackForForIn(FeedbackSource const& source) const; + CompareOperationHint ReadFeedbackForCompareOperation( + FeedbackSource const& source) const; + BinaryOperationHint ReadFeedbackForBinaryOperation( + FeedbackSource const& source) const; + + ProcessedFeedback const& ReadFeedbackForCall(FeedbackSource const& source); + ProcessedFeedback const& ReadFeedbackForGlobalAccess( + FeedbackSource const& source); + ProcessedFeedback const& ReadFeedbackForInstanceOf( + FeedbackSource const& source); + ProcessedFeedback const& ReadFeedbackForPropertyAccess( + FeedbackSource const& source, AccessMode mode, + base::Optional<NameRef> static_name); + + void InitializeRefsMap(); void CollectArrayAndObjectPrototypes(); + void SerializeTypedArrayStringTags(); + + PerIsolateCompilerCache* compiler_cache() const { return compiler_cache_; } Isolate* const isolate_; Zone* const broker_zone_; - Zone* current_zone_; - base::Optional<NativeContextRef> native_context_; + Zone* current_zone_ = nullptr; + base::Optional<NativeContextRef> target_native_context_; RefsMap* refs_; ZoneUnorderedSet<Handle<JSObject>, Handle<JSObject>::hash, Handle<JSObject>::equal_to> @@ -148,16 +201,16 @@ class V8_EXPORT_PRIVATE JSHeapBroker { bool const tracing_enabled_; StdoutStream trace_out_; unsigned trace_indentation_ = 0; - PerIsolateCompilerCache* compiler_cache_; + PerIsolateCompilerCache* compiler_cache_ = nullptr; ZoneUnorderedMap<FeedbackSource, ProcessedFeedback const*, FeedbackSource::Hash, FeedbackSource::Equal> feedback_; ZoneUnorderedMap<ObjectData*, BytecodeAnalysis*> bytecode_analyses_; - typedef ZoneUnorderedMap<MapRef, PropertyAccessInfo, ObjectRef::Hash, - ObjectRef::Equal> - MapToAccessInfos; - MapToAccessInfos ais_for_loading_then_; - MapToAccessInfos ais_for_loading_exec_; + ZoneUnorderedMap<PropertyAccessTarget, PropertyAccessInfo, + PropertyAccessTarget::Hash, PropertyAccessTarget::Equal> + property_access_infos_; + + ZoneVector<ObjectData*> typed_array_string_tags_; static const size_t kMinimalRefsBucketCount = 8; // must be power of 2 static const size_t kInitialRefsBucketCount = 1024; // must be power of 2 diff --git a/deps/v8/src/compiler/js-heap-copy-reducer.cc b/deps/v8/src/compiler/js-heap-copy-reducer.cc index 7e7c9e3a0e..bf4b79bf92 100644 --- a/deps/v8/src/compiler/js-heap-copy-reducer.cc +++ b/deps/v8/src/compiler/js-heap-copy-reducer.cc @@ -27,113 +27,175 @@ JSHeapBroker* JSHeapCopyReducer::broker() { return broker_; } Reduction JSHeapCopyReducer::Reduce(Node* node) { switch (node->opcode()) { case IrOpcode::kHeapConstant: { - ObjectRef object(broker(), HeapConstantOf(node->op())); - if (object.IsJSFunction()) object.AsJSFunction().Serialize(); - if (object.IsJSObject()) object.AsJSObject().SerializeObjectCreateMap(); - if (object.IsSourceTextModule()) object.AsSourceTextModule().Serialize(); + if (!FLAG_concurrent_inlining) { + ObjectRef object(broker(), HeapConstantOf(node->op())); + if (object.IsJSFunction()) object.AsJSFunction().Serialize(); + if (object.IsJSObject()) { + object.AsJSObject().SerializeObjectCreateMap(); + } + if (object.IsSourceTextModule()) { + object.AsSourceTextModule().Serialize(); + } + } break; } case IrOpcode::kJSCreateArray: { - CreateArrayParameters const& p = CreateArrayParametersOf(node->op()); - Handle<AllocationSite> site; - if (p.site().ToHandle(&site)) AllocationSiteRef(broker(), site); + if (!FLAG_concurrent_inlining) { + CreateArrayParameters const& p = CreateArrayParametersOf(node->op()); + Handle<AllocationSite> site; + if 
(p.site().ToHandle(&site)) AllocationSiteRef(broker(), site); + } break; } case IrOpcode::kJSCreateArguments: { - Node* const frame_state = NodeProperties::GetFrameStateInput(node); - FrameStateInfo state_info = FrameStateInfoOf(frame_state->op()); - SharedFunctionInfoRef shared(broker(), - state_info.shared_info().ToHandleChecked()); + if (!FLAG_concurrent_inlining) { + Node* const frame_state = NodeProperties::GetFrameStateInput(node); + FrameStateInfo state_info = FrameStateInfoOf(frame_state->op()); + SharedFunctionInfoRef shared( + broker(), state_info.shared_info().ToHandleChecked()); + } break; } case IrOpcode::kJSCreateBlockContext: { - ScopeInfoRef(broker(), ScopeInfoOf(node->op())); + if (!FLAG_concurrent_inlining) { + ScopeInfoRef(broker(), ScopeInfoOf(node->op())); + } break; } case IrOpcode::kJSCreateBoundFunction: { - CreateBoundFunctionParameters const& p = - CreateBoundFunctionParametersOf(node->op()); - MapRef(broker(), p.map()); + if (!FLAG_concurrent_inlining) { + CreateBoundFunctionParameters const& p = + CreateBoundFunctionParametersOf(node->op()); + MapRef(broker(), p.map()); + } break; } case IrOpcode::kJSCreateCatchContext: { - ScopeInfoRef(broker(), ScopeInfoOf(node->op())); + if (!FLAG_concurrent_inlining) { + ScopeInfoRef(broker(), ScopeInfoOf(node->op())); + } break; } case IrOpcode::kJSCreateClosure: { - CreateClosureParameters const& p = CreateClosureParametersOf(node->op()); - SharedFunctionInfoRef(broker(), p.shared_info()); - FeedbackCellRef(broker(), p.feedback_cell()); - HeapObjectRef(broker(), p.code()); + if (!FLAG_concurrent_inlining) { + CreateClosureParameters const& p = + CreateClosureParametersOf(node->op()); + SharedFunctionInfoRef(broker(), p.shared_info()); + FeedbackCellRef(broker(), p.feedback_cell()); + HeapObjectRef(broker(), p.code()); + } break; } case IrOpcode::kJSCreateEmptyLiteralArray: { - FeedbackParameter const& p = FeedbackParameterOf(node->op()); - FeedbackVectorRef(broker(), p.feedback().vector()).SerializeSlots(); + if (!FLAG_concurrent_inlining) { + FeedbackParameter const& p = FeedbackParameterOf(node->op()); + FeedbackVectorRef(broker(), p.feedback().vector).Serialize(); + } break; } case IrOpcode::kJSCreateFunctionContext: { - CreateFunctionContextParameters const& p = - CreateFunctionContextParametersOf(node->op()); - ScopeInfoRef(broker(), p.scope_info()); + if (!FLAG_concurrent_inlining) { + CreateFunctionContextParameters const& p = + CreateFunctionContextParametersOf(node->op()); + ScopeInfoRef(broker(), p.scope_info()); + } break; } case IrOpcode::kJSCreateLiteralArray: case IrOpcode::kJSCreateLiteralObject: { - CreateLiteralParameters const& p = CreateLiteralParametersOf(node->op()); - FeedbackVectorRef(broker(), p.feedback().vector()).SerializeSlots(); + if (!FLAG_concurrent_inlining) { + CreateLiteralParameters const& p = + CreateLiteralParametersOf(node->op()); + FeedbackVectorRef(broker(), p.feedback().vector).Serialize(); + } break; } case IrOpcode::kJSCreateLiteralRegExp: { - CreateLiteralParameters const& p = CreateLiteralParametersOf(node->op()); - FeedbackVectorRef(broker(), p.feedback().vector()).SerializeSlots(); + if (!FLAG_concurrent_inlining) { + CreateLiteralParameters const& p = + CreateLiteralParametersOf(node->op()); + FeedbackVectorRef(broker(), p.feedback().vector).Serialize(); + } break; } case IrOpcode::kJSCreateWithContext: { - ScopeInfoRef(broker(), ScopeInfoOf(node->op())); + if (!FLAG_concurrent_inlining) { + ScopeInfoRef(broker(), ScopeInfoOf(node->op())); + } + break; + } + case 
IrOpcode::kJSLoadNamed: { + if (!FLAG_concurrent_inlining) { + NamedAccess const& p = NamedAccessOf(node->op()); + NameRef name(broker(), p.name()); + if (p.feedback().IsValid()) { + broker()->ProcessFeedbackForPropertyAccess(p.feedback(), + AccessMode::kLoad, name); + } + } break; } - case IrOpcode::kJSLoadNamed: case IrOpcode::kJSStoreNamed: { - NamedAccess const& p = NamedAccessOf(node->op()); - NameRef(broker(), p.name()); + if (!FLAG_concurrent_inlining) { + NamedAccess const& p = NamedAccessOf(node->op()); + NameRef name(broker(), p.name()); + } break; } case IrOpcode::kStoreField: case IrOpcode::kLoadField: { - FieldAccess access = FieldAccessOf(node->op()); - Handle<Map> map_handle; - if (access.map.ToHandle(&map_handle)) { - MapRef(broker(), map_handle); - } - Handle<Name> name_handle; - if (access.name.ToHandle(&name_handle)) { - NameRef(broker(), name_handle); + if (!FLAG_concurrent_inlining) { + FieldAccess access = FieldAccessOf(node->op()); + Handle<Map> map_handle; + if (access.map.ToHandle(&map_handle)) { + MapRef(broker(), map_handle); + } + Handle<Name> name_handle; + if (access.name.ToHandle(&name_handle)) { + NameRef(broker(), name_handle); + } } break; } case IrOpcode::kMapGuard: { - ZoneHandleSet<Map> const& maps = MapGuardMapsOf(node->op()); - for (Handle<Map> map : maps) { - MapRef(broker(), map); + if (!FLAG_concurrent_inlining) { + ZoneHandleSet<Map> const& maps = MapGuardMapsOf(node->op()); + for (Handle<Map> map : maps) { + MapRef(broker(), map); + } } break; } case IrOpcode::kCheckMaps: { - ZoneHandleSet<Map> const& maps = CheckMapsParametersOf(node->op()).maps(); - for (Handle<Map> map : maps) { - MapRef(broker(), map); + if (!FLAG_concurrent_inlining) { + ZoneHandleSet<Map> const& maps = + CheckMapsParametersOf(node->op()).maps(); + for (Handle<Map> map : maps) { + MapRef(broker(), map); + } } break; } case IrOpcode::kCompareMaps: { - ZoneHandleSet<Map> const& maps = CompareMapsParametersOf(node->op()); - for (Handle<Map> map : maps) { - MapRef(broker(), map); + if (!FLAG_concurrent_inlining) { + ZoneHandleSet<Map> const& maps = CompareMapsParametersOf(node->op()); + for (Handle<Map> map : maps) { + MapRef(broker(), map); + } + } + break; + } + case IrOpcode::kJSLoadProperty: { + if (!FLAG_concurrent_inlining) { + PropertyAccess const& p = PropertyAccessOf(node->op()); + AccessMode access_mode = AccessMode::kLoad; + if (p.feedback().IsValid()) { + broker()->ProcessFeedbackForPropertyAccess(p.feedback(), access_mode, + base::nullopt); + } } break; } - default: break; } diff --git a/deps/v8/src/compiler/js-inlining-heuristic.cc b/deps/v8/src/compiler/js-inlining-heuristic.cc index e11d6b59a3..ae271b3af9 100644 --- a/deps/v8/src/compiler/js-inlining-heuristic.cc +++ b/deps/v8/src/compiler/js-inlining-heuristic.cc @@ -114,8 +114,8 @@ Reduction JSInliningHeuristic::Reduce(Node* node) { Handle<SharedFunctionInfo> frame_shared_info; for (int i = 0; i < candidate.num_functions; ++i) { if (!candidate.bytecode[i].has_value()) { - // We're already missing critical data which wouldn't allow us to - // continue the inlining checks. Log a warning and continue. + // Can't inline without bytecode. + // TODO(neis): Should this even be a broker message? 
if (candidate.functions[i].has_value()) { TRACE_BROKER(broker(), "Missing bytecode array trying to inline JSFunction " @@ -205,6 +205,8 @@ Reduction JSInliningHeuristic::Reduce(Node* node) { } void JSInliningHeuristic::Finalize() { + DisallowHeapAccessIf no_heap_access(FLAG_concurrent_inlining); + if (candidates_.empty()) return; // Nothing to do without candidates. if (FLAG_trace_turbo_inlining) PrintCandidates(); @@ -730,18 +732,22 @@ bool JSInliningHeuristic::CandidateCompare::operator()( void JSInliningHeuristic::PrintCandidates() { StdoutStream os; - os << "Candidates for inlining (size=" << candidates_.size() << "):\n"; + os << candidates_.size() << " candidate(s) for inlining:" << std::endl; for (const Candidate& candidate : candidates_) { - os << " #" << candidate.node->id() << ":" - << candidate.node->op()->mnemonic() - << ", frequency: " << candidate.frequency << std::endl; + os << "- candidate: " << candidate.node->op()->mnemonic() << " node #" + << candidate.node->id() << " with frequency " << candidate.frequency + << ", " << candidate.num_functions << " target(s):" << std::endl; for (int i = 0; i < candidate.num_functions; ++i) { - SharedFunctionInfoRef shared = - candidate.functions[i].has_value() - ? candidate.functions[i].value().shared() - : candidate.shared_info.value(); - PrintF(" - size:%d, name: %s\n", candidate.bytecode[i].value().length(), - shared.object()->DebugName().ToCString().get()); + SharedFunctionInfoRef shared = candidate.functions[i].has_value() + ? candidate.functions[i]->shared() + : candidate.shared_info.value(); + os << " - target: " << shared; + if (candidate.bytecode[i].has_value()) { + os << ", bytecode size: " << candidate.bytecode[i]->length(); + } else { + os << ", no bytecode"; + } + os << std::endl; } } } diff --git a/deps/v8/src/compiler/js-inlining.cc b/deps/v8/src/compiler/js-inlining.cc index 91cbea2346..51179f1956 100644 --- a/deps/v8/src/compiler/js-inlining.cc +++ b/deps/v8/src/compiler/js-inlining.cc @@ -247,9 +247,13 @@ Node* JSInliner::CreateArtificialFrameState(Node* node, Node* outer_frame_state, bailout_id, OutputFrameStateCombine::Ignore(), state_info); const Operator* op0 = common()->StateValues(0, SparseInputMask::Dense()); Node* node0 = graph()->NewNode(op0); + + static constexpr int kTargetInputIndex = 0; + static constexpr int kReceiverInputIndex = 1; + const int parameter_count_with_receiver = parameter_count + 1; NodeVector params(local_zone_); - for (int parameter = 0; parameter < parameter_count + 1; ++parameter) { - params.push_back(node->InputAt(1 + parameter)); + for (int i = 0; i < parameter_count_with_receiver; i++) { + params.push_back(node->InputAt(kReceiverInputIndex + i)); } const Operator* op_param = common()->StateValues( static_cast<int>(params.size()), SparseInputMask::Dense()); @@ -259,7 +263,7 @@ Node* JSInliner::CreateArtificialFrameState(Node* node, Node* outer_frame_state, context = jsgraph()->UndefinedConstant(); } return graph()->NewNode(op, params_node, node0, node0, context, - node->InputAt(0), outer_frame_state); + node->InputAt(kTargetInputIndex), outer_frame_state); } namespace { @@ -301,7 +305,7 @@ base::Optional<SharedFunctionInfoRef> JSInliner::DetermineCallTarget( // TODO(turbofan): We might want to revisit this restriction later when we // have a need for this, and we know how to model different native contexts // in the same graph in a compositional way.
- if (!function.native_context().equals(broker()->native_context())) { + if (!function.native_context().equals(broker()->target_native_context())) { return base::nullopt; } @@ -332,7 +336,7 @@ base::Optional<SharedFunctionInfoRef> JSInliner::DetermineCallTarget( // - context : The context (as SSA value) bound by the call target. // - feedback_vector : The target is guaranteed to use this feedback vector. FeedbackVectorRef JSInliner::DetermineCallContext(Node* node, - Node*& context_out) { + Node** context_out) { DCHECK(IrOpcode::IsInlineeOpcode(node->opcode())); HeapObjectMatcher match(node->InputAt(0)); @@ -342,7 +346,7 @@ FeedbackVectorRef JSInliner::DetermineCallContext(Node* node, CHECK(function.has_feedback_vector()); // The inlinee specializes to the context from the JSFunction object. - context_out = jsgraph()->Constant(function.context()); + *context_out = jsgraph()->Constant(function.context()); return function.feedback_vector(); } @@ -354,7 +358,7 @@ FeedbackVectorRef JSInliner::DetermineCallContext(Node* node, FeedbackCellRef cell(FeedbackCellRef(broker(), p.feedback_cell())); // The inlinee uses the locally provided context at instantiation. - context_out = NodeProperties::GetContextInput(match.node()); + *context_out = NodeProperties::GetContextInput(match.node()); return cell.value().AsFeedbackVector(); } @@ -369,13 +373,14 @@ Reduction JSInliner::ReduceJSCall(Node* node) { // Determine the call target. base::Optional<SharedFunctionInfoRef> shared_info(DetermineCallTarget(node)); if (!shared_info.has_value()) return NoChange(); - DCHECK(shared_info->IsInlineable()); + SharedFunctionInfoRef outer_shared_info(broker(), info_->shared_info()); + // Constructor must be constructable. if (node->opcode() == IrOpcode::kJSConstruct && !IsConstructable(shared_info->kind())) { - TRACE("Not inlining " << *shared_info << " into " << info_->shared_info() + TRACE("Not inlining " << *shared_info << " into " << outer_shared_info << " because constructor is not constructable."); return NoChange(); } @@ -384,7 +389,7 @@ Reduction JSInliner::ReduceJSCall(Node* node) { // See ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList ). if (node->opcode() == IrOpcode::kJSCall && IsClassConstructor(shared_info->kind())) { - TRACE("Not inlining " << *shared_info << " into " << info_->shared_info() + TRACE("Not inlining " << *shared_info << " into " << outer_shared_info << " because callee is a class constructor."); return NoChange(); } @@ -398,7 +403,7 @@ Reduction JSInliner::ReduceJSCall(Node* node) { nesting_level++; if (nesting_level > kMaxDepthForInlining) { TRACE("Not inlining " - << *shared_info << " into " << info_->shared_info() + << *shared_info << " into " << outer_shared_info << " because call has exceeded the maximum depth for function " "inlining."); return NoChange(); @@ -413,38 +418,38 @@ Reduction JSInliner::ReduceJSCall(Node* node) { // passing the IsInlineable check. The broker holds a reference to the // bytecode array, which prevents it from getting flushed. // Therefore, the following check should always hold true. - CHECK(shared_info.value().is_compiled()); + CHECK(shared_info->is_compiled()); if (!FLAG_concurrent_inlining && info_->is_source_positions_enabled()) { SharedFunctionInfo::EnsureSourcePositionsAvailable(isolate(), shared_info->object()); } - TRACE("Inlining " << *shared_info << " into " << info_->shared_info() + TRACE("Inlining " << *shared_info << " into " << outer_shared_info << ((exception_target != nullptr) ? 
" (inside try-block)" : "")); // Determine the targets feedback vector and its context. Node* context; - FeedbackVectorRef feedback_vector = DetermineCallContext(node, context); + FeedbackVectorRef feedback_vector = DetermineCallContext(node, &context); - if (FLAG_concurrent_inlining) { - if (!shared_info.value().IsSerializedForCompilation(feedback_vector)) { - TRACE("Missed opportunity to inline a function (" - << *shared_info << " with " << feedback_vector << ")"); - return NoChange(); - } + if (FLAG_concurrent_inlining && + !shared_info->IsSerializedForCompilation(feedback_vector)) { + // TODO(neis): Should this be a broker message? + TRACE("Missed opportunity to inline a function (" + << *shared_info << " with " << feedback_vector << ")"); + return NoChange(); } // ---------------------------------------------------------------- // After this point, we've made a decision to inline this function. // We shall not bailout from inlining if we got here. - BytecodeArrayRef bytecode_array = shared_info.value().GetBytecodeArray(); + BytecodeArrayRef bytecode_array = shared_info->GetBytecodeArray(); // Remember that we inlined this function. - int inlining_id = info_->AddInlinedFunction( - shared_info.value().object(), bytecode_array.object(), - source_positions_->GetSourcePosition(node)); + int inlining_id = + info_->AddInlinedFunction(shared_info->object(), bytecode_array.object(), + source_positions_->GetSourcePosition(node)); // Create the subgraph for the inlinee. Node* start; @@ -461,20 +466,11 @@ Reduction JSInliner::ReduceJSCall(Node* node) { flags |= BytecodeGraphBuilderFlag::kBailoutOnUninitialized; } { - // TODO(mslekova): Remove the following once bytecode graph builder - // is brokerized. Also, remove the context argument from - // BuildGraphFromBytecode and extract it from the broker there. - AllowHandleDereference allow_handle_deref; - AllowHandleAllocation allow_handle_alloc; - AllowHeapAllocation allow_heap_alloc; - AllowCodeDependencyChange allow_code_dep_change; CallFrequency frequency = call.frequency(); - Handle<NativeContext> native_context(info_->native_context(), isolate()); - BuildGraphFromBytecode( - broker(), zone(), bytecode_array.object(), - shared_info.value().object(), feedback_vector.object(), - BailoutId::None(), jsgraph(), frequency, source_positions_, - native_context, inlining_id, flags, &info_->tick_counter()); + BuildGraphFromBytecode(broker(), zone(), *shared_info, feedback_vector, + BailoutId::None(), jsgraph(), frequency, + source_positions_, inlining_id, flags, + &info_->tick_counter()); } // Extract the inlinee start/end nodes. @@ -522,13 +518,13 @@ Reduction JSInliner::ReduceJSCall(Node* node) { // where execution continues at {construct_stub_create_deopt_pc_offset}). Node* receiver = jsgraph()->TheHoleConstant(); // Implicit receiver. 
Node* context = NodeProperties::GetContextInput(node); - if (NeedsImplicitReceiver(shared_info.value())) { + if (NeedsImplicitReceiver(*shared_info)) { Node* effect = NodeProperties::GetEffectInput(node); Node* control = NodeProperties::GetControlInput(node); Node* frame_state_inside = CreateArtificialFrameState( node, frame_state, call.formal_arguments(), BailoutId::ConstructStubCreate(), FrameStateType::kConstructStub, - shared_info.value(), context); + *shared_info, context); Node* create = graph()->NewNode(javascript()->Create(), call.target(), new_target, context, frame_state_inside, effect, control); @@ -583,7 +579,7 @@ Reduction JSInliner::ReduceJSCall(Node* node) { frame_state = CreateArtificialFrameState( node, frame_state, call.formal_arguments(), BailoutId::ConstructStubInvoke(), FrameStateType::kConstructStub, - shared_info.value(), context); + *shared_info, context); } // Insert a JSConvertReceiver node for sloppy callees. Note that the context @@ -593,8 +589,8 @@ Reduction JSInliner::ReduceJSCall(Node* node) { Node* effect = NodeProperties::GetEffectInput(node); if (NodeProperties::CanBePrimitive(broker(), call.receiver(), effect)) { CallParameters const& p = CallParametersOf(node->op()); - Node* global_proxy = - jsgraph()->Constant(broker()->native_context().global_proxy_object()); + Node* global_proxy = jsgraph()->Constant( + broker()->target_native_context().global_proxy_object()); Node* receiver = effect = graph()->NewNode(simplified()->ConvertReceiver(p.convert_mode()), call.receiver(), global_proxy, effect, start); @@ -612,7 +608,7 @@ Reduction JSInliner::ReduceJSCall(Node* node) { if (call.formal_arguments() != parameter_count) { frame_state = CreateArtificialFrameState( node, frame_state, call.formal_arguments(), BailoutId::None(), - FrameStateType::kArgumentsAdaptor, shared_info.value()); + FrameStateType::kArgumentsAdaptor, *shared_info); } return InlineCall(node, new_target, context, frame_state, start, end, diff --git a/deps/v8/src/compiler/js-inlining.h b/deps/v8/src/compiler/js-inlining.h index f50f7b591d..f60d53dbc9 100644 --- a/deps/v8/src/compiler/js-inlining.h +++ b/deps/v8/src/compiler/js-inlining.h @@ -59,8 +59,7 @@ class JSInliner final : public AdvancedReducer { SourcePositionTable* const source_positions_; base::Optional<SharedFunctionInfoRef> DetermineCallTarget(Node* node); - FeedbackVectorRef DetermineCallContext( - Node* node, Node*& context_out); // NOLINT(runtime/references) + FeedbackVectorRef DetermineCallContext(Node* node, Node** context_out); Node* CreateArtificialFrameState(Node* node, Node* outer_frame_state, int parameter_count, BailoutId bailout_id, diff --git a/deps/v8/src/compiler/js-intrinsic-lowering.cc b/deps/v8/src/compiler/js-intrinsic-lowering.cc index 970a7e3ed6..ccb0622017 100644 --- a/deps/v8/src/compiler/js-intrinsic-lowering.cc +++ b/deps/v8/src/compiler/js-intrinsic-lowering.cc @@ -21,10 +21,13 @@ namespace v8 { namespace internal { namespace compiler { -JSIntrinsicLowering::JSIntrinsicLowering(Editor* editor, JSGraph* jsgraph) - : AdvancedReducer(editor), jsgraph_(jsgraph) {} +JSIntrinsicLowering::JSIntrinsicLowering(Editor* editor, JSGraph* jsgraph, + JSHeapBroker* broker) + : AdvancedReducer(editor), jsgraph_(jsgraph), broker_(broker) {} Reduction JSIntrinsicLowering::Reduce(Node* node) { + DisallowHeapAccessIf no_heap_access(FLAG_concurrent_inlining); + if (node->opcode() != IrOpcode::kJSCallRuntime) return NoChange(); const Runtime::Function* const f = Runtime::FunctionForId(CallRuntimeParametersOf(node->op()).id()); @@ 
-108,7 +111,7 @@ Reduction JSIntrinsicLowering::ReduceDeoptimizeNow(Node* node) { // TODO(bmeurer): Move MergeControlToEnd() to the AdvancedReducer. Node* deoptimize = graph()->NewNode( common()->Deoptimize(DeoptimizeKind::kEager, - DeoptimizeReason::kDeoptimizeNow, VectorSlotPair()), + DeoptimizeReason::kDeoptimizeNow, FeedbackSource()), frame_state, effect, control); NodeProperties::MergeControlToEnd(graph(), common(), deoptimize); Revisit(graph()->end()); @@ -307,7 +310,7 @@ Reduction JSIntrinsicLowering::ReduceToObject(Node* node) { Reduction JSIntrinsicLowering::ReduceToString(Node* node) { // ToString is unnecessary if the input is a string. HeapObjectMatcher m(NodeProperties::GetValueInput(node, 0)); - if (m.HasValue() && m.Value()->IsString()) { + if (m.HasValue() && m.Ref(broker()).IsString()) { ReplaceWithValue(node, m.node()); return Replace(m.node()); } diff --git a/deps/v8/src/compiler/js-intrinsic-lowering.h b/deps/v8/src/compiler/js-intrinsic-lowering.h index 844e051d0a..f32b53b586 100644 --- a/deps/v8/src/compiler/js-intrinsic-lowering.h +++ b/deps/v8/src/compiler/js-intrinsic-lowering.h @@ -31,7 +31,7 @@ class SimplifiedOperatorBuilder; class V8_EXPORT_PRIVATE JSIntrinsicLowering final : public NON_EXPORTED_BASE(AdvancedReducer) { public: - JSIntrinsicLowering(Editor* editor, JSGraph* jsgraph); + JSIntrinsicLowering(Editor* editor, JSGraph* jsgraph, JSHeapBroker* broker); ~JSIntrinsicLowering() final = default; const char* reducer_name() const override { return "JSIntrinsicLowering"; } @@ -81,12 +81,14 @@ class V8_EXPORT_PRIVATE JSIntrinsicLowering final Graph* graph() const; JSGraph* jsgraph() const { return jsgraph_; } + JSHeapBroker* broker() const { return broker_; } Isolate* isolate() const; CommonOperatorBuilder* common() const; JSOperatorBuilder* javascript() const; SimplifiedOperatorBuilder* simplified() const; JSGraph* const jsgraph_; + JSHeapBroker* const broker_; }; } // namespace compiler diff --git a/deps/v8/src/compiler/js-native-context-specialization.cc b/deps/v8/src/compiler/js-native-context-specialization.cc index 8f7552baa1..9f950c808c 100644 --- a/deps/v8/src/compiler/js-native-context-specialization.cc +++ b/deps/v8/src/compiler/js-native-context-specialization.cc @@ -19,7 +19,6 @@ #include "src/compiler/node-matchers.h" #include "src/compiler/property-access-builder.h" #include "src/compiler/type-cache.h" -#include "src/compiler/vector-slot-pair.h" #include "src/execution/isolate-inl.h" #include "src/numbers/dtoa.h" #include "src/objects/feedback-vector.h" @@ -52,35 +51,26 @@ bool HasOnlyJSArrayMaps(JSHeapBroker* broker, return true; } -void TryUpdateThenDropDeprecated(Isolate* isolate, MapHandles* maps) { - for (auto it = maps->begin(); it != maps->end();) { - if (Map::TryUpdate(isolate, *it).ToHandle(&*it)) { - DCHECK(!(*it)->is_deprecated()); - ++it; - } else { - it = maps->erase(it); - } - } -} - } // namespace JSNativeContextSpecialization::JSNativeContextSpecialization( Editor* editor, JSGraph* jsgraph, JSHeapBroker* broker, Flags flags, - Handle<Context> native_context, CompilationDependencies* dependencies, - Zone* zone, Zone* shared_zone) + CompilationDependencies* dependencies, Zone* zone, Zone* shared_zone) : AdvancedReducer(editor), jsgraph_(jsgraph), broker_(broker), flags_(flags), - global_object_(native_context->global_object(), jsgraph->isolate()), - global_proxy_(native_context->global_proxy(), jsgraph->isolate()), + global_object_(broker->target_native_context().global_object().object()), + global_proxy_( + 
broker->target_native_context().global_proxy_object().object()), dependencies_(dependencies), zone_(zone), shared_zone_(shared_zone), type_cache_(TypeCache::Get()) {} Reduction JSNativeContextSpecialization::Reduce(Node* node) { + DisallowHeapAccessIf disallow_heap_access(FLAG_concurrent_inlining); + switch (node->opcode()) { case IrOpcode::kJSAdd: return ReduceJSAdd(node); @@ -128,6 +118,8 @@ Reduction JSNativeContextSpecialization::Reduce(Node* node) { return ReduceJSToObject(node); case IrOpcode::kJSToString: return ReduceJSToString(node); + case IrOpcode::kJSGetIterator: + return ReduceJSGetIterator(node); default: break; } @@ -236,11 +228,12 @@ Reduction JSNativeContextSpecialization::ReduceJSAsyncFunctionEnter( // Create the JSAsyncFunctionObject based on the SharedFunctionInfo // extracted from the top-most frame in {frame_state}. - Handle<SharedFunctionInfo> shared = - FrameStateInfoOf(frame_state->op()).shared_info().ToHandleChecked(); - DCHECK(shared->is_compiled()); - int register_count = shared->internal_formal_parameter_count() + - shared->GetBytecodeArray().register_count(); + SharedFunctionInfoRef shared( + broker(), + FrameStateInfoOf(frame_state->op()).shared_info().ToHandleChecked()); + DCHECK(shared.is_compiled()); + int register_count = shared.internal_formal_parameter_count() + + shared.GetBytecodeArray().register_count(); Node* value = effect = graph()->NewNode(javascript()->CreateAsyncFunctionObject(register_count), closure, receiver, promise, context, effect, control); @@ -360,9 +353,7 @@ Reduction JSNativeContextSpecialization::ReduceJSGetSuperConstructor( if (!m.HasValue()) return NoChange(); JSFunctionRef function = m.Ref(broker()).AsJSFunction(); MapRef function_map = function.map(); - if (!FLAG_concurrent_inlining) { - function_map.SerializePrototype(); - } else if (!function_map.serialized_prototype()) { + if (FLAG_concurrent_inlining && !function_map.serialized_prototype()) { TRACE_BROKER_MISSING(broker(), "data for map " << function_map); return NoChange(); } @@ -396,22 +387,37 @@ Reduction JSNativeContextSpecialization::ReduceJSInstanceOf(Node* node) { // we have feedback from the InstanceOfIC. Handle<JSObject> receiver; HeapObjectMatcher m(constructor); - if (m.HasValue() && m.Value()->IsJSObject()) { - receiver = Handle<JSObject>::cast(m.Value()); + if (m.HasValue() && m.Ref(broker()).IsJSObject()) { + receiver = m.Ref(broker()).AsJSObject().object(); } else if (p.feedback().IsValid()) { - FeedbackNexus nexus(p.feedback().vector(), p.feedback().slot()); - if (!nexus.GetConstructorFeedback().ToHandle(&receiver)) return NoChange(); + ProcessedFeedback const& feedback = + broker()->GetFeedbackForInstanceOf(FeedbackSource(p.feedback())); + if (feedback.IsInsufficient()) return NoChange(); + base::Optional<JSObjectRef> maybe_receiver = + feedback.AsInstanceOf().value(); + if (!maybe_receiver.has_value()) return NoChange(); + receiver = maybe_receiver->object(); } else { return NoChange(); } - Handle<Map> receiver_map(receiver->map(), isolate()); - // Compute property access info for @@hasInstance on the constructor. 
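The hunk above replaces direct FeedbackNexus inspection in ReduceJSInstanceOf with a query through the heap broker, bailing out whenever the recorded feedback is insufficient. A minimal, self-contained sketch of that dispatch shape (types and names below are illustrative stand-ins, not V8's actual classes):

#include <optional>

enum class FeedbackKind { kInsufficient, kInstanceOf };

struct ProcessedFeedbackSketch {
  FeedbackKind kind;
  std::optional<int> constructor;  // stands in for base::Optional<JSObjectRef>
  bool IsInsufficient() const { return kind == FeedbackKind::kInsufficient; }
};

// Mirrors the early-out structure of the reduction: any missing piece of
// information conservatively becomes "no change".
bool TryUseInstanceOfFeedback(const ProcessedFeedbackSketch& feedback,
                              int* receiver_out) {
  if (feedback.IsInsufficient()) return false;           // return NoChange();
  if (!feedback.constructor.has_value()) return false;   // return NoChange();
  *receiver_out = *feedback.constructor;
  return true;  // proceed with the specialized lowering
}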
- AccessInfoFactory access_info_factory(broker(), dependencies(), - graph()->zone()); - PropertyAccessInfo access_info = - access_info_factory.ComputePropertyAccessInfo( - receiver_map, factory()->has_instance_symbol(), AccessMode::kLoad); + JSObjectRef receiver_ref(broker(), receiver); + MapRef receiver_map = receiver_ref.map(); + + PropertyAccessInfo access_info = PropertyAccessInfo::Invalid(graph()->zone()); + if (FLAG_concurrent_inlining) { + access_info = broker()->GetPropertyAccessInfo( + receiver_map, + NameRef(broker(), isolate()->factory()->has_instance_symbol()), + AccessMode::kLoad); + } else { + AccessInfoFactory access_info_factory(broker(), dependencies(), + graph()->zone()); + access_info = access_info_factory.ComputePropertyAccessInfo( + receiver_map.object(), factory()->has_instance_symbol(), + AccessMode::kLoad); + } + if (access_info.IsInvalid()) return NoChange(); access_info.RecordDependencies(dependencies()); @@ -420,7 +426,7 @@ Reduction JSNativeContextSpecialization::ReduceJSInstanceOf(Node* node) { if (access_info.IsNotFound()) { // If there's no @@hasInstance handler, the OrdinaryHasInstance operation // takes over, but that requires the constructor to be callable. - if (!receiver_map->is_callable()) return NoChange(); + if (!receiver_map.is_callable()) return NoChange(); dependencies()->DependOnStablePrototypeChains(access_info.receiver_maps(), kStartAtPrototype); @@ -439,17 +445,15 @@ Reduction JSNativeContextSpecialization::ReduceJSInstanceOf(Node* node) { } if (access_info.IsDataConstant()) { - // Determine actual holder. Handle<JSObject> holder; bool found_on_proto = access_info.holder().ToHandle(&holder); - if (!found_on_proto) holder = receiver; - - FieldIndex field_index = access_info.field_index(); - Handle<Object> constant = JSObject::FastPropertyAt( - holder, access_info.field_representation(), field_index); - if (!constant->IsCallable()) { + JSObjectRef holder_ref = + found_on_proto ? JSObjectRef(broker(), holder) : receiver_ref; + base::Optional<ObjectRef> constant = holder_ref.GetOwnDataProperty( + access_info.field_representation(), access_info.field_index()); + if (!constant.has_value() || !constant->IsHeapObject() || + !constant->AsHeapObject().map().is_callable()) return NoChange(); - } if (found_on_proto) { dependencies()->DependOnStablePrototypeChains( @@ -457,8 +461,6 @@ Reduction JSNativeContextSpecialization::ReduceJSInstanceOf(Node* node) { JSObjectRef(broker(), holder)); } - DCHECK(constant->IsCallable()); - // Check that {constructor} is actually {receiver}. constructor = access_builder.BuildCheckValue(constructor, &effect, control, receiver); @@ -478,14 +480,14 @@ Reduction JSNativeContextSpecialization::ReduceJSInstanceOf(Node* node) { 0, frame_state, ContinuationFrameStateMode::LAZY); // Call the @@hasInstance handler. - Node* target = jsgraph()->Constant(constant); + Node* target = jsgraph()->Constant(*constant); node->InsertInput(graph()->zone(), 0, target); node->ReplaceInput(1, constructor); node->ReplaceInput(2, object); node->ReplaceInput(4, continuation_frame_state); node->ReplaceInput(5, effect); NodeProperties::ChangeOp( - node, javascript()->Call(3, CallFrequency(), VectorSlotPair(), + node, javascript()->Call(3, CallFrequency(), FeedbackSource(), ConvertReceiverMode::kNotNullOrUndefined)); // Rewire the value uses of {node} to ToBoolean conversion of the result. 
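When the constant-callable path above rewires the node into a generic JSCall, it now passes a default-constructed FeedbackSource where a VectorSlotPair used to go. The point of the default value is to be explicitly invalid; a minimal stand-in (the field layout is hypothetical, not V8's):

struct FeedbackSourceSketch {
  int slot = -1;  // default-constructed means "no feedback slot"
  bool IsValid() const { return slot >= 0; }
};

// Feedback-driven passes test IsValid() first (as the p.feedback().IsValid()
// checks elsewhere in this diff do), so the synthesized call is skipped by
// them rather than misattributed to some unrelated feedback slot.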
@@ -504,7 +506,7 @@ Reduction JSNativeContextSpecialization::ReduceJSInstanceOf(Node* node) { JSNativeContextSpecialization::InferHasInPrototypeChainResult JSNativeContextSpecialization::InferHasInPrototypeChain( - Node* receiver, Node* effect, Handle<HeapObject> prototype) { + Node* receiver, Node* effect, HeapObjectRef const& prototype) { ZoneHandleSet<Map> receiver_maps; NodeProperties::InferReceiverMapsResult result = NodeProperties::InferReceiverMapsUnsafe(broker(), receiver, effect, @@ -517,28 +519,31 @@ JSNativeContextSpecialization::InferHasInPrototypeChain( bool all = true; bool none = true; for (size_t i = 0; i < receiver_maps.size(); ++i) { - Handle<Map> receiver_map = receiver_maps[i]; - if (receiver_map->instance_type() <= LAST_SPECIAL_RECEIVER_TYPE) { - return kMayBeInPrototypeChain; - } - if (result == NodeProperties::kUnreliableReceiverMaps && - !receiver_map->is_stable()) { + MapRef map(broker(), receiver_maps[i]); + if (result == NodeProperties::kUnreliableReceiverMaps && !map.is_stable()) { return kMayBeInPrototypeChain; } - for (PrototypeIterator it(isolate(), receiver_map);; it.Advance()) { - if (it.IsAtEnd()) { + while (true) { + if (IsSpecialReceiverInstanceType(map.instance_type())) { + return kMayBeInPrototypeChain; + } + if (!map.IsJSObjectMap()) { all = false; break; } - Handle<HeapObject> current = - PrototypeIterator::GetCurrent<HeapObject>(it); - if (current.is_identical_to(prototype)) { + if (FLAG_concurrent_inlining && !map.serialized_prototype()) { + TRACE_BROKER_MISSING(broker(), "prototype data for map " << map); + return kMayBeInPrototypeChain; + } + if (map.prototype().equals(prototype)) { none = false; break; } - if (!current->map().is_stable() || - current->map().instance_type() <= LAST_SPECIAL_RECEIVER_TYPE) { - return kMayBeInPrototypeChain; + map = map.prototype().map(); + if (!map.is_stable()) return kMayBeInPrototypeChain; + if (map.oddball_type() == OddballType::kNull) { + all = false; + break; } } } @@ -554,8 +559,8 @@ JSNativeContextSpecialization::InferHasInPrototypeChain( // might be a different object each time, so it's much simpler to include // {prototype}. That does, however, mean that we must check {prototype}'s // map stability. - if (!prototype->map().is_stable()) return kMayBeInPrototypeChain; - last_prototype.emplace(broker(), Handle<JSObject>::cast(prototype)); + if (!prototype.map().is_stable()) return kMayBeInPrototypeChain; + last_prototype = prototype.AsJSObject(); } WhereToStart start = result == NodeProperties::kUnreliableReceiverMaps ? kStartAtReceiver @@ -580,7 +585,7 @@ Reduction JSNativeContextSpecialization::ReduceJSHasInPrototypeChain( HeapObjectMatcher m(prototype); if (m.HasValue()) { InferHasInPrototypeChainResult result = - InferHasInPrototypeChain(value, effect, m.Value()); + InferHasInPrototypeChain(value, effect, m.Ref(broker())); if (result != kMayBeInPrototypeChain) { Node* value = jsgraph()->BooleanConstant(result == kIsInPrototypeChain); ReplaceWithValue(node, value); @@ -601,34 +606,41 @@ Reduction JSNativeContextSpecialization::ReduceJSOrdinaryHasInstance( HeapObjectMatcher m(constructor); if (!m.HasValue()) return NoChange(); - // Check if the {constructor} is a JSBoundFunction. - if (m.Value()->IsJSBoundFunction()) { - // OrdinaryHasInstance on bound functions turns into a recursive - // invocation of the instanceof operator again. - // ES6 section 7.3.19 OrdinaryHasInstance (C, O) step 2. 
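The rewritten InferHasInPrototypeChain above walks each receiver map's prototype chain through broker refs, tracking whether the target prototype was found on all chains or on none. A standalone sketch of that tri-state bookkeeping (simplified types; stability and special-receiver checks are modeled as plain flags):

#include <vector>

enum Result { kIsInChain, kIsNotInChain, kMayBeInChain };

struct MapSketch {
  MapSketch* prototype = nullptr;  // nullptr plays the role of null
  bool stable = true;
  bool special_receiver = false;
};

Result HasInPrototypeChain(const std::vector<MapSketch*>& receiver_maps,
                           const MapSketch* target) {
  if (receiver_maps.empty()) return kMayBeInChain;
  bool all = true, none = true;
  for (MapSketch* map : receiver_maps) {
    for (MapSketch* cur = map;;) {
      if (cur->special_receiver) return kMayBeInChain;  // may intercept lookups
      MapSketch* proto = cur->prototype;
      if (proto == target) { none = false; break; }     // found on this chain
      if (proto == nullptr) { all = false; break; }     // hit null: not found
      if (!proto->stable) return kMayBeInChain;         // chain may change later
      cur = proto;
    }
  }
  if (all) return kIsInChain;      // every possible receiver has it
  if (none) return kIsNotInChain;  // no possible receiver has it
  return kMayBeInChain;
}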
- Handle<JSBoundFunction> function = Handle<JSBoundFunction>::cast(m.Value()); - Handle<JSReceiver> bound_target_function(function->bound_target_function(), - isolate()); + if (m.Ref(broker()).IsJSBoundFunction()) { + // OrdinaryHasInstance on bound functions turns into a recursive invocation + // of the instanceof operator again. + JSBoundFunctionRef function = m.Ref(broker()).AsJSBoundFunction(); + if (FLAG_concurrent_inlining && !function.serialized()) { + TRACE_BROKER_MISSING(broker(), "data for JSBoundFunction " << function); + return NoChange(); + } + + JSReceiverRef bound_target_function = function.bound_target_function(); + NodeProperties::ReplaceValueInput(node, object, 0); NodeProperties::ReplaceValueInput( - node, jsgraph()->HeapConstant(bound_target_function), 1); - NodeProperties::ChangeOp(node, javascript()->InstanceOf(VectorSlotPair())); + node, jsgraph()->Constant(bound_target_function), 1); + NodeProperties::ChangeOp(node, javascript()->InstanceOf(FeedbackSource())); Reduction const reduction = ReduceJSInstanceOf(node); return reduction.Changed() ? reduction : Changed(node); } - // Optimize if we currently know the "prototype" property. - if (m.Value()->IsJSFunction()) { + if (m.Ref(broker()).IsJSFunction()) { + // Optimize if we currently know the "prototype" property. + JSFunctionRef function = m.Ref(broker()).AsJSFunction(); - // TODO(neis): This is a temporary hack needed because the copy reducer - // runs only after this pass. - function.Serialize(); + if (FLAG_concurrent_inlining && !function.serialized()) { + TRACE_BROKER_MISSING(broker(), "data for JSFunction " << function); + return NoChange(); + } + // TODO(neis): Remove the has_prototype_slot condition once the broker is // always enabled. if (!function.map().has_prototype_slot() || !function.has_prototype() || function.PrototypeRequiresRuntimeLookup()) { return NoChange(); } + ObjectRef prototype = dependencies()->DependOnPrototypeProperty(function); Node* prototype_constant = jsgraph()->Constant(prototype); @@ -656,7 +668,7 @@ Reduction JSNativeContextSpecialization::ReduceJSPromiseResolve(Node* node) { // Check if the {constructor} is the %Promise% function. HeapObjectMatcher m(constructor); if (!m.HasValue() || - !m.Ref(broker()).equals(broker()->native_context().promise_function())) { + !m.Ref(broker()).equals(native_context().promise_function())) { return NoChange(); } @@ -680,7 +692,6 @@ Reduction JSNativeContextSpecialization::ReduceJSPromiseResolve(Node* node) { // ES section #sec-promise-resolve-functions Reduction JSNativeContextSpecialization::ReduceJSResolvePromise(Node* node) { - DisallowHeapAccessIf no_heap_access(FLAG_concurrent_inlining); DCHECK_EQ(IrOpcode::kJSResolvePromise, node->opcode()); Node* promise = NodeProperties::GetValueInput(node, 0); Node* resolution = NodeProperties::GetValueInput(node, 1); @@ -705,7 +716,9 @@ Reduction JSNativeContextSpecialization::ReduceJSResolvePromise(Node* node) { // Obtain pre-computed access infos from the broker. for (auto map : resolution_maps) { MapRef map_ref(broker(), map); - access_infos.push_back(broker()->GetAccessInfoForLoadingThen(map_ref)); + access_infos.push_back(broker()->GetPropertyAccessInfo( + map_ref, NameRef(broker(), isolate()->factory()->then_string()), + AccessMode::kLoad)); } } PropertyAccessInfo access_info = @@ -948,7 +961,7 @@ Reduction JSNativeContextSpecialization::ReduceGlobalAccess( } else { // Check that the {value} is a Smi. 
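A pattern repeated throughout this file is visible in the bound-function and JSFunction branches above: the old unconditional Serialize() calls are gone, and under concurrent inlining a ref whose data was not serialized ahead of time now forces a bail-out. A self-contained sketch of that guard (the flag, ref type, and trace call are stand-ins):

#include <cstdio>

struct RefSketch { bool serialized; };

bool EnsureDataIsAvailable(const RefSketch& ref, bool concurrent_inlining,
                           const char* what) {
  if (concurrent_inlining && !ref.serialized) {
    // ~TRACE_BROKER_MISSING: record what was missing, then bail out.
    std::fprintf(stderr, "missing broker data for %s\n", what);
    return false;  // caller returns NoChange()
  }
  // Outside concurrent inlining the heap is accessible, so the ref can read
  // directly and no eager Serialize() call is needed anymore.
  return true;
}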
value = effect = graph()->NewNode( - simplified()->CheckSmi(VectorSlotPair()), value, effect, control); + simplified()->CheckSmi(FeedbackSource()), value, effect, control); property_cell_value_type = Type::SignedSmall(); representation = MachineType::RepCompressedTaggedSigned(); } @@ -978,70 +991,85 @@ Reduction JSNativeContextSpecialization::ReduceGlobalAccess( } Reduction JSNativeContextSpecialization::ReduceJSLoadGlobal(Node* node) { - DisallowHeapAccessIf no_heap_acess(FLAG_concurrent_inlining); DCHECK_EQ(IrOpcode::kJSLoadGlobal, node->opcode()); LoadGlobalParameters const& p = LoadGlobalParametersOf(node->op()); if (!p.feedback().IsValid()) return NoChange(); - FeedbackSource source(p.feedback()); - // TODO(neis): Make consistent with other feedback processing code. - GlobalAccessFeedback const* processed = - FLAG_concurrent_inlining - ? broker()->GetGlobalAccessFeedback(source) - : broker()->ProcessFeedbackForGlobalAccess(source); - if (processed == nullptr) return NoChange(); + ProcessedFeedback const& processed = + broker()->GetFeedbackForGlobalAccess(FeedbackSource(p.feedback())); + if (processed.IsInsufficient()) return NoChange(); - if (processed->IsScriptContextSlot()) { + GlobalAccessFeedback const& feedback = processed.AsGlobalAccess(); + if (feedback.IsScriptContextSlot()) { Node* effect = NodeProperties::GetEffectInput(node); - Node* script_context = jsgraph()->Constant(processed->script_context()); + Node* script_context = jsgraph()->Constant(feedback.script_context()); Node* value = effect = - graph()->NewNode(javascript()->LoadContext(0, processed->slot_index(), - processed->immutable()), + graph()->NewNode(javascript()->LoadContext(0, feedback.slot_index(), + feedback.immutable()), script_context, effect); ReplaceWithValue(node, value, effect); return Replace(value); + } else if (feedback.IsPropertyCell()) { + return ReduceGlobalAccess(node, nullptr, nullptr, + NameRef(broker(), p.name()), AccessMode::kLoad, + nullptr, feedback.property_cell()); + } else { + DCHECK(feedback.IsMegamorphic()); + return NoChange(); } - - CHECK(processed->IsPropertyCell()); - return ReduceGlobalAccess(node, nullptr, nullptr, NameRef(broker(), p.name()), - AccessMode::kLoad, nullptr, - processed->property_cell()); } Reduction JSNativeContextSpecialization::ReduceJSStoreGlobal(Node* node) { - DisallowHeapAccessIf no_heap_acess(FLAG_concurrent_inlining); DCHECK_EQ(IrOpcode::kJSStoreGlobal, node->opcode()); Node* value = NodeProperties::GetValueInput(node, 0); - StoreGlobalParameters const& p = StoreGlobalParametersOf(node->op()); if (!p.feedback().IsValid()) return NoChange(); - FeedbackSource source(p.feedback()); - GlobalAccessFeedback const* processed = - FLAG_concurrent_inlining - ? 
broker()->GetGlobalAccessFeedback(source) - : broker()->ProcessFeedbackForGlobalAccess(source); - if (processed == nullptr) return NoChange(); + ProcessedFeedback const& processed = + broker()->GetFeedbackForGlobalAccess(FeedbackSource(p.feedback())); + if (processed.IsInsufficient()) return NoChange(); - if (processed->IsScriptContextSlot()) { - if (processed->immutable()) return NoChange(); + GlobalAccessFeedback const& feedback = processed.AsGlobalAccess(); + if (feedback.IsScriptContextSlot()) { + if (feedback.immutable()) return NoChange(); Node* effect = NodeProperties::GetEffectInput(node); Node* control = NodeProperties::GetControlInput(node); - Node* script_context = jsgraph()->Constant(processed->script_context()); + Node* script_context = jsgraph()->Constant(feedback.script_context()); effect = - graph()->NewNode(javascript()->StoreContext(0, processed->slot_index()), + graph()->NewNode(javascript()->StoreContext(0, feedback.slot_index()), value, script_context, effect, control); ReplaceWithValue(node, value, effect, control); return Replace(value); - } - - if (processed->IsPropertyCell()) { + } else if (feedback.IsPropertyCell()) { return ReduceGlobalAccess(node, nullptr, value, NameRef(broker(), p.name()), AccessMode::kStore, nullptr, - processed->property_cell()); + feedback.property_cell()); + } else { + DCHECK(feedback.IsMegamorphic()); + return NoChange(); } +} - UNREACHABLE(); +void JSNativeContextSpecialization::FilterMapsAndGetPropertyAccessInfos( + NamedAccessFeedback const& feedback, AccessMode access_mode, Node* receiver, + Node* effect, ZoneVector<PropertyAccessInfo>* access_infos) { + ZoneVector<Handle<Map>> receiver_maps(zone()); + + // Either infer maps from the graph or use the feedback. + if (!InferReceiverMaps(receiver, effect, &receiver_maps)) { + receiver_maps = feedback.maps(); + } + RemoveImpossibleReceiverMaps(receiver, &receiver_maps); + + for (Handle<Map> map_handle : receiver_maps) { + MapRef map(broker(), map_handle); + if (map.is_deprecated()) continue; + PropertyAccessInfo access_info = broker()->GetPropertyAccessInfo( + map, feedback.name(), access_mode, dependencies(), + FLAG_concurrent_inlining ? 
SerializationPolicy::kAssumeSerialized + : SerializationPolicy::kSerializeIfNeeded); + access_infos->push_back(access_info); + } } Reduction JSNativeContextSpecialization::ReduceNamedAccess( @@ -1052,18 +1080,23 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess( node->opcode() == IrOpcode::kJSLoadProperty || node->opcode() == IrOpcode::kJSStoreProperty || node->opcode() == IrOpcode::kJSStoreNamedOwn || - node->opcode() == IrOpcode::kJSHasProperty); + node->opcode() == IrOpcode::kJSStoreDataPropertyInLiteral || + node->opcode() == IrOpcode::kJSHasProperty || + node->opcode() == IrOpcode::kJSGetIterator); Node* receiver = NodeProperties::GetValueInput(node, 0); Node* context = NodeProperties::GetContextInput(node); Node* frame_state = NodeProperties::GetFrameStateInput(node); Node* effect = NodeProperties::GetEffectInput(node); Node* control = NodeProperties::GetControlInput(node); + ZoneVector<PropertyAccessInfo> access_infos_for_feedback(zone()); ZoneVector<PropertyAccessInfo> access_infos(zone()); + FilterMapsAndGetPropertyAccessInfos(feedback, access_mode, receiver, effect, + &access_infos_for_feedback); AccessInfoFactory access_info_factory(broker(), dependencies(), graph()->zone()); if (!access_info_factory.FinalizePropertyAccessInfos( - feedback.access_infos(), access_mode, &access_infos)) { + access_infos_for_feedback, access_mode, &access_infos)) { return NoChange(); } @@ -1072,7 +1105,7 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess( // to the current native context's global object instead. if (access_infos.size() == 1 && access_infos[0].receiver_maps().size() == 1) { MapRef receiver_map(broker(), access_infos[0].receiver_maps()[0]); - if (receiver_map.IsMapOfCurrentGlobalProxy()) { + if (receiver_map.IsMapOfTargetGlobalProxy()) { return ReduceGlobalAccess(node, receiver, value, feedback.name(), access_mode, key); } @@ -1318,7 +1351,6 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccessFromNexus( } Reduction JSNativeContextSpecialization::ReduceJSLoadNamed(Node* node) { - DisallowHeapAccessIf no_heap_access(FLAG_concurrent_inlining); DCHECK_EQ(IrOpcode::kJSLoadNamed, node->opcode()); NamedAccess const& p = NamedAccessOf(node->op()); Node* const receiver = NodeProperties::GetValueInput(node, 0); @@ -1332,9 +1364,7 @@ Reduction JSNativeContextSpecialization::ReduceJSLoadNamed(Node* node) { name.equals(ObjectRef(broker(), factory()->prototype_string()))) { // Optimize "prototype" property of functions. 
JSFunctionRef function = object.AsJSFunction(); - if (!FLAG_concurrent_inlining) { - function.Serialize(); - } else if (!function.serialized()) { + if (FLAG_concurrent_inlining && !function.serialized()) { TRACE_BROKER_MISSING(broker(), "data for function " << function); return NoChange(); } @@ -1363,8 +1393,16 @@ Reduction JSNativeContextSpecialization::ReduceJSLoadNamed(Node* node) { AccessMode::kLoad); } +Reduction JSNativeContextSpecialization::ReduceJSGetIterator(Node* node) { + DCHECK_EQ(IrOpcode::kJSGetIterator, node->opcode()); + PropertyAccess const& p = PropertyAccessOf(node->op()); + NameRef name(broker(), factory()->iterator_symbol()); + + return ReducePropertyAccess(node, nullptr, name, jsgraph()->Dead(), + FeedbackSource(p.feedback()), AccessMode::kLoad); +} + Reduction JSNativeContextSpecialization::ReduceJSStoreNamed(Node* node) { - DisallowHeapAccessIf no_heap_access(FLAG_concurrent_inlining); DCHECK_EQ(IrOpcode::kJSStoreNamed, node->opcode()); NamedAccess const& p = NamedAccessOf(node->op()); Node* const value = NodeProperties::GetValueInput(node, 1); @@ -1376,7 +1414,6 @@ Reduction JSNativeContextSpecialization::ReduceJSStoreNamed(Node* node) { } Reduction JSNativeContextSpecialization::ReduceJSStoreNamedOwn(Node* node) { - DisallowHeapAccessIf no_heap_access(FLAG_concurrent_inlining); DCHECK_EQ(IrOpcode::kJSStoreNamedOwn, node->opcode()); StoreNamedOwnParameters const& p = StoreNamedOwnParametersOf(node->op()); Node* const value = NodeProperties::GetValueInput(node, 1); @@ -1401,7 +1438,7 @@ Reduction JSNativeContextSpecialization::ReduceElementAccessOnString( // Ensure that the {receiver} is actually a String. receiver = effect = graph()->NewNode( - simplified()->CheckString(VectorSlotPair()), receiver, effect, control); + simplified()->CheckString(FeedbackSource()), receiver, effect, control); // Determine the {receiver} length. Node* length = graph()->NewNode(simplified()->StringLength(), receiver); @@ -1428,13 +1465,50 @@ base::Optional<JSTypedArrayRef> GetTypedArrayConstant(JSHeapBroker* broker, } } // namespace +void JSNativeContextSpecialization::RemoveImpossibleReceiverMaps( + Node* receiver, ZoneVector<Handle<Map>>* receiver_maps) const { + base::Optional<MapRef> root_map = InferReceiverRootMap(receiver); + if (root_map.has_value()) { + DCHECK(!root_map->is_abandoned_prototype_map()); + receiver_maps->erase( + std::remove_if(receiver_maps->begin(), receiver_maps->end(), + [root_map, this](Handle<Map> map) { + MapRef map_ref(broker(), map); + return map_ref.is_abandoned_prototype_map() || + (map_ref.FindRootMap().has_value() && + !map_ref.FindRootMap()->equals(*root_map)); + }), + receiver_maps->end()); + } +} + +// Possibly refine the feedback using inferred map information from the graph. +ElementAccessFeedback const& +JSNativeContextSpecialization::TryRefineElementAccessFeedback( + ElementAccessFeedback const& feedback, Node* receiver, Node* effect) const { + AccessMode access_mode = feedback.keyed_mode().access_mode(); + bool use_inference = + access_mode == AccessMode::kLoad || access_mode == AccessMode::kHas; + if (!use_inference) return feedback; + + ZoneVector<Handle<Map>> inferred_maps(zone()); + if (!InferReceiverMaps(receiver, effect, &inferred_maps)) return feedback; + + RemoveImpossibleReceiverMaps(receiver, &inferred_maps); + // TODO(neis): After Refine, the resulting feedback can still contain + // impossible maps when a target is kept only because more than one of its + // sources was inferred. 
Think of a way to completely rule out impossible + // maps. + return feedback.Refine(inferred_maps, zone()); +} + Reduction JSNativeContextSpecialization::ReduceElementAccess( Node* node, Node* index, Node* value, - ElementAccessFeedback const& processed) { - DisallowHeapAccessIf no_heap_access(FLAG_concurrent_inlining); + ElementAccessFeedback const& feedback) { DCHECK(node->opcode() == IrOpcode::kJSLoadProperty || node->opcode() == IrOpcode::kJSStoreProperty || node->opcode() == IrOpcode::kJSStoreInArrayLiteral || + node->opcode() == IrOpcode::kJSStoreDataPropertyInLiteral || node->opcode() == IrOpcode::kJSHasProperty); Node* receiver = NodeProperties::GetValueInput(node, 0); @@ -1443,30 +1517,34 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess( Node* frame_state = NodeProperties::FindFrameStateBefore(node, jsgraph()->Dead()); - AccessMode access_mode = processed.keyed_mode.access_mode(); + // TODO(neis): It's odd that we do optimizations below that don't really care + // about the feedback, but we don't do them when the feedback is megamorphic. + if (feedback.transition_groups().empty()) return NoChange(); + + ElementAccessFeedback const& refined_feedback = + TryRefineElementAccessFeedback(feedback, receiver, effect); + + AccessMode access_mode = refined_feedback.keyed_mode().access_mode(); if ((access_mode == AccessMode::kLoad || access_mode == AccessMode::kHas) && receiver->opcode() == IrOpcode::kHeapConstant) { - Reduction reduction = ReduceKeyedLoadFromHeapConstant( - node, index, access_mode, processed.keyed_mode.load_mode()); + Reduction reduction = ReduceElementLoadFromHeapConstant( + node, index, access_mode, refined_feedback.keyed_mode().load_mode()); if (reduction.Changed()) return reduction; } - if (HasOnlyStringMaps(broker(), processed.receiver_maps)) { - DCHECK(processed.transitions.empty()); + if (!refined_feedback.transition_groups().empty() && + refined_feedback.HasOnlyStringMaps(broker())) { return ReduceElementAccessOnString(node, index, value, - processed.keyed_mode); + refined_feedback.keyed_mode()); } - // Compute element access infos for the receiver maps. AccessInfoFactory access_info_factory(broker(), dependencies(), graph()->zone()); ZoneVector<ElementAccessInfo> access_infos(zone()); - if (!access_info_factory.ComputeElementAccessInfos(processed, access_mode, - &access_infos)) { + if (!access_info_factory.ComputeElementAccessInfos(refined_feedback, + &access_infos) || + access_infos.empty()) { return NoChange(); - } else if (access_infos.empty()) { - return ReduceSoftDeoptimize( - node, DeoptimizeReason::kInsufficientTypeFeedbackForGenericKeyedAccess); } // For holey stores or growing stores, we need to check that the prototype @@ -1485,7 +1563,7 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess( // then we need to check that all prototypes have stable maps with // fast elements (and we need to guard against changes to that below). 
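RemoveImpossibleReceiverMaps above is the classic erase/remove_if filter: any map that is an abandoned prototype map, or whose root map contradicts the root map inferred for the receiver, is dropped. A simplified standalone version (the real predicate only rejects a map whose root map is actually known and different):

#include <algorithm>
#include <optional>
#include <vector>

struct MapInfo {
  int root_id;
  bool abandoned_prototype_map;
};

void RemoveImpossibleMaps(std::vector<MapInfo>* maps,
                          std::optional<int> inferred_root) {
  if (!inferred_root.has_value()) return;  // nothing inferred: keep all maps
  maps->erase(std::remove_if(maps->begin(), maps->end(),
                             [&](const MapInfo& m) {
                               return m.abandoned_prototype_map ||
                                      m.root_id != *inferred_root;
                             }),
              maps->end());
}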
if ((IsHoleyOrDictionaryElementsKind(receiver_map.elements_kind()) || - IsGrowStoreMode(processed.keyed_mode.store_mode())) && + IsGrowStoreMode(feedback.keyed_mode().store_mode())) && !receiver_map.HasOnlyStablePrototypesWithFastElements( &prototype_maps)) { return NoChange(); @@ -1514,9 +1592,7 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess( base::Optional<JSTypedArrayRef> typed_array = GetTypedArrayConstant(broker(), receiver); if (typed_array.has_value()) { - if (!FLAG_concurrent_inlining) { - typed_array->Serialize(); - } else if (!typed_array->serialized()) { + if (FLAG_concurrent_inlining && !typed_array->serialized()) { TRACE_BROKER_MISSING(broker(), "data for typed array " << *typed_array); return NoChange(); } @@ -1558,7 +1634,7 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess( // Access the actual element. ValueEffectControl continuation = BuildElementAccess(receiver, index, value, effect, control, access_info, - processed.keyed_mode); + feedback.keyed_mode()); value = continuation.value(); effect = continuation.effect(); control = continuation.control(); @@ -1625,7 +1701,7 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess( // Access the actual element. ValueEffectControl continuation = BuildElementAccess(this_receiver, this_index, this_value, this_effect, - this_control, access_info, processed.keyed_mode); + this_control, access_info, feedback.keyed_mode()); values.push_back(continuation.value()); effects.push_back(continuation.effect()); controls.push_back(continuation.control()); @@ -1658,7 +1734,7 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess( return Replace(value); } -Reduction JSNativeContextSpecialization::ReduceKeyedLoadFromHeapConstant( +Reduction JSNativeContextSpecialization::ReduceElementLoadFromHeapConstant( Node* node, Node* key, AccessMode access_mode, KeyedAccessLoadMode load_mode) { DCHECK(node->opcode() == IrOpcode::kJSLoadProperty || @@ -1733,67 +1809,35 @@ Reduction JSNativeContextSpecialization::ReduceKeyedLoadFromHeapConstant( Reduction JSNativeContextSpecialization::ReducePropertyAccess( Node* node, Node* key, base::Optional<NameRef> static_name, Node* value, FeedbackSource const& source, AccessMode access_mode) { + DisallowHeapAccessIf disallow_heap_access(FLAG_concurrent_inlining); + DCHECK_EQ(key == nullptr, static_name.has_value()); DCHECK(node->opcode() == IrOpcode::kJSLoadProperty || node->opcode() == IrOpcode::kJSStoreProperty || node->opcode() == IrOpcode::kJSStoreInArrayLiteral || + node->opcode() == IrOpcode::kJSStoreDataPropertyInLiteral || node->opcode() == IrOpcode::kJSHasProperty || node->opcode() == IrOpcode::kJSLoadNamed || node->opcode() == IrOpcode::kJSStoreNamed || - node->opcode() == IrOpcode::kJSStoreNamedOwn); - - Node* receiver = NodeProperties::GetValueInput(node, 0); - Node* effect = NodeProperties::GetEffectInput(node); - - ProcessedFeedback const* processed = nullptr; - if (FLAG_concurrent_inlining) { - processed = broker()->GetFeedback(source); - // TODO(neis): Infer maps from the graph and consolidate with feedback/hints - // and filter impossible candidates based on inferred root map. - } else { - // TODO(neis): Try to unify this with the similar code in the serializer. 
- FeedbackNexus nexus(source.vector, source.slot); - if (nexus.ic_state() == UNINITIALIZED) { - processed = new (zone()) InsufficientFeedback(); - } else { - MapHandles receiver_maps; - if (!ExtractReceiverMaps(receiver, effect, nexus, &receiver_maps)) { - processed = new (zone()) InsufficientFeedback(); - } else if (!receiver_maps.empty()) { - base::Optional<NameRef> name = static_name.has_value() - ? static_name - : broker()->GetNameFeedback(nexus); - if (name.has_value()) { - ZoneVector<PropertyAccessInfo> access_infos(zone()); - AccessInfoFactory access_info_factory(broker(), dependencies(), - graph()->zone()); - access_info_factory.ComputePropertyAccessInfos( - receiver_maps, name->object(), access_mode, &access_infos); - processed = new (zone()) NamedAccessFeedback(*name, access_infos); - } else if (nexus.GetKeyType() == ELEMENT && - MEGAMORPHIC != nexus.ic_state()) { - processed = broker()->ProcessFeedbackMapsForElementAccess( - receiver_maps, KeyedAccessMode::FromNexus(nexus)); - } - } - } - } + node->opcode() == IrOpcode::kJSStoreNamedOwn || + node->opcode() == IrOpcode::kJSGetIterator); + DCHECK_GE(node->op()->ControlOutputCount(), 1); - if (processed == nullptr) return NoChange(); - switch (processed->kind()) { + ProcessedFeedback const& feedback = + broker()->GetFeedbackForPropertyAccess(source, access_mode, static_name); + switch (feedback.kind()) { case ProcessedFeedback::kInsufficient: return ReduceSoftDeoptimize( node, DeoptimizeReason::kInsufficientTypeFeedbackForGenericNamedAccess); case ProcessedFeedback::kNamedAccess: - return ReduceNamedAccess(node, value, *processed->AsNamedAccess(), + return ReduceNamedAccess(node, value, feedback.AsNamedAccess(), access_mode, key); case ProcessedFeedback::kElementAccess: - CHECK_EQ(processed->AsElementAccess()->keyed_mode.access_mode(), - access_mode); - return ReduceElementAccess(node, key, value, - *processed->AsElementAccess()); - case ProcessedFeedback::kGlobalAccess: + DCHECK_EQ(feedback.AsElementAccess().keyed_mode().access_mode(), + access_mode); + return ReduceElementAccess(node, key, value, feedback.AsElementAccess()); + default: UNREACHABLE(); } } @@ -1807,7 +1851,7 @@ Reduction JSNativeContextSpecialization::ReduceSoftDeoptimize( Node* frame_state = NodeProperties::FindFrameStateBefore(node, jsgraph()->Dead()); Node* deoptimize = graph()->NewNode( - common()->Deoptimize(DeoptimizeKind::kSoft, reason, VectorSlotPair()), + common()->Deoptimize(DeoptimizeKind::kSoft, reason, FeedbackSource()), frame_state, effect, control); // TODO(bmeurer): This should be on the AdvancedReducer somehow. 
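The unified ReducePropertyAccess above now opens with DisallowHeapAccessIf(FLAG_concurrent_inlining), a scope guard that is armed only when its condition holds. A generic sketch of that conditional-RAII idiom (this is not V8's implementation, just the shape of the technique):

#include <cassert>

class DisallowIfSketch {
 public:
  explicit DisallowIfSketch(bool condition) : armed_(condition) {
    if (armed_) ++depth_;
  }
  ~DisallowIfSketch() {
    if (armed_) --depth_;
  }
  // The guarded operation asserts this before doing the forbidden thing,
  // failing fast if it runs inside an armed scope.
  static void AssertAllowed() { assert(depth_ == 0); }

 private:
  bool armed_;
  static thread_local int depth_;
};

thread_local int DisallowIfSketch::depth_ = 0;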
NodeProperties::MergeControlToEnd(graph(), common(), deoptimize); @@ -1818,7 +1862,6 @@ Reduction JSNativeContextSpecialization::ReduceSoftDeoptimize( } Reduction JSNativeContextSpecialization::ReduceJSHasProperty(Node* node) { - DisallowHeapAccessIf no_heap_access(FLAG_concurrent_inlining); DCHECK_EQ(IrOpcode::kJSHasProperty, node->opcode()); PropertyAccess const& p = PropertyAccessOf(node->op()); Node* key = NodeProperties::GetValueInput(node, 1); @@ -1936,7 +1979,6 @@ Reduction JSNativeContextSpecialization::ReduceJSLoadPropertyWithEnumeratedKey( } Reduction JSNativeContextSpecialization::ReduceJSLoadProperty(Node* node) { - DisallowHeapAccessIf no_heap_acess(FLAG_concurrent_inlining); DCHECK_EQ(IrOpcode::kJSLoadProperty, node->opcode()); PropertyAccess const& p = PropertyAccessOf(node->op()); Node* name = NodeProperties::GetValueInput(node, 1); @@ -1953,7 +1995,6 @@ Reduction JSNativeContextSpecialization::ReduceJSLoadProperty(Node* node) { } Reduction JSNativeContextSpecialization::ReduceJSStoreProperty(Node* node) { - DisallowHeapAccessIf no_heap_access(FLAG_concurrent_inlining); DCHECK_EQ(IrOpcode::kJSStoreProperty, node->opcode()); PropertyAccess const& p = PropertyAccessOf(node->op()); Node* const key = NodeProperties::GetValueInput(node, 1); @@ -1975,7 +2016,7 @@ Node* JSNativeContextSpecialization::InlinePropertyGetterCall( Node* value; if (constant.IsJSFunction()) { value = *effect = *control = graph()->NewNode( - jsgraph()->javascript()->Call(2, CallFrequency(), VectorSlotPair(), + jsgraph()->javascript()->Call(2, CallFrequency(), FeedbackSource(), ConvertReceiverMode::kNotNullOrUndefined), target, receiver, context, frame_state, *effect, *control); } else { @@ -2012,7 +2053,7 @@ void JSNativeContextSpecialization::InlinePropertySetterCall( // Introduce the call to the setter function. if (constant.IsJSFunction()) { *effect = *control = graph()->NewNode( - jsgraph()->javascript()->Call(3, CallFrequency(), VectorSlotPair(), + jsgraph()->javascript()->Call(3, CallFrequency(), FeedbackSource(), ConvertReceiverMode::kNotNullOrUndefined), target, receiver, value, context, frame_state, *effect, *control); } else { @@ -2197,12 +2238,10 @@ JSNativeContextSpecialization::BuildPropertyStore( Node* storage = receiver; if (!field_index.is_inobject()) { storage = effect = graph()->NewNode( - simplified()->LoadField(AccessBuilder::ForJSObjectPropertiesOrHash()), + simplified()->LoadField( + AccessBuilder::ForJSObjectPropertiesOrHashKnownPointer()), storage, effect, control); } - PropertyConstness constness = access_info.IsDataConstant() - ? PropertyConstness::kConst - : PropertyConstness::kMutable; bool store_to_existing_constant_field = access_info.IsDataConstant() && access_mode == AccessMode::kStore && !access_info.HasTransitionMap(); @@ -2215,24 +2254,25 @@ JSNativeContextSpecialization::BuildPropertyStore( MachineType::TypeForRepresentation(field_representation), kFullWriteBarrier, LoadSensitivity::kUnsafe, - constness}; + access_info.GetConstFieldInfo(), + access_mode == AccessMode::kStoreInLiteral}; switch (field_representation) { case MachineRepresentation::kFloat64: { value = effect = - graph()->NewNode(simplified()->CheckNumber(VectorSlotPair()), value, + graph()->NewNode(simplified()->CheckNumber(FeedbackSource()), value, effect, control); if (!field_index.is_inobject() || !FLAG_unbox_double_fields) { if (access_info.HasTransitionMap()) { - // Allocate a MutableHeapNumber for the new property. + // Allocate a HeapNumber for the new property. 
AllocationBuilder a(jsgraph(), effect, control); a.Allocate(HeapNumber::kSize, AllocationType::kYoung, Type::OtherInternal()); a.Store(AccessBuilder::ForMap(), - factory()->mutable_heap_number_map()); + MapRef(broker(), factory()->heap_number_map())); FieldAccess value_field_access = AccessBuilder::ForHeapNumberValue(); - value_field_access.constness = field_access.constness; + value_field_access.const_field_info = field_access.const_field_info; a.Store(value_field_access, value); value = effect = a.Finish(); @@ -2241,7 +2281,7 @@ JSNativeContextSpecialization::BuildPropertyStore( MachineType::TypeCompressedTaggedPointer(); field_access.write_barrier_kind = kPointerWriteBarrier; } else { - // We just store directly to the MutableHeapNumber. + // We just store directly to the HeapNumber. FieldAccess const storage_access = { kTaggedBase, field_index.offset(), @@ -2251,7 +2291,8 @@ JSNativeContextSpecialization::BuildPropertyStore( MachineType::TypeCompressedTaggedPointer(), kPointerWriteBarrier, LoadSensitivity::kUnsafe, - constness}; + access_info.GetConstFieldInfo(), + access_mode == AccessMode::kStoreInLiteral}; storage = effect = graph()->NewNode(simplified()->LoadField(storage_access), storage, effect, control); @@ -2300,7 +2341,7 @@ JSNativeContextSpecialization::BuildPropertyStore( if (field_representation == MachineRepresentation::kTaggedSigned || field_representation == MachineRepresentation::kCompressedSigned) { value = effect = graph()->NewNode( - simplified()->CheckSmi(VectorSlotPair()), value, effect, control); + simplified()->CheckSmi(FeedbackSource()), value, effect, control); field_access.write_barrier_kind = kNoWriteBarrier; } else if (field_representation == @@ -2356,7 +2397,7 @@ JSNativeContextSpecialization::BuildPropertyStore( storage, value, effect, control); // Atomically switch to the new properties below. - field_access = AccessBuilder::ForJSObjectPropertiesOrHash(); + field_access = AccessBuilder::ForJSObjectPropertiesOrHashKnownPointer(); value = storage; storage = receiver; } @@ -2382,80 +2423,18 @@ JSNativeContextSpecialization::BuildPropertyStore( Reduction JSNativeContextSpecialization::ReduceJSStoreDataPropertyInLiteral( Node* node) { DCHECK_EQ(IrOpcode::kJSStoreDataPropertyInLiteral, node->opcode()); - FeedbackParameter const& p = FeedbackParameterOf(node->op()); + Node* const key = NodeProperties::GetValueInput(node, 1); + Node* const value = NodeProperties::GetValueInput(node, 2); if (!p.feedback().IsValid()) return NoChange(); - - FeedbackNexus nexus(p.feedback().vector(), p.feedback().slot()); - if (nexus.IsUninitialized()) { - return NoChange(); - } - - if (nexus.ic_state() == MEGAMORPHIC) { - return NoChange(); - } - - DCHECK_EQ(MONOMORPHIC, nexus.ic_state()); - - Map map = nexus.GetFirstMap(); - if (map.is_null()) { - // Maps are weakly held in the type feedback vector, we may not have one. 
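The double-store path above (note the MutableHeapNumber to HeapNumber renaming) has two cases: on a map transition a fresh number box is allocated for the value, otherwise the value is written straight through the existing box. A conceptual sketch of the boxing, with illustrative types and GC/write barriers ignored:

struct NumberBoxSketch { double value; };

struct ObjectSketch {
  NumberBoxSketch* double_field;  // out-of-object double held behind a box
};

void StoreBoxedDouble(ObjectSketch* o, double v, bool transitioning) {
  if (transitioning) {
    o->double_field = new NumberBoxSketch{v};  // ~AllocationBuilder path
  } else {
    o->double_field->value = v;  // "store directly to the HeapNumber"
  }
}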
- return NoChange(); - } - - Handle<Map> receiver_map(map, isolate()); - if (!Map::TryUpdate(isolate(), receiver_map).ToHandle(&receiver_map)) - return NoChange(); - - NameRef cached_name( - broker(), - handle(Name::cast(nexus.GetFeedbackExtra()->GetHeapObjectAssumeStrong()), - isolate())); - - AccessInfoFactory access_info_factory(broker(), dependencies(), - graph()->zone()); - PropertyAccessInfo access_info = - access_info_factory.ComputePropertyAccessInfo( - receiver_map, cached_name.object(), AccessMode::kStoreInLiteral); - if (access_info.IsInvalid()) return NoChange(); - access_info.RecordDependencies(dependencies()); - - Node* receiver = NodeProperties::GetValueInput(node, 0); - Node* effect = NodeProperties::GetEffectInput(node); - Node* control = NodeProperties::GetControlInput(node); - - // Monomorphic property access. - PropertyAccessBuilder access_builder(jsgraph(), broker(), dependencies()); - access_builder.BuildCheckMaps(receiver, &effect, control, - access_info.receiver_maps()); - - // Ensure that {name} matches the cached name. - Node* name = NodeProperties::GetValueInput(node, 1); - Node* check = graph()->NewNode(simplified()->ReferenceEqual(), name, - jsgraph()->Constant(cached_name)); - effect = graph()->NewNode(simplified()->CheckIf(DeoptimizeReason::kWrongName), - check, effect, control); - - Node* value = NodeProperties::GetValueInput(node, 2); - Node* context = NodeProperties::GetContextInput(node); - Node* frame_state_lazy = NodeProperties::GetFrameStateInput(node); - - // Generate the actual property access. - ValueEffectControl continuation = BuildPropertyAccess( - receiver, value, context, frame_state_lazy, effect, control, cached_name, - nullptr, access_info, AccessMode::kStoreInLiteral); - value = continuation.value(); - effect = continuation.effect(); - control = continuation.control(); - - ReplaceWithValue(node, value, effect, control); - return Replace(value); + return ReducePropertyAccess(node, key, base::nullopt, value, + FeedbackSource(p.feedback()), + AccessMode::kStoreInLiteral); } Reduction JSNativeContextSpecialization::ReduceJSStoreInArrayLiteral( Node* node) { - DisallowHeapAccessIf no_heap_access(FLAG_concurrent_inlining); DCHECK_EQ(IrOpcode::kJSStoreInArrayLiteral, node->opcode()); FeedbackParameter const& p = FeedbackParameterOf(node->op()); Node* const index = NodeProperties::GetValueInput(node, 1); @@ -2591,7 +2570,7 @@ JSNativeContextSpecialization::BuildElementAccess( // bounds check below and just skip the property access if it's out of // bounds for the {receiver}. index = effect = graph()->NewNode( - simplified()->CheckSmi(VectorSlotPair()), index, effect, control); + simplified()->CheckSmi(FeedbackSource()), index, effect, control); // Cast the {index} to Unsigned32 range, so that the bounds checks // below are performed on unsigned values, which means that all the @@ -2600,7 +2579,7 @@ JSNativeContextSpecialization::BuildElementAccess( } else { // Check that the {index} is in the valid range for the {receiver}. index = effect = - graph()->NewNode(simplified()->CheckBounds(VectorSlotPair()), index, + graph()->NewNode(simplified()->CheckBounds(FeedbackSource()), index, length, effect, control); } @@ -2660,7 +2639,7 @@ JSNativeContextSpecialization::BuildElementAccess( // and truncate it to a Number appropriately. 
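The "cast the {index} to Unsigned32 range" comment above relies on a classic trick for folding two comparisons into one: converting a possibly negative index to unsigned makes a single unsigned compare reject both negative and too-large values. A standalone illustration:

#include <cstdint>

bool IndexInBounds(int32_t index, uint32_t length) {
  // A negative index wraps to a huge unsigned value, so the single
  // comparison below fails for it exactly as it should.
  return static_cast<uint32_t>(index) < length;
}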
value = effect = graph()->NewNode( simplified()->SpeculativeToNumber( - NumberOperationHint::kNumberOrOddball, VectorSlotPair()), + NumberOperationHint::kNumberOrOddball, FeedbackSource()), value, effect, control); // Introduce the appropriate truncation for {value}. Currently we @@ -2756,12 +2735,12 @@ JSNativeContextSpecialization::BuildElementAccess( // bounds check below and just skip the store below if it's out of // bounds for the {receiver}. index = effect = graph()->NewNode( - simplified()->CheckBounds(VectorSlotPair()), index, + simplified()->CheckBounds(FeedbackSource()), index, jsgraph()->Constant(Smi::kMaxValue), effect, control); } else { // Check that the {index} is in the valid range for the {receiver}. index = effect = - graph()->NewNode(simplified()->CheckBounds(VectorSlotPair()), index, + graph()->NewNode(simplified()->CheckBounds(FeedbackSource()), index, length, effect, control); } @@ -2825,7 +2804,7 @@ JSNativeContextSpecialization::BuildElementAccess( // truncating. vtrue = etrue = graph()->NewNode( simplified()->CheckFloat64Hole( - CheckFloat64HoleMode::kAllowReturnHole, VectorSlotPair()), + CheckFloat64HoleMode::kAllowReturnHole, FeedbackSource()), vtrue, etrue, if_true); } } @@ -2874,7 +2853,7 @@ JSNativeContextSpecialization::BuildElementAccess( mode = CheckFloat64HoleMode::kAllowReturnHole; } value = effect = graph()->NewNode( - simplified()->CheckFloat64Hole(mode, VectorSlotPair()), value, + simplified()->CheckFloat64Hole(mode, FeedbackSource()), value, effect, control); } } @@ -2905,7 +2884,7 @@ JSNativeContextSpecialization::BuildElementAccess( Node* etrue = effect; Node* checked = etrue = - graph()->NewNode(simplified()->CheckBounds(VectorSlotPair()), index, + graph()->NewNode(simplified()->CheckBounds(FeedbackSource()), index, length, etrue, if_true); Node* element = etrue = @@ -2936,7 +2915,7 @@ JSNativeContextSpecialization::BuildElementAccess( } else { etrue = graph()->NewNode( simplified()->CheckFloat64Hole( - CheckFloat64HoleMode::kNeverReturnHole, VectorSlotPair()), + CheckFloat64HoleMode::kNeverReturnHole, FeedbackSource()), element, etrue, if_true); } @@ -2956,10 +2935,10 @@ JSNativeContextSpecialization::BuildElementAccess( if (IsSmiElementsKind(elements_kind)) { value = effect = graph()->NewNode( - simplified()->CheckSmi(VectorSlotPair()), value, effect, control); + simplified()->CheckSmi(FeedbackSource()), value, effect, control); } else if (IsDoubleElementsKind(elements_kind)) { value = effect = - graph()->NewNode(simplified()->CheckNumber(VectorSlotPair()), value, + graph()->NewNode(simplified()->CheckNumber(FeedbackSource()), value, effect, control); // Make sure we do not store signalling NaNs into double arrays. value = graph()->NewNode(simplified()->NumberSilenceNaN(), value); @@ -2994,7 +2973,7 @@ JSNativeContextSpecialization::BuildElementAccess( : graph()->NewNode(simplified()->NumberAdd(), length, jsgraph()->OneConstant()); index = effect = - graph()->NewNode(simplified()->CheckBounds(VectorSlotPair()), index, + graph()->NewNode(simplified()->CheckBounds(FeedbackSource()), index, limit, effect, control); // Grow {elements} backing store if necessary. @@ -3003,7 +2982,7 @@ JSNativeContextSpecialization::BuildElementAccess( ? 
GrowFastElementsMode::kDoubleElements : GrowFastElementsMode::kSmiOrObjectElements; elements = effect = graph()->NewNode( - simplified()->MaybeGrowFastElements(mode, VectorSlotPair()), + simplified()->MaybeGrowFastElements(mode, FeedbackSource()), receiver, elements, index, elements_length, effect, control); // If we didn't grow {elements}, it might still be COW, in which case we @@ -3063,7 +3042,7 @@ Node* JSNativeContextSpecialization::BuildIndexedStringLoad( dependencies()->DependOnNoElementsProtector()) { // Ensure that the {index} is a valid String length. index = *effect = graph()->NewNode( - simplified()->CheckBounds(VectorSlotPair()), index, + simplified()->CheckBounds(FeedbackSource()), index, jsgraph()->Constant(String::kMaxLength), *effect, *control); // Load the single character string from {receiver} or yield @@ -3095,7 +3074,7 @@ Node* JSNativeContextSpecialization::BuildIndexedStringLoad( } else { // Ensure that {index} is less than {receiver} length. index = *effect = - graph()->NewNode(simplified()->CheckBounds(VectorSlotPair()), index, + graph()->NewNode(simplified()->CheckBounds(FeedbackSource()), index, length, *effect, *control); Node* masked_index = graph()->NewNode(simplified()->PoisonIndex(), index); @@ -3196,7 +3175,6 @@ bool JSNativeContextSpecialization::CanTreatHoleAsUndefined( // native contexts, as the global Array protector works isolate-wide). for (Handle<Map> map : receiver_maps) { MapRef receiver_map(broker(), map); - if (!FLAG_concurrent_inlining) receiver_map.SerializePrototype(); ObjectRef receiver_prototype = receiver_map.prototype(); if (!receiver_prototype.IsJSObject() || !broker()->IsArrayOrObjectPrototype(receiver_prototype.AsJSObject())) { @@ -3208,47 +3186,9 @@ bool JSNativeContextSpecialization::CanTreatHoleAsUndefined( return dependencies()->DependOnNoElementsProtector(); } -// Returns false iff we have insufficient feedback (uninitialized or obsolete). -bool JSNativeContextSpecialization::ExtractReceiverMaps( - Node* receiver, Node* effect, FeedbackNexus const& nexus, - MapHandles* receiver_maps) { - DCHECK(receiver_maps->empty()); - if (nexus.IsUninitialized()) return false; - - // See if we can infer a concrete type for the {receiver}. Solely relying on - // the inference is not safe for keyed stores, because we would potentially - // miss out on transitions that need to be performed. - { - FeedbackSlotKind kind = nexus.kind(); - bool use_inference = - !IsKeyedStoreICKind(kind) && !IsStoreInArrayLiteralICKind(kind); - if (use_inference && InferReceiverMaps(receiver, effect, receiver_maps)) { - TryUpdateThenDropDeprecated(isolate(), receiver_maps); - return true; - } - } - - if (nexus.ExtractMaps(receiver_maps) == 0) return true; - - // Try to filter impossible candidates based on inferred root map. 
- Handle<Map> root_map; - if (InferReceiverRootMap(receiver).ToHandle(&root_map)) { - DCHECK(!root_map->is_abandoned_prototype_map()); - Isolate* isolate = this->isolate(); - receiver_maps->erase( - std::remove_if(receiver_maps->begin(), receiver_maps->end(), - [root_map, isolate](Handle<Map> map) { - return map->is_abandoned_prototype_map() || - map->FindRootMap(isolate) != *root_map; - }), - receiver_maps->end()); - } - TryUpdateThenDropDeprecated(isolate(), receiver_maps); - return !receiver_maps->empty(); -} - bool JSNativeContextSpecialization::InferReceiverMaps( - Node* receiver, Node* effect, MapHandles* receiver_maps) { + Node* receiver, Node* effect, + ZoneVector<Handle<Map>>* receiver_maps) const { ZoneHandleSet<Map> maps; NodeProperties::InferReceiverMapsResult result = NodeProperties::InferReceiverMapsUnsafe(broker(), receiver, effect, @@ -3273,21 +3213,24 @@ bool JSNativeContextSpecialization::InferReceiverMaps( return false; } -MaybeHandle<Map> JSNativeContextSpecialization::InferReceiverRootMap( - Node* receiver) { +base::Optional<MapRef> JSNativeContextSpecialization::InferReceiverRootMap( + Node* receiver) const { HeapObjectMatcher m(receiver); if (m.HasValue()) { - return handle(m.Value()->map().FindRootMap(isolate()), isolate()); + MapRef map = m.Ref(broker()).map(); + return map.FindRootMap(); } else if (m.IsJSCreate()) { base::Optional<MapRef> initial_map = NodeProperties::GetJSCreateMap(broker(), receiver); if (initial_map.has_value()) { - DCHECK_EQ(*initial_map->object(), - initial_map->object()->FindRootMap(isolate())); - return initial_map->object(); + if (!initial_map->FindRootMap().has_value()) { + return base::nullopt; + } + DCHECK(initial_map->equals(*initial_map->FindRootMap())); + return *initial_map; } } - return MaybeHandle<Map>(); + return base::nullopt; } Graph* JSNativeContextSpecialization::graph() const { diff --git a/deps/v8/src/compiler/js-native-context-specialization.h b/deps/v8/src/compiler/js-native-context-specialization.h index 8510c76bfc..a0707b9830 100644 --- a/deps/v8/src/compiler/js-native-context-specialization.h +++ b/deps/v8/src/compiler/js-native-context-specialization.h @@ -53,7 +53,6 @@ class V8_EXPORT_PRIVATE JSNativeContextSpecialization final JSNativeContextSpecialization(Editor* editor, JSGraph* jsgraph, JSHeapBroker* broker, Flags flags, - Handle<Context> native_context, CompilationDependencies* dependencies, Zone* zone, Zone* shared_zone); @@ -84,6 +83,7 @@ class V8_EXPORT_PRIVATE JSNativeContextSpecialization final Reduction ReduceJSLoadGlobal(Node* node); Reduction ReduceJSStoreGlobal(Node* node); Reduction ReduceJSLoadNamed(Node* node); + Reduction ReduceJSGetIterator(Node* node); Reduction ReduceJSStoreNamed(Node* node); Reduction ReduceJSHasProperty(Node* node); Reduction ReduceJSLoadProperty(Node* node); @@ -114,9 +114,9 @@ class V8_EXPORT_PRIVATE JSNativeContextSpecialization final Reduction ReduceGlobalAccess(Node* node, Node* receiver, Node* value, NameRef const& name, AccessMode access_mode, Node* key, PropertyCellRef const& property_cell); - Reduction ReduceKeyedLoadFromHeapConstant(Node* node, Node* key, - AccessMode access_mode, - KeyedAccessLoadMode load_mode); + Reduction ReduceElementLoadFromHeapConstant(Node* node, Node* key, + AccessMode access_mode, + KeyedAccessLoadMode load_mode); Reduction ReduceElementAccessOnString(Node* node, Node* index, Node* value, KeyedAccessMode const& keyed_mode); @@ -212,18 +212,25 @@ class V8_EXPORT_PRIVATE JSNativeContextSpecialization final // code dependencies and might use 
the array protector cell. bool CanTreatHoleAsUndefined(ZoneVector<Handle<Map>> const& receiver_maps); - // Extract receiver maps from {nexus} and filter based on {receiver} if - // possible. - bool ExtractReceiverMaps(Node* receiver, Node* effect, - FeedbackNexus const& nexus, - MapHandles* receiver_maps); + void RemoveImpossibleReceiverMaps( + Node* receiver, ZoneVector<Handle<Map>>* receiver_maps) const; + + ElementAccessFeedback const& TryRefineElementAccessFeedback( + ElementAccessFeedback const& feedback, Node* receiver, + Node* effect) const; + + void FilterMapsAndGetPropertyAccessInfos( + NamedAccessFeedback const& feedback, AccessMode access_mode, + Node* receiver, Node* effect, + ZoneVector<PropertyAccessInfo>* access_infos); // Try to infer maps for the given {receiver} at the current {effect}. bool InferReceiverMaps(Node* receiver, Node* effect, - MapHandles* receiver_maps); + ZoneVector<Handle<Map>>* receiver_maps) const; + // Try to infer a root map for the {receiver} independent of the current // program location. - MaybeHandle<Map> InferReceiverRootMap(Node* receiver); + base::Optional<MapRef> InferReceiverRootMap(Node* receiver) const; // Checks if we know at compile time that the {receiver} either definitely // has the {prototype} in it's prototype chain, or the {receiver} definitely @@ -234,7 +241,7 @@ class V8_EXPORT_PRIVATE JSNativeContextSpecialization final kMayBeInPrototypeChain }; InferHasInPrototypeChainResult InferHasInPrototypeChain( - Node* receiver, Node* effect, Handle<HeapObject> prototype); + Node* receiver, Node* effect, HeapObjectRef const& prototype); Graph* graph() const; JSGraph* jsgraph() const { return jsgraph_; } @@ -248,7 +255,9 @@ class V8_EXPORT_PRIVATE JSNativeContextSpecialization final Flags flags() const { return flags_; } Handle<JSGlobalObject> global_object() const { return global_object_; } Handle<JSGlobalProxy> global_proxy() const { return global_proxy_; } - NativeContextRef native_context() const { return broker()->native_context(); } + NativeContextRef native_context() const { + return broker()->target_native_context(); + } CompilationDependencies* dependencies() const { return dependencies_; } Zone* zone() const { return zone_; } Zone* shared_zone() const { return shared_zone_; } diff --git a/deps/v8/src/compiler/js-operator.cc b/deps/v8/src/compiler/js-operator.cc index e0f97922b2..d0581b59a5 100644 --- a/deps/v8/src/compiler/js-operator.cc +++ b/deps/v8/src/compiler/js-operator.cc @@ -9,7 +9,6 @@ #include "src/base/lazy-instance.h" #include "src/compiler/opcodes.h" #include "src/compiler/operator.h" -#include "src/compiler/vector-slot-pair.h" #include "src/handles/handles-inl.h" #include "src/objects/objects-inl.h" @@ -51,7 +50,8 @@ bool operator!=(ConstructParameters const& lhs, } size_t hash_value(ConstructParameters const& p) { - return base::hash_combine(p.arity(), p.frequency(), p.feedback()); + return base::hash_combine(p.arity(), p.frequency(), + FeedbackSource::Hash()(p.feedback())); } std::ostream& operator<<(std::ostream& os, ConstructParameters const& p) { @@ -198,7 +198,8 @@ bool operator!=(StoreNamedOwnParameters const& lhs, } size_t hash_value(StoreNamedOwnParameters const& p) { - return base::hash_combine(p.name().location(), p.feedback()); + return base::hash_combine(p.name().location(), + FeedbackSource::Hash()(p.feedback())); } std::ostream& operator<<(std::ostream& os, StoreNamedOwnParameters const& p) { @@ -219,7 +220,7 @@ bool operator!=(FeedbackParameter const& lhs, FeedbackParameter const& rhs) { } size_t 
hash_value(FeedbackParameter const& p) { - return base::hash_combine(p.feedback()); + return FeedbackSource::Hash()(p.feedback()); } std::ostream& operator<<(std::ostream& os, FeedbackParameter const& p) { @@ -248,7 +249,7 @@ bool operator!=(NamedAccess const& lhs, NamedAccess const& rhs) { size_t hash_value(NamedAccess const& p) { return base::hash_combine(p.name().location(), p.language_mode(), - p.feedback()); + FeedbackSource::Hash()(p.feedback())); } @@ -283,13 +284,15 @@ bool operator!=(PropertyAccess const& lhs, PropertyAccess const& rhs) { PropertyAccess const& PropertyAccessOf(const Operator* op) { DCHECK(op->opcode() == IrOpcode::kJSHasProperty || op->opcode() == IrOpcode::kJSLoadProperty || - op->opcode() == IrOpcode::kJSStoreProperty); + op->opcode() == IrOpcode::kJSStoreProperty || + op->opcode() == IrOpcode::kJSGetIterator); return OpParameter<PropertyAccess>(op); } size_t hash_value(PropertyAccess const& p) { - return base::hash_combine(p.language_mode(), p.feedback()); + return base::hash_combine(p.language_mode(), + FeedbackSource::Hash()(p.feedback())); } @@ -339,7 +342,7 @@ bool operator!=(StoreGlobalParameters const& lhs, size_t hash_value(StoreGlobalParameters const& p) { return base::hash_combine(p.language_mode(), p.name().location(), - p.feedback()); + FeedbackSource::Hash()(p.feedback())); } @@ -518,7 +521,8 @@ bool operator!=(CreateLiteralParameters const& lhs, size_t hash_value(CreateLiteralParameters const& p) { - return base::hash_combine(p.constant().location(), p.feedback(), p.length(), + return base::hash_combine(p.constant().location(), + FeedbackSource::Hash()(p.feedback()), p.length(), p.flags()); } @@ -546,7 +550,7 @@ bool operator!=(CloneObjectParameters const& lhs, } size_t hash_value(CloneObjectParameters const& p) { - return base::hash_combine(p.feedback(), p.flags()); + return base::hash_combine(FeedbackSource::Hash()(p.feedback()), p.flags()); } std::ostream& operator<<(std::ostream& os, CloneObjectParameters const& p) { @@ -795,18 +799,18 @@ COMPARE_OP_LIST(COMPARE_OP) #undef COMPARE_OP const Operator* JSOperatorBuilder::StoreDataPropertyInLiteral( - const VectorSlotPair& feedback) { + const FeedbackSource& feedback) { FeedbackParameter parameters(feedback); return new (zone()) Operator1<FeedbackParameter>( // -- IrOpcode::kJSStoreDataPropertyInLiteral, Operator::kNoThrow, // opcode "JSStoreDataPropertyInLiteral", // name - 4, 1, 1, 0, 1, 0, // counts + 4, 1, 1, 0, 1, 1, // counts parameters); // parameter } const Operator* JSOperatorBuilder::StoreInArrayLiteral( - const VectorSlotPair& feedback) { + const FeedbackSource& feedback) { FeedbackParameter parameters(feedback); return new (zone()) Operator1<FeedbackParameter>( // -- IrOpcode::kJSStoreInArrayLiteral, @@ -828,7 +832,7 @@ const Operator* JSOperatorBuilder::CallForwardVarargs(size_t arity, const Operator* JSOperatorBuilder::Call(size_t arity, CallFrequency const& frequency, - VectorSlotPair const& feedback, + FeedbackSource const& feedback, ConvertReceiverMode convert_mode, SpeculationMode speculation_mode) { DCHECK_IMPLIES(speculation_mode == SpeculationMode::kAllowSpeculation, @@ -853,7 +857,7 @@ const Operator* JSOperatorBuilder::CallWithArrayLike( const Operator* JSOperatorBuilder::CallWithSpread( uint32_t arity, CallFrequency const& frequency, - VectorSlotPair const& feedback, SpeculationMode speculation_mode) { + FeedbackSource const& feedback, SpeculationMode speculation_mode) { DCHECK_IMPLIES(speculation_mode == SpeculationMode::kAllowSpeculation, feedback.IsValid()); 
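The hash_value overloads above switch from hashing the feedback implicitly to invoking an explicit FeedbackSource::Hash functor inside base::hash_combine. A self-contained analogue using only the standard library (hash_combine is approximated with the usual boost-style mix; all type names are stand-ins):

#include <cstddef>
#include <functional>

struct SourceSketch {
  int vector_id;
  int slot;
  struct Hash {
    size_t operator()(const SourceSketch& s) const {
      return std::hash<int>()(s.vector_id) * 31u + std::hash<int>()(s.slot);
    }
  };
};

inline size_t hash_combine(size_t seed, size_t value) {
  return seed ^ (value + 0x9e3779b9 + (seed << 6) + (seed >> 2));
}

struct NamedAccessSketch {
  const void* name;        // stands in for the Handle<Name> location
  SourceSketch feedback;
};

size_t hash_value(const NamedAccessSketch& p) {
  return hash_combine(std::hash<const void*>()(p.name),
                      SourceSketch::Hash()(p.feedback));
}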
CallParameters parameters(arity, frequency, feedback, @@ -903,7 +907,7 @@ const Operator* JSOperatorBuilder::ConstructForwardVarargs( // on AIX (v8:8193). const Operator* JSOperatorBuilder::Construct(uint32_t arity, CallFrequency const& frequency, - VectorSlotPair const& feedback) { + FeedbackSource const& feedback) { ConstructParameters parameters(arity, frequency, feedback); return new (zone()) Operator1<ConstructParameters>( // -- IrOpcode::kJSConstruct, Operator::kNoProperties, // opcode @@ -924,7 +928,7 @@ const Operator* JSOperatorBuilder::ConstructWithArrayLike( const Operator* JSOperatorBuilder::ConstructWithSpread( uint32_t arity, CallFrequency const& frequency, - VectorSlotPair const& feedback) { + FeedbackSource const& feedback) { ConstructParameters parameters(arity, frequency, feedback); return new (zone()) Operator1<ConstructParameters>( // -- IrOpcode::kJSConstructWithSpread, Operator::kNoProperties, // opcode @@ -934,7 +938,7 @@ const Operator* JSOperatorBuilder::ConstructWithSpread( } const Operator* JSOperatorBuilder::LoadNamed(Handle<Name> name, - const VectorSlotPair& feedback) { + const FeedbackSource& feedback) { NamedAccess access(LanguageMode::kSloppy, name, feedback); return new (zone()) Operator1<NamedAccess>( // -- IrOpcode::kJSLoadNamed, Operator::kNoProperties, // opcode @@ -944,7 +948,7 @@ const Operator* JSOperatorBuilder::LoadNamed(Handle<Name> name, } const Operator* JSOperatorBuilder::LoadProperty( - VectorSlotPair const& feedback) { + FeedbackSource const& feedback) { PropertyAccess access(LanguageMode::kSloppy, feedback); return new (zone()) Operator1<PropertyAccess>( // -- IrOpcode::kJSLoadProperty, Operator::kNoProperties, // opcode @@ -953,7 +957,16 @@ const Operator* JSOperatorBuilder::LoadProperty( access); // parameter } -const Operator* JSOperatorBuilder::HasProperty(VectorSlotPair const& feedback) { +const Operator* JSOperatorBuilder::GetIterator(FeedbackSource const& feedback) { + PropertyAccess access(LanguageMode::kSloppy, feedback); + return new (zone()) Operator1<PropertyAccess>( // -- + IrOpcode::kJSGetIterator, Operator::kNoProperties, // opcode + "JSGetIterator", // name + 1, 1, 1, 1, 1, 2, // counts + access); // parameter +} + +const Operator* JSOperatorBuilder::HasProperty(FeedbackSource const& feedback) { PropertyAccess access(LanguageMode::kSloppy, feedback); return new (zone()) Operator1<PropertyAccess>( // -- IrOpcode::kJSHasProperty, Operator::kNoProperties, // opcode @@ -962,7 +975,7 @@ const Operator* JSOperatorBuilder::HasProperty(VectorSlotPair const& feedback) { access); // parameter } -const Operator* JSOperatorBuilder::InstanceOf(VectorSlotPair const& feedback) { +const Operator* JSOperatorBuilder::InstanceOf(FeedbackSource const& feedback) { FeedbackParameter parameter(feedback); return new (zone()) Operator1<FeedbackParameter>( // -- IrOpcode::kJSInstanceOf, Operator::kNoProperties, // opcode @@ -1021,7 +1034,7 @@ int RestoreRegisterIndexOf(const Operator* op) { const Operator* JSOperatorBuilder::StoreNamed(LanguageMode language_mode, Handle<Name> name, - VectorSlotPair const& feedback) { + FeedbackSource const& feedback) { NamedAccess access(language_mode, name, feedback); return new (zone()) Operator1<NamedAccess>( // -- IrOpcode::kJSStoreNamed, Operator::kNoProperties, // opcode @@ -1030,9 +1043,8 @@ const Operator* JSOperatorBuilder::StoreNamed(LanguageMode language_mode, access); // parameter } - const Operator* JSOperatorBuilder::StoreProperty( - LanguageMode language_mode, VectorSlotPair const& feedback) { + 
LanguageMode language_mode, FeedbackSource const& feedback) { PropertyAccess access(language_mode, feedback); return new (zone()) Operator1<PropertyAccess>( // -- IrOpcode::kJSStoreProperty, Operator::kNoProperties, // opcode @@ -1042,7 +1054,7 @@ const Operator* JSOperatorBuilder::StoreProperty( } const Operator* JSOperatorBuilder::StoreNamedOwn( - Handle<Name> name, VectorSlotPair const& feedback) { + Handle<Name> name, FeedbackSource const& feedback) { StoreNamedOwnParameters parameters(name, feedback); return new (zone()) Operator1<StoreNamedOwnParameters>( // -- IrOpcode::kJSStoreNamedOwn, Operator::kNoProperties, // opcode @@ -1066,7 +1078,7 @@ const Operator* JSOperatorBuilder::CreateGeneratorObject() { } const Operator* JSOperatorBuilder::LoadGlobal(const Handle<Name>& name, - const VectorSlotPair& feedback, + const FeedbackSource& feedback, TypeofMode typeof_mode) { LoadGlobalParameters parameters(name, feedback, typeof_mode); return new (zone()) Operator1<LoadGlobalParameters>( // -- @@ -1076,10 +1088,9 @@ const Operator* JSOperatorBuilder::LoadGlobal(const Handle<Name>& name, parameters); // parameter } - const Operator* JSOperatorBuilder::StoreGlobal(LanguageMode language_mode, const Handle<Name>& name, - const VectorSlotPair& feedback) { + const FeedbackSource& feedback) { StoreGlobalParameters parameters(language_mode, feedback, name); return new (zone()) Operator1<StoreGlobalParameters>( // -- IrOpcode::kJSStoreGlobal, Operator::kNoProperties, // opcode @@ -1088,7 +1099,6 @@ const Operator* JSOperatorBuilder::StoreGlobal(LanguageMode language_mode, parameters); // parameter } - const Operator* JSOperatorBuilder::LoadContext(size_t depth, size_t index, bool immutable) { ContextAccess access(depth, index, immutable); @@ -1203,7 +1213,7 @@ const Operator* JSOperatorBuilder::CreateClosure( const Operator* JSOperatorBuilder::CreateLiteralArray( Handle<ArrayBoilerplateDescription> description, - VectorSlotPair const& feedback, int literal_flags, int number_of_elements) { + FeedbackSource const& feedback, int literal_flags, int number_of_elements) { CreateLiteralParameters parameters(description, feedback, number_of_elements, literal_flags); return new (zone()) Operator1<CreateLiteralParameters>( // -- @@ -1215,7 +1225,7 @@ const Operator* JSOperatorBuilder::CreateLiteralArray( } const Operator* JSOperatorBuilder::CreateEmptyLiteralArray( - VectorSlotPair const& feedback) { + FeedbackSource const& feedback) { FeedbackParameter parameters(feedback); return new (zone()) Operator1<FeedbackParameter>( // -- IrOpcode::kJSCreateEmptyLiteralArray, // opcode @@ -1235,7 +1245,7 @@ const Operator* JSOperatorBuilder::CreateArrayFromIterable() { const Operator* JSOperatorBuilder::CreateLiteralObject( Handle<ObjectBoilerplateDescription> constant_properties, - VectorSlotPair const& feedback, int literal_flags, + FeedbackSource const& feedback, int literal_flags, int number_of_properties) { CreateLiteralParameters parameters(constant_properties, feedback, number_of_properties, literal_flags); @@ -1247,7 +1257,7 @@ const Operator* JSOperatorBuilder::CreateLiteralObject( parameters); // parameter } -const Operator* JSOperatorBuilder::CloneObject(VectorSlotPair const& feedback, +const Operator* JSOperatorBuilder::CloneObject(FeedbackSource const& feedback, int literal_flags) { CloneObjectParameters parameters(feedback, literal_flags); return new (zone()) Operator1<CloneObjectParameters>( // -- @@ -1267,7 +1277,7 @@ const Operator* JSOperatorBuilder::CreateEmptyLiteralObject() { } const Operator* 
JSOperatorBuilder::CreateLiteralRegExp( - Handle<String> constant_pattern, VectorSlotPair const& feedback, + Handle<String> constant_pattern, FeedbackSource const& feedback, int literal_flags) { CreateLiteralParameters parameters(constant_pattern, feedback, -1, literal_flags); diff --git a/deps/v8/src/compiler/js-operator.h b/deps/v8/src/compiler/js-operator.h index e7d9acb152..f795a2f402 100644 --- a/deps/v8/src/compiler/js-operator.h +++ b/deps/v8/src/compiler/js-operator.h @@ -7,7 +7,7 @@ #include "src/base/compiler-specific.h" #include "src/common/globals.h" -#include "src/compiler/vector-slot-pair.h" +#include "src/compiler/feedback-source.h" #include "src/handles/maybe-handles.h" #include "src/objects/type-hints.h" #include "src/runtime/runtime.h" @@ -102,17 +102,17 @@ ConstructForwardVarargsParameters const& ConstructForwardVarargsParametersOf( class ConstructParameters final { public: ConstructParameters(uint32_t arity, CallFrequency const& frequency, - VectorSlotPair const& feedback) + FeedbackSource const& feedback) : arity_(arity), frequency_(frequency), feedback_(feedback) {} uint32_t arity() const { return arity_; } CallFrequency const& frequency() const { return frequency_; } - VectorSlotPair const& feedback() const { return feedback_; } + FeedbackSource const& feedback() const { return feedback_; } private: uint32_t const arity_; CallFrequency const frequency_; - VectorSlotPair const feedback_; + FeedbackSource const feedback_; }; bool operator==(ConstructParameters const&, ConstructParameters const&); @@ -163,7 +163,7 @@ CallForwardVarargsParameters const& CallForwardVarargsParametersOf( class CallParameters final { public: CallParameters(size_t arity, CallFrequency const& frequency, - VectorSlotPair const& feedback, + FeedbackSource const& feedback, ConvertReceiverMode convert_mode, SpeculationMode speculation_mode) : bit_field_(ArityField::encode(arity) | @@ -177,7 +177,7 @@ class CallParameters final { ConvertReceiverMode convert_mode() const { return ConvertReceiverModeField::decode(bit_field_); } - VectorSlotPair const& feedback() const { return feedback_; } + FeedbackSource const& feedback() const { return feedback_; } SpeculationMode speculation_mode() const { return SpeculationModeField::decode(bit_field_); @@ -192,7 +192,9 @@ class CallParameters final { private: friend size_t hash_value(CallParameters const& p) { - return base::hash_combine(p.bit_field_, p.frequency_, p.feedback_); + FeedbackSource::Hash feedback_hash; + return base::hash_combine(p.bit_field_, p.frequency_, + feedback_hash(p.feedback_)); } using ArityField = BitField<size_t, 0, 28>; @@ -201,7 +203,7 @@ class CallParameters final { uint32_t const bit_field_; CallFrequency const frequency_; - VectorSlotPair const feedback_; + FeedbackSource const feedback_; }; size_t hash_value(CallParameters const&); @@ -297,15 +299,15 @@ CreateFunctionContextParameters const& CreateFunctionContextParametersOf( // Defines parameters for JSStoreNamedOwn operator. 
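Before the parameter classes below, a sketch of what they now carry. FeedbackSource's exact members are not shown in this diff; the stand-in here assumes a default-constructed source is invalid, matching the DCHECK_IMPLIES(speculation_mode == kAllowSpeculation, feedback.IsValid()) guards in the builder methods above:

#include <cassert>

namespace sketch {

struct FeedbackSource {
  int vector_id = -1;
  int slot = -1;
  bool IsValid() const { return slot >= 0; }  // assumed semantics
};

enum class SpeculationMode { kDisallowSpeculation, kAllowSpeculation };

class CallParametersSketch final {
 public:
  CallParametersSketch(unsigned arity, FeedbackSource const& feedback,
                       SpeculationMode mode)
      : arity_(arity), feedback_(feedback), mode_(mode) {
    // Mirrors the DCHECK_IMPLIES in JSOperatorBuilder::Call: speculation
    // is only allowed when real feedback is available.
    assert(mode != SpeculationMode::kAllowSpeculation || feedback.IsValid());
  }

  unsigned arity() const { return arity_; }
  FeedbackSource const& feedback() const { return feedback_; }
  SpeculationMode speculation_mode() const { return mode_; }

 private:
  unsigned const arity_;
  FeedbackSource const feedback_;
  SpeculationMode const mode_;
};

}  // namespace sketch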
class StoreNamedOwnParameters final { public: - StoreNamedOwnParameters(Handle<Name> name, VectorSlotPair const& feedback) + StoreNamedOwnParameters(Handle<Name> name, FeedbackSource const& feedback) : name_(name), feedback_(feedback) {} Handle<Name> name() const { return name_; } - VectorSlotPair const& feedback() const { return feedback_; } + FeedbackSource const& feedback() const { return feedback_; } private: Handle<Name> const name_; - VectorSlotPair const feedback_; + FeedbackSource const feedback_; }; bool operator==(StoreNamedOwnParameters const&, StoreNamedOwnParameters const&); @@ -322,13 +324,13 @@ const StoreNamedOwnParameters& StoreNamedOwnParametersOf(const Operator* op); // and JSStoreDataPropertyInLiteral operators. class FeedbackParameter final { public: - explicit FeedbackParameter(VectorSlotPair const& feedback) + explicit FeedbackParameter(FeedbackSource const& feedback) : feedback_(feedback) {} - VectorSlotPair const& feedback() const { return feedback_; } + FeedbackSource const& feedback() const { return feedback_; } private: - VectorSlotPair const feedback_; + FeedbackSource const feedback_; }; bool operator==(FeedbackParameter const&, FeedbackParameter const&); @@ -345,16 +347,16 @@ const FeedbackParameter& FeedbackParameterOf(const Operator* op); class NamedAccess final { public: NamedAccess(LanguageMode language_mode, Handle<Name> name, - VectorSlotPair const& feedback) + FeedbackSource const& feedback) : name_(name), feedback_(feedback), language_mode_(language_mode) {} Handle<Name> name() const { return name_; } LanguageMode language_mode() const { return language_mode_; } - VectorSlotPair const& feedback() const { return feedback_; } + FeedbackSource const& feedback() const { return feedback_; } private: Handle<Name> const name_; - VectorSlotPair const feedback_; + FeedbackSource const feedback_; LanguageMode const language_mode_; }; @@ -372,18 +374,18 @@ const NamedAccess& NamedAccessOf(const Operator* op); // used as a parameter by JSLoadGlobal operator. 
class LoadGlobalParameters final { public: - LoadGlobalParameters(const Handle<Name>& name, const VectorSlotPair& feedback, + LoadGlobalParameters(const Handle<Name>& name, const FeedbackSource& feedback, TypeofMode typeof_mode) : name_(name), feedback_(feedback), typeof_mode_(typeof_mode) {} const Handle<Name>& name() const { return name_; } TypeofMode typeof_mode() const { return typeof_mode_; } - const VectorSlotPair& feedback() const { return feedback_; } + const FeedbackSource& feedback() const { return feedback_; } private: const Handle<Name> name_; - const VectorSlotPair feedback_; + const FeedbackSource feedback_; const TypeofMode typeof_mode_; }; @@ -402,18 +404,18 @@ const LoadGlobalParameters& LoadGlobalParametersOf(const Operator* op); class StoreGlobalParameters final { public: StoreGlobalParameters(LanguageMode language_mode, - const VectorSlotPair& feedback, + const FeedbackSource& feedback, const Handle<Name>& name) : language_mode_(language_mode), name_(name), feedback_(feedback) {} LanguageMode language_mode() const { return language_mode_; } - const VectorSlotPair& feedback() const { return feedback_; } + const FeedbackSource& feedback() const { return feedback_; } const Handle<Name>& name() const { return name_; } private: const LanguageMode language_mode_; const Handle<Name> name_; - const VectorSlotPair feedback_; + const FeedbackSource feedback_; }; bool operator==(StoreGlobalParameters const&, StoreGlobalParameters const&); @@ -430,14 +432,14 @@ const StoreGlobalParameters& StoreGlobalParametersOf(const Operator* op); // as a parameter by the JSLoadProperty and JSStoreProperty operators. class PropertyAccess final { public: - PropertyAccess(LanguageMode language_mode, VectorSlotPair const& feedback) + PropertyAccess(LanguageMode language_mode, FeedbackSource const& feedback) : feedback_(feedback), language_mode_(language_mode) {} LanguageMode language_mode() const { return language_mode_; } - VectorSlotPair const& feedback() const { return feedback_; } + FeedbackSource const& feedback() const { return feedback_; } private: - VectorSlotPair const feedback_; + FeedbackSource const feedback_; LanguageMode const language_mode_; }; @@ -602,20 +604,20 @@ const CreateClosureParameters& CreateClosureParametersOf(const Operator* op); class CreateLiteralParameters final { public: CreateLiteralParameters(Handle<HeapObject> constant, - VectorSlotPair const& feedback, int length, int flags) + FeedbackSource const& feedback, int length, int flags) : constant_(constant), feedback_(feedback), length_(length), flags_(flags) {} Handle<HeapObject> constant() const { return constant_; } - VectorSlotPair const& feedback() const { return feedback_; } + FeedbackSource const& feedback() const { return feedback_; } int length() const { return length_; } int flags() const { return flags_; } private: Handle<HeapObject> const constant_; - VectorSlotPair const feedback_; + FeedbackSource const feedback_; int const length_; int const flags_; }; @@ -631,14 +633,14 @@ const CreateLiteralParameters& CreateLiteralParametersOf(const Operator* op); class CloneObjectParameters final { public: - CloneObjectParameters(VectorSlotPair const& feedback, int flags) + CloneObjectParameters(FeedbackSource const& feedback, int flags) : feedback_(feedback), flags_(flags) {} - VectorSlotPair const& feedback() const { return feedback_; } + FeedbackSource const& feedback() const { return feedback_; } int flags() const { return flags_; } private: - VectorSlotPair const feedback_; + FeedbackSource const feedback_; int 
const flags_; }; @@ -735,32 +737,32 @@ class V8_EXPORT_PRIVATE JSOperatorBuilder final const Operator* CreateTypedArray(); const Operator* CreateLiteralArray( Handle<ArrayBoilerplateDescription> constant, - VectorSlotPair const& feedback, int literal_flags, + FeedbackSource const& feedback, int literal_flags, int number_of_elements); - const Operator* CreateEmptyLiteralArray(VectorSlotPair const& feedback); + const Operator* CreateEmptyLiteralArray(FeedbackSource const& feedback); const Operator* CreateArrayFromIterable(); const Operator* CreateEmptyLiteralObject(); const Operator* CreateLiteralObject( Handle<ObjectBoilerplateDescription> constant, - VectorSlotPair const& feedback, int literal_flags, + FeedbackSource const& feedback, int literal_flags, int number_of_properties); - const Operator* CloneObject(VectorSlotPair const& feedback, + const Operator* CloneObject(FeedbackSource const& feedback, int literal_flags); const Operator* CreateLiteralRegExp(Handle<String> constant_pattern, - VectorSlotPair const& feedback, + FeedbackSource const& feedback, int literal_flags); const Operator* CallForwardVarargs(size_t arity, uint32_t start_index); const Operator* Call( size_t arity, CallFrequency const& frequency = CallFrequency(), - VectorSlotPair const& feedback = VectorSlotPair(), + FeedbackSource const& feedback = FeedbackSource(), ConvertReceiverMode convert_mode = ConvertReceiverMode::kAny, SpeculationMode speculation_mode = SpeculationMode::kDisallowSpeculation); const Operator* CallWithArrayLike(CallFrequency const& frequency); const Operator* CallWithSpread( uint32_t arity, CallFrequency const& frequency = CallFrequency(), - VectorSlotPair const& feedback = VectorSlotPair(), + FeedbackSource const& feedback = FeedbackSource(), SpeculationMode speculation_mode = SpeculationMode::kDisallowSpeculation); const Operator* CallRuntime(Runtime::FunctionId id); const Operator* CallRuntime(Runtime::FunctionId id, size_t arity); @@ -769,39 +771,39 @@ class V8_EXPORT_PRIVATE JSOperatorBuilder final const Operator* ConstructForwardVarargs(size_t arity, uint32_t start_index); const Operator* Construct(uint32_t arity, CallFrequency const& frequency = CallFrequency(), - VectorSlotPair const& feedback = VectorSlotPair()); + FeedbackSource const& feedback = FeedbackSource()); const Operator* ConstructWithArrayLike(CallFrequency const& frequency); const Operator* ConstructWithSpread( uint32_t arity, CallFrequency const& frequency = CallFrequency(), - VectorSlotPair const& feedback = VectorSlotPair()); + FeedbackSource const& feedback = FeedbackSource()); - const Operator* LoadProperty(VectorSlotPair const& feedback); - const Operator* LoadNamed(Handle<Name> name, VectorSlotPair const& feedback); + const Operator* LoadProperty(FeedbackSource const& feedback); + const Operator* LoadNamed(Handle<Name> name, FeedbackSource const& feedback); const Operator* StoreProperty(LanguageMode language_mode, - VectorSlotPair const& feedback); + FeedbackSource const& feedback); const Operator* StoreNamed(LanguageMode language_mode, Handle<Name> name, - VectorSlotPair const& feedback); + FeedbackSource const& feedback); const Operator* StoreNamedOwn(Handle<Name> name, - VectorSlotPair const& feedback); - const Operator* StoreDataPropertyInLiteral(const VectorSlotPair& feedback); - const Operator* StoreInArrayLiteral(const VectorSlotPair& feedback); + FeedbackSource const& feedback); + const Operator* StoreDataPropertyInLiteral(const FeedbackSource& feedback); + const Operator* StoreInArrayLiteral(const FeedbackSource& 
feedback); const Operator* DeleteProperty(); - const Operator* HasProperty(VectorSlotPair const& feedback); + const Operator* HasProperty(FeedbackSource const& feedback); const Operator* GetSuperConstructor(); const Operator* CreateGeneratorObject(); const Operator* LoadGlobal(const Handle<Name>& name, - const VectorSlotPair& feedback, + const FeedbackSource& feedback, TypeofMode typeof_mode = NOT_INSIDE_TYPEOF); const Operator* StoreGlobal(LanguageMode language_mode, const Handle<Name>& name, - const VectorSlotPair& feedback); + const FeedbackSource& feedback); const Operator* LoadContext(size_t depth, size_t index, bool immutable); const Operator* StoreContext(size_t depth, size_t index); @@ -810,7 +812,7 @@ class V8_EXPORT_PRIVATE JSOperatorBuilder final const Operator* StoreModule(int32_t cell_index); const Operator* HasInPrototypeChain(); - const Operator* InstanceOf(const VectorSlotPair& feedback); + const Operator* InstanceOf(const FeedbackSource& feedback); const Operator* OrdinaryHasInstance(); const Operator* AsyncFunctionEnter(); @@ -854,6 +856,8 @@ class V8_EXPORT_PRIVATE JSOperatorBuilder final const Operator* ParseInt(); const Operator* RegExpTest(); + const Operator* GetIterator(FeedbackSource const& feedback); + private: Zone* zone() const { return zone_; } diff --git a/deps/v8/src/compiler/js-type-hint-lowering.cc b/deps/v8/src/compiler/js-type-hint-lowering.cc index f3696bcc48..e1ff928cec 100644 --- a/deps/v8/src/compiler/js-type-hint-lowering.cc +++ b/deps/v8/src/compiler/js-type-hint-lowering.cc @@ -6,6 +6,7 @@ #include "src/compiler/access-builder.h" #include "src/compiler/js-graph.h" +#include "src/compiler/js-heap-broker.h" #include "src/compiler/operator-properties.h" #include "src/compiler/simplified-operator.h" #include "src/objects/feedback-vector.h" @@ -78,16 +79,6 @@ class JSSpeculativeBinopBuilder final { control_(control), slot_(slot) {} - BinaryOperationHint GetBinaryOperationHint() { - FeedbackNexus nexus(feedback_vector(), slot_); - return nexus.GetBinaryOperationFeedback(); - } - - CompareOperationHint GetCompareOperationHint() { - FeedbackNexus nexus(feedback_vector(), slot_); - return nexus.GetCompareOperationFeedback(); - } - bool GetBinaryNumberOperationHint(NumberOperationHint* hint) { return BinaryOperationHintToNumberOperationHint(GetBinaryOperationHint(), hint); @@ -239,34 +230,52 @@ class JSSpeculativeBinopBuilder final { JSOperatorBuilder* javascript() { return jsgraph()->javascript(); } SimplifiedOperatorBuilder* simplified() { return jsgraph()->simplified(); } CommonOperatorBuilder* common() { return jsgraph()->common(); } - const Handle<FeedbackVector>& feedback_vector() const { - return lowering_->feedback_vector(); - } private: - const JSTypeHintLowering* lowering_; - const Operator* op_; + BinaryOperationHint GetBinaryOperationHint() { + return lowering_->GetBinaryOperationHint(slot_); + } + + CompareOperationHint GetCompareOperationHint() { + return lowering_->GetCompareOperationHint(slot_); + } + + JSTypeHintLowering const* const lowering_; + Operator const* const op_; Node* left_; Node* right_; - Node* effect_; - Node* control_; - FeedbackSlot slot_; + Node* const effect_; + Node* const control_; + FeedbackSlot const slot_; }; -JSTypeHintLowering::JSTypeHintLowering(JSGraph* jsgraph, - Handle<FeedbackVector> feedback_vector, +JSTypeHintLowering::JSTypeHintLowering(JSHeapBroker* broker, JSGraph* jsgraph, + FeedbackVectorRef feedback_vector, Flags flags) - : jsgraph_(jsgraph), flags_(flags), feedback_vector_(feedback_vector) {} + : 
broker_(broker), + jsgraph_(jsgraph), + flags_(flags), + feedback_vector_(feedback_vector) {} Isolate* JSTypeHintLowering::isolate() const { return jsgraph()->isolate(); } +BinaryOperationHint JSTypeHintLowering::GetBinaryOperationHint( + FeedbackSlot slot) const { + FeedbackSource source(feedback_vector(), slot); + return broker()->GetFeedbackForBinaryOperation(source); +} + +CompareOperationHint JSTypeHintLowering::GetCompareOperationHint( + FeedbackSlot slot) const { + FeedbackSource source(feedback_vector(), slot); + return broker()->GetFeedbackForCompareOperation(source); +} + JSTypeHintLowering::LoweringResult JSTypeHintLowering::ReduceUnaryOperation( const Operator* op, Node* operand, Node* effect, Node* control, FeedbackSlot slot) const { - DCHECK(!slot.IsInvalid()); - FeedbackNexus nexus(feedback_vector(), slot); if (Node* node = TryBuildSoftDeopt( - nexus, effect, control, + slot, effect, control, DeoptimizeReason::kInsufficientTypeFeedbackForUnaryOperation)) { return LoweringResult::Exit(node); } @@ -309,9 +318,7 @@ JSTypeHintLowering::LoweringResult JSTypeHintLowering::ReduceUnaryOperation( control, slot); node = b.TryBuildNumberBinop(); if (!node) { - FeedbackNexus nexus(feedback_vector(), slot); - if (nexus.GetBinaryOperationFeedback() == - BinaryOperationHint::kBigInt) { + if (GetBinaryOperationHint(slot) == BinaryOperationHint::kBigInt) { const Operator* op = jsgraph()->simplified()->SpeculativeBigIntNegate( BigIntOperationHint::kBigInt); node = jsgraph()->graph()->NewNode(op, operand, effect, control); @@ -335,10 +342,8 @@ JSTypeHintLowering::LoweringResult JSTypeHintLowering::ReduceBinaryOperation( FeedbackSlot slot) const { switch (op->opcode()) { case IrOpcode::kJSStrictEqual: { - DCHECK(!slot.IsInvalid()); - FeedbackNexus nexus(feedback_vector(), slot); if (Node* node = TryBuildSoftDeopt( - nexus, effect, control, + slot, effect, control, DeoptimizeReason::kInsufficientTypeFeedbackForCompareOperation)) { return LoweringResult::Exit(node); } @@ -351,10 +356,8 @@ JSTypeHintLowering::LoweringResult JSTypeHintLowering::ReduceBinaryOperation( case IrOpcode::kJSGreaterThan: case IrOpcode::kJSLessThanOrEqual: case IrOpcode::kJSGreaterThanOrEqual: { - DCHECK(!slot.IsInvalid()); - FeedbackNexus nexus(feedback_vector(), slot); if (Node* node = TryBuildSoftDeopt( - nexus, effect, control, + slot, effect, control, DeoptimizeReason::kInsufficientTypeFeedbackForCompareOperation)) { return LoweringResult::Exit(node); } @@ -365,10 +368,8 @@ JSTypeHintLowering::LoweringResult JSTypeHintLowering::ReduceBinaryOperation( break; } case IrOpcode::kJSInstanceOf: { - DCHECK(!slot.IsInvalid()); - FeedbackNexus nexus(feedback_vector(), slot); if (Node* node = TryBuildSoftDeopt( - nexus, effect, control, + slot, effect, control, DeoptimizeReason::kInsufficientTypeFeedbackForCompareOperation)) { return LoweringResult::Exit(node); } @@ -387,10 +388,8 @@ JSTypeHintLowering::LoweringResult JSTypeHintLowering::ReduceBinaryOperation( case IrOpcode::kJSMultiply: case IrOpcode::kJSDivide: case IrOpcode::kJSModulus: { - DCHECK(!slot.IsInvalid()); - FeedbackNexus nexus(feedback_vector(), slot); if (Node* node = TryBuildSoftDeopt( - nexus, effect, control, + slot, effect, control, DeoptimizeReason::kInsufficientTypeFeedbackForBinaryOperation)) { return LoweringResult::Exit(node); } @@ -406,6 +405,11 @@ JSTypeHintLowering::LoweringResult JSTypeHintLowering::ReduceBinaryOperation( break; } case IrOpcode::kJSExponentiate: { + if (Node* node = TryBuildSoftDeopt( + slot, effect, control, + 
DeoptimizeReason::kInsufficientTypeFeedbackForBinaryOperation)) { + return LoweringResult::Exit(node); + } // TODO(neis): Introduce a SpeculativeNumberPow operator? break; } @@ -418,10 +422,8 @@ JSTypeHintLowering::LoweringResult JSTypeHintLowering::ReduceBinaryOperation( JSTypeHintLowering::LoweringResult JSTypeHintLowering::ReduceForInNextOperation( Node* receiver, Node* cache_array, Node* cache_type, Node* index, Node* effect, Node* control, FeedbackSlot slot) const { - DCHECK(!slot.IsInvalid()); - FeedbackNexus nexus(feedback_vector(), slot); if (Node* node = TryBuildSoftDeopt( - nexus, effect, control, + slot, effect, control, DeoptimizeReason::kInsufficientTypeFeedbackForForIn)) { return LoweringResult::Exit(node); } @@ -432,10 +434,8 @@ JSTypeHintLowering::LoweringResult JSTypeHintLowering::ReduceForInPrepareOperation(Node* enumerator, Node* effect, Node* control, FeedbackSlot slot) const { - DCHECK(!slot.IsInvalid()); - FeedbackNexus nexus(feedback_vector(), slot); if (Node* node = TryBuildSoftDeopt( - nexus, effect, control, + slot, effect, control, DeoptimizeReason::kInsufficientTypeFeedbackForForIn)) { return LoweringResult::Exit(node); } @@ -445,12 +445,11 @@ JSTypeHintLowering::ReduceForInPrepareOperation(Node* enumerator, Node* effect, JSTypeHintLowering::LoweringResult JSTypeHintLowering::ReduceToNumberOperation( Node* input, Node* effect, Node* control, FeedbackSlot slot) const { DCHECK(!slot.IsInvalid()); - FeedbackNexus nexus(feedback_vector(), slot); NumberOperationHint hint; - if (BinaryOperationHintToNumberOperationHint( - nexus.GetBinaryOperationFeedback(), &hint)) { + if (BinaryOperationHintToNumberOperationHint(GetBinaryOperationHint(slot), + &hint)) { Node* node = jsgraph()->graph()->NewNode( - jsgraph()->simplified()->SpeculativeToNumber(hint, VectorSlotPair()), + jsgraph()->simplified()->SpeculativeToNumber(hint, FeedbackSource()), input, effect, control); return LoweringResult::SideEffectFree(node, node, control); } @@ -462,10 +461,8 @@ JSTypeHintLowering::LoweringResult JSTypeHintLowering::ReduceCallOperation( Node* control, FeedbackSlot slot) const { DCHECK(op->opcode() == IrOpcode::kJSCall || op->opcode() == IrOpcode::kJSCallWithSpread); - DCHECK(!slot.IsInvalid()); - FeedbackNexus nexus(feedback_vector(), slot); if (Node* node = TryBuildSoftDeopt( - nexus, effect, control, + slot, effect, control, DeoptimizeReason::kInsufficientTypeFeedbackForCall)) { return LoweringResult::Exit(node); } @@ -477,10 +474,8 @@ JSTypeHintLowering::LoweringResult JSTypeHintLowering::ReduceConstructOperation( Node* control, FeedbackSlot slot) const { DCHECK(op->opcode() == IrOpcode::kJSConstruct || op->opcode() == IrOpcode::kJSConstructWithSpread); - DCHECK(!slot.IsInvalid()); - FeedbackNexus nexus(feedback_vector(), slot); if (Node* node = TryBuildSoftDeopt( - nexus, effect, control, + slot, effect, control, DeoptimizeReason::kInsufficientTypeFeedbackForConstruct)) { return LoweringResult::Exit(node); } @@ -490,11 +485,11 @@ JSTypeHintLowering::LoweringResult JSTypeHintLowering::ReduceConstructOperation( JSTypeHintLowering::LoweringResult JSTypeHintLowering::ReduceLoadNamedOperation( const Operator* op, Node* receiver, Node* effect, Node* control, FeedbackSlot slot) const { - DCHECK_EQ(IrOpcode::kJSLoadNamed, op->opcode()); - DCHECK(!slot.IsInvalid()); - FeedbackNexus nexus(feedback_vector(), slot); + // JSGetIterator involves a named load of the Symbol.iterator property. 
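The js-type-hint-lowering hunks above all make the same two moves: hint queries go through the JSHeapBroker (GetFeedbackForBinaryOperation / GetFeedbackForCompareOperation) instead of an ad-hoc FeedbackNexus, and TryBuildSoftDeopt now takes the raw FeedbackSlot. A hypothetical, compilable rendering of the resulting control flow (Broker, Hint, and Result are stand-ins, not the real classes):

namespace sketch {

enum class Hint { kNone, kNumber, kBigInt, kAny };

struct Broker {
  // Stands in for broker()->FeedbackIsInsufficient(source).
  bool FeedbackIsInsufficient(int slot) const { return slot < 0; }
  // Stands in for broker()->GetFeedbackForBinaryOperation(source).
  Hint GetFeedbackForBinaryOperation(int slot) const {
    return slot % 2 ? Hint::kNumber : Hint::kAny;
  }
};

enum class Result { kExit, kSideEffectFree, kNoChange };

Result ReduceBinaryOperation(Broker const& broker, int slot,
                             bool bailout_on_uninitialized) {
  // Step 1: soft-deopt if the feedback cannot support speculation.
  if (bailout_on_uninitialized && broker.FeedbackIsInsufficient(slot)) {
    return Result::kExit;  // stands in for LoweringResult::Exit(deopt)
  }
  // Step 2: otherwise lower speculatively using the broker's hint.
  Hint hint = broker.GetFeedbackForBinaryOperation(slot);
  return hint == Hint::kAny ? Result::kNoChange : Result::kSideEffectFree;
}

}  // namespace sketch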
+ DCHECK(op->opcode() == IrOpcode::kJSLoadNamed || + op->opcode() == IrOpcode::kJSGetIterator); if (Node* node = TryBuildSoftDeopt( - nexus, effect, control, + slot, effect, control, DeoptimizeReason::kInsufficientTypeFeedbackForGenericNamedAccess)) { return LoweringResult::Exit(node); } @@ -505,10 +500,8 @@ JSTypeHintLowering::LoweringResult JSTypeHintLowering::ReduceLoadKeyedOperation( const Operator* op, Node* obj, Node* key, Node* effect, Node* control, FeedbackSlot slot) const { DCHECK_EQ(IrOpcode::kJSLoadProperty, op->opcode()); - DCHECK(!slot.IsInvalid()); - FeedbackNexus nexus(feedback_vector(), slot); if (Node* node = TryBuildSoftDeopt( - nexus, effect, control, + slot, effect, control, DeoptimizeReason::kInsufficientTypeFeedbackForGenericKeyedAccess)) { return LoweringResult::Exit(node); } @@ -522,10 +515,8 @@ JSTypeHintLowering::ReduceStoreNamedOperation(const Operator* op, Node* obj, FeedbackSlot slot) const { DCHECK(op->opcode() == IrOpcode::kJSStoreNamed || op->opcode() == IrOpcode::kJSStoreNamedOwn); - DCHECK(!slot.IsInvalid()); - FeedbackNexus nexus(feedback_vector(), slot); if (Node* node = TryBuildSoftDeopt( - nexus, effect, control, + slot, effect, control, DeoptimizeReason::kInsufficientTypeFeedbackForGenericNamedAccess)) { return LoweringResult::Exit(node); } @@ -538,31 +529,32 @@ JSTypeHintLowering::ReduceStoreKeyedOperation(const Operator* op, Node* obj, Node* effect, Node* control, FeedbackSlot slot) const { DCHECK(op->opcode() == IrOpcode::kJSStoreProperty || - op->opcode() == IrOpcode::kJSStoreInArrayLiteral); - DCHECK(!slot.IsInvalid()); - FeedbackNexus nexus(feedback_vector(), slot); + op->opcode() == IrOpcode::kJSStoreInArrayLiteral || + op->opcode() == IrOpcode::kJSStoreDataPropertyInLiteral); if (Node* node = TryBuildSoftDeopt( - nexus, effect, control, + slot, effect, control, DeoptimizeReason::kInsufficientTypeFeedbackForGenericKeyedAccess)) { return LoweringResult::Exit(node); } return LoweringResult::NoChange(); } -Node* JSTypeHintLowering::TryBuildSoftDeopt(FeedbackNexus& nexus, Node* effect, +Node* JSTypeHintLowering::TryBuildSoftDeopt(FeedbackSlot slot, Node* effect, Node* control, DeoptimizeReason reason) const { - if ((flags() & kBailoutOnUninitialized) && nexus.IsUninitialized()) { - Node* deoptimize = jsgraph()->graph()->NewNode( - jsgraph()->common()->Deoptimize(DeoptimizeKind::kSoft, reason, - VectorSlotPair()), - jsgraph()->Dead(), effect, control); - Node* frame_state = - NodeProperties::FindFrameStateBefore(deoptimize, jsgraph()->Dead()); - deoptimize->ReplaceInput(0, frame_state); - return deoptimize; - } - return nullptr; + if (!(flags() & kBailoutOnUninitialized)) return nullptr; + + FeedbackSource source(feedback_vector(), slot); + if (!broker()->FeedbackIsInsufficient(source)) return nullptr; + + Node* deoptimize = jsgraph()->graph()->NewNode( + jsgraph()->common()->Deoptimize(DeoptimizeKind::kSoft, reason, + FeedbackSource()), + jsgraph()->Dead(), effect, control); + Node* frame_state = + NodeProperties::FindFrameStateBefore(deoptimize, jsgraph()->Dead()); + deoptimize->ReplaceInput(0, frame_state); + return deoptimize; } } // namespace compiler diff --git a/deps/v8/src/compiler/js-type-hint-lowering.h b/deps/v8/src/compiler/js-type-hint-lowering.h index a74c019355..3e46fb2ec2 100644 --- a/deps/v8/src/compiler/js-type-hint-lowering.h +++ b/deps/v8/src/compiler/js-type-hint-lowering.h @@ -41,8 +41,8 @@ class JSTypeHintLowering { enum Flag { kNoFlags = 0u, kBailoutOnUninitialized = 1u << 1 }; using Flags = base::Flags<Flag>; - 
JSTypeHintLowering(JSGraph* jsgraph, Handle<FeedbackVector> feedback_vector, - Flags flags); + JSTypeHintLowering(JSHeapBroker* broker, JSGraph* jsgraph, + FeedbackVectorRef feedback_vector, Flags flags); // {LoweringResult} describes the result of lowering. The following outcomes // are possible: @@ -153,20 +153,22 @@ class JSTypeHintLowering { private: friend class JSSpeculativeBinopBuilder; - Node* TryBuildSoftDeopt(FeedbackNexus& nexus, // NOLINT(runtime/references) - Node* effect, Node* control, + + BinaryOperationHint GetBinaryOperationHint(FeedbackSlot slot) const; + CompareOperationHint GetCompareOperationHint(FeedbackSlot slot) const; + Node* TryBuildSoftDeopt(FeedbackSlot slot, Node* effect, Node* control, DeoptimizeReason reson) const; + JSHeapBroker* broker() const { return broker_; } JSGraph* jsgraph() const { return jsgraph_; } Isolate* isolate() const; Flags flags() const { return flags_; } - const Handle<FeedbackVector>& feedback_vector() const { - return feedback_vector_; - } + FeedbackVectorRef const& feedback_vector() const { return feedback_vector_; } - JSGraph* jsgraph_; + JSHeapBroker* const broker_; + JSGraph* const jsgraph_; Flags const flags_; - Handle<FeedbackVector> feedback_vector_; + FeedbackVectorRef const feedback_vector_; DISALLOW_COPY_AND_ASSIGN(JSTypeHintLowering); }; diff --git a/deps/v8/src/compiler/js-typed-lowering.cc b/deps/v8/src/compiler/js-typed-lowering.cc index 3190fc9930..8caafe6aad 100644 --- a/deps/v8/src/compiler/js-typed-lowering.cc +++ b/deps/v8/src/compiler/js-typed-lowering.cc @@ -200,14 +200,14 @@ class JSBinopReduction final { void CheckInputsToString() { if (!left_type().Is(Type::String())) { Node* left_input = - graph()->NewNode(simplified()->CheckString(VectorSlotPair()), left(), + graph()->NewNode(simplified()->CheckString(FeedbackSource()), left(), effect(), control()); node_->ReplaceInput(0, left_input); update_effect(left_input); } if (!right_type().Is(Type::String())) { Node* right_input = - graph()->NewNode(simplified()->CheckString(VectorSlotPair()), right(), + graph()->NewNode(simplified()->CheckString(FeedbackSource()), right(), effect(), control()); node_->ReplaceInput(1, right_input); update_effect(right_input); @@ -576,7 +576,7 @@ Reduction JSTypedLowering::ReduceJSAdd(Node* node) { // and thus potentially reduces the number of live ranges and allows for // more truncations. length = effect = graph()->NewNode( - simplified()->CheckBounds(VectorSlotPair()), length, + simplified()->CheckBounds(FeedbackSource()), length, jsgraph()->Constant(String::kMaxLength + 1), effect, control); } else { // Check if we would overflow the allowed maximum string length. 
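One pattern from the js-typed-lowering hunk above is worth calling out: speculative simplified operators that used to receive an empty VectorSlotPair() now receive a default-constructed FeedbackSource(). The sketch below encodes the assumed semantics, namely that a default-constructed source is invalid and simply means there is no feedback slot to update on deoptimization:

namespace sketch {

struct FeedbackSource {
  int slot = -1;                              // default-constructed == invalid
  bool IsValid() const { return slot >= 0; }  // assumed semantics
};

// Stand-in for simplified()->CheckString(FeedbackSource()): an invalid
// source means the check deoptimizes without recording anything against
// a feedback slot.
struct CheckStringOp {
  FeedbackSource feedback;
};

CheckStringOp MakeFeedbackFreeCheckString() {
  return CheckStringOp{FeedbackSource{}};
}

}  // namespace sketch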
@@ -1320,7 +1320,7 @@ Reduction JSTypedLowering::ReduceJSLoadContext(Node* node) { for (size_t i = 0; i < access.depth(); ++i) { context = effect = graph()->NewNode( simplified()->LoadField( - AccessBuilder::ForContextSlot(Context::PREVIOUS_INDEX)), + AccessBuilder::ForContextSlotKnownPointer(Context::PREVIOUS_INDEX)), context, effect, control); } node->ReplaceInput(0, context); @@ -1342,7 +1342,7 @@ Reduction JSTypedLowering::ReduceJSStoreContext(Node* node) { for (size_t i = 0; i < access.depth(); ++i) { context = effect = graph()->NewNode( simplified()->LoadField( - AccessBuilder::ForContextSlot(Context::PREVIOUS_INDEX)), + AccessBuilder::ForContextSlotKnownPointer(Context::PREVIOUS_INDEX)), context, effect, control); } node->ReplaceInput(0, context); @@ -1367,8 +1367,8 @@ Node* JSTypedLowering::BuildGetModuleCell(Node* node) { if (module_type.IsHeapConstant()) { SourceTextModuleRef module_constant = module_type.AsHeapConstant()->Ref().AsSourceTextModule(); - CellRef cell_constant = module_constant.GetCell(cell_index); - return jsgraph()->Constant(cell_constant); + base::Optional<CellRef> cell_constant = module_constant.GetCell(cell_index); + if (cell_constant.has_value()) return jsgraph()->Constant(*cell_constant); } FieldAccess field_access; @@ -1554,21 +1554,21 @@ Reduction JSTypedLowering::ReduceJSConstruct(Node* node) { if (target_type.IsHeapConstant() && target_type.AsHeapConstant()->Ref().IsJSFunction()) { JSFunctionRef function = target_type.AsHeapConstant()->Ref().AsJSFunction(); - SharedFunctionInfoRef shared = function.shared(); // Only optimize [[Construct]] here if {function} is a Constructor. if (!function.map().is_constructor()) return NoChange(); - CallDescriptor::Flags flags = CallDescriptor::kNeedsFrameState; + if (!function.serialized()) { + TRACE_BROKER_MISSING(broker(), "data for function " << function); + return NoChange(); + } // Patch {node} to an indirect call via the {function}s construct stub. - bool use_builtin_construct_stub = shared.construct_as_builtin(); - + bool use_builtin_construct_stub = function.shared().construct_as_builtin(); CodeRef code(broker(), use_builtin_construct_stub ? BUILTIN_CODE(isolate(), JSBuiltinsConstructStub) : BUILTIN_CODE(isolate(), JSConstructStubGeneric)); - node->RemoveInput(arity + 1); node->InsertInput(graph()->zone(), 0, jsgraph()->Constant(code)); node->InsertInput(graph()->zone(), 2, new_target); @@ -1576,10 +1576,9 @@ Reduction JSTypedLowering::ReduceJSConstruct(Node* node) { node->InsertInput(graph()->zone(), 4, jsgraph()->UndefinedConstant()); node->InsertInput(graph()->zone(), 5, jsgraph()->UndefinedConstant()); NodeProperties::ChangeOp( - node, - common()->Call(Linkage::GetStubCallDescriptor( - graph()->zone(), ConstructStubDescriptor{}, 1 + arity, flags))); - + node, common()->Call(Linkage::GetStubCallDescriptor( + graph()->zone(), ConstructStubDescriptor{}, 1 + arity, + CallDescriptor::kNeedsFrameState))); return Changed(node); } @@ -1637,12 +1636,15 @@ Reduction JSTypedLowering::ReduceJSCall(Node* node) { if (target_type.IsHeapConstant() && target_type.AsHeapConstant()->Ref().IsJSFunction()) { JSFunctionRef function = target_type.AsHeapConstant()->Ref().AsJSFunction(); - SharedFunctionInfoRef shared = function.shared(); - if (shared.HasBreakInfo()) { - // Do not inline the call if we need to check whether to break at entry. 
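The ReduceJSConstruct hunk above (and the ReduceJSCall continuation below) add the same guard: data hanging off a JSFunctionRef may only be read once the broker has serialized that function, so a miss is traced and the reduction gives up. A self-contained rendering under stand-in types (Reduction and JSFunctionRefSketch are not the real classes):

namespace sketch {

struct Reduction {
  bool changed;
};
inline Reduction NoChange() { return {false}; }
inline Reduction Changed() { return {true}; }

struct JSFunctionRefSketch {
  bool serialized;      // has the broker copied this function's data?
  bool is_constructor;  // stands in for function.map().is_constructor()
};

Reduction ReduceConstruct(JSFunctionRefSketch const& function) {
  if (!function.is_constructor) return NoChange();
  if (!function.serialized) {
    // Stands in for TRACE_BROKER_MISSING(broker(), ...): a concurrent
    // compile cannot block on the heap, so a missing ref ends the
    // reduction instead of dereferencing handles.
    return NoChange();
  }
  return Changed();
}

}  // namespace sketch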
+ if (!function.serialized()) { + TRACE_BROKER_MISSING(broker(), "data for function " << function); return NoChange(); } + SharedFunctionInfoRef shared = function.shared(); + + // Do not inline the call if we need to check whether to break at entry. + if (shared.HasBreakInfo()) return NoChange(); // Class constructors are callable, but [[Call]] will raise an exception. // See ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList ). @@ -1652,7 +1654,8 @@ Reduction JSTypedLowering::ReduceJSCall(Node* node) { // require data from a foreign native context. if (is_sloppy(shared.language_mode()) && !shared.native() && !receiver_type.Is(Type::Receiver())) { - if (!function.native_context().equals(broker()->native_context())) { + if (!function.native_context().equals( + broker()->target_native_context())) { return NoChange(); } Node* global_proxy = diff --git a/deps/v8/src/compiler/linkage.cc b/deps/v8/src/compiler/linkage.cc index 1d88a27a5f..39c93c0328 100644 --- a/deps/v8/src/compiler/linkage.cc +++ b/deps/v8/src/compiler/linkage.cc @@ -7,9 +7,7 @@ #include "src/codegen/assembler-inl.h" #include "src/codegen/macro-assembler.h" #include "src/codegen/optimized-compilation-info.h" -#include "src/compiler/common-operator.h" #include "src/compiler/frame.h" -#include "src/compiler/node.h" #include "src/compiler/osr.h" #include "src/compiler/pipeline.h" @@ -75,15 +73,6 @@ MachineSignature* CallDescriptor::GetMachineSignature(Zone* zone) const { return new (zone) MachineSignature(return_count, param_count, types); } -bool CallDescriptor::HasSameReturnLocationsAs( - const CallDescriptor* other) const { - if (ReturnCount() != other->ReturnCount()) return false; - for (size_t i = 0; i < ReturnCount(); ++i) { - if (GetReturnLocation(i) != other->GetReturnLocation(i)) return false; - } - return true; -} - int CallDescriptor::GetFirstUnusedStackSlot() const { int slots_above_sp = 0; for (size_t i = 0; i < InputCount(); ++i) { @@ -104,19 +93,16 @@ int CallDescriptor::GetStackParameterDelta( int callee_slots_above_sp = GetFirstUnusedStackSlot(); int tail_caller_slots_above_sp = tail_caller->GetFirstUnusedStackSlot(); int stack_param_delta = callee_slots_above_sp - tail_caller_slots_above_sp; - if (kPadArguments) { - // Adjust stack delta when it is odd. - if (stack_param_delta % 2 != 0) { - if (callee_slots_above_sp % 2 != 0) { - // The delta is odd due to the callee - we will need to add one slot - // of padding. - ++stack_param_delta; - } else { - // The delta is odd because of the caller. We already have one slot of - // padding that we can reuse for arguments, so we will need one fewer - // slot. - --stack_param_delta; - } + if (ShouldPadArguments(stack_param_delta)) { + if (callee_slots_above_sp % 2 != 0) { + // The delta is odd due to the callee - we will need to add one slot + // of padding. + ++stack_param_delta; + } else { + // The delta is odd because of the caller. We already have one slot of + // padding that we can reuse for arguments, so we will need one fewer + // slot. 
+ --stack_param_delta; } } return stack_param_delta; @@ -133,8 +119,14 @@ int CallDescriptor::GetTaggedParameterSlots() const { return result; } -bool CallDescriptor::CanTailCall(const Node* node) const { - return HasSameReturnLocationsAs(CallDescriptorOf(node->op())); +bool CallDescriptor::CanTailCall(const CallDescriptor* callee) const { + if (ReturnCount() != callee->ReturnCount()) return false; + for (size_t i = 0; i < ReturnCount(); ++i) { + if (!LinkageLocation::IsSameLocation(GetReturnLocation(i), + callee->GetReturnLocation(i))) + return false; + } + return true; } // TODO(jkummerow, sigurds): Arguably frame size calculation should be diff --git a/deps/v8/src/compiler/linkage.h b/deps/v8/src/compiler/linkage.h index 05eb0e7d11..69e7fbfa42 100644 --- a/deps/v8/src/compiler/linkage.h +++ b/deps/v8/src/compiler/linkage.h @@ -28,20 +28,33 @@ namespace compiler { const RegList kNoCalleeSaved = 0; -class Node; class OsrHelper; // Describes the location for a parameter or a return value to a call. class LinkageLocation { public: bool operator==(const LinkageLocation& other) const { - return bit_field_ == other.bit_field_; + return bit_field_ == other.bit_field_ && + machine_type_ == other.machine_type_; } bool operator!=(const LinkageLocation& other) const { return !(*this == other); } + static bool IsSameLocation(const LinkageLocation& a, + const LinkageLocation& b) { + // Different MachineTypes may end up at the same physical location. With the + // sub-type check we make sure that types like {AnyTagged} and + // {TaggedPointer} which would end up with the same physical location are + // considered equal here. + return (a.bit_field_ == b.bit_field_) && + (IsSubtype(a.machine_type_.representation(), + b.machine_type_.representation()) || + IsSubtype(b.machine_type_.representation(), + a.machine_type_.representation())); + } + static LinkageLocation ForAnyRegister( MachineType type = MachineType::None()) { return LinkageLocation(REGISTER, ANY_REGISTER, type); @@ -144,8 +157,8 @@ class LinkageLocation { private: enum LocationType { REGISTER, STACK_SLOT }; - class TypeField : public BitField<LocationType, 0, 1> {}; - class LocationField : public BitField<int32_t, TypeField::kNext, 31> {}; + using TypeField = BitField<LocationType, 0, 1>; + using LocationField = TypeField::Next<int32_t, 31>; static constexpr int32_t ANY_REGISTER = -1; static constexpr int32_t MAX_STACK_SLOT = 32767; @@ -197,7 +210,8 @@ class V8_EXPORT_PRIVATE CallDescriptor final // Use the kJavaScriptCallCodeStartRegister (fixed) register for the // indirect target address when calling. kFixedTargetRegister = 1u << 7, - kAllowCallThroughSlot = 1u << 8 + kAllowCallThroughSlot = 1u << 8, + kCallerSavedRegisters = 1u << 9 }; using Flags = base::Flags<Flag>; @@ -276,6 +290,9 @@ class V8_EXPORT_PRIVATE CallDescriptor final bool InitializeRootRegister() const { return flags() & kInitializeRootRegister; } + bool NeedsCallerSavedRegisters() const { + return flags() & kCallerSavedRegisters; + } LinkageLocation GetReturnLocation(size_t index) const { return location_sig_->GetReturn(index); @@ -314,8 +331,6 @@ class V8_EXPORT_PRIVATE CallDescriptor final bool UsesOnlyRegisters() const; - bool HasSameReturnLocationsAs(const CallDescriptor* other) const; - // Returns the first stack slot that is not used by the stack parameters. 
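The linkage.cc change above folds HasSameReturnLocationsAs into CanTailCall and compares locations with LinkageLocation::IsSameLocation, which tolerates MachineType differences (e.g. AnyTagged vs. TaggedPointer) that land in the same physical slot. A compilable sketch with simplified stand-ins for the location and representation types:

#include <cstddef>
#include <cstdint>
#include <vector>

namespace sketch {

// Stub representation lattice; the real IsSubtype covers all of
// MachineRepresentation.
enum class Rep { kTagged, kTaggedPointer, kWord32 };

inline bool IsSubtype(Rep a, Rep b) {
  return a == b || (a == Rep::kTaggedPointer && b == Rep::kTagged);
}

struct Location {
  int32_t bits;  // packed register/stack-slot encoding
  Rep rep;
};

// Mirrors LinkageLocation::IsSameLocation: same physical slot, and the
// representations must be related by subtyping in either direction.
inline bool IsSameLocation(Location const& a, Location const& b) {
  return a.bits == b.bits &&
         (IsSubtype(a.rep, b.rep) || IsSubtype(b.rep, a.rep));
}

// Mirrors the rewritten CallDescriptor::CanTailCall.
bool CanTailCall(std::vector<Location> const& caller_returns,
                 std::vector<Location> const& callee_returns) {
  if (caller_returns.size() != callee_returns.size()) return false;
  for (std::size_t i = 0; i < caller_returns.size(); ++i) {
    if (!IsSameLocation(caller_returns[i], callee_returns[i])) return false;
  }
  return true;
}

}  // namespace sketch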
int GetFirstUnusedStackSlot() const; @@ -323,7 +338,7 @@ class V8_EXPORT_PRIVATE CallDescriptor final int GetTaggedParameterSlots() const; - bool CanTailCall(const Node* call) const; + bool CanTailCall(const CallDescriptor* callee) const; int CalculateFixedFrameSize(Code::Kind code_kind) const; @@ -418,7 +433,7 @@ class V8_EXPORT_PRIVATE Linkage : public NON_EXPORTED_BASE(ZoneObject) { // structs, pointers to members, etc. static CallDescriptor* GetSimplifiedCDescriptor( Zone* zone, const MachineSignature* sig, - bool set_initialize_root_flag = false); + CallDescriptor::Flags flags = CallDescriptor::kNoFlags); // Get the location of an (incoming) parameter to this function. LinkageLocation GetParameterLocation(int index) const { diff --git a/deps/v8/src/compiler/load-elimination.cc b/deps/v8/src/compiler/load-elimination.cc index f9998723f3..3778775e9b 100644 --- a/deps/v8/src/compiler/load-elimination.cc +++ b/deps/v8/src/compiler/load-elimination.cc @@ -8,7 +8,6 @@ #include "src/compiler/common-operator.h" #include "src/compiler/js-graph.h" #include "src/compiler/node-properties.h" -#include "src/compiler/simplified-operator.h" #include "src/heap/factory.h" #include "src/objects/objects-inl.h" @@ -284,6 +283,28 @@ class LoadElimination::AliasStateInfo { MaybeHandle<Map> map_; }; +LoadElimination::AbstractField const* LoadElimination::AbstractField::KillConst( + Node* object, Zone* zone) const { + for (auto pair : this->info_for_node_) { + if (pair.first->IsDead()) continue; + // If we previously recorded information about a const store on the given + // 'object', we might not have done it on the same node; e.g. we might now + // identify the object by a FinishRegion node, whereas the initial const + // store was performed on the Allocate node. We therefore remove information + // on all nodes that must alias with 'object'. + if (MustAlias(object, pair.first)) { + AbstractField* that = new (zone) AbstractField(zone); + for (auto pair : this->info_for_node_) { + if (!MustAlias(object, pair.first)) { + that->info_for_node_.insert(pair); + } + } + return that; + } + } + return this; +} + LoadElimination::AbstractField const* LoadElimination::AbstractField::Kill( const AliasStateInfo& alias_info, MaybeHandle<Name> name, Zone* zone) const { @@ -527,38 +548,60 @@ LoadElimination::AbstractState::KillElement(Node* object, Node* index, } LoadElimination::AbstractState const* LoadElimination::AbstractState::AddField( - Node* object, size_t index, LoadElimination::FieldInfo info, - PropertyConstness constness, Zone* zone) const { + Node* object, IndexRange index_range, LoadElimination::FieldInfo info, + Zone* zone) const { AbstractState* that = new (zone) AbstractState(*this); - AbstractFields& fields = constness == PropertyConstness::kConst - ? that->const_fields_ - : that->fields_; - if (fields[index]) { - fields[index] = fields[index]->Extend(object, info, zone); - } else { - fields[index] = new (zone) AbstractField(object, info, zone); + AbstractFields& fields = + info.const_field_info.IsConst() ? 
that->const_fields_ : that->fields_; + for (int index : index_range) { + if (fields[index]) { + fields[index] = fields[index]->Extend(object, info, zone); + } else { + fields[index] = new (zone) AbstractField(object, info, zone); + } } return that; } -LoadElimination::AbstractState const* LoadElimination::AbstractState::KillField( - Node* object, size_t index, MaybeHandle<Name> name, Zone* zone) const { +LoadElimination::AbstractState const* +LoadElimination::AbstractState::KillConstField(Node* object, + IndexRange index_range, + Zone* zone) const { AliasStateInfo alias_info(this, object); - return KillField(alias_info, index, name, zone); + AbstractState* that = nullptr; + for (int index : index_range) { + if (AbstractField const* this_field = this->const_fields_[index]) { + this_field = this_field->KillConst(object, zone); + if (this->const_fields_[index] != this_field) { + if (!that) that = new (zone) AbstractState(*this); + that->const_fields_[index] = this_field; + } + } + } + return that ? that : this; } LoadElimination::AbstractState const* LoadElimination::AbstractState::KillField( - const AliasStateInfo& alias_info, size_t index, MaybeHandle<Name> name, + Node* object, IndexRange index_range, MaybeHandle<Name> name, Zone* zone) const { - if (AbstractField const* this_field = this->fields_[index]) { - this_field = this_field->Kill(alias_info, name, zone); - if (this->fields_[index] != this_field) { - AbstractState* that = new (zone) AbstractState(*this); - that->fields_[index] = this_field; - return that; + AliasStateInfo alias_info(this, object); + return KillField(alias_info, index_range, name, zone); +} + +LoadElimination::AbstractState const* LoadElimination::AbstractState::KillField( + const AliasStateInfo& alias_info, IndexRange index_range, + MaybeHandle<Name> name, Zone* zone) const { + AbstractState* that = nullptr; + for (int index : index_range) { + if (AbstractField const* this_field = this->fields_[index]) { + this_field = this_field->Kill(alias_info, name, zone); + if (this->fields_[index] != this_field) { + if (!that) that = new (zone) AbstractState(*this); + that->fields_[index] = this_field; + } } } - return this; + return that ? that : this; } LoadElimination::AbstractState const* @@ -598,13 +641,38 @@ LoadElimination::AbstractState const* LoadElimination::AbstractState::KillAll( } LoadElimination::FieldInfo const* LoadElimination::AbstractState::LookupField( - Node* object, size_t index, PropertyConstness constness) const { - AbstractFields const& fields = - constness == PropertyConstness::kConst ? const_fields_ : fields_; - if (AbstractField const* this_field = fields[index]) { - return this_field->Lookup(object); + Node* object, IndexRange index_range, + ConstFieldInfo const_field_info) const { + // Check if all the indices in {index_range} contain identical information. + // If not, a partially overlapping access has invalidated part of the value. + base::Optional<LoadElimination::FieldInfo const*> result; + for (int index : index_range) { + LoadElimination::FieldInfo const* info = nullptr; + if (const_field_info.IsConst()) { + if (AbstractField const* this_field = const_fields_[index]) { + info = this_field->Lookup(object); + } + if (!(info && info->const_field_info == const_field_info)) return nullptr; + } else { + if (AbstractField const* this_field = fields_[index]) { + info = this_field->Lookup(object); + } + if (!info) return nullptr; + } + if (!result.has_value()) { + result = info; + } else { + // We detected a partially overlapping access here. 
+ // We currently don't seem to have such accesses, so this code path is + // unreachable, but if we eventually have them, it is safe to return + // nullptr and continue the analysis. But store-store elimination is + // currently unsafe for such overlapping accesses, so when we remove + // this check, we should double-check that store-store elimination can + // handle it too. + DCHECK_EQ(**result, *info); + } } - return nullptr; + return *result; } bool LoadElimination::AliasStateInfo::MayAlias(Node* other) const { @@ -733,12 +801,13 @@ Reduction LoadElimination::ReduceEnsureWritableFastElements(Node* node) { // We know that the resulting elements have the fixed array map. state = state->SetMaps(node, fixed_array_maps, zone()); // Kill the previous elements on {object}. - state = state->KillField(object, FieldIndexOf(JSObject::kElementsOffset), + state = state->KillField(object, + FieldIndexOf(JSObject::kElementsOffset, kTaggedSize), MaybeHandle<Name>(), zone()); // Add the new elements on {object}. - state = state->AddField(object, FieldIndexOf(JSObject::kElementsOffset), - {node, MachineType::RepCompressedTaggedPointer()}, - PropertyConstness::kMutable, zone()); + state = state->AddField( + object, FieldIndexOf(JSObject::kElementsOffset, kTaggedSize), + {node, MachineType::RepCompressedTaggedPointer()}, zone()); return UpdateState(node, state); } @@ -760,12 +829,13 @@ Reduction LoadElimination::ReduceMaybeGrowFastElements(Node* node) { state = state->SetMaps(node, fixed_array_maps, zone()); } // Kill the previous elements on {object}. - state = state->KillField(object, FieldIndexOf(JSObject::kElementsOffset), + state = state->KillField(object, + FieldIndexOf(JSObject::kElementsOffset, kTaggedSize), MaybeHandle<Name>(), zone()); // Add the new elements on {object}. - state = state->AddField(object, FieldIndexOf(JSObject::kElementsOffset), - {node, MachineType::RepCompressedTaggedPointer()}, - PropertyConstness::kMutable, zone()); + state = state->AddField( + object, FieldIndexOf(JSObject::kElementsOffset, kTaggedSize), + {node, MachineType::RepCompressedTaggedPointer()}, zone()); return UpdateState(node, state); } @@ -783,9 +853,9 @@ Reduction LoadElimination::ReduceTransitionElementsKind(Node* node) { case ElementsTransition::kSlowTransition: // Kill the elements as well. AliasStateInfo alias_info(state, object, source_map); - state = - state->KillField(alias_info, FieldIndexOf(JSObject::kElementsOffset), - MaybeHandle<Name>(), zone()); + state = state->KillField( + alias_info, FieldIndexOf(JSObject::kElementsOffset, kTaggedSize), + MaybeHandle<Name>(), zone()); break; } ZoneHandleSet<Map> object_maps; @@ -828,7 +898,8 @@ Reduction LoadElimination::ReduceTransitionAndStoreElement(Node* node) { state = state->SetMaps(object, object_maps, zone()); } // Kill the elements as well. 
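Throughout the load-elimination hunks, fields are now addressed by an IndexRange instead of a single index, so a store whose machine representation spans several tagged slots updates or kills every slot it covers. The definition of IndexRange is not part of this diff; the sketch below assumes a begin-plus-size range with just enough iterator support for the range-based for loops seen above:

#include <cassert>

namespace sketch {

// Assumed shape of LoadElimination::IndexRange: a half-open run of
// tracked field indices, iterable with a range-based for loop.
class IndexRange {
 public:
  IndexRange(int begin, int size) : begin_(begin), end_(begin + size) {
    assert(size >= 1);
  }
  static IndexRange Invalid() { return IndexRange(); }

  bool operator==(IndexRange const& o) const {
    return begin_ == o.begin_ && end_ == o.end_;
  }
  bool operator!=(IndexRange const& o) const { return !(*this == o); }

  // Minimal iterator so `for (int index : range)` compiles.
  struct Iterator {
    int i;
    int operator*() const { return i; }
    Iterator& operator++() { ++i; return *this; }
    bool operator!=(Iterator const& o) const { return i != o.i; }
  };
  Iterator begin() const { return {begin_}; }
  Iterator end() const { return {end_}; }

 private:
  IndexRange() : begin_(-1), end_(-1) {}
  int begin_;
  int end_;
};

}  // namespace sketch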
- state = state->KillField(object, FieldIndexOf(JSObject::kElementsOffset), + state = state->KillField(object, + FieldIndexOf(JSObject::kElementsOffset, kTaggedSize), MaybeHandle<Name>(), zone()); return UpdateState(node, state); } @@ -851,16 +922,17 @@ Reduction LoadElimination::ReduceLoadField(Node* node, return Replace(value); } } else { - int field_index = FieldIndexOf(access); - if (field_index >= 0) { - PropertyConstness constness = access.constness; + IndexRange field_index = FieldIndexOf(access); + if (field_index != IndexRange::Invalid()) { MachineRepresentation representation = access.machine_type.representation(); FieldInfo const* lookup_result = - state->LookupField(object, field_index, constness); - if (!lookup_result && constness == PropertyConstness::kConst) { - lookup_result = state->LookupField(object, field_index, - PropertyConstness::kMutable); + state->LookupField(object, field_index, access.const_field_info); + if (!lookup_result && access.const_field_info.IsConst()) { + // If the access is const and we didn't find anything, also try to look + // up information from mutable stores + lookup_result = + state->LookupField(object, field_index, ConstFieldInfo::None()); } if (lookup_result) { // Make sure we don't reuse values that were recorded with a different @@ -884,8 +956,9 @@ Reduction LoadElimination::ReduceLoadField(Node* node, return Replace(replacement); } } - FieldInfo info(node, access.name, representation); - state = state->AddField(object, field_index, info, constness, zone()); + FieldInfo info(node, representation, access.name, + access.const_field_info); + state = state->AddField(object, field_index, info, zone()); } } Handle<Map> field_map; @@ -910,26 +983,26 @@ Reduction LoadElimination::ReduceStoreField(Node* node, Type const new_value_type = NodeProperties::GetType(new_value); if (new_value_type.IsHeapConstant()) { // Record the new {object} map information. - AllowHandleDereference handle_dereference; ZoneHandleSet<Map> object_maps( - Handle<Map>::cast(new_value_type.AsHeapConstant()->Value())); + new_value_type.AsHeapConstant()->Ref().AsMap().object()); state = state->SetMaps(object, object_maps, zone()); } } else { - int field_index = FieldIndexOf(access); - if (field_index >= 0) { - PropertyConstness constness = access.constness; + IndexRange field_index = FieldIndexOf(access); + if (field_index != IndexRange::Invalid()) { + bool is_const_store = access.const_field_info.IsConst(); MachineRepresentation representation = access.machine_type.representation(); FieldInfo const* lookup_result = - state->LookupField(object, field_index, constness); + state->LookupField(object, field_index, access.const_field_info); - if (lookup_result && (constness == PropertyConstness::kMutable || - V8_ENABLE_DOUBLE_CONST_STORE_CHECK_BOOL)) { + if (lookup_result && + (!is_const_store || V8_ENABLE_DOUBLE_CONST_STORE_CHECK_BOOL)) { // At runtime, we should never encounter // - any store replacing existing info with a different, incompatible // representation, nor - // - two consecutive const stores. + // - two consecutive const stores, unless the latter is a store into + // a literal. // However, we may see such code statically, so we guard against // executing it by emitting Unreachable. 
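The comment block above states an invariant compactly; spelled out: a second store to a field already recorded as const is statically possible but must never execute, so the reducer plants Unreachable, except when the store initializes an object literal. A hedged decision sketch (the representation-compatibility check is omitted; member names mirror the access descriptor, the rest are stand-ins):

namespace sketch {

struct FieldWrite {
  bool is_const;             // access.const_field_info.IsConst()
  bool is_store_in_literal;  // access.is_store_in_literal
  bool saw_prior_value;      // state->LookupField(...) != nullptr
};

enum class Action { kEmitUnreachable, kRecordValue };

// Simplified mirror of the branch in ReduceStoreField: a repeated const
// store is illegal unless it is the literal-initialization case.
Action Classify(FieldWrite const& w) {
  bool illegal_double_const_store =
      w.saw_prior_value && w.is_const && !w.is_store_in_literal;
  return illegal_double_const_store ? Action::kEmitUnreachable
                                    : Action::kRecordValue;
}

}  // namespace sketch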
// TODO(gsps): Re-enable the double const store check even for @@ -939,8 +1012,9 @@ Reduction LoadElimination::ReduceStoreField(Node* node, bool incompatible_representation = !lookup_result->name.is_null() && !IsCompatible(representation, lookup_result->representation); - if (incompatible_representation || - constness == PropertyConstness::kConst) { + bool illegal_double_const_store = + is_const_store && !access.is_store_in_literal; + if (incompatible_representation || illegal_double_const_store) { Node* control = NodeProperties::GetControlInput(node); Node* unreachable = graph()->NewNode(common()->Unreachable(), effect, control); @@ -953,16 +1027,22 @@ Reduction LoadElimination::ReduceStoreField(Node* node, } // Kill all potentially aliasing fields and record the new value. - FieldInfo new_info(new_value, access.name, representation); + FieldInfo new_info(new_value, representation, access.name, + access.const_field_info); + if (is_const_store && access.is_store_in_literal) { + // We only kill const information when there is a chance that we + // previously stored information about the given const field (namely, + // when we observe const stores to literals). + state = state->KillConstField(object, field_index, zone()); + } state = state->KillField(object, field_index, access.name, zone()); - state = state->AddField(object, field_index, new_info, - PropertyConstness::kMutable, zone()); - if (constness == PropertyConstness::kConst) { + state = state->AddField(object, field_index, new_info, zone()); + if (is_const_store) { // For const stores, we track information in both the const and the // mutable world to guard against field accesses that should have // been marked const, but were not. - state = - state->AddField(object, field_index, new_info, constness, zone()); + new_info.const_field_info = ConstFieldInfo::None(); + state = state->AddField(object, field_index, new_info, zone()); } } else { // Unsupported StoreField operator. @@ -1180,8 +1260,8 @@ LoadElimination::ComputeLoopStateForStoreField( // Invalidate what we know about the {object}s map. state = state->KillMaps(object, zone()); } else { - int field_index = FieldIndexOf(access); - if (field_index < 0) { + IndexRange field_index = FieldIndexOf(access); + if (field_index == IndexRange::Invalid()) { state = state->KillFields(object, access.name, zone()); } else { state = state->KillField(object, field_index, access.name, zone()); @@ -1197,9 +1277,12 @@ LoadElimination::AbstractState const* LoadElimination::ComputeLoopState( ElementsTransition transition; Node* object; }; - ZoneVector<TransitionElementsKindInfo> element_transitions_(zone()); - ZoneQueue<Node*> queue(zone()); - ZoneSet<Node*> visited(zone()); + // Allocate zone data structures in a temporary zone with a lifetime limited + // to this function to avoid blowing up the size of the stage-global zone. 
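Note: a Zone releases all of its allocations in one shot when it is destroyed, so scoping the scratch queue, set, and vector below to a function-local temp_zone keeps repeated loop-state computations from permanently growing the reducer's long-lived zone. The returned AbstractState is still built with zone(), which this hunk is careful to preserve.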
+ Zone temp_zone(zone()->allocator(), "Temporary scoped zone"); + ZoneVector<TransitionElementsKindInfo> element_transitions_(&temp_zone); + ZoneQueue<Node*> queue(&temp_zone); + ZoneSet<Node*> visited(&temp_zone); visited.insert(node); for (int i = 1; i < control->InputCount(); ++i) { queue.push(node->InputAt(i)); @@ -1213,16 +1296,16 @@ LoadElimination::AbstractState const* LoadElimination::ComputeLoopState( switch (current->opcode()) { case IrOpcode::kEnsureWritableFastElements: { Node* const object = NodeProperties::GetValueInput(current, 0); - state = state->KillField(object, - FieldIndexOf(JSObject::kElementsOffset), - MaybeHandle<Name>(), zone()); + state = state->KillField( + object, FieldIndexOf(JSObject::kElementsOffset, kTaggedSize), + MaybeHandle<Name>(), zone()); break; } case IrOpcode::kMaybeGrowFastElements: { Node* const object = NodeProperties::GetValueInput(current, 0); - state = state->KillField(object, - FieldIndexOf(JSObject::kElementsOffset), - MaybeHandle<Name>(), zone()); + state = state->KillField( + object, FieldIndexOf(JSObject::kElementsOffset, kTaggedSize), + MaybeHandle<Name>(), zone()); break; } case IrOpcode::kTransitionElementsKind: { @@ -1241,9 +1324,9 @@ LoadElimination::AbstractState const* LoadElimination::ComputeLoopState( // Invalidate what we know about the {object}s map. state = state->KillMaps(object, zone()); // Kill the elements as well. - state = state->KillField(object, - FieldIndexOf(JSObject::kElementsOffset), - MaybeHandle<Name>(), zone()); + state = state->KillField( + object, FieldIndexOf(JSObject::kElementsOffset, kTaggedSize), + MaybeHandle<Name>(), zone()); break; } case IrOpcode::kStoreField: { @@ -1305,9 +1388,9 @@ LoadElimination::AbstractState const* LoadElimination::ComputeLoopState( break; case ElementsTransition::kSlowTransition: { AliasStateInfo alias_info(state, t.object, t.transition.source()); - state = state->KillField(alias_info, - FieldIndexOf(JSObject::kElementsOffset), - MaybeHandle<Name>(), zone()); + state = state->KillField( + alias_info, FieldIndexOf(JSObject::kElementsOffset, kTaggedSize), + MaybeHandle<Name>(), zone()); break; } } @@ -1316,55 +1399,49 @@ LoadElimination::AbstractState const* LoadElimination::ComputeLoopState( } // static -int LoadElimination::FieldIndexOf(int offset) { +LoadElimination::IndexRange LoadElimination::FieldIndexOf( + int offset, int representation_size) { DCHECK(IsAligned(offset, kTaggedSize)); - int field_index = offset / kTaggedSize; - if (field_index >= static_cast<int>(kMaxTrackedFields)) return -1; - DCHECK_LT(0, field_index); - return field_index - 1; + int field_index = offset / kTaggedSize - 1; + DCHECK_EQ(0, representation_size % kTaggedSize); + return IndexRange(field_index, representation_size / kTaggedSize); } // static -int LoadElimination::FieldIndexOf(FieldAccess const& access) { +LoadElimination::IndexRange LoadElimination::FieldIndexOf( + FieldAccess const& access) { MachineRepresentation rep = access.machine_type.representation(); switch (rep) { case MachineRepresentation::kNone: case MachineRepresentation::kBit: case MachineRepresentation::kSimd128: UNREACHABLE(); - case MachineRepresentation::kWord32: - if (kInt32Size != kTaggedSize) { - return -1; // We currently only track tagged pointer size fields. - } - break; - case MachineRepresentation::kWord64: - if (kInt64Size != kTaggedSize) { - return -1; // We currently only track tagged pointer size fields. 
- } - break; case MachineRepresentation::kWord8: case MachineRepresentation::kWord16: case MachineRepresentation::kFloat32: - return -1; // Currently untracked. + // Currently untracked. + return IndexRange::Invalid(); case MachineRepresentation::kFloat64: - if (kDoubleSize != kTaggedSize) { - return -1; // We currently only track tagged pointer size fields. - } - break; + case MachineRepresentation::kWord32: + case MachineRepresentation::kWord64: case MachineRepresentation::kTaggedSigned: case MachineRepresentation::kTaggedPointer: case MachineRepresentation::kTagged: case MachineRepresentation::kCompressedSigned: case MachineRepresentation::kCompressedPointer: case MachineRepresentation::kCompressed: - // TODO(bmeurer): Check that we never do overlapping load/stores of - // individual parts of Float64 values. break; } + int representation_size = ElementSizeInBytes(rep); + // We currently only track fields that are at least tagged pointer sized. + if (representation_size < kTaggedSize) return IndexRange::Invalid(); + DCHECK_EQ(0, representation_size % kTaggedSize); + if (access.base_is_tagged != kTaggedBase) { - return -1; // We currently only track tagged objects. + // We currently only track tagged objects. + return IndexRange::Invalid(); } - return FieldIndexOf(access.offset); + return FieldIndexOf(access.offset, representation_size); } CommonOperatorBuilder* LoadElimination::common() const { diff --git a/deps/v8/src/compiler/load-elimination.h b/deps/v8/src/compiler/load-elimination.h index 4ad1fa64a2..b97fd7b883 100644 --- a/deps/v8/src/compiler/load-elimination.h +++ b/deps/v8/src/compiler/load-elimination.h @@ -9,6 +9,7 @@ #include "src/codegen/machine-type.h" #include "src/common/globals.h" #include "src/compiler/graph-reducer.h" +#include "src/compiler/simplified-operator.h" #include "src/handles/maybe-handles.h" #include "src/zone/zone-handle-set.h" @@ -100,20 +101,25 @@ class V8_EXPORT_PRIVATE LoadElimination final struct FieldInfo { FieldInfo() = default; - FieldInfo(Node* value, MachineRepresentation representation) - : value(value), name(), representation(representation) {} - FieldInfo(Node* value, MaybeHandle<Name> name, - MachineRepresentation representation) - : value(value), name(name), representation(representation) {} + FieldInfo(Node* value, MachineRepresentation representation, + MaybeHandle<Name> name = {}, + ConstFieldInfo const_field_info = ConstFieldInfo::None()) + : value(value), + representation(representation), + name(name), + const_field_info(const_field_info) {} bool operator==(const FieldInfo& other) const { - return value == other.value && name.address() == other.name.address() && - representation == other.representation; + return value == other.value && representation == other.representation && + name.address() == other.name.address() && + const_field_info == other.const_field_info; } + bool operator!=(const FieldInfo& other) const { return !(*this == other); } Node* value = nullptr; - MaybeHandle<Name> name; MachineRepresentation representation = MachineRepresentation::kNone; + MaybeHandle<Name> name; + ConstFieldInfo const_field_info; }; // Abstract state to approximate the current state of a certain field along @@ -134,6 +140,7 @@ class V8_EXPORT_PRIVATE LoadElimination final return that; } FieldInfo const* Lookup(Node* object) const; + AbstractField const* KillConst(Node* object, Zone* zone) const; AbstractField const* Kill(const AliasStateInfo& alias_info, MaybeHandle<Name> name, Zone* zone) const; bool Equals(AbstractField const* that) const { @@ 
-186,6 +193,39 @@ class V8_EXPORT_PRIVATE LoadElimination final ZoneMap<Node*, ZoneHandleSet<Map>> info_for_node_; }; + class IndexRange { + public: + IndexRange(int begin, int size) : begin_(begin), end_(begin + size) { + DCHECK_LE(0, begin); + DCHECK_LE(1, size); + if (end_ > static_cast<int>(kMaxTrackedFields)) { + *this = IndexRange::Invalid(); + } + } + static IndexRange Invalid() { return IndexRange(); } + + bool operator==(const IndexRange& other) { + return begin_ == other.begin_ && end_ == other.end_; + } + bool operator!=(const IndexRange& other) { return !(*this == other); } + + struct Iterator { + int i; + int operator*() { return i; } + void operator++() { ++i; } + bool operator!=(Iterator other) { return i != other.i; } + }; + + Iterator begin() { return {begin_}; } + Iterator end() { return {end_}; } + + private: + int begin_; + int end_; + + IndexRange() : begin_(-1), end_(-1) {} + }; + class AbstractState final : public ZoneObject { public: AbstractState() {} @@ -200,19 +240,20 @@ class V8_EXPORT_PRIVATE LoadElimination final Zone* zone) const; bool LookupMaps(Node* object, ZoneHandleSet<Map>* object_maps) const; - AbstractState const* AddField(Node* object, size_t index, FieldInfo info, - PropertyConstness constness, - Zone* zone) const; + AbstractState const* AddField(Node* object, IndexRange index, + FieldInfo info, Zone* zone) const; + AbstractState const* KillConstField(Node* object, IndexRange index_range, + Zone* zone) const; AbstractState const* KillField(const AliasStateInfo& alias_info, - size_t index, MaybeHandle<Name> name, + IndexRange index, MaybeHandle<Name> name, Zone* zone) const; - AbstractState const* KillField(Node* object, size_t index, + AbstractState const* KillField(Node* object, IndexRange index, MaybeHandle<Name> name, Zone* zone) const; AbstractState const* KillFields(Node* object, MaybeHandle<Name> name, Zone* zone) const; AbstractState const* KillAll(Zone* zone) const; - FieldInfo const* LookupField(Node* object, size_t index, - PropertyConstness constness) const; + FieldInfo const* LookupField(Node* object, IndexRange index, + ConstFieldInfo const_field_info) const; AbstractState const* AddElement(Node* object, Node* index, Node* value, MachineRepresentation representation, @@ -280,8 +321,8 @@ class V8_EXPORT_PRIVATE LoadElimination final AbstractState const* UpdateStateForPhi(AbstractState const* state, Node* effect_phi, Node* phi); - static int FieldIndexOf(int offset); - static int FieldIndexOf(FieldAccess const& access); + static IndexRange FieldIndexOf(int offset, int representation_size); + static IndexRange FieldIndexOf(FieldAccess const& access); static AbstractState const* empty_state() { return AbstractState::empty_state(); diff --git a/deps/v8/src/compiler/machine-graph-verifier.cc b/deps/v8/src/compiler/machine-graph-verifier.cc index 80205f80b6..4c7ee1d141 100644 --- a/deps/v8/src/compiler/machine-graph-verifier.cc +++ b/deps/v8/src/compiler/machine-graph-verifier.cc @@ -60,8 +60,7 @@ class MachineRepresentationInferrer { CHECK_LE(index, static_cast<size_t>(1)); return index == 0 ? 
MachineRepresentation::kWord64 : MachineRepresentation::kBit; - case IrOpcode::kCall: - case IrOpcode::kCallWithCallerSavedRegisters: { + case IrOpcode::kCall: { auto call_descriptor = CallDescriptorOf(input->op()); return call_descriptor->GetReturnType(index).representation(); } @@ -128,7 +127,6 @@ class MachineRepresentationInferrer { representation_vector_[node->id()] = PromoteRepresentation( LoadRepresentationOf(node->op()).representation()); break; - case IrOpcode::kLoadStackPointer: case IrOpcode::kLoadFramePointer: case IrOpcode::kLoadParentFramePointer: representation_vector_[node->id()] = @@ -142,8 +140,7 @@ class MachineRepresentationInferrer { representation_vector_[node->id()] = PhiRepresentationOf(node->op()); break; - case IrOpcode::kCall: - case IrOpcode::kCallWithCallerSavedRegisters: { + case IrOpcode::kCall: { auto call_descriptor = CallDescriptorOf(node->op()); if (call_descriptor->ReturnCount() > 0) { representation_vector_[node->id()] = @@ -235,6 +232,10 @@ class MachineRepresentationInferrer { case IrOpcode::kWord64PoisonOnSpeculation: representation_vector_[node->id()] = MachineRepresentation::kWord64; break; + case IrOpcode::kCompressedHeapConstant: + representation_vector_[node->id()] = + MachineRepresentation::kCompressedPointer; + break; case IrOpcode::kExternalConstant: representation_vector_[node->id()] = MachineType::PointerRepresentation(); @@ -248,6 +249,13 @@ class MachineRepresentationInferrer { representation_vector_[node->id()] = MachineRepresentation::kTaggedSigned; break; + case IrOpcode::kBitcastWord32ToCompressedSigned: + representation_vector_[node->id()] = + MachineRepresentation::kCompressedSigned; + break; + case IrOpcode::kBitcastCompressedSignedToWord32: + representation_vector_[node->id()] = MachineRepresentation::kWord32; + break; case IrOpcode::kWord32Equal: case IrOpcode::kInt32LessThan: case IrOpcode::kInt32LessThanOrEqual: @@ -265,6 +273,7 @@ class MachineRepresentationInferrer { case IrOpcode::kFloat64LessThan: case IrOpcode::kFloat64LessThanOrEqual: case IrOpcode::kChangeTaggedToBit: + case IrOpcode::kStackPointerGreaterThan: representation_vector_[node->id()] = MachineRepresentation::kBit; break; #define LABEL(opcode) case IrOpcode::k##opcode: @@ -373,7 +382,6 @@ class MachineRepresentationChecker { } switch (node->opcode()) { case IrOpcode::kCall: - case IrOpcode::kCallWithCallerSavedRegisters: case IrOpcode::kTailCall: CheckCallInputs(node); break; @@ -433,6 +441,13 @@ class MachineRepresentationChecker { case IrOpcode::kTaggedPoisonOnSpeculation: CheckValueInputIsTagged(node, 0); break; + case IrOpcode::kBitcastWord32ToCompressedSigned: + CheckValueInputRepresentationIs(node, 0, + MachineRepresentation::kWord32); + break; + case IrOpcode::kBitcastCompressedSignedToWord32: + CheckValueInputIsCompressed(node, 0); + break; case IrOpcode::kTruncateFloat64ToWord32: case IrOpcode::kTruncateFloat64ToUint32: case IrOpcode::kTruncateFloat64ToFloat32: @@ -699,6 +714,10 @@ class MachineRepresentationChecker { } break; } + case IrOpcode::kStackPointerGreaterThan: + CheckValueInputRepresentationIs( + node, 0, MachineType::PointerRepresentation()); + break; case IrOpcode::kThrow: case IrOpcode::kTypedStateValues: case IrOpcode::kFrameState: @@ -751,11 +770,6 @@ class MachineRepresentationChecker { case MachineRepresentation::kCompressedPointer: case MachineRepresentation::kCompressedSigned: return; - case MachineRepresentation::kNone: - if (input->opcode() == IrOpcode::kCompressedHeapConstant) { - return; - } - break; default: break; } @@ -858,17 
+872,6 @@ class MachineRepresentationChecker { case MachineRepresentation::kCompressedSigned: case MachineRepresentation::kCompressedPointer: return; - case MachineRepresentation::kNone: { - if (input->opcode() == IrOpcode::kCompressedHeapConstant) { - return; - } - std::ostringstream str; - str << "TypeError: node #" << input->id() << ":" << *input->op() - << " is untyped."; - PrintDebugHelp(str, node); - FATAL("%s", str.str().c_str()); - break; - } default: break; } diff --git a/deps/v8/src/compiler/machine-operator-reducer.cc b/deps/v8/src/compiler/machine-operator-reducer.cc index f720c29084..11124579f6 100644 --- a/deps/v8/src/compiler/machine-operator-reducer.cc +++ b/deps/v8/src/compiler/machine-operator-reducer.cc @@ -34,17 +34,14 @@ Node* MachineOperatorReducer::Float32Constant(volatile float value) { return graph()->NewNode(common()->Float32Constant(value)); } - Node* MachineOperatorReducer::Float64Constant(volatile double value) { return mcgraph()->Float64Constant(value); } - Node* MachineOperatorReducer::Int32Constant(int32_t value) { return mcgraph()->Int32Constant(value); } - Node* MachineOperatorReducer::Int64Constant(int64_t value) { return graph()->NewNode(common()->Int64Constant(value)); } @@ -70,23 +67,27 @@ Node* MachineOperatorReducer::Word32And(Node* lhs, Node* rhs) { return reduction.Changed() ? reduction.replacement() : node; } - Node* MachineOperatorReducer::Word32Sar(Node* lhs, uint32_t rhs) { if (rhs == 0) return lhs; return graph()->NewNode(machine()->Word32Sar(), lhs, Uint32Constant(rhs)); } - Node* MachineOperatorReducer::Word32Shr(Node* lhs, uint32_t rhs) { if (rhs == 0) return lhs; return graph()->NewNode(machine()->Word32Shr(), lhs, Uint32Constant(rhs)); } - Node* MachineOperatorReducer::Word32Equal(Node* lhs, Node* rhs) { return graph()->NewNode(machine()->Word32Equal(), lhs, rhs); } +Node* MachineOperatorReducer::BitcastWord32ToCompressedSigned(Node* value) { + return graph()->NewNode(machine()->BitcastWord32ToCompressedSigned(), value); +} + +Node* MachineOperatorReducer::BitcastCompressedSignedToWord32(Node* value) { + return graph()->NewNode(machine()->BitcastCompressedSignedToWord32(), value); +} Node* MachineOperatorReducer::Int32Add(Node* lhs, Node* rhs) { Node* const node = graph()->NewNode(machine()->Int32Add(), lhs, rhs); @@ -94,19 +95,16 @@ Node* MachineOperatorReducer::Int32Add(Node* lhs, Node* rhs) { return reduction.Changed() ? reduction.replacement() : node; } - Node* MachineOperatorReducer::Int32Sub(Node* lhs, Node* rhs) { Node* const node = graph()->NewNode(machine()->Int32Sub(), lhs, rhs); Reduction const reduction = ReduceInt32Sub(node); return reduction.Changed() ? reduction.replacement() : node; } - Node* MachineOperatorReducer::Int32Mul(Node* lhs, Node* rhs) { return graph()->NewNode(machine()->Int32Mul(), lhs, rhs); } - Node* MachineOperatorReducer::Int32Div(Node* dividend, int32_t divisor) { DCHECK_NE(0, divisor); DCHECK_NE(std::numeric_limits<int32_t>::min(), divisor); @@ -122,7 +120,6 @@ Node* MachineOperatorReducer::Int32Div(Node* dividend, int32_t divisor) { return Int32Add(Word32Sar(quotient, mag.shift), Word32Shr(dividend, 31)); } - Node* MachineOperatorReducer::Uint32Div(Node* dividend, uint32_t divisor) { DCHECK_LT(0u, divisor); // If the divisor is even, we can avoid using the expensive fixup by shifting @@ -146,7 +143,6 @@ Node* MachineOperatorReducer::Uint32Div(Node* dividend, uint32_t divisor) { return quotient; } - // Perform constant folding and strength reduction on machine operators. 
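Note: the two bitcast helpers defined above exist to serve the new compressed-Smi strength reductions inside Reduce(); on 64-bit targets with 31-bit Smis a compressed Smi is just the low 32 bits of the tagged word, so (as sketched from the hunks that follow):

  //   ChangeTaggedToCompressed(BitcastWordToTaggedSigned(ChangeInt32ToInt64(x)))
  //     => BitcastWord32ToCompressedSigned(x)
  //   TruncateInt64ToInt32(BitcastTaggedSignedToWord(ChangeCompressedToTagged(y)))
  //     => BitcastCompressedSignedToWord32(y)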
Reduction MachineOperatorReducer::Reduce(Node* node) { switch (node->opcode()) { @@ -664,6 +660,17 @@ Reduction MachineOperatorReducer::Reduce(Node* node) { if (m.HasValue()) return ReplaceInt64(static_cast<uint64_t>(m.Value())); break; } + case IrOpcode::kChangeTaggedToCompressed: { + Int64Matcher m(node->InputAt(0)); + if (m.IsBitcastWordToTaggedSigned()) { + Int64Matcher n(m.node()->InputAt(0)); + if (n.IsChangeInt32ToInt64()) { + DCHECK(machine()->Is64() && SmiValuesAre31Bits()); + return Replace(BitcastWord32ToCompressedSigned(n.node()->InputAt(0))); + } + } + break; + } case IrOpcode::kTruncateFloat64ToWord32: { Float64Matcher m(node->InputAt(0)); if (m.HasValue()) return ReplaceInt32(DoubleToInt32(m.Value())); @@ -674,6 +681,13 @@ Reduction MachineOperatorReducer::Reduce(Node* node) { Int64Matcher m(node->InputAt(0)); if (m.HasValue()) return ReplaceInt32(static_cast<int32_t>(m.Value())); if (m.IsChangeInt32ToInt64()) return Replace(m.node()->InputAt(0)); + if (m.IsBitcastTaggedSignedToWord()) { + Int64Matcher n(m.node()->InputAt(0)); + if (n.IsChangeCompressedToTagged()) { + DCHECK(machine()->Is64() && SmiValuesAre31Bits()); + return Replace(BitcastCompressedSignedToWord32(n.node()->InputAt(0))); + } + } break; } case IrOpcode::kTruncateFloat64ToFloat32: { @@ -871,7 +885,6 @@ Reduction MachineOperatorReducer::ReduceInt32Div(Node* node) { return NoChange(); } - Reduction MachineOperatorReducer::ReduceUint32Div(Node* node) { Uint32BinopMatcher m(node); if (m.left().Is(0)) return Replace(m.left().node()); // 0 / x => 0 @@ -900,7 +913,6 @@ Reduction MachineOperatorReducer::ReduceUint32Div(Node* node) { return NoChange(); } - Reduction MachineOperatorReducer::ReduceInt32Mod(Node* node) { Int32BinopMatcher m(node); if (m.left().Is(0)) return Replace(m.left().node()); // 0 % x => 0 @@ -937,7 +949,6 @@ Reduction MachineOperatorReducer::ReduceInt32Mod(Node* node) { return NoChange(); } - Reduction MachineOperatorReducer::ReduceUint32Mod(Node* node) { Uint32BinopMatcher m(node); if (m.left().Is(0)) return Replace(m.left().node()); // 0 % x => 0 @@ -967,7 +978,6 @@ Reduction MachineOperatorReducer::ReduceUint32Mod(Node* node) { return NoChange(); } - Reduction MachineOperatorReducer::ReduceStore(Node* node) { NodeMatcher nm(node); MachineRepresentation rep; @@ -1015,7 +1025,6 @@ Reduction MachineOperatorReducer::ReduceStore(Node* node) { return NoChange(); } - Reduction MachineOperatorReducer::ReduceProjection(size_t index, Node* node) { switch (node->opcode()) { case IrOpcode::kInt32AddWithOverflow: { @@ -1069,7 +1078,6 @@ Reduction MachineOperatorReducer::ReduceProjection(size_t index, Node* node) { return NoChange(); } - Reduction MachineOperatorReducer::ReduceWord32Shifts(Node* node) { DCHECK((node->opcode() == IrOpcode::kWord32Shl) || (node->opcode() == IrOpcode::kWord32Shr) || @@ -1089,7 +1097,6 @@ Reduction MachineOperatorReducer::ReduceWord32Shifts(Node* node) { return NoChange(); } - Reduction MachineOperatorReducer::ReduceWord32Shl(Node* node) { DCHECK_EQ(IrOpcode::kWord32Shl, node->opcode()); Int32BinopMatcher m(node); @@ -1399,7 +1406,6 @@ Reduction MachineOperatorReducer::ReduceFloat64InsertLowWord32(Node* node) { return NoChange(); } - Reduction MachineOperatorReducer::ReduceFloat64InsertHighWord32(Node* node) { DCHECK_EQ(IrOpcode::kFloat64InsertHighWord32, node->opcode()); Float64Matcher mlhs(node->InputAt(0)); @@ -1412,7 +1418,6 @@ Reduction MachineOperatorReducer::ReduceFloat64InsertHighWord32(Node* node) { return NoChange(); } - namespace { bool 
IsFloat64RepresentableAsFloat32(const Float64Matcher& m) { @@ -1492,7 +1497,6 @@ CommonOperatorBuilder* MachineOperatorReducer::common() const { return mcgraph()->common(); } - MachineOperatorBuilder* MachineOperatorReducer::machine() const { return mcgraph()->machine(); } diff --git a/deps/v8/src/compiler/machine-operator-reducer.h b/deps/v8/src/compiler/machine-operator-reducer.h index a8e4cd5749..6eab08653e 100644 --- a/deps/v8/src/compiler/machine-operator-reducer.h +++ b/deps/v8/src/compiler/machine-operator-reducer.h @@ -51,6 +51,8 @@ class V8_EXPORT_PRIVATE MachineOperatorReducer final Node* Word32Sar(Node* lhs, uint32_t rhs); Node* Word32Shr(Node* lhs, uint32_t rhs); Node* Word32Equal(Node* lhs, Node* rhs); + Node* BitcastWord32ToCompressedSigned(Node* value); + Node* BitcastCompressedSignedToWord32(Node* value); Node* Int32Add(Node* lhs, Node* rhs); Node* Int32Sub(Node* lhs, Node* rhs); Node* Int32Mul(Node* lhs, Node* rhs); diff --git a/deps/v8/src/compiler/machine-operator.cc b/deps/v8/src/compiler/machine-operator.cc index f447861aca..0355534408 100644 --- a/deps/v8/src/compiler/machine-operator.cc +++ b/deps/v8/src/compiler/machine-operator.cc @@ -89,6 +89,8 @@ MachineType AtomicOpType(Operator const* op) { return OpParameter<MachineType>(op); } +// The format is: +// V(Name, properties, value_input_count, control_input_count, output_count) #define PURE_BINARY_OP_LIST_32(V) \ V(Word32And, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \ V(Word32Or, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \ @@ -112,6 +114,8 @@ MachineType AtomicOpType(Operator const* op) { V(Uint32Mod, Operator::kNoProperties, 2, 1, 1) \ V(Uint32MulHigh, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) +// The format is: +// V(Name, properties, value_input_count, control_input_count, output_count) #define PURE_BINARY_OP_LIST_64(V) \ V(Word64And, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \ V(Word64Or, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \ @@ -133,6 +137,8 @@ MachineType AtomicOpType(Operator const* op) { V(Uint64LessThan, Operator::kNoProperties, 2, 0, 1) \ V(Uint64LessThanOrEqual, Operator::kNoProperties, 2, 0, 1) +// The format is: +// V(Name, properties, value_input_count, control_input_count, output_count) #define MACHINE_PURE_OP_LIST(V) \ PURE_BINARY_OP_LIST_32(V) \ PURE_BINARY_OP_LIST_64(V) \ @@ -142,6 +148,8 @@ MachineType AtomicOpType(Operator const* op) { V(Word64ReverseBytes, Operator::kNoProperties, 1, 0, 1) \ V(BitcastTaggedSignedToWord, Operator::kNoProperties, 1, 0, 1) \ V(BitcastWordToTaggedSigned, Operator::kNoProperties, 1, 0, 1) \ + V(BitcastWord32ToCompressedSigned, Operator::kNoProperties, 1, 0, 1) \ + V(BitcastCompressedSignedToWord32, Operator::kNoProperties, 1, 0, 1) \ V(TruncateFloat64ToWord32, Operator::kNoProperties, 1, 0, 1) \ V(ChangeFloat32ToFloat64, Operator::kNoProperties, 1, 0, 1) \ V(ChangeFloat64ToInt32, Operator::kNoProperties, 1, 0, 1) \ @@ -236,7 +244,6 @@ MachineType AtomicOpType(Operator const* op) { V(Float64ExtractHighWord32, Operator::kNoProperties, 1, 0, 1) \ V(Float64InsertLowWord32, Operator::kNoProperties, 2, 0, 1) \ V(Float64InsertHighWord32, Operator::kNoProperties, 2, 0, 1) \ - V(LoadStackPointer, Operator::kNoProperties, 0, 0, 1) \ V(LoadFramePointer, Operator::kNoProperties, 0, 0, 1) \ V(LoadParentFramePointer, Operator::kNoProperties, 0, 0, 1) \ V(Int32PairAdd, Operator::kNoProperties, 4, 0, 2) \ @@ -248,6 +255,12 @@ MachineType AtomicOpType(Operator const* op) { V(F64x2Splat, 
Operator::kNoProperties, 1, 0, 1) \ V(F64x2Abs, Operator::kNoProperties, 1, 0, 1) \ V(F64x2Neg, Operator::kNoProperties, 1, 0, 1) \ + V(F64x2Add, Operator::kCommutative, 2, 0, 1) \ + V(F64x2Sub, Operator::kNoProperties, 2, 0, 1) \ + V(F64x2Mul, Operator::kCommutative, 2, 0, 1) \ + V(F64x2Div, Operator::kNoProperties, 2, 0, 1) \ + V(F64x2Min, Operator::kCommutative, 2, 0, 1) \ + V(F64x2Max, Operator::kCommutative, 2, 0, 1) \ V(F64x2Eq, Operator::kCommutative, 2, 0, 1) \ V(F64x2Ne, Operator::kCommutative, 2, 0, 1) \ V(F64x2Lt, Operator::kNoProperties, 2, 0, 1) \ @@ -263,6 +276,7 @@ MachineType AtomicOpType(Operator const* op) { V(F32x4AddHoriz, Operator::kNoProperties, 2, 0, 1) \ V(F32x4Sub, Operator::kNoProperties, 2, 0, 1) \ V(F32x4Mul, Operator::kCommutative, 2, 0, 1) \ + V(F32x4Div, Operator::kNoProperties, 2, 0, 1) \ V(F32x4Min, Operator::kCommutative, 2, 0, 1) \ V(F32x4Max, Operator::kCommutative, 2, 0, 1) \ V(F32x4Eq, Operator::kCommutative, 2, 0, 1) \ @@ -271,13 +285,20 @@ MachineType AtomicOpType(Operator const* op) { V(F32x4Le, Operator::kNoProperties, 2, 0, 1) \ V(I64x2Splat, Operator::kNoProperties, 1, 0, 1) \ V(I64x2Neg, Operator::kNoProperties, 1, 0, 1) \ + V(I64x2Shl, Operator::kNoProperties, 2, 0, 1) \ + V(I64x2ShrS, Operator::kNoProperties, 2, 0, 1) \ V(I64x2Add, Operator::kCommutative, 2, 0, 1) \ V(I64x2Sub, Operator::kNoProperties, 2, 0, 1) \ V(I64x2Mul, Operator::kCommutative, 2, 0, 1) \ + V(I64x2MinS, Operator::kCommutative, 2, 0, 1) \ + V(I64x2MaxS, Operator::kCommutative, 2, 0, 1) \ V(I64x2Eq, Operator::kCommutative, 2, 0, 1) \ V(I64x2Ne, Operator::kCommutative, 2, 0, 1) \ V(I64x2GtS, Operator::kNoProperties, 2, 0, 1) \ V(I64x2GeS, Operator::kNoProperties, 2, 0, 1) \ + V(I64x2ShrU, Operator::kNoProperties, 2, 0, 1) \ + V(I64x2MinU, Operator::kCommutative, 2, 0, 1) \ + V(I64x2MaxU, Operator::kCommutative, 2, 0, 1) \ V(I64x2GtU, Operator::kNoProperties, 2, 0, 1) \ V(I64x2GeU, Operator::kNoProperties, 2, 0, 1) \ V(I32x4Splat, Operator::kNoProperties, 1, 0, 1) \ @@ -285,6 +306,8 @@ MachineType AtomicOpType(Operator const* op) { V(I32x4SConvertI16x8Low, Operator::kNoProperties, 1, 0, 1) \ V(I32x4SConvertI16x8High, Operator::kNoProperties, 1, 0, 1) \ V(I32x4Neg, Operator::kNoProperties, 1, 0, 1) \ + V(I32x4Shl, Operator::kNoProperties, 2, 0, 1) \ + V(I32x4ShrS, Operator::kNoProperties, 2, 0, 1) \ V(I32x4Add, Operator::kCommutative, 2, 0, 1) \ V(I32x4AddHoriz, Operator::kNoProperties, 2, 0, 1) \ V(I32x4Sub, Operator::kNoProperties, 2, 0, 1) \ @@ -298,6 +321,7 @@ MachineType AtomicOpType(Operator const* op) { V(I32x4UConvertF32x4, Operator::kNoProperties, 1, 0, 1) \ V(I32x4UConvertI16x8Low, Operator::kNoProperties, 1, 0, 1) \ V(I32x4UConvertI16x8High, Operator::kNoProperties, 1, 0, 1) \ + V(I32x4ShrU, Operator::kNoProperties, 2, 0, 1) \ V(I32x4MinU, Operator::kCommutative, 2, 0, 1) \ V(I32x4MaxU, Operator::kCommutative, 2, 0, 1) \ V(I32x4GtU, Operator::kNoProperties, 2, 0, 1) \ @@ -306,6 +330,8 @@ MachineType AtomicOpType(Operator const* op) { V(I16x8SConvertI8x16Low, Operator::kNoProperties, 1, 0, 1) \ V(I16x8SConvertI8x16High, Operator::kNoProperties, 1, 0, 1) \ V(I16x8Neg, Operator::kNoProperties, 1, 0, 1) \ + V(I16x8Shl, Operator::kNoProperties, 2, 0, 1) \ + V(I16x8ShrS, Operator::kNoProperties, 2, 0, 1) \ V(I16x8SConvertI32x4, Operator::kNoProperties, 2, 0, 1) \ V(I16x8Add, Operator::kCommutative, 2, 0, 1) \ V(I16x8AddSaturateS, Operator::kCommutative, 2, 0, 1) \ @@ -321,6 +347,7 @@ MachineType AtomicOpType(Operator const* op) { V(I16x8GeS, Operator::kNoProperties, 2, 0, 
1) \ V(I16x8UConvertI8x16Low, Operator::kNoProperties, 1, 0, 1) \ V(I16x8UConvertI8x16High, Operator::kNoProperties, 1, 0, 1) \ + V(I16x8ShrU, Operator::kNoProperties, 2, 0, 1) \ V(I16x8UConvertI32x4, Operator::kNoProperties, 2, 0, 1) \ V(I16x8AddSaturateU, Operator::kCommutative, 2, 0, 1) \ V(I16x8SubSaturateU, Operator::kNoProperties, 2, 0, 1) \ @@ -330,6 +357,8 @@ MachineType AtomicOpType(Operator const* op) { V(I16x8GeU, Operator::kNoProperties, 2, 0, 1) \ V(I8x16Splat, Operator::kNoProperties, 1, 0, 1) \ V(I8x16Neg, Operator::kNoProperties, 1, 0, 1) \ + V(I8x16Shl, Operator::kNoProperties, 2, 0, 1) \ + V(I8x16ShrS, Operator::kNoProperties, 2, 0, 1) \ V(I8x16SConvertI16x8, Operator::kNoProperties, 2, 0, 1) \ V(I8x16Add, Operator::kCommutative, 2, 0, 1) \ V(I8x16AddSaturateS, Operator::kCommutative, 2, 0, 1) \ @@ -342,6 +371,7 @@ MachineType AtomicOpType(Operator const* op) { V(I8x16Ne, Operator::kCommutative, 2, 0, 1) \ V(I8x16GtS, Operator::kNoProperties, 2, 0, 1) \ V(I8x16GeS, Operator::kNoProperties, 2, 0, 1) \ + V(I8x16ShrU, Operator::kNoProperties, 2, 0, 1) \ V(I8x16UConvertI16x8, Operator::kNoProperties, 2, 0, 1) \ V(I8x16AddSaturateU, Operator::kCommutative, 2, 0, 1) \ V(I8x16SubSaturateU, Operator::kNoProperties, 2, 0, 1) \ @@ -364,8 +394,11 @@ MachineType AtomicOpType(Operator const* op) { V(S1x8AnyTrue, Operator::kNoProperties, 1, 0, 1) \ V(S1x8AllTrue, Operator::kNoProperties, 1, 0, 1) \ V(S1x16AnyTrue, Operator::kNoProperties, 1, 0, 1) \ - V(S1x16AllTrue, Operator::kNoProperties, 1, 0, 1) + V(S1x16AllTrue, Operator::kNoProperties, 1, 0, 1) \ + V(StackPointerGreaterThan, Operator::kNoProperties, 1, 0, 1) +// The format is: +// V(Name, properties, value_input_count, control_input_count, output_count) #define PURE_OPTIONAL_OP_LIST(V) \ V(Word32Ctz, Operator::kNoProperties, 1, 0, 1) \ V(Word64Ctz, Operator::kNoProperties, 1, 0, 1) \ @@ -385,6 +418,8 @@ MachineType AtomicOpType(Operator const* op) { V(Float32RoundTiesEven, Operator::kNoProperties, 1, 0, 1) \ V(Float64RoundTiesEven, Operator::kNoProperties, 1, 0, 1) +// The format is: +// V(Name, properties, value_input_count, control_input_count, output_count) #define OVERFLOW_OP_LIST(V) \ V(Int32AddWithOverflow, Operator::kAssociative | Operator::kCommutative) \ V(Int32SubWithOverflow, Operator::kNoProperties) \ @@ -467,12 +502,6 @@ MachineType AtomicOpType(Operator const* op) { V(I16x8, 8) \ V(I8x16, 16) -#define SIMD_FORMAT_LIST(V) \ - V(64x2, 64) \ - V(32x4, 32) \ - V(16x8, 16) \ - V(8x16, 8) - #define STACK_SLOT_CACHED_SIZES_ALIGNMENTS_LIST(V) \ V(4, 0) V(8, 0) V(16, 0) V(4, 4) V(8, 8) V(16, 16) @@ -1305,28 +1334,6 @@ const Operator* MachineOperatorBuilder::Word64PoisonOnSpeculation() { SIMD_LANE_OP_LIST(SIMD_LANE_OPS) #undef SIMD_LANE_OPS -#define SIMD_SHIFT_OPS(format, bits) \ - const Operator* MachineOperatorBuilder::I##format##Shl(int32_t shift) { \ - DCHECK(0 <= shift && shift < bits); \ - return new (zone_) \ - Operator1<int32_t>(IrOpcode::kI##format##Shl, Operator::kPure, \ - "Shift left", 1, 0, 0, 1, 0, 0, shift); \ - } \ - const Operator* MachineOperatorBuilder::I##format##ShrS(int32_t shift) { \ - DCHECK(0 < shift && shift <= bits); \ - return new (zone_) \ - Operator1<int32_t>(IrOpcode::kI##format##ShrS, Operator::kPure, \ - "Arithmetic shift right", 1, 0, 0, 1, 0, 0, shift); \ - } \ - const Operator* MachineOperatorBuilder::I##format##ShrU(int32_t shift) { \ - DCHECK(0 <= shift && shift < bits); \ - return new (zone_) \ - Operator1<int32_t>(IrOpcode::kI##format##ShrU, Operator::kPure, \ - "Shift right", 1, 0, 0, 
1, 0, 0, shift); \ - } -SIMD_FORMAT_LIST(SIMD_SHIFT_OPS) -#undef SIMD_SHIFT_OPS - const Operator* MachineOperatorBuilder::S8x16Shuffle( const uint8_t shuffle[16]) { uint8_t* array = zone_->NewArray<uint8_t>(16); @@ -1354,7 +1361,6 @@ const uint8_t* S8x16ShuffleOf(Operator const* op) { #undef ATOMIC_REPRESENTATION_LIST #undef ATOMIC64_REPRESENTATION_LIST #undef SIMD_LANE_OP_LIST -#undef SIMD_FORMAT_LIST #undef STACK_SLOT_CACHED_SIZES_ALIGNMENTS_LIST } // namespace compiler diff --git a/deps/v8/src/compiler/machine-operator.h b/deps/v8/src/compiler/machine-operator.h index 0f81301206..17db145f58 100644 --- a/deps/v8/src/compiler/machine-operator.h +++ b/deps/v8/src/compiler/machine-operator.h @@ -314,6 +314,12 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final // This operator reinterprets the bits of a word as a Smi. const Operator* BitcastWordToTaggedSigned(); + // This operator reinterprets the bits of a word32 as a Compressed Smi. + const Operator* BitcastWord32ToCompressedSigned(); + + // This operator reinterprets the bits of a Compressed Smi as a word32. + const Operator* BitcastCompressedSignedToWord32(); + // JavaScript float64 to int32/uint32 truncation. const Operator* TruncateFloat64ToWord32(); @@ -471,7 +477,13 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final const Operator* F64x2Splat(); const Operator* F64x2Abs(); const Operator* F64x2Neg(); + const Operator* F64x2Add(); + const Operator* F64x2Sub(); + const Operator* F64x2Mul(); + const Operator* F64x2Div(); const Operator* F64x2ExtractLane(int32_t); + const Operator* F64x2Min(); + const Operator* F64x2Max(); const Operator* F64x2ReplaceLane(int32_t); const Operator* F64x2Eq(); const Operator* F64x2Ne(); @@ -503,16 +515,20 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final const Operator* I64x2ExtractLane(int32_t); const Operator* I64x2ReplaceLane(int32_t); const Operator* I64x2Neg(); - const Operator* I64x2Shl(int32_t); - const Operator* I64x2ShrS(int32_t); + const Operator* I64x2Shl(); + const Operator* I64x2ShrS(); const Operator* I64x2Add(); const Operator* I64x2Sub(); const Operator* I64x2Mul(); + const Operator* I64x2MinS(); + const Operator* I64x2MaxS(); const Operator* I64x2Eq(); const Operator* I64x2Ne(); const Operator* I64x2GtS(); const Operator* I64x2GeS(); - const Operator* I64x2ShrU(int32_t); + const Operator* I64x2ShrU(); + const Operator* I64x2MinU(); + const Operator* I64x2MaxU(); const Operator* I64x2GtU(); const Operator* I64x2GeU(); @@ -523,8 +539,8 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final const Operator* I32x4SConvertI16x8Low(); const Operator* I32x4SConvertI16x8High(); const Operator* I32x4Neg(); - const Operator* I32x4Shl(int32_t); - const Operator* I32x4ShrS(int32_t); + const Operator* I32x4Shl(); + const Operator* I32x4ShrS(); const Operator* I32x4Add(); const Operator* I32x4AddHoriz(); const Operator* I32x4Sub(); @@ -539,7 +555,7 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final const Operator* I32x4UConvertF32x4(); const Operator* I32x4UConvertI16x8Low(); const Operator* I32x4UConvertI16x8High(); - const Operator* I32x4ShrU(int32_t); + const Operator* I32x4ShrU(); const Operator* I32x4MinU(); const Operator* I32x4MaxU(); const Operator* I32x4GtU(); @@ -551,8 +567,8 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final const Operator* I16x8SConvertI8x16Low(); const Operator* I16x8SConvertI8x16High(); const Operator* I16x8Neg(); - const Operator* I16x8Shl(int32_t); - const Operator* I16x8ShrS(int32_t); + const Operator* I16x8Shl(); + const Operator* I16x8ShrS(); 
const Operator* I16x8SConvertI32x4(); const Operator* I16x8Add(); const Operator* I16x8AddSaturateS(); @@ -569,7 +585,7 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final const Operator* I16x8UConvertI8x16Low(); const Operator* I16x8UConvertI8x16High(); - const Operator* I16x8ShrU(int32_t); + const Operator* I16x8ShrU(); const Operator* I16x8UConvertI32x4(); const Operator* I16x8AddSaturateU(); const Operator* I16x8SubSaturateU(); @@ -582,8 +598,8 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final const Operator* I8x16ExtractLane(int32_t); const Operator* I8x16ReplaceLane(int32_t); const Operator* I8x16Neg(); - const Operator* I8x16Shl(int32_t); - const Operator* I8x16ShrS(int32_t); + const Operator* I8x16Shl(); + const Operator* I8x16ShrS(); const Operator* I8x16SConvertI16x8(); const Operator* I8x16Add(); const Operator* I8x16AddSaturateS(); @@ -597,7 +613,7 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final const Operator* I8x16GtS(); const Operator* I8x16GeS(); - const Operator* I8x16ShrU(int32_t); + const Operator* I8x16ShrU(); const Operator* I8x16UConvertI16x8(); const Operator* I8x16AddSaturateU(); const Operator* I8x16SubSaturateU(); @@ -651,10 +667,12 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final const Operator* Word64PoisonOnSpeculation(); // Access to the machine stack. - const Operator* LoadStackPointer(); const Operator* LoadFramePointer(); const Operator* LoadParentFramePointer(); + // Compares: stack_pointer > value. + const Operator* StackPointerGreaterThan(); + // Memory barrier. const Operator* MemBarrier(); diff --git a/deps/v8/src/compiler/map-inference.cc b/deps/v8/src/compiler/map-inference.cc index 07ac95b4f7..1e2434f4ae 100644 --- a/deps/v8/src/compiler/map-inference.cc +++ b/deps/v8/src/compiler/map-inference.cc @@ -5,9 +5,9 @@ #include "src/compiler/map-inference.h" #include "src/compiler/compilation-dependencies.h" +#include "src/compiler/feedback-source.h" #include "src/compiler/js-graph.h" #include "src/compiler/simplified-operator.h" -#include "src/compiler/vector-slot-pair.h" #include "src/objects/map-inl.h" #include "src/zone/zone-handle-set.h" @@ -93,7 +93,7 @@ MapHandles const& MapInference::GetMaps() { void MapInference::InsertMapChecks(JSGraph* jsgraph, Node** effect, Node* control, - const VectorSlotPair& feedback) { + const FeedbackSource& feedback) { CHECK(HaveMaps()); CHECK(feedback.IsValid()); ZoneHandleSet<Map> maps; @@ -112,7 +112,7 @@ bool MapInference::RelyOnMapsViaStability( bool MapInference::RelyOnMapsPreferStability( CompilationDependencies* dependencies, JSGraph* jsgraph, Node** effect, - Node* control, const VectorSlotPair& feedback) { + Node* control, const FeedbackSource& feedback) { CHECK(HaveMaps()); if (Safe()) return false; if (RelyOnMapsViaStability(dependencies)) return true; @@ -123,7 +123,7 @@ bool MapInference::RelyOnMapsPreferStability( bool MapInference::RelyOnMapsHelper(CompilationDependencies* dependencies, JSGraph* jsgraph, Node** effect, Node* control, - const VectorSlotPair& feedback) { + const FeedbackSource& feedback) { if (Safe()) return true; auto is_stable = [this](Handle<Map> map) { diff --git a/deps/v8/src/compiler/map-inference.h b/deps/v8/src/compiler/map-inference.h index 64cec77f2b..acba2eb0f2 100644 --- a/deps/v8/src/compiler/map-inference.h +++ b/deps/v8/src/compiler/map-inference.h @@ -13,11 +13,11 @@ namespace v8 { namespace internal { -class VectorSlotPair; namespace compiler { class CompilationDependencies; +struct FeedbackSource; class JSGraph; class JSHeapBroker; class Node; @@ 
-67,10 +67,10 @@ class MapInference { // dependencies were taken. bool RelyOnMapsPreferStability(CompilationDependencies* dependencies, JSGraph* jsgraph, Node** effect, Node* control, - const VectorSlotPair& feedback); + const FeedbackSource& feedback); // Inserts map checks even if maps were already reliable. void InsertMapChecks(JSGraph* jsgraph, Node** effect, Node* control, - const VectorSlotPair& feedback); + const FeedbackSource& feedback); // Internally marks the maps as reliable (thus bypassing the safety check) and // returns the NoChange reduction. USE THIS ONLY WHEN RETURNING, e.g.: @@ -98,7 +98,7 @@ class MapInference { std::function<bool(InstanceType)> f) const; V8_WARN_UNUSED_RESULT bool RelyOnMapsHelper( CompilationDependencies* dependencies, JSGraph* jsgraph, Node** effect, - Node* control, const VectorSlotPair& feedback); + Node* control, const FeedbackSource& feedback); }; } // namespace compiler diff --git a/deps/v8/src/compiler/memory-optimizer.cc b/deps/v8/src/compiler/memory-optimizer.cc index 368c060c1d..8684f2ce3c 100644 --- a/deps/v8/src/compiler/memory-optimizer.cc +++ b/deps/v8/src/compiler/memory-optimizer.cc @@ -101,6 +101,12 @@ bool CanAllocate(const Node* node) { switch (node->opcode()) { case IrOpcode::kBitcastTaggedToWord: case IrOpcode::kBitcastWordToTagged: + case IrOpcode::kChangeCompressedToTagged: + case IrOpcode::kChangeCompressedSignedToTaggedSigned: + case IrOpcode::kChangeCompressedPointerToTaggedPointer: + case IrOpcode::kChangeTaggedToCompressed: + case IrOpcode::kChangeTaggedSignedToCompressedSigned: + case IrOpcode::kChangeTaggedPointerToCompressedPointer: case IrOpcode::kComment: case IrOpcode::kAbortCSAAssert: case IrOpcode::kDebugBreak: @@ -161,7 +167,6 @@ bool CanAllocate(const Node* node) { return false; case IrOpcode::kCall: - case IrOpcode::kCallWithCallerSavedRegisters: return !(CallDescriptorOf(node->op())->flags() & CallDescriptor::kNoAllocate); default: @@ -231,8 +236,6 @@ void MemoryOptimizer::VisitNode(Node* node, AllocationState const* state) { return VisitAllocateRaw(node, state); case IrOpcode::kCall: return VisitCall(node, state); - case IrOpcode::kCallWithCallerSavedRegisters: - return VisitCallWithCallerSavedRegisters(node, state); case IrOpcode::kLoadFromObject: return VisitLoadFromObject(node, state); case IrOpcode::kLoadElement: @@ -258,6 +261,35 @@ void MemoryOptimizer::VisitNode(Node* node, AllocationState const* state) { #define __ gasm()-> +bool MemoryOptimizer::AllocationTypeNeedsUpdateToOld(Node* const node, + const Edge edge) { + if (COMPRESS_POINTERS_BOOL && IrOpcode::IsCompressOpcode(node->opcode())) { + // In Pointer Compression we might have a Compress node between an + // AllocateRaw and the value used as input. This case is trickier since we + // have to check all of the Compress node edges to test for a StoreField. + for (Edge const new_edge : node->use_edges()) { + if (AllocationTypeNeedsUpdateToOld(new_edge.from(), new_edge)) { + return true; + } + } + + // If we arrived here, we tested all the edges of the Compress node and + // didn't find it necessary to update the AllocationType. + return false; + } + + // Test to see if we need to update the AllocationType. 
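Note: the recursion above only follows the use edges of Compress nodes; the base case below is the same "young allocation stored into an old allocation" test that VisitAllocateRaw used to inline, factored out so that it can see through ChangeTaggedPointerToCompressedPointer and friends. The shape being detected, illustratively:

  //   a = AllocateRaw(kOld)
  //   v = ChangeTaggedPointerToCompressedPointer(b)  // b = AllocateRaw(kYoung)
  //   StoreField(a, offset, v)                       // => b gets retagged kOld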
+ if (node->opcode() == IrOpcode::kStoreField && edge.index() == 1) { + Node* parent = node->InputAt(0); + if (parent->opcode() == IrOpcode::kAllocateRaw && + AllocationTypeOf(parent->op()) == AllocationType::kOld) { + return true; + } + } + + return false; +} + void MemoryOptimizer::VisitAllocateRaw(Node* node, AllocationState const* state) { DCHECK_EQ(IrOpcode::kAllocateRaw, node->opcode()); @@ -278,8 +310,17 @@ void MemoryOptimizer::VisitAllocateRaw(Node* node, if (allocation_type == AllocationType::kOld) { for (Edge const edge : node->use_edges()) { Node* const user = edge.from(); + if (user->opcode() == IrOpcode::kStoreField && edge.index() == 0) { - Node* const child = user->InputAt(1); + Node* child = user->InputAt(1); + // In Pointer Compression we might have a Compress node between an + // AllocateRaw and the value used as input. If so, we need to update + // child to point to the StoreField. + if (COMPRESS_POINTERS_BOOL && + IrOpcode::IsCompressOpcode(child->opcode())) { + child = child->InputAt(0); + } + if (child->opcode() == IrOpcode::kAllocateRaw && AllocationTypeOf(child->op()) == AllocationType::kYoung) { NodeProperties::ChangeOp(child, node->op()); @@ -291,13 +332,9 @@ void MemoryOptimizer::VisitAllocateRaw(Node* node, DCHECK_EQ(AllocationType::kYoung, allocation_type); for (Edge const edge : node->use_edges()) { Node* const user = edge.from(); - if (user->opcode() == IrOpcode::kStoreField && edge.index() == 1) { - Node* const parent = user->InputAt(0); - if (parent->opcode() == IrOpcode::kAllocateRaw && - AllocationTypeOf(parent->op()) == AllocationType::kOld) { - allocation_type = AllocationType::kOld; - break; - } + if (AllocationTypeNeedsUpdateToOld(user, edge)) { + allocation_type = AllocationType::kOld; + break; } } } @@ -523,16 +560,6 @@ void MemoryOptimizer::VisitCall(Node* node, AllocationState const* state) { EnqueueUses(node, state); } -void MemoryOptimizer::VisitCallWithCallerSavedRegisters( - Node* node, AllocationState const* state) { - DCHECK_EQ(IrOpcode::kCallWithCallerSavedRegisters, node->opcode()); - // If the call can allocate, we start with a fresh state. 
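Note: the visitor being deleted here was functionally identical to VisitCall, so nothing is lost; throughout this patch kCallWithCallerSavedRegisters is retired as a separate opcode (see the opcodes.h and verifier hunks), with plain kCall presumably carrying the caller-saved-register information in its CallDescriptor instead.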
- if (!(CallDescriptorOf(node->op())->flags() & CallDescriptor::kNoAllocate)) { - state = empty_state(); - } - EnqueueUses(node, state); -} - void MemoryOptimizer::VisitLoadElement(Node* node, AllocationState const* state) { DCHECK_EQ(IrOpcode::kLoadElement, node->opcode()); @@ -540,9 +567,7 @@ void MemoryOptimizer::VisitLoadElement(Node* node, Node* index = node->InputAt(1); node->ReplaceInput(1, ComputeIndex(access, index)); MachineType type = access.machine_type; - if (NeedsPoisoning(access.load_sensitivity) && - type.representation() != MachineRepresentation::kTaggedPointer && - type.representation() != MachineRepresentation::kCompressedPointer) { + if (NeedsPoisoning(access.load_sensitivity)) { NodeProperties::ChangeOp(node, machine()->PoisonedLoad(type)); } else { NodeProperties::ChangeOp(node, machine()->Load(type)); @@ -556,9 +581,7 @@ void MemoryOptimizer::VisitLoadField(Node* node, AllocationState const* state) { Node* offset = jsgraph()->IntPtrConstant(access.offset - access.tag()); node->InsertInput(graph()->zone(), 1, offset); MachineType type = access.machine_type; - if (NeedsPoisoning(access.load_sensitivity) && - type.representation() != MachineRepresentation::kTaggedPointer && - type.representation() != MachineRepresentation::kCompressedPointer) { + if (NeedsPoisoning(access.load_sensitivity)) { NodeProperties::ChangeOp(node, machine()->PoisonedLoad(type)); } else { NodeProperties::ChangeOp(node, machine()->Load(type)); diff --git a/deps/v8/src/compiler/memory-optimizer.h b/deps/v8/src/compiler/memory-optimizer.h index 71f33fa3d7..a663bf07ed 100644 --- a/deps/v8/src/compiler/memory-optimizer.h +++ b/deps/v8/src/compiler/memory-optimizer.h @@ -118,7 +118,6 @@ class MemoryOptimizer final { void VisitNode(Node*, AllocationState const*); void VisitAllocateRaw(Node*, AllocationState const*); void VisitCall(Node*, AllocationState const*); - void VisitCallWithCallerSavedRegisters(Node*, AllocationState const*); void VisitLoadFromObject(Node*, AllocationState const*); void VisitLoadElement(Node*, AllocationState const*); void VisitLoadField(Node*, AllocationState const*); @@ -142,6 +141,11 @@ class MemoryOptimizer final { bool NeedsPoisoning(LoadSensitivity load_sensitivity) const; + // Returns true if the AllocationType of the current AllocateRaw node that we + // are visiting needs to be updated to kOld, due to propagation of tenuring + // from outer to inner allocations. + bool AllocationTypeNeedsUpdateToOld(Node* const user, const Edge edge); + AllocationState const* empty_state() const { return empty_state_; } Graph* graph() const; Isolate* isolate() const; diff --git a/deps/v8/src/compiler/node-matchers.h b/deps/v8/src/compiler/node-matchers.h index 7c0c702e3f..20698f4cd6 100644 --- a/deps/v8/src/compiler/node-matchers.h +++ b/deps/v8/src/compiler/node-matchers.h @@ -761,66 +761,6 @@ struct V8_EXPORT_PRIVATE DiamondMatcher Node* if_false_; }; -template <class BinopMatcher, IrOpcode::Value expected_opcode> -struct WasmStackCheckMatcher { - explicit WasmStackCheckMatcher(Node* compare) : compare_(compare) {} - - bool Matched() { - if (compare_->opcode() != expected_opcode) return false; - BinopMatcher m(compare_); - return MatchedInternal(m.left(), m.right()); - } - - private: - bool MatchedInternal(const typename BinopMatcher::LeftMatcher& l, - const typename BinopMatcher::RightMatcher& r) { - // In wasm, the stack check is performed by loading the value given by - // the address of a field stored in the instance object. That object is - // passed as a parameter. 
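Note: the two matchers deleted in this file existed only to recognize a "load of the stack limit compared against a loaded stack pointer" shape, and both become dead once stack checks are expressed with the dedicated StackPointerGreaterThan operator introduced elsewhere in this patch; instruction selectors can key on the opcode directly instead of pattern-matching graph shapes.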
- if (l.IsLoad() && r.IsLoadStackPointer()) { - LoadMatcher<LoadMatcher<NodeMatcher>> mleft(l.node()); - if (mleft.object().IsLoad() && mleft.index().Is(0) && - mleft.object().object().IsParameter()) { - return true; - } - } - return false; - } - Node* compare_; -}; - -template <class BinopMatcher, IrOpcode::Value expected_opcode> -struct StackCheckMatcher { - StackCheckMatcher(Isolate* isolate, Node* compare) - : isolate_(isolate), compare_(compare) { - DCHECK_NOT_NULL(isolate); - } - bool Matched() { - // TODO(jgruber): Ideally, we could be more flexible here and also match the - // same pattern with switched operands (i.e.: left is LoadStackPointer and - // right is the js_stack_limit load). But to be correct in all cases, we'd - // then have to invert the outcome of the stack check comparison. - if (compare_->opcode() != expected_opcode) return false; - BinopMatcher m(compare_); - return MatchedInternal(m.left(), m.right()); - } - - private: - bool MatchedInternal(const typename BinopMatcher::LeftMatcher& l, - const typename BinopMatcher::RightMatcher& r) { - if (l.IsLoad() && r.IsLoadStackPointer()) { - LoadMatcher<ExternalReferenceMatcher> mleft(l.node()); - ExternalReference js_stack_limit = - ExternalReference::address_of_stack_limit(isolate_); - if (mleft.object().Is(js_stack_limit) && mleft.index().Is(0)) return true; - } - return false; - } - - Isolate* isolate_; - Node* compare_; -}; - } // namespace compiler } // namespace internal } // namespace v8 diff --git a/deps/v8/src/compiler/node-properties.cc b/deps/v8/src/compiler/node-properties.cc index 1e00ec00f4..7ba3a59f6f 100644 --- a/deps/v8/src/compiler/node-properties.cc +++ b/deps/v8/src/compiler/node-properties.cc @@ -380,7 +380,10 @@ base::Optional<MapRef> NodeProperties::GetJSCreateMap(JSHeapBroker* broker, ObjectRef target = mtarget.Ref(broker); JSFunctionRef newtarget = mnewtarget.Ref(broker).AsJSFunction(); if (newtarget.map().has_prototype_slot() && newtarget.has_initial_map()) { - if (broker->mode() == JSHeapBroker::kSerializing) newtarget.Serialize(); + if (!newtarget.serialized()) { + TRACE_BROKER_MISSING(broker, "initial map on " << newtarget); + return base::nullopt; + } MapRef initial_map = newtarget.initial_map(); if (initial_map.GetConstructor().equals(target)) { DCHECK(target.AsJSFunction().map().is_constructor()); @@ -449,7 +452,7 @@ NodeProperties::InferReceiverMapsResult NodeProperties::InferReceiverMapsUnsafe( } case IrOpcode::kJSCreatePromise: { if (IsSame(receiver, effect)) { - *maps_return = ZoneHandleSet<Map>(broker->native_context() + *maps_return = ZoneHandleSet<Map>(broker->target_native_context() .promise_function() .initial_map() .object()); diff --git a/deps/v8/src/compiler/node.cc b/deps/v8/src/compiler/node.cc index 7688379e9f..525ce33c84 100644 --- a/deps/v8/src/compiler/node.cc +++ b/deps/v8/src/compiler/node.cc @@ -22,10 +22,12 @@ Node::OutOfLineInputs* Node::OutOfLineInputs::New(Zone* zone, int capacity) { void Node::OutOfLineInputs::ExtractFrom(Use* old_use_ptr, Node** old_input_ptr, int count) { + DCHECK_GE(count, 0); // Extract the inputs from the old use and input pointers and copy them // to this out-of-line-storage. 
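Note: the CHECKs added in this file guard a real overflow hazard: a Use records its input index in a bitfield, so a node whose input count exceeds the field's capacity would silently corrupt use lists. The checks make that a hard failure, and the companion node.h hunk widens InputIndexField from 17 to 31 bits so that large input counts stay representable:

  // Guard sketch (mirrors the hunk): legal only if the largest index fits.
  //   CHECK_IMPLIES(count > 0, Use::InputIndexField::is_valid(count - 1));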
Use* new_use_ptr = reinterpret_cast<Use*>(this) - 1; Node** new_input_ptr = inputs(); + CHECK_IMPLIES(count > 0, Use::InputIndexField::is_valid(count - 1)); for (int current = 0; current < count; current++) { new_use_ptr->bit_field_ = Use::InputIndexField::encode(current) | Use::InlineField::encode(false); @@ -51,6 +53,8 @@ void Node::OutOfLineInputs::ExtractFrom(Use* old_use_ptr, Node** old_input_ptr, Node* Node::New(Zone* zone, NodeId id, const Operator* op, int input_count, Node* const* inputs, bool has_extensible_inputs) { + DCHECK_GE(input_count, 0); + Node** input_ptr; Use* use_ptr; Node* node; @@ -102,6 +106,8 @@ Node* Node::New(Zone* zone, NodeId id, const Operator* op, int input_count, } // Initialize the input pointers and the uses. + CHECK_IMPLIES(input_count > 0, + Use::InputIndexField::is_valid(input_count - 1)); for (int current = 0; current < input_count; ++current) { Node* to = *inputs++; input_ptr[current] = to; @@ -137,19 +143,20 @@ void Node::AppendInput(Zone* zone, Node* new_to) { DCHECK_NOT_NULL(zone); DCHECK_NOT_NULL(new_to); - int inline_count = InlineCountField::decode(bit_field_); - int inline_capacity = InlineCapacityField::decode(bit_field_); + int const inline_count = InlineCountField::decode(bit_field_); + int const inline_capacity = InlineCapacityField::decode(bit_field_); if (inline_count < inline_capacity) { // Append inline input. bit_field_ = InlineCountField::update(bit_field_, inline_count + 1); *GetInputPtr(inline_count) = new_to; Use* use = GetUsePtr(inline_count); + STATIC_ASSERT(InlineCapacityField::kMax <= Use::InputIndexField::kMax); use->bit_field_ = Use::InputIndexField::encode(inline_count) | Use::InlineField::encode(true); new_to->AppendUse(use); } else { // Append out-of-line input. - int input_count = InputCount(); + int const input_count = InputCount(); OutOfLineInputs* outline = nullptr; if (inline_count != kOutlineMarker) { // switch to out of line inputs. @@ -172,6 +179,7 @@ void Node::AppendInput(Zone* zone, Node* new_to) { outline->count_++; *GetInputPtr(input_count) = new_to; Use* use = GetUsePtr(input_count); + CHECK(Use::InputIndexField::is_valid(input_count)); use->bit_field_ = Use::InputIndexField::encode(input_count) | Use::InlineField::encode(false); new_to->AppendUse(use); @@ -336,9 +344,13 @@ Node::Node(NodeId id, const Operator* op, int inline_count, int inline_capacity) bit_field_(IdField::encode(id) | InlineCountField::encode(inline_count) | InlineCapacityField::encode(inline_capacity)), first_use_(nullptr) { + // Check that the id didn't overflow. + STATIC_ASSERT(IdField::kMax < std::numeric_limits<NodeId>::max()); + CHECK(IdField::is_valid(id)); + // Inputs must either be out of line or within the inline capacity. - DCHECK_GE(kMaxInlineCapacity, inline_capacity); DCHECK(inline_count == kOutlineMarker || inline_count <= inline_capacity); + DCHECK_LE(inline_capacity, kMaxInlineCapacity); } diff --git a/deps/v8/src/compiler/node.h b/deps/v8/src/compiler/node.h index d7daca38ef..76ea4bb1a9 100644 --- a/deps/v8/src/compiler/node.h +++ b/deps/v8/src/compiler/node.h @@ -201,9 +201,7 @@ class V8_EXPORT_PRIVATE Node final { } using InlineField = BitField<bool, 0, 1>; - using InputIndexField = BitField<unsigned, 1, 17>; - // Leaving some space in the bitset in case we ever decide to record - // the output index. 
+ using InputIndexField = BitField<unsigned, 1, 31>; }; //============================================================================ @@ -291,7 +289,6 @@ class V8_EXPORT_PRIVATE Node final { using InlineCountField = BitField<unsigned, 24, 4>; using InlineCapacityField = BitField<unsigned, 28, 4>; static const int kOutlineMarker = InlineCountField::kMax; - static const int kMaxInlineCount = InlineCountField::kMax - 1; static const int kMaxInlineCapacity = InlineCapacityField::kMax - 1; const Operator* op_; diff --git a/deps/v8/src/compiler/opcodes.h b/deps/v8/src/compiler/opcodes.h index d621e23e3a..fe45d9276a 100644 --- a/deps/v8/src/compiler/opcodes.h +++ b/deps/v8/src/compiler/opcodes.h @@ -66,7 +66,6 @@ V(ObjectId) \ V(TypedObjectState) \ V(Call) \ - V(CallWithCallerSavedRegisters) \ V(Parameter) \ V(OsrValue) \ V(LoopExit) \ @@ -204,6 +203,7 @@ V(JSForInEnumerate) \ V(JSForInNext) \ V(JSForInPrepare) \ + V(JSGetIterator) \ V(JSLoadMessage) \ V(JSStoreMessage) \ V(JSLoadModule) \ @@ -617,15 +617,33 @@ V(Float64Mod) \ V(Float64Pow) -#define MACHINE_WORD64_ATOMIC_OP_LIST(V) \ - V(Word64AtomicLoad) \ - V(Word64AtomicStore) \ - V(Word64AtomicAdd) \ - V(Word64AtomicSub) \ - V(Word64AtomicAnd) \ - V(Word64AtomicOr) \ - V(Word64AtomicXor) \ - V(Word64AtomicExchange) \ +#define MACHINE_ATOMIC_OP_LIST(V) \ + V(Word32AtomicLoad) \ + V(Word32AtomicStore) \ + V(Word32AtomicExchange) \ + V(Word32AtomicCompareExchange) \ + V(Word32AtomicAdd) \ + V(Word32AtomicSub) \ + V(Word32AtomicAnd) \ + V(Word32AtomicOr) \ + V(Word32AtomicXor) \ + V(Word32AtomicPairLoad) \ + V(Word32AtomicPairStore) \ + V(Word32AtomicPairAdd) \ + V(Word32AtomicPairSub) \ + V(Word32AtomicPairAnd) \ + V(Word32AtomicPairOr) \ + V(Word32AtomicPairXor) \ + V(Word32AtomicPairExchange) \ + V(Word32AtomicPairCompareExchange) \ + V(Word64AtomicLoad) \ + V(Word64AtomicStore) \ + V(Word64AtomicAdd) \ + V(Word64AtomicSub) \ + V(Word64AtomicAnd) \ + V(Word64AtomicOr) \ + V(Word64AtomicXor) \ + V(Word64AtomicExchange) \ V(Word64AtomicCompareExchange) #define MACHINE_OP_LIST(V) \ @@ -637,7 +655,7 @@ MACHINE_FLOAT32_UNOP_LIST(V) \ MACHINE_FLOAT64_BINOP_LIST(V) \ MACHINE_FLOAT64_UNOP_LIST(V) \ - MACHINE_WORD64_ATOMIC_OP_LIST(V) \ + MACHINE_ATOMIC_OP_LIST(V) \ V(AbortCSAAssert) \ V(DebugBreak) \ V(Comment) \ @@ -656,6 +674,8 @@ V(BitcastTaggedSignedToWord) \ V(BitcastWordToTagged) \ V(BitcastWordToTaggedSigned) \ + V(BitcastWord32ToCompressedSigned) \ + V(BitcastCompressedSignedToWord32) \ V(TruncateFloat64ToWord32) \ V(ChangeFloat32ToFloat64) \ V(ChangeFloat64ToInt32) \ @@ -702,7 +722,6 @@ V(TaggedPoisonOnSpeculation) \ V(Word32PoisonOnSpeculation) \ V(Word64PoisonOnSpeculation) \ - V(LoadStackPointer) \ V(LoadFramePointer) \ V(LoadParentFramePointer) \ V(UnalignedLoad) \ @@ -716,30 +735,13 @@ V(ProtectedLoad) \ V(ProtectedStore) \ V(MemoryBarrier) \ - V(Word32AtomicLoad) \ - V(Word32AtomicStore) \ - V(Word32AtomicExchange) \ - V(Word32AtomicCompareExchange) \ - V(Word32AtomicAdd) \ - V(Word32AtomicSub) \ - V(Word32AtomicAnd) \ - V(Word32AtomicOr) \ - V(Word32AtomicXor) \ - V(Word32AtomicPairLoad) \ - V(Word32AtomicPairStore) \ - V(Word32AtomicPairAdd) \ - V(Word32AtomicPairSub) \ - V(Word32AtomicPairAnd) \ - V(Word32AtomicPairOr) \ - V(Word32AtomicPairXor) \ - V(Word32AtomicPairExchange) \ - V(Word32AtomicPairCompareExchange) \ V(SignExtendWord8ToInt32) \ V(SignExtendWord16ToInt32) \ V(SignExtendWord8ToInt64) \ V(SignExtendWord16ToInt64) \ V(SignExtendWord32ToInt64) \ - V(UnsafePointerAdd) + V(UnsafePointerAdd) \ + V(StackPointerGreaterThan) 
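Note: StackPointerGreaterThan, added at the tail of this list, replaces the removed LoadStackPointer by folding "load the stack pointer, then compare" into a single operator that takes the limit as its only value input and produces a bit. A hedged sketch of how a stack check changes shape (the builder calls are illustrative, not taken from this patch):

  // before: two nodes, later pattern-matched by StackCheckMatcher
  //   Node* sp    = graph->NewNode(machine->LoadStackPointer());
  //   Node* check = graph->NewNode(machine->Uint32LessThan(), limit, sp);
  // after: one dedicated node; no pattern matching required
  //   Node* check = graph->NewNode(machine->StackPointerGreaterThan(), limit);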
#define MACHINE_SIMD_OP_LIST(V) \ V(F64x2Splat) \ @@ -747,6 +749,12 @@ V(F64x2ReplaceLane) \ V(F64x2Abs) \ V(F64x2Neg) \ + V(F64x2Add) \ + V(F64x2Sub) \ + V(F64x2Mul) \ + V(F64x2Div) \ + V(F64x2Min) \ + V(F64x2Max) \ V(F64x2Eq) \ V(F64x2Ne) \ V(F64x2Lt) \ @@ -764,6 +772,7 @@ V(F32x4AddHoriz) \ V(F32x4Sub) \ V(F32x4Mul) \ + V(F32x4Div) \ V(F32x4Min) \ V(F32x4Max) \ V(F32x4Eq) \ @@ -781,11 +790,15 @@ V(I64x2Add) \ V(I64x2Sub) \ V(I64x2Mul) \ + V(I64x2MinS) \ + V(I64x2MaxS) \ V(I64x2Eq) \ V(I64x2Ne) \ V(I64x2GtS) \ V(I64x2GeS) \ V(I64x2ShrU) \ + V(I64x2MinU) \ + V(I64x2MaxU) \ V(I64x2GtU) \ V(I64x2GeU) \ V(I32x4Splat) \ diff --git a/deps/v8/src/compiler/operator-properties.cc b/deps/v8/src/compiler/operator-properties.cc index 959e743369..1fcc12291d 100644 --- a/deps/v8/src/compiler/operator-properties.cc +++ b/deps/v8/src/compiler/operator-properties.cc @@ -54,6 +54,7 @@ bool OperatorProperties::NeedsExactContext(const Operator* op) { case IrOpcode::kJSStackCheck: case IrOpcode::kJSStoreGlobal: case IrOpcode::kJSStoreMessage: + case IrOpcode::kJSGetIterator: return false; case IrOpcode::kJSCallRuntime: @@ -237,6 +238,9 @@ bool OperatorProperties::HasFrameStateInput(const Operator* op) { case IrOpcode::kJSPerformPromiseThen: case IrOpcode::kJSObjectIsArray: case IrOpcode::kJSRegExpTest: + + // Iterator protocol operations + case IrOpcode::kJSGetIterator: return true; default: diff --git a/deps/v8/src/compiler/per-isolate-compiler-cache.h b/deps/v8/src/compiler/per-isolate-compiler-cache.h index b715950c0c..f4f7438128 100644 --- a/deps/v8/src/compiler/per-isolate-compiler-cache.h +++ b/deps/v8/src/compiler/per-isolate-compiler-cache.h @@ -19,41 +19,41 @@ namespace compiler { class ObjectData; -// This class serves as a per-isolate container of data that should be -// persisted between compiler runs. For now it stores the code builtins -// so they are not serialized on each compiler run. +// This class serves as a container of data that should persist across all +// (optimizing) compiler runs in an isolate. For now it stores serialized data +// for various common objects such as builtins, so that these objects don't have +// to be serialized in each compilation job. See JSHeapBroker::InitializeRefsMap +// for details. class PerIsolateCompilerCache : public ZoneObject { public: explicit PerIsolateCompilerCache(Zone* zone) : zone_(zone), refs_snapshot_(nullptr) {} - RefsMap* GetSnapshot() { return refs_snapshot_; } + bool HasSnapshot() const { return refs_snapshot_ != nullptr; } + RefsMap* GetSnapshot() { + DCHECK(HasSnapshot()); + return refs_snapshot_; + } void SetSnapshot(RefsMap* refs) { - DCHECK_NULL(refs_snapshot_); + DCHECK(!HasSnapshot()); DCHECK(!refs->IsEmpty()); refs_snapshot_ = new (zone_) RefsMap(refs, zone_); + DCHECK(HasSnapshot()); } - bool HasSnapshot() const { return refs_snapshot_; } - Zone* zone() const { return zone_; } static void Setup(Isolate* isolate) { - if (isolate->compiler_cache()) return; - - // The following zone is supposed to contain compiler-related objects - // that should live through all compilations, as opposed to the - // broker_zone which holds per-compilation data. It's not meant for - // per-compilation or heap broker data. 
- Zone* compiler_zone = new Zone(isolate->allocator(), "Compiler zone"); - PerIsolateCompilerCache* compiler_cache = - new (compiler_zone) PerIsolateCompilerCache(compiler_zone); - isolate->set_compiler_utils(compiler_cache, compiler_zone); + if (isolate->compiler_cache() == nullptr) { + Zone* zone = new Zone(isolate->allocator(), "Compiler zone"); + PerIsolateCompilerCache* cache = new (zone) PerIsolateCompilerCache(zone); + isolate->set_compiler_utils(cache, zone); + } + DCHECK_NOT_NULL(isolate->compiler_cache()); } private: Zone* const zone_; - RefsMap* refs_snapshot_; }; diff --git a/deps/v8/src/compiler/pipeline.cc b/deps/v8/src/compiler/pipeline.cc index eb060b71e1..8b2f424789 100644 --- a/deps/v8/src/compiler/pipeline.cc +++ b/deps/v8/src/compiler/pipeline.cc @@ -110,6 +110,9 @@ class PipelineData { may_have_unverifiable_graph_(false), zone_stats_(zone_stats), pipeline_statistics_(pipeline_statistics), + roots_relative_addressing_enabled_( + !isolate->serializer_enabled() && + !isolate->IsGeneratingEmbeddedBuiltins()), graph_zone_scope_(zone_stats_, ZONE_NAME), graph_zone_(graph_zone_scope_.zone()), instruction_zone_scope_(zone_stats_, ZONE_NAME), @@ -173,12 +176,12 @@ class PipelineData { // For CodeStubAssembler and machine graph testing entry point. PipelineData(ZoneStats* zone_stats, OptimizedCompilationInfo* info, - Isolate* isolate, Graph* graph, Schedule* schedule, - SourcePositionTable* source_positions, + Isolate* isolate, AccountingAllocator* allocator, Graph* graph, + Schedule* schedule, SourcePositionTable* source_positions, NodeOriginTable* node_origins, JumpOptimizationInfo* jump_opt, const AssemblerOptions& assembler_options) : isolate_(isolate), - allocator_(isolate->allocator()), + allocator_(allocator), info_(info), debug_name_(info_->GetDebugName()), zone_stats_(zone_stats), @@ -320,6 +323,13 @@ class PipelineData { return assembler_options_; } + size_t* address_of_max_unoptimized_frame_height() { + return &max_unoptimized_frame_height_; + } + size_t max_unoptimized_frame_height() const { + return max_unoptimized_frame_height_; + } + CodeTracer* GetCodeTracer() const { return wasm_engine_ == nullptr ? isolate_->GetCodeTracer() : wasm_engine_->GetCodeTracer(); @@ -434,7 +444,8 @@ class PipelineData { codegen_zone(), frame(), linkage, sequence(), info(), isolate(), osr_helper_, start_source_position_, jump_optimization_info_, info()->GetPoisoningMitigationLevel(), assembler_options_, - info_->builtin_index(), std::move(buffer)); + info_->builtin_index(), max_unoptimized_frame_height(), + std::move(buffer)); } void BeginPhaseKind(const char* phase_kind_name) { @@ -451,6 +462,10 @@ class PipelineData { const char* debug_name() const { return debug_name_.get(); } + bool roots_relative_addressing_enabled() { + return roots_relative_addressing_enabled_; + } + private: Isolate* const isolate_; wasm::WasmEngine* const wasm_engine_ = nullptr; @@ -468,6 +483,7 @@ class PipelineData { CodeGenerator* code_generator_ = nullptr; Typer* typer_ = nullptr; Typer::Flags typer_flags_ = Typer::kNoFlags; + bool roots_relative_addressing_enabled_ = false; // All objects in the following group of fields are allocated in graph_zone_. // They are all set to nullptr when the graph_zone_ is destroyed. @@ -516,6 +532,11 @@ class PipelineData { JumpOptimizationInfo* jump_optimization_info_ = nullptr; AssemblerOptions assembler_options_; + // The maximal combined height of all inlined frames in their unoptimized + // state. 
Calculated during instruction selection, applied during code + // generation. + size_t max_unoptimized_frame_height_ = 0; + DISALLOW_COPY_AND_ASSIGN(PipelineData); }; @@ -893,9 +914,7 @@ PipelineCompilationJob::PipelineCompilationJob( // Note that the OptimizedCompilationInfo is not initialized at the time // we pass it to the CompilationJob constructor, but it is not // dereferenced there. - : OptimizedCompilationJob( - function->GetIsolate()->stack_guard()->real_climit(), - &compilation_info_, "TurboFan"), + : OptimizedCompilationJob(&compilation_info_, "TurboFan"), zone_(function->GetIsolate()->allocator(), ZONE_NAME), zone_stats_(function->GetIsolate()->allocator()), compilation_info_(&zone_, function->GetIsolate(), shared_info, function), @@ -973,11 +992,6 @@ PipelineCompilationJob::Status PipelineCompilationJob::PrepareJobImpl( linkage_ = new (compilation_info()->zone()) Linkage( Linkage::ComputeIncoming(compilation_info()->zone(), compilation_info())); - if (!pipeline_.CreateGraph()) { - if (isolate->has_pending_exception()) return FAILED; // Stack overflowed. - return AbortOptimization(BailoutReason::kGraphBuildingFailed); - } - if (compilation_info()->is_osr()) data_.InitializeOsrHelper(); // Make sure that we have generated the deopt entries code. This is in order @@ -985,6 +999,11 @@ PipelineCompilationJob::Status PipelineCompilationJob::PrepareJobImpl( // assembly. Deoptimizer::EnsureCodeForDeoptimizationEntries(isolate); + if (!pipeline_.CreateGraph()) { + CHECK(!isolate->has_pending_exception()); + return AbortOptimization(BailoutReason::kGraphBuildingFailed); + } + return SUCCEEDED; } @@ -1048,7 +1067,8 @@ void PipelineCompilationJob::RegisterWeakObjectsInOptimizedCode( class WasmHeapStubCompilationJob final : public OptimizedCompilationJob { public: - WasmHeapStubCompilationJob(Isolate* isolate, CallDescriptor* call_descriptor, + WasmHeapStubCompilationJob(Isolate* isolate, wasm::WasmEngine* wasm_engine, + CallDescriptor* call_descriptor, std::unique_ptr<Zone> zone, Graph* graph, Code::Kind kind, std::unique_ptr<char[]> debug_name, @@ -1057,17 +1077,19 @@ class WasmHeapStubCompilationJob final : public OptimizedCompilationJob { // Note that the OptimizedCompilationInfo is not initialized at the time // we pass it to the CompilationJob constructor, but it is not // dereferenced there. 
- : OptimizedCompilationJob(isolate->stack_guard()->real_climit(), &info_, - "TurboFan"), + : OptimizedCompilationJob(&info_, "TurboFan", + CompilationJob::State::kReadyToExecute), debug_name_(std::move(debug_name)), info_(CStrVector(debug_name_.get()), graph->zone(), kind), call_descriptor_(call_descriptor), - zone_stats_(isolate->allocator()), + zone_stats_(zone->allocator()), zone_(std::move(zone)), graph_(graph), - data_(&zone_stats_, &info_, isolate, graph_, nullptr, source_positions, + data_(&zone_stats_, &info_, isolate, wasm_engine->allocator(), graph_, + nullptr, source_positions, new (zone_.get()) NodeOriginTable(graph_), nullptr, options), - pipeline_(&data_) {} + pipeline_(&data_), + wasm_engine_(wasm_engine) {} ~WasmHeapStubCompilationJob() = default; @@ -1085,30 +1107,33 @@ class WasmHeapStubCompilationJob final : public OptimizedCompilationJob { Graph* graph_; PipelineData data_; PipelineImpl pipeline_; + wasm::WasmEngine* wasm_engine_; DISALLOW_COPY_AND_ASSIGN(WasmHeapStubCompilationJob); }; // static std::unique_ptr<OptimizedCompilationJob> -Pipeline::NewWasmHeapStubCompilationJob(Isolate* isolate, - CallDescriptor* call_descriptor, - std::unique_ptr<Zone> zone, - Graph* graph, Code::Kind kind, - std::unique_ptr<char[]> debug_name, - const AssemblerOptions& options, - SourcePositionTable* source_positions) { +Pipeline::NewWasmHeapStubCompilationJob( + Isolate* isolate, wasm::WasmEngine* wasm_engine, + CallDescriptor* call_descriptor, std::unique_ptr<Zone> zone, Graph* graph, + Code::Kind kind, std::unique_ptr<char[]> debug_name, + const AssemblerOptions& options, SourcePositionTable* source_positions) { return base::make_unique<WasmHeapStubCompilationJob>( - isolate, call_descriptor, std::move(zone), graph, kind, + isolate, wasm_engine, call_descriptor, std::move(zone), graph, kind, std::move(debug_name), options, source_positions); } CompilationJob::Status WasmHeapStubCompilationJob::PrepareJobImpl( Isolate* isolate) { + UNREACHABLE(); +} + +CompilationJob::Status WasmHeapStubCompilationJob::ExecuteJobImpl() { std::unique_ptr<PipelineStatistics> pipeline_statistics; if (FLAG_turbo_stats || FLAG_turbo_stats_nvp) { pipeline_statistics.reset(new PipelineStatistics( - &info_, isolate->GetTurboStatistics(), &zone_stats_)); + &info_, wasm_engine_->GetOrCreateTurboStatistics(), &zone_stats_)); pipeline_statistics->BeginPhaseKind("V8.WasmStubCodegen"); } if (info_.trace_turbo_json_enabled() || info_.trace_turbo_graph_enabled()) { @@ -1130,10 +1155,6 @@ CompilationJob::Status WasmHeapStubCompilationJob::PrepareJobImpl( << "\", \"source\":\"\",\n\"phases\":["; } pipeline_.RunPrintAndVerify("V8.WasmMachineCode", true); - return CompilationJob::SUCCEEDED; -} - -CompilationJob::Status WasmHeapStubCompilationJob::ExecuteJobImpl() { pipeline_.ComputeScheduledGraph(); if (pipeline_.SelectInstructionsAndAssemble(call_descriptor_)) { return CompilationJob::SUCCEEDED; @@ -1144,8 +1165,11 @@ CompilationJob::Status WasmHeapStubCompilationJob::ExecuteJobImpl() { CompilationJob::Status WasmHeapStubCompilationJob::FinalizeJobImpl( Isolate* isolate) { Handle<Code> code; - if (pipeline_.FinalizeCode(call_descriptor_).ToHandle(&code) && - pipeline_.CommitDependencies(code)) { + if (!pipeline_.FinalizeCode(call_descriptor_).ToHandle(&code)) { + V8::FatalProcessOutOfMemory(isolate, + "WasmHeapStubCompilationJob::FinalizeJobImpl"); + } + if (pipeline_.CommitDependencies(code)) { info_.SetCode(code); #ifdef ENABLE_DISASSEMBLER if (FLAG_print_opt_code) { @@ -1177,14 +1201,14 @@ struct GraphBuilderPhase 
{ if (data->info()->is_bailout_on_uninitialized()) { flags |= BytecodeGraphBuilderFlag::kBailoutOnUninitialized; } + + JSFunctionRef closure(data->broker(), data->info()->closure()); CallFrequency frequency(1.0f); BuildGraphFromBytecode( - data->broker(), temp_zone, data->info()->bytecode_array(), - data->info()->shared_info(), - handle(data->info()->closure()->feedback_vector(), data->isolate()), + data->broker(), temp_zone, closure.shared(), closure.feedback_vector(), data->info()->osr_offset(), data->jsgraph(), frequency, - data->source_positions(), data->native_context(), - SourcePosition::kNotInlined, flags, &data->info()->tick_counter()); + data->source_positions(), SourcePosition::kNotInlined, flags, + &data->info()->tick_counter()); } }; @@ -1253,14 +1277,15 @@ struct InliningPhase { // that need to live until code generation. JSNativeContextSpecialization native_context_specialization( &graph_reducer, data->jsgraph(), data->broker(), flags, - data->native_context(), data->dependencies(), temp_zone, info->zone()); + data->dependencies(), temp_zone, info->zone()); JSInliningHeuristic inlining(&graph_reducer, data->info()->is_inlining_enabled() ? JSInliningHeuristic::kGeneralInlining : JSInliningHeuristic::kRestrictedInlining, temp_zone, data->info(), data->jsgraph(), data->broker(), data->source_positions()); - JSIntrinsicLowering intrinsic_lowering(&graph_reducer, data->jsgraph()); + JSIntrinsicLowering intrinsic_lowering(&graph_reducer, data->jsgraph(), + data->broker()); AddReducer(data, &graph_reducer, &dead_code_elimination); AddReducer(data, &graph_reducer, &checkpoint_elimination); AddReducer(data, &graph_reducer, &common_reducer); @@ -1323,11 +1348,11 @@ struct UntyperPhase { } }; -struct SerializeStandardObjectsPhase { - static const char* phase_name() { return "V8.TFSerializeStandardObjects"; } +struct HeapBrokerInitializationPhase { + static const char* phase_name() { return "V8.TFHeapBrokerInitialization"; } void Run(PipelineData* data, Zone* temp_zone) { - data->broker()->SerializeStandardObjects(); + data->broker()->InitializeAndStartSerializing(data->native_context()); } }; @@ -1349,11 +1374,8 @@ struct CopyMetadataForConcurrentCompilePhase { } }; -// TODO(turbofan): Move all calls from CopyMetaDataForConcurrentCompilePhase -// here. Also all the calls to Serialize* methods that are currently sprinkled -// over inlining will move here as well. 
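For orientation on how structs like SerializationPhase below are invoked: every phase in this file exposes a static phase_name() plus a Run(PipelineData*, Zone*) method, and PipelineImpl dispatches them generically. A schematic sketch of that dispatch, assuming the PipelineImpl and PipelineRunScope declarations from this file; simplified, not a verbatim copy of pipeline.cc:

// Schematic: the scope reports Phase::phase_name() to tracing and
// --turbo-stats, and opens a temporary zone that is torn down when the
// phase finishes -- which is why phases allocate scratch data in temp_zone.
template <typename Phase, typename... Args>
void PipelineImpl::Run(Args&&... args) {
  PipelineRunScope scope(this->data_, Phase::phase_name());
  Phase phase;
  phase.Run(this->data_, scope.zone(), std::forward<Args>(args)...);
}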
struct SerializationPhase { - static const char* phase_name() { return "V8.TFSerializeBytecode"; } + static const char* phase_name() { return "V8.TFSerialization"; } void Run(PipelineData* data, Zone* temp_zone) { SerializerForBackgroundCompilationFlags flags; @@ -1488,7 +1510,8 @@ struct GenericLoweringPhase { GraphReducer graph_reducer(temp_zone, data->graph(), &data->info()->tick_counter(), data->jsgraph()->Dead()); - JSGenericLowering generic_lowering(data->jsgraph(), &graph_reducer); + JSGenericLowering generic_lowering(data->jsgraph(), &graph_reducer, + data->broker()); AddReducer(data, &graph_reducer, &generic_lowering); graph_reducer.ReduceGraph(); } @@ -1613,7 +1636,8 @@ struct LoadEliminationPhase { &data->info()->tick_counter(), data->jsgraph()->Dead()); BranchElimination branch_condition_elimination(&graph_reducer, - data->jsgraph(), temp_zone); + data->jsgraph(), temp_zone, + BranchElimination::kEARLY); DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(), data->common(), temp_zone); RedundancyElimination redundancy_elimination(&graph_reducer, temp_zone); @@ -1849,6 +1873,7 @@ struct InstructionSelectionPhase { ? InstructionSelector::kEnableSwitchJumpTable : InstructionSelector::kDisableSwitchJumpTable, &data->info()->tick_counter(), + data->address_of_max_unoptimized_frame_height(), data->info()->is_source_positions_enabled() ? InstructionSelector::kAllSourcePositions : InstructionSelector::kCallSourcePositions, @@ -1856,10 +1881,9 @@ struct InstructionSelectionPhase { FLAG_turbo_instruction_scheduling ? InstructionSelector::kEnableScheduling : InstructionSelector::kDisableScheduling, - !data->isolate() || data->isolate()->serializer_enabled() || - data->isolate()->IsGeneratingEmbeddedBuiltins() - ? InstructionSelector::kDisableRootsRelativeAddressing - : InstructionSelector::kEnableRootsRelativeAddressing, + data->roots_relative_addressing_enabled() + ? InstructionSelector::kEnableRootsRelativeAddressing + : InstructionSelector::kDisableRootsRelativeAddressing, data->info()->GetPoisoningMitigationLevel(), data->info()->trace_turbo_json_enabled() ? InstructionSelector::kEnableTraceTurboJson @@ -2175,12 +2199,10 @@ bool PipelineImpl::CreateGraph() { data->node_origins()->AddDecorator(); } + data->broker()->SetTargetNativeContextRef(data->native_context()); if (FLAG_concurrent_inlining) { - data->broker()->StartSerializing(); - Run<SerializeStandardObjectsPhase>(); + Run<HeapBrokerInitializationPhase>(); Run<SerializationPhase>(); - } else { - data->broker()->SetNativeContextRef(); } Run<GraphBuilderPhase>(); @@ -2219,8 +2241,7 @@ bool PipelineImpl::CreateGraph() { Run<CopyMetadataForConcurrentCompilePhase>(); data->broker()->StopSerializing(); } else { - data->broker()->StartSerializing(); - Run<SerializeStandardObjectsPhase>(); + Run<HeapBrokerInitializationPhase>(); Run<CopyMetadataForConcurrentCompilePhase>(); data->broker()->StopSerializing(); } @@ -2356,8 +2377,8 @@ MaybeHandle<Code> Pipeline::GenerateCodeForCodeStub( JumpOptimizationInfo jump_opt; bool should_optimize_jumps = isolate->serializer_enabled() && FLAG_turbo_rewrite_far_jumps; - PipelineData data(&zone_stats, &info, isolate, graph, nullptr, - source_positions, &node_origins, + PipelineData data(&zone_stats, &info, isolate, isolate->allocator(), graph, + nullptr, source_positions, &node_origins, should_optimize_jumps ? 
&jump_opt : nullptr, options); data.set_verify_graph(FLAG_verify_csa); std::unique_ptr<PipelineStatistics> pipeline_statistics; @@ -2402,10 +2423,10 @@ MaybeHandle<Code> Pipeline::GenerateCodeForCodeStub( // First run code generation on a copy of the pipeline, in order to be able to // repeat it for jump optimization. The first run has to happen on a temporary // pipeline to avoid deletion of zones on the main pipeline. - PipelineData second_data(&zone_stats, &info, isolate, data.graph(), - data.schedule(), data.source_positions(), - data.node_origins(), data.jump_optimization_info(), - options); + PipelineData second_data(&zone_stats, &info, isolate, isolate->allocator(), + data.graph(), data.schedule(), + data.source_positions(), data.node_origins(), + data.jump_optimization_info(), options); second_data.set_verify_graph(FLAG_verify_csa); PipelineImpl second_pipeline(&second_data); second_pipeline.SelectInstructionsAndAssemble(call_descriptor); @@ -2421,6 +2442,23 @@ MaybeHandle<Code> Pipeline::GenerateCodeForCodeStub( return code; } +struct BlockStartsAsJSON { + const ZoneVector<int>* block_starts; +}; + +std::ostream& operator<<(std::ostream& out, const BlockStartsAsJSON& s) { + out << ", \"blockIdToOffset\": {"; + bool need_comma = false; + for (size_t i = 0; i < s.block_starts->size(); ++i) { + if (need_comma) out << ", "; + int offset = (*s.block_starts)[i]; + out << "\"" << i << "\":" << offset; + need_comma = true; + } + out << "},"; + return out; +} + // static wasm::WasmCompilationResult Pipeline::GenerateCodeForWasmNativeStub( wasm::WasmEngine* wasm_engine, CallDescriptor* call_descriptor, @@ -2491,7 +2529,9 @@ wasm::WasmCompilationResult Pipeline::GenerateCodeForWasmNativeStub( if (info.trace_turbo_json_enabled()) { TurboJsonFile json_of(&info, std::ios_base::app); - json_of << "{\"name\":\"disassembly\",\"type\":\"disassembly\",\"data\":\""; + json_of << "{\"name\":\"disassembly\",\"type\":\"disassembly\"" + << BlockStartsAsJSON{&code_generator->block_starts()} + << "\"data\":\""; #ifdef ENABLE_DISASSEMBLER std::stringstream disassembler_stream; Disassembler::Decode( @@ -2551,8 +2591,8 @@ MaybeHandle<Code> Pipeline::GenerateCodeForTesting( // Construct a pipeline for scheduling and code generation. 
ZoneStats zone_stats(isolate->allocator()); NodeOriginTable* node_positions = new (info->zone()) NodeOriginTable(graph); - PipelineData data(&zone_stats, info, isolate, graph, schedule, nullptr, - node_positions, nullptr, options); + PipelineData data(&zone_stats, info, isolate, isolate->allocator(), graph, + schedule, nullptr, node_positions, nullptr, options); std::unique_ptr<PipelineStatistics> pipeline_statistics; if (FLAG_turbo_stats || FLAG_turbo_stats_nvp) { pipeline_statistics.reset(new PipelineStatistics( @@ -2684,7 +2724,9 @@ void Pipeline::GenerateCodeForWasmFunction( if (data.info()->trace_turbo_json_enabled()) { TurboJsonFile json_of(data.info(), std::ios_base::app); - json_of << "{\"name\":\"disassembly\",\"type\":\"disassembly\",\"data\":\""; + json_of << "{\"name\":\"disassembly\",\"type\":\"disassembly\"" + << BlockStartsAsJSON{&code_generator->block_starts()} + << "\"data\":\""; #ifdef ENABLE_DISASSEMBLER std::stringstream disassembler_stream; Disassembler::Decode( @@ -2888,7 +2930,7 @@ void PipelineImpl::VerifyGeneratedCodeIsIdempotent() { } struct InstructionStartsAsJSON { - const ZoneVector<int>* instr_starts; + const ZoneVector<TurbolizerInstructionStartInfo>* instr_starts; }; std::ostream& operator<<(std::ostream& out, const InstructionStartsAsJSON& s) { @@ -2896,14 +2938,39 @@ std::ostream& operator<<(std::ostream& out, const InstructionStartsAsJSON& s) { bool need_comma = false; for (size_t i = 0; i < s.instr_starts->size(); ++i) { if (need_comma) out << ", "; - int offset = (*s.instr_starts)[i]; - out << "\"" << i << "\":" << offset; + const TurbolizerInstructionStartInfo& info = (*s.instr_starts)[i]; + out << "\"" << i << "\": {"; + out << "\"gap\": " << info.gap_pc_offset; + out << ", \"arch\": " << info.arch_instr_pc_offset; + out << ", \"condition\": " << info.condition_pc_offset; + out << "}"; need_comma = true; } out << "}"; return out; } +struct TurbolizerCodeOffsetsInfoAsJSON { + const TurbolizerCodeOffsetsInfo* offsets_info; +}; + +std::ostream& operator<<(std::ostream& out, + const TurbolizerCodeOffsetsInfoAsJSON& s) { + out << ", \"codeOffsetsInfo\": {"; + out << "\"codeStartRegisterCheck\": " + << s.offsets_info->code_start_register_check << ", "; + out << "\"deoptCheck\": " << s.offsets_info->deopt_check << ", "; + out << "\"initPoison\": " << s.offsets_info->init_poison << ", "; + out << "\"blocksStart\": " << s.offsets_info->blocks_start << ", "; + out << "\"outOfLineCode\": " << s.offsets_info->out_of_line_code << ", "; + out << "\"deoptimizationExits\": " << s.offsets_info->deoptimization_exits + << ", "; + out << "\"pools\": " << s.offsets_info->pools << ", "; + out << "\"jumpTables\": " << s.offsets_info->jump_tables; + out << "}"; + return out; +} + void PipelineImpl::AssembleCode(Linkage* linkage, std::unique_ptr<AssemblerBuffer> buffer) { PipelineData* data = this->data_; @@ -2915,30 +2982,15 @@ void PipelineImpl::AssembleCode(Linkage* linkage, TurboJsonFile json_of(data->info(), std::ios_base::app); json_of << "{\"name\":\"code generation\"" << ", \"type\":\"instructions\"" - << InstructionStartsAsJSON{&data->code_generator()->instr_starts()}; + << InstructionStartsAsJSON{&data->code_generator()->instr_starts()} + << TurbolizerCodeOffsetsInfoAsJSON{ + &data->code_generator()->offsets_info()}; json_of << "},\n"; } data->DeleteInstructionZone(); data->EndPhaseKind(); } -struct BlockStartsAsJSON { - const ZoneVector<int>* block_starts; -}; - -std::ostream& operator<<(std::ostream& out, const BlockStartsAsJSON& s) { - out << ", 
\"blockIdToOffset\": {"; - bool need_comma = false; - for (size_t i = 0; i < s.block_starts->size(); ++i) { - if (need_comma) out << ", "; - int offset = (*s.block_starts)[i]; - out << "\"" << i << "\":" << offset; - need_comma = true; - } - out << "},"; - return out; -} - MaybeHandle<Code> PipelineImpl::FinalizeCode(bool retire_broker) { PipelineData* data = this->data_; if (data->broker() && retire_broker) { diff --git a/deps/v8/src/compiler/pipeline.h b/deps/v8/src/compiler/pipeline.h index 6898faaad0..3707bfb06e 100644 --- a/deps/v8/src/compiler/pipeline.h +++ b/deps/v8/src/compiler/pipeline.h @@ -61,9 +61,10 @@ class Pipeline : public AllStatic { // Returns a new compilation job for a wasm heap stub. static std::unique_ptr<OptimizedCompilationJob> NewWasmHeapStubCompilationJob( - Isolate* isolate, CallDescriptor* call_descriptor, - std::unique_ptr<Zone> zone, Graph* graph, Code::Kind kind, - std::unique_ptr<char[]> debug_name, const AssemblerOptions& options, + Isolate* isolate, wasm::WasmEngine* wasm_engine, + CallDescriptor* call_descriptor, std::unique_ptr<Zone> zone, Graph* graph, + Code::Kind kind, std::unique_ptr<char[]> debug_name, + const AssemblerOptions& options, SourcePositionTable* source_positions = nullptr); // Run the pipeline on a machine graph and generate code. diff --git a/deps/v8/src/compiler/processed-feedback.h b/deps/v8/src/compiler/processed-feedback.h new file mode 100644 index 0000000000..17829863de --- /dev/null +++ b/deps/v8/src/compiler/processed-feedback.h @@ -0,0 +1,226 @@ +// Copyright 2019 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef V8_COMPILER_PROCESSED_FEEDBACK_H_ +#define V8_COMPILER_PROCESSED_FEEDBACK_H_ + +#include "src/compiler/heap-refs.h" + +namespace v8 { +namespace internal { +namespace compiler { + +class BinaryOperationFeedback; +class CallFeedback; +class CompareOperationFeedback; +class ElementAccessFeedback; +class ForInFeedback; +class GlobalAccessFeedback; +class InstanceOfFeedback; +class NamedAccessFeedback; + +class ProcessedFeedback : public ZoneObject { + public: + enum Kind { + kInsufficient, + kBinaryOperation, + kCall, + kCompareOperation, + kElementAccess, + kForIn, + kGlobalAccess, + kInstanceOf, + kNamedAccess, + }; + Kind kind() const { return kind_; } + + FeedbackSlotKind slot_kind() const { return slot_kind_; } + bool IsInsufficient() const { return kind() == kInsufficient; } + + BinaryOperationFeedback const& AsBinaryOperation() const; + CallFeedback const& AsCall() const; + CompareOperationFeedback const& AsCompareOperation() const; + ElementAccessFeedback const& AsElementAccess() const; + ForInFeedback const& AsForIn() const; + GlobalAccessFeedback const& AsGlobalAccess() const; + InstanceOfFeedback const& AsInstanceOf() const; + NamedAccessFeedback const& AsNamedAccess() const; + + protected: + ProcessedFeedback(Kind kind, FeedbackSlotKind slot_kind); + + private: + Kind const kind_; + FeedbackSlotKind const slot_kind_; +}; + +class InsufficientFeedback final : public ProcessedFeedback { + public: + explicit InsufficientFeedback(FeedbackSlotKind slot_kind); +}; + +class GlobalAccessFeedback : public ProcessedFeedback { + public: + GlobalAccessFeedback(PropertyCellRef cell, FeedbackSlotKind slot_kind); + GlobalAccessFeedback(ContextRef script_context, int slot_index, + bool immutable, FeedbackSlotKind slot_kind); + explicit GlobalAccessFeedback(FeedbackSlotKind slot_kind); // Megamorphic + + bool 
IsMegamorphic() const; + + bool IsPropertyCell() const; + PropertyCellRef property_cell() const; + + bool IsScriptContextSlot() const; + ContextRef script_context() const; + int slot_index() const; + bool immutable() const; + + base::Optional<ObjectRef> GetConstantHint() const; + + private: + base::Optional<ObjectRef> const cell_or_context_; + int const index_and_immutable_; +}; + +class KeyedAccessMode { + public: + static KeyedAccessMode FromNexus(FeedbackNexus const& nexus); + + AccessMode access_mode() const; + bool IsLoad() const; + bool IsStore() const; + KeyedAccessLoadMode load_mode() const; + KeyedAccessStoreMode store_mode() const; + + private: + AccessMode const access_mode_; + union LoadStoreMode { + LoadStoreMode(KeyedAccessLoadMode load_mode); + LoadStoreMode(KeyedAccessStoreMode store_mode); + KeyedAccessLoadMode load_mode; + KeyedAccessStoreMode store_mode; + } const load_store_mode_; + + KeyedAccessMode(AccessMode access_mode, KeyedAccessLoadMode load_mode); + KeyedAccessMode(AccessMode access_mode, KeyedAccessStoreMode store_mode); +}; + +class ElementAccessFeedback : public ProcessedFeedback { + public: + ElementAccessFeedback(Zone* zone, KeyedAccessMode const& keyed_mode, + FeedbackSlotKind slot_kind); + + KeyedAccessMode keyed_mode() const; + + // A transition group is a target and a possibly empty set of sources that can + // transition to the target. It is represented as a non-empty vector with the + // target at index 0. + using TransitionGroup = ZoneVector<Handle<Map>>; + ZoneVector<TransitionGroup> const& transition_groups() const; + + bool HasOnlyStringMaps(JSHeapBroker* broker) const; + + void AddGroup(TransitionGroup&& group); + + // Refine {this} by trying to restrict it to the maps in {inferred_maps}. A + // transition group's target is kept iff it is in {inferred_maps} or if more + // than one of its sources is in {inferred_maps}. 
Here's an (unrealistic) + // example showing all the possible situations: + // + // inferred_maps = [a0, a2, c1, c2, d1, e0, e1] + // + // Groups before: Groups after: + // [a0, a1, a2] [a0, a2] + // [b0] + // [c0, c1, c2, c3] [c0, c1, c2] + // [d0, d1] [d1] + // [e0, e1] [e0, e1] + // + ElementAccessFeedback const& Refine( + ZoneVector<Handle<Map>> const& inferred_maps, Zone* zone) const; + + private: + KeyedAccessMode const keyed_mode_; + ZoneVector<TransitionGroup> transition_groups_; +}; + +class NamedAccessFeedback : public ProcessedFeedback { + public: + NamedAccessFeedback(NameRef const& name, ZoneVector<Handle<Map>> const& maps, + FeedbackSlotKind slot_kind); + + NameRef const& name() const { return name_; } + ZoneVector<Handle<Map>> const& maps() const { return maps_; } + + private: + NameRef const name_; + ZoneVector<Handle<Map>> const maps_; +}; + +class CallFeedback : public ProcessedFeedback { + public: + CallFeedback(base::Optional<HeapObjectRef> target, float frequency, + SpeculationMode mode, FeedbackSlotKind slot_kind) + : ProcessedFeedback(kCall, slot_kind), + target_(target), + frequency_(frequency), + mode_(mode) {} + + base::Optional<HeapObjectRef> target() const { return target_; } + float frequency() const { return frequency_; } + SpeculationMode speculation_mode() const { return mode_; } + + private: + base::Optional<HeapObjectRef> const target_; + float const frequency_; + SpeculationMode const mode_; +}; + +template <class T, ProcessedFeedback::Kind K> +class SingleValueFeedback : public ProcessedFeedback { + public: + explicit SingleValueFeedback(T value, FeedbackSlotKind slot_kind) + : ProcessedFeedback(K, slot_kind), value_(value) { + DCHECK( + (K == kBinaryOperation && slot_kind == FeedbackSlotKind::kBinaryOp) || + (K == kCompareOperation && slot_kind == FeedbackSlotKind::kCompareOp) || + (K == kForIn && slot_kind == FeedbackSlotKind::kForIn) || + (K == kInstanceOf && slot_kind == FeedbackSlotKind::kInstanceOf)); + } + + T value() const { return value_; } + + private: + T const value_; +}; + +class InstanceOfFeedback + : public SingleValueFeedback<base::Optional<JSObjectRef>, + ProcessedFeedback::kInstanceOf> { + using SingleValueFeedback::SingleValueFeedback; +}; + +class BinaryOperationFeedback + : public SingleValueFeedback<BinaryOperationHint, + ProcessedFeedback::kBinaryOperation> { + using SingleValueFeedback::SingleValueFeedback; +}; + +class CompareOperationFeedback + : public SingleValueFeedback<CompareOperationHint, + ProcessedFeedback::kCompareOperation> { + using SingleValueFeedback::SingleValueFeedback; +}; + +class ForInFeedback + : public SingleValueFeedback<ForInHint, ProcessedFeedback::kForIn> { + using SingleValueFeedback::SingleValueFeedback; +}; + +} // namespace compiler +} // namespace internal +} // namespace v8 + +#endif // V8_COMPILER_PROCESSED_FEEDBACK_H_ diff --git a/deps/v8/src/compiler/property-access-builder.cc b/deps/v8/src/compiler/property-access-builder.cc index 99a06ef874..6c33ae227f 100644 --- a/deps/v8/src/compiler/property-access-builder.cc +++ b/deps/v8/src/compiler/property-access-builder.cc @@ -61,7 +61,7 @@ bool PropertyAccessBuilder::TryBuildStringCheck( // Monomorphic string access (ignoring the fact that there are multiple // String maps).
*receiver = *effect = - graph()->NewNode(simplified()->CheckString(VectorSlotPair()), *receiver, + graph()->NewNode(simplified()->CheckString(FeedbackSource()), *receiver, *effect, control); return true; } @@ -74,7 +74,7 @@ bool PropertyAccessBuilder::TryBuildNumberCheck( if (HasOnlyNumberMaps(broker, maps)) { // Monomorphic number access (we also deal with Smis here). *receiver = *effect = - graph()->NewNode(simplified()->CheckNumber(VectorSlotPair()), *receiver, + graph()->NewNode(simplified()->CheckNumber(FeedbackSource()), *receiver, *effect, control); return true; } @@ -151,14 +151,6 @@ MachineRepresentation PropertyAccessBuilder::ConvertRepresentation( Node* PropertyAccessBuilder::TryBuildLoadConstantDataField( NameRef const& name, PropertyAccessInfo const& access_info, Node* receiver) { - // TODO(neis): Eliminate FastPropertyAt call below by doing the lookup during - // acccess info computation. Requires extra care in the case where the - // receiver is the holder. - AllowCodeDependencyChange dependency_change_; - AllowHandleAllocation handle_allocation_; - AllowHandleDereference handle_dereference_; - AllowHeapAllocation heap_allocation_; - if (!access_info.IsDataConstant()) return nullptr; // First, determine if we have a constant holder to load from. @@ -174,17 +166,21 @@ Node* PropertyAccessBuilder::TryBuildLoadConstantDataField( MapRef receiver_map = m.Ref(broker()).map(); if (std::find_if(access_info.receiver_maps().begin(), access_info.receiver_maps().end(), [&](Handle<Map> map) { - return map.equals(receiver_map.object()); + return MapRef(broker(), map).equals(receiver_map); }) == access_info.receiver_maps().end()) { // The map of the receiver is not in the feedback, let us bail out. return nullptr; } - holder = Handle<JSObject>::cast(m.Value()); + holder = m.Ref(broker()).AsJSObject().object(); } - Handle<Object> value = JSObject::FastPropertyAt( - holder, access_info.field_representation(), access_info.field_index()); - return jsgraph()->Constant(value); + JSObjectRef holder_ref(broker(), holder); + base::Optional<ObjectRef> value = holder_ref.GetOwnDataProperty( + access_info.field_representation(), access_info.field_index()); + if (!value.has_value()) { + return nullptr; + } + return jsgraph()->Constant(*value); } Node* PropertyAccessBuilder::BuildLoadDataField( @@ -203,12 +199,10 @@ Node* PropertyAccessBuilder::BuildLoadDataField( Node* storage = ResolveHolder(access_info, receiver); if (!field_index.is_inobject()) { storage = *effect = graph()->NewNode( - simplified()->LoadField(AccessBuilder::ForJSObjectPropertiesOrHash()), + simplified()->LoadField( + AccessBuilder::ForJSObjectPropertiesOrHashKnownPointer()), storage, *effect, *control); } - PropertyConstness constness = access_info.IsDataConstant() - ? 
PropertyConstness::kConst - : PropertyConstness::kMutable; FieldAccess field_access = { kTaggedBase, field_index.offset(), @@ -218,7 +212,7 @@ MachineType::TypeForRepresentation(field_representation), kFullWriteBarrier, LoadSensitivity::kCritical, - constness}; + access_info.GetConstFieldInfo()}; if (field_representation == MachineRepresentation::kFloat64) { if (!field_index.is_inobject() || !FLAG_unbox_double_fields) { FieldAccess const storage_access = { @@ -230,7 +224,7 @@ MachineType::TypeCompressedTaggedPointer(), kPointerWriteBarrier, LoadSensitivity::kCritical, - constness}; + access_info.GetConstFieldInfo()}; storage = *effect = graph()->NewNode( simplified()->LoadField(storage_access), storage, *effect, *control); field_access.offset = HeapNumber::kValueOffset; diff --git a/deps/v8/src/compiler/raw-machine-assembler.cc b/deps/v8/src/compiler/raw-machine-assembler.cc index 277c89c932..e399b9c4f6 100644 --- a/deps/v8/src/compiler/raw-machine-assembler.cc +++ b/deps/v8/src/compiler/raw-machine-assembler.cc @@ -77,7 +77,7 @@ Node* RawMachineAssembler::OptimizedAllocate( size); } -Schedule* RawMachineAssembler::Export() { +Schedule* RawMachineAssembler::ExportForTest() { // Compute the correct codegen order. DCHECK(schedule_->rpo_order()->empty()); if (FLAG_trace_turbo_scheduler) { @@ -106,6 +106,7 @@ Graph* RawMachineAssembler::ExportForOptimization() { StdoutStream{} << *schedule_; } schedule_->EnsureCFGWellFormedness(); + OptimizeControlFlow(schedule_, graph(), common()); Scheduler::ComputeSpecialRPO(zone(), schedule_); if (FLAG_trace_turbo_scheduler) { PrintF("--- SCHEDULE BEFORE GRAPH CREATION -------------------------\n"); @@ -117,6 +118,99 @@ Graph* RawMachineAssembler::ExportForOptimization() { return graph(); } +void RawMachineAssembler::OptimizeControlFlow(Schedule* schedule, Graph* graph, + CommonOperatorBuilder* common) { + for (bool changed = true; changed;) { + changed = false; + for (size_t i = 0; i < schedule->all_blocks()->size(); ++i) { + BasicBlock* block = (*schedule->all_blocks())[i]; + if (block == nullptr) continue; + + // Short-circuit a goto if the succeeding block is not a control-flow + // merge. This is not really useful on its own since graph construction + // has the same effect, but combining blocks improves the pattern-match on + // their structure below. + if (block->control() == BasicBlock::kGoto) { + DCHECK_EQ(block->SuccessorCount(), 1); + BasicBlock* successor = block->SuccessorAt(0); + if (successor->PredecessorCount() == 1) { + DCHECK_EQ(successor->PredecessorAt(0), block); + for (Node* node : *successor) { + schedule->SetBlockForNode(nullptr, node); + schedule->AddNode(block, node); + } + block->set_control(successor->control()); + Node* control_input = successor->control_input(); + block->set_control_input(control_input); + if (control_input) { + schedule->SetBlockForNode(block, control_input); + } + if (successor->deferred()) block->set_deferred(true); + block->ClearSuccessors(); + schedule->MoveSuccessors(successor, block); + schedule->ClearBlockById(successor->id()); + changed = true; + --i; + continue; + } + } + // Block-cloning in the simple case where a block consists only of a phi + // node and a branch on that phi. This just duplicates the branch block + // for each predecessor, replacing the phi node with the corresponding phi + // input.
+ if (block->control() == BasicBlock::kBranch && block->NodeCount() == 1) { + Node* phi = block->NodeAt(0); + if (phi->opcode() != IrOpcode::kPhi) continue; + Node* branch = block->control_input(); + DCHECK_EQ(branch->opcode(), IrOpcode::kBranch); + if (NodeProperties::GetValueInput(branch, 0) != phi) continue; + if (phi->UseCount() != 1) continue; + DCHECK_EQ(phi->op()->ValueInputCount(), block->PredecessorCount()); + + // Turn projection blocks into normal blocks. + DCHECK_EQ(block->SuccessorCount(), 2); + BasicBlock* true_block = block->SuccessorAt(0); + BasicBlock* false_block = block->SuccessorAt(1); + DCHECK_EQ(true_block->NodeAt(0)->opcode(), IrOpcode::kIfTrue); + DCHECK_EQ(false_block->NodeAt(0)->opcode(), IrOpcode::kIfFalse); + (*true_block->begin())->Kill(); + true_block->RemoveNode(true_block->begin()); + (*false_block->begin())->Kill(); + false_block->RemoveNode(false_block->begin()); + true_block->ClearPredecessors(); + false_block->ClearPredecessors(); + + size_t arity = block->PredecessorCount(); + for (size_t i = 0; i < arity; ++i) { + BasicBlock* predecessor = block->PredecessorAt(i); + predecessor->ClearSuccessors(); + if (block->deferred()) predecessor->set_deferred(true); + Node* branch_clone = graph->CloneNode(branch); + int phi_input = static_cast<int>(i); + NodeProperties::ReplaceValueInput( + branch_clone, NodeProperties::GetValueInput(phi, phi_input), 0); + BasicBlock* new_true_block = schedule->NewBasicBlock(); + BasicBlock* new_false_block = schedule->NewBasicBlock(); + new_true_block->AddNode( + graph->NewNode(common->IfTrue(), branch_clone)); + new_false_block->AddNode( + graph->NewNode(common->IfFalse(), branch_clone)); + schedule->AddGoto(new_true_block, true_block); + schedule->AddGoto(new_false_block, false_block); + DCHECK_EQ(predecessor->control(), BasicBlock::kGoto); + predecessor->set_control(BasicBlock::kNone); + schedule->AddBranch(predecessor, branch_clone, new_true_block, + new_false_block); + } + branch->Kill(); + schedule->ClearBlockById(block->id()); + changed = true; + continue; + } + } + } +} + void RawMachineAssembler::MakeReschedulable() { std::vector<Node*> block_final_control(schedule_->all_blocks_.size()); std::vector<Node*> block_final_effect(schedule_->all_blocks_.size()); @@ -619,8 +713,10 @@ Node* CallCFunctionImpl( builder.AddReturn(return_type); for (const auto& arg : args) builder.AddParam(arg.first); - auto call_descriptor = - Linkage::GetSimplifiedCDescriptor(rasm->zone(), builder.Build()); + auto call_descriptor = Linkage::GetSimplifiedCDescriptor( + rasm->zone(), builder.Build(), + caller_saved_regs ? CallDescriptor::kCallerSavedRegisters + : CallDescriptor::kNoFlags); if (caller_saved_regs) call_descriptor->set_save_fp_mode(mode); @@ -631,10 +727,8 @@ Node* CallCFunctionImpl( [](const RawMachineAssembler::CFunctionArg& arg) { return arg.second; }); auto common = rasm->common(); - return rasm->AddNode( - caller_saved_regs ? 
common->CallWithCallerSavedRegisters(call_descriptor) - : common->Call(call_descriptor), - static_cast<int>(nodes.size()), nodes.begin()); + return rasm->AddNode(common->Call(call_descriptor), + static_cast<int>(nodes.size()), nodes.begin()); } } // namespace diff --git a/deps/v8/src/compiler/raw-machine-assembler.h b/deps/v8/src/compiler/raw-machine-assembler.h index 890c38c551..46940df44f 100644 --- a/deps/v8/src/compiler/raw-machine-assembler.h +++ b/deps/v8/src/compiler/raw-machine-assembler.h @@ -65,9 +65,10 @@ class V8_EXPORT_PRIVATE RawMachineAssembler { CallDescriptor* call_descriptor() const { return call_descriptor_; } PoisoningMitigationLevel poisoning_level() const { return poisoning_level_; } - // Finalizes the schedule and exports it to be used for code generation. Note - // that this RawMachineAssembler becomes invalid after export. - Schedule* Export(); + // Only used for tests: Finalizes the schedule and exports it to be used for + // code generation. Note that this RawMachineAssembler becomes invalid after + // export. + Schedule* ExportForTest(); // Finalizes the schedule and transforms it into a graph that's suitable for // it to be used for Turbofan optimization and re-scheduling. Note that this // RawMachineAssembler becomes invalid after export. @@ -577,6 +578,9 @@ class V8_EXPORT_PRIVATE RawMachineAssembler { Node* Word32PairSar(Node* low_word, Node* high_word, Node* shift) { return AddNode(machine()->Word32PairSar(), low_word, high_word, shift); } + Node* StackPointerGreaterThan(Node* value) { + return AddNode(machine()->StackPointerGreaterThan(), value); + } #define INTPTR_BINOP(prefix, name) \ Node* IntPtr##name(Node* a, Node* b) { \ @@ -907,7 +911,6 @@ class V8_EXPORT_PRIVATE RawMachineAssembler { } // Stack operations. - Node* LoadStackPointer() { return AddNode(machine()->LoadStackPointer()); } Node* LoadFramePointer() { return AddNode(machine()->LoadFramePointer()); } Node* LoadParentFramePointer() { return AddNode(machine()->LoadParentFramePointer()); @@ -1091,6 +1094,9 @@ class V8_EXPORT_PRIVATE RawMachineAssembler { Schedule* schedule() { return schedule_; } size_t parameter_count() const { return call_descriptor_->ParameterCount(); } + static void OptimizeControlFlow(Schedule* schedule, Graph* graph, + CommonOperatorBuilder* common); + Isolate* isolate_; Graph* graph_; Schedule* schedule_; diff --git a/deps/v8/src/compiler/representation-change.cc b/deps/v8/src/compiler/representation-change.cc index 7a4577b799..fd0cbabe66 100644 --- a/deps/v8/src/compiler/representation-change.cc +++ b/deps/v8/src/compiler/representation-change.cc @@ -316,12 +316,12 @@ Node* RepresentationChanger::GetTaggedSignedRepresentationFor( node = InsertChangeFloat64ToUint32(node); op = simplified()->CheckedUint32ToTaggedSigned(use_info.feedback()); } else if (use_info.type_check() == TypeCheckKind::kSignedSmall) { - op = simplified()->CheckedFloat64ToInt32( + node = InsertCheckedFloat64ToInt32( + node, output_type.Maybe(Type::MinusZero()) ? 
CheckForMinusZeroMode::kCheckForMinusZero : CheckForMinusZeroMode::kDontCheckForMinusZero, - use_info.feedback()); - node = InsertConversion(node, op, use_node); + use_info.feedback(), use_node); if (SmiValuesAre32Bits()) { op = simplified()->ChangeInt32ToTagged(); } else { @@ -333,14 +333,13 @@ Node* RepresentationChanger::GetTaggedSignedRepresentationFor( } } else if (output_rep == MachineRepresentation::kFloat32) { if (use_info.type_check() == TypeCheckKind::kSignedSmall) { - op = machine()->ChangeFloat32ToFloat64(); - node = InsertConversion(node, op, use_node); - op = simplified()->CheckedFloat64ToInt32( + node = InsertChangeFloat32ToFloat64(node); + node = InsertCheckedFloat64ToInt32( + node, output_type.Maybe(Type::MinusZero()) ? CheckForMinusZeroMode::kCheckForMinusZero : CheckForMinusZeroMode::kDontCheckForMinusZero, - use_info.feedback()); - node = InsertConversion(node, op, use_node); + use_info.feedback(), use_node); if (SmiValuesAre32Bits()) { op = simplified()->ChangeInt32ToTagged(); } else { @@ -475,7 +474,7 @@ Node* RepresentationChanger::GetTaggedPointerRepresentationFor( } else if (output_rep == MachineRepresentation::kCompressedPointer) { if (use_info.type_check() == TypeCheckKind::kBigInt && !output_type.Is(Type::BigInt())) { - node = InsertChangeCompressedToTagged(node); + node = InsertChangeCompressedPointerToTaggedPointer(node); op = simplified()->CheckBigInt(use_info.feedback()); } else { op = machine()->ChangeCompressedPointerToTaggedPointer(); @@ -671,13 +670,48 @@ Node* RepresentationChanger::GetCompressedSignedRepresentationFor( use_node, use_info); op = machine()->ChangeTaggedSignedToCompressedSigned(); } else if (output_rep == MachineRepresentation::kFloat32) { - node = GetTaggedSignedRepresentationFor(node, output_rep, output_type, - use_node, use_info); - op = machine()->ChangeTaggedSignedToCompressedSigned(); + // float 32 -> float64 -> int32 -> Compressed signed + if (use_info.type_check() == TypeCheckKind::kSignedSmall) { + node = InsertChangeFloat32ToFloat64(node); + node = InsertCheckedFloat64ToInt32( + node, + output_type.Maybe(Type::MinusZero()) + ? CheckForMinusZeroMode::kCheckForMinusZero + : CheckForMinusZeroMode::kDontCheckForMinusZero, + use_info.feedback(), use_node); + op = simplified()->CheckedInt32ToCompressedSigned(use_info.feedback()); + } else { + return TypeError(node, output_rep, output_type, + MachineRepresentation::kCompressedSigned); + } } else if (output_rep == MachineRepresentation::kFloat64) { - node = GetTaggedSignedRepresentationFor(node, output_rep, output_type, - use_node, use_info); - op = machine()->ChangeTaggedSignedToCompressedSigned(); + if (output_type.Is(Type::Signed31())) { + // float64 -> int32 -> compressed signed + node = InsertChangeFloat64ToInt32(node); + op = simplified()->ChangeInt31ToCompressedSigned(); + } else if (output_type.Is(Type::Signed32())) { + // float64 -> int32 -> compressed signed + node = InsertChangeFloat64ToInt32(node); + if (use_info.type_check() == TypeCheckKind::kSignedSmall) { + op = simplified()->CheckedInt32ToCompressedSigned(use_info.feedback()); + } else { + return TypeError(node, output_rep, output_type, + MachineRepresentation::kCompressedSigned); + } + } else if (use_info.type_check() == TypeCheckKind::kSignedSmall) { + node = InsertCheckedFloat64ToInt32( + node, + output_type.Maybe(Type::MinusZero()) + ? 
CheckForMinusZeroMode::kCheckForMinusZero + : CheckForMinusZeroMode::kDontCheckForMinusZero, + use_info.feedback(), use_node); + op = simplified()->CheckedInt32ToCompressedSigned(use_info.feedback()); + } else { + // TODO(v8:8977): specialize here and below. Missing the unsigned case. + node = GetTaggedSignedRepresentationFor(node, output_rep, output_type, + use_node, use_info); + op = machine()->ChangeTaggedSignedToCompressedSigned(); + } } else { return TypeError(node, output_rep, output_type, MachineRepresentation::kCompressedSigned); @@ -830,20 +864,17 @@ Node* RepresentationChanger::GetFloat32RepresentationFor( } } else if (output_rep == MachineRepresentation::kCompressed) { // TODO(v8:8977): Specialise here - op = machine()->ChangeCompressedToTagged(); - node = jsgraph()->graph()->NewNode(op, node); + node = InsertChangeCompressedToTagged(node); return GetFloat32RepresentationFor(node, MachineRepresentation::kTagged, output_type, truncation); } else if (output_rep == MachineRepresentation::kCompressedSigned) { // TODO(v8:8977): Specialise here - op = machine()->ChangeCompressedSignedToTaggedSigned(); - node = jsgraph()->graph()->NewNode(op, node); + node = InsertChangeCompressedSignedToTaggedSigned(node); return GetFloat32RepresentationFor( node, MachineRepresentation::kTaggedSigned, output_type, truncation); } else if (output_rep == MachineRepresentation::kCompressedPointer) { // TODO(v8:8977): Specialise here - op = machine()->ChangeCompressedPointerToTaggedPointer(); - node = jsgraph()->graph()->NewNode(op, node); + node = InsertChangeCompressedPointerToTaggedPointer(node); return GetFloat32RepresentationFor( node, MachineRepresentation::kTaggedPointer, output_type, truncation); } else if (output_rep == MachineRepresentation::kFloat64) { @@ -948,21 +979,18 @@ Node* RepresentationChanger::GetFloat64RepresentationFor( } } else if (output_rep == MachineRepresentation::kCompressed) { // TODO(v8:8977): Specialise here - op = machine()->ChangeCompressedToTagged(); - node = jsgraph()->graph()->NewNode(op, node); + node = InsertChangeCompressedToTagged(node); return GetFloat64RepresentationFor(node, MachineRepresentation::kTagged, output_type, use_node, use_info); } else if (output_rep == MachineRepresentation::kCompressedSigned) { // TODO(v8:8977): Specialise here - op = machine()->ChangeCompressedSignedToTaggedSigned(); - node = jsgraph()->graph()->NewNode(op, node); + node = InsertChangeCompressedSignedToTaggedSigned(node); return GetFloat64RepresentationFor(node, MachineRepresentation::kTaggedSigned, output_type, use_node, use_info); } else if (output_rep == MachineRepresentation::kCompressedPointer) { // TODO(v8:8977): Specialise here - op = machine()->ChangeCompressedPointerToTaggedPointer(); - node = jsgraph()->graph()->NewNode(op, node); + node = InsertChangeCompressedPointerToTaggedPointer(node); return GetFloat64RepresentationFor(node, MachineRepresentation::kTaggedPointer, output_type, use_node, use_info); @@ -1116,8 +1144,7 @@ Node* RepresentationChanger::GetWord32RepresentationFor( } } else if (output_rep == MachineRepresentation::kCompressed) { // TODO(v8:8977): Specialise here - op = machine()->ChangeCompressedToTagged(); - node = jsgraph()->graph()->NewNode(op, node); + node = InsertChangeCompressedToTagged(node); return GetWord32RepresentationFor(node, MachineRepresentation::kTagged, output_type, use_node, use_info); } else if (output_rep == MachineRepresentation::kCompressedSigned) { @@ -1125,16 +1152,14 @@ Node* RepresentationChanger::GetWord32RepresentationFor( if 
(output_type.Is(Type::SignedSmall())) { op = simplified()->ChangeCompressedSignedToInt32(); } else { - op = machine()->ChangeCompressedSignedToTaggedSigned(); - node = jsgraph()->graph()->NewNode(op, node); + node = InsertChangeCompressedSignedToTaggedSigned(node); return GetWord32RepresentationFor(node, MachineRepresentation::kTaggedSigned, output_type, use_node, use_info); } } else if (output_rep == MachineRepresentation::kCompressedPointer) { // TODO(v8:8977): Specialise here - op = machine()->ChangeCompressedPointerToTaggedPointer(); - node = jsgraph()->graph()->NewNode(op, node); + node = InsertChangeCompressedPointerToTaggedPointer(node); return GetWord32RepresentationFor(node, MachineRepresentation::kTaggedPointer, output_type, use_node, use_info); @@ -1253,20 +1278,17 @@ Node* RepresentationChanger::GetBitRepresentationFor( jsgraph()->Int32Constant(0)); } else if (output_rep == MachineRepresentation::kCompressed) { // TODO(v8:8977): Specialise here - op = machine()->ChangeCompressedToTagged(); - node = jsgraph()->graph()->NewNode(op, node); + node = InsertChangeCompressedToTagged(node); return GetBitRepresentationFor(node, MachineRepresentation::kTagged, output_type); } else if (output_rep == MachineRepresentation::kCompressedSigned) { // TODO(v8:8977): Specialise here - op = machine()->ChangeCompressedSignedToTaggedSigned(); - node = jsgraph()->graph()->NewNode(op, node); + node = InsertChangeCompressedSignedToTaggedSigned(node); return GetBitRepresentationFor(node, MachineRepresentation::kTaggedSigned, output_type); } else if (output_rep == MachineRepresentation::kCompressedPointer) { // TODO(v8:8977): Specialise here - op = machine()->ChangeCompressedPointerToTaggedPointer(); - node = jsgraph()->graph()->NewNode(op, node); + node = InsertChangeCompressedPointerToTaggedPointer(node); return GetBitRepresentationFor(node, MachineRepresentation::kTaggedPointer, output_type); } else if (IsWord(output_rep)) { @@ -1423,21 +1445,18 @@ Node* RepresentationChanger::GetWord64RepresentationFor( } } else if (output_rep == MachineRepresentation::kCompressed) { // TODO(v8:8977): Specialise here - op = machine()->ChangeCompressedToTagged(); - node = jsgraph()->graph()->NewNode(op, node); + node = InsertChangeCompressedToTagged(node); return GetWord64RepresentationFor(node, MachineRepresentation::kTagged, output_type, use_node, use_info); } else if (output_rep == MachineRepresentation::kCompressedSigned) { // TODO(v8:8977): Specialise here - op = machine()->ChangeCompressedSignedToTaggedSigned(); - node = jsgraph()->graph()->NewNode(op, node); + node = InsertChangeCompressedSignedToTaggedSigned(node); return GetWord64RepresentationFor(node, MachineRepresentation::kTaggedSigned, output_type, use_node, use_info); } else if (output_rep == MachineRepresentation::kCompressedPointer) { // TODO(v8:8977): Specialise here - op = machine()->ChangeCompressedPointerToTaggedPointer(); - node = jsgraph()->graph()->NewNode(op, node); + node = InsertChangeCompressedPointerToTaggedPointer(node); return GetWord64RepresentationFor(node, MachineRepresentation::kTaggedPointer, output_type, use_node, use_info); @@ -1741,11 +1760,30 @@ Node* RepresentationChanger::InsertTruncateInt64ToInt32(Node* node) { return jsgraph()->graph()->NewNode(machine()->TruncateInt64ToInt32(), node); } +Node* RepresentationChanger::InsertChangeCompressedPointerToTaggedPointer( + Node* node) { + return jsgraph()->graph()->NewNode( + machine()->ChangeCompressedPointerToTaggedPointer(), node); +} + +Node* 
RepresentationChanger::InsertChangeCompressedSignedToTaggedSigned( + Node* node) { + return jsgraph()->graph()->NewNode( + machine()->ChangeCompressedSignedToTaggedSigned(), node); +} + Node* RepresentationChanger::InsertChangeCompressedToTagged(Node* node) { return jsgraph()->graph()->NewNode(machine()->ChangeCompressedToTagged(), node); } +Node* RepresentationChanger::InsertCheckedFloat64ToInt32( + Node* node, CheckForMinusZeroMode check, const FeedbackSource& feedback, + Node* use_node) { + return InsertConversion( + node, simplified()->CheckedFloat64ToInt32(check, feedback), use_node); +} + Isolate* RepresentationChanger::isolate() const { return broker_->isolate(); } } // namespace compiler diff --git a/deps/v8/src/compiler/representation-change.h b/deps/v8/src/compiler/representation-change.h index d338667603..43e85085ba 100644 --- a/deps/v8/src/compiler/representation-change.h +++ b/deps/v8/src/compiler/representation-change.h @@ -5,6 +5,7 @@ #ifndef V8_COMPILER_REPRESENTATION_CHANGE_H_ #define V8_COMPILER_REPRESENTATION_CHANGE_H_ +#include "src/compiler/feedback-source.h" #include "src/compiler/js-graph.h" #include "src/compiler/simplified-operator.h" @@ -165,7 +166,7 @@ class UseInfo { public: UseInfo(MachineRepresentation representation, Truncation truncation, TypeCheckKind type_check = TypeCheckKind::kNone, - const VectorSlotPair& feedback = VectorSlotPair()) + const FeedbackSource& feedback = FeedbackSource()) : representation_(representation), truncation_(truncation), type_check_(type_check), @@ -176,7 +177,7 @@ class UseInfo { static UseInfo TruncatingWord64() { return UseInfo(MachineRepresentation::kWord64, Truncation::Word64()); } - static UseInfo CheckedBigIntTruncatingWord64(const VectorSlotPair& feedback) { + static UseInfo CheckedBigIntTruncatingWord64(const FeedbackSource& feedback) { return UseInfo(MachineRepresentation::kWord64, Truncation::Word64(), TypeCheckKind::kBigInt, feedback); } @@ -219,59 +220,59 @@ class UseInfo { // Possibly deoptimizing conversions. 
static UseInfo CheckedHeapObjectAsTaggedPointer( - const VectorSlotPair& feedback) { + const FeedbackSource& feedback) { return UseInfo(MachineRepresentation::kTaggedPointer, Truncation::Any(), TypeCheckKind::kHeapObject, feedback); } - static UseInfo CheckedBigIntAsTaggedPointer(const VectorSlotPair& feedback) { + static UseInfo CheckedBigIntAsTaggedPointer(const FeedbackSource& feedback) { return UseInfo(MachineRepresentation::kTaggedPointer, Truncation::Any(), TypeCheckKind::kBigInt, feedback); } static UseInfo CheckedSignedSmallAsTaggedSigned( - const VectorSlotPair& feedback, + const FeedbackSource& feedback, IdentifyZeros identify_zeros = kDistinguishZeros) { return UseInfo(MachineRepresentation::kTaggedSigned, Truncation::Any(identify_zeros), TypeCheckKind::kSignedSmall, feedback); } static UseInfo CheckedSignedSmallAsWord32(IdentifyZeros identify_zeros, - const VectorSlotPair& feedback) { + const FeedbackSource& feedback) { return UseInfo(MachineRepresentation::kWord32, Truncation::Any(identify_zeros), TypeCheckKind::kSignedSmall, feedback); } static UseInfo CheckedSigned32AsWord32(IdentifyZeros identify_zeros, - const VectorSlotPair& feedback) { + const FeedbackSource& feedback) { return UseInfo(MachineRepresentation::kWord32, Truncation::Any(identify_zeros), TypeCheckKind::kSigned32, feedback); } static UseInfo CheckedSigned64AsWord64(IdentifyZeros identify_zeros, - const VectorSlotPair& feedback) { + const FeedbackSource& feedback) { return UseInfo(MachineRepresentation::kWord64, Truncation::Any(identify_zeros), TypeCheckKind::kSigned64, feedback); } static UseInfo CheckedNumberAsFloat64(IdentifyZeros identify_zeros, - const VectorSlotPair& feedback) { + const FeedbackSource& feedback) { return UseInfo(MachineRepresentation::kFloat64, Truncation::Any(identify_zeros), TypeCheckKind::kNumber, feedback); } - static UseInfo CheckedNumberAsWord32(const VectorSlotPair& feedback) { + static UseInfo CheckedNumberAsWord32(const FeedbackSource& feedback) { return UseInfo(MachineRepresentation::kWord32, Truncation::Word32(), TypeCheckKind::kNumber, feedback); } static UseInfo CheckedNumberOrOddballAsFloat64( - IdentifyZeros identify_zeros, const VectorSlotPair& feedback) { + IdentifyZeros identify_zeros, const FeedbackSource& feedback) { return UseInfo(MachineRepresentation::kFloat64, Truncation::Any(identify_zeros), TypeCheckKind::kNumberOrOddball, feedback); } static UseInfo CheckedNumberOrOddballAsWord32( - const VectorSlotPair& feedback) { + const FeedbackSource& feedback) { return UseInfo(MachineRepresentation::kWord32, Truncation::Word32(), TypeCheckKind::kNumberOrOddball, feedback); } @@ -297,13 +298,13 @@ class UseInfo { ? 
CheckForMinusZeroMode::kDontCheckForMinusZero : CheckForMinusZeroMode::kCheckForMinusZero; } - const VectorSlotPair& feedback() const { return feedback_; } + const FeedbackSource& feedback() const { return feedback_; } private: MachineRepresentation representation_; Truncation truncation_; TypeCheckKind type_check_; - VectorSlotPair feedback_; + FeedbackSource feedback_; }; // Contains logic related to changing the representation of values for constants @@ -395,7 +396,12 @@ class V8_EXPORT_PRIVATE RepresentationChanger final { Node* InsertChangeTaggedSignedToInt32(Node* node); Node* InsertChangeTaggedToFloat64(Node* node); Node* InsertChangeUint32ToFloat64(Node* node); + Node* InsertChangeCompressedPointerToTaggedPointer(Node* node); + Node* InsertChangeCompressedSignedToTaggedSigned(Node* node); Node* InsertChangeCompressedToTagged(Node* node); + Node* InsertCheckedFloat64ToInt32(Node* node, CheckForMinusZeroMode check, + const FeedbackSource& feedback, + Node* use_node); Node* InsertConversion(Node* node, const Operator* op, Node* use_node); Node* InsertTruncateInt64ToInt32(Node* node); Node* InsertUnconditionalDeopt(Node* node, DeoptimizeReason reason); diff --git a/deps/v8/src/compiler/schedule.cc b/deps/v8/src/compiler/schedule.cc index 84d74b4685..3b335f9712 100644 --- a/deps/v8/src/compiler/schedule.cc +++ b/deps/v8/src/compiler/schedule.cc @@ -163,6 +163,11 @@ BasicBlock* Schedule::GetBlockById(BasicBlock::Id block_id) { return all_blocks_[block_id.ToSize()]; } +void Schedule::ClearBlockById(BasicBlock::Id block_id) { + DCHECK(block_id.ToSize() < all_blocks_.size()); + all_blocks_[block_id.ToSize()] = nullptr; +} + bool Schedule::SameBasicBlock(Node* a, Node* b) const { BasicBlock* block = this->block(a); return block != nullptr && block == this->block(b); @@ -210,7 +215,6 @@ bool IsPotentiallyThrowingCall(IrOpcode::Value opcode) { JS_OP_LIST(BUILD_BLOCK_JS_CASE) #undef BUILD_BLOCK_JS_CASE case IrOpcode::kCall: - case IrOpcode::kCallWithCallerSavedRegisters: return true; default: return false; @@ -321,9 +325,6 @@ void Schedule::EnsureCFGWellFormedness() { if (block != end_) { EnsureSplitEdgeForm(block); } - if (block->deferred()) { - EnsureDeferredCodeSingleEntryPoint(block); - } } } @@ -356,6 +357,7 @@ void Schedule::EliminateRedundantPhiNodes() { } if (!inputs_equal) continue; node->ReplaceUses(first_input); + node->Kill(); block->RemoveNode(block->begin() + node_pos); --node_pos; reached_fixed_point = false; @@ -376,43 +378,6 @@ void Schedule::EnsureSplitEdgeForm(BasicBlock* block) { #endif } -void Schedule::EnsureDeferredCodeSingleEntryPoint(BasicBlock* block) { - // If a deferred block has multiple predecessors, they have to - // all be deferred. Otherwise, we can run into a situation where a range - // that spills only in deferred blocks inserts its spill in the block, but - // other ranges need moves inserted by ResolveControlFlow in the predecessors, - // which may clobber the register of this range. - // To ensure that, when a deferred block has multiple predecessors, and some - // are not deferred, we add a non-deferred block to collect all such edges. 
- - DCHECK(block->deferred() && block->PredecessorCount() > 1); - bool all_deferred = true; - for (auto current_pred = block->predecessors().begin(); - current_pred != block->predecessors().end(); ++current_pred) { - BasicBlock* pred = *current_pred; - if (!pred->deferred()) { - all_deferred = false; - break; - } - } - - if (all_deferred) return; - BasicBlock* merger = NewBasicBlock(); - merger->set_control(BasicBlock::kGoto); - merger->successors().push_back(block); - for (auto current_pred = block->predecessors().begin(); - current_pred != block->predecessors().end(); ++current_pred) { - BasicBlock* pred = *current_pred; - merger->predecessors().push_back(pred); - pred->successors().clear(); - pred->successors().push_back(merger); - } - merger->set_deferred(false); - block->predecessors().clear(); - block->predecessors().push_back(merger); - MovePhis(block, merger); -} - void Schedule::MovePhis(BasicBlock* from, BasicBlock* to) { for (size_t i = 0; i < from->NodeCount();) { Node* node = from->NodeAt(i); @@ -481,6 +446,7 @@ void Schedule::SetBlockForNode(BasicBlock* block, Node* node) { std::ostream& operator<<(std::ostream& os, const Schedule& s) { for (BasicBlock* block : ((s.RpoBlockCount() == 0) ? *s.all_blocks() : *s.rpo_order())) { + if (block == nullptr) continue; if (block->rpo_number() == -1) { os << "--- BLOCK id:" << block->id().ToInt(); } else { diff --git a/deps/v8/src/compiler/schedule.h b/deps/v8/src/compiler/schedule.h index aae2cd3ad8..ea42951d50 100644 --- a/deps/v8/src/compiler/schedule.h +++ b/deps/v8/src/compiler/schedule.h @@ -200,6 +200,7 @@ class V8_EXPORT_PRIVATE Schedule final : public NON_EXPORTED_BASE(ZoneObject) { bool IsScheduled(Node* node); BasicBlock* GetBlockById(BasicBlock::Id block_id); + void ClearBlockById(BasicBlock::Id block_id); size_t BasicBlockCount() const { return all_blocks_.size(); } size_t RpoBlockCount() const { return rpo_order_.size(); } @@ -280,8 +281,6 @@ class V8_EXPORT_PRIVATE Schedule final : public NON_EXPORTED_BASE(ZoneObject) { void EliminateRedundantPhiNodes(); // Ensure split-edge form for a hand-assembled schedule. void EnsureSplitEdgeForm(BasicBlock* block); - // Ensure entry into a deferred block happens from a single hot block. - void EnsureDeferredCodeSingleEntryPoint(BasicBlock* block); // Move Phi operands to newly created merger blocks void MovePhis(BasicBlock* from, BasicBlock* to); // Copy deferred block markers down as far as possible diff --git a/deps/v8/src/compiler/scheduler.cc b/deps/v8/src/compiler/scheduler.cc index 25919bb3b3..bf23e436f6 100644 --- a/deps/v8/src/compiler/scheduler.cc +++ b/deps/v8/src/compiler/scheduler.cc @@ -359,7 +359,6 @@ class CFGBuilder : public ZoneObject { // JS opcodes are just like calls => fall through. #undef BUILD_BLOCK_JS_CASE case IrOpcode::kCall: - case IrOpcode::kCallWithCallerSavedRegisters: if (NodeProperties::IsExceptionalCall(node)) { BuildBlocksForSuccessors(node); } @@ -404,7 +403,6 @@ class CFGBuilder : public ZoneObject { // JS opcodes are just like calls => fall through. #undef CONNECT_BLOCK_JS_CASE case IrOpcode::kCall: - case IrOpcode::kCallWithCallerSavedRegisters: if (NodeProperties::IsExceptionalCall(node)) { scheduler_->UpdatePlacement(node, Scheduler::kFixed); ConnectCall(node); @@ -820,7 +818,7 @@ class SpecialRPONumberer : public ZoneObject { if (num_loops > static_cast<int>(loops_.size())) { // Otherwise, compute the loop information from the backedges in order // to perform a traversal that groups loop bodies together. 
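// --- Illustrative aside (not part of the diff): the hunks below change
// ComputeLoopInfo's queue parameter from a mutable reference, which needed
// a NOLINT(runtime/references) waiver, to a pointer, so the mutation is
// visible at every call site. A self-contained before/after of the rule:

#include <vector>

void FillByReference(std::vector<int>& queue) { queue.push_back(1); }  // old
void FillByPointer(std::vector<int>* queue) { queue->push_back(1); }   // new

void Caller() {
  std::vector<int> queue;
  FillByReference(queue);  // mutation is invisible at the call site
  FillByPointer(&queue);   // the '&' makes the mutation explicit
}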
- ComputeLoopInfo(stack_, num_loops, &backedges_); + ComputeLoopInfo(&stack_, num_loops, &backedges_); // Initialize the "loop stack". Note the entry could be a loop header. LoopInfo* loop = @@ -962,9 +960,8 @@ class SpecialRPONumberer : public ZoneObject { } // Computes loop membership from the backedges of the control flow graph. - void ComputeLoopInfo( - ZoneVector<SpecialRPOStackFrame>& queue, // NOLINT(runtime/references) - size_t num_loops, ZoneVector<Backedge>* backedges) { + void ComputeLoopInfo(ZoneVector<SpecialRPOStackFrame>* queue, + size_t num_loops, ZoneVector<Backedge>* backedges) { // Extend existing loop membership vectors. for (LoopInfo& loop : loops_) { loop.members->Resize(static_cast<int>(schedule_->BasicBlockCount()), @@ -993,19 +990,19 @@ class SpecialRPONumberer : public ZoneObject { if (!loops_[loop_num].members->Contains(member->id().ToInt())) { loops_[loop_num].members->Add(member->id().ToInt()); } - queue[queue_length++].block = member; + (*queue)[queue_length++].block = member; } // Propagate loop membership backwards. All predecessors of M up to the // loop header H are members of the loop too. O(|blocks between M and H|). while (queue_length > 0) { - BasicBlock* block = queue[--queue_length].block; + BasicBlock* block = (*queue)[--queue_length].block; for (size_t i = 0; i < block->PredecessorCount(); i++) { BasicBlock* pred = block->PredecessorAt(i); if (pred != header) { if (!loops_[loop_num].members->Contains(pred->id().ToInt())) { loops_[loop_num].members->Add(pred->id().ToInt()); - queue[queue_length++].block = pred; + (*queue)[queue_length++].block = pred; } } } diff --git a/deps/v8/src/compiler/serializer-for-background-compilation.cc b/deps/v8/src/compiler/serializer-for-background-compilation.cc index 5597850b06..20d405b775 100644 --- a/deps/v8/src/compiler/serializer-for-background-compilation.cc +++ b/deps/v8/src/compiler/serializer-for-background-compilation.cc @@ -11,13 +11,13 @@ #include "src/compiler/bytecode-analysis.h" #include "src/compiler/compilation-dependencies.h" #include "src/compiler/js-heap-broker.h" -#include "src/compiler/vector-slot-pair.h" #include "src/handles/handles-inl.h" #include "src/ic/call-optimization.h" #include "src/interpreter/bytecode-array-iterator.h" #include "src/objects/code.h" #include "src/objects/js-array-inl.h" #include "src/objects/js-regexp-inl.h" +#include "src/objects/literal-objects-inl.h" #include "src/objects/shared-function-info-inl.h" #include "src/zone/zone-containers.h" #include "src/zone/zone.h" @@ -38,63 +38,21 @@ namespace compiler { V(Throw) #define CLEAR_ACCUMULATOR_LIST(V) \ - V(Add) \ - V(AddSmi) \ - V(BitwiseAnd) \ - V(BitwiseAndSmi) \ - V(BitwiseNot) \ - V(BitwiseOr) \ - V(BitwiseOrSmi) \ - V(BitwiseXor) \ - V(BitwiseXorSmi) \ V(CallRuntime) \ V(CloneObject) \ V(CreateArrayFromIterable) \ - V(CreateArrayLiteral) \ V(CreateEmptyArrayLiteral) \ V(CreateEmptyObjectLiteral) \ V(CreateMappedArguments) \ - V(CreateObjectLiteral) \ - V(CreateRegExpLiteral) \ V(CreateRestParameter) \ V(CreateUnmappedArguments) \ - V(Dec) \ V(DeletePropertySloppy) \ V(DeletePropertyStrict) \ - V(Div) \ - V(DivSmi) \ - V(Exp) \ - V(ExpSmi) \ V(ForInContinue) \ V(ForInEnumerate) \ - V(ForInNext) \ V(ForInStep) \ - V(Inc) \ - V(LdaLookupSlot) \ - V(LdaLookupSlotInsideTypeof) \ V(LogicalNot) \ - V(Mod) \ - V(ModSmi) \ - V(Mul) \ - V(MulSmi) \ - V(Negate) \ V(SetPendingMessage) \ - V(ShiftLeft) \ - V(ShiftLeftSmi) \ - V(ShiftRight) \ - V(ShiftRightLogical) \ - V(ShiftRightLogicalSmi) \ - V(ShiftRightSmi) \ - 
V(StaLookupSlot) \ - V(Sub) \ - V(SubSmi) \ - V(TestEqual) \ - V(TestEqualStrict) \ - V(TestGreaterThan) \ - V(TestGreaterThanOrEqual) \ - V(TestInstanceOf) \ - V(TestLessThan) \ - V(TestLessThanOrEqual) \ V(TestNull) \ V(TestReferenceEqual) \ V(TestTypeOf) \ @@ -102,8 +60,6 @@ namespace compiler { V(TestUndetectable) \ V(ToBooleanLogicalNot) \ V(ToName) \ - V(ToNumber) \ - V(ToNumeric) \ V(ToString) \ V(TypeOf) @@ -130,15 +86,13 @@ namespace compiler { V(JumpIfTrue) \ V(JumpIfTrueConstant) \ V(JumpIfUndefined) \ - V(JumpIfUndefinedConstant) + V(JumpIfUndefinedConstant) \ + V(JumpIfUndefinedOrNull) \ + V(JumpIfUndefinedOrNullConstant) #define IGNORED_BYTECODE_LIST(V) \ - V(CallNoFeedback) \ V(IncBlockCounter) \ - V(LdaNamedPropertyNoFeedback) \ V(StackCheck) \ - V(StaNamedPropertyNoFeedback) \ - V(ThrowReferenceErrorIfHole) \ V(ThrowSuperAlreadyCalledIfNotHole) \ V(ThrowSuperNotCalledIfHole) @@ -147,9 +101,50 @@ namespace compiler { V(Illegal) \ V(Wide) +#define BINARY_OP_LIST(V) \ + V(Add) \ + V(AddSmi) \ + V(BitwiseAnd) \ + V(BitwiseAndSmi) \ + V(BitwiseOr) \ + V(BitwiseOrSmi) \ + V(BitwiseXor) \ + V(BitwiseXorSmi) \ + V(Div) \ + V(DivSmi) \ + V(Exp) \ + V(ExpSmi) \ + V(Mod) \ + V(ModSmi) \ + V(Mul) \ + V(MulSmi) \ + V(ShiftLeft) \ + V(ShiftLeftSmi) \ + V(ShiftRight) \ + V(ShiftRightSmi) \ + V(ShiftRightLogical) \ + V(ShiftRightLogicalSmi) \ + V(Sub) \ + V(SubSmi) + +#define UNARY_OP_LIST(V) \ + V(BitwiseNot) \ + V(Dec) \ + V(Inc) \ + V(Negate) + +#define COMPARE_OP_LIST(V) \ + V(TestEqual) \ + V(TestEqualStrict) \ + V(TestGreaterThan) \ + V(TestGreaterThanOrEqual) \ + V(TestLessThan) \ + V(TestLessThanOrEqual) + #define SUPPORTED_BYTECODE_LIST(V) \ V(CallAnyReceiver) \ V(CallJSRuntime) \ + V(CallNoFeedback) \ V(CallProperty) \ V(CallProperty0) \ V(CallProperty1) \ @@ -161,12 +156,18 @@ namespace compiler { V(CallWithSpread) \ V(Construct) \ V(ConstructWithSpread) \ + V(CreateArrayLiteral) \ V(CreateBlockContext) \ V(CreateCatchContext) \ V(CreateClosure) \ V(CreateEvalContext) \ V(CreateFunctionContext) \ + V(CreateObjectLiteral) \ + V(CreateRegExpLiteral) \ V(CreateWithContext) \ + V(ForInNext) \ + V(ForInPrepare) \ + V(GetIterator) \ V(GetSuperConstructor) \ V(GetTemplateObject) \ V(InvokeIntrinsic) \ @@ -184,7 +185,10 @@ namespace compiler { V(LdaLookupContextSlotInsideTypeof) \ V(LdaLookupGlobalSlot) \ V(LdaLookupGlobalSlotInsideTypeof) \ + V(LdaLookupSlot) \ + V(LdaLookupSlotInsideTypeof) \ V(LdaNamedProperty) \ + V(LdaNamedPropertyNoFeedback) \ V(LdaNull) \ V(Ldar) \ V(LdaSmi) \ @@ -198,21 +202,31 @@ namespace compiler { V(Return) \ V(StaContextSlot) \ V(StaCurrentContextSlot) \ + V(StaDataPropertyInLiteral) \ V(StaGlobal) \ V(StaInArrayLiteral) \ V(StaKeyedProperty) \ + V(StaLookupSlot) \ V(StaModuleVariable) \ V(StaNamedOwnProperty) \ V(StaNamedProperty) \ + V(StaNamedPropertyNoFeedback) \ V(Star) \ V(SwitchOnGeneratorState) \ V(SwitchOnSmiNoFeedback) \ V(TestIn) \ + V(TestInstanceOf) \ + V(ThrowReferenceErrorIfHole) \ + V(ToNumber) \ + V(ToNumeric) \ + BINARY_OP_LIST(V) \ + COMPARE_OP_LIST(V) \ CLEAR_ACCUMULATOR_LIST(V) \ CLEAR_ENVIRONMENT_LIST(V) \ CONDITIONAL_JUMPS_LIST(V) \ IGNORED_BYTECODE_LIST(V) \ KILL_ENVIRONMENT_LIST(V) \ + UNARY_OP_LIST(V) \ UNCONDITIONAL_JUMPS_LIST(V) \ UNREACHABLE_BYTECODE_LIST(V) @@ -247,6 +261,8 @@ class Hints { public: explicit Hints(Zone* zone); + static Hints SingleConstant(Handle<Object> constant, Zone* zone); + const ConstantsSet& constants() const; const MapsSet& maps() const; const BlueprintsSet& function_blueprints() const; @@ -340,7 +356,7 
@@ class SerializerForBackgroundCompilation { const HintsVector& arguments, SerializerForBackgroundCompilationFlags flags); - bool BailoutOnUninitialized(FeedbackSlot slot); + bool BailoutOnUninitialized(ProcessedFeedback const& feedback); void TraverseBytecode(); @@ -349,55 +365,87 @@ class SerializerForBackgroundCompilation { SUPPORTED_BYTECODE_LIST(DECLARE_VISIT_BYTECODE) #undef DECLARE_VISIT_BYTECODE + // Returns whether the callee with the given SFI should be processed further, + // i.e. whether it's inlineable. + bool ProcessSFIForCallOrConstruct(Handle<SharedFunctionInfo> shared, + const HintsVector& arguments, + SpeculationMode speculation_mode); + // Returns whether {function} should be serialized for compilation. + bool ProcessCalleeForCallOrConstruct(Handle<JSFunction> function, + const HintsVector& arguments, + SpeculationMode speculation_mode); void ProcessCallOrConstruct(Hints callee, base::Optional<Hints> new_target, const HintsVector& arguments, FeedbackSlot slot, bool with_spread = false); - void ProcessCallVarArgs(interpreter::BytecodeArrayIterator* iterator, - ConvertReceiverMode receiver_mode, + void ProcessCallVarArgs(ConvertReceiverMode receiver_mode, + Hints const& callee, interpreter::Register first_reg, + int reg_count, FeedbackSlot slot, bool with_spread = false); void ProcessApiCall(Handle<SharedFunctionInfo> target, const HintsVector& arguments); - void ProcessReceiverMapForApiCall( - FunctionTemplateInfoRef& target, // NOLINT(runtime/references) - Handle<Map> receiver); + void ProcessReceiverMapForApiCall(FunctionTemplateInfoRef target, + Handle<Map> receiver); void ProcessBuiltinCall(Handle<SharedFunctionInfo> target, - const HintsVector& arguments); + const HintsVector& arguments, + SpeculationMode speculation_mode); void ProcessJump(interpreter::BytecodeArrayIterator* iterator); void ProcessKeyedPropertyAccess(Hints const& receiver, Hints const& key, - FeedbackSlot slot, AccessMode mode); - void ProcessNamedPropertyAccess(interpreter::BytecodeArrayIterator* iterator, - AccessMode mode); - void ProcessNamedPropertyAccess(Hints const& receiver, NameRef const& name, - FeedbackSlot slot, AccessMode mode); + FeedbackSlot slot, AccessMode access_mode, + bool honor_bailout_on_uninitialized); + void ProcessNamedPropertyAccess(Hints receiver, NameRef const& name, + FeedbackSlot slot, AccessMode access_mode); + void ProcessNamedAccess(Hints receiver, NamedAccessFeedback const& feedback, + AccessMode access_mode, Hints* new_accumulator_hints); + void ProcessElementAccess(Hints receiver, Hints key, + ElementAccessFeedback const& feedback, + AccessMode access_mode); + + void ProcessModuleVariableAccess( + interpreter::BytecodeArrayIterator* iterator); + + void ProcessHintsForObjectCreate(Hints const& prototype); void ProcessMapHintsForPromises(Hints const& receiver_hints); void ProcessHintsForPromiseResolve(Hints const& resolution_hints); + void ProcessHintsForHasInPrototypeChain(Hints const& instance_hints); void ProcessHintsForRegExpTest(Hints const& regexp_hints); PropertyAccessInfo ProcessMapForRegExpTest(MapRef map); void ProcessHintsForFunctionCall(Hints const& target_hints); + void ProcessHintsForFunctionBind(Hints const& receiver_hints); + void ProcessHintsForObjectGetPrototype(Hints const& object_hints); + void ProcessConstantForOrdinaryHasInstance(HeapObjectRef const& constructor, + bool* walk_prototypes); + void ProcessConstantForInstanceOf(ObjectRef const& constant, + bool* walk_prototypes); + void ProcessHintsForOrdinaryHasInstance(Hints const& 
constructor_hints, + Hints const& instance_hints); + + void ProcessGlobalAccess(FeedbackSlot slot, bool is_load); + + void ProcessCompareOperation(FeedbackSlot slot); + void ProcessForIn(FeedbackSlot slot); + void ProcessUnaryOrBinaryOperation(FeedbackSlot slot, + bool honor_bailout_on_uninitialized); + + PropertyAccessInfo ProcessMapForNamedPropertyAccess( + MapRef receiver_map, NameRef const& name, AccessMode access_mode, + base::Optional<JSObjectRef> receiver, Hints* new_accumulator_hints); + + void ProcessCreateContext(interpreter::BytecodeArrayIterator* iterator, + int scopeinfo_operand_index); - GlobalAccessFeedback const* ProcessFeedbackForGlobalAccess(FeedbackSlot slot); - NamedAccessFeedback const* ProcessFeedbackMapsForNamedAccess( - const MapHandles& maps, AccessMode mode, NameRef const& name); - ElementAccessFeedback const* ProcessFeedbackMapsForElementAccess( - const MapHandles& maps, AccessMode mode, - KeyedAccessMode const& keyed_mode); - void ProcessFeedbackForPropertyAccess(FeedbackSlot slot, AccessMode mode, - base::Optional<NameRef> static_name); - void ProcessMapForNamedPropertyAccess(MapRef const& map, NameRef const& name); - - void ProcessCreateContext(); enum ContextProcessingMode { kIgnoreSlot, kSerializeSlot, - kSerializeSlotAndAddToAccumulator }; - void ProcessContextAccess(const Hints& context_hints, int slot, int depth, - ContextProcessingMode mode); - void ProcessImmutableLoad(ContextRef& context, // NOLINT(runtime/references) - int slot, ContextProcessingMode mode); + void ProcessContextAccess(Hints const& context_hints, int slot, int depth, + ContextProcessingMode mode, + Hints* result_hints = nullptr); + void ProcessImmutableLoad(ContextRef const& context, int slot, + ContextProcessingMode mode, + Hints* new_accumulator_hints); void ProcessLdaLookupGlobalSlot(interpreter::BytecodeArrayIterator* iterator); void ProcessLdaLookupContextSlot( interpreter::BytecodeArrayIterator* iterator); @@ -420,8 +468,10 @@ class SerializerForBackgroundCompilation { void ContributeToJumpTargetEnvironment(int target_offset); void IncorporateJumpTargetEnvironment(int target_offset); + Handle<FeedbackVector> feedback_vector() const; Handle<BytecodeArray> bytecode_array() const; - BytecodeAnalysis const& GetBytecodeAnalysis(bool serialize); + BytecodeAnalysis const& GetBytecodeAnalysis( + SerializationPolicy policy = SerializationPolicy::kAssumeSerialized); JSHeapBroker* broker() const { return broker_; } CompilationDependencies* dependencies() const { return dependencies_; } @@ -496,6 +546,12 @@ bool Hints::Equals(Hints const& other) const { } #endif +Hints Hints::SingleConstant(Handle<Object> constant, Zone* zone) { + Hints result(zone); + result.AddConstant(constant); + return result; +} + const ConstantsSet& Hints::constants() const { return constants_; } const MapsSet& Hints::maps() const { return maps_; } @@ -628,7 +684,7 @@ class SerializerForBackgroundCompilation::Environment : public ZoneObject { // Appends the hints for the given register range to {dst} (in order). void ExportRegisterHints(interpreter::Register first, size_t count, - HintsVector& dst); // NOLINT(runtime/references) + HintsVector* dst); private: friend std::ostream& operator<<(std::ostream& out, const Environment& env); @@ -693,8 +749,8 @@ SerializerForBackgroundCompilation::Environment::Environment( } // Pad the rest with "undefined". 
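// --- Illustrative aside (not part of the diff): the rewrite below is a
// typical use of the new Hints::SingleConstant factory, which collapses the
// old declare-then-AddConstant pair into one expression. Self-contained
// sketch of the shape; Zone and the constant type are placeholders:

#include <set>

struct ZoneSketch {};

struct HintsSketch {
  explicit HintsSketch(ZoneSketch* zone) : zone_(zone) {}
  void AddConstant(const void* constant) { constants_.insert(constant); }
  static HintsSketch SingleConstant(const void* constant, ZoneSketch* zone) {
    HintsSketch result(zone);
    result.AddConstant(constant);
    return result;
  }
  std::set<const void*> constants_;
  ZoneSketch* zone_;
};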
- Hints undefined_hint(zone); - undefined_hint.AddConstant(isolate->factory()->undefined_value()); + Hints undefined_hint = + Hints::SingleConstant(isolate->factory()->undefined_value(), zone); for (size_t i = arguments.size(); i < param_count; ++i) { ephemeral_hints_[i] = undefined_hint; } @@ -826,7 +882,7 @@ SerializerForBackgroundCompilation::SerializerForBackgroundCompilation( } bool SerializerForBackgroundCompilation::BailoutOnUninitialized( - FeedbackSlot slot) { + ProcessedFeedback const& feedback) { DCHECK(!environment()->IsDead()); if (!(flags() & SerializerForBackgroundCompilationFlag::kBailoutOnUninitialized)) { @@ -837,16 +893,7 @@ bool SerializerForBackgroundCompilation::BailoutOnUninitialized( // OSR entry point. TODO(neis): Support OSR? return false; } - FeedbackNexus nexus(environment()->function().feedback_vector(), slot); - if (!slot.IsInvalid() && nexus.IsUninitialized()) { - FeedbackSource source(nexus); - if (broker()->HasFeedback(source)) { - DCHECK_EQ(broker()->GetFeedback(source)->kind(), - ProcessedFeedback::kInsufficient); - } else { - broker()->SetFeedback(source, - new (broker()->zone()) InsufficientFeedback()); - } + if (feedback.IsInsufficient()) { environment()->Kill(); return true; } @@ -856,15 +903,14 @@ bool SerializerForBackgroundCompilation::BailoutOnUninitialized( Hints SerializerForBackgroundCompilation::Run() { TraceScope tracer(broker(), this, "SerializerForBackgroundCompilation::Run"); SharedFunctionInfoRef shared(broker(), environment()->function().shared()); - FeedbackVectorRef feedback_vector( - broker(), environment()->function().feedback_vector()); - if (shared.IsSerializedForCompilation(feedback_vector)) { + FeedbackVectorRef feedback_vector_ref(broker(), feedback_vector()); + if (shared.IsSerializedForCompilation(feedback_vector_ref)) { TRACE_BROKER(broker(), "Already ran serializer for SharedFunctionInfo " << Brief(*shared.object()) << ", bailing out.\n"); return Hints(zone()); } - shared.SetSerializedForCompilation(feedback_vector); + shared.SetSerializedForCompilation(feedback_vector_ref); // We eagerly call the {EnsureSourcePositionsAvailable} for all serialized // SFIs while still on the main thread. 
Source positions will later be used @@ -875,7 +921,7 @@ Hints SerializerForBackgroundCompilation::Run() { shared.object()); } - feedback_vector.SerializeSlots(); + feedback_vector_ref.Serialize(); TraverseBytecode(); return environment()->return_value_hints(); } @@ -909,6 +955,11 @@ class ExceptionHandlerMatcher { std::set<int>::const_iterator handlers_iterator_; }; +Handle<FeedbackVector> SerializerForBackgroundCompilation::feedback_vector() + const { + return environment()->function().feedback_vector(); +} + Handle<BytecodeArray> SerializerForBackgroundCompilation::bytecode_array() const { return handle(environment()->function().shared()->GetBytecodeArray(), @@ -916,22 +967,28 @@ Handle<BytecodeArray> SerializerForBackgroundCompilation::bytecode_array() } BytecodeAnalysis const& SerializerForBackgroundCompilation::GetBytecodeAnalysis( - bool serialize) { + SerializationPolicy policy) { return broker()->GetBytecodeAnalysis( bytecode_array(), osr_offset(), flags() & SerializerForBackgroundCompilationFlag::kAnalyzeEnvironmentLiveness, - serialize); + policy); } void SerializerForBackgroundCompilation::TraverseBytecode() { - BytecodeAnalysis const& bytecode_analysis = GetBytecodeAnalysis(true); + BytecodeAnalysis const& bytecode_analysis = + GetBytecodeAnalysis(SerializationPolicy::kSerializeIfNeeded); BytecodeArrayRef(broker(), bytecode_array()).SerializeForCompilation(); BytecodeArrayIterator iterator(bytecode_array()); ExceptionHandlerMatcher handler_matcher(iterator, bytecode_array()); + bool has_one_shot_bytecode = false; for (; !iterator.done(); iterator.Advance()) { + has_one_shot_bytecode = + has_one_shot_bytecode || + interpreter::Bytecodes::IsOneShotBytecode(iterator.current_bytecode()); + int const current_offset = iterator.current_offset(); IncorporateJumpTargetEnvironment(current_offset); @@ -970,6 +1027,21 @@ void SerializerForBackgroundCompilation::TraverseBytecode() { } } } + + if (has_one_shot_bytecode) { + broker()->isolate()->CountUsage( + v8::Isolate::UseCounterFeature::kOptimizedFunctionWithOneShotBytecode); + } +} + +void SerializerForBackgroundCompilation::VisitGetIterator( + BytecodeArrayIterator* iterator) { + AccessMode mode = AccessMode::kLoad; + Hints const& receiver = + environment()->register_hints(iterator->GetRegisterOperand(0)); + Handle<Name> name = broker()->isolate()->factory()->iterator_symbol(); + FeedbackSlot slot = iterator->GetSlotOperand(1); + ProcessNamedPropertyAccess(receiver, NameRef(broker(), name), slot, mode); } void SerializerForBackgroundCompilation::VisitGetSuperConstructor( @@ -995,11 +1067,11 @@ void SerializerForBackgroundCompilation::VisitGetTemplateObject( ObjectRef description( broker(), iterator->GetConstantForIndexOperand(0, broker()->isolate())); FeedbackSlot slot = iterator->GetSlotOperand(1); - FeedbackVectorRef feedback_vector( - broker(), environment()->function().feedback_vector()); + FeedbackVectorRef feedback_vector_ref(broker(), feedback_vector()); SharedFunctionInfoRef shared(broker(), environment()->function().shared()); JSArrayRef template_object = - shared.GetTemplateObject(description, feedback_vector, slot, true); + shared.GetTemplateObject(description, feedback_vector_ref, slot, + SerializationPolicy::kSerializeIfNeeded); environment()->accumulator_hints().Clear(); environment()->accumulator_hints().AddConstant(template_object.object()); } @@ -1058,25 +1130,92 @@ void SerializerForBackgroundCompilation::VisitInvokeIntrinsic( Runtime::FunctionId functionId = iterator->GetIntrinsicIdOperand(0); // For 
JSNativeContextSpecialization::ReduceJSAsyncFunctionResolve and // JSNativeContextSpecialization::ReduceJSResolvePromise. - if (functionId == Runtime::kInlineAsyncFunctionResolve) { - interpreter::Register first_reg = iterator->GetRegisterOperand(1); - size_t reg_count = iterator->GetRegisterCountOperand(2); - CHECK_EQ(reg_count, 3); - HintsVector arguments(zone()); - environment()->ExportRegisterHints(first_reg, reg_count, arguments); - Hints const& resolution_hints = arguments[1]; // The resolution object. - ProcessHintsForPromiseResolve(resolution_hints); - environment()->accumulator_hints().Clear(); - return; + switch (functionId) { + case Runtime::kInlineAsyncFunctionResolve: { + ObjectRef(broker(), broker()->isolate()->builtins()->builtin_handle( + Builtins::kAsyncFunctionResolve)); + interpreter::Register first_reg = iterator->GetRegisterOperand(1); + size_t reg_count = iterator->GetRegisterCountOperand(2); + CHECK_EQ(reg_count, 3); + HintsVector arguments(zone()); + environment()->ExportRegisterHints(first_reg, reg_count, &arguments); + Hints const& resolution_hints = arguments[1]; // The resolution object. + ProcessHintsForPromiseResolve(resolution_hints); + environment()->accumulator_hints().Clear(); + return; + } + case Runtime::kInlineAsyncGeneratorReject: + case Runtime::kAsyncGeneratorReject: { + ObjectRef(broker(), broker()->isolate()->builtins()->builtin_handle( + Builtins::kAsyncGeneratorReject)); + break; + } + case Runtime::kInlineAsyncGeneratorResolve: + case Runtime::kAsyncGeneratorResolve: { + ObjectRef(broker(), broker()->isolate()->builtins()->builtin_handle( + Builtins::kAsyncGeneratorResolve)); + break; + } + case Runtime::kInlineAsyncGeneratorYield: + case Runtime::kAsyncGeneratorYield: { + ObjectRef(broker(), broker()->isolate()->builtins()->builtin_handle( + Builtins::kAsyncGeneratorYield)); + break; + } + case Runtime::kInlineAsyncGeneratorAwaitUncaught: + case Runtime::kAsyncGeneratorAwaitUncaught: { + ObjectRef(broker(), broker()->isolate()->builtins()->builtin_handle( + Builtins::kAsyncGeneratorAwaitUncaught)); + break; + } + case Runtime::kInlineAsyncGeneratorAwaitCaught: + case Runtime::kAsyncGeneratorAwaitCaught: { + ObjectRef(broker(), broker()->isolate()->builtins()->builtin_handle( + Builtins::kAsyncGeneratorAwaitCaught)); + break; + } + case Runtime::kInlineAsyncFunctionAwaitUncaught: + case Runtime::kAsyncFunctionAwaitUncaught: { + ObjectRef(broker(), broker()->isolate()->builtins()->builtin_handle( + Builtins::kAsyncFunctionAwaitUncaught)); + break; + } + case Runtime::kInlineAsyncFunctionAwaitCaught: + case Runtime::kAsyncFunctionAwaitCaught: { + ObjectRef(broker(), broker()->isolate()->builtins()->builtin_handle( + Builtins::kAsyncFunctionAwaitCaught)); + break; + } + case Runtime::kInlineAsyncFunctionReject: + case Runtime::kAsyncFunctionReject: { + ObjectRef(broker(), broker()->isolate()->builtins()->builtin_handle( + Builtins::kAsyncFunctionReject)); + break; + } + case Runtime::kAsyncFunctionResolve: { + ObjectRef(broker(), broker()->isolate()->builtins()->builtin_handle( + Builtins::kAsyncFunctionResolve)); + break; + } + case Runtime::kInlineCopyDataProperties: + case Runtime::kCopyDataProperties: { + ObjectRef(broker(), broker()->isolate()->builtins()->builtin_handle( + Builtins::kCopyDataProperties)); + break; + } + default: { + break; + } } environment()->ClearEphemeralHints(); } void SerializerForBackgroundCompilation::VisitLdaConstant( BytecodeArrayIterator* iterator) { + ObjectRef object( + broker(), 
iterator->GetConstantForIndexOperand(0, broker()->isolate())); environment()->accumulator_hints().Clear(); - environment()->accumulator_hints().AddConstant( - iterator->GetConstantForIndexOperand(0, broker()->isolate())); + environment()->accumulator_hints().AddConstant(object.object()); } void SerializerForBackgroundCompilation::VisitPushContext( @@ -1088,7 +1227,7 @@ void SerializerForBackgroundCompilation::VisitPushContext( saved_context_hints.Clear(); saved_context_hints.Add(current_context_hints); - // New Context is in the accumulator. Put those hints into the current context + // New context is in the accumulator. Put those hints into the current context // register hints. current_context_hints.Clear(); current_context_hints.Add(environment()->accumulator_hints()); @@ -1104,19 +1243,21 @@ void SerializerForBackgroundCompilation::VisitPopContext( } void SerializerForBackgroundCompilation::ProcessImmutableLoad( - ContextRef& context_ref, int slot, ContextProcessingMode mode) { - DCHECK(mode == kSerializeSlot || mode == kSerializeSlotAndAddToAccumulator); - base::Optional<ObjectRef> slot_value = context_ref.get(slot, true); + ContextRef const& context_ref, int slot, ContextProcessingMode mode, + Hints* result_hints) { + DCHECK_EQ(mode, kSerializeSlot); + base::Optional<ObjectRef> slot_value = + context_ref.get(slot, SerializationPolicy::kSerializeIfNeeded); - // Also, put the object into the constant hints for the accumulator. - if (mode == kSerializeSlotAndAddToAccumulator && slot_value.has_value()) { - environment()->accumulator_hints().AddConstant(slot_value.value().object()); + // If requested, record the object as a hint for the result value. + if (result_hints != nullptr && slot_value.has_value()) { + result_hints->AddConstant(slot_value.value().object()); } } void SerializerForBackgroundCompilation::ProcessContextAccess( - const Hints& context_hints, int slot, int depth, - ContextProcessingMode mode) { + Hints const& context_hints, int slot, int depth, ContextProcessingMode mode, + Hints* result_hints) { // This function is for JSContextSpecialization::ReduceJSLoadContext and // ReduceJSStoreContext. Those reductions attempt to eliminate as many // loads as possible by making use of constant Context objects. In the @@ -1127,9 +1268,10 @@ void SerializerForBackgroundCompilation::ProcessContextAccess( // Walk this context to the given depth and serialize the slot found. 
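// --- Illustrative aside (not part of the diff): ProcessImmutableLoad now
// reports the serialized slot value through an optional result_hints
// out-parameter instead of writing the accumulator itself (the deleted
// kSerializeSlotAndAddToAccumulator mode). Toy sketch of the pattern with
// stand-in types; callers do the Clear()+Add() dance seen in VisitLda*Slot:

#include <vector>

struct HintsToy {
  std::vector<int> constants;
  void Add(const HintsToy& other) {
    constants.insert(constants.end(), other.constants.begin(),
                     other.constants.end());
  }
  void Clear() { constants.clear(); }
};

// The producer appends into the caller's buffer and no longer knows that
// the result will become the accumulator.
void LoadSlotToy(int slot_value, HintsToy* result_hints) {
  if (result_hints != nullptr) result_hints->constants.push_back(slot_value);
}

void VisitLoadToy(HintsToy* accumulator) {
  HintsToy new_hints;
  LoadSlotToy(42, &new_hints);
  accumulator->Clear();
  accumulator->Add(new_hints);
}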
ContextRef context_ref(broker(), x); size_t remaining_depth = depth; - context_ref = context_ref.previous(&remaining_depth, true); + context_ref = context_ref.previous( + &remaining_depth, SerializationPolicy::kSerializeIfNeeded); if (remaining_depth == 0 && mode != kIgnoreSlot) { - ProcessImmutableLoad(context_ref, slot, mode); + ProcessImmutableLoad(context_ref, slot, mode, result_hints); } } } @@ -1137,9 +1279,10 @@ void SerializerForBackgroundCompilation::ProcessContextAccess( if (x.distance <= static_cast<unsigned int>(depth)) { ContextRef context_ref(broker(), x.context); size_t remaining_depth = depth - x.distance; - context_ref = context_ref.previous(&remaining_depth, true); + context_ref = context_ref.previous( + &remaining_depth, SerializationPolicy::kSerializeIfNeeded); if (remaining_depth == 0 && mode != kIgnoreSlot) { - ProcessImmutableLoad(context_ref, slot, mode); + ProcessImmutableLoad(context_ref, slot, mode, result_hints); } } } @@ -1147,67 +1290,92 @@ void SerializerForBackgroundCompilation::ProcessContextAccess( void SerializerForBackgroundCompilation::VisitLdaContextSlot( BytecodeArrayIterator* iterator) { - Hints& context_hints = + Hints const& context_hints = environment()->register_hints(iterator->GetRegisterOperand(0)); const int slot = iterator->GetIndexOperand(1); const int depth = iterator->GetUnsignedImmediateOperand(2); + Hints new_accumulator_hints(zone()); + ProcessContextAccess(context_hints, slot, depth, kIgnoreSlot, + &new_accumulator_hints); environment()->accumulator_hints().Clear(); - ProcessContextAccess(context_hints, slot, depth, kIgnoreSlot); + environment()->accumulator_hints().Add(new_accumulator_hints); } void SerializerForBackgroundCompilation::VisitLdaCurrentContextSlot( BytecodeArrayIterator* iterator) { const int slot = iterator->GetIndexOperand(0); const int depth = 0; - Hints& context_hints = environment()->current_context_hints(); + Hints const& context_hints = environment()->current_context_hints(); + Hints new_accumulator_hints(zone()); + ProcessContextAccess(context_hints, slot, depth, kIgnoreSlot, + &new_accumulator_hints); environment()->accumulator_hints().Clear(); - ProcessContextAccess(context_hints, slot, depth, kIgnoreSlot); + environment()->accumulator_hints().Add(new_accumulator_hints); } void SerializerForBackgroundCompilation::VisitLdaImmutableContextSlot( BytecodeArrayIterator* iterator) { const int slot = iterator->GetIndexOperand(1); const int depth = iterator->GetUnsignedImmediateOperand(2); - Hints& context_hints = + Hints const& context_hints = environment()->register_hints(iterator->GetRegisterOperand(0)); + Hints new_accumulator_hints(zone()); + ProcessContextAccess(context_hints, slot, depth, kSerializeSlot, + &new_accumulator_hints); environment()->accumulator_hints().Clear(); - ProcessContextAccess(context_hints, slot, depth, - kSerializeSlotAndAddToAccumulator); + environment()->accumulator_hints().Add(new_accumulator_hints); } void SerializerForBackgroundCompilation::VisitLdaImmutableCurrentContextSlot( BytecodeArrayIterator* iterator) { const int slot = iterator->GetIndexOperand(0); const int depth = 0; - Hints& context_hints = environment()->current_context_hints(); + Hints const& context_hints = environment()->current_context_hints(); + Hints new_accumulator_hints(zone()); + ProcessContextAccess(context_hints, slot, depth, kSerializeSlot, + &new_accumulator_hints); environment()->accumulator_hints().Clear(); - ProcessContextAccess(context_hints, slot, depth, - kSerializeSlotAndAddToAccumulator); + 
environment()->accumulator_hints().Add(new_accumulator_hints); } -void SerializerForBackgroundCompilation::VisitLdaModuleVariable( +void SerializerForBackgroundCompilation::ProcessModuleVariableAccess( BytecodeArrayIterator* iterator) { + const int slot = Context::EXTENSION_INDEX; const int depth = iterator->GetUnsignedImmediateOperand(1); + Hints const& context_hints = environment()->current_context_hints(); - // TODO(mvstanton): If we have a constant module, should we serialize the - // cell as well? Then we could put the value in the accumulator. - environment()->accumulator_hints().Clear(); - ProcessContextAccess(environment()->current_context_hints(), - Context::EXTENSION_INDEX, depth, kSerializeSlot); + Hints result_hints(zone()); + ProcessContextAccess(context_hints, slot, depth, kSerializeSlot, + &result_hints); + for (Handle<Object> constant : result_hints.constants()) { + ObjectRef object(broker(), constant); + // For JSTypedLowering::BuildGetModuleCell. + if (object.IsSourceTextModule()) object.AsSourceTextModule().Serialize(); + } +} + +void SerializerForBackgroundCompilation::VisitLdaModuleVariable( + BytecodeArrayIterator* iterator) { + ProcessModuleVariableAccess(iterator); } void SerializerForBackgroundCompilation::VisitStaModuleVariable( BytecodeArrayIterator* iterator) { - const int depth = iterator->GetUnsignedImmediateOperand(1); - ProcessContextAccess(environment()->current_context_hints(), - Context::EXTENSION_INDEX, depth, kSerializeSlot); + ProcessModuleVariableAccess(iterator); +} + +void SerializerForBackgroundCompilation::VisitStaLookupSlot( + BytecodeArrayIterator* iterator) { + ObjectRef(broker(), + iterator->GetConstantForIndexOperand(0, broker()->isolate())); + environment()->accumulator_hints().Clear(); } void SerializerForBackgroundCompilation::VisitStaContextSlot( BytecodeArrayIterator* iterator) { const int slot = iterator->GetIndexOperand(1); const int depth = iterator->GetUnsignedImmediateOperand(2); - Hints& register_hints = + Hints const& register_hints = environment()->register_hints(iterator->GetRegisterOperand(0)); ProcessContextAccess(register_hints, slot, depth, kIgnoreSlot); } @@ -1216,7 +1384,7 @@ void SerializerForBackgroundCompilation::VisitStaCurrentContextSlot( BytecodeArrayIterator* iterator) { const int slot = iterator->GetIndexOperand(0); const int depth = 0; - Hints& context_hints = environment()->current_context_hints(); + Hints const& context_hints = environment()->current_context_hints(); ProcessContextAccess(context_hints, slot, depth, kIgnoreSlot); } @@ -1242,35 +1410,80 @@ void SerializerForBackgroundCompilation::VisitMov( environment()->register_hints(dst).Add(environment()->register_hints(src)); } +void SerializerForBackgroundCompilation::VisitCreateRegExpLiteral( + BytecodeArrayIterator* iterator) { + Handle<String> constant_pattern = Handle<String>::cast( + iterator->GetConstantForIndexOperand(0, broker()->isolate())); + StringRef description(broker(), constant_pattern); + environment()->accumulator_hints().Clear(); +} + +void SerializerForBackgroundCompilation::VisitCreateArrayLiteral( + BytecodeArrayIterator* iterator) { + Handle<ArrayBoilerplateDescription> array_boilerplate_description = + Handle<ArrayBoilerplateDescription>::cast( + iterator->GetConstantForIndexOperand(0, broker()->isolate())); + ArrayBoilerplateDescriptionRef description(broker(), + array_boilerplate_description); + environment()->accumulator_hints().Clear(); +} + +void SerializerForBackgroundCompilation::VisitCreateObjectLiteral( + 
BytecodeArrayIterator* iterator) { + Handle<ObjectBoilerplateDescription> constant_properties = + Handle<ObjectBoilerplateDescription>::cast( + iterator->GetConstantForIndexOperand(0, broker()->isolate())); + ObjectBoilerplateDescriptionRef description(broker(), constant_properties); + environment()->accumulator_hints().Clear(); +} + void SerializerForBackgroundCompilation::VisitCreateFunctionContext( BytecodeArrayIterator* iterator) { - ProcessCreateContext(); + ProcessCreateContext(iterator, 0); } void SerializerForBackgroundCompilation::VisitCreateBlockContext( BytecodeArrayIterator* iterator) { - ProcessCreateContext(); + ProcessCreateContext(iterator, 0); } void SerializerForBackgroundCompilation::VisitCreateEvalContext( BytecodeArrayIterator* iterator) { - ProcessCreateContext(); + ProcessCreateContext(iterator, 0); } void SerializerForBackgroundCompilation::VisitCreateWithContext( BytecodeArrayIterator* iterator) { - ProcessCreateContext(); + ProcessCreateContext(iterator, 1); } void SerializerForBackgroundCompilation::VisitCreateCatchContext( BytecodeArrayIterator* iterator) { - ProcessCreateContext(); + ProcessCreateContext(iterator, 1); +} + +void SerializerForBackgroundCompilation::VisitForInNext( + BytecodeArrayIterator* iterator) { + FeedbackSlot slot = iterator->GetSlotOperand(3); + ProcessForIn(slot); +} + +void SerializerForBackgroundCompilation::VisitForInPrepare( + BytecodeArrayIterator* iterator) { + FeedbackSlot slot = iterator->GetSlotOperand(1); + ProcessForIn(slot); } -void SerializerForBackgroundCompilation::ProcessCreateContext() { +void SerializerForBackgroundCompilation::ProcessCreateContext( + interpreter::BytecodeArrayIterator* iterator, int scopeinfo_operand_index) { + Handle<ScopeInfo> scope_info = + Handle<ScopeInfo>::cast(iterator->GetConstantForIndexOperand( + scopeinfo_operand_index, broker()->isolate())); + ScopeInfoRef scope_info_ref(broker(), scope_info); + + Hints const& current_context_hints = environment()->current_context_hints(); Hints& accumulator_hints = environment()->accumulator_hints(); accumulator_hints.Clear(); - Hints& current_context_hints = environment()->current_context_hints(); // For each constant context, we must create a virtual context from // it of distance one. @@ -1291,31 +1504,33 @@ void SerializerForBackgroundCompilation::ProcessCreateContext() { void SerializerForBackgroundCompilation::VisitCreateClosure( BytecodeArrayIterator* iterator) { + environment()->accumulator_hints().Clear(); + Handle<SharedFunctionInfo> shared = Handle<SharedFunctionInfo>::cast( iterator->GetConstantForIndexOperand(0, broker()->isolate())); - Handle<FeedbackCell> feedback_cell = - environment()->function().feedback_vector()->GetClosureFeedbackCell( - iterator->GetIndexOperand(1)); + feedback_vector()->GetClosureFeedbackCell(iterator->GetIndexOperand(1)); FeedbackCellRef feedback_cell_ref(broker(), feedback_cell); Handle<Object> cell_value(feedback_cell->value(), broker()->isolate()); ObjectRef cell_value_ref(broker(), cell_value); - environment()->accumulator_hints().Clear(); if (cell_value->IsFeedbackVector()) { - // Gather the context hints from the current context register hint - // structure. 
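// --- Illustrative aside (not part of the diff): a closure only becomes a
// FunctionBlueprint hint when its feedback cell already holds a feedback
// vector (typically meaning the closure has already run); otherwise there
// is nothing for the serializer to follow. Toy sketch with stand-in types:

#include <memory>
#include <vector>

struct FeedbackVectorToy {};
struct FeedbackCellToy { std::shared_ptr<FeedbackVectorToy> value; };
struct BlueprintToy { std::shared_ptr<FeedbackVectorToy> vector; };

void RecordClosureHint(const FeedbackCellToy& cell,
                       std::vector<BlueprintToy>* accumulator_hints) {
  if (cell.value == nullptr) return;  // no vector yet: nothing to serialize
  accumulator_hints->push_back(BlueprintToy{cell.value});
}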
FunctionBlueprint blueprint(shared, Handle<FeedbackVector>::cast(cell_value), environment()->current_context_hints()); - environment()->accumulator_hints().AddFunctionBlueprint(blueprint); } } void SerializerForBackgroundCompilation::VisitCallUndefinedReceiver( BytecodeArrayIterator* iterator) { - ProcessCallVarArgs(iterator, ConvertReceiverMode::kNullOrUndefined); + const Hints& callee = + environment()->register_hints(iterator->GetRegisterOperand(0)); + interpreter::Register first_reg = iterator->GetRegisterOperand(1); + int reg_count = static_cast<int>(iterator->GetRegisterCountOperand(2)); + FeedbackSlot slot = iterator->GetSlotOperand(3); + ProcessCallVarArgs(ConvertReceiverMode::kNullOrUndefined, callee, first_reg, + reg_count, slot); } void SerializerForBackgroundCompilation::VisitCallUndefinedReceiver0( @@ -1324,9 +1539,8 @@ void SerializerForBackgroundCompilation::VisitCallUndefinedReceiver0( environment()->register_hints(iterator->GetRegisterOperand(0)); FeedbackSlot slot = iterator->GetSlotOperand(1); - Hints receiver(zone()); - receiver.AddConstant(broker()->isolate()->factory()->undefined_value()); - + Hints receiver = Hints::SingleConstant( + broker()->isolate()->factory()->undefined_value(), zone()); HintsVector parameters({receiver}, zone()); ProcessCallOrConstruct(callee, base::nullopt, parameters, slot); } @@ -1339,9 +1553,8 @@ void SerializerForBackgroundCompilation::VisitCallUndefinedReceiver1( environment()->register_hints(iterator->GetRegisterOperand(1)); FeedbackSlot slot = iterator->GetSlotOperand(2); - Hints receiver(zone()); - receiver.AddConstant(broker()->isolate()->factory()->undefined_value()); - + Hints receiver = Hints::SingleConstant( + broker()->isolate()->factory()->undefined_value(), zone()); HintsVector parameters({receiver, arg0}, zone()); ProcessCallOrConstruct(callee, base::nullopt, parameters, slot); } @@ -1356,21 +1569,42 @@ void SerializerForBackgroundCompilation::VisitCallUndefinedReceiver2( environment()->register_hints(iterator->GetRegisterOperand(2)); FeedbackSlot slot = iterator->GetSlotOperand(3); - Hints receiver(zone()); - receiver.AddConstant(broker()->isolate()->factory()->undefined_value()); - + Hints receiver = Hints::SingleConstant( + broker()->isolate()->factory()->undefined_value(), zone()); HintsVector parameters({receiver, arg0, arg1}, zone()); ProcessCallOrConstruct(callee, base::nullopt, parameters, slot); } void SerializerForBackgroundCompilation::VisitCallAnyReceiver( BytecodeArrayIterator* iterator) { - ProcessCallVarArgs(iterator, ConvertReceiverMode::kAny); + const Hints& callee = + environment()->register_hints(iterator->GetRegisterOperand(0)); + interpreter::Register first_reg = iterator->GetRegisterOperand(1); + int reg_count = static_cast<int>(iterator->GetRegisterCountOperand(2)); + FeedbackSlot slot = iterator->GetSlotOperand(3); + ProcessCallVarArgs(ConvertReceiverMode::kAny, callee, first_reg, reg_count, + slot); +} + +void SerializerForBackgroundCompilation::VisitCallNoFeedback( + BytecodeArrayIterator* iterator) { + const Hints& callee = + environment()->register_hints(iterator->GetRegisterOperand(0)); + interpreter::Register first_reg = iterator->GetRegisterOperand(1); + int reg_count = static_cast<int>(iterator->GetRegisterCountOperand(2)); + ProcessCallVarArgs(ConvertReceiverMode::kAny, callee, first_reg, reg_count, + FeedbackSlot::Invalid()); } void SerializerForBackgroundCompilation::VisitCallProperty( BytecodeArrayIterator* iterator) { - ProcessCallVarArgs(iterator, ConvertReceiverMode::kNullOrUndefined); + 
const Hints& callee = + environment()->register_hints(iterator->GetRegisterOperand(0)); + interpreter::Register first_reg = iterator->GetRegisterOperand(1); + int reg_count = static_cast<int>(iterator->GetRegisterCountOperand(2)); + FeedbackSlot slot = iterator->GetSlotOperand(3); + ProcessCallVarArgs(ConvertReceiverMode::kNotNullOrUndefined, callee, + first_reg, reg_count, slot); } void SerializerForBackgroundCompilation::VisitCallProperty0( @@ -1417,17 +1651,28 @@ void SerializerForBackgroundCompilation::VisitCallProperty2( void SerializerForBackgroundCompilation::VisitCallWithSpread( BytecodeArrayIterator* iterator) { - ProcessCallVarArgs(iterator, ConvertReceiverMode::kAny, true); + const Hints& callee = + environment()->register_hints(iterator->GetRegisterOperand(0)); + interpreter::Register first_reg = iterator->GetRegisterOperand(1); + int reg_count = static_cast<int>(iterator->GetRegisterCountOperand(2)); + FeedbackSlot slot = iterator->GetSlotOperand(3); + ProcessCallVarArgs(ConvertReceiverMode::kAny, callee, first_reg, reg_count, + slot, true); } void SerializerForBackgroundCompilation::VisitCallJSRuntime( BytecodeArrayIterator* iterator) { - environment()->accumulator_hints().Clear(); - - // BytecodeGraphBuilder::VisitCallJSRuntime needs the {runtime_index} - // slot in the native context to be serialized. const int runtime_index = iterator->GetNativeContextIndexOperand(0); - broker()->native_context().get(runtime_index, true); + ObjectRef constant = + broker() + ->target_native_context() + .get(runtime_index, SerializationPolicy::kSerializeIfNeeded) + .value(); + Hints callee = Hints::SingleConstant(constant.object(), zone()); + interpreter::Register first_reg = iterator->GetRegisterOperand(1); + int reg_count = static_cast<int>(iterator->GetRegisterCountOperand(2)); + ProcessCallVarArgs(ConvertReceiverMode::kNullOrUndefined, callee, first_reg, + reg_count, FeedbackSlot::Invalid()); } Hints SerializerForBackgroundCompilation::RunChildSerializer( @@ -1456,107 +1701,168 @@ Hints SerializerForBackgroundCompilation::RunChildSerializer( return child_serializer.Run(); } +bool SerializerForBackgroundCompilation::ProcessSFIForCallOrConstruct( + Handle<SharedFunctionInfo> shared, const HintsVector& arguments, + SpeculationMode speculation_mode) { + if (shared->IsApiFunction()) { + ProcessApiCall(shared, arguments); + DCHECK(!shared->IsInlineable()); + } else if (shared->HasBuiltinId()) { + ProcessBuiltinCall(shared, arguments, speculation_mode); + DCHECK(!shared->IsInlineable()); + } + return shared->IsInlineable(); +} + +bool SerializerForBackgroundCompilation::ProcessCalleeForCallOrConstruct( + Handle<JSFunction> function, const HintsVector& arguments, + SpeculationMode speculation_mode) { + JSFunctionRef(broker(), function).Serialize(); + + Handle<SharedFunctionInfo> shared(function->shared(), broker()->isolate()); + + return ProcessSFIForCallOrConstruct(shared, arguments, speculation_mode) && + function->has_feedback_vector(); +} + namespace { -base::Optional<HeapObjectRef> GetHeapObjectFeedback( - JSHeapBroker* broker, Handle<FeedbackVector> feedback_vector, - FeedbackSlot slot) { - if (slot.IsInvalid()) return base::nullopt; - FeedbackNexus nexus(feedback_vector, slot); - VectorSlotPair feedback(feedback_vector, slot, nexus.ic_state()); - DCHECK(feedback.IsValid()); - if (nexus.IsUninitialized()) return base::nullopt; - HeapObject object; - if (!nexus.GetFeedback()->GetHeapObject(&object)) return base::nullopt; - return HeapObjectRef(broker, handle(object, broker->isolate())); +// 
Returns the innermost bound target, if it's a JSFunction and inserts +// all bound arguments and {original_arguments} into {expanded_arguments} +// in the appropriate order. +MaybeHandle<JSFunction> UnrollBoundFunction( + JSBoundFunctionRef const& bound_function, JSHeapBroker* broker, + const HintsVector& original_arguments, HintsVector* expanded_arguments) { + DCHECK(expanded_arguments->empty()); + + JSReceiverRef target = bound_function.AsJSReceiver(); + HintsVector reversed_bound_arguments(broker->zone()); + for (; target.IsJSBoundFunction(); + target = target.AsJSBoundFunction().bound_target_function()) { + for (int i = target.AsJSBoundFunction().bound_arguments().length() - 1; + i >= 0; --i) { + Hints arg = Hints::SingleConstant( + target.AsJSBoundFunction().bound_arguments().get(i).object(), + broker->zone()); + reversed_bound_arguments.push_back(arg); + } + Hints arg = Hints::SingleConstant( + target.AsJSBoundFunction().bound_this().object(), broker->zone()); + reversed_bound_arguments.push_back(arg); + } + + if (!target.IsJSFunction()) return MaybeHandle<JSFunction>(); + + expanded_arguments->insert(expanded_arguments->end(), + reversed_bound_arguments.rbegin(), + reversed_bound_arguments.rend()); + expanded_arguments->insert(expanded_arguments->end(), + original_arguments.begin(), + original_arguments.end()); + + return target.AsJSFunction().object(); } } // namespace void SerializerForBackgroundCompilation::ProcessCallOrConstruct( Hints callee, base::Optional<Hints> new_target, const HintsVector& arguments, FeedbackSlot slot, bool with_spread) { - // TODO(neis): Make this part of ProcessFeedback*? - if (BailoutOnUninitialized(slot)) return; - - // Incorporate feedback into hints. - base::Optional<HeapObjectRef> feedback = GetHeapObjectFeedback( - broker(), environment()->function().feedback_vector(), slot); - if (feedback.has_value() && feedback->map().is_callable()) { - if (new_target.has_value()) { - // Construct; feedback is new_target, which often is also the callee. - new_target->AddConstant(feedback->object()); - callee.AddConstant(feedback->object()); - } else { - // Call; feedback is callee. - callee.AddConstant(feedback->object()); + SpeculationMode speculation_mode = SpeculationMode::kDisallowSpeculation; + if (!slot.IsInvalid()) { + FeedbackSource source(feedback_vector(), slot); + ProcessedFeedback const& feedback = + broker()->ProcessFeedbackForCall(source); + if (BailoutOnUninitialized(feedback)) return; + + // Incorporate feedback into hints copy to simplify processing. + if (!feedback.IsInsufficient()) { + speculation_mode = feedback.AsCall().speculation_mode(); + base::Optional<HeapObjectRef> target = feedback.AsCall().target(); + if (target.has_value() && target->map().is_callable()) { + // TODO(mvstanton): if the map isn't callable then we have an allocation + // site, and it may make sense to add the Array JSFunction constant. + if (new_target.has_value()) { + // Construct; feedback is new_target, which often is also the callee. + new_target->AddConstant(target->object()); + callee.AddConstant(target->object()); + } else { + // Call; target is callee. + callee.AddConstant(target->object()); + } + } } } environment()->accumulator_hints().Clear(); + // For JSCallReducer::ReduceJSCall and JSCallReducer::ReduceJSConstruct. 
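// --- Illustrative aside (not part of the diff): a self-contained
// re-statement of UnrollBoundFunction's ordering with toy types (Value and
// Callable stand in for V8's handles). Each bound level contributes its
// arguments reversed, then its receiver; the final reversed copy therefore
// puts the innermost receiver first, matching the convention in this file
// that arguments[0] is the receiver.

#include <vector>

struct Value { int id; };

struct Callable {
  bool is_bound = false;
  Callable* bound_target = nullptr;  // only meaningful when is_bound
  Value bound_this{0};
  std::vector<Value> bound_args;
};

Callable* Unroll(Callable* target, const std::vector<Value>& original,
                 std::vector<Value>* expanded) {
  std::vector<Value> reversed;
  for (; target->is_bound; target = target->bound_target) {
    for (int i = static_cast<int>(target->bound_args.size()) - 1; i >= 0;
         --i) {
      reversed.push_back(target->bound_args[i]);
    }
    reversed.push_back(target->bound_this);
  }
  expanded->insert(expanded->end(), reversed.rbegin(), reversed.rend());
  expanded->insert(expanded->end(), original.begin(), original.end());
  return target;  // innermost target; the real code also demands a JSFunction
}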
for (auto hint : callee.constants()) { - if (!hint->IsJSFunction()) continue; - - Handle<JSFunction> function = Handle<JSFunction>::cast(hint); - JSFunctionRef(broker(), function).Serialize(); - - Handle<SharedFunctionInfo> shared(function->shared(), broker()->isolate()); - - if (shared->IsApiFunction()) { - ProcessApiCall(shared, arguments); - DCHECK(!shared->IsInlineable()); - } else if (shared->HasBuiltinId()) { - ProcessBuiltinCall(shared, arguments); - DCHECK(!shared->IsInlineable()); + const HintsVector* actual_arguments = &arguments; + Handle<JSFunction> function; + HintsVector expanded_arguments(zone()); + if (hint->IsJSBoundFunction()) { + JSBoundFunctionRef bound_function(broker(), + Handle<JSBoundFunction>::cast(hint)); + bound_function.Serialize(); + + MaybeHandle<JSFunction> maybe_function = UnrollBoundFunction( + bound_function, broker(), arguments, &expanded_arguments); + if (maybe_function.is_null()) continue; + function = maybe_function.ToHandleChecked(); + actual_arguments = &expanded_arguments; + } else if (hint->IsJSFunction()) { + function = Handle<JSFunction>::cast(hint); + } else { + continue; } - if (!shared->IsInlineable() || !function->has_feedback_vector()) continue; - - environment()->accumulator_hints().Add(RunChildSerializer( - CompilationSubject(function, broker()->isolate(), zone()), new_target, - arguments, with_spread)); + if (ProcessCalleeForCallOrConstruct(function, *actual_arguments, + speculation_mode)) { + environment()->accumulator_hints().Add(RunChildSerializer( + CompilationSubject(function, broker()->isolate(), zone()), new_target, + *actual_arguments, with_spread)); + } } + // For JSCallReducer::ReduceJSCall and JSCallReducer::ReduceJSConstruct. for (auto hint : callee.function_blueprints()) { Handle<SharedFunctionInfo> shared = hint.shared(); - - if (shared->IsApiFunction()) { - ProcessApiCall(shared, arguments); - DCHECK(!shared->IsInlineable()); - } else if (shared->HasBuiltinId()) { - ProcessBuiltinCall(shared, arguments); - DCHECK(!shared->IsInlineable()); + if (!ProcessSFIForCallOrConstruct(shared, arguments, speculation_mode)) { + continue; } - if (!shared->IsInlineable()) continue; environment()->accumulator_hints().Add(RunChildSerializer( CompilationSubject(hint), new_target, arguments, with_spread)); } } void SerializerForBackgroundCompilation::ProcessCallVarArgs( - BytecodeArrayIterator* iterator, ConvertReceiverMode receiver_mode, + ConvertReceiverMode receiver_mode, Hints const& callee, + interpreter::Register first_reg, int reg_count, FeedbackSlot slot, bool with_spread) { - const Hints& callee = - environment()->register_hints(iterator->GetRegisterOperand(0)); - interpreter::Register first_reg = iterator->GetRegisterOperand(1); - int reg_count = static_cast<int>(iterator->GetRegisterCountOperand(2)); - FeedbackSlot slot = iterator->GetSlotOperand(3); - HintsVector arguments(zone()); // The receiver is either given in the first register or it is implicitly // the {undefined} value. 
if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) { - Hints receiver(zone()); - receiver.AddConstant(broker()->isolate()->factory()->undefined_value()); - arguments.push_back(receiver); + arguments.push_back(Hints::SingleConstant( + broker()->isolate()->factory()->undefined_value(), zone())); } - environment()->ExportRegisterHints(first_reg, reg_count, arguments); + environment()->ExportRegisterHints(first_reg, reg_count, &arguments); ProcessCallOrConstruct(callee, base::nullopt, arguments, slot); } void SerializerForBackgroundCompilation::ProcessApiCall( Handle<SharedFunctionInfo> target, const HintsVector& arguments) { + ObjectRef(broker(), broker()->isolate()->builtins()->builtin_handle( + Builtins::kCallFunctionTemplate_CheckAccess)); + ObjectRef(broker(), + broker()->isolate()->builtins()->builtin_handle( + Builtins::kCallFunctionTemplate_CheckCompatibleReceiver)); + ObjectRef( + broker(), + broker()->isolate()->builtins()->builtin_handle( + Builtins::kCallFunctionTemplate_CheckAccessAndCompatibleReceiver)); + FunctionTemplateInfoRef target_template_info( broker(), handle(target->function_data(), broker()->isolate())); if (!target_template_info.has_call_code()) return; @@ -1576,7 +1882,7 @@ void SerializerForBackgroundCompilation::ProcessApiCall( if (hint->IsUndefined()) { // The receiver is the global proxy. Handle<JSGlobalProxy> global_proxy = - broker()->native_context().global_proxy_object().object(); + broker()->target_native_context().global_proxy_object().object(); ProcessReceiverMapForApiCall( target_template_info, handle(global_proxy->map(), broker()->isolate())); @@ -1596,40 +1902,62 @@ void SerializerForBackgroundCompilation::ProcessApiCall( } void SerializerForBackgroundCompilation::ProcessReceiverMapForApiCall( - FunctionTemplateInfoRef& target, Handle<Map> receiver) { - if (receiver->is_access_check_needed()) { - return; + FunctionTemplateInfoRef target, Handle<Map> receiver) { + if (!receiver->is_access_check_needed()) { + MapRef receiver_map(broker(), receiver); + TRACE_BROKER(broker(), "Serializing holder for target:" << target); + target.LookupHolderOfExpectedType(receiver_map, + SerializationPolicy::kSerializeIfNeeded); } +} - MapRef receiver_map(broker(), receiver); - TRACE_BROKER(broker(), "Serializing holder for target:" << target); - - target.LookupHolderOfExpectedType(receiver_map, true); +void SerializerForBackgroundCompilation::ProcessHintsForObjectCreate( + Hints const& prototype) { + for (Handle<Object> constant_handle : prototype.constants()) { + ObjectRef constant(broker(), constant_handle); + if (constant.IsJSObject()) constant.AsJSObject().SerializeObjectCreateMap(); + } } void SerializerForBackgroundCompilation::ProcessBuiltinCall( - Handle<SharedFunctionInfo> target, const HintsVector& arguments) { + Handle<SharedFunctionInfo> target, const HintsVector& arguments, + SpeculationMode speculation_mode) { DCHECK(target->HasBuiltinId()); const int builtin_id = target->builtin_id(); const char* name = Builtins::name(builtin_id); TRACE_BROKER(broker(), "Serializing for call to builtin " << name); switch (builtin_id) { + case Builtins::kObjectCreate: { + if (arguments.size() >= 2) { + ProcessHintsForObjectCreate(arguments[1]); + } else { + ProcessHintsForObjectCreate(Hints::SingleConstant( + broker()->isolate()->factory()->undefined_value(), zone())); + } + break; + } case Builtins::kPromisePrototypeCatch: { // For JSCallReducer::ReducePromisePrototypeCatch. 
- CHECK_GE(arguments.size(), 1); - ProcessMapHintsForPromises(arguments[0]); + if (speculation_mode != SpeculationMode::kDisallowSpeculation) { + CHECK_GE(arguments.size(), 1); + ProcessMapHintsForPromises(arguments[0]); + } break; } case Builtins::kPromisePrototypeFinally: { // For JSCallReducer::ReducePromisePrototypeFinally. - CHECK_GE(arguments.size(), 1); - ProcessMapHintsForPromises(arguments[0]); + if (speculation_mode != SpeculationMode::kDisallowSpeculation) { + CHECK_GE(arguments.size(), 1); + ProcessMapHintsForPromises(arguments[0]); + } break; } case Builtins::kPromisePrototypeThen: { // For JSCallReducer::ReducePromisePrototypeThen. - CHECK_GE(arguments.size(), 1); - ProcessMapHintsForPromises(arguments[0]); + if (speculation_mode != SpeculationMode::kDisallowSpeculation) { + CHECK_GE(arguments.size(), 1); + ProcessMapHintsForPromises(arguments[0]); + } break; } case Builtins::kPromiseResolveTrampoline: @@ -1648,30 +1976,142 @@ void SerializerForBackgroundCompilation::ProcessBuiltinCall( ProcessHintsForPromiseResolve(resolution_hints); } break; - case Builtins::kRegExpPrototypeTest: { + case Builtins::kRegExpPrototypeTest: // For JSCallReducer::ReduceRegExpPrototypeTest. - if (arguments.size() >= 1) { + if (arguments.size() >= 1 && + speculation_mode != SpeculationMode::kDisallowSpeculation) { Hints const& regexp_hints = arguments[0]; ProcessHintsForRegExpTest(regexp_hints); } break; - } + case Builtins::kArrayEvery: + case Builtins::kArrayFilter: + case Builtins::kArrayForEach: + case Builtins::kArrayPrototypeFind: + case Builtins::kArrayPrototypeFindIndex: + case Builtins::kArrayMap: + case Builtins::kArrayReduce: + case Builtins::kArrayReduceRight: + case Builtins::kArraySome: + if (arguments.size() >= 2 && + speculation_mode != SpeculationMode::kDisallowSpeculation) { + Hints const& callback_hints = arguments[1]; + ProcessHintsForFunctionCall(callback_hints); + } + break; + case Builtins::kFunctionPrototypeApply: case Builtins::kFunctionPrototypeCall: + case Builtins::kPromiseConstructor: + // TODO(mslekova): Since the reducer for all these introduce a + // JSCall/JSConstruct that will again get optimized by the JSCallReducer, + // we basically might have to do all the serialization that we do for that + // here as well. The only difference is that the new JSCall/JSConstruct + // has speculation disabled, causing the JSCallReducer to do much less + // work. To account for that, ProcessCallOrConstruct should have a way of + // taking the speculation mode as an argument rather than getting that + // from the feedback. (Also applies to Reflect.apply and + // Reflect.construct.) if (arguments.size() >= 1) { - Hints const& target_hints = arguments[0]; - ProcessHintsForFunctionCall(target_hints); + ProcessHintsForFunctionCall(arguments[0]); + } + break; + case Builtins::kReflectApply: + case Builtins::kReflectConstruct: + if (arguments.size() >= 2) { + ProcessHintsForFunctionCall(arguments[1]); + } + break; + case Builtins::kObjectPrototypeIsPrototypeOf: + if (arguments.size() >= 2) { + ProcessHintsForHasInPrototypeChain(arguments[1]); } break; + case Builtins::kFunctionPrototypeHasInstance: + // For JSCallReducer::ReduceFunctionPrototypeHasInstance. 
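// (Note, not part of the diff: by the hints convention, arguments[0] is the
// receiver, i.e. the constructor whose @@hasInstance is invoked, and
// arguments[1] is the candidate instance, matching the parameter order of
// ProcessHintsForOrdinaryHasInstance below.)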
+ if (arguments.size() >= 2) { + ProcessHintsForOrdinaryHasInstance(arguments[0], arguments[1]); + } + break; + case Builtins::kFastFunctionPrototypeBind: + if (arguments.size() >= 1 && + speculation_mode != SpeculationMode::kDisallowSpeculation) { + ProcessHintsForFunctionBind(arguments[0]); + } + break; + case Builtins::kObjectGetPrototypeOf: + case Builtins::kReflectGetPrototypeOf: + if (arguments.size() >= 2) { + ProcessHintsForObjectGetPrototype(arguments[1]); + } else { + Hints undefined_hint = Hints::SingleConstant( + broker()->isolate()->factory()->undefined_value(), zone()); + ProcessHintsForObjectGetPrototype(undefined_hint); + } + break; + case Builtins::kObjectPrototypeGetProto: + if (arguments.size() >= 1) { + ProcessHintsForObjectGetPrototype(arguments[0]); + } + break; + case Builtins::kMapIteratorPrototypeNext: + ObjectRef(broker(), broker()->isolate()->builtins()->builtin_handle( + Builtins::kOrderedHashTableHealIndex)); + ObjectRef(broker(), + broker()->isolate()->factory()->empty_ordered_hash_map()); + break; + case Builtins::kSetIteratorPrototypeNext: + ObjectRef(broker(), broker()->isolate()->builtins()->builtin_handle( + Builtins::kOrderedHashTableHealIndex)); + ObjectRef(broker(), + broker()->isolate()->factory()->empty_ordered_hash_set()); + break; default: break; } } +void SerializerForBackgroundCompilation::ProcessHintsForOrdinaryHasInstance( + Hints const& constructor_hints, Hints const& instance_hints) { + bool walk_prototypes = false; + for (Handle<Object> constructor : constructor_hints.constants()) { + // For JSNativeContextSpecialization::ReduceJSOrdinaryHasInstance. + if (constructor->IsHeapObject()) { + ProcessConstantForOrdinaryHasInstance( + HeapObjectRef(broker(), constructor), &walk_prototypes); + } + } + // For JSNativeContextSpecialization::ReduceJSHasInPrototypeChain. 
+ if (walk_prototypes) ProcessHintsForHasInPrototypeChain(instance_hints); +} + +void SerializerForBackgroundCompilation::ProcessHintsForHasInPrototypeChain( + Hints const& instance_hints) { + auto processMap = [&](Handle<Map> map_handle) { + MapRef map(broker(), map_handle); + while (map.IsJSObjectMap()) { + map.SerializePrototype(); + map = map.prototype().map(); + } + }; + + for (auto hint : instance_hints.constants()) { + if (!hint->IsHeapObject()) continue; + Handle<HeapObject> object(Handle<HeapObject>::cast(hint)); + processMap(handle(object->map(), broker()->isolate())); + } + for (auto map_hint : instance_hints.maps()) { + processMap(map_hint); + } +} + void SerializerForBackgroundCompilation::ProcessHintsForPromiseResolve( Hints const& resolution_hints) { auto processMap = [&](Handle<Map> map) { - broker()->CreateAccessInfoForLoadingThen(MapRef(broker(), map), - dependencies()); + broker()->GetPropertyAccessInfo( + MapRef(broker(), map), + NameRef(broker(), broker()->isolate()->factory()->then_string()), + AccessMode::kLoad, dependencies(), + SerializationPolicy::kSerializeIfNeeded); }; for (auto hint : resolution_hints.constants()) { @@ -1701,15 +2141,18 @@ void SerializerForBackgroundCompilation::ProcessMapHintsForPromises( PropertyAccessInfo SerializerForBackgroundCompilation::ProcessMapForRegExpTest( MapRef map) { - PropertyAccessInfo ai_exec = - broker()->CreateAccessInfoForLoadingExec(map, dependencies()); + PropertyAccessInfo ai_exec = broker()->GetPropertyAccessInfo( + map, NameRef(broker(), broker()->isolate()->factory()->exec_string()), + AccessMode::kLoad, dependencies(), + SerializationPolicy::kSerializeIfNeeded); Handle<JSObject> holder; if (ai_exec.IsDataConstant() && ai_exec.holder().ToHandle(&holder)) { // The property is on the prototype chain. JSObjectRef holder_ref(broker(), holder); - holder_ref.GetOwnProperty(ai_exec.field_representation(), - ai_exec.field_index(), true); + holder_ref.GetOwnDataProperty(ai_exec.field_representation(), + ai_exec.field_index(), + SerializationPolicy::kSerializeIfNeeded); } return ai_exec; } @@ -1726,8 +2169,9 @@ void SerializerForBackgroundCompilation::ProcessHintsForRegExpTest( if (ai_exec.IsDataConstant() && !ai_exec.holder().ToHandle(&holder)) { // The property is on the object itself. 
JSObjectRef holder_ref(broker(), regexp); - holder_ref.GetOwnProperty(ai_exec.field_representation(), - ai_exec.field_index(), true); + holder_ref.GetOwnDataProperty(ai_exec.field_representation(), + ai_exec.field_index(), + SerializationPolicy::kSerializeIfNeeded); } } @@ -1740,9 +2184,50 @@ void SerializerForBackgroundCompilation::ProcessHintsForRegExpTest( void SerializerForBackgroundCompilation::ProcessHintsForFunctionCall( Hints const& target_hints) { for (auto constant : target_hints.constants()) { + if (constant->IsJSFunction()) JSFunctionRef(broker(), constant).Serialize(); + } +} + +namespace { +void ProcessMapForFunctionBind(MapRef map) { + map.SerializePrototype(); + int min_nof_descriptors = i::Max(JSFunction::kLengthDescriptorIndex, + JSFunction::kNameDescriptorIndex) + + 1; + if (map.NumberOfOwnDescriptors() >= min_nof_descriptors) { + map.SerializeOwnDescriptor(JSFunction::kLengthDescriptorIndex); + map.SerializeOwnDescriptor(JSFunction::kNameDescriptorIndex); + } +} +} // namespace + +void SerializerForBackgroundCompilation::ProcessHintsForFunctionBind( + Hints const& receiver_hints) { + for (auto constant : receiver_hints.constants()) { if (!constant->IsJSFunction()) continue; - JSFunctionRef func(broker(), constant); - func.Serialize(); + JSFunctionRef function(broker(), constant); + function.Serialize(); + ProcessMapForFunctionBind(function.map()); + } + + for (auto map : receiver_hints.maps()) { + if (!map->IsJSFunctionMap()) continue; + MapRef map_ref(broker(), map); + ProcessMapForFunctionBind(map_ref); + } +} + +void SerializerForBackgroundCompilation::ProcessHintsForObjectGetPrototype( + Hints const& object_hints) { + for (auto constant : object_hints.constants()) { + if (!constant->IsHeapObject()) continue; + HeapObjectRef object(broker(), constant); + object.map().SerializePrototype(); + } + + for (auto map : object_hints.maps()) { + MapRef map_ref(broker(), map); + map_ref.SerializePrototype(); } } @@ -1791,79 +2276,77 @@ void SerializerForBackgroundCompilation::VisitSwitchOnSmiNoFeedback( void SerializerForBackgroundCompilation::VisitSwitchOnGeneratorState( interpreter::BytecodeArrayIterator* iterator) { - for (const auto& target : GetBytecodeAnalysis(false).resume_jump_targets()) { + for (const auto& target : GetBytecodeAnalysis().resume_jump_targets()) { ContributeToJumpTargetEnvironment(target.target_offset()); } } void SerializerForBackgroundCompilation::Environment::ExportRegisterHints( - interpreter::Register first, size_t count, HintsVector& dst) { + interpreter::Register first, size_t count, HintsVector* dst) { const int reg_base = first.index(); for (int i = 0; i < static_cast<int>(count); ++i) { - dst.push_back(register_hints(interpreter::Register(reg_base + i))); + dst->push_back(register_hints(interpreter::Register(reg_base + i))); } } void SerializerForBackgroundCompilation::VisitConstruct( BytecodeArrayIterator* iterator) { - const Hints& callee = + Hints const& new_target = environment()->accumulator_hints(); + Hints const& callee = environment()->register_hints(iterator->GetRegisterOperand(0)); interpreter::Register first_reg = iterator->GetRegisterOperand(1); size_t reg_count = iterator->GetRegisterCountOperand(2); FeedbackSlot slot = iterator->GetSlotOperand(3); - const Hints& new_target = environment()->accumulator_hints(); HintsVector arguments(zone()); - environment()->ExportRegisterHints(first_reg, reg_count, arguments); + environment()->ExportRegisterHints(first_reg, reg_count, &arguments); ProcessCallOrConstruct(callee, new_target, 
arguments, slot); } void SerializerForBackgroundCompilation::VisitConstructWithSpread( BytecodeArrayIterator* iterator) { - const Hints& callee = + Hints const& new_target = environment()->accumulator_hints(); + Hints const& callee = environment()->register_hints(iterator->GetRegisterOperand(0)); interpreter::Register first_reg = iterator->GetRegisterOperand(1); size_t reg_count = iterator->GetRegisterCountOperand(2); FeedbackSlot slot = iterator->GetSlotOperand(3); - const Hints& new_target = environment()->accumulator_hints(); HintsVector arguments(zone()); - environment()->ExportRegisterHints(first_reg, reg_count, arguments); + environment()->ExportRegisterHints(first_reg, reg_count, &arguments); ProcessCallOrConstruct(callee, new_target, arguments, slot, true); } -GlobalAccessFeedback const* -SerializerForBackgroundCompilation::ProcessFeedbackForGlobalAccess( - FeedbackSlot slot) { - if (slot.IsInvalid()) return nullptr; - if (environment()->function().feedback_vector().is_null()) return nullptr; - FeedbackSource source(environment()->function().feedback_vector(), slot); +void SerializerForBackgroundCompilation::ProcessGlobalAccess(FeedbackSlot slot, + bool is_load) { + if (slot.IsInvalid() || feedback_vector().is_null()) return; + FeedbackSource source(feedback_vector(), slot); + ProcessedFeedback const& feedback = + broker()->ProcessFeedbackForGlobalAccess(source); - if (broker()->HasFeedback(source)) { - return broker()->GetGlobalAccessFeedback(source); + if (is_load) { + environment()->accumulator_hints().Clear(); + if (feedback.kind() == ProcessedFeedback::kGlobalAccess) { + // We may be able to contribute to accumulator constant hints. + base::Optional<ObjectRef> value = + feedback.AsGlobalAccess().GetConstantHint(); + if (value.has_value()) { + environment()->accumulator_hints().AddConstant(value->object()); + } + } else { + DCHECK(feedback.IsInsufficient()); + } } - - const GlobalAccessFeedback* feedback = - broker()->ProcessFeedbackForGlobalAccess(source); - broker()->SetFeedback(source, feedback); - return feedback; } void SerializerForBackgroundCompilation::VisitLdaGlobal( BytecodeArrayIterator* iterator) { + NameRef(broker(), + iterator->GetConstantForIndexOperand(0, broker()->isolate())); FeedbackSlot slot = iterator->GetSlotOperand(1); - - environment()->accumulator_hints().Clear(); - GlobalAccessFeedback const* feedback = ProcessFeedbackForGlobalAccess(slot); - if (feedback != nullptr) { - // We may be able to contribute to accumulator constant hints. - base::Optional<ObjectRef> value = feedback->GetConstantHint(); - if (value.has_value()) { - environment()->accumulator_hints().AddConstant(value->object()); - } - } + ProcessGlobalAccess(slot, true); } void SerializerForBackgroundCompilation::VisitLdaGlobalInsideTypeof( @@ -1871,6 +2354,20 @@ void SerializerForBackgroundCompilation::VisitLdaGlobalInsideTypeof( VisitLdaGlobal(iterator); } +void SerializerForBackgroundCompilation::VisitLdaLookupSlot( + BytecodeArrayIterator* iterator) { + ObjectRef(broker(), + iterator->GetConstantForIndexOperand(0, broker()->isolate())); + environment()->accumulator_hints().Clear(); +} + +void SerializerForBackgroundCompilation::VisitLdaLookupSlotInsideTypeof( + BytecodeArrayIterator* iterator) { + ObjectRef(broker(), + iterator->GetConstantForIndexOperand(0, broker()->isolate())); + environment()->accumulator_hints().Clear(); +} + void SerializerForBackgroundCompilation::ProcessCheckContextExtensions( int depth) { // for BytecodeGraphBuilder::CheckContextExtensions. 
@@ -1900,18 +2397,22 @@ void SerializerForBackgroundCompilation::VisitLdaLookupGlobalSlotInsideTypeof( void SerializerForBackgroundCompilation::VisitStaGlobal( BytecodeArrayIterator* iterator) { + NameRef(broker(), + iterator->GetConstantForIndexOperand(0, broker()->isolate())); FeedbackSlot slot = iterator->GetSlotOperand(1); - ProcessFeedbackForGlobalAccess(slot); + ProcessGlobalAccess(slot, false); } void SerializerForBackgroundCompilation::ProcessLdaLookupContextSlot( BytecodeArrayIterator* iterator) { const int slot_index = iterator->GetIndexOperand(1); const int depth = iterator->GetUnsignedImmediateOperand(2); + NameRef(broker(), + iterator->GetConstantForIndexOperand(0, broker()->isolate())); ProcessCheckContextExtensions(depth); - Hints& context_hints = environment()->current_context_hints(); environment()->accumulator_hints().Clear(); - ProcessContextAccess(context_hints, slot_index, depth, kIgnoreSlot); + ProcessContextAccess(environment()->current_context_hints(), slot_index, + depth, kIgnoreSlot); } void SerializerForBackgroundCompilation::VisitLdaLookupContextSlot( @@ -1924,6 +2425,7 @@ void SerializerForBackgroundCompilation::VisitLdaLookupContextSlotInsideTypeof( ProcessLdaLookupContextSlot(iterator); } +// TODO(neis): Avoid duplicating this. namespace { template <class MapContainer> MapHandles GetRelevantReceiverMaps(Isolate* isolate, MapContainer const& maps) { @@ -1939,220 +2441,334 @@ MapHandles GetRelevantReceiverMaps(Isolate* isolate, MapContainer const& maps) { } } // namespace -ElementAccessFeedback const* -SerializerForBackgroundCompilation::ProcessFeedbackMapsForElementAccess( - const MapHandles& maps, AccessMode mode, - KeyedAccessMode const& keyed_mode) { - ElementAccessFeedback const* result = - broker()->ProcessFeedbackMapsForElementAccess(maps, keyed_mode); - for (ElementAccessFeedback::MapIterator it = result->all_maps(broker()); - !it.done(); it.advance()) { - switch (mode) { - case AccessMode::kHas: - case AccessMode::kLoad: - it.current().SerializeForElementLoad(); - break; - case AccessMode::kStore: - it.current().SerializeForElementStore(); - break; - case AccessMode::kStoreInLiteral: - // This operation is fairly local and simple, nothing to serialize. - break; - } +void SerializerForBackgroundCompilation::ProcessCompareOperation( + FeedbackSlot slot) { + if (slot.IsInvalid() || feedback_vector().is_null()) return; + FeedbackSource source(environment()->function().feedback_vector(), slot); + ProcessedFeedback const& feedback = + broker()->ProcessFeedbackForCompareOperation(source); + if (BailoutOnUninitialized(feedback)) return; + environment()->accumulator_hints().Clear(); +} + +void SerializerForBackgroundCompilation::ProcessForIn(FeedbackSlot slot) { + if (slot.IsInvalid() || feedback_vector().is_null()) return; + FeedbackSource source(feedback_vector(), slot); + ProcessedFeedback const& feedback = broker()->ProcessFeedbackForForIn(source); + if (BailoutOnUninitialized(feedback)) return; + environment()->accumulator_hints().Clear(); +} + +void SerializerForBackgroundCompilation::ProcessUnaryOrBinaryOperation( + FeedbackSlot slot, bool honor_bailout_on_uninitialized) { + if (slot.IsInvalid() || feedback_vector().is_null()) return; + FeedbackSource source(feedback_vector(), slot); + // Internally V8 uses binary op feedback also for unary ops. 
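// (Background note, not part of the diff: the unary bytecodes Inc, Dec,
// Negate, and BitwiseNot record BinaryOperation feedback, so a single
// feedback lookup serves both arities; see the DEFINE_UNARY_OP visitors
// added at the end of this file.)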
+ ProcessedFeedback const& feedback = + broker()->ProcessFeedbackForBinaryOperation(source); + if (honor_bailout_on_uninitialized && BailoutOnUninitialized(feedback)) { + return; } - return result; + environment()->accumulator_hints().Clear(); } -NamedAccessFeedback const* -SerializerForBackgroundCompilation::ProcessFeedbackMapsForNamedAccess( - const MapHandles& maps, AccessMode mode, NameRef const& name) { - ZoneVector<PropertyAccessInfo> access_infos(broker()->zone()); - for (Handle<Map> map : maps) { - MapRef map_ref(broker(), map); - ProcessMapForNamedPropertyAccess(map_ref, name); - AccessInfoFactory access_info_factory(broker(), dependencies(), - broker()->zone()); - PropertyAccessInfo info(access_info_factory.ComputePropertyAccessInfo( - map, name.object(), mode)); - access_infos.push_back(info); - - // TODO(turbofan): We want to take receiver hints into account as well, - // not only the feedback maps. - // For JSNativeContextSpecialization::InlinePropertySetterCall - // and InlinePropertyGetterCall. - if (info.IsAccessorConstant() && !info.constant().is_null()) { - if (info.constant()->IsJSFunction()) { - // For JSCallReducer::ReduceCallApiFunction. - Handle<SharedFunctionInfo> sfi( - handle(Handle<JSFunction>::cast(info.constant())->shared(), - broker()->isolate())); - if (sfi->IsApiFunction()) { - FunctionTemplateInfoRef fti_ref( - broker(), handle(sfi->get_api_func_data(), broker()->isolate())); - if (fti_ref.has_call_code()) fti_ref.SerializeCallCode(); - ProcessReceiverMapForApiCall(fti_ref, map); - } - } else { +PropertyAccessInfo +SerializerForBackgroundCompilation::ProcessMapForNamedPropertyAccess( + MapRef receiver_map, NameRef const& name, AccessMode access_mode, + base::Optional<JSObjectRef> receiver, Hints* new_accumulator_hints) { + // For JSNativeContextSpecialization::InferReceiverRootMap + receiver_map.SerializeRootMap(); + + // For JSNativeContextSpecialization::ReduceNamedAccess. + if (receiver_map.IsMapOfTargetGlobalProxy()) { + broker()->target_native_context().global_proxy_object().GetPropertyCell( + name, SerializationPolicy::kSerializeIfNeeded); + } + + PropertyAccessInfo access_info = broker()->GetPropertyAccessInfo( + receiver_map, name, access_mode, dependencies(), + SerializationPolicy::kSerializeIfNeeded); + + // For JSNativeContextSpecialization::InlinePropertySetterCall + // and InlinePropertyGetterCall. + if (access_info.IsAccessorConstant() && !access_info.constant().is_null()) { + if (access_info.constant()->IsJSFunction()) { + JSFunctionRef function(broker(), access_info.constant()); + + // For JSCallReducer::ReduceJSCall. + function.Serialize(); + + // For JSCallReducer::ReduceCallApiFunction. + Handle<SharedFunctionInfo> sfi = function.shared().object(); + if (sfi->IsApiFunction()) { FunctionTemplateInfoRef fti_ref( - broker(), Handle<FunctionTemplateInfo>::cast(info.constant())); + broker(), handle(sfi->get_api_func_data(), broker()->isolate())); if (fti_ref.has_call_code()) fti_ref.SerializeCallCode(); + ProcessReceiverMapForApiCall(fti_ref, receiver_map.object()); } + } else if (access_info.constant()->IsJSBoundFunction()) { + JSBoundFunctionRef function(broker(), access_info.constant()); + + // For JSCallReducer::ReduceJSCall. 
+ function.Serialize(); + } else { + FunctionTemplateInfoRef fti(broker(), access_info.constant()); + if (fti.has_call_code()) fti.SerializeCallCode(); } } - DCHECK(!access_infos.empty()); - return new (broker()->zone()) NamedAccessFeedback(name, access_infos); -} + // For PropertyAccessBuilder::TryBuildLoadConstantDataField + if (access_mode == AccessMode::kLoad) { + if (access_info.IsDataConstant()) { + base::Optional<JSObjectRef> holder; + Handle<JSObject> prototype; + if (access_info.holder().ToHandle(&prototype)) { + holder = JSObjectRef(broker(), prototype); + } else { + CHECK_IMPLIES(receiver.has_value(), + receiver->map().equals(receiver_map)); + holder = receiver; + } -void SerializerForBackgroundCompilation::ProcessFeedbackForPropertyAccess( - FeedbackSlot slot, AccessMode mode, base::Optional<NameRef> static_name) { - if (slot.IsInvalid()) return; - if (environment()->function().feedback_vector().is_null()) return; + if (holder.has_value()) { + base::Optional<ObjectRef> constant(holder->GetOwnDataProperty( + access_info.field_representation(), access_info.field_index(), + SerializationPolicy::kSerializeIfNeeded)); + if (constant.has_value()) { + new_accumulator_hints->AddConstant(constant->object()); + } + } + } + } + + return access_info; +} - FeedbackNexus nexus(environment()->function().feedback_vector(), slot); - FeedbackSource source(nexus); - if (broker()->HasFeedback(source)) return; +void SerializerForBackgroundCompilation::VisitLdaKeyedProperty( + BytecodeArrayIterator* iterator) { + Hints const& key = environment()->accumulator_hints(); + Hints const& receiver = + environment()->register_hints(iterator->GetRegisterOperand(0)); + FeedbackSlot slot = iterator->GetSlotOperand(1); + ProcessKeyedPropertyAccess(receiver, key, slot, AccessMode::kLoad, true); +} - if (nexus.ic_state() == UNINITIALIZED) { - broker()->SetFeedback(source, - new (broker()->zone()) InsufficientFeedback()); +void SerializerForBackgroundCompilation::ProcessKeyedPropertyAccess( + Hints const& receiver, Hints const& key, FeedbackSlot slot, + AccessMode access_mode, bool honor_bailout_on_uninitialized) { + if (slot.IsInvalid() || feedback_vector().is_null()) return; + FeedbackSource source(feedback_vector(), slot); + ProcessedFeedback const& feedback = + broker()->ProcessFeedbackForPropertyAccess(source, access_mode, + base::nullopt); + if (honor_bailout_on_uninitialized && BailoutOnUninitialized(feedback)) { return; } - MapHandles maps; - if (nexus.ExtractMaps(&maps) == 0) { // Megamorphic. 
- broker()->SetFeedback(source, nullptr); - return; + Hints new_accumulator_hints(zone()); + switch (feedback.kind()) { + case ProcessedFeedback::kElementAccess: + ProcessElementAccess(receiver, key, feedback.AsElementAccess(), + access_mode); + break; + case ProcessedFeedback::kNamedAccess: + ProcessNamedAccess(receiver, feedback.AsNamedAccess(), access_mode, + &new_accumulator_hints); + break; + case ProcessedFeedback::kInsufficient: + break; + default: + UNREACHABLE(); } - maps = GetRelevantReceiverMaps(broker()->isolate(), maps); - if (maps.empty()) { - broker()->SetFeedback(source, - new (broker()->zone()) InsufficientFeedback()); - return; + if (access_mode == AccessMode::kLoad) { + environment()->accumulator_hints().Clear(); + environment()->accumulator_hints().Add(new_accumulator_hints); + } else { + DCHECK(new_accumulator_hints.IsEmpty()); + } +} + +void SerializerForBackgroundCompilation::ProcessNamedPropertyAccess( + Hints receiver, NameRef const& name, FeedbackSlot slot, + AccessMode access_mode) { + if (slot.IsInvalid() || feedback_vector().is_null()) return; + FeedbackSource source(feedback_vector(), slot); + ProcessedFeedback const& feedback = + broker()->ProcessFeedbackForPropertyAccess(source, access_mode, name); + if (BailoutOnUninitialized(feedback)) return; + + Hints new_accumulator_hints(zone()); + switch (feedback.kind()) { + case ProcessedFeedback::kNamedAccess: + DCHECK(name.equals(feedback.AsNamedAccess().name())); + ProcessNamedAccess(receiver, feedback.AsNamedAccess(), access_mode, + &new_accumulator_hints); + break; + case ProcessedFeedback::kInsufficient: + break; + default: + UNREACHABLE(); } - ProcessedFeedback const* processed = nullptr; - base::Optional<NameRef> name = - static_name.has_value() ? static_name : broker()->GetNameFeedback(nexus); - if (name.has_value()) { - processed = ProcessFeedbackMapsForNamedAccess(maps, mode, *name); - } else if (nexus.GetKeyType() == ELEMENT) { - DCHECK_NE(nexus.ic_state(), MEGAMORPHIC); - processed = ProcessFeedbackMapsForElementAccess( - maps, mode, KeyedAccessMode::FromNexus(nexus)); + if (access_mode == AccessMode::kLoad) { + environment()->accumulator_hints().Clear(); + environment()->accumulator_hints().Add(new_accumulator_hints); + } else { + DCHECK(new_accumulator_hints.IsEmpty()); } - broker()->SetFeedback(source, processed); } -void SerializerForBackgroundCompilation::ProcessKeyedPropertyAccess( - Hints const& receiver, Hints const& key, FeedbackSlot slot, - AccessMode mode) { - if (BailoutOnUninitialized(slot)) return; - ProcessFeedbackForPropertyAccess(slot, mode, base::nullopt); +void SerializerForBackgroundCompilation::ProcessNamedAccess( + Hints receiver, NamedAccessFeedback const& feedback, AccessMode access_mode, + Hints* new_accumulator_hints) { + for (Handle<Map> map : feedback.AsNamedAccess().maps()) { + MapRef map_ref(broker(), map); + ProcessMapForNamedPropertyAccess(map_ref, feedback.name(), access_mode, + base::nullopt, new_accumulator_hints); + } + + for (Handle<Map> map : + GetRelevantReceiverMaps(broker()->isolate(), receiver.maps())) { + MapRef map_ref(broker(), map); + ProcessMapForNamedPropertyAccess(map_ref, feedback.name(), access_mode, + base::nullopt, new_accumulator_hints); + } + + JSGlobalProxyRef global_proxy = + broker()->target_native_context().global_proxy_object(); + for (Handle<Object> hint : receiver.constants()) { + ObjectRef object(broker(), hint); + if (access_mode == AccessMode::kLoad && object.IsJSObject()) { + MapRef map_ref = object.AsJSObject().map(); + 
ProcessMapForNamedPropertyAccess(map_ref, feedback.name(), access_mode, + object.AsJSObject(), + new_accumulator_hints); + } + // For JSNativeContextSpecialization::ReduceNamedAccessFromNexus. + if (object.equals(global_proxy)) { + // TODO(neis): Record accumulator hint? Also for string.length and maybe + // more. + global_proxy.GetPropertyCell(feedback.name(), + SerializationPolicy::kSerializeIfNeeded); + } + // For JSNativeContextSpecialization::ReduceJSLoadNamed. + if (access_mode == AccessMode::kLoad && object.IsJSFunction() && + feedback.name().equals(ObjectRef( + broker(), broker()->isolate()->factory()->prototype_string()))) { + JSFunctionRef function = object.AsJSFunction(); + function.Serialize(); + if (new_accumulator_hints != nullptr && function.has_prototype()) { + new_accumulator_hints->AddConstant(function.prototype().object()); + } + } + } +} + +void SerializerForBackgroundCompilation::ProcessElementAccess( + Hints receiver, Hints key, ElementAccessFeedback const& feedback, + AccessMode access_mode) { + for (auto const& group : feedback.transition_groups()) { + for (Handle<Map> map_handle : group) { + MapRef map(broker(), map_handle); + switch (access_mode) { + case AccessMode::kHas: + case AccessMode::kLoad: + map.SerializeForElementLoad(); + break; + case AccessMode::kStore: + map.SerializeForElementStore(); + break; + case AccessMode::kStoreInLiteral: + // This operation is fairly local and simple, nothing to serialize. + break; + } + } + } for (Handle<Object> hint : receiver.constants()) { ObjectRef receiver_ref(broker(), hint); + // For JSNativeContextSpecialization::InferReceiverRootMap + if (receiver_ref.IsHeapObject()) { + receiver_ref.AsHeapObject().map().SerializeRootMap(); + } + // For JSNativeContextSpecialization::ReduceElementAccess. if (receiver_ref.IsJSTypedArray()) { receiver_ref.AsJSTypedArray().Serialize(); } - // For JSNativeContextSpecialization::ReduceKeyedLoadFromHeapConstant. - if (mode == AccessMode::kLoad || mode == AccessMode::kHas) { + // For JSNativeContextSpecialization::ReduceElementLoadFromHeapConstant. + if (access_mode == AccessMode::kLoad || access_mode == AccessMode::kHas) { for (Handle<Object> hint : key.constants()) { ObjectRef key_ref(broker(), hint); // TODO(neis): Do this for integer-HeapNumbers too? if (key_ref.IsSmi() && key_ref.AsSmi() >= 0) { base::Optional<ObjectRef> element = - receiver_ref.GetOwnConstantElement(key_ref.AsSmi(), true); + receiver_ref.GetOwnConstantElement( + key_ref.AsSmi(), SerializationPolicy::kSerializeIfNeeded); if (!element.has_value() && receiver_ref.IsJSArray()) { // We didn't find a constant element, but if the receiver is a // cow-array we can exploit the fact that any future write to the // element will replace the whole elements storage. - receiver_ref.AsJSArray().GetOwnCowElement(key_ref.AsSmi(), true); + receiver_ref.AsJSArray().GetOwnCowElement( + key_ref.AsSmi(), SerializationPolicy::kSerializeIfNeeded); } } } } } - environment()->accumulator_hints().Clear(); -} - -void SerializerForBackgroundCompilation::ProcessMapForNamedPropertyAccess( - MapRef const& map, NameRef const& name) { - // For JSNativeContextSpecialization::ReduceNamedAccess. 
- if (map.IsMapOfCurrentGlobalProxy()) { - broker()->native_context().global_proxy_object().GetPropertyCell(name, - true); + // For JSNativeContextSpecialization::InferReceiverRootMap + for (Handle<Map> map : receiver.maps()) { + MapRef map_ref(broker(), map); + map_ref.SerializeRootMap(); } } -void SerializerForBackgroundCompilation::VisitLdaKeyedProperty( +void SerializerForBackgroundCompilation::VisitLdaNamedProperty( BytecodeArrayIterator* iterator) { - Hints const& key = environment()->accumulator_hints(); Hints const& receiver = environment()->register_hints(iterator->GetRegisterOperand(0)); - FeedbackSlot slot = iterator->GetSlotOperand(1); - ProcessKeyedPropertyAccess(receiver, key, slot, AccessMode::kLoad); + NameRef name(broker(), + iterator->GetConstantForIndexOperand(1, broker()->isolate())); + FeedbackSlot slot = iterator->GetSlotOperand(2); + ProcessNamedPropertyAccess(receiver, name, slot, AccessMode::kLoad); } -void SerializerForBackgroundCompilation::ProcessNamedPropertyAccess( - Hints const& receiver, NameRef const& name, FeedbackSlot slot, - AccessMode mode) { - if (BailoutOnUninitialized(slot)) return; - ProcessFeedbackForPropertyAccess(slot, mode, name); - - for (Handle<Map> map : - GetRelevantReceiverMaps(broker()->isolate(), receiver.maps())) { - ProcessMapForNamedPropertyAccess(MapRef(broker(), map), name); - } - - JSGlobalProxyRef global_proxy = - broker()->native_context().global_proxy_object(); - - for (Handle<Object> hint : receiver.constants()) { - ObjectRef object(broker(), hint); - // For JSNativeContextSpecialization::ReduceNamedAccessFromNexus. - if (object.equals(global_proxy)) { - global_proxy.GetPropertyCell(name, true); - } - // For JSNativeContextSpecialization::ReduceJSLoadNamed. - if (mode == AccessMode::kLoad && object.IsJSFunction() && - name.equals(ObjectRef( - broker(), broker()->isolate()->factory()->prototype_string()))) { - object.AsJSFunction().Serialize(); - } - } - - environment()->accumulator_hints().Clear(); +// TODO(neis): Do feedback-independent serialization also for *NoFeedback +// bytecodes. 
+void SerializerForBackgroundCompilation::VisitLdaNamedPropertyNoFeedback( + BytecodeArrayIterator* iterator) { + NameRef(broker(), + iterator->GetConstantForIndexOperand(1, broker()->isolate())); } -void SerializerForBackgroundCompilation::ProcessNamedPropertyAccess( - BytecodeArrayIterator* iterator, AccessMode mode) { +void SerializerForBackgroundCompilation::VisitStaNamedProperty( + BytecodeArrayIterator* iterator) { Hints const& receiver = environment()->register_hints(iterator->GetRegisterOperand(0)); - Handle<Name> name = Handle<Name>::cast( - iterator->GetConstantForIndexOperand(1, broker()->isolate())); + NameRef name(broker(), + iterator->GetConstantForIndexOperand(1, broker()->isolate())); FeedbackSlot slot = iterator->GetSlotOperand(2); - ProcessNamedPropertyAccess(receiver, NameRef(broker(), name), slot, mode); -} - -void SerializerForBackgroundCompilation::VisitLdaNamedProperty( - BytecodeArrayIterator* iterator) { - ProcessNamedPropertyAccess(iterator, AccessMode::kLoad); + ProcessNamedPropertyAccess(receiver, name, slot, AccessMode::kStore); } -void SerializerForBackgroundCompilation::VisitStaNamedProperty( +void SerializerForBackgroundCompilation::VisitStaNamedPropertyNoFeedback( BytecodeArrayIterator* iterator) { - ProcessNamedPropertyAccess(iterator, AccessMode::kStore); + NameRef(broker(), + iterator->GetConstantForIndexOperand(1, broker()->isolate())); } void SerializerForBackgroundCompilation::VisitStaNamedOwnProperty( BytecodeArrayIterator* iterator) { - ProcessNamedPropertyAccess(iterator, AccessMode::kStoreInLiteral); + Hints const& receiver = + environment()->register_hints(iterator->GetRegisterOperand(0)); + NameRef name(broker(), + iterator->GetConstantForIndexOperand(1, broker()->isolate())); + FeedbackSlot slot = iterator->GetSlotOperand(2); + ProcessNamedPropertyAccess(receiver, name, slot, AccessMode::kStoreInLiteral); } void SerializerForBackgroundCompilation::VisitTestIn( @@ -2161,7 +2777,113 @@ void SerializerForBackgroundCompilation::VisitTestIn( Hints const& key = environment()->register_hints(iterator->GetRegisterOperand(0)); FeedbackSlot slot = iterator->GetSlotOperand(1); - ProcessKeyedPropertyAccess(receiver, key, slot, AccessMode::kHas); + ProcessKeyedPropertyAccess(receiver, key, slot, AccessMode::kHas, false); +} + +// For JSNativeContextSpecialization::ReduceJSOrdinaryHasInstance. 
+void SerializerForBackgroundCompilation::ProcessConstantForOrdinaryHasInstance( + HeapObjectRef const& constructor, bool* walk_prototypes) { + if (constructor.IsJSBoundFunction()) { + constructor.AsJSBoundFunction().Serialize(); + ProcessConstantForInstanceOf( + constructor.AsJSBoundFunction().bound_target_function(), + walk_prototypes); + } else if (constructor.IsJSFunction()) { + constructor.AsJSFunction().Serialize(); + *walk_prototypes = + *walk_prototypes || + (constructor.map().has_prototype_slot() && + constructor.AsJSFunction().has_prototype() && + !constructor.AsJSFunction().PrototypeRequiresRuntimeLookup()); + } +} + +void SerializerForBackgroundCompilation::ProcessConstantForInstanceOf( + ObjectRef const& constructor, bool* walk_prototypes) { + if (!constructor.IsHeapObject()) return; + HeapObjectRef constructor_heap_object = constructor.AsHeapObject(); + + PropertyAccessInfo access_info = broker()->GetPropertyAccessInfo( + constructor_heap_object.map(), + NameRef(broker(), broker()->isolate()->factory()->has_instance_symbol()), + AccessMode::kLoad, dependencies(), + SerializationPolicy::kSerializeIfNeeded); + + if (access_info.IsNotFound()) { + ProcessConstantForOrdinaryHasInstance(constructor_heap_object, + walk_prototypes); + } else if (access_info.IsDataConstant()) { + Handle<JSObject> holder; + bool found_on_proto = access_info.holder().ToHandle(&holder); + JSObjectRef holder_ref = found_on_proto ? JSObjectRef(broker(), holder) + : constructor.AsJSObject(); + base::Optional<ObjectRef> constant = holder_ref.GetOwnDataProperty( + access_info.field_representation(), access_info.field_index(), + SerializationPolicy::kSerializeIfNeeded); + CHECK(constant.has_value()); + if (constant->IsJSFunction()) { + JSFunctionRef function = constant->AsJSFunction(); + function.Serialize(); + if (function.shared().HasBuiltinId() && + function.shared().builtin_id() == + Builtins::kFunctionPrototypeHasInstance) { + // For JSCallReducer::ReduceFunctionPrototypeHasInstance. + ProcessConstantForOrdinaryHasInstance(constructor_heap_object, + walk_prototypes); + } + } + } +} + +void SerializerForBackgroundCompilation::VisitTestInstanceOf( + BytecodeArrayIterator* iterator) { + Hints const& lhs = + environment()->register_hints(iterator->GetRegisterOperand(0)); + Hints rhs = environment()->accumulator_hints(); + FeedbackSlot slot = iterator->GetSlotOperand(1); + Hints new_accumulator_hints(zone()); + + if (slot.IsInvalid() || feedback_vector().is_null()) return; + FeedbackSource source(feedback_vector(), slot); + ProcessedFeedback const& feedback = + broker()->ProcessFeedbackForInstanceOf(source); + + // Incorporate feedback (about rhs) into hints copy to simplify processing. 
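// (Note, not part of the diff: rhs was captured by value above, so adding
// the feedback constant here extends only this local copy, not the
// environment's accumulator hints.)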
+ if (!feedback.IsInsufficient()) { + InstanceOfFeedback const& rhs_feedback = feedback.AsInstanceOf(); + if (rhs_feedback.value().has_value()) { + Handle<JSObject> constructor = rhs_feedback.value()->object(); + rhs.AddConstant(constructor); + } + } + + bool walk_prototypes = false; + for (Handle<Object> constant : rhs.constants()) { + ProcessConstantForInstanceOf(ObjectRef(broker(), constant), + &walk_prototypes); + } + if (walk_prototypes) ProcessHintsForHasInPrototypeChain(lhs); + + environment()->accumulator_hints().Clear(); + environment()->accumulator_hints().Add(new_accumulator_hints); +} + +void SerializerForBackgroundCompilation::VisitToNumeric( + BytecodeArrayIterator* iterator) { + FeedbackSlot slot = iterator->GetSlotOperand(0); + ProcessUnaryOrBinaryOperation(slot, false); +} + +void SerializerForBackgroundCompilation::VisitToNumber( + BytecodeArrayIterator* iterator) { + FeedbackSlot slot = iterator->GetSlotOperand(0); + ProcessUnaryOrBinaryOperation(slot, false); +} + +void SerializerForBackgroundCompilation::VisitThrowReferenceErrorIfHole( + BytecodeArrayIterator* iterator) { + ObjectRef(broker(), + iterator->GetConstantForIndexOperand(0, broker()->isolate())); } void SerializerForBackgroundCompilation::VisitStaKeyedProperty( @@ -2171,7 +2893,7 @@ void SerializerForBackgroundCompilation::VisitStaKeyedProperty( Hints const& key = environment()->register_hints(iterator->GetRegisterOperand(1)); FeedbackSlot slot = iterator->GetSlotOperand(2); - ProcessKeyedPropertyAccess(receiver, key, slot, AccessMode::kStore); + ProcessKeyedPropertyAccess(receiver, key, slot, AccessMode::kStore, true); } void SerializerForBackgroundCompilation::VisitStaInArrayLiteral( @@ -2181,7 +2903,19 @@ void SerializerForBackgroundCompilation::VisitStaInArrayLiteral( Hints const& key = environment()->register_hints(iterator->GetRegisterOperand(1)); FeedbackSlot slot = iterator->GetSlotOperand(2); - ProcessKeyedPropertyAccess(receiver, key, slot, AccessMode::kStoreInLiteral); + ProcessKeyedPropertyAccess(receiver, key, slot, AccessMode::kStoreInLiteral, + true); +} + +void SerializerForBackgroundCompilation::VisitStaDataPropertyInLiteral( + BytecodeArrayIterator* iterator) { + Hints const& receiver = + environment()->register_hints(iterator->GetRegisterOperand(0)); + Hints const& key = + environment()->register_hints(iterator->GetRegisterOperand(1)); + FeedbackSlot slot = iterator->GetSlotOperand(3); + ProcessKeyedPropertyAccess(receiver, key, slot, AccessMode::kStoreInLiteral, + false); } #define DEFINE_CLEAR_ENVIRONMENT(name, ...) \ @@ -2239,14 +2973,44 @@ UNREACHABLE_BYTECODE_LIST(DEFINE_UNREACHABLE) KILL_ENVIRONMENT_LIST(DEFINE_KILL) #undef DEFINE_KILL -#undef CLEAR_ENVIRONMENT_LIST -#undef KILL_ENVIRONMENT_LIST +#define DEFINE_BINARY_OP(name, ...) \ + void SerializerForBackgroundCompilation::Visit##name( \ + BytecodeArrayIterator* iterator) { \ + FeedbackSlot slot = iterator->GetSlotOperand(1); \ + ProcessUnaryOrBinaryOperation(slot, true); \ + } +BINARY_OP_LIST(DEFINE_BINARY_OP) +#undef DEFINE_BINARY_OP + +#define DEFINE_COMPARE_OP(name, ...) \ + void SerializerForBackgroundCompilation::Visit##name( \ + BytecodeArrayIterator* iterator) { \ + FeedbackSlot slot = iterator->GetSlotOperand(1); \ + ProcessCompareOperation(slot); \ + } +COMPARE_OP_LIST(DEFINE_COMPARE_OP) +#undef DEFINE_COMPARE_OP + +#define DEFINE_UNARY_OP(name, ...) 
\ + void SerializerForBackgroundCompilation::Visit##name( \ + BytecodeArrayIterator* iterator) { \ + FeedbackSlot slot = iterator->GetSlotOperand(0); \ + ProcessUnaryOrBinaryOperation(slot, true); \ + } +UNARY_OP_LIST(DEFINE_UNARY_OP) +#undef DEFINE_UNARY_OP + +#undef BINARY_OP_LIST #undef CLEAR_ACCUMULATOR_LIST -#undef UNCONDITIONAL_JUMPS_LIST +#undef CLEAR_ENVIRONMENT_LIST +#undef COMPARE_OP_LIST #undef CONDITIONAL_JUMPS_LIST #undef IGNORED_BYTECODE_LIST -#undef UNREACHABLE_BYTECODE_LIST +#undef KILL_ENVIRONMENT_LIST #undef SUPPORTED_BYTECODE_LIST +#undef UNARY_OP_LIST +#undef UNCONDITIONAL_JUMPS_LIST +#undef UNREACHABLE_BYTECODE_LIST } // namespace compiler } // namespace internal diff --git a/deps/v8/src/compiler/simd-scalar-lowering.cc b/deps/v8/src/compiler/simd-scalar-lowering.cc index 6deba2b002..783f3bcc11 100644 --- a/deps/v8/src/compiler/simd-scalar-lowering.cc +++ b/deps/v8/src/compiler/simd-scalar-lowering.cc @@ -138,6 +138,7 @@ void SimdScalarLowering::LowerGraph() { V(F32x4AddHoriz) \ V(F32x4Sub) \ V(F32x4Mul) \ + V(F32x4Div) \ V(F32x4Min) \ V(F32x4Max) @@ -1207,6 +1208,7 @@ void SimdScalarLowering::LowerNode(Node* node) { F32X4_BINOP_CASE(Add) F32X4_BINOP_CASE(Sub) F32X4_BINOP_CASE(Mul) + F32X4_BINOP_CASE(Div) F32X4_BINOP_CASE(Min) F32X4_BINOP_CASE(Max) #undef F32X4_BINOP_CASE @@ -1390,7 +1392,7 @@ void SimdScalarLowering::LowerNode(Node* node) { int input_num_lanes = NumLanes(input_rep_type); Node** rep = GetReplacements(node->InputAt(0)); Node** rep_node = zone()->NewArray<Node*>(num_lanes); - Node* true_node = mcgraph_->Int32Constant(-1); + Node* true_node = mcgraph_->Int32Constant(1); Node* false_node = mcgraph_->Int32Constant(0); Node* tmp_result = false_node; if (node->opcode() == IrOpcode::kS1x4AllTrue || diff --git a/deps/v8/src/compiler/simplified-lowering.cc b/deps/v8/src/compiler/simplified-lowering.cc index b028a76bb0..1ca7bfe707 100644 --- a/deps/v8/src/compiler/simplified-lowering.cc +++ b/deps/v8/src/compiler/simplified-lowering.cc @@ -92,7 +92,7 @@ MachineRepresentation MachineRepresentationFromArrayType( } UseInfo CheckedUseInfoAsWord32FromHint( - NumberOperationHint hint, const VectorSlotPair& feedback = VectorSlotPair(), + NumberOperationHint hint, const FeedbackSource& feedback = FeedbackSource(), IdentifyZeros identify_zeros = kDistinguishZeros) { switch (hint) { case NumberOperationHint::kSignedSmall: @@ -109,7 +109,7 @@ UseInfo CheckedUseInfoAsWord32FromHint( } UseInfo CheckedUseInfoAsFloat64FromHint( - NumberOperationHint hint, const VectorSlotPair& feedback, + NumberOperationHint hint, const FeedbackSource& feedback, IdentifyZeros identify_zeros = kDistinguishZeros) { switch (hint) { case NumberOperationHint::kSignedSmall: @@ -1092,7 +1092,7 @@ class RepresentationSelector { if (lower()) DeferReplacement(node, node->InputAt(0)); } else { VisitUnop(node, - UseInfo::CheckedHeapObjectAsTaggedPointer(VectorSlotPair()), + UseInfo::CheckedHeapObjectAsTaggedPointer(FeedbackSource()), MachineRepresentation::kTaggedPointer); } } @@ -1299,9 +1299,7 @@ class RepresentationSelector { if (base_taggedness == kTaggedBase && CanBeTaggedOrCompressedPointer(field_representation)) { Type value_type = NodeProperties::GetType(value); - if (field_representation == MachineRepresentation::kTaggedSigned || - value_representation == MachineRepresentation::kTaggedSigned || - field_representation == MachineRepresentation::kCompressedSigned || + if (value_representation == MachineRepresentation::kTaggedSigned || value_representation == MachineRepresentation::kCompressedSigned) 
{ // Write barriers are only for stores of heap objects. return kNoWriteBarrier; @@ -1444,13 +1442,13 @@ class RepresentationSelector { !right_feedback_type.Maybe(Type::MinusZero())) { left_identify_zeros = kIdentifyZeros; } - UseInfo left_use = CheckedUseInfoAsWord32FromHint(hint, VectorSlotPair(), + UseInfo left_use = CheckedUseInfoAsWord32FromHint(hint, FeedbackSource(), left_identify_zeros); // For CheckedInt32Add and CheckedInt32Sub, we don't need to do // a minus zero check for the right hand side, since we already // know that the left hand side is a proper Signed32 value, // potentially guarded by a check. - UseInfo right_use = CheckedUseInfoAsWord32FromHint(hint, VectorSlotPair(), + UseInfo right_use = CheckedUseInfoAsWord32FromHint(hint, FeedbackSource(), kIdentifyZeros); VisitBinop(node, left_use, right_use, MachineRepresentation::kWord32, Type::Signed32()); @@ -1483,7 +1481,7 @@ class RepresentationSelector { // default case => Float64Add/Sub VisitBinop(node, UseInfo::CheckedNumberOrOddballAsFloat64(kDistinguishZeros, - VectorSlotPair()), + FeedbackSource()), MachineRepresentation::kFloat64, Type::Number()); if (lower()) { ChangeToPureOp(node, Float64Op(node)); @@ -1546,9 +1544,9 @@ class RepresentationSelector { // right hand side doesn't matter anyways, so in particular there's // no observable difference between a 0 and a -0 then. UseInfo const lhs_use = CheckedUseInfoAsWord32FromHint( - hint, VectorSlotPair(), truncation.identify_zeros()); + hint, FeedbackSource(), truncation.identify_zeros()); UseInfo const rhs_use = CheckedUseInfoAsWord32FromHint( - hint, VectorSlotPair(), kIdentifyZeros); + hint, FeedbackSource(), kIdentifyZeros); if (truncation.IsUsedAsWord32()) { VisitBinop(node, lhs_use, rhs_use, MachineRepresentation::kWord32); if (lower()) DeferReplacement(node, lowering->Int32Mod(node)); @@ -1589,9 +1587,9 @@ class RepresentationSelector { // right hand side doesn't matter anyways, so in particular there's // no observable difference between a 0 and a -0 then. 
UseInfo const lhs_use = UseInfo::CheckedNumberOrOddballAsFloat64( - truncation.identify_zeros(), VectorSlotPair()); + truncation.identify_zeros(), FeedbackSource()); UseInfo const rhs_use = UseInfo::CheckedNumberOrOddballAsFloat64( - kIdentifyZeros, VectorSlotPair()); + kIdentifyZeros, FeedbackSource()); VisitBinop(node, lhs_use, rhs_use, MachineRepresentation::kFloat64, Type::Number()); if (lower()) ChangeToPureOp(node, Float64Op(node)); @@ -1931,7 +1929,7 @@ class RepresentationSelector { case NumberOperationHint::kSignedSmall: if (propagate()) { VisitBinop(node, - CheckedUseInfoAsWord32FromHint(hint, VectorSlotPair(), + CheckedUseInfoAsWord32FromHint(hint, FeedbackSource(), kIdentifyZeros), MachineRepresentation::kBit); } else if (retype()) { @@ -1944,7 +1942,7 @@ class RepresentationSelector { IsNodeRepresentationTagged(rhs)) { VisitBinop(node, UseInfo::CheckedSignedSmallAsTaggedSigned( - VectorSlotPair(), kIdentifyZeros), + FeedbackSource(), kIdentifyZeros), MachineRepresentation::kBit); ChangeToPureOp( node, changer_->TaggedSignedOperatorFor(node->opcode())); @@ -1952,7 +1950,7 @@ class RepresentationSelector { } else { VisitBinop(node, CheckedUseInfoAsWord32FromHint( - hint, VectorSlotPair(), kIdentifyZeros), + hint, FeedbackSource(), kIdentifyZeros), MachineRepresentation::kBit); ChangeToPureOp(node, Int32Op(node)); } @@ -1969,7 +1967,7 @@ class RepresentationSelector { V8_FALLTHROUGH; case NumberOperationHint::kNumber: VisitBinop(node, - CheckedUseInfoAsFloat64FromHint(hint, VectorSlotPair(), + CheckedUseInfoAsFloat64FromHint(hint, FeedbackSource(), kIdentifyZeros), MachineRepresentation::kBit); if (lower()) ChangeToPureOp(node, Float64Op(node)); @@ -2054,7 +2052,7 @@ class RepresentationSelector { // Checked float64 x float64 => float64 VisitBinop(node, UseInfo::CheckedNumberOrOddballAsFloat64(kDistinguishZeros, - VectorSlotPair()), + FeedbackSource()), MachineRepresentation::kFloat64, Type::Number()); if (lower()) ChangeToPureOp(node, Float64Op(node)); return; @@ -2150,7 +2148,7 @@ class RepresentationSelector { // default case => Float64Div VisitBinop(node, UseInfo::CheckedNumberOrOddballAsFloat64(kDistinguishZeros, - VectorSlotPair()), + FeedbackSource()), MachineRepresentation::kFloat64, Type::Number()); if (lower()) ChangeToPureOp(node, Float64Op(node)); return; @@ -2320,7 +2318,7 @@ class RepresentationSelector { if (lower()) { node->RemoveInput(1); NodeProperties::ChangeOp( - node, simplified()->CheckedUint32ToInt32(VectorSlotPair())); + node, simplified()->CheckedUint32ToInt32(FeedbackSource())); } return; } @@ -2707,14 +2705,14 @@ class RepresentationSelector { case IrOpcode::kSpeculativeBigIntAdd: { if (truncation.IsUsedAsWord64()) { VisitBinop(node, - UseInfo::CheckedBigIntTruncatingWord64(VectorSlotPair{}), + UseInfo::CheckedBigIntTruncatingWord64(FeedbackSource{}), MachineRepresentation::kWord64); if (lower()) { ChangeToPureOp(node, lowering->machine()->Int64Add()); } } else { VisitBinop(node, - UseInfo::CheckedBigIntAsTaggedPointer(VectorSlotPair{}), + UseInfo::CheckedBigIntAsTaggedPointer(FeedbackSource{}), MachineRepresentation::kTaggedPointer); if (lower()) { NodeProperties::ChangeOp(node, lowering->simplified()->BigIntAdd()); @@ -2725,7 +2723,7 @@ class RepresentationSelector { case IrOpcode::kSpeculativeBigIntNegate: { if (truncation.IsUsedAsWord64()) { VisitUnop(node, - UseInfo::CheckedBigIntTruncatingWord64(VectorSlotPair{}), + UseInfo::CheckedBigIntTruncatingWord64(FeedbackSource{}), MachineRepresentation::kWord64); if (lower()) { 
ChangeUnaryToPureBinaryOp(node, lowering->machine()->Int64Sub(), 0, @@ -2733,7 +2731,7 @@ class RepresentationSelector { } } else { VisitUnop(node, - UseInfo::CheckedBigIntAsTaggedPointer(VectorSlotPair{}), + UseInfo::CheckedBigIntAsTaggedPointer(FeedbackSource{}), MachineRepresentation::kTaggedPointer); if (lower()) { ChangeToPureOp(node, lowering->simplified()->BigIntNegate()); @@ -2822,7 +2820,7 @@ class RepresentationSelector { MachineRepresentation::kTaggedPointer); } else { VisitUnop(node, - UseInfo::CheckedHeapObjectAsTaggedPointer(VectorSlotPair()), + UseInfo::CheckedHeapObjectAsTaggedPointer(FeedbackSource()), MachineRepresentation::kTaggedPointer); } if (lower()) DeferReplacement(node, node->InputAt(0)); @@ -3417,12 +3415,12 @@ class RepresentationSelector { } case IrOpcode::kTransitionElementsKind: { return VisitUnop( - node, UseInfo::CheckedHeapObjectAsTaggedPointer(VectorSlotPair()), + node, UseInfo::CheckedHeapObjectAsTaggedPointer(FeedbackSource()), MachineRepresentation::kNone); } case IrOpcode::kCompareMaps: return VisitUnop( - node, UseInfo::CheckedHeapObjectAsTaggedPointer(VectorSlotPair()), + node, UseInfo::CheckedHeapObjectAsTaggedPointer(FeedbackSource()), MachineRepresentation::kBit); case IrOpcode::kEnsureWritableFastElements: return VisitBinop(node, UseInfo::AnyTagged(), diff --git a/deps/v8/src/compiler/simplified-operator-reducer.cc b/deps/v8/src/compiler/simplified-operator-reducer.cc index c3cca499ac..885a86286e 100644 --- a/deps/v8/src/compiler/simplified-operator-reducer.cc +++ b/deps/v8/src/compiler/simplified-operator-reducer.cc @@ -106,6 +106,11 @@ Reduction SimplifiedOperatorReducer::Reduce(Node* node) { if (m.IsChangeInt31ToTaggedSigned() || m.IsChangeInt32ToTagged()) { return Replace(m.InputAt(0)); } + if (m.IsChangeCompressedSignedToTaggedSigned()) { + Node* new_node = graph()->NewNode( + simplified()->ChangeCompressedSignedToInt32(), m.InputAt(0)); + return Replace(new_node); + } break; } case IrOpcode::kChangeTaggedToUint32: { @@ -143,6 +148,40 @@ Reduction SimplifiedOperatorReducer::Reduce(Node* node) { } break; } + case IrOpcode::kChangeTaggedSignedToCompressedSigned: { + DCHECK(COMPRESS_POINTERS_BOOL); + NodeMatcher m(node->InputAt(0)); + if (m.IsChangeInt31ToTaggedSigned()) { + Node* new_node = graph()->NewNode( + simplified()->ChangeInt31ToCompressedSigned(), m.InputAt(0)); + return Replace(new_node); + } else if (m.IsCheckedInt32ToTaggedSigned()) { + // Create a new checked node that outputs CompressedSigned values, with + // an explicit decompression after it. + Node* new_checked = graph()->CloneNode(m.node()); + NodeProperties::ChangeOp( + new_checked, simplified()->CheckedInt32ToCompressedSigned( + CheckParametersOf(m.node()->op()).feedback())); + Node* new_decompression = graph()->NewNode( + machine()->ChangeCompressedSignedToTaggedSigned(), new_checked); + + // For all uses of the old checked node, instead insert the new "checked + // + decompression". Also, update control and effect. + ReplaceWithValue(m.node(), new_decompression, new_checked, new_checked); + + // In the current node, we can skip the decompression since we are going + // to have a Decompression + Compression combo. 
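// (Illustrative sketch of this rewrite, not part of the diff, using
// hypothetical value names:
//   before: t = CheckedInt32ToTaggedSigned(x)
//           c = ChangeTaggedSignedToCompressedSigned(t)   <- current node
//   after:  k = CheckedInt32ToCompressedSigned(x), taking over t's effect
//           and control edges; t's remaining value uses read
//           ChangeCompressedSignedToTaggedSigned(k); and c is replaced by
//           k directly, eliding the decompress/compress pair.)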
+ return Replace(new_checked); + } + break; + } + case IrOpcode::kChangeCompressedSignedToInt32: { + NodeMatcher m(node->InputAt(0)); + if (m.IsCheckedInt32ToCompressedSigned()) { + return Replace(m.InputAt(0)); + } + break; + } case IrOpcode::kCheckedTaggedToInt32: case IrOpcode::kCheckedTaggedSignedToInt32: { NodeMatcher m(node->InputAt(0)); @@ -152,6 +191,14 @@ Reduction SimplifiedOperatorReducer::Reduce(Node* node) { } break; } + case IrOpcode::kCheckedTaggedToTaggedPointer: { + NodeMatcher m(node->InputAt(0)); + if (m.IsChangeCompressedPointerToTaggedPointer()) { + RelaxEffectsAndControls(node); + return Replace(m.node()); + } + break; + } case IrOpcode::kCheckIf: { HeapObjectMatcher m(node->InputAt(0)); if (m.Is(factory()->true_value())) { @@ -267,6 +314,10 @@ MachineOperatorBuilder* SimplifiedOperatorReducer::machine() const { return jsgraph()->machine(); } +SimplifiedOperatorBuilder* SimplifiedOperatorReducer::simplified() const { + return jsgraph()->simplified(); +} + } // namespace compiler } // namespace internal } // namespace v8 diff --git a/deps/v8/src/compiler/simplified-operator.cc b/deps/v8/src/compiler/simplified-operator.cc index 4f83635422..6b86a95e01 100644 --- a/deps/v8/src/compiler/simplified-operator.cc +++ b/deps/v8/src/compiler/simplified-operator.cc @@ -31,13 +31,34 @@ std::ostream& operator<<(std::ostream& os, BaseTaggedness base_taggedness) { UNREACHABLE(); } +std::ostream& operator<<(std::ostream& os, + ConstFieldInfo const& const_field_info) { + if (const_field_info.IsConst()) { + return os << "const (field owner: " << const_field_info.owner_map.address() + << ")"; + } else { + return os << "mutable"; + } + UNREACHABLE(); +} + +bool operator==(ConstFieldInfo const& lhs, ConstFieldInfo const& rhs) { + return lhs.owner_map.address() == rhs.owner_map.address(); +} + +size_t hash_value(ConstFieldInfo const& const_field_info) { + return (size_t)const_field_info.owner_map.address(); +} + bool operator==(FieldAccess const& lhs, FieldAccess const& rhs) { // On purpose we don't include the write barrier kind here, as this method is // really only relevant for eliminating loads and they don't care about the // write barrier mode. return lhs.base_is_tagged == rhs.base_is_tagged && lhs.offset == rhs.offset && lhs.map.address() == rhs.map.address() && - lhs.machine_type == rhs.machine_type; + lhs.machine_type == rhs.machine_type && + lhs.const_field_info == rhs.const_field_info && + lhs.is_store_in_literal == rhs.is_store_in_literal; } size_t hash_value(FieldAccess const& access) { @@ -45,7 +66,8 @@ size_t hash_value(FieldAccess const& access) { // really only relevant for eliminating loads and they don't care about the // write barrier mode. 
return base::hash_combine(access.base_is_tagged, access.offset, - access.machine_type); + access.machine_type, access.const_field_info, + access.is_store_in_literal); } size_t hash_value(LoadSensitivity load_sensitivity) { @@ -78,7 +100,10 @@ std::ostream& operator<<(std::ostream& os, FieldAccess const& access) { } #endif os << access.type << ", " << access.machine_type << ", " - << access.write_barrier_kind << ", " << access.constness; + << access.write_barrier_kind << ", " << access.const_field_info; + if (access.is_store_in_literal) { + os << " (store in literal)"; + } if (FLAG_untrusted_code_mitigations) { os << ", " << access.load_sensitivity; } @@ -199,7 +224,8 @@ std::ostream& operator<<(std::ostream& os, } size_t hash_value(const CheckFloat64HoleParameters& params) { - return base::hash_combine(params.mode(), params.feedback()); + FeedbackSource::Hash feedback_hash; + return base::hash_combine(params.mode(), feedback_hash(params.feedback())); } bool operator==(CheckFloat64HoleParameters const& lhs, @@ -249,7 +275,8 @@ bool operator==(CheckMapsParameters const& lhs, } size_t hash_value(CheckMapsParameters const& p) { - return base::hash_combine(p.flags(), p.maps(), p.feedback()); + FeedbackSource::Hash feedback_hash; + return base::hash_combine(p.flags(), p.maps(), feedback_hash(p.feedback())); } std::ostream& operator<<(std::ostream& os, CheckMapsParameters const& p) { @@ -305,7 +332,8 @@ bool operator==(const GrowFastElementsParameters& lhs, } inline size_t hash_value(const GrowFastElementsParameters& params) { - return base::hash_combine(params.mode(), params.feedback()); + FeedbackSource::Hash feedback_hash; + return base::hash_combine(params.mode(), feedback_hash(params.feedback())); } std::ostream& operator<<(std::ostream& os, @@ -550,7 +578,8 @@ bool operator==(NumberOperationParameters const& lhs, } size_t hash_value(NumberOperationParameters const& p) { - return base::hash_combine(p.hint(), p.feedback()); + FeedbackSource::Hash feedback_hash; + return base::hash_combine(p.hint(), feedback_hash(p.feedback())); } std::ostream& operator<<(std::ostream& os, NumberOperationParameters const& p) { @@ -619,7 +648,8 @@ std::ostream& operator<<(std::ostream& os, } size_t hash_value(const CheckTaggedInputParameters& params) { - return base::hash_combine(params.mode(), params.feedback()); + FeedbackSource::Hash feedback_hash; + return base::hash_combine(params.mode(), feedback_hash(params.feedback())); } bool operator==(CheckTaggedInputParameters const& lhs, @@ -645,7 +675,8 @@ std::ostream& operator<<(std::ostream& os, } size_t hash_value(const CheckMinusZeroParameters& params) { - return base::hash_combine(params.mode(), params.feedback()); + FeedbackSource::Hash feedback_hash; + return base::hash_combine(params.mode(), feedback_hash(params.feedback())); } bool operator==(CheckMinusZeroParameters const& lhs, @@ -878,7 +909,7 @@ struct SimplifiedOperatorGlobalCache final { : Operator1<CheckParameters>( \ IrOpcode::k##Name, Operator::kFoldable | Operator::kNoThrow, \ #Name, value_input_count, 1, 1, value_output_count, 1, 0, \ - CheckParameters(VectorSlotPair())) {} \ + CheckParameters(FeedbackSource())) {} \ }; \ Name##Operator k##Name; CHECKED_WITH_FEEDBACK_OP_LIST(CHECKED_WITH_FEEDBACK) @@ -886,16 +917,16 @@ struct SimplifiedOperatorGlobalCache final { #define CHECKED_BOUNDS(Name) \ struct Name##Operator final : public Operator1<CheckBoundsParameters> { \ - Name##Operator(VectorSlotPair feedback, CheckBoundsParameters::Mode mode) \ + Name##Operator(FeedbackSource feedback, 
CheckBoundsParameters::Mode mode) \ : Operator1<CheckBoundsParameters>( \ IrOpcode::k##Name, Operator::kFoldable | Operator::kNoThrow, \ #Name, 2, 1, 1, 1, 1, 0, \ CheckBoundsParameters(feedback, mode)) {} \ }; \ Name##Operator k##Name##Deopting = { \ - VectorSlotPair(), CheckBoundsParameters::kDeoptOnOutOfBounds}; \ + FeedbackSource(), CheckBoundsParameters::kDeoptOnOutOfBounds}; \ Name##Operator k##Name##Aborting = { \ - VectorSlotPair(), CheckBoundsParameters::kAbortOnOutOfBounds}; + FeedbackSource(), CheckBoundsParameters::kAbortOnOutOfBounds}; CHECKED_BOUNDS_OP_LIST(CHECKED_BOUNDS) #undef CHECKED_BOUNDS @@ -905,7 +936,7 @@ struct SimplifiedOperatorGlobalCache final { : Operator1<CheckIfParameters>( IrOpcode::kCheckIf, Operator::kFoldable | Operator::kNoThrow, "CheckIf", 1, 1, 1, 0, 1, 0, - CheckIfParameters(kDeoptimizeReason, VectorSlotPair())) {} + CheckIfParameters(kDeoptimizeReason, FeedbackSource())) {} }; #define CHECK_IF(Name, message) \ CheckIfOperator<DeoptimizeReason::k##Name> kCheckIf##Name; @@ -970,7 +1001,7 @@ struct SimplifiedOperatorGlobalCache final { IrOpcode::kCheckedFloat64ToInt32, Operator::kFoldable | Operator::kNoThrow, "CheckedFloat64ToInt32", 1, 1, 1, 1, 1, 0, - CheckMinusZeroParameters(kMode, VectorSlotPair())) {} + CheckMinusZeroParameters(kMode, FeedbackSource())) {} }; CheckedFloat64ToInt32Operator<CheckForMinusZeroMode::kCheckForMinusZero> kCheckedFloat64ToInt32CheckForMinusZeroOperator; @@ -985,7 +1016,7 @@ struct SimplifiedOperatorGlobalCache final { IrOpcode::kCheckedFloat64ToInt64, Operator::kFoldable | Operator::kNoThrow, "CheckedFloat64ToInt64", 1, 1, 1, 1, 1, 0, - CheckMinusZeroParameters(kMode, VectorSlotPair())) {} + CheckMinusZeroParameters(kMode, FeedbackSource())) {} }; CheckedFloat64ToInt64Operator<CheckForMinusZeroMode::kCheckForMinusZero> kCheckedFloat64ToInt64CheckForMinusZeroOperator; @@ -1000,7 +1031,7 @@ struct SimplifiedOperatorGlobalCache final { IrOpcode::kCheckedTaggedToInt32, Operator::kFoldable | Operator::kNoThrow, "CheckedTaggedToInt32", 1, 1, 1, 1, 1, 0, - CheckMinusZeroParameters(kMode, VectorSlotPair())) {} + CheckMinusZeroParameters(kMode, FeedbackSource())) {} }; CheckedTaggedToInt32Operator<CheckForMinusZeroMode::kCheckForMinusZero> kCheckedTaggedToInt32CheckForMinusZeroOperator; @@ -1015,7 +1046,7 @@ struct SimplifiedOperatorGlobalCache final { IrOpcode::kCheckedTaggedToInt64, Operator::kFoldable | Operator::kNoThrow, "CheckedTaggedToInt64", 1, 1, 1, 1, 1, 0, - CheckMinusZeroParameters(kMode, VectorSlotPair())) {} + CheckMinusZeroParameters(kMode, FeedbackSource())) {} }; CheckedTaggedToInt64Operator<CheckForMinusZeroMode::kCheckForMinusZero> kCheckedTaggedToInt64CheckForMinusZeroOperator; @@ -1030,7 +1061,7 @@ struct SimplifiedOperatorGlobalCache final { IrOpcode::kCheckedTaggedToFloat64, Operator::kFoldable | Operator::kNoThrow, "CheckedTaggedToFloat64", 1, 1, 1, 1, 1, 0, - CheckTaggedInputParameters(kMode, VectorSlotPair())) {} + CheckTaggedInputParameters(kMode, FeedbackSource())) {} }; CheckedTaggedToFloat64Operator<CheckTaggedInputMode::kNumber> kCheckedTaggedToFloat64NumberOperator; @@ -1045,7 +1076,7 @@ struct SimplifiedOperatorGlobalCache final { IrOpcode::kCheckedTruncateTaggedToWord32, Operator::kFoldable | Operator::kNoThrow, "CheckedTruncateTaggedToWord32", 1, 1, 1, 1, 1, 0, - CheckTaggedInputParameters(kMode, VectorSlotPair())) {} + CheckTaggedInputParameters(kMode, FeedbackSource())) {} }; CheckedTruncateTaggedToWord32Operator<CheckTaggedInputMode::kNumber> kCheckedTruncateTaggedToWord32NumberOperator; @@ 
-1077,7 +1108,7 @@ struct SimplifiedOperatorGlobalCache final { IrOpcode::kCheckFloat64Hole, Operator::kFoldable | Operator::kNoThrow, "CheckFloat64Hole", 1, 1, 1, 1, 1, 0, - CheckFloat64HoleParameters(kMode, VectorSlotPair())) {} + CheckFloat64HoleParameters(kMode, FeedbackSource())) {} }; CheckFloat64HoleNaNOperator<CheckFloat64HoleMode::kAllowReturnHole> kCheckFloat64HoleAllowReturnHoleOperator; @@ -1100,7 +1131,7 @@ struct SimplifiedOperatorGlobalCache final { GrowFastElementsOperator() : Operator1(IrOpcode::kMaybeGrowFastElements, Operator::kNoThrow, "MaybeGrowFastElements", 4, 1, 1, 1, 1, 0, - GrowFastElementsParameters(kMode, VectorSlotPair())) {} + GrowFastElementsParameters(kMode, FeedbackSource())) {} }; GrowFastElementsOperator<GrowFastElementsMode::kDoubleElements> @@ -1145,7 +1176,7 @@ struct SimplifiedOperatorGlobalCache final { IrOpcode::kSpeculativeToNumber, Operator::kFoldable | Operator::kNoThrow, "SpeculativeToNumber", 1, 1, 1, 1, 1, 0, - NumberOperationParameters(kHint, VectorSlotPair())) {} + NumberOperationParameters(kHint, FeedbackSource())) {} }; SpeculativeToNumberOperator<NumberOperationHint::kSignedSmall> kSpeculativeToNumberSignedSmallOperator; @@ -1179,7 +1210,7 @@ GET_FROM_CACHE(LoadFieldByIndex) #define GET_FROM_CACHE_WITH_FEEDBACK(Name, value_input_count, \ value_output_count) \ const Operator* SimplifiedOperatorBuilder::Name( \ - const VectorSlotPair& feedback) { \ + const FeedbackSource& feedback) { \ if (!feedback.IsValid()) { \ return &cache_.k##Name; \ } \ @@ -1193,7 +1224,7 @@ CHECKED_WITH_FEEDBACK_OP_LIST(GET_FROM_CACHE_WITH_FEEDBACK) #define GET_FROM_CACHE_WITH_FEEDBACK(Name) \ const Operator* SimplifiedOperatorBuilder::Name( \ - const VectorSlotPair& feedback, CheckBoundsParameters::Mode mode) { \ + const FeedbackSource& feedback, CheckBoundsParameters::Mode mode) { \ if (!feedback.IsValid()) { \ switch (mode) { \ case CheckBoundsParameters::kDeoptOnOutOfBounds: \ @@ -1242,7 +1273,7 @@ const Operator* SimplifiedOperatorBuilder::AssertType(Type type) { } const Operator* SimplifiedOperatorBuilder::CheckIf( - DeoptimizeReason reason, const VectorSlotPair& feedback) { + DeoptimizeReason reason, const FeedbackSource& feedback) { if (!feedback.IsValid()) { switch (reason) { #define CHECK_IF(Name, message) \ @@ -1280,7 +1311,7 @@ const Operator* SimplifiedOperatorBuilder::CheckedInt32Mul( } const Operator* SimplifiedOperatorBuilder::CheckedFloat64ToInt32( - CheckForMinusZeroMode mode, const VectorSlotPair& feedback) { + CheckForMinusZeroMode mode, const FeedbackSource& feedback) { if (!feedback.IsValid()) { switch (mode) { case CheckForMinusZeroMode::kCheckForMinusZero: @@ -1296,7 +1327,7 @@ const Operator* SimplifiedOperatorBuilder::CheckedFloat64ToInt32( } const Operator* SimplifiedOperatorBuilder::CheckedFloat64ToInt64( - CheckForMinusZeroMode mode, const VectorSlotPair& feedback) { + CheckForMinusZeroMode mode, const FeedbackSource& feedback) { if (!feedback.IsValid()) { switch (mode) { case CheckForMinusZeroMode::kCheckForMinusZero: @@ -1312,7 +1343,7 @@ const Operator* SimplifiedOperatorBuilder::CheckedFloat64ToInt64( } const Operator* SimplifiedOperatorBuilder::CheckedTaggedToInt32( - CheckForMinusZeroMode mode, const VectorSlotPair& feedback) { + CheckForMinusZeroMode mode, const FeedbackSource& feedback) { if (!feedback.IsValid()) { switch (mode) { case CheckForMinusZeroMode::kCheckForMinusZero: @@ -1328,7 +1359,7 @@ const Operator* SimplifiedOperatorBuilder::CheckedTaggedToInt32( } const Operator* SimplifiedOperatorBuilder::CheckedTaggedToInt64( 
- CheckForMinusZeroMode mode, const VectorSlotPair& feedback) { + CheckForMinusZeroMode mode, const FeedbackSource& feedback) { if (!feedback.IsValid()) { switch (mode) { case CheckForMinusZeroMode::kCheckForMinusZero: @@ -1344,7 +1375,7 @@ const Operator* SimplifiedOperatorBuilder::CheckedTaggedToInt64( } const Operator* SimplifiedOperatorBuilder::CheckedTaggedToFloat64( - CheckTaggedInputMode mode, const VectorSlotPair& feedback) { + CheckTaggedInputMode mode, const FeedbackSource& feedback) { if (!feedback.IsValid()) { switch (mode) { case CheckTaggedInputMode::kNumber: @@ -1360,7 +1391,7 @@ const Operator* SimplifiedOperatorBuilder::CheckedTaggedToFloat64( } const Operator* SimplifiedOperatorBuilder::CheckedTruncateTaggedToWord32( - CheckTaggedInputMode mode, const VectorSlotPair& feedback) { + CheckTaggedInputMode mode, const FeedbackSource& feedback) { if (!feedback.IsValid()) { switch (mode) { case CheckTaggedInputMode::kNumber: @@ -1377,7 +1408,7 @@ const Operator* SimplifiedOperatorBuilder::CheckedTruncateTaggedToWord32( const Operator* SimplifiedOperatorBuilder::CheckMaps( CheckMapsFlags flags, ZoneHandleSet<Map> maps, - const VectorSlotPair& feedback) { + const FeedbackSource& feedback) { CheckMapsParameters const parameters(flags, maps, feedback); return new (zone()) Operator1<CheckMapsParameters>( // -- IrOpcode::kCheckMaps, // opcode @@ -1422,7 +1453,7 @@ const Operator* SimplifiedOperatorBuilder::ConvertReceiver( } const Operator* SimplifiedOperatorBuilder::CheckFloat64Hole( - CheckFloat64HoleMode mode, VectorSlotPair const& feedback) { + CheckFloat64HoleMode mode, FeedbackSource const& feedback) { if (!feedback.IsValid()) { switch (mode) { case CheckFloat64HoleMode::kAllowReturnHole: @@ -1454,7 +1485,7 @@ const Operator* SimplifiedOperatorBuilder::SpeculativeBigIntNegate( } const Operator* SimplifiedOperatorBuilder::SpeculativeToNumber( - NumberOperationHint hint, const VectorSlotPair& feedback) { + NumberOperationHint hint, const FeedbackSource& feedback) { if (!feedback.IsValid()) { switch (hint) { case NumberOperationHint::kSignedSmall: @@ -1480,7 +1511,7 @@ const Operator* SimplifiedOperatorBuilder::EnsureWritableFastElements() { } const Operator* SimplifiedOperatorBuilder::MaybeGrowFastElements( - GrowFastElementsMode mode, const VectorSlotPair& feedback) { + GrowFastElementsMode mode, const FeedbackSource& feedback) { if (!feedback.IsValid()) { switch (mode) { case GrowFastElementsMode::kDoubleElements: @@ -1556,7 +1587,10 @@ bool operator==(CheckParameters const& lhs, CheckParameters const& rhs) { return lhs.feedback() == rhs.feedback(); } -size_t hash_value(CheckParameters const& p) { return hash_value(p.feedback()); } +size_t hash_value(CheckParameters const& p) { + FeedbackSource::Hash feedback_hash; + return feedback_hash(p.feedback()); +} std::ostream& operator<<(std::ostream& os, CheckParameters const& p) { return os << p.feedback(); @@ -1605,7 +1639,8 @@ bool operator==(CheckIfParameters const& lhs, CheckIfParameters const& rhs) { } size_t hash_value(CheckIfParameters const& p) { - return base::hash_combine(p.reason(), p.feedback()); + FeedbackSource::Hash feedback_hash; + return base::hash_combine(p.reason(), feedback_hash(p.feedback())); } std::ostream& operator<<(std::ostream& os, CheckIfParameters const& p) { diff --git a/deps/v8/src/compiler/simplified-operator.h b/deps/v8/src/compiler/simplified-operator.h index bdac796adf..58e9bfdffb 100644 --- a/deps/v8/src/compiler/simplified-operator.h +++ b/deps/v8/src/compiler/simplified-operator.h @@ -10,9 +10,9 
@@ #include "src/base/compiler-specific.h" #include "src/codegen/machine-type.h" #include "src/common/globals.h" +#include "src/compiler/feedback-source.h" #include "src/compiler/operator.h" #include "src/compiler/types.h" -#include "src/compiler/vector-slot-pair.h" #include "src/compiler/write-barrier-kind.h" #include "src/deoptimizer/deoptimize-reason.h" #include "src/handles/handles.h" @@ -44,6 +44,27 @@ size_t hash_value(LoadSensitivity); std::ostream& operator<<(std::ostream&, LoadSensitivity); +struct ConstFieldInfo { + // the map that introduced the const field, if any. An access is considered + // mutable iff the handle is null. + MaybeHandle<Map> owner_map; + + ConstFieldInfo() : owner_map(MaybeHandle<Map>()) {} + explicit ConstFieldInfo(Handle<Map> owner_map) : owner_map(owner_map) {} + + bool IsConst() const { return !owner_map.is_null(); } + + // No const field owner, i.e., a mutable field + static ConstFieldInfo None() { return ConstFieldInfo(); } +}; + +V8_EXPORT_PRIVATE bool operator==(ConstFieldInfo const&, ConstFieldInfo const&); + +size_t hash_value(ConstFieldInfo const&); + +V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, + ConstFieldInfo const&); + // An access descriptor for loads/stores of fixed structures like field // accesses of heap objects. Accesses from either tagged or untagged base // pointers are supported; untagging is done automatically during lowering. @@ -56,7 +77,9 @@ struct FieldAccess { MachineType machine_type; // machine type of the field. WriteBarrierKind write_barrier_kind; // write barrier hint. LoadSensitivity load_sensitivity; // load safety for poisoning. - PropertyConstness constness; // whether the field is assigned only once + ConstFieldInfo const_field_info; // the constness of this access, and the + // field owner map, if the access is const + bool is_store_in_literal; // originates from a kStoreInLiteral access FieldAccess() : base_is_tagged(kTaggedBase), @@ -65,13 +88,15 @@ struct FieldAccess { machine_type(MachineType::None()), write_barrier_kind(kFullWriteBarrier), load_sensitivity(LoadSensitivity::kUnsafe), - constness(PropertyConstness::kMutable) {} + const_field_info(ConstFieldInfo::None()), + is_store_in_literal(false) {} FieldAccess(BaseTaggedness base_is_tagged, int offset, MaybeHandle<Name> name, MaybeHandle<Map> map, Type type, MachineType machine_type, WriteBarrierKind write_barrier_kind, LoadSensitivity load_sensitivity = LoadSensitivity::kUnsafe, - PropertyConstness constness = PropertyConstness::kMutable) + ConstFieldInfo const_field_info = ConstFieldInfo::None(), + bool is_store_in_literal = false) : base_is_tagged(base_is_tagged), offset(offset), name(name), @@ -80,7 +105,8 @@ struct FieldAccess { machine_type(machine_type), write_barrier_kind(write_barrier_kind), load_sensitivity(load_sensitivity), - constness(constness) {} + const_field_info(const_field_info), + is_store_in_literal(is_store_in_literal) {} int tag() const { return base_is_tagged == kTaggedBase ? kHeapObjectTag : 0; } }; @@ -175,13 +201,13 @@ ConvertReceiverMode ConvertReceiverModeOf(Operator const* op) // fails, then speculation on that CallIC slot will be disabled. 
class CheckParameters final { public: - explicit CheckParameters(const VectorSlotPair& feedback) + explicit CheckParameters(const FeedbackSource& feedback) : feedback_(feedback) {} - VectorSlotPair const& feedback() const { return feedback_; } + FeedbackSource const& feedback() const { return feedback_; } private: - VectorSlotPair feedback_; + FeedbackSource feedback_; }; bool operator==(CheckParameters const&, CheckParameters const&); @@ -196,7 +222,7 @@ class CheckBoundsParameters final { public: enum Mode { kAbortOnOutOfBounds, kDeoptOnOutOfBounds }; - CheckBoundsParameters(const VectorSlotPair& feedback, Mode mode) + CheckBoundsParameters(const FeedbackSource& feedback, Mode mode) : check_parameters_(feedback), mode_(mode) {} Mode mode() const { return mode_; } @@ -219,15 +245,15 @@ CheckBoundsParameters const& CheckBoundsParametersOf(Operator const*) class CheckIfParameters final { public: explicit CheckIfParameters(DeoptimizeReason reason, - const VectorSlotPair& feedback) + const FeedbackSource& feedback) : reason_(reason), feedback_(feedback) {} - VectorSlotPair const& feedback() const { return feedback_; } + FeedbackSource const& feedback() const { return feedback_; } DeoptimizeReason reason() const { return reason_; } private: DeoptimizeReason reason_; - VectorSlotPair feedback_; + FeedbackSource feedback_; }; bool operator==(CheckIfParameters const&, CheckIfParameters const&); @@ -251,15 +277,15 @@ std::ostream& operator<<(std::ostream&, CheckFloat64HoleMode); class CheckFloat64HoleParameters { public: CheckFloat64HoleParameters(CheckFloat64HoleMode mode, - VectorSlotPair const& feedback) + FeedbackSource const& feedback) : mode_(mode), feedback_(feedback) {} CheckFloat64HoleMode mode() const { return mode_; } - VectorSlotPair const& feedback() const { return feedback_; } + FeedbackSource const& feedback() const { return feedback_; } private: CheckFloat64HoleMode mode_; - VectorSlotPair feedback_; + FeedbackSource feedback_; }; CheckFloat64HoleParameters const& CheckFloat64HoleParametersOf(Operator const*) @@ -286,15 +312,15 @@ V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, CheckTaggedInputMode); class CheckTaggedInputParameters { public: CheckTaggedInputParameters(CheckTaggedInputMode mode, - const VectorSlotPair& feedback) + const FeedbackSource& feedback) : mode_(mode), feedback_(feedback) {} CheckTaggedInputMode mode() const { return mode_; } - const VectorSlotPair& feedback() const { return feedback_; } + const FeedbackSource& feedback() const { return feedback_; } private: CheckTaggedInputMode mode_; - VectorSlotPair feedback_; + FeedbackSource feedback_; }; const CheckTaggedInputParameters& CheckTaggedInputParametersOf(const Operator*) @@ -324,15 +350,15 @@ CheckForMinusZeroMode CheckMinusZeroModeOf(const Operator*) class CheckMinusZeroParameters { public: CheckMinusZeroParameters(CheckForMinusZeroMode mode, - const VectorSlotPair& feedback) + const FeedbackSource& feedback) : mode_(mode), feedback_(feedback) {} CheckForMinusZeroMode mode() const { return mode_; } - const VectorSlotPair& feedback() const { return feedback_; } + const FeedbackSource& feedback() const { return feedback_; } private: CheckForMinusZeroMode mode_; - VectorSlotPair feedback_; + FeedbackSource feedback_; }; V8_EXPORT_PRIVATE const CheckMinusZeroParameters& CheckMinusZeroParametersOf( @@ -363,17 +389,17 @@ std::ostream& operator<<(std::ostream&, CheckMapsFlags); class CheckMapsParameters final { public: CheckMapsParameters(CheckMapsFlags flags, ZoneHandleSet<Map> const& maps, - const 
VectorSlotPair& feedback) + const FeedbackSource& feedback) : flags_(flags), maps_(maps), feedback_(feedback) {} CheckMapsFlags flags() const { return flags_; } ZoneHandleSet<Map> const& maps() const { return maps_; } - VectorSlotPair const& feedback() const { return feedback_; } + FeedbackSource const& feedback() const { return feedback_; } private: CheckMapsFlags const flags_; ZoneHandleSet<Map> const maps_; - VectorSlotPair const feedback_; + FeedbackSource const feedback_; }; bool operator==(CheckMapsParameters const&, CheckMapsParameters const&); @@ -406,15 +432,15 @@ std::ostream& operator<<(std::ostream&, GrowFastElementsMode); class GrowFastElementsParameters { public: GrowFastElementsParameters(GrowFastElementsMode mode, - const VectorSlotPair& feedback) + const FeedbackSource& feedback) : mode_(mode), feedback_(feedback) {} GrowFastElementsMode mode() const { return mode_; } - const VectorSlotPair& feedback() const { return feedback_; } + const FeedbackSource& feedback() const { return feedback_; } private: GrowFastElementsMode mode_; - VectorSlotPair feedback_; + FeedbackSource feedback_; }; bool operator==(const GrowFastElementsParameters&, @@ -490,15 +516,15 @@ V8_EXPORT_PRIVATE NumberOperationHint NumberOperationHintOf(const Operator* op) class NumberOperationParameters { public: NumberOperationParameters(NumberOperationHint hint, - const VectorSlotPair& feedback) + const FeedbackSource& feedback) : hint_(hint), feedback_(feedback) {} NumberOperationHint hint() const { return hint_; } - const VectorSlotPair& feedback() const { return feedback_; } + const FeedbackSource& feedback() const { return feedback_; } private: NumberOperationHint hint_; - VectorSlotPair feedback_; + FeedbackSource feedback_; }; size_t hash_value(NumberOperationParameters const&); @@ -692,7 +718,7 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final const Operator* FindOrderedHashMapEntryForInt32Key(); const Operator* SpeculativeToNumber(NumberOperationHint hint, - const VectorSlotPair& feedback); + const FeedbackSource& feedback); const Operator* StringToNumber(); const Operator* PlainPrimitiveToNumber(); @@ -730,67 +756,67 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final const Operator* CompareMaps(ZoneHandleSet<Map>); const Operator* MapGuard(ZoneHandleSet<Map> maps); - const Operator* CheckBounds(const VectorSlotPair& feedback); + const Operator* CheckBounds(const FeedbackSource& feedback); const Operator* CheckEqualsInternalizedString(); const Operator* CheckEqualsSymbol(); - const Operator* CheckFloat64Hole(CheckFloat64HoleMode, VectorSlotPair const&); + const Operator* CheckFloat64Hole(CheckFloat64HoleMode, FeedbackSource const&); const Operator* CheckHeapObject(); const Operator* CheckIf(DeoptimizeReason deoptimize_reason, - const VectorSlotPair& feedback = VectorSlotPair()); + const FeedbackSource& feedback = FeedbackSource()); const Operator* CheckInternalizedString(); const Operator* CheckMaps(CheckMapsFlags, ZoneHandleSet<Map>, - const VectorSlotPair& = VectorSlotPair()); + const FeedbackSource& = FeedbackSource()); const Operator* CheckNotTaggedHole(); - const Operator* CheckNumber(const VectorSlotPair& feedback); + const Operator* CheckNumber(const FeedbackSource& feedback); const Operator* CheckReceiver(); const Operator* CheckReceiverOrNullOrUndefined(); - const Operator* CheckSmi(const VectorSlotPair& feedback); - const Operator* CheckString(const VectorSlotPair& feedback); + const Operator* CheckSmi(const FeedbackSource& feedback); + const Operator* CheckString(const 
FeedbackSource& feedback); const Operator* CheckSymbol(); const Operator* CheckedFloat64ToInt32(CheckForMinusZeroMode, - const VectorSlotPair& feedback); + const FeedbackSource& feedback); const Operator* CheckedFloat64ToInt64(CheckForMinusZeroMode, - const VectorSlotPair& feedback); + const FeedbackSource& feedback); const Operator* CheckedInt32Add(); const Operator* CheckedInt32Div(); const Operator* CheckedInt32Mod(); const Operator* CheckedInt32Mul(CheckForMinusZeroMode); const Operator* CheckedInt32Sub(); const Operator* CheckedInt32ToCompressedSigned( - const VectorSlotPair& feedback); - const Operator* CheckedInt32ToTaggedSigned(const VectorSlotPair& feedback); - const Operator* CheckedInt64ToInt32(const VectorSlotPair& feedback); - const Operator* CheckedInt64ToTaggedSigned(const VectorSlotPair& feedback); - const Operator* CheckedTaggedSignedToInt32(const VectorSlotPair& feedback); + const FeedbackSource& feedback); + const Operator* CheckedInt32ToTaggedSigned(const FeedbackSource& feedback); + const Operator* CheckedInt64ToInt32(const FeedbackSource& feedback); + const Operator* CheckedInt64ToTaggedSigned(const FeedbackSource& feedback); + const Operator* CheckedTaggedSignedToInt32(const FeedbackSource& feedback); const Operator* CheckedTaggedToFloat64(CheckTaggedInputMode, - const VectorSlotPair& feedback); + const FeedbackSource& feedback); const Operator* CheckedTaggedToInt32(CheckForMinusZeroMode, - const VectorSlotPair& feedback); + const FeedbackSource& feedback); const Operator* CheckedTaggedToInt64(CheckForMinusZeroMode, - const VectorSlotPair& feedback); - const Operator* CheckedTaggedToTaggedPointer(const VectorSlotPair& feedback); - const Operator* CheckedTaggedToTaggedSigned(const VectorSlotPair& feedback); - const Operator* CheckBigInt(const VectorSlotPair& feedback); + const FeedbackSource& feedback); + const Operator* CheckedTaggedToTaggedPointer(const FeedbackSource& feedback); + const Operator* CheckedTaggedToTaggedSigned(const FeedbackSource& feedback); + const Operator* CheckBigInt(const FeedbackSource& feedback); const Operator* CheckedCompressedToTaggedPointer( - const VectorSlotPair& feedback); + const FeedbackSource& feedback); const Operator* CheckedCompressedToTaggedSigned( - const VectorSlotPair& feedback); + const FeedbackSource& feedback); const Operator* CheckedTaggedToCompressedPointer( - const VectorSlotPair& feedback); + const FeedbackSource& feedback); const Operator* CheckedTaggedToCompressedSigned( - const VectorSlotPair& feedback); + const FeedbackSource& feedback); const Operator* CheckedTruncateTaggedToWord32(CheckTaggedInputMode, - const VectorSlotPair& feedback); + const FeedbackSource& feedback); const Operator* CheckedUint32Div(); const Operator* CheckedUint32Mod(); - const Operator* CheckedUint32Bounds(const VectorSlotPair& feedback, + const Operator* CheckedUint32Bounds(const FeedbackSource& feedback, CheckBoundsParameters::Mode mode); - const Operator* CheckedUint32ToInt32(const VectorSlotPair& feedback); - const Operator* CheckedUint32ToTaggedSigned(const VectorSlotPair& feedback); - const Operator* CheckedUint64Bounds(const VectorSlotPair& feedback); - const Operator* CheckedUint64ToInt32(const VectorSlotPair& feedback); - const Operator* CheckedUint64ToTaggedSigned(const VectorSlotPair& feedback); + const Operator* CheckedUint32ToInt32(const FeedbackSource& feedback); + const Operator* CheckedUint32ToTaggedSigned(const FeedbackSource& feedback); + const Operator* CheckedUint64Bounds(const FeedbackSource& feedback); + const Operator* 
CheckedUint64ToInt32(const FeedbackSource& feedback); + const Operator* CheckedUint64ToTaggedSigned(const FeedbackSource& feedback); const Operator* ConvertReceiver(ConvertReceiverMode); @@ -839,7 +865,7 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final // maybe-grow-fast-elements object, elements, index, length const Operator* MaybeGrowFastElements(GrowFastElementsMode mode, - const VectorSlotPair& feedback); + const FeedbackSource& feedback); // transition-elements-kind object, from-map, to-map const Operator* TransitionElementsKind(ElementsTransition transition); diff --git a/deps/v8/src/compiler/store-store-elimination.cc b/deps/v8/src/compiler/store-store-elimination.cc index b71bcd7e66..bd53fb895f 100644 --- a/deps/v8/src/compiler/store-store-elimination.cc +++ b/deps/v8/src/compiler/store-store-elimination.cc @@ -10,7 +10,6 @@ #include "src/compiler/all-nodes.h" #include "src/compiler/js-graph.h" #include "src/compiler/node-properties.h" -#include "src/compiler/simplified-operator.h" namespace v8 { namespace internal { @@ -42,163 +41,7 @@ namespace compiler { #define DCHECK_EXTRA(condition, fmt, ...) ((void)0) #endif -// Store-store elimination. -// -// The aim of this optimization is to detect the following pattern in the -// effect graph: -// -// - StoreField[+24, kRepTagged](263, ...) -// -// ... lots of nodes from which the field at offset 24 of the object -// returned by node #263 cannot be observed ... -// -// - StoreField[+24, kRepTagged](263, ...) -// -// In such situations, the earlier StoreField cannot be observed, and can be -// eliminated. This optimization should work for any offset and input node, of -// course. -// -// The optimization also works across splits. It currently does not work for -// loops, because we tend to put a stack check in loops, and like deopts, -// stack checks can observe anything. - -// Assumption: every byte of a JS object is only ever accessed through one -// offset. For instance, byte 15 of a given object may be accessed using a -// two-byte read at offset 14, or a four-byte read at offset 12, but never -// both in the same program. -// -// This implementation needs all dead nodes removed from the graph, and the -// graph should be trimmed. - -namespace { - -using StoreOffset = uint32_t; - -struct UnobservableStore { - NodeId id_; - StoreOffset offset_; - - bool operator==(const UnobservableStore) const; - bool operator<(const UnobservableStore) const; -}; - -} // namespace - -namespace { - -// Instances of UnobservablesSet are immutable. They represent either a set of -// UnobservableStores, or the "unvisited empty set". -// -// We apply some sharing to save memory. The class UnobservablesSet is only a -// pointer wide, and a copy does not use any heap (or temp_zone) memory. Most -// changes to an UnobservablesSet might allocate in the temp_zone. -// -// The size of an instance should be the size of a pointer, plus additional -// space in the zone in the case of non-unvisited UnobservablesSets. Copying -// an UnobservablesSet allocates no memory. 
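The comment above describes an UnobservablesSet that is one pointer wide, immutable, and cheap to copy. A sketch under those constraints, with std::set standing in for ZoneSet and deliberately leaked heap allocations standing in for zone allocation:

#include <cassert>
#include <set>

class SmallSet {
 public:
  SmallSet() : set_(nullptr) {}  // the "unvisited" set
  bool IsUnvisited() const { return set_ == nullptr; }
  bool Contains(int v) const {
    return set_ != nullptr && set_->count(v) != 0;
  }
  // Copying is a pointer copy; Add allocates a fresh set instead of
  // mutating the shared one, so existing copies stay valid.
  SmallSet Add(int v) const {
    auto* copy = new std::set<int>(set_ ? *set_ : std::set<int>{});
    copy->insert(v);
    return SmallSet(copy);
  }

 private:
  explicit SmallSet(const std::set<int>* set) : set_(set) {}
  const std::set<int>* set_;
};

int main() {
  SmallSet a;
  SmallSet b = a.Add(1);
  assert(a.IsUnvisited() && b.Contains(1));  // a is untouched by b's Add
}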
-class UnobservablesSet final { - public: - static UnobservablesSet Unvisited(); - static UnobservablesSet VisitedEmpty(Zone* zone); - UnobservablesSet(); // unvisited - UnobservablesSet(const UnobservablesSet& other) V8_NOEXCEPT = default; - - UnobservablesSet Intersect(const UnobservablesSet& other, Zone* zone) const; - UnobservablesSet Add(UnobservableStore obs, Zone* zone) const; - UnobservablesSet RemoveSameOffset(StoreOffset off, Zone* zone) const; - - const ZoneSet<UnobservableStore>* set() const { return set_; } - - bool IsUnvisited() const { return set_ == nullptr; } - bool IsEmpty() const { return set_ == nullptr || set_->empty(); } - bool Contains(UnobservableStore obs) const { - return set_ != nullptr && (set_->find(obs) != set_->end()); - } - - bool operator==(const UnobservablesSet&) const; - bool operator!=(const UnobservablesSet&) const; - - private: - explicit UnobservablesSet(const ZoneSet<UnobservableStore>* set) - : set_(set) {} - const ZoneSet<UnobservableStore>* set_; -}; - -} // namespace - -namespace { - -class RedundantStoreFinder final { - public: - RedundantStoreFinder(JSGraph* js_graph, TickCounter* tick_counter, - Zone* temp_zone); - - void Find(); - - const ZoneSet<Node*>& to_remove_const() { return to_remove_; } - - void Visit(Node* node); - - private: - void VisitEffectfulNode(Node* node); - UnobservablesSet RecomputeUseIntersection(Node* node); - UnobservablesSet RecomputeSet(Node* node, const UnobservablesSet& uses); - static bool CannotObserveStoreField(Node* node); - - void MarkForRevisit(Node* node); - bool HasBeenVisited(Node* node); - - JSGraph* jsgraph() const { return jsgraph_; } - Isolate* isolate() { return jsgraph()->isolate(); } - Zone* temp_zone() const { return temp_zone_; } - ZoneVector<UnobservablesSet>& unobservable() { return unobservable_; } - UnobservablesSet& unobservable_for_id(NodeId id) { - DCHECK_LT(id, unobservable().size()); - return unobservable()[id]; - } - ZoneSet<Node*>& to_remove() { return to_remove_; } - - JSGraph* const jsgraph_; - TickCounter* const tick_counter_; - Zone* const temp_zone_; - - ZoneStack<Node*> revisit_; - ZoneVector<bool> in_revisit_; - // Maps node IDs to UnobservableNodeSets. - ZoneVector<UnobservablesSet> unobservable_; - ZoneSet<Node*> to_remove_; - const UnobservablesSet unobservables_visited_empty_; -}; - -// To safely cast an offset from a FieldAccess, which has a potentially wider -// range (namely int). 
-StoreOffset ToOffset(int offset) { - CHECK_LE(0, offset); - return static_cast<StoreOffset>(offset); -} - -StoreOffset ToOffset(const FieldAccess& access) { - return ToOffset(access.offset); -} - -unsigned int RepSizeOf(MachineRepresentation rep) { - return 1u << ElementSizeLog2Of(rep); -} -unsigned int RepSizeOf(FieldAccess access) { - return RepSizeOf(access.machine_type.representation()); -} - -bool AtMostTagged(FieldAccess access) { - return RepSizeOf(access) <= RepSizeOf(MachineRepresentation::kTagged); -} - -bool AtLeastTagged(FieldAccess access) { - return RepSizeOf(access) >= RepSizeOf(MachineRepresentation::kTagged); -} - -} // namespace - -void RedundantStoreFinder::Find() { +void StoreStoreElimination::RedundantStoreFinder::Find() { Visit(jsgraph()->graph()->end()); while (!revisit_.empty()) { @@ -222,7 +65,7 @@ void RedundantStoreFinder::Find() { #endif } -void RedundantStoreFinder::MarkForRevisit(Node* node) { +void StoreStoreElimination::RedundantStoreFinder::MarkForRevisit(Node* node) { DCHECK_LT(node->id(), in_revisit_.size()); if (!in_revisit_[node->id()]) { revisit_.push(node); @@ -230,7 +73,7 @@ void RedundantStoreFinder::MarkForRevisit(Node* node) { } } -bool RedundantStoreFinder::HasBeenVisited(Node* node) { +bool StoreStoreElimination::RedundantStoreFinder::HasBeenVisited(Node* node) { return !unobservable_for_id(node->id()).IsUnvisited(); } @@ -241,7 +84,6 @@ void StoreStoreElimination::Run(JSGraph* js_graph, TickCounter* tick_counter, finder.Find(); // Remove superfluous nodes - for (Node* node : finder.to_remove_const()) { if (FLAG_trace_store_elimination) { PrintF("StoreStoreElimination::Run: Eliminating node #%d:%s\n", @@ -254,11 +96,9 @@ void StoreStoreElimination::Run(JSGraph* js_graph, TickCounter* tick_counter, } } -// Recompute unobservables-set for a node. Will also mark superfluous nodes -// as to be removed. 
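The StoreField case of RecomputeSet, described above, reduces to a simple idea on a straight-line effect chain: walking backwards, a store is redundant when a later store to the same (object, offset) has been seen with nothing observing in between. A toy version (real graphs also carry loads, splits, and checks that the pass handles separately):

#include <cstdio>
#include <set>
#include <utility>
#include <vector>

struct Store { int object, offset; };

std::vector<bool> FindRedundant(const std::vector<Store>& chain) {
  std::set<std::pair<int, int>> unobservable;  // (object, offset) pairs
  std::vector<bool> redundant(chain.size(), false);
  for (size_t i = chain.size(); i-- > 0;) {
    auto key = std::make_pair(chain[i].object, chain[i].offset);
    if (unobservable.count(key)) {
      redundant[i] = true;       // a later store fully shadows this one
    } else {
      unobservable.insert(key);  // this store shadows earlier ones
    }
  }
  return redundant;
}

int main() {
  // Store into #263 at +24 twice, with nothing observing in between.
  std::vector<Store> chain = {{263, 24}, {263, 16}, {263, 24}};
  auto r = FindRedundant(chain);
  std::printf("%d %d %d\n", (int)r[0], (int)r[1], (int)r[2]);  // prints 1 0 0
}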
- -UnobservablesSet RedundantStoreFinder::RecomputeSet( - Node* node, const UnobservablesSet& uses) { +StoreStoreElimination::UnobservablesSet +StoreStoreElimination::RedundantStoreFinder::RecomputeSet( + Node* node, const StoreStoreElimination::UnobservablesSet& uses) { switch (node->op()->opcode()) { case IrOpcode::kStoreField: { Node* stored_to = node->InputAt(0); @@ -266,40 +106,21 @@ UnobservablesSet RedundantStoreFinder::RecomputeSet( StoreOffset offset = ToOffset(access); UnobservableStore observation = {stored_to->id(), offset}; - bool isNotObservable = uses.Contains(observation); + bool is_not_observable = uses.Contains(observation); - if (isNotObservable && AtMostTagged(access)) { + if (is_not_observable) { TRACE(" #%d is StoreField[+%d,%s](#%d), unobservable", node->id(), offset, MachineReprToString(access.machine_type.representation()), stored_to->id()); to_remove().insert(node); return uses; - } else if (isNotObservable && !AtMostTagged(access)) { - TRACE( - " #%d is StoreField[+%d,%s](#%d), repeated in future but too " - "big to optimize away", - node->id(), offset, - MachineReprToString(access.machine_type.representation()), - stored_to->id()); - return uses; - } else if (!isNotObservable && AtLeastTagged(access)) { + } else { TRACE(" #%d is StoreField[+%d,%s](#%d), observable, recording in set", node->id(), offset, MachineReprToString(access.machine_type.representation()), stored_to->id()); return uses.Add(observation, temp_zone()); - } else if (!isNotObservable && !AtLeastTagged(access)) { - TRACE( - " #%d is StoreField[+%d,%s](#%d), observable but too small to " - "record", - node->id(), offset, - MachineReprToString(access.machine_type.representation()), - stored_to->id()); - return uses; - } else { - UNREACHABLE(); } - break; } case IrOpcode::kLoadField: { Node* loaded_from = node->InputAt(0); @@ -314,7 +135,6 @@ UnobservablesSet RedundantStoreFinder::RecomputeSet( loaded_from->id(), offset); return uses.RemoveSameOffset(offset, temp_zone()); - break; } default: if (CannotObserveStoreField(node)) { @@ -330,36 +150,16 @@ UnobservablesSet RedundantStoreFinder::RecomputeSet( UNREACHABLE(); } -bool RedundantStoreFinder::CannotObserveStoreField(Node* node) { - return node->opcode() == IrOpcode::kLoadElement || - node->opcode() == IrOpcode::kLoad || - node->opcode() == IrOpcode::kStore || - node->opcode() == IrOpcode::kEffectPhi || - node->opcode() == IrOpcode::kStoreElement || - node->opcode() == IrOpcode::kUnsafePointerAdd || - node->opcode() == IrOpcode::kRetain; +bool StoreStoreElimination::RedundantStoreFinder::CannotObserveStoreField( + Node* node) { + IrOpcode::Value opcode = node->opcode(); + return opcode == IrOpcode::kLoadElement || opcode == IrOpcode::kLoad || + opcode == IrOpcode::kStore || opcode == IrOpcode::kEffectPhi || + opcode == IrOpcode::kStoreElement || + opcode == IrOpcode::kUnsafePointerAdd || opcode == IrOpcode::kRetain; } -// Initialize unobservable_ with js_graph->graph->NodeCount() empty sets. 
-RedundantStoreFinder::RedundantStoreFinder(JSGraph* js_graph, - TickCounter* tick_counter, - Zone* temp_zone) - : jsgraph_(js_graph), - tick_counter_(tick_counter), - temp_zone_(temp_zone), - revisit_(temp_zone), - in_revisit_(js_graph->graph()->NodeCount(), temp_zone), - unobservable_(js_graph->graph()->NodeCount(), - UnobservablesSet::Unvisited(), temp_zone), - to_remove_(temp_zone), - unobservables_visited_empty_(UnobservablesSet::VisitedEmpty(temp_zone)) {} - -void RedundantStoreFinder::Visit(Node* node) { - // All effectful nodes should be reachable from End via a sequence of - // control, then a sequence of effect edges. In VisitEffectfulNode we mark - // all effect inputs for revisiting (if they might have stale state); here - // we mark all control inputs at least once. - +void StoreStoreElimination::RedundantStoreFinder::Visit(Node* node) { if (!HasBeenVisited(node)) { for (int i = 0; i < node->op()->ControlInputCount(); i++) { Node* control_input = NodeProperties::GetControlInput(node, i); @@ -369,29 +169,32 @@ void RedundantStoreFinder::Visit(Node* node) { } } - bool isEffectful = (node->op()->EffectInputCount() >= 1); - if (isEffectful) { + bool is_effectful = node->op()->EffectInputCount() >= 1; + if (is_effectful) { + // mark all effect inputs for revisiting (if they might have stale state). VisitEffectfulNode(node); DCHECK(HasBeenVisited(node)); - } - - if (!HasBeenVisited(node)) { + } else if (!HasBeenVisited(node)) { // Mark as visited. unobservable_for_id(node->id()) = unobservables_visited_empty_; } } -void RedundantStoreFinder::VisitEffectfulNode(Node* node) { +void StoreStoreElimination::RedundantStoreFinder::VisitEffectfulNode( + Node* node) { if (HasBeenVisited(node)) { TRACE("- Revisiting: #%d:%s", node->id(), node->op()->mnemonic()); } - UnobservablesSet after_set = RecomputeUseIntersection(node); - UnobservablesSet before_set = RecomputeSet(node, after_set); + StoreStoreElimination::UnobservablesSet after_set = + RecomputeUseIntersection(node); + StoreStoreElimination::UnobservablesSet before_set = + RecomputeSet(node, after_set); DCHECK(!before_set.IsUnvisited()); - UnobservablesSet stored_for_node = unobservable_for_id(node->id()); + StoreStoreElimination::UnobservablesSet stores_for_node = + unobservable_for_id(node->id()); bool cur_set_changed = - (stored_for_node.IsUnvisited() || stored_for_node != before_set); + stores_for_node.IsUnvisited() || stores_for_node != before_set; if (!cur_set_changed) { // We will not be able to update the part of this chain above any more. // Exit. @@ -409,81 +212,78 @@ void RedundantStoreFinder::VisitEffectfulNode(Node* node) { } } -// Compute the intersection of the UnobservablesSets of all effect uses and -// return it. This function only works if {node} has an effect use. -// -// The result UnobservablesSet will always be visited. -UnobservablesSet RedundantStoreFinder::RecomputeUseIntersection(Node* node) { +StoreStoreElimination::UnobservablesSet +StoreStoreElimination::RedundantStoreFinder::RecomputeUseIntersection( + Node* node) { + // There were no effect uses. Break early. + if (node->op()->EffectOutputCount() == 0) { + IrOpcode::Value opcode = node->opcode(); + // List of opcodes that may end this effect chain. The opcodes are not + // important to the soundness of this optimization; this serves as a + // general sanity check. Add opcodes to this list as it suits you. + // + // Everything is observable after these opcodes; return the empty set. 
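RecomputeUseIntersection above intersects the unobservables of every effect use and returns the empty set when the chain ends. The same logic over plain integer sets, including the early break once the intersection is empty:

#include <algorithm>
#include <cassert>
#include <iterator>
#include <set>
#include <vector>

// A store is unobservable after a node only if it is unobservable along
// *every* effect use, hence the intersection. With no effect uses (Return,
// Throw, ...) everything is observable and the empty set is returned.
using Obs = std::set<int>;

Obs IntersectUses(const std::vector<Obs>& uses) {
  if (uses.empty()) return Obs{};  // end of the effect chain
  Obs cur = uses[0];
  for (size_t i = 1; i < uses.size() && !cur.empty(); ++i) {
    Obs next;
    std::set_intersection(cur.begin(), cur.end(), uses[i].begin(),
                          uses[i].end(), std::inserter(next, next.end()));
    cur = std::move(next);  // break fast once the intersection is empty
  }
  return cur;
}

int main() {
  assert((IntersectUses({{1, 2, 3}, {2, 3, 4}}) == Obs{2, 3}));
  assert(IntersectUses({}).empty());
}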
+ DCHECK_EXTRA( + opcode == IrOpcode::kReturn || opcode == IrOpcode::kTerminate || + opcode == IrOpcode::kDeoptimize || opcode == IrOpcode::kThrow, + "for #%d:%s", node->id(), node->op()->mnemonic()); + USE(opcode); + + return unobservables_visited_empty_; + } + // {first} == true indicates that we haven't looked at any elements yet. // {first} == false indicates that cur_set is the intersection of at least one // thing. - bool first = true; - UnobservablesSet cur_set = UnobservablesSet::Unvisited(); // irrelevant - + StoreStoreElimination::UnobservablesSet cur_set = + StoreStoreElimination::UnobservablesSet::Unvisited(); // irrelevant for (Edge edge : node->use_edges()) { - // Skip non-effect edges if (!NodeProperties::IsEffectEdge(edge)) { continue; } + // Intersect with the new use node. Node* use = edge.from(); - UnobservablesSet new_set = unobservable_for_id(use->id()); - // Include new_set in the intersection. + StoreStoreElimination::UnobservablesSet new_set = + unobservable_for_id(use->id()); if (first) { - // Intersection of a one-element set is that one element first = false; cur_set = new_set; + if (cur_set.IsUnvisited()) { + cur_set = unobservables_visited_empty_; + } } else { - // Take the intersection of cur_set and new_set. - cur_set = cur_set.Intersect(new_set, temp_zone()); + cur_set = + cur_set.Intersect(new_set, unobservables_visited_empty_, temp_zone()); } - } - if (first) { - // There were no effect uses. - auto opcode = node->op()->opcode(); - // List of opcodes that may end this effect chain. The opcodes are not - // important to the soundness of this optimization; this serves as a - // general sanity check. Add opcodes to this list as it suits you. - // - // Everything is observable after these opcodes; return the empty set. - DCHECK_EXTRA( - opcode == IrOpcode::kReturn || opcode == IrOpcode::kTerminate || - opcode == IrOpcode::kDeoptimize || opcode == IrOpcode::kThrow, - "for #%d:%s", node->id(), node->op()->mnemonic()); - USE(opcode); // silence warning about unused variable in release mode - - return unobservables_visited_empty_; - } else { - if (cur_set.IsUnvisited()) { - cur_set = unobservables_visited_empty_; + // Break fast for the empty set since the intersection will always be empty. + if (cur_set.IsEmpty()) { + break; } - - return cur_set; } -} -UnobservablesSet UnobservablesSet::Unvisited() { return UnobservablesSet(); } + DCHECK(!cur_set.IsUnvisited()); + return cur_set; +} -UnobservablesSet::UnobservablesSet() : set_(nullptr) {} +StoreStoreElimination::UnobservablesSet::UnobservablesSet() : set_(nullptr) {} -UnobservablesSet UnobservablesSet::VisitedEmpty(Zone* zone) { - // Create a new empty UnobservablesSet. This allocates in the zone, and - // can probably be optimized to use a global singleton. +StoreStoreElimination::UnobservablesSet +StoreStoreElimination::UnobservablesSet::VisitedEmpty(Zone* zone) { ZoneSet<UnobservableStore>* empty_set = new (zone->New(sizeof(ZoneSet<UnobservableStore>))) ZoneSet<UnobservableStore>(zone); - return UnobservablesSet(empty_set); + return StoreStoreElimination::UnobservablesSet(empty_set); } -// Computes the intersection of two UnobservablesSets. May return -// UnobservablesSet::Unvisited() instead of an empty UnobservablesSet for -// speed. 
-UnobservablesSet UnobservablesSet::Intersect(const UnobservablesSet& other, - Zone* zone) const { +StoreStoreElimination::UnobservablesSet +StoreStoreElimination::UnobservablesSet::Intersect( + const StoreStoreElimination::UnobservablesSet& other, + const StoreStoreElimination::UnobservablesSet& empty, Zone* zone) const { if (IsEmpty() || other.IsEmpty()) { - return Unvisited(); + return empty; } else { ZoneSet<UnobservableStore>* intersection = new (zone->New(sizeof(ZoneSet<UnobservableStore>))) @@ -493,14 +293,15 @@ UnobservablesSet UnobservablesSet::Intersect(const UnobservablesSet& other, other.set()->end(), std::inserter(*intersection, intersection->end())); - return UnobservablesSet(intersection); + return StoreStoreElimination::UnobservablesSet(intersection); } } -UnobservablesSet UnobservablesSet::Add(UnobservableStore obs, - Zone* zone) const { - bool present = (set()->find(obs) != set()->end()); - if (present) { +StoreStoreElimination::UnobservablesSet +StoreStoreElimination::UnobservablesSet::Add(UnobservableStore obs, + Zone* zone) const { + bool found = set()->find(obs) != set()->end(); + if (found) { return *this; } else { // Make a new empty set. @@ -514,12 +315,13 @@ UnobservablesSet UnobservablesSet::Add(UnobservableStore obs, DCHECK(inserted); USE(inserted); // silence warning about unused variable - return UnobservablesSet(new_set); + return StoreStoreElimination::UnobservablesSet(new_set); } } -UnobservablesSet UnobservablesSet::RemoveSameOffset(StoreOffset offset, - Zone* zone) const { +StoreStoreElimination::UnobservablesSet +StoreStoreElimination::UnobservablesSet::RemoveSameOffset(StoreOffset offset, + Zone* zone) const { // Make a new empty set. ZoneSet<UnobservableStore>* new_set = new (zone->New(sizeof(ZoneSet<UnobservableStore>))) @@ -531,30 +333,7 @@ UnobservablesSet UnobservablesSet::RemoveSameOffset(StoreOffset offset, } } - return UnobservablesSet(new_set); -} - -// Used for debugging. -bool UnobservablesSet::operator==(const UnobservablesSet& other) const { - if (IsUnvisited() || other.IsUnvisited()) { - return IsEmpty() && other.IsEmpty(); - } else { - // Both pointers guaranteed not to be nullptrs. - return *set() == *other.set(); - } -} - -bool UnobservablesSet::operator!=(const UnobservablesSet& other) const { - return !(*this == other); -} - -bool UnobservableStore::operator==(const UnobservableStore other) const { - return (id_ == other.id_) && (offset_ == other.offset_); -} - - -bool UnobservableStore::operator<(const UnobservableStore other) const { - return (id_ < other.id_) || (id_ == other.id_ && offset_ < other.offset_); + return StoreStoreElimination::UnobservablesSet(new_set); } #undef TRACE diff --git a/deps/v8/src/compiler/store-store-elimination.h b/deps/v8/src/compiler/store-store-elimination.h index 646640a310..7704938fc0 100644 --- a/deps/v8/src/compiler/store-store-elimination.h +++ b/deps/v8/src/compiler/store-store-elimination.h @@ -7,6 +7,7 @@ #include "src/compiler/common-operator.h" #include "src/compiler/js-graph.h" +#include "src/compiler/simplified-operator.h" #include "src/zone/zone-containers.h" namespace v8 { @@ -16,10 +17,203 @@ class TickCounter; namespace compiler { +// Store-store elimination. +// +// The aim of this optimization is to detect the following pattern in the +// effect graph: +// +// - StoreField[+24, kRepTagged](263, ...) +// +// ... lots of nodes from which the field at offset 24 of the object +// returned by node #263 cannot be observed ... +// +// - StoreField[+24, kRepTagged](263, ...) 
+// +// In such situations, the earlier StoreField cannot be observed, and can be +// eliminated. This optimization should work for any offset and input node, of +// course. +// +// The optimization also works across splits. It currently does not work for +// loops, because we tend to put a stack check in loops, and like deopts, +// stack checks can observe anything. + +// Assumption: every byte of a JS object is only ever accessed through one +// offset. For instance, byte 15 of a given object may be accessed using a +// two-byte read at offset 14, or a four-byte read at offset 12, but never +// both in the same program. +// +// This implementation needs all dead nodes removed from the graph, and the +// graph should be trimmed. class StoreStoreElimination final { public: static void Run(JSGraph* js_graph, TickCounter* tick_counter, Zone* temp_zone); + + private: + using StoreOffset = uint32_t; + + struct UnobservableStore { + NodeId id_; + StoreOffset offset_; + + bool operator==(const UnobservableStore other) const { + return (id_ == other.id_) && (offset_ == other.offset_); + } + + bool operator<(const UnobservableStore other) const { + return (id_ < other.id_) || (id_ == other.id_ && offset_ < other.offset_); + } + }; + + // Instances of UnobservablesSet are immutable. They represent either a set of + // UnobservableStores, or the "unvisited empty set". + // + // We apply some sharing to save memory. The class UnobservablesSet is only a + // pointer wide, and a copy does not use any heap (or temp_zone) memory. Most + // changes to an UnobservablesSet might allocate in the temp_zone. + // + // The size of an instance should be the size of a pointer, plus additional + // space in the zone in the case of non-unvisited UnobservablesSets. Copying + // an UnobservablesSet allocates no memory. + class UnobservablesSet final { + public: + // Creates a new UnobservablesSet, with the null set. + static UnobservablesSet Unvisited() { return UnobservablesSet(); } + + // Create a new empty UnobservablesSet. This allocates in the zone, and + // can probably be optimized to use a global singleton. + static UnobservablesSet VisitedEmpty(Zone* zone); + UnobservablesSet(const UnobservablesSet& other) V8_NOEXCEPT = default; + + // Computes the intersection of two UnobservablesSets. If one of the sets is + // empty, will return empty. + UnobservablesSet Intersect(const UnobservablesSet& other, + const UnobservablesSet& empty, Zone* zone) const; + + // Returns a set that it is the current one, plus the observation obs passed + // as parameter. If said obs it's already in the set, we don't have to + // create a new one. + UnobservablesSet Add(UnobservableStore obs, Zone* zone) const; + + // Returns a set that it is the current one, except for all of the + // observations with offset off. This is done by creating a new set and + // copying all observations with different offsets. + // This can probably be done better if the observations are stored first by + // offset and then by node. + // We are removing all nodes with offset off since different nodes may + // alias one another, and we currently we don't have the means to know if + // two nodes are definitely the same value. 
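RemoveSameOffset, documented above, must drop every observation at the given offset because two nodes may alias the same object. A toy version over (node id, offset) pairs:

#include <cassert>
#include <set>
#include <utility>

using Obs = std::set<std::pair<int, int>>;  // (node id, offset)

// Keep only observations at other offsets; anything at `offset` may alias
// the object the load just read from, so it must be forgotten.
Obs RemoveSameOffset(const Obs& in, int offset) {
  Obs out;
  for (const auto& o : in) {
    if (o.second != offset) out.insert(o);
  }
  return out;
}

int main() {
  Obs s = {{263, 24}, {264, 24}, {263, 16}};
  Obs t = RemoveSameOffset(s, 24);
  assert((t == Obs{{263, 16}}));  // both +24 entries are dropped
}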
+ UnobservablesSet RemoveSameOffset(StoreOffset off, Zone* zone) const; + + const ZoneSet<UnobservableStore>* set() const { return set_; } + + bool IsUnvisited() const { return set_ == nullptr; } + bool IsEmpty() const { return set_ == nullptr || set_->empty(); } + bool Contains(UnobservableStore obs) const { + return set_ != nullptr && (set_->find(obs) != set_->end()); + } + + bool operator==(const UnobservablesSet& other) const { + if (IsUnvisited() || other.IsUnvisited()) { + return IsEmpty() && other.IsEmpty(); + } else { + // Both pointers guaranteed not to be nullptrs. + return *set() == *(other.set()); + } + } + + bool operator!=(const UnobservablesSet& other) const { + return !(*this == other); + } + + private: + UnobservablesSet(); + explicit UnobservablesSet(const ZoneSet<UnobservableStore>* set) + : set_(set) {} + const ZoneSet<UnobservableStore>* set_; + }; + + class RedundantStoreFinder final { + public: + // Note that we Initialize unobservable_ with js_graph->graph->NodeCount() + // amount of empty sets. + RedundantStoreFinder(JSGraph* js_graph, TickCounter* tick_counter, + Zone* temp_zone) + : jsgraph_(js_graph), + tick_counter_(tick_counter), + temp_zone_(temp_zone), + revisit_(temp_zone), + in_revisit_(js_graph->graph()->NodeCount(), temp_zone), + unobservable_(js_graph->graph()->NodeCount(), + StoreStoreElimination::UnobservablesSet::Unvisited(), + temp_zone), + to_remove_(temp_zone), + unobservables_visited_empty_( + StoreStoreElimination::UnobservablesSet::VisitedEmpty( + temp_zone)) {} + + // Crawls from the end of the graph to the beginning, with the objective of + // finding redundant stores. + void Find(); + + // This method is used for const correctness to go through the final list of + // redundant stores that are replaced on the graph. + const ZoneSet<Node*>& to_remove_const() { return to_remove_; } + + private: + // Assumption: All effectful nodes are reachable from End via a sequence of + // control, then a sequence of effect edges. + // Visit goes through the control chain, visiting effectful nodes that it + // encounters. + void Visit(Node* node); + + // Marks effect inputs for visiting, if we are able to update this path of + // the graph. + void VisitEffectfulNode(Node* node); + + // Compute the intersection of the UnobservablesSets of all effect uses and + // return it. + // The result UnobservablesSet will never be null. + UnobservablesSet RecomputeUseIntersection(Node* node); + + // Recompute unobservables-set for a node. Will also mark superfluous nodes + // as to be removed. + UnobservablesSet RecomputeSet(Node* node, const UnobservablesSet& uses); + + // Returns true if node's opcode cannot observe StoreFields. + static bool CannotObserveStoreField(Node* node); + + void MarkForRevisit(Node* node); + bool HasBeenVisited(Node* node); + + // To safely cast an offset from a FieldAccess, which has a potentially + // wider range (namely int). 
+ StoreOffset ToOffset(const FieldAccess& access) { + DCHECK_GE(access.offset, 0); + return static_cast<StoreOffset>(access.offset); + } + + JSGraph* jsgraph() const { return jsgraph_; } + Isolate* isolate() { return jsgraph()->isolate(); } + Zone* temp_zone() const { return temp_zone_; } + UnobservablesSet& unobservable_for_id(NodeId id) { + DCHECK_LT(id, unobservable_.size()); + return unobservable_[id]; + } + ZoneSet<Node*>& to_remove() { return to_remove_; } + + JSGraph* const jsgraph_; + TickCounter* const tick_counter_; + Zone* const temp_zone_; + + ZoneStack<Node*> revisit_; + ZoneVector<bool> in_revisit_; + + // Maps node IDs to UnobservableNodeSets. + ZoneVector<UnobservablesSet> unobservable_; + ZoneSet<Node*> to_remove_; + const UnobservablesSet unobservables_visited_empty_; + }; }; } // namespace compiler diff --git a/deps/v8/src/compiler/typer.cc b/deps/v8/src/compiler/typer.cc index 5dbbad3dcd..6ba1b39431 100644 --- a/deps/v8/src/compiler/typer.cc +++ b/deps/v8/src/compiler/typer.cc @@ -10,6 +10,7 @@ #include "src/codegen/tick-counter.h" #include "src/compiler/common-operator.h" #include "src/compiler/graph-reducer.h" +#include "src/compiler/js-heap-broker.h" #include "src/compiler/js-operator.h" #include "src/compiler/linkage.h" #include "src/compiler/loop-variable-optimizer.h" @@ -787,7 +788,13 @@ Type Typer::Visitor::TypeParameter(Node* node) { return Type::NonInternal(); } -Type Typer::Visitor::TypeOsrValue(Node* node) { return Type::Any(); } +Type Typer::Visitor::TypeOsrValue(Node* node) { + if (OsrValueIndexOf(node->op()) == Linkage::kOsrContextSpillSlotIndex) { + return Type::OtherInternal(); + } else { + return Type::Any(); + } +} Type Typer::Visitor::TypeRetain(Node* node) { UNREACHABLE(); } @@ -999,10 +1006,6 @@ Type Typer::Visitor::TypeTypedObjectState(Node* node) { Type Typer::Visitor::TypeCall(Node* node) { return Type::Any(); } -Type Typer::Visitor::TypeCallWithCallerSavedRegisters(Node* node) { - UNREACHABLE(); -} - Type Typer::Visitor::TypeProjection(Node* node) { Type const type = Operand(node, 0); if (type.Is(Type::None())) return Type::None(); @@ -1524,6 +1527,10 @@ Type Typer::Visitor::JSCallTyper(Type fun, Typer* t) { return Type::NonInternal(); } JSFunctionRef function = fun.AsHeapConstant()->Ref().AsJSFunction(); + if (!function.serialized()) { + TRACE_BROKER_MISSING(t->broker(), "data for function " << function); + return Type::NonInternal(); + } if (!function.shared().HasBuiltinId()) { return Type::NonInternal(); } @@ -1564,6 +1571,7 @@ Type Typer::Visitor::JSCallTyper(Type fun, Typer* t) { case Builtins::kMathPow: case Builtins::kMathMax: case Builtins::kMathMin: + case Builtins::kMathHypot: return Type::Number(); case Builtins::kMathImul: return Type::Signed32(); @@ -2364,6 +2372,8 @@ Type Typer::Visitor::TypeConstant(Handle<Object> value) { return Type::NewConstant(typer_->broker(), value, zone()); } +Type Typer::Visitor::TypeJSGetIterator(Node* node) { return Type::Any(); } + } // namespace compiler } // namespace internal } // namespace v8 diff --git a/deps/v8/src/compiler/types.cc b/deps/v8/src/compiler/types.cc index d4267a75fe..018c54c3d5 100644 --- a/deps/v8/src/compiler/types.cc +++ b/deps/v8/src/compiler/types.cc @@ -324,7 +324,6 @@ Type::bitset BitsetType::Lub(const MapRefLike& map) { // Remaining instance types are unsupported for now. If any of them do // require bit set types, they should get kOtherInternal. 
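The JSCallTyper hunk above adds a bail-out for unserialized functions and types Builtins::kMathHypot as Number. A miniature of that dispatch shape (illustrative names, not V8's API):

#include <cassert>
#include <string>

enum class Ty { kNumber, kSigned32, kNonInternal };

// Each known builtin maps to a result type; anything unknown, or a callee
// the broker has no data for, falls back to a conservative type.
Ty TypeOfBuiltinCall(const std::string& name) {
  if (name == "MathPow" || name == "MathMax" || name == "MathMin" ||
      name == "MathHypot") {
    return Ty::kNumber;
  }
  if (name == "MathImul") return Ty::kSigned32;
  return Ty::kNonInternal;  // unknown or unserialized callee
}

int main() {
  assert(TypeOfBuiltinCall("MathHypot") == Ty::kNumber);
  assert(TypeOfBuiltinCall("Unknown") == Ty::kNonInternal);
}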
- case MUTABLE_HEAP_NUMBER_TYPE: case FREE_SPACE_TYPE: case FILLER_TYPE: case ACCESS_CHECK_INFO_TYPE: @@ -365,7 +364,6 @@ Type::bitset BitsetType::Lub(const MapRefLike& map) { case PROMISE_FULFILL_REACTION_JOB_TASK_TYPE: case PROMISE_REJECT_REACTION_JOB_TASK_TYPE: case PROMISE_RESOLVE_THENABLE_JOB_TASK_TYPE: - case FINALIZATION_GROUP_CLEANUP_JOB_TASK_TYPE: #define MAKE_TORQUE_CLASS_TYPE(V) case V: TORQUE_DEFINED_INSTANCE_TYPES(MAKE_TORQUE_CLASS_TYPE) #undef MAKE_TORQUE_CLASS_TYPE diff --git a/deps/v8/src/compiler/vector-slot-pair.cc b/deps/v8/src/compiler/vector-slot-pair.cc deleted file mode 100644 index 97f53648a4..0000000000 --- a/deps/v8/src/compiler/vector-slot-pair.cc +++ /dev/null @@ -1,41 +0,0 @@ -// Copyright 2017 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#include "src/compiler/vector-slot-pair.h" - -#include "src/objects/feedback-vector.h" - -namespace v8 { -namespace internal { - -VectorSlotPair::VectorSlotPair() = default; - -int VectorSlotPair::index() const { - return vector_.is_null() ? -1 : FeedbackVector::GetIndex(slot_); -} - -bool operator==(VectorSlotPair const& lhs, VectorSlotPair const& rhs) { - return lhs.slot() == rhs.slot() && - lhs.vector().location() == rhs.vector().location() && - lhs.ic_state() == rhs.ic_state(); -} - -bool operator!=(VectorSlotPair const& lhs, VectorSlotPair const& rhs) { - return !(lhs == rhs); -} - -std::ostream& operator<<(std::ostream& os, const VectorSlotPair& p) { - if (p.IsValid()) { - return os << "VectorSlotPair(" << p.slot() << ", " - << InlineCacheState2String(p.ic_state()) << ")"; - } - return os << "VectorSlotPair(INVALID)"; -} - -size_t hash_value(VectorSlotPair const& p) { - return base::hash_combine(p.slot(), p.vector().location(), p.ic_state()); -} - -} // namespace internal -} // namespace v8 diff --git a/deps/v8/src/compiler/vector-slot-pair.h b/deps/v8/src/compiler/vector-slot-pair.h deleted file mode 100644 index 9944544a13..0000000000 --- a/deps/v8/src/compiler/vector-slot-pair.h +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright 2017 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#ifndef V8_COMPILER_VECTOR_SLOT_PAIR_H_ -#define V8_COMPILER_VECTOR_SLOT_PAIR_H_ - -#include "src/common/globals.h" -#include "src/handles/handles.h" -#include "src/utils/utils.h" - -namespace v8 { -namespace internal { - -class FeedbackVector; - -// Defines a pair of {FeedbackVector} and {FeedbackSlot}, which -// is used to access the type feedback for a certain {Node}. 
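An editorial aside on the deletion above and below: VectorSlotPair was a value type, so its definitions had to keep hash_value consistent with operator== (equal pairs must hash equally) for the pair to serve as a key in operator and node caches. Any replacement for this class has to preserve the same contract, which the deleted code states compactly:

// Quoted from the deleted vector-slot-pair.cc: equality over (slot, vector
// location, IC state), and a hash combining exactly the same three fields.
bool operator==(VectorSlotPair const& lhs, VectorSlotPair const& rhs) {
  return lhs.slot() == rhs.slot() &&
         lhs.vector().location() == rhs.vector().location() &&
         lhs.ic_state() == rhs.ic_state();
}

size_t hash_value(VectorSlotPair const& p) {
  return base::hash_combine(p.slot(), p.vector().location(), p.ic_state());
}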
-class V8_EXPORT_PRIVATE VectorSlotPair { - public: - VectorSlotPair(); - VectorSlotPair(Handle<FeedbackVector> vector, FeedbackSlot slot, - InlineCacheState ic_state) - : vector_(vector), slot_(slot), ic_state_(ic_state) {} - - bool IsValid() const { return !vector_.is_null() && !slot_.IsInvalid(); } - - Handle<FeedbackVector> vector() const { return vector_; } - FeedbackSlot slot() const { return slot_; } - InlineCacheState ic_state() const { return ic_state_; } - - int index() const; - - private: - Handle<FeedbackVector> vector_; - FeedbackSlot slot_; - InlineCacheState ic_state_ = UNINITIALIZED; -}; - -bool operator==(VectorSlotPair const&, VectorSlotPair const&); -bool operator!=(VectorSlotPair const&, VectorSlotPair const&); - -V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os, - VectorSlotPair const&); - -size_t hash_value(VectorSlotPair const&); - -} // namespace internal -} // namespace v8 - -#endif // V8_COMPILER_VECTOR_SLOT_PAIR_H_ diff --git a/deps/v8/src/compiler/verifier.cc b/deps/v8/src/compiler/verifier.cc index d3d4d54ea2..608d6ffee6 100644 --- a/deps/v8/src/compiler/verifier.cc +++ b/deps/v8/src/compiler/verifier.cc @@ -580,7 +580,6 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) { // TODO(jarin): what are the constraints on these? break; case IrOpcode::kCall: - case IrOpcode::kCallWithCallerSavedRegisters: // TODO(rossberg): what are the constraints on these? break; case IrOpcode::kTailCall: @@ -766,6 +765,11 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) { CheckNotTyped(node); CHECK(StoreNamedOwnParametersOf(node->op()).feedback().IsValid()); break; + case IrOpcode::kJSGetIterator: + // Type can be anything + CheckValueInputIs(node, 0, Type::Any()); + CheckTypeIs(node, Type::Any()); + break; case IrOpcode::kJSStoreDataPropertyInLiteral: case IrOpcode::kJSStoreInArrayLiteral: // Type is empty. 
@@ -1800,6 +1804,8 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) { case IrOpcode::kBitcastTaggedSignedToWord: case IrOpcode::kBitcastWordToTagged: case IrOpcode::kBitcastWordToTaggedSigned: + case IrOpcode::kBitcastWord32ToCompressedSigned: + case IrOpcode::kBitcastCompressedSignedToWord32: case IrOpcode::kChangeInt32ToInt64: case IrOpcode::kChangeUint32ToUint64: case IrOpcode::kChangeTaggedToCompressed: @@ -1838,7 +1844,6 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) { case IrOpcode::kTaggedPoisonOnSpeculation: case IrOpcode::kWord32PoisonOnSpeculation: case IrOpcode::kWord64PoisonOnSpeculation: - case IrOpcode::kLoadStackPointer: case IrOpcode::kLoadFramePointer: case IrOpcode::kLoadParentFramePointer: case IrOpcode::kUnalignedLoad: @@ -1877,6 +1882,7 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) { case IrOpcode::kSignExtendWord16ToInt64: case IrOpcode::kSignExtendWord32ToInt64: case IrOpcode::kStaticAssert: + case IrOpcode::kStackPointerGreaterThan: #define SIMD_MACHINE_OP_CASE(Name) case IrOpcode::k##Name: MACHINE_SIMD_OP_LIST(SIMD_MACHINE_OP_CASE) diff --git a/deps/v8/src/compiler/wasm-compiler.cc b/deps/v8/src/compiler/wasm-compiler.cc index 2da7177ece..28f9943e59 100644 --- a/deps/v8/src/compiler/wasm-compiler.cc +++ b/deps/v8/src/compiler/wasm-compiler.cc @@ -25,12 +25,11 @@ #include "src/compiler/graph-visualizer.h" #include "src/compiler/graph.h" #include "src/compiler/int64-lowering.h" -#include "src/compiler/js-graph.h" -#include "src/compiler/js-operator.h" #include "src/compiler/linkage.h" #include "src/compiler/machine-operator.h" #include "src/compiler/node-matchers.h" #include "src/compiler/node-origin-table.h" +#include "src/compiler/node-properties.h" #include "src/compiler/pipeline.h" #include "src/compiler/simd-scalar-lowering.h" #include "src/compiler/zone-stats.h" @@ -259,25 +258,25 @@ Node* WasmGraphBuilder::Merge(unsigned count, Node** controls) { Node* WasmGraphBuilder::Phi(wasm::ValueType type, unsigned count, Node** vals, Node* control) { DCHECK(IrOpcode::IsMergeOpcode(control->opcode())); - Node** buf = Realloc(vals, count, count + 1); + Vector<Node*> buf = Realloc(vals, count, count + 1); buf[count] = control; return graph()->NewNode( mcgraph()->common()->Phi(wasm::ValueTypes::MachineRepresentationFor(type), count), - count + 1, buf); + count + 1, buf.begin()); } Node* WasmGraphBuilder::EffectPhi(unsigned count, Node** effects, Node* control) { DCHECK(IrOpcode::IsMergeOpcode(control->opcode())); - Node** buf = Realloc(effects, count, count + 1); + Vector<Node*> buf = Realloc(effects, count, count + 1); buf[count] = control; return graph()->NewNode(mcgraph()->common()->EffectPhi(count), count + 1, - buf); + buf.begin()); } Node* WasmGraphBuilder::RefNull() { - Node* isolate_root = LOAD_INSTANCE_FIELD(IsolateRoot, MachineType::Pointer()); + Node* isolate_root = BuildLoadIsolateRoot(); return LOAD_TAGGED_POINTER( isolate_root, IsolateData::root_slot_offset(RootIndex::kNullValue)); } @@ -291,10 +290,17 @@ Node* WasmGraphBuilder::RefFunc(uint32_t function_index) { } Node* WasmGraphBuilder::NoContextConstant() { - // TODO(titzer): avoiding a dependency on JSGraph here. Refactor. return mcgraph()->IntPtrConstant(0); } +Node* WasmGraphBuilder::BuildLoadIsolateRoot() { + // The IsolateRoot is loaded from the instance node so that the generated + // code is Isolate independent. This can be overridden by setting a specific + // node in {isolate_root_node_} beforehand. 
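The comment above states the invariant behind the new BuildLoadIsolateRoot helper whose body follows: by default the isolate root is loaded from the instance, keeping generated code Isolate-independent, but a wrapper tied to one specific Isolate may pre-set {isolate_root_node_} to an embedded constant. A minimal sketch of the set-once pattern (SetOncePointerSketch is a simplified stand-in for V8's SetOncePointer, not the real implementation):

struct Node;

// A slot that may be populated at most once, before any reader runs.
class SetOncePointerSketch {
 public:
  bool is_set() const { return value_ != nullptr; }
  Node* get() const { return value_; }
  void set(Node* value) { value_ = value; }
 private:
  Node* value_ = nullptr;
};

// Mirrors the shape of BuildLoadIsolateRoot below: prefer the pre-set
// constant, otherwise fall back to an isolate-independent instance load.
Node* LoadIsolateRoot(const SetOncePointerSketch& slot,
                      Node* (*load_from_instance)()) {
  if (slot.is_set()) return slot.get();
  return load_from_instance();
}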
+ if (isolate_root_node_.is_set()) return isolate_root_node_.get(); + return LOAD_INSTANCE_FIELD(IsolateRoot, MachineType::Pointer()); +} + Node* WasmGraphBuilder::Uint32Constant(uint32_t value) { return mcgraph()->Uint32Constant(value); } @@ -320,10 +326,6 @@ void WasmGraphBuilder::StackCheck(wasm::WasmCodePosition position, if (effect == nullptr) effect = effect_; if (control == nullptr) control = control_; - // This instruction sequence is matched in the instruction selector to - // load the stack pointer directly on some platforms. Hence, when modifying - // please also fix WasmStackCheckMatcher in node-matchers.h - Node* limit_address = graph()->NewNode( mcgraph()->machine()->Load(MachineType::Pointer()), instance_node_.get(), mcgraph()->Int32Constant(WASM_INSTANCE_OBJECT_OFFSET(StackLimitAddress)), @@ -332,10 +334,9 @@ void WasmGraphBuilder::StackCheck(wasm::WasmCodePosition position, mcgraph()->machine()->Load(MachineType::Pointer()), limit_address, mcgraph()->IntPtrConstant(0), limit_address, *control); *effect = limit; - Node* pointer = graph()->NewNode(mcgraph()->machine()->LoadStackPointer()); Node* check = - graph()->NewNode(mcgraph()->machine()->UintLessThan(), limit, pointer); + graph()->NewNode(mcgraph()->machine()->StackPointerGreaterThan(), limit); Diamond stack_check(graph(), mcgraph()->common(), check, BranchHint::kTrue); stack_check.Chain(*control); @@ -1126,12 +1127,13 @@ Node* WasmGraphBuilder::IfDefault(Node* sw) { return graph()->NewNode(mcgraph()->common()->IfDefault(), sw); } -Node* WasmGraphBuilder::Return(unsigned count, Node** vals) { +Node* WasmGraphBuilder::Return(Vector<Node*> vals) { static const int kStackAllocatedNodeBufferSize = 8; Node* stack_buffer[kStackAllocatedNodeBufferSize]; std::vector<Node*> heap_buffer; Node** buf = stack_buffer; + unsigned count = static_cast<unsigned>(vals.size()); if (count + 3 > kStackAllocatedNodeBufferSize) { heap_buffer.resize(count + 3); buf = heap_buffer.data(); @@ -1139,7 +1141,7 @@ Node* WasmGraphBuilder::Return(unsigned count, Node** vals) { buf[0] = mcgraph()->Int32Constant(0); if (count > 0) { - memcpy(buf + 1, vals, sizeof(void*) * count); + memcpy(buf + 1, vals.begin(), sizeof(void*) * count); } buf[count + 1] = Effect(); buf[count + 2] = Control(); @@ -1150,11 +1152,9 @@ Node* WasmGraphBuilder::Return(unsigned count, Node** vals) { return ret; } -Node* WasmGraphBuilder::ReturnVoid() { return Return(0, nullptr); } - Node* WasmGraphBuilder::Unreachable(wasm::WasmCodePosition position) { TrapIfFalse(wasm::TrapReason::kTrapUnreachable, Int32Constant(0), position); - ReturnVoid(); + Return(Vector<Node*>{}); return nullptr; } @@ -2295,13 +2295,13 @@ Node* WasmGraphBuilder::GetExceptionTag(Node* except_obj) { return BuildCallToRuntime(Runtime::kWasmExceptionGetTag, &except_obj, 1); } -Node** WasmGraphBuilder::GetExceptionValues( +Vector<Node*> WasmGraphBuilder::GetExceptionValues( Node* except_obj, const wasm::WasmException* exception) { Node* values_array = BuildCallToRuntime(Runtime::kWasmExceptionGetValues, &except_obj, 1); uint32_t index = 0; const wasm::WasmExceptionSig* sig = exception->sig; - Node** values = Buffer(sig->parameter_count()); + Vector<Node*> values = Buffer(sig->parameter_count()); for (size_t i = 0; i < sig->parameter_count(); ++i) { Node* value; switch (sig->GetParam(i)) { @@ -2695,7 +2695,7 @@ Node* WasmGraphBuilder::BuildCallNode(wasm::FunctionSig* sig, Node** args, const size_t count = 1 + params + extra; // Reallocate the buffer to make space for extra inputs. 
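// Editorial sketch (an aside, not part of the diff) of the buffer layout
// that the reallocation and memmove below produce:
//
//   before: [ code | arg_1 | ... | arg_params ]
//   after:  [ code | instance | arg_1 | ... | arg_params | <extra slots> ]
//
// Realloc grows the buffer from 1 + params to 1 + params + extra entries,
// the memmove shifts the arguments right by one to open index 1 for the
// instance parameter, and the trailing extra slots are filled by the
// caller (typically the effect and control inputs).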
- args = Realloc(args, 1 + params, count); + args = Realloc(args, 1 + params, count).begin(); // Make room for the instance_node parameter at index 1, just after code. memmove(&args[2], &args[1], params * sizeof(Node*)); @@ -2725,7 +2725,7 @@ Node* WasmGraphBuilder::BuildWasmCall(wasm::FunctionSig* sig, Node** args, size_t ret_count = sig->return_count(); if (ret_count == 0) return call; // No return value. - *rets = Buffer(ret_count); + *rets = Buffer(ret_count).begin(); if (ret_count == 1) { // Only a single return value. (*rets)[0] = call; @@ -3183,12 +3183,12 @@ Node* WasmGraphBuilder::CreateOrMergeIntoPhi(MachineRepresentation rep, } else if (tnode != fnode) { uint32_t count = merge->InputCount(); // + 1 for the merge node. - Node** vals = Buffer(count + 1); + Vector<Node*> vals = Buffer(count + 1); for (uint32_t j = 0; j < count - 1; j++) vals[j] = tnode; vals[count - 1] = fnode; vals[count] = merge; return graph()->NewNode(mcgraph()->common()->Phi(rep, count), count + 1, - vals); + vals.begin()); } return tnode; } @@ -3199,12 +3199,12 @@ Node* WasmGraphBuilder::CreateOrMergeIntoEffectPhi(Node* merge, Node* tnode, AppendToPhi(tnode, fnode); } else if (tnode != fnode) { uint32_t count = merge->InputCount(); - Node** effects = Buffer(count); + Vector<Node*> effects = Buffer(count); for (uint32_t j = 0; j < count - 1; j++) { effects[j] = tnode; } effects[count - 1] = fnode; - tnode = EffectPhi(count, effects, merge); + tnode = EffectPhi(count, effects.begin(), merge); } return tnode; } @@ -3326,11 +3326,14 @@ Node* WasmGraphBuilder::BuildCallToRuntimeWithContext( auto call_descriptor = Linkage::GetRuntimeCallDescriptor( mcgraph()->zone(), f, fun->nargs, Operator::kNoProperties, CallDescriptor::kNoFlags); - // The CEntryStub is loaded from the instance_node so that generated code is + // The CEntryStub is loaded from the IsolateRoot so that generated code is // Isolate independent. At the moment this is only done for CEntryStub(1). + Node* isolate_root = BuildLoadIsolateRoot(); DCHECK_EQ(1, fun->result_size); - Node* centry_stub = LOAD_INSTANCE_FIELD( - CEntryStub, MachineType::TypeCompressedTaggedPointer()); + auto centry_id = + Builtins::kCEntry_Return1_DontSaveFPRegs_ArgvOnStack_NoBuiltinExit; + Node* centry_stub = LOAD_TAGGED_POINTER( + isolate_root, IsolateData::builtin_slot_offset(centry_id)); // TODO(titzer): allow arbitrary number of runtime arguments // At the moment we only allow 5 parameters. If more parameters are needed, // increase this constant accordingly. @@ -3943,30 +3946,43 @@ Graph* WasmGraphBuilder::graph() { return mcgraph()->graph(); } namespace { Signature<MachineRepresentation>* CreateMachineSignature( - Zone* zone, wasm::FunctionSig* sig) { + Zone* zone, wasm::FunctionSig* sig, WasmGraphBuilder::CallOrigin origin) { Signature<MachineRepresentation>::Builder builder(zone, sig->return_count(), sig->parameter_count()); for (auto ret : sig->returns()) { - builder.AddReturn(wasm::ValueTypes::MachineRepresentationFor(ret)); + if (origin == WasmGraphBuilder::kCalledFromJS) { + builder.AddReturn(MachineRepresentation::kTagged); + } else { + builder.AddReturn(wasm::ValueTypes::MachineRepresentationFor(ret)); + } } for (auto param : sig->parameters()) { - builder.AddParam(wasm::ValueTypes::MachineRepresentationFor(param)); + if (origin == WasmGraphBuilder::kCalledFromJS) { + // Parameters coming from JavaScript are always tagged values. 
Especially + // when the signature says that it's an I64 value, then a BigInt object is + // provided by JavaScript, and not two 32-bit parameters. + builder.AddParam(MachineRepresentation::kTagged); + } else { + builder.AddParam(wasm::ValueTypes::MachineRepresentationFor(param)); + } } return builder.Build(); } } // namespace -void WasmGraphBuilder::LowerInt64() { +void WasmGraphBuilder::LowerInt64(CallOrigin origin) { if (mcgraph()->machine()->Is64()) return; Int64Lowering r(mcgraph()->graph(), mcgraph()->machine(), mcgraph()->common(), mcgraph()->zone(), - CreateMachineSignature(mcgraph()->zone(), sig_)); + CreateMachineSignature(mcgraph()->zone(), sig_, origin), + std::move(lowering_special_case_)); r.LowerGraph(); } void WasmGraphBuilder::SimdScalarLoweringForTesting() { - SimdScalarLowering(mcgraph(), CreateMachineSignature(mcgraph()->zone(), sig_)) + SimdScalarLowering(mcgraph(), CreateMachineSignature(mcgraph()->zone(), sig_, + kCalledFromWasm)) .LowerGraph(); } @@ -3992,6 +4008,24 @@ Node* WasmGraphBuilder::SimdOp(wasm::WasmOpcode opcode, Node* const* inputs) { return graph()->NewNode(mcgraph()->machine()->F64x2Abs(), inputs[0]); case wasm::kExprF64x2Neg: return graph()->NewNode(mcgraph()->machine()->F64x2Neg(), inputs[0]); + case wasm::kExprF64x2Add: + return graph()->NewNode(mcgraph()->machine()->F64x2Add(), inputs[0], + inputs[1]); + case wasm::kExprF64x2Sub: + return graph()->NewNode(mcgraph()->machine()->F64x2Sub(), inputs[0], + inputs[1]); + case wasm::kExprF64x2Mul: + return graph()->NewNode(mcgraph()->machine()->F64x2Mul(), inputs[0], + inputs[1]); + case wasm::kExprF64x2Div: + return graph()->NewNode(mcgraph()->machine()->F64x2Div(), inputs[0], + inputs[1]); + case wasm::kExprF64x2Min: + return graph()->NewNode(mcgraph()->machine()->F64x2Min(), inputs[0], + inputs[1]); + case wasm::kExprF64x2Max: + return graph()->NewNode(mcgraph()->machine()->F64x2Max(), inputs[0], + inputs[1]); case wasm::kExprF64x2Eq: return graph()->NewNode(mcgraph()->machine()->F64x2Eq(), inputs[0], inputs[1]); @@ -4040,6 +4074,9 @@ Node* WasmGraphBuilder::SimdOp(wasm::WasmOpcode opcode, Node* const* inputs) { case wasm::kExprF32x4Mul: return graph()->NewNode(mcgraph()->machine()->F32x4Mul(), inputs[0], inputs[1]); + case wasm::kExprF32x4Div: + return graph()->NewNode(mcgraph()->machine()->F32x4Div(), inputs[0], + inputs[1]); case wasm::kExprF32x4Min: return graph()->NewNode(mcgraph()->machine()->F32x4Min(), inputs[0], inputs[1]); @@ -4068,6 +4105,12 @@ Node* WasmGraphBuilder::SimdOp(wasm::WasmOpcode opcode, Node* const* inputs) { return graph()->NewNode(mcgraph()->machine()->I64x2Splat(), inputs[0]); case wasm::kExprI64x2Neg: return graph()->NewNode(mcgraph()->machine()->I64x2Neg(), inputs[0]); + case wasm::kExprI64x2Shl: + return graph()->NewNode(mcgraph()->machine()->I64x2Shl(), inputs[0], + inputs[1]); + case wasm::kExprI64x2ShrS: + return graph()->NewNode(mcgraph()->machine()->I64x2ShrS(), inputs[0], + inputs[1]); case wasm::kExprI64x2Add: return graph()->NewNode(mcgraph()->machine()->I64x2Add(), inputs[0], inputs[1]); @@ -4077,6 +4120,12 @@ Node* WasmGraphBuilder::SimdOp(wasm::WasmOpcode opcode, Node* const* inputs) { case wasm::kExprI64x2Mul: return graph()->NewNode(mcgraph()->machine()->I64x2Mul(), inputs[0], inputs[1]); + case wasm::kExprI64x2MinS: + return graph()->NewNode(mcgraph()->machine()->I64x2MinS(), inputs[0], + inputs[1]); + case wasm::kExprI64x2MaxS: + return graph()->NewNode(mcgraph()->machine()->I64x2MaxS(), inputs[0], + inputs[1]); case wasm::kExprI64x2Eq: return 
graph()->NewNode(mcgraph()->machine()->I64x2Eq(), inputs[0], inputs[1]); @@ -4095,6 +4144,15 @@ Node* WasmGraphBuilder::SimdOp(wasm::WasmOpcode opcode, Node* const* inputs) { case wasm::kExprI64x2GeS: return graph()->NewNode(mcgraph()->machine()->I64x2GeS(), inputs[0], inputs[1]); + case wasm::kExprI64x2ShrU: + return graph()->NewNode(mcgraph()->machine()->I64x2ShrU(), inputs[0], + inputs[1]); + case wasm::kExprI64x2MinU: + return graph()->NewNode(mcgraph()->machine()->I64x2MinU(), inputs[0], + inputs[1]); + case wasm::kExprI64x2MaxU: + return graph()->NewNode(mcgraph()->machine()->I64x2MaxU(), inputs[0], + inputs[1]); case wasm::kExprI64x2LtU: return graph()->NewNode(mcgraph()->machine()->I64x2GtU(), inputs[1], inputs[0]); @@ -4123,6 +4181,12 @@ Node* WasmGraphBuilder::SimdOp(wasm::WasmOpcode opcode, Node* const* inputs) { inputs[0]); case wasm::kExprI32x4Neg: return graph()->NewNode(mcgraph()->machine()->I32x4Neg(), inputs[0]); + case wasm::kExprI32x4Shl: + return graph()->NewNode(mcgraph()->machine()->I32x4Shl(), inputs[0], + inputs[1]); + case wasm::kExprI32x4ShrS: + return graph()->NewNode(mcgraph()->machine()->I32x4ShrS(), inputs[0], + inputs[1]); case wasm::kExprI32x4Add: return graph()->NewNode(mcgraph()->machine()->I32x4Add(), inputs[0], inputs[1]); @@ -4165,6 +4229,9 @@ Node* WasmGraphBuilder::SimdOp(wasm::WasmOpcode opcode, Node* const* inputs) { case wasm::kExprI32x4UConvertI16x8High: return graph()->NewNode(mcgraph()->machine()->I32x4UConvertI16x8High(), inputs[0]); + case wasm::kExprI32x4ShrU: + return graph()->NewNode(mcgraph()->machine()->I32x4ShrU(), inputs[0], + inputs[1]); case wasm::kExprI32x4MinU: return graph()->NewNode(mcgraph()->machine()->I32x4MinU(), inputs[0], inputs[1]); @@ -4191,6 +4258,12 @@ Node* WasmGraphBuilder::SimdOp(wasm::WasmOpcode opcode, Node* const* inputs) { case wasm::kExprI16x8SConvertI8x16High: return graph()->NewNode(mcgraph()->machine()->I16x8SConvertI8x16High(), inputs[0]); + case wasm::kExprI16x8Shl: + return graph()->NewNode(mcgraph()->machine()->I16x8Shl(), inputs[0], + inputs[1]); + case wasm::kExprI16x8ShrS: + return graph()->NewNode(mcgraph()->machine()->I16x8ShrS(), inputs[0], + inputs[1]); case wasm::kExprI16x8Neg: return graph()->NewNode(mcgraph()->machine()->I16x8Neg(), inputs[0]); case wasm::kExprI16x8SConvertI32x4: @@ -4247,6 +4320,9 @@ Node* WasmGraphBuilder::SimdOp(wasm::WasmOpcode opcode, Node* const* inputs) { case wasm::kExprI16x8UConvertI32x4: return graph()->NewNode(mcgraph()->machine()->I16x8UConvertI32x4(), inputs[0], inputs[1]); + case wasm::kExprI16x8ShrU: + return graph()->NewNode(mcgraph()->machine()->I16x8ShrU(), inputs[0], + inputs[1]); case wasm::kExprI16x8AddSaturateU: return graph()->NewNode(mcgraph()->machine()->I16x8AddSaturateU(), inputs[0], inputs[1]); @@ -4275,6 +4351,12 @@ Node* WasmGraphBuilder::SimdOp(wasm::WasmOpcode opcode, Node* const* inputs) { return graph()->NewNode(mcgraph()->machine()->I8x16Splat(), inputs[0]); case wasm::kExprI8x16Neg: return graph()->NewNode(mcgraph()->machine()->I8x16Neg(), inputs[0]); + case wasm::kExprI8x16Shl: + return graph()->NewNode(mcgraph()->machine()->I8x16Shl(), inputs[0], + inputs[1]); + case wasm::kExprI8x16ShrS: + return graph()->NewNode(mcgraph()->machine()->I8x16ShrS(), inputs[0], + inputs[1]); case wasm::kExprI8x16SConvertI16x8: return graph()->NewNode(mcgraph()->machine()->I8x16SConvertI16x8(), inputs[0], inputs[1]); @@ -4317,6 +4399,9 @@ Node* WasmGraphBuilder::SimdOp(wasm::WasmOpcode opcode, Node* const* inputs) { case wasm::kExprI8x16GeS: return 
graph()->NewNode(mcgraph()->machine()->I8x16GeS(), inputs[0], inputs[1]); + case wasm::kExprI8x16ShrU: + return graph()->NewNode(mcgraph()->machine()->I8x16ShrU(), inputs[0], + inputs[1]); case wasm::kExprI8x16UConvertI16x8: return graph()->NewNode(mcgraph()->machine()->I8x16UConvertI16x8(), inputs[0], inputs[1]); @@ -4424,47 +4509,6 @@ Node* WasmGraphBuilder::SimdLaneOp(wasm::WasmOpcode opcode, uint8_t lane, } } -Node* WasmGraphBuilder::SimdShiftOp(wasm::WasmOpcode opcode, uint8_t shift, - Node* const* inputs) { - has_simd_ = true; - switch (opcode) { - case wasm::kExprI64x2Shl: - return graph()->NewNode(mcgraph()->machine()->I64x2Shl(shift), inputs[0]); - case wasm::kExprI64x2ShrS: - return graph()->NewNode(mcgraph()->machine()->I64x2ShrS(shift), - inputs[0]); - case wasm::kExprI64x2ShrU: - return graph()->NewNode(mcgraph()->machine()->I64x2ShrU(shift), - inputs[0]); - case wasm::kExprI32x4Shl: - return graph()->NewNode(mcgraph()->machine()->I32x4Shl(shift), inputs[0]); - case wasm::kExprI32x4ShrS: - return graph()->NewNode(mcgraph()->machine()->I32x4ShrS(shift), - inputs[0]); - case wasm::kExprI32x4ShrU: - return graph()->NewNode(mcgraph()->machine()->I32x4ShrU(shift), - inputs[0]); - case wasm::kExprI16x8Shl: - return graph()->NewNode(mcgraph()->machine()->I16x8Shl(shift), inputs[0]); - case wasm::kExprI16x8ShrS: - return graph()->NewNode(mcgraph()->machine()->I16x8ShrS(shift), - inputs[0]); - case wasm::kExprI16x8ShrU: - return graph()->NewNode(mcgraph()->machine()->I16x8ShrU(shift), - inputs[0]); - case wasm::kExprI8x16Shl: - return graph()->NewNode(mcgraph()->machine()->I8x16Shl(shift), inputs[0]); - case wasm::kExprI8x16ShrS: - return graph()->NewNode(mcgraph()->machine()->I8x16ShrS(shift), - inputs[0]); - case wasm::kExprI8x16ShrU: - return graph()->NewNode(mcgraph()->machine()->I8x16ShrU(shift), - inputs[0]); - default: - FATAL_UNSUPPORTED_OPCODE(opcode); - } -} - Node* WasmGraphBuilder::Simd8x16ShuffleOp(const uint8_t shuffle[16], Node* const* inputs) { has_simd_ = true; @@ -5010,15 +5054,86 @@ void WasmGraphBuilder::RemoveBytecodePositionDecorator() { namespace { class WasmWrapperGraphBuilder : public WasmGraphBuilder { public: - WasmWrapperGraphBuilder(Zone* zone, JSGraph* jsgraph, wasm::FunctionSig* sig, + WasmWrapperGraphBuilder(Zone* zone, MachineGraph* mcgraph, + wasm::FunctionSig* sig, compiler::SourcePositionTable* spt, StubCallMode stub_mode, wasm::WasmFeatures features) - : WasmGraphBuilder(nullptr, zone, jsgraph, sig, spt), - isolate_(jsgraph->isolate()), - jsgraph_(jsgraph), + : WasmGraphBuilder(nullptr, zone, mcgraph, sig, spt), stub_mode_(stub_mode), enabled_features_(features) {} + CallDescriptor* GetI32PairToBigIntCallDescriptor() { + I32PairToBigIntDescriptor interface_descriptor; + + return Linkage::GetStubCallDescriptor( + mcgraph()->zone(), // zone + interface_descriptor, // descriptor + interface_descriptor.GetStackParameterCount(), // stack parameter count + CallDescriptor::kNoFlags, // flags + Operator::kNoProperties, // properties + stub_mode_); // stub call mode + } + + CallDescriptor* GetI64ToBigIntCallDescriptor() { + if (!lowering_special_case_) { + lowering_special_case_ = base::make_unique<Int64LoweringSpecialCase>(); + } + + if (lowering_special_case_->i64_to_bigint_call_descriptor) { + return lowering_special_case_->i64_to_bigint_call_descriptor; + } + + I64ToBigIntDescriptor interface_descriptor; + auto call_descriptor = Linkage::GetStubCallDescriptor( + mcgraph()->zone(), // zone + interface_descriptor, // descriptor + 
interface_descriptor.GetStackParameterCount(), // stack parameter count + CallDescriptor::kNoFlags, // flags + Operator::kNoProperties, // properties + stub_mode_); // stub call mode + + lowering_special_case_->i64_to_bigint_call_descriptor = call_descriptor; + lowering_special_case_->i32_pair_to_bigint_call_descriptor = + GetI32PairToBigIntCallDescriptor(); + return call_descriptor; + } + + CallDescriptor* GetBigIntToI32PairCallDescriptor() { + BigIntToI32PairDescriptor interface_descriptor; + + return Linkage::GetStubCallDescriptor( + mcgraph()->zone(), // zone + interface_descriptor, // descriptor + interface_descriptor.GetStackParameterCount(), // stack parameter count + CallDescriptor::kNoFlags, // flags + Operator::kNoProperties, // properties + stub_mode_); // stub call mode + } + + CallDescriptor* GetBigIntToI64CallDescriptor() { + if (!lowering_special_case_) { + lowering_special_case_ = base::make_unique<Int64LoweringSpecialCase>(); + } + + if (lowering_special_case_->bigint_to_i64_call_descriptor) { + return lowering_special_case_->bigint_to_i64_call_descriptor; + } + + BigIntToI64Descriptor interface_descriptor; + auto call_descriptor = Linkage::GetStubCallDescriptor( + mcgraph()->zone(), // zone + interface_descriptor, // descriptor + interface_descriptor.GetStackParameterCount(), // stack parameter count + CallDescriptor::kNoFlags, // flags + Operator::kNoProperties, // properties + stub_mode_); // stub call mode + + lowering_special_case_->bigint_to_i64_call_descriptor = call_descriptor; + lowering_special_case_->bigint_to_i32_pair_call_descriptor = + GetBigIntToI32PairCallDescriptor(); + return call_descriptor; + } + Node* BuildAllocateHeapNumberWithValue(Node* value, Node* control) { MachineOperatorBuilder* machine = mcgraph()->machine(); CommonOperatorBuilder* common = mcgraph()->common(); @@ -5027,7 +5142,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder { ? mcgraph()->RelocatableIntPtrConstant( wasm::WasmCode::kWasmAllocateHeapNumber, RelocInfo::WASM_STUB_CALL) - : BuildLoadBuiltinFromInstance(Builtins::kAllocateHeapNumber); + : BuildLoadBuiltinFromIsolateRoot(Builtins::kAllocateHeapNumber); if (!allocate_heap_number_operator_.is_set()) { auto call_descriptor = Linkage::GetStubCallDescriptor( mcgraph()->zone(), AllocateHeapNumberDescriptor(), 0, @@ -5084,10 +5199,9 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder { return undefined_value_node_.get(); } - Node* BuildLoadBuiltinFromInstance(int builtin_index) { + Node* BuildLoadBuiltinFromIsolateRoot(int builtin_index) { DCHECK(Builtins::IsBuiltinId(builtin_index)); - Node* isolate_root = - LOAD_INSTANCE_FIELD(IsolateRoot, MachineType::Pointer()); + Node* isolate_root = BuildLoadIsolateRoot(); return LOAD_TAGGED_POINTER(isolate_root, IsolateData::builtin_slot_offset(builtin_index)); } @@ -5213,7 +5327,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder { vsmi, vbox, merge); } - int AddArgumentNodes(Node** args, int pos, int param_count, + int AddArgumentNodes(Vector<Node*> args, int pos, int param_count, wasm::FunctionSig* sig) { // Convert wasm numbers to JS values. for (int i = 0; i < param_count; ++i) { @@ -5232,7 +5346,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder { (stub_mode_ == StubCallMode::kCallWasmRuntimeStub) ? 
mcgraph()->RelocatableIntPtrConstant( wasm::WasmCode::kWasmToNumber, RelocInfo::WASM_STUB_CALL) - : BuildLoadBuiltinFromInstance(Builtins::kToNumber); + : BuildLoadBuiltinFromIsolateRoot(Builtins::kToNumber); Node* result = SetEffect( graph()->NewNode(mcgraph()->common()->Call(call_descriptor), stub_code, @@ -5317,47 +5431,59 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder { } Node* BuildChangeInt64ToBigInt(Node* input) { - I64ToBigIntDescriptor interface_descriptor; - - auto call_descriptor = Linkage::GetStubCallDescriptor( - mcgraph()->zone(), // zone - interface_descriptor, // descriptor - interface_descriptor.GetStackParameterCount(), // stack parameter count - CallDescriptor::kNoFlags, // flags - Operator::kNoProperties, // properties - stub_mode_); // stub call mode - - Node* target = - (stub_mode_ == StubCallMode::kCallWasmRuntimeStub) - ? mcgraph()->RelocatableIntPtrConstant( - wasm::WasmCode::kWasmI64ToBigInt, RelocInfo::WASM_STUB_CALL) - : BuildLoadBuiltinFromInstance(Builtins::kI64ToBigInt); + const Operator* call = + mcgraph()->common()->Call(GetI64ToBigIntCallDescriptor()); + + Node* target; + if (mcgraph()->machine()->Is64()) { + target = + (stub_mode_ == StubCallMode::kCallWasmRuntimeStub) + ? mcgraph()->RelocatableIntPtrConstant( + wasm::WasmCode::kWasmI64ToBigInt, RelocInfo::WASM_STUB_CALL) + : BuildLoadBuiltinFromIsolateRoot(Builtins::kI64ToBigInt); + } else { + DCHECK(mcgraph()->machine()->Is32()); + // On 32-bit platforms we already set the target to the + // I32PairToBigInt builtin here, so that we don't have to replace the + // target in the int64-lowering. + target = + (stub_mode_ == StubCallMode::kCallWasmRuntimeStub) + ? mcgraph()->RelocatableIntPtrConstant( + wasm::WasmCode::kWasmI32PairToBigInt, + RelocInfo::WASM_STUB_CALL) + : BuildLoadBuiltinFromIsolateRoot(Builtins::kI32PairToBigInt); + } return SetEffect( - SetControl(graph()->NewNode(mcgraph()->common()->Call(call_descriptor), - target, input, Effect(), Control()))); + SetControl(graph()->NewNode(call, target, input, Effect(), Control()))); } Node* BuildChangeBigIntToInt64(Node* input, Node* context) { - BigIntToI64Descriptor interface_descriptor; - - auto call_descriptor = Linkage::GetStubCallDescriptor( - mcgraph()->zone(), // zone - interface_descriptor, // descriptor - interface_descriptor.GetStackParameterCount(), // stack parameter count - CallDescriptor::kNoFlags, // flags - Operator::kNoProperties, // properties - stub_mode_); // stub call mode - - Node* target = - (stub_mode_ == StubCallMode::kCallWasmRuntimeStub) - ? mcgraph()->RelocatableIntPtrConstant( - wasm::WasmCode::kWasmBigIntToI64, RelocInfo::WASM_STUB_CALL) - : BuildLoadBuiltinFromInstance(Builtins::kBigIntToI64); + const Operator* call = + mcgraph()->common()->Call(GetBigIntToI64CallDescriptor()); + + Node* target; + if (mcgraph()->machine()->Is64()) { + target = + (stub_mode_ == StubCallMode::kCallWasmRuntimeStub) + ? mcgraph()->RelocatableIntPtrConstant( + wasm::WasmCode::kWasmBigIntToI64, RelocInfo::WASM_STUB_CALL) + : BuildLoadBuiltinFromIsolateRoot(Builtins::kBigIntToI64); + } else { + DCHECK(mcgraph()->machine()->Is32()); + // On 32-bit platforms we already set the target to the + // BigIntToI32Pair builtin here, so that we don't have to replace the + // target in the int64-lowering. + target = + (stub_mode_ == StubCallMode::kCallWasmRuntimeStub) + ? 
mcgraph()->RelocatableIntPtrConstant( + wasm::WasmCode::kWasmBigIntToI32Pair, + RelocInfo::WASM_STUB_CALL) + : BuildLoadBuiltinFromIsolateRoot(Builtins::kBigIntToI32Pair); + } return SetEffect(SetControl( - graph()->NewNode(mcgraph()->common()->Call(call_descriptor), target, - input, context, Effect(), Control()))); + graph()->NewNode(call, target, input, context, Effect(), Control()))); } Node* FromJS(Node* node, Node* js_context, wasm::ValueType type) { @@ -5427,8 +5553,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder { void BuildModifyThreadInWasmFlag(bool new_value) { if (!trap_handler::IsTrapHandlerEnabled()) return; - Node* isolate_root = - LOAD_INSTANCE_FIELD(IsolateRoot, MachineType::Pointer()); + Node* isolate_root = BuildLoadIsolateRoot(); Node* thread_in_wasm_flag_address = LOAD_RAW(isolate_root, Isolate::thread_in_wasm_flag_address_offset(), @@ -5446,9 +5571,10 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder { Diamond flag_check(graph(), mcgraph()->common(), check, BranchHint::kTrue); flag_check.Chain(Control()); - Node* message_id = jsgraph()->SmiConstant(static_cast<int32_t>( - new_value ? AbortReason::kUnexpectedThreadInWasmSet - : AbortReason::kUnexpectedThreadInWasmUnset)); + Node* message_id = graph()->NewNode( + mcgraph()->common()->NumberConstant(static_cast<int32_t>( + new_value ? AbortReason::kUnexpectedThreadInWasmSet + : AbortReason::kUnexpectedThreadInWasmUnset))); Node* effect = Effect(); BuildCallToRuntimeWithContext(Runtime::kAbort, NoContextConstant(), @@ -5509,7 +5635,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder { // Create the js_closure and js_context parameters. Node* js_closure = - graph()->NewNode(jsgraph()->common()->Parameter( + graph()->NewNode(mcgraph()->common()->Parameter( Linkage::kJSCallClosureParamIndex, "%closure"), graph()->start()); Node* js_context = graph()->NewNode( @@ -5525,18 +5651,18 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder { instance_node_.set( BuildLoadInstanceFromExportedFunctionData(function_data)); - if (!wasm::IsJSCompatibleSignature(sig_, enabled_features_.bigint)) { + if (!wasm::IsJSCompatibleSignature(sig_, enabled_features_)) { // Throw a TypeError. Use the js_context of the calling javascript // function (passed as a parameter), such that the generated code is // js_context independent. BuildCallToRuntimeWithContext(Runtime::kWasmThrowTypeError, js_context, nullptr, 0, effect_, Control()); - Return(jsgraph()->SmiConstant(0)); + TerminateThrow(Effect(), Control()); return; } const int args_count = wasm_count + 1; // +1 for wasm_code. - Node** args = Buffer(args_count); + Vector<Node*> args = Buffer(args_count); Node** rets; // Convert JS parameters to wasm numbers. @@ -5554,8 +5680,8 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder { // Load function index from {WasmExportedFunctionData}. Node* function_index = BuildLoadFunctionIndexFromExportedFunctionData(function_data); - BuildImportCall(sig_, args, &rets, wasm::kNoCodePosition, function_index, - kCallContinues); + BuildImportCall(sig_, args.begin(), &rets, wasm::kNoCodePosition, + function_index, kCallContinues); } else { // Call to a wasm function defined in this module. // The call target is the jump table slot for that function. 
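An editorial aside on the jump-table comment that closes the hunk above: calls to functions defined in the same module do not target the callee's code object directly, because that code can be replaced later (for example when a function is tiered up to optimized code). Each function instead gets a fixed slot in the module's jump table, so replacing the code patches one slot rather than every caller. The address the next hunk feeds into args[0] is plain base-plus-offset arithmetic:

// Shape of the computation in the hunk below (names from the diff).
Node* jump_table_slot = graph()->NewNode(
    mcgraph()->machine()->IntAdd(), jump_table_start, jump_table_offset);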
@@ -5567,16 +5693,35 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder { mcgraph()->machine()->IntAdd(), jump_table_start, jump_table_offset); args[0] = jump_table_slot; - BuildWasmCall(sig_, args, &rets, wasm::kNoCodePosition, nullptr, + BuildWasmCall(sig_, args.begin(), &rets, wasm::kNoCodePosition, nullptr, kNoRetpoline); } // Clear the ThreadInWasm flag. BuildModifyThreadInWasmFlag(false); - Node* jsval = sig_->return_count() == 0 ? jsgraph()->UndefinedConstant() - : ToJS(rets[0], sig_->GetReturn()); + Node* jsval; + if (sig_->return_count() == 0) { + jsval = BuildLoadUndefinedValueFromInstance(); + } else if (sig_->return_count() == 1) { + jsval = ToJS(rets[0], sig_->GetReturn()); + } else { + int32_t return_count = static_cast<int32_t>(sig_->return_count()); + Node* size = + graph()->NewNode(mcgraph()->common()->NumberConstant(return_count)); + // TODO(thibaudm): Replace runtime calls with TurboFan code. + Node* fixed_array = + BuildCallToRuntime(Runtime::kWasmNewMultiReturnFixedArray, &size, 1); + for (int i = 0; i < return_count; ++i) { + Node* value = ToJS(rets[i], sig_->GetReturn(i)); + STORE_FIXED_ARRAY_SLOT_ANY(fixed_array, i, value); + } + jsval = BuildCallToRuntimeWithContext(Runtime::kWasmNewMultiReturnJSArray, + js_context, &fixed_array, 1, + effect_, Control()); + } Return(jsval); + if (ContainsInt64(sig_)) LowerInt64(kCalledFromJS); } bool BuildWasmImportCallWrapper(WasmImportCallKind kind) { @@ -5597,9 +5742,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder { BuildCallToRuntimeWithContext(Runtime::kWasmThrowTypeError, native_context, nullptr, 0, effect_, Control()); - // We don't need to return a value here, as the runtime call will not - // return anyway (the c entry stub will trigger stack unwinding). - ReturnVoid(); + TerminateThrow(Effect(), Control()); return false; } @@ -5622,7 +5765,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder { sloppy_receiver = false; V8_FALLTHROUGH; // fallthru case WasmImportCallKind::kJSFunctionArityMatchSloppy: { - Node** args = Buffer(wasm_count + 9); + Vector<Node*> args = Buffer(wasm_count + 7); int pos = 0; Node* function_context = LOAD_RAW(callable_node, @@ -5650,8 +5793,9 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder { args[pos++] = Effect(); args[pos++] = Control(); + DCHECK_EQ(pos, args.size()); call = graph()->NewNode(mcgraph()->common()->Call(call_descriptor), pos, - args); + args.begin()); break; } // ======================================================================= @@ -5661,14 +5805,14 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder { sloppy_receiver = false; V8_FALLTHROUGH; // fallthru case WasmImportCallKind::kJSFunctionArityMismatchSloppy: { - Node** args = Buffer(wasm_count + 9); + Vector<Node*> args = Buffer(wasm_count + 9); int pos = 0; Node* function_context = LOAD_RAW(callable_node, wasm::ObjectAccess::ContextOffsetInTaggedJSFunction(), MachineType::TypeCompressedTaggedPointer()); - args[pos++] = - BuildLoadBuiltinFromInstance(Builtins::kArgumentsAdaptorTrampoline); + args[pos++] = BuildLoadBuiltinFromIsolateRoot( + Builtins::kArgumentsAdaptorTrampoline); args[pos++] = callable_node; // target callable args[pos++] = undefined_node; // new target args[pos++] = mcgraph()->Int32Constant(wasm_count); // argument count @@ -5712,26 +5856,27 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder { args[pos++] = function_context; args[pos++] = Effect(); args[pos++] = Control(); + + DCHECK_EQ(pos, args.size()); call = 
graph()->NewNode(mcgraph()->common()->Call(call_descriptor), pos, - args); + args.begin()); break; } // ======================================================================= // === General case of unknown callable ================================== // ======================================================================= case WasmImportCallKind::kUseCallBuiltin: { - Node** args = Buffer(wasm_count + 9); + Vector<Node*> args = Buffer(wasm_count + 7); int pos = 0; - args[pos++] = mcgraph()->RelocatableIntPtrConstant( - wasm::WasmCode::kWasmCallJavaScript, RelocInfo::WASM_STUB_CALL); + args[pos++] = + BuildLoadBuiltinFromIsolateRoot(Builtins::kCall_ReceiverIsAny); args[pos++] = callable_node; args[pos++] = mcgraph()->Int32Constant(wasm_count); // argument count args[pos++] = undefined_node; // receiver auto call_descriptor = Linkage::GetStubCallDescriptor( graph()->zone(), CallTrampolineDescriptor{}, wasm_count + 1, - CallDescriptor::kNoFlags, Operator::kNoProperties, - StubCallMode::kCallWasmRuntimeStub); + CallDescriptor::kNoFlags, Operator::kNoProperties); // Convert wasm numbers to JS values. pos = AddArgumentNodes(args, pos, wasm_count, sig_); @@ -5745,8 +5890,9 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder { args[pos++] = Effect(); args[pos++] = Control(); + DCHECK_EQ(pos, args.size()); call = graph()->NewNode(mcgraph()->common()->Call(call_descriptor), pos, - args); + args.begin()); break; } default: @@ -5766,6 +5912,8 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder { BuildModifyThreadInWasmFlag(true); Return(val); + + if (ContainsInt64(sig_)) LowerInt64(kCalledFromWasm); return true; } @@ -5807,13 +5955,12 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder { Node* sfi_data = LOAD_RAW( shared, SharedFunctionInfo::kFunctionDataOffset - kHeapObjectTag, MachineType::TypeCompressedTagged()); - Node* host_data = LOAD_RAW( + Node* host_data_foreign = LOAD_RAW( sfi_data, WasmCapiFunctionData::kEmbedderDataOffset - kHeapObjectTag, - MachineType::Pointer()); + MachineType::TypeCompressedTagged()); BuildModifyThreadInWasmFlag(false); - Node* isolate_root = - LOAD_INSTANCE_FIELD(IsolateRoot, MachineType::Pointer()); + Node* isolate_root = BuildLoadIsolateRoot(); Node* fp_value = graph()->NewNode(mcgraph()->machine()->LoadFramePointer()); STORE_RAW(isolate_root, Isolate::c_entry_fp_offset(), fp_value, MachineType::PointerRepresentation(), kNoWriteBarrier); @@ -5824,11 +5971,12 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder { Node* function = graph()->NewNode(mcgraph()->common()->ExternalConstant(ref)); - // Parameters: void* data, Address arguments. + // Parameters: Address host_data_foreign, Address arguments. 
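// Editorial note (an aside, not part of the diff): the Signature
// constructor used just below takes (return_count, parameter_count, reps),
// where `reps` lists all return types first and then all parameter types.
// So
//
//   MachineSignature host_sig(1, 2, host_sig_types);
//
// declares one pointer-sized return value followed by two pointer
// parameters: the host data and the packed argument buffer named in the
// comment above.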
MachineType host_sig_types[] = { MachineType::Pointer(), MachineType::Pointer(), MachineType::Pointer()}; MachineSignature host_sig(1, 2, host_sig_types); - Node* return_value = BuildCCall(&host_sig, function, host_data, values); + Node* return_value = + BuildCCall(&host_sig, function, host_data_foreign, values); BuildModifyThreadInWasmFlag(true); @@ -5854,13 +6002,13 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder { SetControl( graph()->NewNode(mcgraph()->common()->IfTrue(), exception_branch)); DCHECK_LT(sig_->return_count(), wasm::kV8MaxWasmFunctionMultiReturns); - int return_count = static_cast<int>(sig_->return_count()); + size_t return_count = sig_->return_count(); if (return_count == 0) { Return(Int32Constant(0)); } else { - Node** returns = Buffer(return_count); + Vector<Node*> returns = Buffer(return_count); offset = 0; - for (int i = 0; i < return_count; ++i) { + for (size_t i = 0; i < return_count; ++i) { wasm::ValueType type = sig_->GetReturn(i); Node* val = SetEffect( graph()->NewNode(GetSafeLoadOperator(offset, type), values, @@ -5868,10 +6016,10 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder { returns[i] = val; offset += wasm::ValueTypes::ElementSizeInBytes(type); } - Return(return_count, returns); + Return(returns); } - if (ContainsInt64(sig_)) LowerInt64(); + if (ContainsInt64(sig_)) LowerInt64(kCalledFromWasm); } void BuildWasmInterpreterEntry(int func_index) { @@ -5918,17 +6066,19 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder { // We are passing the raw arg_buffer here. To the GC and other parts, it // looks like a Smi (lowest bit not set). In the runtime function however, // don't call Smi::value on it, but just cast it to a byte pointer. - Node* parameters[] = {jsgraph()->SmiConstant(func_index), arg_buffer}; + Node* parameters[] = { + graph()->NewNode(mcgraph()->common()->NumberConstant(func_index)), + arg_buffer}; BuildCallToRuntime(Runtime::kWasmRunInterpreter, parameters, arraysize(parameters)); // Read back the return value. DCHECK_LT(sig_->return_count(), wasm::kV8MaxWasmFunctionMultiReturns); - unsigned return_count = static_cast<unsigned>(sig_->return_count()); + size_t return_count = sig_->return_count(); if (return_count == 0) { Return(Int32Constant(0)); } else { - Node** returns = Buffer(return_count); + Vector<Node*> returns = Buffer(return_count); offset = 0; for (size_t i = 0; i < return_count; ++i) { wasm::ValueType type = sig_->GetReturn(i); @@ -5938,10 +6088,85 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder { returns[i] = val; offset += wasm::ValueTypes::ElementSizeInBytes(type); } - Return(return_count, returns); + Return(returns); } - if (ContainsInt64(sig_)) LowerInt64(); + if (ContainsInt64(sig_)) LowerInt64(kCalledFromWasm); + } + + void BuildJSToJSWrapper(Isolate* isolate) { + int wasm_count = static_cast<int>(sig_->parameter_count()); + + // Build the start and the parameter nodes. + int param_count = 1 /* closure */ + 1 /* receiver */ + wasm_count + + 1 /* new.target */ + 1 /* #arg */ + 1 /* context */; + SetEffect(SetControl(Start(param_count))); + Node* closure = Param(Linkage::kJSCallClosureParamIndex); + Node* context = Param(Linkage::GetJSCallContextParamIndex(wasm_count + 1)); + + // Since JS-to-JS wrappers are specific to one Isolate, it is OK to embed + // values (for undefined and root) directly into the instruction stream. 
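// Editorial aside on the two statements below: this is the counterpart to
// BuildLoadIsolateRoot. Code that must work for any Isolate loads the root
// from the instance at run time; this wrapper is compiled for exactly one
// Isolate, so the root address and the undefined value can be baked in as
// constants, and the pre-set {isolate_root_node_} short-circuits every
// later BuildLoadIsolateRoot call. Further down, the per-parameter
// round-trip
//
//   ToJS(FromJS(param, context, sig_->GetParam(i)), sig_->GetParam(i))
//
// canonicalizes each argument through its wasm type; for an i32 parameter
// this amounts to a JS ToNumber conversion plus a 32-bit truncation before
// the value is re-boxed as a JS number.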
+ isolate_root_node_ = mcgraph()->IntPtrConstant(isolate->isolate_root()); + undefined_value_node_ = graph()->NewNode(mcgraph()->common()->HeapConstant( + isolate->factory()->undefined_value())); + + // Throw a TypeError if the signature is incompatible with JavaScript. + if (!wasm::IsJSCompatibleSignature(sig_, enabled_features_)) { + BuildCallToRuntimeWithContext(Runtime::kWasmThrowTypeError, context, + nullptr, 0, effect_, Control()); + TerminateThrow(Effect(), Control()); + return; + } + + // Load the original callable from the closure. + Node* shared = LOAD_TAGGED_ANY( + closure, + wasm::ObjectAccess::ToTagged(JSFunction::kSharedFunctionInfoOffset)); + Node* func_data = LOAD_TAGGED_ANY( + shared, + wasm::ObjectAccess::ToTagged(SharedFunctionInfo::kFunctionDataOffset)); + Node* callable = LOAD_TAGGED_ANY( + func_data, + wasm::ObjectAccess::ToTagged(WasmJSFunctionData::kCallableOffset)); + + // Call the underlying closure. + Vector<Node*> args = Buffer(wasm_count + 7); + int pos = 0; + args[pos++] = graph()->NewNode(mcgraph()->common()->HeapConstant( + BUILTIN_CODE(isolate, Call_ReceiverIsAny))); + args[pos++] = callable; + args[pos++] = mcgraph()->Int32Constant(wasm_count); // argument count + args[pos++] = BuildLoadUndefinedValueFromInstance(); // receiver + + auto call_descriptor = Linkage::GetStubCallDescriptor( + graph()->zone(), CallTrampolineDescriptor{}, wasm_count + 1, + CallDescriptor::kNoFlags, Operator::kNoProperties, + StubCallMode::kCallCodeObject); + + // Convert parameter JS values to wasm numbers and back to JS values. + for (int i = 0; i < wasm_count; ++i) { + Node* param = Param(i + 1); // Start from index 1 to skip receiver. + args[pos++] = + ToJS(FromJS(param, context, sig_->GetParam(i)), sig_->GetParam(i)); + } + + args[pos++] = context; + args[pos++] = Effect(); + args[pos++] = Control(); + + DCHECK_EQ(pos, args.size()); + Node* call = SetEffect(graph()->NewNode( + mcgraph()->common()->Call(call_descriptor), pos, args.begin())); + + // TODO(wasm): Extend this to support multi-return. + DCHECK_LE(sig_->return_count(), 1); + + // Convert return JS values to wasm numbers and back to JS values. + Node* jsval = + sig_->return_count() == 0 + ? BuildLoadUndefinedValueFromInstance() + : ToJS(FromJS(call, context, sig_->GetReturn()), sig_->GetReturn()); + Return(jsval); } void BuildCWasmEntry() { @@ -5959,8 +6184,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder { kNoWriteBarrier); int wasm_arg_count = static_cast<int>(sig_->parameter_count()); - int arg_count = wasm_arg_count + 4; // code, object_ref, control, effect - Node** args = Buffer(arg_count); + Vector<Node*> args = Buffer(wasm_arg_count + 4); int pos = 0; args[pos++] = code_entry; @@ -5977,13 +6201,13 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder { args[pos++] = Effect(); args[pos++] = Control(); - DCHECK_EQ(arg_count, pos); // Call the wasm code. 
auto call_descriptor = GetWasmCallDescriptor(mcgraph()->zone(), sig_); + DCHECK_EQ(pos, args.size()); Node* call = SetEffect(graph()->NewNode( - mcgraph()->common()->Call(call_descriptor), arg_count, args)); + mcgraph()->common()->Call(call_descriptor), pos, args.begin())); Node* if_success = graph()->NewNode(mcgraph()->common()->IfSuccess(), call); Node* if_exception = @@ -6011,9 +6235,12 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder { pos++; } - Return(jsgraph()->SmiConstant(0)); + Return(mcgraph()->IntPtrConstant(0)); if (mcgraph()->machine()->Is32() && ContainsInt64(sig_)) { + // No special lowering should be requested in the C entry. + DCHECK_NULL(lowering_special_case_); + MachineRepresentation sig_reps[] = { MachineType::PointerRepresentation(), // return value MachineType::PointerRepresentation(), // target @@ -6028,11 +6255,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder { } } - JSGraph* jsgraph() { return jsgraph_; } - private: - Isolate* const isolate_; - JSGraph* jsgraph_; StubCallMode stub_mode_; SetOncePointer<Node> undefined_value_node_; SetOncePointer<const Operator> allocate_heap_number_operator_; @@ -6058,26 +6281,27 @@ void AppendSignature(char* buffer, size_t max_name_len, } // namespace std::unique_ptr<OptimizedCompilationJob> NewJSToWasmCompilationJob( - Isolate* isolate, wasm::FunctionSig* sig, bool is_import) { + Isolate* isolate, wasm::WasmEngine* wasm_engine, wasm::FunctionSig* sig, + bool is_import, const wasm::WasmFeatures& enabled_features) { //---------------------------------------------------------------------------- // Create the Graph. //---------------------------------------------------------------------------- std::unique_ptr<Zone> zone = - base::make_unique<Zone>(isolate->allocator(), ZONE_NAME); + base::make_unique<Zone>(wasm_engine->allocator(), ZONE_NAME); Graph* graph = new (zone.get()) Graph(zone.get()); CommonOperatorBuilder common(zone.get()); MachineOperatorBuilder machine( zone.get(), MachineType::PointerRepresentation(), InstructionSelector::SupportedMachineOperatorFlags(), InstructionSelector::AlignmentRequirements()); - JSGraph jsgraph(isolate, graph, &common, nullptr, nullptr, &machine); + MachineGraph mcgraph(graph, &common, &machine); Node* control = nullptr; Node* effect = nullptr; - WasmWrapperGraphBuilder builder(zone.get(), &jsgraph, sig, nullptr, + WasmWrapperGraphBuilder builder(zone.get(), &mcgraph, sig, nullptr, StubCallMode::kCallCodeObject, - wasm::WasmFeaturesFromIsolate(isolate)); + enabled_features); builder.set_control_ptr(&control); builder.set_effect_ptr(&effect); builder.BuildJSToWasmWrapper(is_import); @@ -6095,13 +6319,13 @@ std::unique_ptr<OptimizedCompilationJob> NewJSToWasmCompilationJob( zone.get(), false, params + 1, CallDescriptor::kNoFlags); return Pipeline::NewWasmHeapStubCompilationJob( - isolate, incoming, std::move(zone), graph, Code::JS_TO_WASM_FUNCTION, - std::move(debug_name), WasmAssemblerOptions()); + isolate, wasm_engine, incoming, std::move(zone), graph, + Code::JS_TO_WASM_FUNCTION, std::move(debug_name), WasmAssemblerOptions()); } std::pair<WasmImportCallKind, Handle<JSReceiver>> ResolveWasmImportCall( Handle<JSReceiver> callable, wasm::FunctionSig* expected_sig, - bool has_bigint_feature) { + const wasm::WasmFeatures& enabled_features) { if (WasmExportedFunction::IsWasmExportedFunction(*callable)) { auto imported_function = Handle<WasmExportedFunction>::cast(callable); auto func_index = imported_function->function_index(); @@ -6136,7 +6360,7 @@ std::pair<WasmImportCallKind, 
Handle<JSReceiver>> ResolveWasmImportCall( return std::make_pair(WasmImportCallKind::kWasmToCapi, callable); } // Assuming we are calling to JS, check whether this would be a runtime error. - if (!wasm::IsJSCompatibleSignature(expected_sig, has_bigint_feature)) { + if (!wasm::IsJSCompatibleSignature(expected_sig, enabled_features)) { return std::make_pair(WasmImportCallKind::kRuntimeTypeError, callable); } // For JavaScript calls, determine whether the target has an arity match @@ -6176,10 +6400,7 @@ std::pair<WasmImportCallKind, Handle<JSReceiver>> ResolveWasmImportCall( COMPARE_SIG_FOR_BUILTIN_F64(Exp); COMPARE_SIG_FOR_BUILTIN_F64(Log); COMPARE_SIG_FOR_BUILTIN_F64(Atan2); - //=========================================================== - // TODO(8505): Math.pow for wasm does not match JS. - // COMPARE_SIG_FOR_BUILTIN_F64(Pow); - //=========================================================== + COMPARE_SIG_FOR_BUILTIN_F64(Pow); COMPARE_SIG_FOR_BUILTIN_F32_F64(Min); COMPARE_SIG_FOR_BUILTIN_F32_F64(Max); COMPARE_SIG_FOR_BUILTIN_F32_F64(Abs); @@ -6347,7 +6568,7 @@ wasm::WasmCompilationResult CompileWasmImportCallWrapper( &zone, MachineType::PointerRepresentation(), InstructionSelector::SupportedMachineOperatorFlags(), InstructionSelector::AlignmentRequirements()); - JSGraph jsgraph(nullptr, &graph, &common, nullptr, nullptr, &machine); + MachineGraph mcgraph(&graph, &common, &machine); Node* control = nullptr; Node* effect = nullptr; @@ -6355,7 +6576,7 @@ wasm::WasmCompilationResult CompileWasmImportCallWrapper( SourcePositionTable* source_position_table = source_positions ? new (&zone) SourcePositionTable(&graph) : nullptr; - WasmWrapperGraphBuilder builder(&zone, &jsgraph, sig, source_position_table, + WasmWrapperGraphBuilder builder(&zone, &mcgraph, sig, source_position_table, StubCallMode::kCallWasmRuntimeStub, env->enabled_features); builder.set_control_ptr(&control); @@ -6372,7 +6593,7 @@ wasm::WasmCompilationResult CompileWasmImportCallWrapper( incoming = GetI32WasmCallDescriptor(&zone, incoming); } wasm::WasmCompilationResult result = Pipeline::GenerateCodeForWasmNativeStub( - wasm_engine, incoming, &jsgraph, Code::WASM_TO_JS_FUNCTION, + wasm_engine, incoming, &mcgraph, Code::WASM_TO_JS_FUNCTION, wasm::WasmCode::kWasmToJsWrapper, func_name, WasmStubAssemblerOptions(), source_position_table); result.kind = wasm::WasmCompilationResult::kWasmToJsWrapper; @@ -6395,10 +6616,8 @@ wasm::WasmCode* CompileWasmCapiCallWrapper(wasm::WasmEngine* wasm_engine, &zone, MachineType::PointerRepresentation(), InstructionSelector::SupportedMachineOperatorFlags(), InstructionSelector::AlignmentRequirements())); - JSGraph jsgraph(nullptr, mcgraph->graph(), mcgraph->common(), nullptr, - nullptr, mcgraph->machine()); - WasmWrapperGraphBuilder builder(&zone, &jsgraph, sig, source_positions, + WasmWrapperGraphBuilder builder(&zone, mcgraph, sig, source_positions, StubCallMode::kCallWasmRuntimeStub, native_module->enabled_features()); @@ -6448,12 +6667,12 @@ wasm::WasmCompilationResult CompileWasmInterpreterEntry( &zone, MachineType::PointerRepresentation(), InstructionSelector::SupportedMachineOperatorFlags(), InstructionSelector::AlignmentRequirements()); - JSGraph jsgraph(nullptr, &graph, &common, nullptr, nullptr, &machine); + MachineGraph mcgraph(&graph, &common, &machine); Node* control = nullptr; Node* effect = nullptr; - WasmWrapperGraphBuilder builder(&zone, &jsgraph, sig, nullptr, + WasmWrapperGraphBuilder builder(&zone, &mcgraph, sig, nullptr, StubCallMode::kCallWasmRuntimeStub, enabled_features); 
builder.set_control_ptr(&control); @@ -6471,7 +6690,7 @@ wasm::WasmCompilationResult CompileWasmInterpreterEntry( SNPrintF(func_name, "wasm-interpreter-entry#%d", func_index)); wasm::WasmCompilationResult result = Pipeline::GenerateCodeForWasmNativeStub( - wasm_engine, incoming, &jsgraph, Code::WASM_INTERPRETER_ENTRY, + wasm_engine, incoming, &mcgraph, Code::WASM_INTERPRETER_ENTRY, wasm::WasmCode::kInterpreterEntry, func_name.begin(), WasmStubAssemblerOptions()); result.result_tier = wasm::ExecutionTier::kInterpreter; @@ -6480,6 +6699,54 @@ wasm::WasmCompilationResult CompileWasmInterpreterEntry( return result; } +MaybeHandle<Code> CompileJSToJSWrapper(Isolate* isolate, + wasm::FunctionSig* sig) { + std::unique_ptr<Zone> zone = + base::make_unique<Zone>(isolate->allocator(), ZONE_NAME); + Graph* graph = new (zone.get()) Graph(zone.get()); + CommonOperatorBuilder common(zone.get()); + MachineOperatorBuilder machine( + zone.get(), MachineType::PointerRepresentation(), + InstructionSelector::SupportedMachineOperatorFlags(), + InstructionSelector::AlignmentRequirements()); + MachineGraph mcgraph(graph, &common, &machine); + + Node* control = nullptr; + Node* effect = nullptr; + + WasmWrapperGraphBuilder builder(zone.get(), &mcgraph, sig, nullptr, + StubCallMode::kCallCodeObject, + wasm::WasmFeaturesFromIsolate(isolate)); + builder.set_control_ptr(&control); + builder.set_effect_ptr(&effect); + builder.BuildJSToJSWrapper(isolate); + + int wasm_count = static_cast<int>(sig->parameter_count()); + CallDescriptor* incoming = Linkage::GetJSCallDescriptor( + zone.get(), false, wasm_count + 1, CallDescriptor::kNoFlags); + + // Build a name in the form "js-to-js-wrapper:<params>:<returns>". + static constexpr size_t kMaxNameLen = 128; + auto debug_name = std::unique_ptr<char[]>(new char[kMaxNameLen]); + memcpy(debug_name.get(), "js-to-js-wrapper:", 18); + AppendSignature(debug_name.get(), kMaxNameLen, sig); + + // Run the compilation job synchronously. + std::unique_ptr<OptimizedCompilationJob> job( + Pipeline::NewWasmHeapStubCompilationJob( + isolate, isolate->wasm_engine(), incoming, std::move(zone), graph, + Code::JS_TO_JS_FUNCTION, std::move(debug_name), + AssemblerOptions::Default(isolate))); + + if (job->ExecuteJob() == CompilationJob::FAILED || + job->FinalizeJob(isolate) == CompilationJob::FAILED) { + return {}; + } + Handle<Code> code = job->compilation_info()->code(); + + return code; +} + MaybeHandle<Code> CompileCWasmEntry(Isolate* isolate, wasm::FunctionSig* sig) { std::unique_ptr<Zone> zone = base::make_unique<Zone>(isolate->allocator(), ZONE_NAME); @@ -6489,12 +6756,12 @@ MaybeHandle<Code> CompileCWasmEntry(Isolate* isolate, wasm::FunctionSig* sig) { zone.get(), MachineType::PointerRepresentation(), InstructionSelector::SupportedMachineOperatorFlags(), InstructionSelector::AlignmentRequirements()); - JSGraph jsgraph(isolate, graph, &common, nullptr, nullptr, &machine); + MachineGraph mcgraph(graph, &common, &machine); Node* control = nullptr; Node* effect = nullptr; - WasmWrapperGraphBuilder builder(zone.get(), &jsgraph, sig, nullptr, + WasmWrapperGraphBuilder builder(zone.get(), &mcgraph, sig, nullptr, StubCallMode::kCallCodeObject, wasm::WasmFeaturesFromIsolate(isolate)); builder.set_control_ptr(&control); @@ -6510,9 +6777,9 @@ MaybeHandle<Code> CompileCWasmEntry(Isolate* isolate, wasm::FunctionSig* sig) { MachineSignature incoming_sig(1, 4, sig_types); // Traps need the root register, for TailCallRuntimeWithCEntry to call // Runtime::kThrowWasmError. 
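// Editorial note on the hunk below (an aside, not part of the diff): the
// C wasm entry is reached from plain C++ code, which keeps no V8 root
// register alive, so the descriptor must request that the prologue
// materialize it before any V8 code (such as the trap path named above)
// depends on it. The change itself is mechanical, moving from a bool
// parameter to a CallDescriptor::Flags bit:
//
//   CallDescriptor::Flags flags = CallDescriptor::kInitializeRootRegister;
//   CallDescriptor* incoming =
//       Linkage::GetSimplifiedCDescriptor(zone.get(), &incoming_sig, flags);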
@@ -6489,12 +6756,12 @@ MaybeHandle<Code> CompileCWasmEntry(Isolate* isolate, wasm::FunctionSig* sig) {
       zone.get(), MachineType::PointerRepresentation(),
       InstructionSelector::SupportedMachineOperatorFlags(),
       InstructionSelector::AlignmentRequirements());
-  JSGraph jsgraph(isolate, graph, &common, nullptr, nullptr, &machine);
+  MachineGraph mcgraph(graph, &common, &machine);

   Node* control = nullptr;
   Node* effect = nullptr;

-  WasmWrapperGraphBuilder builder(zone.get(), &jsgraph, sig, nullptr,
+  WasmWrapperGraphBuilder builder(zone.get(), &mcgraph, sig, nullptr,
                                   StubCallMode::kCallCodeObject,
                                   wasm::WasmFeaturesFromIsolate(isolate));
   builder.set_control_ptr(&control);
@@ -6510,9 +6777,9 @@ MaybeHandle<Code> CompileCWasmEntry(Isolate* isolate, wasm::FunctionSig* sig) {
   MachineSignature incoming_sig(1, 4, sig_types);
   // Traps need the root register, for TailCallRuntimeWithCEntry to call
   // Runtime::kThrowWasmError.
-  bool initialize_root_flag = true;
-  CallDescriptor* incoming = Linkage::GetSimplifiedCDescriptor(
-      zone.get(), &incoming_sig, initialize_root_flag);
+  CallDescriptor::Flags flags = CallDescriptor::kInitializeRootRegister;
+  CallDescriptor* incoming =
+      Linkage::GetSimplifiedCDescriptor(zone.get(), &incoming_sig, flags);

   // Build a name in the form "c-wasm-entry:<params>:<returns>".
   static constexpr size_t kMaxNameLen = 128;
@@ -6523,11 +6790,11 @@ MaybeHandle<Code> CompileCWasmEntry(Isolate* isolate, wasm::FunctionSig* sig) {
   // Run the compilation job synchronously.
   std::unique_ptr<OptimizedCompilationJob> job(
       Pipeline::NewWasmHeapStubCompilationJob(
-          isolate, incoming, std::move(zone), graph, Code::C_WASM_ENTRY,
-          std::move(debug_name), AssemblerOptions::Default(isolate)));
+          isolate, isolate->wasm_engine(), incoming, std::move(zone), graph,
+          Code::C_WASM_ENTRY, std::move(debug_name),
+          AssemblerOptions::Default(isolate)));

-  if (job->PrepareJob(isolate) == CompilationJob::FAILED ||
-      job->ExecuteJob() == CompilationJob::FAILED ||
+  if (job->ExecuteJob() == CompilationJob::FAILED ||
       job->FinalizeJob(isolate) == CompilationJob::FAILED) {
     return {};
   }
@@ -6536,6 +6803,8 @@ MaybeHandle<Code> CompileCWasmEntry(Isolate* isolate, wasm::FunctionSig* sig) {
   return code;
 }

+namespace {
+
 bool BuildGraphForWasmFunction(AccountingAllocator* allocator,
                                wasm::CompilationEnv* env,
                                const wasm::FunctionBody& func_body,
@@ -6558,12 +6827,13 @@ bool BuildGraphForWasmFunction(AccountingAllocator* allocator,
     return false;
   }

-  builder.LowerInt64();
+  builder.LowerInt64(WasmWrapperGraphBuilder::kCalledFromWasm);

   if (builder.has_simd() &&
       (!CpuFeatures::SupportsWasmSimd128() || env->lower_simd)) {
-    SimdScalarLowering(mcgraph,
-                       CreateMachineSignature(mcgraph->zone(), func_body.sig))
+    SimdScalarLowering(
+        mcgraph, CreateMachineSignature(mcgraph->zone(), func_body.sig,
+                                        WasmGraphBuilder::kCalledFromWasm))
         .LowerGraph();
   }

@@ -6574,7 +6844,6 @@ bool BuildGraphForWasmFunction(AccountingAllocator* allocator,
   return true;
 }

-namespace {
 Vector<const char> GetDebugName(Zone* zone, int index) {
   // TODO(herhut): Use name from module if available.
   constexpr int kBufferLength = 24;
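LowerInt64 and CreateMachineSignature now carry a CallOrigin, and the header changes below add an Int64LoweringSpecialCase member; presumably kCalledFromJS selects a lowering in which i64 values crossing the JS boundary travel in BigInt-compatible form, while kCalledFromWasm keeps the plain word-pair lowering used above. A sketch of the two variants, with names taken from this diff:

    // Function bodies reached only from wasm keep the ordinary lowering:
    builder.LowerInt64(WasmGraphBuilder::kCalledFromWasm);
    // JS-facing wrappers would use the other origin (assumption, hedged):
    builder.LowerInt64(WasmGraphBuilder::kCalledFromJS);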
@@ -6587,6 +6856,7 @@ Vector<const char> GetDebugName(Zone* zone, int index) {
   memcpy(index_name, name_vector.begin(), name_len);
   return Vector<const char>(index_name, name_len);
 }
+
 }  // namespace

 wasm::WasmCompilationResult ExecuteTurbofanWasmCompilation(
@@ -6755,7 +7025,7 @@ CallDescriptor* GetWasmCallDescriptor(
                                 wasm::kFpReturnRegisters);

   int parameter_slots = params.NumStackSlots();
-  if (kPadArguments) parameter_slots = RoundUp(parameter_slots, 2);
+  if (ShouldPadArguments(parameter_slots)) parameter_slots++;

   rets.SetStackOffset(parameter_slots);

@@ -6803,7 +7073,7 @@ CallDescriptor* GetWasmCallDescriptor(
 namespace {
 CallDescriptor* ReplaceTypeInCallDescriptorWith(
-    Zone* zone, CallDescriptor* call_descriptor, size_t num_replacements,
+    Zone* zone, const CallDescriptor* call_descriptor, size_t num_replacements,
     MachineType input_type, MachineRepresentation output_type) {
   size_t parameter_count = call_descriptor->ParameterCount();
   size_t return_count = call_descriptor->ReturnCount();
@@ -6819,14 +7089,23 @@ CallDescriptor* ReplaceTypeInCallDescriptorWith(
   }
   if (parameter_count == call_descriptor->ParameterCount() &&
       return_count == call_descriptor->ReturnCount()) {
-    return call_descriptor;
+    return const_cast<CallDescriptor*>(call_descriptor);
   }

   LocationSignature::Builder locations(zone, return_count, parameter_count);

+  // The last parameter may be the special callable parameter. In that case we
+  // have to preserve it as the last parameter, i.e. we allocate it in the new
+  // location signature again in the same register.
+  bool has_callable_param =
+      (call_descriptor->GetInputLocation(call_descriptor->InputCount() - 1) ==
+       LinkageLocation::ForRegister(kJSFunctionRegister.code(),
+                                    MachineType::TaggedPointer()));
   LinkageLocationAllocator params(wasm::kGpParamRegisters,
                                   wasm::kFpParamRegisters);
-  for (size_t i = 0; i < call_descriptor->ParameterCount(); i++) {
+  for (size_t i = 0, e = call_descriptor->ParameterCount() -
+                         (has_callable_param ? 1 : 0);
+       i < e; i++) {
     if (call_descriptor->GetParameterType(i) == input_type) {
       for (size_t j = 0; j < num_replacements; j++) {
         locations.AddParam(params.Next(output_type));
@@ -6836,6 +7115,10 @@ CallDescriptor* ReplaceTypeInCallDescriptorWith(
           params.Next(call_descriptor->GetParameterType(i).representation()));
     }
   }
+  if (has_callable_param) {
+    locations.AddParam(LinkageLocation::ForRegister(
+        kJSFunctionRegister.code(), MachineType::TaggedPointer()));
+  }

   LinkageLocationAllocator rets(wasm::kGpReturnRegisters,
                                 wasm::kFpReturnRegisters);
@@ -6867,8 +7150,8 @@ CallDescriptor* ReplaceTypeInCallDescriptorWith(
   }
 }  // namespace

-CallDescriptor* GetI32WasmCallDescriptor(Zone* zone,
-                                         CallDescriptor* call_descriptor) {
+CallDescriptor* GetI32WasmCallDescriptor(
+    Zone* zone, const CallDescriptor* call_descriptor) {
   return ReplaceTypeInCallDescriptorWith(zone, call_descriptor, 2,
                                          MachineType::Int64(),
                                          MachineRepresentation::kWord32);
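That closes the wasm-compiler.cc changes. The descriptor-rewriting hunks make ReplaceTypeInCallDescriptorWith const-correct and teach it to keep the trailing callable parameter of import wrappers pinned to kJSFunctionRegister. Roughly, on a 32-bit target GetI32WasmCallDescriptor rewrites signatures as sketched below (the concrete types are illustrative; only the splitting factor 2 and kWord32 come from the diff):

    // (i64, f32) -> i64   becomes   (i32, i32, f32) -> (i32, i32),
    // and a trailing callable parameter, when detected, is re-added
    // unchanged in kJSFunctionRegister after all other parameters.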
diff --git a/deps/v8/src/compiler/wasm-compiler.h b/deps/v8/src/compiler/wasm-compiler.h
index 315733c396..dd86ea1499 100644
--- a/deps/v8/src/compiler/wasm-compiler.h
+++ b/deps/v8/src/compiler/wasm-compiler.h
@@ -34,6 +34,7 @@ class Operator;
 class SourcePositionTable;
 class WasmDecorator;
 enum class TrapId : uint32_t;
+struct Int64LoweringSpecialCase;
 }  // namespace compiler

 namespace wasm {
@@ -47,14 +48,6 @@ struct WasmFeatures;

 namespace compiler {

-bool BuildGraphForWasmFunction(AccountingAllocator* allocator,
-                               wasm::CompilationEnv* env,
-                               const wasm::FunctionBody& func_body,
-                               int func_index, wasm::WasmFeatures* detected,
-                               MachineGraph* mcgraph,
-                               NodeOriginTable* node_origins,
-                               SourcePositionTable* source_positions);
-
 wasm::WasmCompilationResult ExecuteTurbofanWasmCompilation(
     wasm::WasmEngine*, wasm::CompilationEnv*, const wasm::FunctionBody&,
     int func_index, Counters*, wasm::WasmFeatures* detected);
@@ -117,7 +110,7 @@ constexpr WasmImportCallKind kDefaultImportCallKind =
 // another target, which is why the ultimate target is returned as well.
 V8_EXPORT_PRIVATE std::pair<WasmImportCallKind, Handle<JSReceiver>>
 ResolveWasmImportCall(Handle<JSReceiver> callable, wasm::FunctionSig* sig,
-                      bool has_bigint_feature);
+                      const wasm::WasmFeatures& enabled_features);

 // Compiles an import call wrapper, which allows WASM to call imports.
 V8_EXPORT_PRIVATE wasm::WasmCompilationResult CompileWasmImportCallWrapper(
@@ -131,7 +124,8 @@ wasm::WasmCode* CompileWasmCapiCallWrapper(wasm::WasmEngine*,

 // Returns an OptimizedCompilationJob object for a JS to Wasm wrapper.
 std::unique_ptr<OptimizedCompilationJob> NewJSToWasmCompilationJob(
-    Isolate* isolate, wasm::FunctionSig* sig, bool is_import);
+    Isolate* isolate, wasm::WasmEngine* wasm_engine, wasm::FunctionSig* sig,
+    bool is_import, const wasm::WasmFeatures& enabled_features);

 // Compiles a stub that redirects a call to a wasm function to the wasm
 // interpreter. It's ABI compatible with the compiled wasm function.
@@ -139,6 +133,12 @@ V8_EXPORT_PRIVATE wasm::WasmCompilationResult CompileWasmInterpreterEntry(
     wasm::WasmEngine*, const wasm::WasmFeatures& enabled_features,
     uint32_t func_index, wasm::FunctionSig*);

+// Compiles a stub with JS linkage that serves as an adapter for function
+// objects constructed via {WebAssembly.Function}. It performs a round-trip
+// simulating a JS-to-Wasm-to-JS coercion of parameter and return values.
+MaybeHandle<Code> CompileJSToJSWrapper(Isolate* isolate,
+                                       wasm::FunctionSig* sig);
+
 enum CWasmEntryParameters {
   kCodeEntry,
   kObjectRef,
@@ -179,14 +179,14 @@ class WasmGraphBuilder {
                   wasm::CompilationEnv* env, Zone* zone, MachineGraph* mcgraph,
                   wasm::FunctionSig* sig,
                   compiler::SourcePositionTable* spt = nullptr);

-  Node** Buffer(size_t count) {
+  Vector<Node*> Buffer(size_t count) {
     if (count > cur_bufsize_) {
       size_t new_size = count + cur_bufsize_ + 5;
       cur_buffer_ =
           reinterpret_cast<Node**>(zone_->New(new_size * sizeof(Node*)));
       cur_bufsize_ = new_size;
     }
-    return cur_buffer_;
+    return {cur_buffer_, count};
   }

   //-----------------------------------------------------------------------
@@ -223,8 +223,8 @@ class WasmGraphBuilder {
   Node* ExceptionTagEqual(Node* caught_tag, Node* expected_tag);
   Node* LoadExceptionTagFromTable(uint32_t exception_index);
   Node* GetExceptionTag(Node* except_obj);
-  Node** GetExceptionValues(Node* except_obj,
-                            const wasm::WasmException* exception);
+  Vector<Node*> GetExceptionValues(Node* except_obj,
+                                   const wasm::WasmException* exception);
   bool IsPhiWithMerge(Node* phi, Node* merge);
   bool ThrowsException(Node* node, Node** if_success, Node** if_exception);
   void AppendToMerge(Node* merge, Node* from);
@@ -267,13 +267,12 @@ class WasmGraphBuilder {
   Node* Switch(unsigned count, Node* key);
   Node* IfValue(int32_t value, Node* sw);
   Node* IfDefault(Node* sw);
-  Node* Return(unsigned count, Node** nodes);
+  Node* Return(Vector<Node*> nodes);
   template <typename... Nodes>
   Node* Return(Node* fst, Nodes*... more) {
     Node* arr[] = {fst, more...};
-    return Return(arraysize(arr), arr);
+    return Return(ArrayVector(arr));
   }
-  Node* ReturnVoid();
   Node* Unreachable(wasm::WasmCodePosition position);

   Node* CallDirect(uint32_t index, Node** args, Node*** rets,
@@ -364,7 +363,9 @@ class WasmGraphBuilder {

   wasm::FunctionSig* GetFunctionSignature() { return sig_; }

-  V8_EXPORT_PRIVATE void LowerInt64();
+  enum CallOrigin { kCalledFromWasm, kCalledFromJS };
+
+  V8_EXPORT_PRIVATE void LowerInt64(CallOrigin origin);

   V8_EXPORT_PRIVATE void SimdScalarLoweringForTesting();

@@ -379,9 +380,6 @@ class WasmGraphBuilder {
   Node* SimdLaneOp(wasm::WasmOpcode opcode, uint8_t lane,
                    Node* const* inputs);

-  Node* SimdShiftOp(wasm::WasmOpcode opcode, uint8_t shift,
-                    Node* const* inputs);
-
   Node* Simd8x16ShuffleOp(const uint8_t shuffle[16], Node* const* inputs);

   Node* AtomicOp(wasm::WasmOpcode opcode, Node* const* inputs,
@@ -443,6 +441,7 @@ class WasmGraphBuilder {
   SetOncePointer<Node> globals_start_;
   SetOncePointer<Node> imported_mutable_globals_;
   SetOncePointer<Node> stack_check_code_node_;
+  SetOncePointer<Node> isolate_root_node_;
   SetOncePointer<const Operator> stack_check_call_operator_;

   Node** cur_buffer_;
@@ -458,8 +457,12 @@ class WasmGraphBuilder {

   compiler::SourcePositionTable* const source_position_table_ = nullptr;

+  std::unique_ptr<Int64LoweringSpecialCase> lowering_special_case_;
+
   Node* NoContextConstant();

+  Node* BuildLoadIsolateRoot();
+
   Node* MemBuffer(uint32_t offset);

   // BoundsCheckMem receives a uint32 {index} node and returns a ptrsize index.
   Node* BoundsCheckMem(uint8_t access_size, Node* index, uint32_t offset,
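With Buffer, Return, and GetExceptionValues switched from raw Node** to bounds-carrying Vector<Node*>, call sites gain a length for free. A sketch of the reworked API from a caller's perspective, assuming a WasmGraphBuilder `builder` and a signature `sig` are in scope:

    // Sketch only: populate and return all results of a function.
    Vector<Node*> rets = builder.Buffer(sig->return_count());
    // ... fill rets[0] .. rets[sig->return_count() - 1] ...
    builder.Return(rets);
    // ReturnVoid() is gone; an empty Vector presumably covers that case.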
@@ -596,9 +599,13 @@ class WasmGraphBuilder {
   Node* BuildDecodeException32BitValue(Node* values_array, uint32_t* index);
   Node* BuildDecodeException64BitValue(Node* values_array, uint32_t* index);

-  Node** Realloc(Node* const* buffer, size_t old_count, size_t new_count) {
-    Node** buf = Buffer(new_count);
-    if (buf != buffer) memcpy(buf, buffer, old_count * sizeof(Node*));
+  Vector<Node*> Realloc(Node* const* buffer, size_t old_count,
+                        size_t new_count) {
+    DCHECK_GE(new_count, old_count);  // Only support growing.
+    Vector<Node*> buf = Buffer(new_count);
+    if (buf.begin() != buffer) {
+      memcpy(buf.begin(), buffer, old_count * sizeof(Node*));
+    }
     return buf;
   }

@@ -624,7 +631,7 @@ V8_EXPORT_PRIVATE CallDescriptor* GetWasmCallDescriptor(
     WasmCallKind kind = kWasmFunction);

 V8_EXPORT_PRIVATE CallDescriptor* GetI32WasmCallDescriptor(
-    Zone* zone, CallDescriptor* call_descriptor);
+    Zone* zone, const CallDescriptor* call_descriptor);

 V8_EXPORT_PRIVATE CallDescriptor* GetI32WasmCallDescriptorForSimd(
     Zone* zone, CallDescriptor* call_descriptor);
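The reworked Realloc above only supports growing (enforced by the DCHECK_GE) and may hand back a Vector aliasing the old buffer when the existing zone allocation was already large enough. A sketch of the contract, with names from the diff:

    // new_count >= old_count is required; shrinking would trip the DCHECK.
    Vector<Node*> grown = Realloc(values, old_count, new_count);
    // If grown.begin() == values, the buffer was reused and nothing was
    // copied; otherwise the first old_count entries were memcpy'd over.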