From 2dcc3665abf57c3607cebffdeeca062f5894885d Mon Sep 17 00:00:00 2001 From: Michaël Zasso Date: Thu, 1 Aug 2019 08:38:30 +0200 Subject: deps: update V8 to 7.6.303.28 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit PR-URL: https://github.com/nodejs/node/pull/28016 Reviewed-By: Colin Ihrig Reviewed-By: Refael Ackermann (רפאל פלחי) Reviewed-By: Rich Trott Reviewed-By: Michael Dawson Reviewed-By: Jiawen Geng --- deps/v8/src/objects/allocation-site-inl.h | 22 +- deps/v8/src/objects/allocation-site-scopes-inl.h | 59 + deps/v8/src/objects/allocation-site-scopes.h | 69 + deps/v8/src/objects/allocation-site.h | 45 +- deps/v8/src/objects/api-callbacks-inl.h | 10 +- deps/v8/src/objects/arguments-inl.h | 20 +- deps/v8/src/objects/arguments.h | 2 +- deps/v8/src/objects/bigint.cc | 64 +- deps/v8/src/objects/bigint.h | 8 +- deps/v8/src/objects/cell-inl.h | 2 +- deps/v8/src/objects/cell.h | 2 +- deps/v8/src/objects/code-inl.h | 207 +- deps/v8/src/objects/code.cc | 143 +- deps/v8/src/objects/code.h | 27 +- deps/v8/src/objects/compilation-cache-inl.h | 32 +- deps/v8/src/objects/compilation-cache.h | 2 +- deps/v8/src/objects/compressed-slots-inl.h | 28 +- deps/v8/src/objects/contexts-inl.h | 256 + deps/v8/src/objects/contexts.cc | 512 ++ deps/v8/src/objects/contexts.h | 720 ++ deps/v8/src/objects/data-handler-inl.h | 10 +- deps/v8/src/objects/debug-objects-inl.h | 12 +- deps/v8/src/objects/debug-objects.cc | 75 +- deps/v8/src/objects/debug-objects.h | 37 +- deps/v8/src/objects/descriptor-array-inl.h | 34 +- deps/v8/src/objects/descriptor-array.h | 27 +- deps/v8/src/objects/dictionary-inl.h | 50 +- deps/v8/src/objects/dictionary.h | 16 +- deps/v8/src/objects/elements-inl.h | 38 + deps/v8/src/objects/elements-kind.cc | 266 + deps/v8/src/objects/elements-kind.h | 317 + deps/v8/src/objects/elements.cc | 4798 ++++++++++++ deps/v8/src/objects/elements.h | 241 + deps/v8/src/objects/embedder-data-array.cc | 2 +- 
deps/v8/src/objects/embedder-data-array.h | 6 +- deps/v8/src/objects/embedder-data-slot-inl.h | 8 +- deps/v8/src/objects/embedder-data-slot.h | 4 +- deps/v8/src/objects/feedback-cell-inl.h | 13 +- deps/v8/src/objects/feedback-cell.h | 19 +- deps/v8/src/objects/feedback-vector-inl.h | 354 + deps/v8/src/objects/feedback-vector.cc | 1420 ++++ deps/v8/src/objects/feedback-vector.h | 772 ++ deps/v8/src/objects/field-index-inl.h | 73 + deps/v8/src/objects/field-index.h | 127 + deps/v8/src/objects/field-type.cc | 87 + deps/v8/src/objects/field-type.h | 56 + deps/v8/src/objects/fixed-array-inl.h | 523 +- deps/v8/src/objects/fixed-array.h | 153 +- deps/v8/src/objects/foreign-inl.h | 8 +- deps/v8/src/objects/foreign.h | 2 +- deps/v8/src/objects/frame-array-inl.h | 6 +- deps/v8/src/objects/frame-array.h | 2 +- deps/v8/src/objects/free-space-inl.h | 10 +- deps/v8/src/objects/free-space.h | 2 +- deps/v8/src/objects/function-kind.h | 194 + deps/v8/src/objects/hash-table-inl.h | 8 +- deps/v8/src/objects/hash-table.h | 6 +- deps/v8/src/objects/heap-number-inl.h | 18 +- deps/v8/src/objects/heap-object-inl.h | 11 +- deps/v8/src/objects/heap-object.h | 14 +- deps/v8/src/objects/instance-type-inl.h | 15 +- deps/v8/src/objects/instance-type.h | 40 +- deps/v8/src/objects/intl-objects.cc | 223 +- deps/v8/src/objects/intl-objects.h | 34 +- deps/v8/src/objects/intl-objects.tq | 64 + deps/v8/src/objects/js-array-buffer-inl.h | 71 +- deps/v8/src/objects/js-array-buffer.cc | 52 +- deps/v8/src/objects/js-array-buffer.h | 56 +- deps/v8/src/objects/js-array-inl.h | 10 +- deps/v8/src/objects/js-array.h | 2 +- deps/v8/src/objects/js-break-iterator-inl.h | 2 +- deps/v8/src/objects/js-break-iterator.cc | 10 +- deps/v8/src/objects/js-break-iterator.h | 2 +- deps/v8/src/objects/js-collator-inl.h | 2 +- deps/v8/src/objects/js-collator.cc | 6 +- deps/v8/src/objects/js-collator.h | 4 +- deps/v8/src/objects/js-collection-inl.h | 8 +- deps/v8/src/objects/js-collection-iterator.h | 4 +- 
deps/v8/src/objects/js-collection.h | 14 +- deps/v8/src/objects/js-date-time-format-inl.h | 8 +- deps/v8/src/objects/js-date-time-format.cc | 349 +- deps/v8/src/objects/js-date-time-format.h | 15 +- deps/v8/src/objects/js-generator-inl.h | 2 +- deps/v8/src/objects/js-list-format-inl.h | 4 +- deps/v8/src/objects/js-list-format.cc | 39 +- deps/v8/src/objects/js-list-format.h | 15 +- deps/v8/src/objects/js-locale-inl.h | 6 +- deps/v8/src/objects/js-locale.cc | 215 +- deps/v8/src/objects/js-locale.h | 14 +- deps/v8/src/objects/js-number-format-inl.h | 50 +- deps/v8/src/objects/js-number-format.cc | 1151 ++- deps/v8/src/objects/js-number-format.h | 101 +- deps/v8/src/objects/js-objects-inl.h | 288 +- deps/v8/src/objects/js-objects.cc | 926 ++- deps/v8/src/objects/js-objects.h | 81 +- deps/v8/src/objects/js-plural-rules-inl.h | 8 +- deps/v8/src/objects/js-plural-rules.cc | 31 +- deps/v8/src/objects/js-plural-rules.h | 17 +- deps/v8/src/objects/js-promise-inl.h | 4 +- deps/v8/src/objects/js-proxy-inl.h | 4 +- deps/v8/src/objects/js-proxy.h | 2 +- deps/v8/src/objects/js-regexp-inl.h | 32 +- .../v8/src/objects/js-regexp-string-iterator-inl.h | 2 +- deps/v8/src/objects/js-regexp.h | 8 +- deps/v8/src/objects/js-relative-time-format-inl.h | 4 +- deps/v8/src/objects/js-relative-time-format.cc | 56 +- deps/v8/src/objects/js-relative-time-format.h | 14 +- deps/v8/src/objects/js-segment-iterator-inl.h | 4 +- deps/v8/src/objects/js-segment-iterator.cc | 16 +- deps/v8/src/objects/js-segment-iterator.h | 16 +- deps/v8/src/objects/js-segmenter-inl.h | 4 +- deps/v8/src/objects/js-segmenter.cc | 4 +- deps/v8/src/objects/js-segmenter.h | 16 +- deps/v8/src/objects/js-weak-refs-inl.h | 100 +- deps/v8/src/objects/keys.cc | 1017 +++ deps/v8/src/objects/keys.h | 175 + deps/v8/src/objects/layout-descriptor-inl.h | 253 + deps/v8/src/objects/layout-descriptor.cc | 288 + deps/v8/src/objects/layout-descriptor.h | 175 + deps/v8/src/objects/literal-objects-inl.h | 4 +- 
deps/v8/src/objects/literal-objects.cc | 50 +- deps/v8/src/objects/lookup-cache-inl.h | 43 + deps/v8/src/objects/lookup-cache.cc | 15 + deps/v8/src/objects/lookup-cache.h | 60 + deps/v8/src/objects/lookup-inl.h | 194 + deps/v8/src/objects/lookup.cc | 1215 +++ deps/v8/src/objects/lookup.h | 281 + deps/v8/src/objects/managed.h | 8 +- deps/v8/src/objects/map-inl.h | 142 +- deps/v8/src/objects/map-updater.cc | 805 ++ deps/v8/src/objects/map-updater.h | 205 + deps/v8/src/objects/map.cc | 672 +- deps/v8/src/objects/map.h | 83 +- deps/v8/src/objects/maybe-object-inl.h | 123 +- deps/v8/src/objects/maybe-object.h | 126 +- deps/v8/src/objects/microtask-inl.h | 2 +- deps/v8/src/objects/microtask.h | 2 +- deps/v8/src/objects/module-inl.h | 16 +- deps/v8/src/objects/module.cc | 102 +- deps/v8/src/objects/module.h | 2 +- deps/v8/src/objects/name-inl.h | 24 +- deps/v8/src/objects/name.h | 6 +- deps/v8/src/objects/object-list-macros.h | 270 + deps/v8/src/objects/object-macros-undef.h | 32 +- deps/v8/src/objects/object-macros.h | 248 +- deps/v8/src/objects/objects-body-descriptors-inl.h | 1116 +++ deps/v8/src/objects/objects-body-descriptors.h | 186 + deps/v8/src/objects/objects-definitions.h | 400 + deps/v8/src/objects/objects-inl.h | 1039 +++ deps/v8/src/objects/objects.cc | 8200 ++++++++++++++++++++ deps/v8/src/objects/objects.h | 836 ++ deps/v8/src/objects/oddball-inl.h | 29 +- deps/v8/src/objects/oddball.h | 37 +- deps/v8/src/objects/ordered-hash-table-inl.h | 10 +- deps/v8/src/objects/ordered-hash-table.cc | 204 +- deps/v8/src/objects/ordered-hash-table.h | 10 +- deps/v8/src/objects/promise.h | 12 +- deps/v8/src/objects/property-array-inl.h | 13 +- deps/v8/src/objects/property-array.h | 6 +- deps/v8/src/objects/property-cell.h | 2 +- .../src/objects/property-descriptor-object-inl.h | 2 +- deps/v8/src/objects/property-descriptor-object.h | 2 +- deps/v8/src/objects/property-descriptor.cc | 370 + deps/v8/src/objects/property-descriptor.h | 134 + 
deps/v8/src/objects/property-details.h | 409 + deps/v8/src/objects/property.cc | 146 + deps/v8/src/objects/property.h | 75 + deps/v8/src/objects/prototype-info-inl.h | 10 +- deps/v8/src/objects/prototype-info.h | 2 +- deps/v8/src/objects/prototype-inl.h | 144 + deps/v8/src/objects/prototype.h | 89 + deps/v8/src/objects/regexp-match-info.h | 2 +- deps/v8/src/objects/scope-info.cc | 152 +- deps/v8/src/objects/scope-info.h | 15 +- deps/v8/src/objects/script-inl.h | 14 +- deps/v8/src/objects/script.h | 2 +- deps/v8/src/objects/shared-function-info-inl.h | 186 +- deps/v8/src/objects/shared-function-info.h | 64 +- deps/v8/src/objects/slots-atomic-inl.h | 1 + deps/v8/src/objects/slots-inl.h | 19 +- deps/v8/src/objects/slots.h | 4 +- deps/v8/src/objects/smi-inl.h | 4 +- deps/v8/src/objects/smi.h | 8 +- deps/v8/src/objects/stack-frame-info-inl.h | 8 +- deps/v8/src/objects/stack-frame-info.cc | 36 +- deps/v8/src/objects/stack-frame-info.h | 16 + deps/v8/src/objects/string-comparator.cc | 2 +- deps/v8/src/objects/string-comparator.h | 4 +- deps/v8/src/objects/string-inl.h | 265 +- deps/v8/src/objects/string-table-inl.h | 31 +- deps/v8/src/objects/string-table.h | 29 +- deps/v8/src/objects/string.cc | 349 +- deps/v8/src/objects/string.h | 126 +- deps/v8/src/objects/struct-inl.h | 25 +- deps/v8/src/objects/struct.h | 37 +- deps/v8/src/objects/tagged-impl-inl.h | 257 + deps/v8/src/objects/tagged-impl.cc | 39 + deps/v8/src/objects/tagged-impl.h | 181 + deps/v8/src/objects/tagged-value-inl.h | 39 + deps/v8/src/objects/tagged-value.h | 42 + deps/v8/src/objects/template-objects-inl.h | 2 +- deps/v8/src/objects/template-objects.cc | 16 +- deps/v8/src/objects/template-objects.h | 13 +- deps/v8/src/objects/templates-inl.h | 54 +- deps/v8/src/objects/templates.h | 18 +- deps/v8/src/objects/transitions-inl.h | 321 + deps/v8/src/objects/transitions.cc | 657 ++ deps/v8/src/objects/transitions.h | 350 + deps/v8/src/objects/type-hints.cc | 89 + deps/v8/src/objects/type-hints.h | 77 + 
deps/v8/src/objects/value-serializer.cc | 2213 ++++++ deps/v8/src/objects/value-serializer.h | 314 + deps/v8/src/objects/visitors.cc | 33 + deps/v8/src/objects/visitors.h | 157 + 214 files changed, 38146 insertions(+), 4520 deletions(-) create mode 100644 deps/v8/src/objects/allocation-site-scopes-inl.h create mode 100644 deps/v8/src/objects/allocation-site-scopes.h create mode 100644 deps/v8/src/objects/contexts-inl.h create mode 100644 deps/v8/src/objects/contexts.cc create mode 100644 deps/v8/src/objects/contexts.h create mode 100644 deps/v8/src/objects/elements-inl.h create mode 100644 deps/v8/src/objects/elements-kind.cc create mode 100644 deps/v8/src/objects/elements-kind.h create mode 100644 deps/v8/src/objects/elements.cc create mode 100644 deps/v8/src/objects/elements.h create mode 100644 deps/v8/src/objects/feedback-vector-inl.h create mode 100644 deps/v8/src/objects/feedback-vector.cc create mode 100644 deps/v8/src/objects/feedback-vector.h create mode 100644 deps/v8/src/objects/field-index-inl.h create mode 100644 deps/v8/src/objects/field-index.h create mode 100644 deps/v8/src/objects/field-type.cc create mode 100644 deps/v8/src/objects/field-type.h create mode 100644 deps/v8/src/objects/function-kind.h create mode 100644 deps/v8/src/objects/intl-objects.tq create mode 100644 deps/v8/src/objects/keys.cc create mode 100644 deps/v8/src/objects/keys.h create mode 100644 deps/v8/src/objects/layout-descriptor-inl.h create mode 100644 deps/v8/src/objects/layout-descriptor.cc create mode 100644 deps/v8/src/objects/layout-descriptor.h create mode 100644 deps/v8/src/objects/lookup-cache-inl.h create mode 100644 deps/v8/src/objects/lookup-cache.cc create mode 100644 deps/v8/src/objects/lookup-cache.h create mode 100644 deps/v8/src/objects/lookup-inl.h create mode 100644 deps/v8/src/objects/lookup.cc create mode 100644 deps/v8/src/objects/lookup.h create mode 100644 deps/v8/src/objects/map-updater.cc create mode 100644 deps/v8/src/objects/map-updater.h create 
mode 100644 deps/v8/src/objects/object-list-macros.h create mode 100644 deps/v8/src/objects/objects-body-descriptors-inl.h create mode 100644 deps/v8/src/objects/objects-body-descriptors.h create mode 100644 deps/v8/src/objects/objects-definitions.h create mode 100644 deps/v8/src/objects/objects-inl.h create mode 100644 deps/v8/src/objects/objects.cc create mode 100644 deps/v8/src/objects/objects.h create mode 100644 deps/v8/src/objects/property-descriptor.cc create mode 100644 deps/v8/src/objects/property-descriptor.h create mode 100644 deps/v8/src/objects/property-details.h create mode 100644 deps/v8/src/objects/property.cc create mode 100644 deps/v8/src/objects/property.h create mode 100644 deps/v8/src/objects/prototype-inl.h create mode 100644 deps/v8/src/objects/prototype.h create mode 100644 deps/v8/src/objects/tagged-impl-inl.h create mode 100644 deps/v8/src/objects/tagged-impl.cc create mode 100644 deps/v8/src/objects/tagged-impl.h create mode 100644 deps/v8/src/objects/tagged-value-inl.h create mode 100644 deps/v8/src/objects/tagged-value.h create mode 100644 deps/v8/src/objects/transitions-inl.h create mode 100644 deps/v8/src/objects/transitions.cc create mode 100644 deps/v8/src/objects/transitions.h create mode 100644 deps/v8/src/objects/type-hints.cc create mode 100644 deps/v8/src/objects/type-hints.h create mode 100644 deps/v8/src/objects/value-serializer.cc create mode 100644 deps/v8/src/objects/value-serializer.h create mode 100644 deps/v8/src/objects/visitors.cc create mode 100644 deps/v8/src/objects/visitors.h (limited to 'deps/v8/src/objects') diff --git a/deps/v8/src/objects/allocation-site-inl.h b/deps/v8/src/objects/allocation-site-inl.h index ac0a16c944..aaf0105e51 100644 --- a/deps/v8/src/objects/allocation-site-inl.h +++ b/deps/v8/src/objects/allocation-site-inl.h @@ -46,7 +46,7 @@ void AllocationSite::set_boilerplate(JSObject object, WriteBarrierMode mode) { int AllocationSite::transition_info() const { DCHECK(!PointsToLiteral()); - return 
Smi::cast(transition_info_or_boilerplate())->value(); + return Smi::cast(transition_info_or_boilerplate()).value(); } void AllocationSite::set_transition_info(int value) { @@ -105,9 +105,9 @@ void AllocationSite::SetDoNotInlineCall() { bool AllocationSite::PointsToLiteral() const { Object raw_value = transition_info_or_boilerplate(); - DCHECK_EQ(!raw_value->IsSmi(), - raw_value->IsJSArray() || raw_value->IsJSObject()); - return !raw_value->IsSmi(); + DCHECK_EQ(!raw_value.IsSmi(), + raw_value.IsJSArray() || raw_value.IsJSObject()); + return !raw_value.IsSmi(); } // Heuristic: We only need to create allocation site info if the boilerplate @@ -181,8 +181,8 @@ inline void AllocationSite::IncrementMementoCreateCount() { } bool AllocationMemento::IsValid() const { - return allocation_site()->IsAllocationSite() && - !AllocationSite::cast(allocation_site())->IsZombie(); + return allocation_site().IsAllocationSite() && + !AllocationSite::cast(allocation_site()).IsZombie(); } AllocationSite AllocationMemento::GetAllocationSite() const { @@ -191,7 +191,7 @@ AllocationSite AllocationMemento::GetAllocationSite() const { } Address AllocationMemento::GetAllocationSiteUnchecked() const { - return allocation_site()->ptr(); + return allocation_site().ptr(); } template @@ -200,7 +200,7 @@ bool AllocationSite::DigestTransitionFeedback(Handle site, Isolate* isolate = site->GetIsolate(); bool result = false; - if (site->PointsToLiteral() && site->boilerplate()->IsJSArray()) { + if (site->PointsToLiteral() && site->boilerplate().IsJSArray()) { Handle boilerplate(JSArray::cast(site->boilerplate()), isolate); ElementsKind kind = boilerplate->GetElementsKind(); // if kind is holey ensure that to_kind is as well. @@ -211,7 +211,7 @@ bool AllocationSite::DigestTransitionFeedback(Handle site, // If the array is huge, it's not likely to be defined in a local // function, so we shouldn't make new instances of it very often. 
uint32_t length = 0; - CHECK(boilerplate->length()->ToArrayLength(&length)); + CHECK(boilerplate->length().ToArrayLength(&length)); if (length <= kMaximumArrayBytesToPretransition) { if (update_or_check == AllocationSiteUpdateMode::kCheckOnly) { return true; @@ -224,7 +224,7 @@ bool AllocationSite::DigestTransitionFeedback(Handle site, ElementsKindToString(to_kind)); } JSObject::TransitionElementsKind(boilerplate, to_kind); - site->dependent_code()->DeoptimizeDependentCodeGroup( + site->dependent_code().DeoptimizeDependentCodeGroup( isolate, DependentCode::kAllocationSiteTransitionChangedGroup); result = true; } @@ -244,7 +244,7 @@ bool AllocationSite::DigestTransitionFeedback(Handle site, ElementsKindToString(to_kind)); } site->SetElementsKind(to_kind); - site->dependent_code()->DeoptimizeDependentCodeGroup( + site->dependent_code().DeoptimizeDependentCodeGroup( isolate, DependentCode::kAllocationSiteTransitionChangedGroup); result = true; } diff --git a/deps/v8/src/objects/allocation-site-scopes-inl.h b/deps/v8/src/objects/allocation-site-scopes-inl.h new file mode 100644 index 0000000000..350b243e46 --- /dev/null +++ b/deps/v8/src/objects/allocation-site-scopes-inl.h @@ -0,0 +1,59 @@ +// Copyright 2018 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef V8_OBJECTS_ALLOCATION_SITE_SCOPES_INL_H_ +#define V8_OBJECTS_ALLOCATION_SITE_SCOPES_INL_H_ + +#include "src/objects/allocation-site-scopes.h" + +#include "src/objects/allocation-site-inl.h" + +namespace v8 { +namespace internal { + +void AllocationSiteContext::InitializeTraversal(Handle site) { + top_ = site; + // {current_} is updated in place to not create unnecessary Handles, hence + // we initially need a separate handle. 
+ current_ = Handle::New(*top_, isolate()); +} + +Handle AllocationSiteUsageContext::EnterNewScope() { + if (top().is_null()) { + InitializeTraversal(top_site_); + } else { + // Advance current site + Object nested_site = current()->nested_site(); + // Something is wrong if we advance to the end of the list here. + update_current_site(AllocationSite::cast(nested_site)); + } + return Handle(*current(), isolate()); +} + +void AllocationSiteUsageContext::ExitScope(Handle scope_site, + Handle object) { + // This assert ensures that we are pointing at the right sub-object in a + // recursive walk of a nested literal. + DCHECK(object.is_null() || *object == scope_site->boilerplate()); +} + +bool AllocationSiteUsageContext::ShouldCreateMemento(Handle object) { + if (activated_ && AllocationSite::CanTrack(object->map().instance_type())) { + if (FLAG_allocation_site_pretenuring || + AllocationSite::ShouldTrack(object->GetElementsKind())) { + if (FLAG_trace_creation_allocation_sites) { + PrintF("*** Creating Memento for %s %p\n", + object->IsJSArray() ? "JSArray" : "JSObject", + reinterpret_cast(object->ptr())); + } + return true; + } + } + return false; +} + +} // namespace internal +} // namespace v8 + +#endif // V8_OBJECTS_ALLOCATION_SITE_SCOPES_INL_H_ diff --git a/deps/v8/src/objects/allocation-site-scopes.h b/deps/v8/src/objects/allocation-site-scopes.h new file mode 100644 index 0000000000..8f5fb42986 --- /dev/null +++ b/deps/v8/src/objects/allocation-site-scopes.h @@ -0,0 +1,69 @@ +// Copyright 2013 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#ifndef V8_OBJECTS_ALLOCATION_SITE_SCOPES_H_ +#define V8_OBJECTS_ALLOCATION_SITE_SCOPES_H_ + +#include "src/handles/handles.h" +#include "src/objects/allocation-site.h" +#include "src/objects/map.h" +#include "src/objects/objects.h" + +namespace v8 { +namespace internal { + +// AllocationSiteContext is the base class for walking and copying a nested +// boilerplate with AllocationSite and AllocationMemento support. +class AllocationSiteContext { + public: + explicit AllocationSiteContext(Isolate* isolate) { isolate_ = isolate; } + + Handle top() { return top_; } + Handle current() { return current_; } + + bool ShouldCreateMemento(Handle object) { return false; } + + Isolate* isolate() { return isolate_; } + + protected: + void update_current_site(AllocationSite site) { + *(current_.location()) = site.ptr(); + } + + inline void InitializeTraversal(Handle site); + + private: + Isolate* isolate_; + Handle top_; + Handle current_; +}; + +// AllocationSiteUsageContext aids in the creation of AllocationMementos placed +// behind some/all components of a copied object literal. 
+class AllocationSiteUsageContext : public AllocationSiteContext { + public: + AllocationSiteUsageContext(Isolate* isolate, Handle site, + bool activated) + : AllocationSiteContext(isolate), + top_site_(site), + activated_(activated) {} + + inline Handle EnterNewScope(); + + inline void ExitScope(Handle scope_site, + Handle object); + + inline bool ShouldCreateMemento(Handle object); + + static const bool kCopying = true; + + private: + Handle top_site_; + bool activated_; +}; + +} // namespace internal +} // namespace v8 + +#endif // V8_OBJECTS_ALLOCATION_SITE_SCOPES_H_ diff --git a/deps/v8/src/objects/allocation-site.h b/deps/v8/src/objects/allocation-site.h index b221bd02dd..9289a83f70 100644 --- a/deps/v8/src/objects/allocation-site.h +++ b/deps/v8/src/objects/allocation-site.h @@ -5,7 +5,7 @@ #ifndef V8_OBJECTS_ALLOCATION_SITE_H_ #define V8_OBJECTS_ALLOCATION_SITE_H_ -#include "src/objects.h" +#include "src/objects/objects.h" #include "src/objects/struct.h" // Has to be the last include (doesn't have include guards): @@ -134,25 +134,25 @@ class AllocationSite : public Struct { static bool ShouldTrack(ElementsKind from, ElementsKind to); static inline bool CanTrack(InstanceType type); -// Layout description. -// AllocationSite has to start with TransitionInfoOrboilerPlateOffset -// and end with WeakNext field. -#define ALLOCATION_SITE_FIELDS(V) \ - V(kStartOffset, 0) \ - V(kTransitionInfoOrBoilerplateOffset, kTaggedSize) \ - V(kNestedSiteOffset, kTaggedSize) \ - V(kDependentCodeOffset, kTaggedSize) \ - V(kCommonPointerFieldEndOffset, 0) \ - V(kPretenureDataOffset, kInt32Size) \ - V(kPretenureCreateCountOffset, kInt32Size) \ - /* Size of AllocationSite without WeakNext field */ \ - V(kSizeWithoutWeakNext, 0) \ - V(kWeakNextOffset, kTaggedSize) \ - /* Size of AllocationSite with WeakNext field */ \ - V(kSizeWithWeakNext, 0) + // Layout description. + // AllocationSite has to start with TransitionInfoOrboilerPlateOffset + // and end with WeakNext field. 
+ #define ALLOCATION_SITE_FIELDS(V) \ + V(kStartOffset, 0) \ + V(kTransitionInfoOrBoilerplateOffset, kTaggedSize) \ + V(kNestedSiteOffset, kTaggedSize) \ + V(kDependentCodeOffset, kTaggedSize) \ + V(kCommonPointerFieldEndOffset, 0) \ + V(kPretenureDataOffset, kInt32Size) \ + V(kPretenureCreateCountOffset, kInt32Size) \ + /* Size of AllocationSite without WeakNext field */ \ + V(kSizeWithoutWeakNext, 0) \ + V(kWeakNextOffset, kTaggedSize) \ + /* Size of AllocationSite with WeakNext field */ \ + V(kSizeWithWeakNext, 0) DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize, ALLOCATION_SITE_FIELDS) -#undef ALLOCATION_SITE_FIELDS + #undef ALLOCATION_SITE_FIELDS class BodyDescriptor; @@ -164,14 +164,9 @@ class AllocationSite : public Struct { class AllocationMemento : public Struct { public: -// Layout description. -#define ALLOCATION_MEMENTO_FIELDS(V) \ - V(kAllocationSiteOffset, kTaggedSize) \ - V(kSize, 0) - + // Layout description. DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize, - ALLOCATION_MEMENTO_FIELDS) -#undef ALLOCATION_MEMENTO_FIELDS + TORQUE_GENERATED_ALLOCATION_MEMENTO_FIELDS) DECL_ACCESSORS(allocation_site, Object) diff --git a/deps/v8/src/objects/api-callbacks-inl.h b/deps/v8/src/objects/api-callbacks-inl.h index 7cd08f7052..041247637a 100644 --- a/deps/v8/src/objects/api-callbacks-inl.h +++ b/deps/v8/src/objects/api-callbacks-inl.h @@ -46,7 +46,7 @@ bool AccessorInfo::has_getter() { bool result = getter() != Smi::kZero; DCHECK_EQ(result, getter() != Smi::kZero && - Foreign::cast(getter())->foreign_address() != kNullAddress); + Foreign::cast(getter()).foreign_address() != kNullAddress); return result; } @@ -54,7 +54,7 @@ bool AccessorInfo::has_setter() { bool result = setter() != Smi::kZero; DCHECK_EQ(result, setter() != Smi::kZero && - Foreign::cast(setter())->foreign_address() != kNullAddress); + Foreign::cast(setter()).foreign_address() != kNullAddress); return result; } @@ -88,13 +88,13 @@ BIT_FIELD_ACCESSORS(AccessorInfo, flags, 
initial_property_attributes, bool AccessorInfo::IsCompatibleReceiver(Object receiver) { if (!HasExpectedReceiverType()) return true; - if (!receiver->IsJSObject()) return false; + if (!receiver.IsJSObject()) return false; return FunctionTemplateInfo::cast(expected_receiver_type()) - ->IsTemplateFor(JSObject::cast(receiver)->map()); + .IsTemplateFor(JSObject::cast(receiver).map()); } bool AccessorInfo::HasExpectedReceiverType() { - return expected_receiver_type()->IsFunctionTemplateInfo(); + return expected_receiver_type().IsFunctionTemplateInfo(); } ACCESSORS(AccessCheckInfo, callback, Object, kCallbackOffset) diff --git a/deps/v8/src/objects/arguments-inl.h b/deps/v8/src/objects/arguments-inl.h index 4132aec04d..c2ef59a896 100644 --- a/deps/v8/src/objects/arguments-inl.h +++ b/deps/v8/src/objects/arguments-inl.h @@ -7,10 +7,10 @@ #include "src/objects/arguments.h" -#include "src/contexts-inl.h" -#include "src/isolate-inl.h" -#include "src/objects-inl.h" +#include "src/execution/isolate-inl.h" +#include "src/objects/contexts-inl.h" #include "src/objects/fixed-array-inl.h" +#include "src/objects/objects-inl.h" // Has to be the last include (doesn't have include guards): #include "src/objects/object-macros.h" @@ -62,23 +62,23 @@ bool JSSloppyArgumentsObject::GetSloppyArgumentsLength(Isolate* isolate, int* out) { Context context = *isolate->native_context(); Map map = object->map(); - if (map != context->sloppy_arguments_map() && - map != context->strict_arguments_map() && - map != context->fast_aliased_arguments_map()) { + if (map != context.sloppy_arguments_map() && + map != context.strict_arguments_map() && + map != context.fast_aliased_arguments_map()) { return false; } DCHECK(object->HasFastElements() || object->HasFastArgumentsElements()); Object len_obj = object->InObjectPropertyAt(JSArgumentsObjectWithLength::kLengthIndex); - if (!len_obj->IsSmi()) return false; + if (!len_obj.IsSmi()) return false; *out = Max(0, Smi::ToInt(len_obj)); FixedArray parameters = 
FixedArray::cast(object->elements()); if (object->HasSloppyArgumentsElements()) { - FixedArray arguments = FixedArray::cast(parameters->get(1)); - return *out <= arguments->length(); + FixedArray arguments = FixedArray::cast(parameters.get(1)); + return *out <= arguments.length(); } - return *out <= parameters->length(); + return *out <= parameters.length(); } } // namespace internal diff --git a/deps/v8/src/objects/arguments.h b/deps/v8/src/objects/arguments.h index 720820268c..a1d39f1f36 100644 --- a/deps/v8/src/objects/arguments.h +++ b/deps/v8/src/objects/arguments.h @@ -8,7 +8,7 @@ #include "src/objects/fixed-array.h" #include "src/objects/js-objects.h" #include "src/objects/struct.h" -#include "torque-generated/class-definitions-from-dsl.h" +#include "torque-generated/field-offsets-tq.h" // Has to be the last include (doesn't have include guards): #include "src/objects/object-macros.h" diff --git a/deps/v8/src/objects/bigint.cc b/deps/v8/src/objects/bigint.cc index 7b67aa3ffb..92b78f8821 100644 --- a/deps/v8/src/objects/bigint.cc +++ b/deps/v8/src/objects/bigint.cc @@ -19,14 +19,14 @@ #include "src/objects/bigint.h" -#include "src/conversions.h" -#include "src/double.h" +#include "src/execution/isolate-inl.h" #include "src/heap/factory.h" #include "src/heap/heap-write-barrier-inl.h" -#include "src/isolate-inl.h" -#include "src/objects-inl.h" +#include "src/numbers/conversions.h" +#include "src/numbers/double.h" #include "src/objects/heap-number-inl.h" #include "src/objects/instance-type-inl.h" +#include "src/objects/objects-inl.h" #include "src/objects/smi.h" namespace v8 { @@ -202,11 +202,11 @@ class MutableBigInt : public FreshlyAllocatedBigInt { } inline void initialize_bitfield(bool sign, int length) { int32_t bitfield = LengthBits::encode(length) | SignBits::encode(sign); - WRITE_INT32_FIELD(*this, kBitfieldOffset, bitfield); + WriteField(kBitfieldOffset, bitfield); } inline void set_digit(int n, digit_t value) { SLOW_DCHECK(0 <= n && n < length()); - 
WRITE_UINTPTR_FIELD(*this, kDigitsOffset + n * kDigitSize, value); + WriteField(kDigitsOffset + n * kDigitSize, value); } void set_64_bits(uint64_t bits); @@ -498,7 +498,7 @@ MaybeHandle BigInt::Multiply(Isolate* isolate, Handle x, work_estimate = 0; StackLimitCheck interrupt_check(isolate); if (interrupt_check.InterruptRequested() && - isolate->stack_guard()->HandleInterrupts()->IsException(isolate)) { + isolate->stack_guard()->HandleInterrupts().IsException(isolate)) { return MaybeHandle(); } } @@ -655,10 +655,10 @@ ComparisonResult BigInt::CompareToBigInt(Handle x, Handle y) { } bool BigInt::EqualToBigInt(BigInt x, BigInt y) { - if (x->sign() != y->sign()) return false; - if (x->length() != y->length()) return false; - for (int i = 0; i < x->length(); i++) { - if (x->digit(i) != y->digit(i)) return false; + if (x.sign() != y.sign()) return false; + if (x.length() != y.length()) return false; + for (int i = 0; i < x.length(); i++) { + if (x.digit(i) != y.digit(i)) return false; } return true; } @@ -979,7 +979,7 @@ MaybeHandle BigInt::FromNumber(Isolate* isolate, if (number->IsSmi()) { return MutableBigInt::NewFromInt(isolate, Smi::ToInt(*number)); } - double value = HeapNumber::cast(*number)->value(); + double value = HeapNumber::cast(*number).value(); if (!std::isfinite(value) || (DoubleToInteger(value) != value)) { THROW_NEW_ERROR(isolate, NewRangeError(MessageTemplate::kBigIntFromNumber, number), @@ -1311,8 +1311,8 @@ inline Handle MutableBigInt::AbsoluteBitwiseOp( if (result_storage.is_null()) { result = New(isolate, result_length).ToHandleChecked(); } else { - DCHECK(result_storage->length() >= result_length); - result_length = result_storage->length(); + DCHECK(result_storage.length() >= result_length); + result_length = result_storage.length(); } int i = 0; for (; i < num_pairs; i++) { @@ -1428,12 +1428,12 @@ void MutableBigInt::MultiplyAccumulate(Handle multiplicand, void MutableBigInt::InternalMultiplyAdd(BigIntBase source, digit_t factor, digit_t 
summand, int n, MutableBigInt result) { - DCHECK(source->length() >= n); - DCHECK(result->length() >= n); + DCHECK(source.length() >= n); + DCHECK(result.length() >= n); digit_t carry = summand; digit_t high = 0; for (int i = 0; i < n; i++) { - digit_t current = source->digit(i); + digit_t current = source.digit(i); digit_t new_carry = 0; // Compute this round's multiplication. digit_t new_high = 0; @@ -1442,15 +1442,15 @@ void MutableBigInt::InternalMultiplyAdd(BigIntBase source, digit_t factor, current = digit_add(current, high, &new_carry); current = digit_add(current, carry, &new_carry); // Store result and prepare for next round. - result->set_digit(i, current); + result.set_digit(i, current); carry = new_carry; high = new_high; } - if (result->length() > n) { - result->set_digit(n++, carry + high); + if (result.length() > n) { + result.set_digit(n++, carry + high); // Current callers don't pass in such large results, but let's be robust. - while (n < result->length()) { - result->set_digit(n++, 0); + while (n < result.length()) { + result.set_digit(n++, 0); } } else { CHECK_EQ(carry + high, 0); @@ -1601,7 +1601,7 @@ bool MutableBigInt::AbsoluteDivLarge(Isolate* isolate, work_estimate = 0; StackLimitCheck interrupt_check(isolate); if (interrupt_check.InterruptRequested() && - isolate->stack_guard()->HandleInterrupts()->IsException(isolate)) { + isolate->stack_guard()->HandleInterrupts().IsException(isolate)) { return false; } } @@ -1949,14 +1949,14 @@ MaybeHandle BigInt::FromSerializedDigits( void* digits = reinterpret_cast(result->ptr() + kDigitsOffset - kHeapObjectTag); #if defined(V8_TARGET_LITTLE_ENDIAN) - memcpy(digits, digits_storage.start(), bytelength); + memcpy(digits, digits_storage.begin(), bytelength); void* padding_start = reinterpret_cast(reinterpret_cast
(digits) + bytelength); memset(padding_start, 0, length * kDigitSize - bytelength); #elif defined(V8_TARGET_BIG_ENDIAN) digit_t* digit = reinterpret_cast(digits); const digit_t* digit_storage = - reinterpret_cast(digits_storage.start()); + reinterpret_cast(digits_storage.begin()); for (int i = 0; i < bytelength / kDigitSize; i++) { *digit = ByteReverse(*digit_storage); digit_storage++; @@ -2146,7 +2146,7 @@ MaybeHandle MutableBigInt::ToStringGeneric(Isolate* isolate, if (interrupt_check.InterruptRequested()) { { AllowHeapAllocation might_throw; - if (isolate->stack_guard()->HandleInterrupts()->IsException( + if (isolate->stack_guard()->HandleInterrupts().IsException( isolate)) { return MaybeHandle(); } @@ -2156,7 +2156,7 @@ MaybeHandle MutableBigInt::ToStringGeneric(Isolate* isolate, chars = result->GetChars(no_gc); } if (interrupt_check.InterruptRequested() && - isolate->stack_guard()->HandleInterrupts()->IsException(isolate)) { + isolate->stack_guard()->HandleInterrupts().IsException(isolate)) { return MaybeHandle(); } } @@ -2457,16 +2457,16 @@ void BigInt::ToWordsArray64(int* sign_bit, int* words64_count, uint64_t MutableBigInt::GetRawBits(BigIntBase x, bool* lossless) { if (lossless != nullptr) *lossless = true; - if (x->is_zero()) return 0; - int len = x->length(); + if (x.is_zero()) return 0; + int len = x.length(); STATIC_ASSERT(kDigitBits == 64 || kDigitBits == 32); if (lossless != nullptr && len > 64 / kDigitBits) *lossless = false; - uint64_t raw = static_cast(x->digit(0)); + uint64_t raw = static_cast(x.digit(0)); if (kDigitBits == 32 && len > 1) { - raw |= static_cast(x->digit(1)) << 32; + raw |= static_cast(x.digit(1)) << 32; } // Simulate two's complement. MSVC dislikes "-raw". - return x->sign() ? ((~raw) + 1u) : raw; + return x.sign() ? 
((~raw) + 1u) : raw; } int64_t BigInt::AsInt64(bool* lossless) { diff --git a/deps/v8/src/objects/bigint.h b/deps/v8/src/objects/bigint.h index e59c7d6982..3f5d35878b 100644 --- a/deps/v8/src/objects/bigint.h +++ b/deps/v8/src/objects/bigint.h @@ -5,10 +5,10 @@ #ifndef V8_OBJECTS_BIGINT_H_ #define V8_OBJECTS_BIGINT_H_ -#include "src/globals.h" -#include "src/objects.h" +#include "src/common/globals.h" #include "src/objects/heap-object.h" -#include "src/utils.h" +#include "src/objects/objects.h" +#include "src/utils/utils.h" // Has to be the last include (doesn't have include guards): #include "src/objects/object-macros.h" @@ -87,7 +87,7 @@ class BigIntBase : public HeapObject { inline digit_t digit(int n) const { SLOW_DCHECK(0 <= n && n < length()); - return READ_UINTPTR_FIELD(*this, kDigitsOffset + n * kDigitSize); + return ReadField(kDigitsOffset + n * kDigitSize); } bool is_zero() const { return length() == 0; } diff --git a/deps/v8/src/objects/cell-inl.h b/deps/v8/src/objects/cell-inl.h index c48a82fd31..90266b7599 100644 --- a/deps/v8/src/objects/cell-inl.h +++ b/deps/v8/src/objects/cell-inl.h @@ -8,7 +8,7 @@ #include "src/objects/cell.h" #include "src/heap/heap-write-barrier-inl.h" -#include "src/objects-inl.h" +#include "src/objects/objects-inl.h" // Has to be the last include (doesn't have include guards): #include "src/objects/object-macros.h" diff --git a/deps/v8/src/objects/cell.h b/deps/v8/src/objects/cell.h index c15b31a61c..9c77f5d332 100644 --- a/deps/v8/src/objects/cell.h +++ b/deps/v8/src/objects/cell.h @@ -6,7 +6,7 @@ #define V8_OBJECTS_CELL_H_ #include "src/objects/heap-object.h" -#include "torque-generated/class-definitions-from-dsl.h" +#include "torque-generated/field-offsets-tq.h" // Has to be the last include (doesn't have include guards): #include "src/objects/object-macros.h" diff --git a/deps/v8/src/objects/code-inl.h b/deps/v8/src/objects/code-inl.h index a0dc3b3ae1..0877746d11 100644 --- a/deps/v8/src/objects/code-inl.h +++ 
b/deps/v8/src/objects/code-inl.h @@ -7,16 +7,16 @@ #include "src/objects/code.h" -#include "src/code-desc.h" +#include "src/codegen/code-desc.h" +#include "src/common/v8memory.h" +#include "src/execution/isolate.h" #include "src/interpreter/bytecode-register.h" -#include "src/isolate.h" #include "src/objects/dictionary.h" #include "src/objects/instance-type-inl.h" #include "src/objects/map-inl.h" #include "src/objects/maybe-object-inl.h" #include "src/objects/oddball.h" #include "src/objects/smi-inl.h" -#include "src/v8memory.h" // Has to be the last include (doesn't have include guards): #include "src/objects/object-macros.h" @@ -29,7 +29,7 @@ OBJECT_CONSTRUCTORS_IMPL(BytecodeArray, FixedArrayBase) OBJECT_CONSTRUCTORS_IMPL(AbstractCode, HeapObject) OBJECT_CONSTRUCTORS_IMPL(DependentCode, WeakFixedArray) OBJECT_CONSTRUCTORS_IMPL(CodeDataContainer, HeapObject) -OBJECT_CONSTRUCTORS_IMPL(SourcePositionTableWithFrameCache, Tuple2) +OBJECT_CONSTRUCTORS_IMPL(SourcePositionTableWithFrameCache, Struct) NEVER_READ_ONLY_SPACE_IMPL(AbstractCode) @@ -42,94 +42,94 @@ CAST_ACCESSOR(DeoptimizationData) CAST_ACCESSOR(SourcePositionTableWithFrameCache) ACCESSORS(SourcePositionTableWithFrameCache, source_position_table, ByteArray, - kSourcePositionTableIndex) + kSourcePositionTableOffset) ACCESSORS(SourcePositionTableWithFrameCache, stack_frame_cache, - SimpleNumberDictionary, kStackFrameCacheIndex) + SimpleNumberDictionary, kStackFrameCacheOffset) int AbstractCode::raw_instruction_size() { if (IsCode()) { - return GetCode()->raw_instruction_size(); + return GetCode().raw_instruction_size(); } else { - return GetBytecodeArray()->length(); + return GetBytecodeArray().length(); } } int AbstractCode::InstructionSize() { if (IsCode()) { - return GetCode()->InstructionSize(); + return GetCode().InstructionSize(); } else { - return GetBytecodeArray()->length(); + return GetBytecodeArray().length(); } } ByteArray AbstractCode::source_position_table() { if (IsCode()) { - return 
GetCode()->SourcePositionTable(); + return GetCode().SourcePositionTable(); } else { - return GetBytecodeArray()->SourcePositionTable(); + return GetBytecodeArray().SourcePositionTable(); } } Object AbstractCode::stack_frame_cache() { Object maybe_table; if (IsCode()) { - maybe_table = GetCode()->source_position_table(); + maybe_table = GetCode().source_position_table(); } else { - maybe_table = GetBytecodeArray()->source_position_table(); + maybe_table = GetBytecodeArray().source_position_table(); } - if (maybe_table->IsSourcePositionTableWithFrameCache()) { + if (maybe_table.IsSourcePositionTableWithFrameCache()) { return SourcePositionTableWithFrameCache::cast(maybe_table) - ->stack_frame_cache(); + .stack_frame_cache(); } return Smi::kZero; } int AbstractCode::SizeIncludingMetadata() { if (IsCode()) { - return GetCode()->SizeIncludingMetadata(); + return GetCode().SizeIncludingMetadata(); } else { - return GetBytecodeArray()->SizeIncludingMetadata(); + return GetBytecodeArray().SizeIncludingMetadata(); } } int AbstractCode::ExecutableSize() { if (IsCode()) { - return GetCode()->ExecutableSize(); + return GetCode().ExecutableSize(); } else { - return GetBytecodeArray()->BytecodeArraySize(); + return GetBytecodeArray().BytecodeArraySize(); } } Address AbstractCode::raw_instruction_start() { if (IsCode()) { - return GetCode()->raw_instruction_start(); + return GetCode().raw_instruction_start(); } else { - return GetBytecodeArray()->GetFirstBytecodeAddress(); + return GetBytecodeArray().GetFirstBytecodeAddress(); } } Address AbstractCode::InstructionStart() { if (IsCode()) { - return GetCode()->InstructionStart(); + return GetCode().InstructionStart(); } else { - return GetBytecodeArray()->GetFirstBytecodeAddress(); + return GetBytecodeArray().GetFirstBytecodeAddress(); } } Address AbstractCode::raw_instruction_end() { if (IsCode()) { - return GetCode()->raw_instruction_end(); + return GetCode().raw_instruction_end(); } else { - return 
GetBytecodeArray()->GetFirstBytecodeAddress() + - GetBytecodeArray()->length(); + return GetBytecodeArray().GetFirstBytecodeAddress() + + GetBytecodeArray().length(); } } Address AbstractCode::InstructionEnd() { if (IsCode()) { - return GetCode()->InstructionEnd(); + return GetCode().InstructionEnd(); } else { - return GetBytecodeArray()->GetFirstBytecodeAddress() + - GetBytecodeArray()->length(); + return GetBytecodeArray().GetFirstBytecodeAddress() + + GetBytecodeArray().length(); } } @@ -139,7 +139,7 @@ bool AbstractCode::contains(Address inner_pointer) { AbstractCode::Kind AbstractCode::kind() { if (IsCode()) { - return static_cast(GetCode()->kind()); + return static_cast(GetCode().kind()); } else { return INTERPRETED_FUNCTION; } @@ -236,26 +236,26 @@ void Code::clear_padding() { ByteArray Code::SourcePositionTableIfCollected() const { ReadOnlyRoots roots = GetReadOnlyRoots(); Object maybe_table = source_position_table(); - if (maybe_table->IsUndefined(roots) || maybe_table->IsException(roots)) + if (maybe_table.IsUndefined(roots) || maybe_table.IsException(roots)) return roots.empty_byte_array(); return SourcePositionTable(); } ByteArray Code::SourcePositionTable() const { Object maybe_table = source_position_table(); - DCHECK(!maybe_table->IsUndefined() && !maybe_table->IsException()); - if (maybe_table->IsByteArray()) return ByteArray::cast(maybe_table); - DCHECK(maybe_table->IsSourcePositionTableWithFrameCache()); + DCHECK(!maybe_table.IsUndefined() && !maybe_table.IsException()); + if (maybe_table.IsByteArray()) return ByteArray::cast(maybe_table); + DCHECK(maybe_table.IsSourcePositionTableWithFrameCache()); return SourcePositionTableWithFrameCache::cast(maybe_table) - ->source_position_table(); + .source_position_table(); } Object Code::next_code_link() const { - return code_data_container()->next_code_link(); + return code_data_container().next_code_link(); } void Code::set_next_code_link(Object value) { - 
code_data_container()->set_next_code_link(value); + code_data_container().set_next_code_link(value); } int Code::InstructionSize() const { @@ -297,13 +297,12 @@ int Code::GetUnwindingInfoSizeOffset() const { int Code::unwinding_info_size() const { DCHECK(has_unwinding_info()); - return static_cast( - READ_UINT64_FIELD(*this, GetUnwindingInfoSizeOffset())); + return static_cast(ReadField(GetUnwindingInfoSizeOffset())); } void Code::set_unwinding_info_size(int value) { DCHECK(has_unwinding_info()); - WRITE_UINT64_FIELD(*this, GetUnwindingInfoSizeOffset(), value); + WriteField(GetUnwindingInfoSizeOffset(), value); } Address Code::unwinding_info_start() const { @@ -326,8 +325,8 @@ int Code::body_size() const { int Code::SizeIncludingMetadata() const { int size = CodeSize(); - size += relocation_info()->Size(); - size += deoptimization_data()->Size(); + size += relocation_info().Size(); + size += deoptimization_data().Size(); return size; } @@ -336,15 +335,15 @@ ByteArray Code::unchecked_relocation_info() const { } byte* Code::relocation_start() const { - return unchecked_relocation_info()->GetDataStartAddress(); + return unchecked_relocation_info().GetDataStartAddress(); } byte* Code::relocation_end() const { - return unchecked_relocation_info()->GetDataEndAddress(); + return unchecked_relocation_info().GetDataEndAddress(); } int Code::relocation_size() const { - return unchecked_relocation_info()->length(); + return unchecked_relocation_info().length(); } Address Code::entry() const { return raw_instruction_start(); } @@ -369,8 +368,8 @@ int Code::ExecutableSize() const { // static void Code::CopyRelocInfoToByteArray(ByteArray dest, const CodeDesc& desc) { - DCHECK_EQ(dest->length(), desc.reloc_size); - CopyBytes(dest->GetDataStartAddress(), + DCHECK_EQ(dest.length(), desc.reloc_size); + CopyBytes(dest.GetDataStartAddress(), desc.buffer + desc.buffer_size - desc.reloc_size, static_cast(desc.reloc_size)); } @@ -378,7 +377,7 @@ void 
Code::CopyRelocInfoToByteArray(ByteArray dest, const CodeDesc& desc) { int Code::CodeSize() const { return SizeFor(body_size()); } Code::Kind Code::kind() const { - return KindField::decode(READ_UINT32_FIELD(*this, kFlagsOffset)); + return KindField::decode(ReadField(kFlagsOffset)); } void Code::initialize_flags(Kind kind, bool has_unwinding_info, @@ -391,7 +390,7 @@ void Code::initialize_flags(Kind kind, bool has_unwinding_info, IsTurbofannedField::encode(is_turbofanned) | StackSlotsField::encode(stack_slots) | IsOffHeapTrampoline::encode(is_off_heap_trampoline); - WRITE_UINT32_FIELD(*this, kFlagsOffset, flags); + WriteField(kFlagsOffset, flags); DCHECK_IMPLIES(stack_slots != 0, has_safepoint_info()); } @@ -417,54 +416,54 @@ inline bool Code::has_tagged_params() const { } inline bool Code::has_unwinding_info() const { - return HasUnwindingInfoField::decode(READ_UINT32_FIELD(*this, kFlagsOffset)); + return HasUnwindingInfoField::decode(ReadField(kFlagsOffset)); } inline bool Code::is_turbofanned() const { - return IsTurbofannedField::decode(READ_UINT32_FIELD(*this, kFlagsOffset)); + return IsTurbofannedField::decode(ReadField(kFlagsOffset)); } inline bool Code::can_have_weak_objects() const { DCHECK(kind() == OPTIMIZED_FUNCTION); - int32_t flags = code_data_container()->kind_specific_flags(); + int32_t flags = code_data_container().kind_specific_flags(); return CanHaveWeakObjectsField::decode(flags); } inline void Code::set_can_have_weak_objects(bool value) { DCHECK(kind() == OPTIMIZED_FUNCTION); - int32_t previous = code_data_container()->kind_specific_flags(); + int32_t previous = code_data_container().kind_specific_flags(); int32_t updated = CanHaveWeakObjectsField::update(previous, value); - code_data_container()->set_kind_specific_flags(updated); + code_data_container().set_kind_specific_flags(updated); } inline bool Code::is_promise_rejection() const { DCHECK(kind() == BUILTIN); - int32_t flags = code_data_container()->kind_specific_flags(); + int32_t flags = 
code_data_container().kind_specific_flags(); return IsPromiseRejectionField::decode(flags); } inline void Code::set_is_promise_rejection(bool value) { DCHECK(kind() == BUILTIN); - int32_t previous = code_data_container()->kind_specific_flags(); + int32_t previous = code_data_container().kind_specific_flags(); int32_t updated = IsPromiseRejectionField::update(previous, value); - code_data_container()->set_kind_specific_flags(updated); + code_data_container().set_kind_specific_flags(updated); } inline bool Code::is_exception_caught() const { DCHECK(kind() == BUILTIN); - int32_t flags = code_data_container()->kind_specific_flags(); + int32_t flags = code_data_container().kind_specific_flags(); return IsExceptionCaughtField::decode(flags); } inline void Code::set_is_exception_caught(bool value) { DCHECK(kind() == BUILTIN); - int32_t previous = code_data_container()->kind_specific_flags(); + int32_t previous = code_data_container().kind_specific_flags(); int32_t updated = IsExceptionCaughtField::update(previous, value); - code_data_container()->set_kind_specific_flags(updated); + code_data_container().set_kind_specific_flags(updated); } inline bool Code::is_off_heap_trampoline() const { - return IsOffHeapTrampoline::decode(READ_UINT32_FIELD(*this, kFlagsOffset)); + return IsOffHeapTrampoline::decode(ReadField(kFlagsOffset)); } inline HandlerTable::CatchPrediction Code::GetBuiltinCatchPrediction() { @@ -474,14 +473,14 @@ inline HandlerTable::CatchPrediction Code::GetBuiltinCatchPrediction() { } int Code::builtin_index() const { - int index = READ_INT_FIELD(*this, kBuiltinIndexOffset); + int index = ReadField(kBuiltinIndexOffset); DCHECK(index == -1 || Builtins::IsBuiltinId(index)); return index; } void Code::set_builtin_index(int index) { DCHECK(index == -1 || Builtins::IsBuiltinId(index)); - WRITE_INT_FIELD(*this, kBuiltinIndexOffset, index); + WriteField(kBuiltinIndexOffset, index); } bool Code::is_builtin() const { return builtin_index() != -1; } @@ -492,49 +491,49 @@ 
bool Code::has_safepoint_info() const { int Code::stack_slots() const { DCHECK(has_safepoint_info()); - return StackSlotsField::decode(READ_UINT32_FIELD(*this, kFlagsOffset)); + return StackSlotsField::decode(ReadField(kFlagsOffset)); } bool Code::marked_for_deoptimization() const { DCHECK(kind() == OPTIMIZED_FUNCTION); - int32_t flags = code_data_container()->kind_specific_flags(); + int32_t flags = code_data_container().kind_specific_flags(); return MarkedForDeoptimizationField::decode(flags); } void Code::set_marked_for_deoptimization(bool flag) { DCHECK(kind() == OPTIMIZED_FUNCTION); DCHECK_IMPLIES(flag, AllowDeoptimization::IsAllowed(GetIsolate())); - int32_t previous = code_data_container()->kind_specific_flags(); + int32_t previous = code_data_container().kind_specific_flags(); int32_t updated = MarkedForDeoptimizationField::update(previous, flag); - code_data_container()->set_kind_specific_flags(updated); + code_data_container().set_kind_specific_flags(updated); } bool Code::embedded_objects_cleared() const { DCHECK(kind() == OPTIMIZED_FUNCTION); - int32_t flags = code_data_container()->kind_specific_flags(); + int32_t flags = code_data_container().kind_specific_flags(); return EmbeddedObjectsClearedField::decode(flags); } void Code::set_embedded_objects_cleared(bool flag) { DCHECK(kind() == OPTIMIZED_FUNCTION); DCHECK_IMPLIES(flag, marked_for_deoptimization()); - int32_t previous = code_data_container()->kind_specific_flags(); + int32_t previous = code_data_container().kind_specific_flags(); int32_t updated = EmbeddedObjectsClearedField::update(previous, flag); - code_data_container()->set_kind_specific_flags(updated); + code_data_container().set_kind_specific_flags(updated); } bool Code::deopt_already_counted() const { DCHECK(kind() == OPTIMIZED_FUNCTION); - int32_t flags = code_data_container()->kind_specific_flags(); + int32_t flags = code_data_container().kind_specific_flags(); return DeoptAlreadyCountedField::decode(flags); } void 
Code::set_deopt_already_counted(bool flag) { DCHECK(kind() == OPTIMIZED_FUNCTION); DCHECK_IMPLIES(flag, AllowDeoptimization::IsAllowed(GetIsolate())); - int32_t previous = code_data_container()->kind_specific_flags(); + int32_t previous = code_data_container().kind_specific_flags(); int32_t updated = DeoptAlreadyCountedField::update(previous, flag); - code_data_container()->set_kind_specific_flags(updated); + code_data_container().set_kind_specific_flags(updated); } bool Code::is_optimized_code() const { return kind() == OPTIMIZED_FUNCTION; } @@ -542,13 +541,13 @@ bool Code::is_wasm_code() const { return kind() == WASM_FUNCTION; } int Code::constant_pool_offset() const { if (!FLAG_enable_embedded_constant_pool) return code_comments_offset(); - return READ_INT_FIELD(*this, kConstantPoolOffsetOffset); + return ReadField(kConstantPoolOffsetOffset); } void Code::set_constant_pool_offset(int value) { if (!FLAG_enable_embedded_constant_pool) return; DCHECK_LE(value, InstructionSize()); - WRITE_INT_FIELD(*this, kConstantPoolOffsetOffset, value); + WriteField(kConstantPoolOffsetOffset, value); } Address Code::constant_pool() const { @@ -592,10 +591,10 @@ bool Code::IsWeakObject(HeapObject object) { } bool Code::IsWeakObjectInOptimizedCode(HeapObject object) { - Map map = object->synchronized_map(); - InstanceType instance_type = map->instance_type(); + Map map = object.synchronized_map(); + InstanceType instance_type = map.instance_type(); if (InstanceTypeChecker::IsMap(instance_type)) { - return Map::cast(object)->CanTransition(); + return Map::cast(object).CanTransition(); } return InstanceTypeChecker::IsPropertyCell(instance_type) || InstanceTypeChecker::IsJSReceiver(instance_type) || @@ -615,22 +614,22 @@ void CodeDataContainer::clear_padding() { byte BytecodeArray::get(int index) const { DCHECK(index >= 0 && index < this->length()); - return READ_BYTE_FIELD(*this, kHeaderSize + index * kCharSize); + return ReadField(kHeaderSize + index * kCharSize); } void 
BytecodeArray::set(int index, byte value) { DCHECK(index >= 0 && index < this->length()); - WRITE_BYTE_FIELD(*this, kHeaderSize + index * kCharSize, value); + WriteField(kHeaderSize + index * kCharSize, value); } void BytecodeArray::set_frame_size(int frame_size) { DCHECK_GE(frame_size, 0); DCHECK(IsAligned(frame_size, kSystemPointerSize)); - WRITE_INT_FIELD(*this, kFrameSizeOffset, frame_size); + WriteField(kFrameSizeOffset, frame_size); } int BytecodeArray::frame_size() const { - return READ_INT_FIELD(*this, kFrameSizeOffset); + return ReadField(kFrameSizeOffset); } int BytecodeArray::register_count() const { @@ -641,14 +640,14 @@ void BytecodeArray::set_parameter_count(int number_of_parameters) { DCHECK_GE(number_of_parameters, 0); // Parameter count is stored as the size on stack of the parameters to allow // it to be used directly by generated code. - WRITE_INT_FIELD(*this, kParameterSizeOffset, + WriteField(kParameterSizeOffset, (number_of_parameters << kSystemPointerSizeLog2)); } interpreter::Register BytecodeArray::incoming_new_target_or_generator_register() const { int register_operand = - READ_INT_FIELD(*this, kIncomingNewTargetOrGeneratorRegisterOffset); + ReadField(kIncomingNewTargetOrGeneratorRegisterOffset); if (register_operand == 0) { return interpreter::Register::invalid_value(); } else { @@ -659,24 +658,24 @@ interpreter::Register BytecodeArray::incoming_new_target_or_generator_register() void BytecodeArray::set_incoming_new_target_or_generator_register( interpreter::Register incoming_new_target_or_generator_register) { if (!incoming_new_target_or_generator_register.is_valid()) { - WRITE_INT_FIELD(*this, kIncomingNewTargetOrGeneratorRegisterOffset, 0); + WriteField(kIncomingNewTargetOrGeneratorRegisterOffset, 0); } else { DCHECK(incoming_new_target_or_generator_register.index() < register_count()); DCHECK_NE(0, incoming_new_target_or_generator_register.ToOperand()); - WRITE_INT_FIELD(*this, kIncomingNewTargetOrGeneratorRegisterOffset, + 
WriteField(kIncomingNewTargetOrGeneratorRegisterOffset, incoming_new_target_or_generator_register.ToOperand()); } } int BytecodeArray::osr_loop_nesting_level() const { - return READ_INT8_FIELD(*this, kOSRNestingLevelOffset); + return ReadField(kOSRNestingLevelOffset); } void BytecodeArray::set_osr_loop_nesting_level(int depth) { DCHECK(0 <= depth && depth <= AbstractCode::kMaxLoopNestingMarker); STATIC_ASSERT(AbstractCode::kMaxLoopNestingMarker < kMaxInt8); - WRITE_INT8_FIELD(*this, kOSRNestingLevelOffset, depth); + WriteField(kOSRNestingLevelOffset, depth); } BytecodeArray::Age BytecodeArray::bytecode_age() const { @@ -695,7 +694,7 @@ void BytecodeArray::set_bytecode_age(BytecodeArray::Age age) { int BytecodeArray::parameter_count() const { // Parameter count is stored as the size on stack of the parameters to allow // it to be used directly by generated code. - return READ_INT_FIELD(*this, kParameterSizeOffset) >> kSystemPointerSizeLog2; + return ReadField(kParameterSizeOffset) >> kSystemPointerSizeLog2; } ACCESSORS(BytecodeArray, constant_pool, FixedArray, kConstantPoolOffset) @@ -715,11 +714,11 @@ Address BytecodeArray::GetFirstBytecodeAddress() { bool BytecodeArray::HasSourcePositionTable() const { Object maybe_table = source_position_table(); - return !(maybe_table->IsUndefined() || DidSourcePositionGenerationFail()); + return !(maybe_table.IsUndefined() || DidSourcePositionGenerationFail()); } bool BytecodeArray::DidSourcePositionGenerationFail() const { - return source_position_table()->IsException(); + return source_position_table().IsException(); } void BytecodeArray::SetSourcePositionsFailedToCollect() { @@ -728,14 +727,14 @@ void BytecodeArray::SetSourcePositionsFailedToCollect() { ByteArray BytecodeArray::SourcePositionTable() const { Object maybe_table = source_position_table(); - if (maybe_table->IsByteArray()) return ByteArray::cast(maybe_table); + if (maybe_table.IsByteArray()) return ByteArray::cast(maybe_table); ReadOnlyRoots roots = 
GetReadOnlyRoots(); - if (maybe_table->IsException(roots)) return roots.empty_byte_array(); + if (maybe_table.IsException(roots)) return roots.empty_byte_array(); - DCHECK(!maybe_table->IsUndefined(roots)); - DCHECK(maybe_table->IsSourcePositionTableWithFrameCache()); + DCHECK(!maybe_table.IsUndefined(roots)); + DCHECK(maybe_table.IsSourcePositionTableWithFrameCache()); return SourcePositionTableWithFrameCache::cast(maybe_table) - ->source_position_table(); + .source_position_table(); } ByteArray BytecodeArray::SourcePositionTableIfCollected() const { @@ -746,20 +745,20 @@ ByteArray BytecodeArray::SourcePositionTableIfCollected() const { void BytecodeArray::ClearFrameCacheFromSourcePositionTable() { Object maybe_table = source_position_table(); - if (maybe_table->IsUndefined() || maybe_table->IsByteArray()) return; - DCHECK(maybe_table->IsSourcePositionTableWithFrameCache()); + if (maybe_table.IsUndefined() || maybe_table.IsByteArray()) return; + DCHECK(maybe_table.IsSourcePositionTableWithFrameCache()); set_source_position_table(SourcePositionTableWithFrameCache::cast(maybe_table) - ->source_position_table()); + .source_position_table()); } int BytecodeArray::BytecodeArraySize() { return SizeFor(this->length()); } int BytecodeArray::SizeIncludingMetadata() { int size = BytecodeArraySize(); - size += constant_pool()->Size(); - size += handler_table()->Size(); + size += constant_pool().Size(); + size += handler_table().Size(); if (HasSourcePositionTable()) { - size += SourcePositionTable()->Size(); + size += SourcePositionTable().Size(); } return size; } @@ -777,7 +776,7 @@ DEFINE_DEOPT_ENTRY_ACCESSORS(TranslationIndex, Smi) DEFINE_DEOPT_ENTRY_ACCESSORS(Pc, Smi) BailoutId DeoptimizationData::BytecodeOffset(int i) { - return BailoutId(BytecodeOffsetRaw(i)->value()); + return BailoutId(BytecodeOffsetRaw(i).value()); } void DeoptimizationData::SetBytecodeOffset(int i, BailoutId value) { diff --git a/deps/v8/src/objects/code.cc b/deps/v8/src/objects/code.cc index 
ab929db8a7..89180693a5 100644 --- a/deps/v8/src/objects/code.cc +++ b/deps/v8/src/objects/code.cc @@ -6,24 +6,24 @@ #include "src/objects/code.h" -#include "src/assembler-inl.h" -#include "src/cpu-features.h" -#include "src/deoptimizer.h" +#include "src/codegen/assembler-inl.h" +#include "src/codegen/cpu-features.h" +#include "src/codegen/reloc-info.h" +#include "src/codegen/safepoint-table.h" +#include "src/deoptimizer/deoptimizer.h" #include "src/interpreter/bytecode-array-iterator.h" #include "src/interpreter/bytecode-decoder.h" #include "src/interpreter/interpreter.h" #include "src/objects/allocation-site-inl.h" -#include "src/ostreams.h" -#include "src/reloc-info.h" -#include "src/roots-inl.h" -#include "src/safepoint-table.h" -#include "src/snapshot/embedded-data.h" +#include "src/roots/roots-inl.h" +#include "src/snapshot/embedded/embedded-data.h" +#include "src/utils/ostreams.h" #ifdef ENABLE_DISASSEMBLER -#include "src/code-comments.h" -#include "src/disasm.h" -#include "src/disassembler.h" -#include "src/eh-frame.h" +#include "src/codegen/code-comments.h" +#include "src/diagnostics/disasm.h" +#include "src/diagnostics/disassembler.h" +#include "src/diagnostics/eh-frame.h" #endif namespace v8 { @@ -63,12 +63,10 @@ int Code::ExecutableInstructionSize() const { return safepoint_table_offset(); } void Code::ClearEmbeddedObjects(Heap* heap) { HeapObject undefined = ReadOnlyRoots(heap).undefined_value(); - int mode_mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT); + int mode_mask = RelocInfo::EmbeddedObjectModeMask(); for (RelocIterator it(*this, mode_mask); !it.done(); it.next()) { - RelocInfo::Mode mode = it.rinfo()->rmode(); - if (mode == RelocInfo::EMBEDDED_OBJECT) { - it.rinfo()->set_target_object(heap, undefined, SKIP_WRITE_BARRIER); - } + DCHECK(RelocInfo::IsEmbeddedObjectMode(it.rinfo()->rmode())); + it.rinfo()->set_target_object(heap, undefined, SKIP_WRITE_BARRIER); } set_embedded_objects_cleared(true); } @@ -107,7 +105,7 @@ void 
Code::CopyFromNoFlush(Heap* heap, const CodeDesc& desc) { const int mode_mask = RelocInfo::PostCodegenRelocationMask(); for (RelocIterator it(*this, mode_mask); !it.done(); it.next()) { RelocInfo::Mode mode = it.rinfo()->rmode(); - if (mode == RelocInfo::EMBEDDED_OBJECT) { + if (RelocInfo::IsEmbeddedObjectMode(mode)) { Handle p = it.rinfo()->target_object_handle(origin); it.rinfo()->set_target_object(heap, *p, UPDATE_WRITE_BARRIER, SKIP_ICACHE_FLUSH); @@ -116,7 +114,7 @@ void Code::CopyFromNoFlush(Heap* heap, const CodeDesc& desc) { // code object. Handle p = it.rinfo()->target_object_handle(origin); Code code = Code::cast(*p); - it.rinfo()->set_target_address(code->raw_instruction_start(), + it.rinfo()->set_target_address(code.raw_instruction_start(), UPDATE_WRITE_BARRIER, SKIP_ICACHE_FLUSH); } else if (RelocInfo::IsRuntimeEntry(mode)) { Address p = it.rinfo()->target_runtime_entry(origin); @@ -194,12 +192,12 @@ void AbstractCode::SetStackFrameCache(Handle abstract_code, namespace { template void DropStackFrameCacheCommon(Code code) { - i::Object maybe_table = code->source_position_table(); - if (maybe_table->IsUndefined() || maybe_table->IsByteArray()) return; - DCHECK(maybe_table->IsSourcePositionTableWithFrameCache()); - code->set_source_position_table( + i::Object maybe_table = code.source_position_table(); + if (maybe_table.IsUndefined() || maybe_table.IsByteArray()) return; + DCHECK(maybe_table.IsSourcePositionTableWithFrameCache()); + code.set_source_position_table( i::SourcePositionTableWithFrameCache::cast(maybe_table) - ->source_position_table()); + .source_position_table()); } } // namespace @@ -213,7 +211,7 @@ void AbstractCode::DropStackFrameCache() { int AbstractCode::SourcePosition(int offset) { Object maybe_table = source_position_table(); - if (maybe_table->IsException()) return kNoSourcePosition; + if (maybe_table.IsException()) return kNoSourcePosition; ByteArray source_position_table = ByteArray::cast(maybe_table); int position = 0; @@ -259,10 
+257,10 @@ bool Code::CanDeoptAt(Address pc) { DeoptimizationData deopt_data = DeoptimizationData::cast(deoptimization_data()); Address code_start_address = InstructionStart(); - for (int i = 0; i < deopt_data->DeoptCount(); i++) { - if (deopt_data->Pc(i)->value() == -1) continue; - Address address = code_start_address + deopt_data->Pc(i)->value(); - if (address == pc && deopt_data->BytecodeOffset(i) != BailoutId::None()) { + for (int i = 0; i < deopt_data.DeoptCount(); i++) { + if (deopt_data.Pc(i).value() == -1) continue; + Address address = code_start_address + deopt_data.Pc(i).value(); + if (address == pc && deopt_data.BytecodeOffset(i) != BailoutId::None()) { return true; } } @@ -302,7 +300,8 @@ bool Code::IsIsolateIndependent(Isolate* isolate) { STATIC_ASSERT(mode_mask == (RelocInfo::ModeMask(RelocInfo::CODE_TARGET) | RelocInfo::ModeMask(RelocInfo::RELATIVE_CODE_TARGET) | - RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) | + RelocInfo::ModeMask(RelocInfo::COMPRESSED_EMBEDDED_OBJECT) | + RelocInfo::ModeMask(RelocInfo::FULL_EMBEDDED_OBJECT) | RelocInfo::ModeMask(RelocInfo::EXTERNAL_REFERENCE) | RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE) | RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE_ENCODED) | @@ -324,7 +323,7 @@ bool Code::IsIsolateIndependent(Isolate* isolate) { if (InstructionStream::PcIsOffHeap(isolate, target_address)) continue; Code target = Code::GetCodeFromTargetAddress(target_address); - CHECK(target->IsCode()); + CHECK(target.IsCode()); if (Builtins::IsIsolateIndependentBuiltin(target)) continue; } #endif @@ -340,12 +339,12 @@ bool Code::Inlines(SharedFunctionInfo sfi) { DisallowHeapAllocation no_gc; DeoptimizationData const data = DeoptimizationData::cast(deoptimization_data()); - if (data->length() == 0) return false; - if (data->SharedFunctionInfo() == sfi) return true; - FixedArray const literals = data->LiteralArray(); - int const inlined_count = data->InlinedFunctionCount()->value(); + if (data.length() == 0) return false; + if 
(data.SharedFunctionInfo() == sfi) return true; + FixedArray const literals = data.LiteralArray(); + int const inlined_count = data.InlinedFunctionCount().value(); for (int i = 0; i < inlined_count; ++i) { - if (SharedFunctionInfo::cast(literals->get(i)) == sfi) return true; + if (SharedFunctionInfo::cast(literals.get(i)) == sfi) return true; } return false; } @@ -353,7 +352,7 @@ bool Code::Inlines(SharedFunctionInfo sfi) { Code::OptimizedCodeIterator::OptimizedCodeIterator(Isolate* isolate) { isolate_ = isolate; Object list = isolate->heap()->native_contexts_list(); - next_context_ = list->IsUndefined(isolate_) ? Context() : Context::cast(list); + next_context_ = list.IsUndefined(isolate_) ? Context() : Context::cast(list); } Code Code::OptimizedCodeIterator::Next() { @@ -361,21 +360,21 @@ Code Code::OptimizedCodeIterator::Next() { Object next; if (!current_code_.is_null()) { // Get next code in the linked list. - next = current_code_->next_code_link(); + next = current_code_.next_code_link(); } else if (!next_context_.is_null()) { // Linked list of code exhausted. Get list of next context. - next = next_context_->OptimizedCodeListHead(); - Object next_context = next_context_->next_context_link(); - next_context_ = next_context->IsUndefined(isolate_) + next = next_context_.OptimizedCodeListHead(); + Object next_context = next_context_.next_context_link(); + next_context_ = next_context.IsUndefined(isolate_) ? Context() : Context::cast(next_context); } else { // Exhausted contexts. return Code(); } - current_code_ = next->IsUndefined(isolate_) ? Code() : Code::cast(next); + current_code_ = next.IsUndefined(isolate_) ? 
Code() : Code::cast(next); } while (current_code_.is_null()); - DCHECK_EQ(Code::OPTIMIZED_FUNCTION, current_code_->kind()); + DCHECK_EQ(Code::OPTIMIZED_FUNCTION, current_code_.kind()); return current_code_; } @@ -395,7 +394,7 @@ SharedFunctionInfo DeoptimizationData::GetInlinedFunction(int index) { if (index == -1) { return SharedFunctionInfo::cast(SharedFunctionInfo()); } else { - return SharedFunctionInfo::cast(LiteralArray()->get(index)); + return SharedFunctionInfo::cast(LiteralArray().get(index)); } } @@ -428,10 +427,10 @@ void DeoptimizationData::DeoptimizationDataPrint(std::ostream& os) { // NOLINT } disasm::NameConverter converter; - int const inlined_function_count = InlinedFunctionCount()->value(); + int const inlined_function_count = InlinedFunctionCount().value(); os << "Inlined functions (count = " << inlined_function_count << ")\n"; for (int id = 0; id < inlined_function_count; ++id) { - Object info = LiteralArray()->get(id); + Object info = LiteralArray().get(id); os << " " << Brief(SharedFunctionInfo::cast(info)) << "\n"; } os << "\n"; @@ -445,7 +444,7 @@ void DeoptimizationData::DeoptimizationDataPrint(std::ostream& os) { // NOLINT for (int i = 0; i < deopt_count; i++) { os << std::setw(6) << i << " " << std::setw(15) << BytecodeOffset(i).ToInt() << " " << std::setw(4); - print_pc(os, Pc(i)->value()); + print_pc(os, Pc(i).value()); os << std::setw(2); if (!FLAG_print_code_verbose) { @@ -454,7 +453,7 @@ void DeoptimizationData::DeoptimizationDataPrint(std::ostream& os) { // NOLINT } // Print details of the frame translation. 
- int translation_index = TranslationIndex(i)->value(); + int translation_index = TranslationIndex(i).value(); TranslationIterator iterator(TranslationByteArray(), translation_index); Translation::Opcode opcode = static_cast(iterator.Next()); @@ -483,9 +482,9 @@ void DeoptimizationData::DeoptimizationDataPrint(std::ostream& os) { // NOLINT unsigned height = iterator.Next(); int return_value_offset = iterator.Next(); int return_value_count = iterator.Next(); - Object shared_info = LiteralArray()->get(shared_info_id); + Object shared_info = LiteralArray().get(shared_info_id); os << "{bytecode_offset=" << bytecode_offset << ", function=" - << Brief(SharedFunctionInfo::cast(shared_info)->DebugName()) + << Brief(SharedFunctionInfo::cast(shared_info).DebugName()) << ", height=" << height << ", retval=@" << return_value_offset << "(#" << return_value_count << ")}"; break; @@ -494,10 +493,10 @@ void DeoptimizationData::DeoptimizationDataPrint(std::ostream& os) { // NOLINT case Translation::CONSTRUCT_STUB_FRAME: { int bailout_id = iterator.Next(); int shared_info_id = iterator.Next(); - Object shared_info = LiteralArray()->get(shared_info_id); + Object shared_info = LiteralArray().get(shared_info_id); unsigned height = iterator.Next(); os << "{bailout_id=" << bailout_id << ", function=" - << Brief(SharedFunctionInfo::cast(shared_info)->DebugName()) + << Brief(SharedFunctionInfo::cast(shared_info).DebugName()) << ", height=" << height << "}"; break; } @@ -507,20 +506,20 @@ void DeoptimizationData::DeoptimizationDataPrint(std::ostream& os) { // NOLINT case Translation::JAVA_SCRIPT_BUILTIN_CONTINUATION_WITH_CATCH_FRAME: { int bailout_id = iterator.Next(); int shared_info_id = iterator.Next(); - Object shared_info = LiteralArray()->get(shared_info_id); + Object shared_info = LiteralArray().get(shared_info_id); unsigned height = iterator.Next(); os << "{bailout_id=" << bailout_id << ", function=" - << Brief(SharedFunctionInfo::cast(shared_info)->DebugName()) + << 
Brief(SharedFunctionInfo::cast(shared_info).DebugName()) << ", height=" << height << "}"; break; } case Translation::ARGUMENTS_ADAPTOR_FRAME: { int shared_info_id = iterator.Next(); - Object shared_info = LiteralArray()->get(shared_info_id); + Object shared_info = LiteralArray().get(shared_info_id); unsigned height = iterator.Next(); os << "{function=" - << Brief(SharedFunctionInfo::cast(shared_info)->DebugName()) + << Brief(SharedFunctionInfo::cast(shared_info).DebugName()) << ", height=" << height << "}"; break; } @@ -610,7 +609,7 @@ void DeoptimizationData::DeoptimizationDataPrint(std::ostream& os) { // NOLINT case Translation::LITERAL: { int literal_index = iterator.Next(); - Object literal_value = LiteralArray()->get(literal_index); + Object literal_value = LiteralArray().get(literal_index); os << "{literal_id=" << literal_index << " (" << Brief(literal_value) << ")}"; break; @@ -705,7 +704,7 @@ void Code::Disassemble(const char* name, std::ostream& os, Address current_pc) { constant_pool_offset()); for (int i = 0; i < pool_size; i += kSystemPointerSize, ptr++) { SNPrintF(buf, "%4d %08" V8PRIxPTR, i, *ptr); - os << static_cast(ptr) << " " << buf.start() << "\n"; + os << static_cast(ptr) << " " << buf.begin() << "\n"; } } } @@ -744,7 +743,7 @@ void Code::Disassemble(const char* name, std::ostream& os, Address current_pc) { if (kind() == OPTIMIZED_FUNCTION) { DeoptimizationData data = DeoptimizationData::cast(this->deoptimization_data()); - data->DeoptimizationDataPrint(os); + data.DeoptimizationDataPrint(os); } os << "\n"; @@ -854,16 +853,16 @@ void BytecodeArray::Disassemble(std::ostream& os) { iterator.Advance(); } - os << "Constant pool (size = " << constant_pool()->length() << ")\n"; + os << "Constant pool (size = " << constant_pool().length() << ")\n"; #ifdef OBJECT_PRINT - if (constant_pool()->length() > 0) { - constant_pool()->Print(); + if (constant_pool().length() > 0) { + constant_pool().Print(); } #endif - os << "Handler Table (size = " << 
handler_table()->length() << ")\n"; + os << "Handler Table (size = " << handler_table().length() << ")\n"; #ifdef ENABLE_DISASSEMBLER - if (handler_table()->length() > 0) { + if (handler_table().length() > 0) { HandlerTable table(*this); table.HandlerTableRangePrint(os); } @@ -872,10 +871,10 @@ void BytecodeArray::Disassemble(std::ostream& os) { void BytecodeArray::CopyBytecodesTo(BytecodeArray to) { BytecodeArray from = *this; - DCHECK_EQ(from->length(), to->length()); - CopyBytes(reinterpret_cast(to->GetFirstBytecodeAddress()), - reinterpret_cast(from->GetFirstBytecodeAddress()), - from->length()); + DCHECK_EQ(from.length(), to.length()); + CopyBytes(reinterpret_cast(to.GetFirstBytecodeAddress()), + reinterpret_cast(from.GetFirstBytecodeAddress()), + from.length()); } void BytecodeArray::MakeOlder() { @@ -1018,7 +1017,7 @@ bool DependentCode::MarkCodeForDeoptimization( } if (this->group() < group) { // The group comes later in the list. - return next_link()->MarkCodeForDeoptimization(isolate, group); + return next_link().MarkCodeForDeoptimization(isolate, group); } DCHECK_EQ(group, this->group()); DisallowHeapAllocation no_allocation_scope; @@ -1029,8 +1028,8 @@ bool DependentCode::MarkCodeForDeoptimization( MaybeObject obj = object_at(i); if (obj->IsCleared()) continue; Code code = Code::cast(obj->GetHeapObjectAssumeWeak()); - if (!code->marked_for_deoptimization()) { - code->SetMarkedForDeoptimization(DependencyGroupName(group)); + if (!code.marked_for_deoptimization()) { + code.SetMarkedForDeoptimization(DependencyGroupName(group)); marked = true; } } @@ -1061,7 +1060,7 @@ void Code::SetMarkedForDeoptimization(const char* reason) { PrintF(scope.file(), "[marking dependent code " V8PRIxPTR_FMT " (opt #%d) for deoptimization, reason: %s]\n", - ptr(), deopt_data->OptimizationId()->value(), reason); + ptr(), deopt_data.OptimizationId().value(), reason); } } diff --git a/deps/v8/src/objects/code.h b/deps/v8/src/objects/code.h index 647cfebe69..a950261103 100644 --- 
a/deps/v8/src/objects/code.h +++ b/deps/v8/src/objects/code.h @@ -5,11 +5,11 @@ #ifndef V8_OBJECTS_CODE_H_ #define V8_OBJECTS_CODE_H_ -#include "src/contexts.h" -#include "src/handler-table.h" -#include "src/objects.h" +#include "src/codegen/handler-table.h" +#include "src/objects/contexts.h" #include "src/objects/fixed-array.h" #include "src/objects/heap-object.h" +#include "src/objects/objects.h" #include "src/objects/struct.h" // Has to be the last include (doesn't have include guards): @@ -22,7 +22,6 @@ class ByteArray; class BytecodeArray; class CodeDataContainer; class CodeDesc; -class MaybeObject; namespace interpreter { class Register; @@ -43,6 +42,7 @@ class Code : public HeapObject { V(BUILTIN) \ V(REGEXP) \ V(WASM_FUNCTION) \ + V(WASM_TO_CAPI_FUNCTION) \ V(WASM_TO_JS_FUNCTION) \ V(JS_TO_WASM_FUNCTION) \ V(WASM_INTERPRETER_ENTRY) \ @@ -948,25 +948,22 @@ class DeoptimizationData : public FixedArray { OBJECT_CONSTRUCTORS(DeoptimizationData, FixedArray); }; -class SourcePositionTableWithFrameCache : public Tuple2 { +class SourcePositionTableWithFrameCache : public Struct { public: DECL_ACCESSORS(source_position_table, ByteArray) DECL_ACCESSORS(stack_frame_cache, SimpleNumberDictionary) DECL_CAST(SourcePositionTableWithFrameCache) -// Layout description. -#define SOURCE_POSITION_TABLE_WITH_FRAME_FIELDS(V) \ - V(kSourcePositionTableIndex, kTaggedSize) \ - V(kStackFrameCacheIndex, kTaggedSize) \ - /* Total size. */ \ - V(kSize, 0) + DECL_PRINTER(SourcePositionTableWithFrameCache) + DECL_VERIFIER(SourcePositionTableWithFrameCache) - DEFINE_FIELD_OFFSET_CONSTANTS(Struct::kHeaderSize, - SOURCE_POSITION_TABLE_WITH_FRAME_FIELDS) -#undef SOURCE_POSITION_TABLE_WITH_FRAME_FIELDS + // Layout description. 
+ DEFINE_FIELD_OFFSET_CONSTANTS( + Struct::kHeaderSize, + TORQUE_GENERATED_SOURCE_POSITION_TABLE_WITH_FRAME_CACHE_FIELDS) - OBJECT_CONSTRUCTORS(SourcePositionTableWithFrameCache, Tuple2); + OBJECT_CONSTRUCTORS(SourcePositionTableWithFrameCache, Struct); }; } // namespace internal diff --git a/deps/v8/src/objects/compilation-cache-inl.h b/deps/v8/src/objects/compilation-cache-inl.h index 18491118ad..81b953a589 100644 --- a/deps/v8/src/objects/compilation-cache-inl.h +++ b/deps/v8/src/objects/compilation-cache-inl.h @@ -28,22 +28,22 @@ NEVER_READ_ONLY_SPACE_IMPL(CompilationCacheTable) CAST_ACCESSOR(CompilationCacheTable) uint32_t CompilationCacheShape::RegExpHash(String string, Smi flags) { - return string->Hash() + flags->value(); + return string.Hash() + flags.value(); } uint32_t CompilationCacheShape::StringSharedHash(String source, SharedFunctionInfo shared, LanguageMode language_mode, int position) { - uint32_t hash = source->Hash(); - if (shared->HasSourceCode()) { + uint32_t hash = source.Hash(); + if (shared.HasSourceCode()) { // Instead of using the SharedFunctionInfo pointer in the hash // code computation, we use a combination of the hash of the // script source code and the start position of the calling scope. // We do this to ensure that the cache entries can survive garbage // collection. 
- Script script(Script::cast(shared->script())); - hash ^= String::cast(script->source())->Hash(); + Script script(Script::cast(shared.script())); + hash ^= String::cast(script.source()).Hash(); STATIC_ASSERT(LanguageModeSize == 2); if (is_strict(language_mode)) hash ^= 0x8000; hash += position; @@ -53,27 +53,27 @@ uint32_t CompilationCacheShape::StringSharedHash(String source, uint32_t CompilationCacheShape::HashForObject(ReadOnlyRoots roots, Object object) { - if (object->IsNumber()) return static_cast(object->Number()); + if (object.IsNumber()) return static_cast(object.Number()); FixedArray val = FixedArray::cast(object); - if (val->map() == roots.fixed_cow_array_map()) { - DCHECK_EQ(4, val->length()); - SharedFunctionInfo shared = SharedFunctionInfo::cast(val->get(0)); - String source = String::cast(val->get(1)); - int language_unchecked = Smi::ToInt(val->get(2)); + if (val.map() == roots.fixed_cow_array_map()) { + DCHECK_EQ(4, val.length()); + SharedFunctionInfo shared = SharedFunctionInfo::cast(val.get(0)); + String source = String::cast(val.get(1)); + int language_unchecked = Smi::ToInt(val.get(2)); DCHECK(is_valid_language_mode(language_unchecked)); LanguageMode language_mode = static_cast(language_unchecked); - int position = Smi::ToInt(val->get(3)); + int position = Smi::ToInt(val.get(3)); return StringSharedHash(source, shared, language_mode, position); } - DCHECK_LT(2, val->length()); - return RegExpHash(String::cast(val->get(JSRegExp::kSourceIndex)), - Smi::cast(val->get(JSRegExp::kFlagsIndex))); + DCHECK_LT(2, val.length()); + return RegExpHash(String::cast(val.get(JSRegExp::kSourceIndex)), + Smi::cast(val.get(JSRegExp::kFlagsIndex))); } InfoCellPair::InfoCellPair(SharedFunctionInfo shared, FeedbackCell feedback_cell) - : is_compiled_scope_(!shared.is_null() ? shared->is_compiled_scope() + : is_compiled_scope_(!shared.is_null() ? 
shared.is_compiled_scope() : IsCompiledScope()), shared_(shared), feedback_cell_(feedback_cell) {} diff --git a/deps/v8/src/objects/compilation-cache.h b/deps/v8/src/objects/compilation-cache.h index d3feb1b233..2072339c5e 100644 --- a/deps/v8/src/objects/compilation-cache.h +++ b/deps/v8/src/objects/compilation-cache.h @@ -9,7 +9,7 @@ #include "src/objects/hash-table.h" #include "src/objects/js-regexp.h" #include "src/objects/shared-function-info.h" -#include "src/roots.h" +#include "src/roots/roots.h" // Has to be the last include (doesn't have include guards): #include "src/objects/object-macros.h" diff --git a/deps/v8/src/objects/compressed-slots-inl.h b/deps/v8/src/objects/compressed-slots-inl.h index 9c55de9ae6..b08bc938e5 100644 --- a/deps/v8/src/objects/compressed-slots-inl.h +++ b/deps/v8/src/objects/compressed-slots-inl.h @@ -7,10 +7,10 @@ #ifdef V8_COMPRESS_POINTERS +#include "src/common/ptr-compr-inl.h" #include "src/objects/compressed-slots.h" #include "src/objects/heap-object-inl.h" #include "src/objects/maybe-object-inl.h" -#include "src/ptr-compr-inl.h" namespace v8 { namespace internal { @@ -28,7 +28,7 @@ Object CompressedObjectSlot::operator*() const { } void CompressedObjectSlot::store(Object value) const { - *location() = CompressTagged(value->ptr()); + *location() = CompressTagged(value.ptr()); } Object CompressedObjectSlot::Acquire_Load() const { @@ -42,19 +42,19 @@ Object CompressedObjectSlot::Relaxed_Load() const { } void CompressedObjectSlot::Relaxed_Store(Object value) const { - Tagged_t ptr = CompressTagged(value->ptr()); + Tagged_t ptr = CompressTagged(value.ptr()); AsAtomicTagged::Relaxed_Store(location(), ptr); } void CompressedObjectSlot::Release_Store(Object value) const { - Tagged_t ptr = CompressTagged(value->ptr()); + Tagged_t ptr = CompressTagged(value.ptr()); AsAtomicTagged::Release_Store(location(), ptr); } Object CompressedObjectSlot::Release_CompareAndSwap(Object old, Object target) const { - Tagged_t old_ptr = 
CompressTagged(old->ptr()); - Tagged_t target_ptr = CompressTagged(target->ptr()); + Tagged_t old_ptr = CompressTagged(old.ptr()); + Tagged_t target_ptr = CompressTagged(target.ptr()); Tagged_t result = AsAtomicTagged::Release_CompareAndSwap(location(), old_ptr, target_ptr); return Object(DecompressTaggedAny(address(), result)); @@ -95,14 +95,14 @@ Object CompressedMapWordSlot::Acquire_Load() const { } void CompressedMapWordSlot::Release_Store(Object value) const { - Tagged_t ptr = CompressTagged(value->ptr()); + Tagged_t ptr = CompressTagged(value.ptr()); AsAtomicTagged::Release_Store(location(), ptr); } Object CompressedMapWordSlot::Release_CompareAndSwap(Object old, Object target) const { - Tagged_t old_ptr = CompressTagged(old->ptr()); - Tagged_t target_ptr = CompressTagged(target->ptr()); + Tagged_t old_ptr = CompressTagged(old.ptr()); + Tagged_t target_ptr = CompressTagged(target.ptr()); Tagged_t result = AsAtomicTagged::Release_CompareAndSwap(location(), old_ptr, target_ptr); return Object(DecompressTaggedPointer(address(), result)); @@ -118,7 +118,7 @@ MaybeObject CompressedMaybeObjectSlot::operator*() const { } void CompressedMaybeObjectSlot::store(MaybeObject value) const { - *location() = CompressTagged(value->ptr()); + *location() = CompressTagged(value.ptr()); } MaybeObject CompressedMaybeObjectSlot::Relaxed_Load() const { @@ -127,14 +127,14 @@ MaybeObject CompressedMaybeObjectSlot::Relaxed_Load() const { } void CompressedMaybeObjectSlot::Relaxed_Store(MaybeObject value) const { - Tagged_t ptr = CompressTagged(value->ptr()); + Tagged_t ptr = CompressTagged(value.ptr()); AsAtomicTagged::Relaxed_Store(location(), ptr); } void CompressedMaybeObjectSlot::Release_CompareAndSwap( MaybeObject old, MaybeObject target) const { - Tagged_t old_ptr = CompressTagged(old->ptr()); - Tagged_t target_ptr = CompressTagged(target->ptr()); + Tagged_t old_ptr = CompressTagged(old.ptr()); + Tagged_t target_ptr = CompressTagged(target.ptr()); 
AsAtomicTagged::Release_CompareAndSwap(location(), old_ptr, target_ptr); } @@ -158,7 +158,7 @@ HeapObject CompressedHeapObjectSlot::ToHeapObject() const { } void CompressedHeapObjectSlot::StoreHeapObject(HeapObject value) const { - *location() = CompressTagged(value->ptr()); + *location() = CompressTagged(value.ptr()); } } // namespace internal diff --git a/deps/v8/src/objects/contexts-inl.h b/deps/v8/src/objects/contexts-inl.h new file mode 100644 index 0000000000..bb861a1d1e --- /dev/null +++ b/deps/v8/src/objects/contexts-inl.h @@ -0,0 +1,256 @@ +// Copyright 2015 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef V8_OBJECTS_CONTEXTS_INL_H_ +#define V8_OBJECTS_CONTEXTS_INL_H_ + +#include "src/objects/contexts.h" + +#include "src/heap/heap-write-barrier.h" +#include "src/objects/dictionary-inl.h" +#include "src/objects/fixed-array-inl.h" +#include "src/objects/js-objects-inl.h" +#include "src/objects/map-inl.h" +#include "src/objects/objects-inl.h" +#include "src/objects/regexp-match-info.h" +#include "src/objects/scope-info.h" +#include "src/objects/shared-function-info.h" + +// Has to be the last include (doesn't have include guards): +#include "src/objects/object-macros.h" + +namespace v8 { +namespace internal { + +OBJECT_CONSTRUCTORS_IMPL(ScriptContextTable, FixedArray) +CAST_ACCESSOR(ScriptContextTable) + +int ScriptContextTable::used() const { return Smi::ToInt(get(kUsedSlotIndex)); } + +void ScriptContextTable::set_used(int used) { + set(kUsedSlotIndex, Smi::FromInt(used)); +} + +// static +Handle ScriptContextTable::GetContext(Isolate* isolate, + Handle table, + int i) { + return handle(table->get_context(i), isolate); +} + +Context ScriptContextTable::get_context(int i) const { + DCHECK_LT(i, used()); + return Context::cast(this->get(i + kFirstContextSlotIndex)); +} + +OBJECT_CONSTRUCTORS_IMPL(Context, HeapObject) 
+NEVER_READ_ONLY_SPACE_IMPL(Context) +CAST_ACCESSOR(Context) +SMI_ACCESSORS(Context, length, kLengthOffset) + +CAST_ACCESSOR(NativeContext) + +Object Context::get(int index) const { + DCHECK_LT(static_cast(index), + static_cast(this->length())); + return RELAXED_READ_FIELD(*this, OffsetOfElementAt(index)); +} + +void Context::set(int index, Object value) { + DCHECK_LT(static_cast(index), + static_cast(this->length())); + int offset = OffsetOfElementAt(index); + RELAXED_WRITE_FIELD(*this, offset, value); + WRITE_BARRIER(*this, offset, value); +} + +void Context::set(int index, Object value, WriteBarrierMode mode) { + DCHECK_LT(static_cast(index), + static_cast(this->length())); + int offset = OffsetOfElementAt(index); + RELAXED_WRITE_FIELD(*this, offset, value); + CONDITIONAL_WRITE_BARRIER(*this, offset, value, mode); +} + +void Context::set_scope_info(ScopeInfo scope_info) { + set(SCOPE_INFO_INDEX, scope_info); +} + +Object Context::unchecked_previous() { return get(PREVIOUS_INDEX); } + +Context Context::previous() { + Object result = get(PREVIOUS_INDEX); + DCHECK(IsBootstrappingOrValidParentContext(result, *this)); + return Context::unchecked_cast(result); +} +void Context::set_previous(Context context) { set(PREVIOUS_INDEX, context); } + +Object Context::next_context_link() { return get(Context::NEXT_CONTEXT_LINK); } + +bool Context::has_extension() { return !extension().IsTheHole(); } +HeapObject Context::extension() { + return HeapObject::cast(get(EXTENSION_INDEX)); +} +void Context::set_extension(HeapObject object) { set(EXTENSION_INDEX, object); } + +NativeContext Context::native_context() const { + Object result = get(NATIVE_CONTEXT_INDEX); + DCHECK(IsBootstrappingOrNativeContext(this->GetIsolate(), result)); + return NativeContext::unchecked_cast(result); +} + +void Context::set_native_context(NativeContext context) { + set(NATIVE_CONTEXT_INDEX, context); +} + +bool Context::IsFunctionContext() const { + return map().instance_type() == 
FUNCTION_CONTEXT_TYPE; +} + +bool Context::IsCatchContext() const { + return map().instance_type() == CATCH_CONTEXT_TYPE; +} + +bool Context::IsWithContext() const { + return map().instance_type() == WITH_CONTEXT_TYPE; +} + +bool Context::IsDebugEvaluateContext() const { + return map().instance_type() == DEBUG_EVALUATE_CONTEXT_TYPE; +} + +bool Context::IsAwaitContext() const { + return map().instance_type() == AWAIT_CONTEXT_TYPE; +} + +bool Context::IsBlockContext() const { + return map().instance_type() == BLOCK_CONTEXT_TYPE; +} + +bool Context::IsModuleContext() const { + return map().instance_type() == MODULE_CONTEXT_TYPE; +} + +bool Context::IsEvalContext() const { + return map().instance_type() == EVAL_CONTEXT_TYPE; +} + +bool Context::IsScriptContext() const { + return map().instance_type() == SCRIPT_CONTEXT_TYPE; +} + +bool Context::HasSameSecurityTokenAs(Context that) const { + return this->native_context().security_token() == + that.native_context().security_token(); +} + +#define NATIVE_CONTEXT_FIELD_ACCESSORS(index, type, name) \ + void Context::set_##name(type value) { \ + DCHECK(IsNativeContext()); \ + set(index, value); \ + } \ + bool Context::is_##name(type value) const { \ + DCHECK(IsNativeContext()); \ + return type::cast(get(index)) == value; \ + } \ + type Context::name() const { \ + DCHECK(IsNativeContext()); \ + return type::cast(get(index)); \ + } +NATIVE_CONTEXT_FIELDS(NATIVE_CONTEXT_FIELD_ACCESSORS) +#undef NATIVE_CONTEXT_FIELD_ACCESSORS + +#define CHECK_FOLLOWS2(v1, v2) STATIC_ASSERT((v1 + 1) == (v2)) +#define CHECK_FOLLOWS4(v1, v2, v3, v4) \ + CHECK_FOLLOWS2(v1, v2); \ + CHECK_FOLLOWS2(v2, v3); \ + CHECK_FOLLOWS2(v3, v4) + +int Context::FunctionMapIndex(LanguageMode language_mode, FunctionKind kind, + bool has_shared_name, bool needs_home_object) { + if (IsClassConstructor(kind)) { + // Like the strict function map, but with no 'name' accessor. 
'name' + // needs to be the last property and it is added during instantiation, + // in case a static property with the same name exists" + return CLASS_FUNCTION_MAP_INDEX; + } + + int base = 0; + if (IsGeneratorFunction(kind)) { + CHECK_FOLLOWS4(GENERATOR_FUNCTION_MAP_INDEX, + GENERATOR_FUNCTION_WITH_NAME_MAP_INDEX, + GENERATOR_FUNCTION_WITH_HOME_OBJECT_MAP_INDEX, + GENERATOR_FUNCTION_WITH_NAME_AND_HOME_OBJECT_MAP_INDEX); + CHECK_FOLLOWS4( + ASYNC_GENERATOR_FUNCTION_MAP_INDEX, + ASYNC_GENERATOR_FUNCTION_WITH_NAME_MAP_INDEX, + ASYNC_GENERATOR_FUNCTION_WITH_HOME_OBJECT_MAP_INDEX, + ASYNC_GENERATOR_FUNCTION_WITH_NAME_AND_HOME_OBJECT_MAP_INDEX); + + base = IsAsyncFunction(kind) ? ASYNC_GENERATOR_FUNCTION_MAP_INDEX + : GENERATOR_FUNCTION_MAP_INDEX; + + } else if (IsAsyncFunction(kind)) { + CHECK_FOLLOWS4(ASYNC_FUNCTION_MAP_INDEX, ASYNC_FUNCTION_WITH_NAME_MAP_INDEX, + ASYNC_FUNCTION_WITH_HOME_OBJECT_MAP_INDEX, + ASYNC_FUNCTION_WITH_NAME_AND_HOME_OBJECT_MAP_INDEX); + + base = ASYNC_FUNCTION_MAP_INDEX; + + } else if (IsStrictFunctionWithoutPrototype(kind)) { + DCHECK_IMPLIES(IsArrowFunction(kind), !needs_home_object); + CHECK_FOLLOWS4(STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX, + METHOD_WITH_NAME_MAP_INDEX, + METHOD_WITH_HOME_OBJECT_MAP_INDEX, + METHOD_WITH_NAME_AND_HOME_OBJECT_MAP_INDEX); + + base = STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX; + + } else { + DCHECK(!needs_home_object); + CHECK_FOLLOWS2(SLOPPY_FUNCTION_MAP_INDEX, + SLOPPY_FUNCTION_WITH_NAME_MAP_INDEX); + CHECK_FOLLOWS2(STRICT_FUNCTION_MAP_INDEX, + STRICT_FUNCTION_WITH_NAME_MAP_INDEX); + + base = is_strict(language_mode) ? 
STRICT_FUNCTION_MAP_INDEX + : SLOPPY_FUNCTION_MAP_INDEX; + } + int offset = static_cast(!has_shared_name) | + (static_cast(needs_home_object) << 1); + DCHECK_EQ(0, offset & ~3); + + return base + offset; +} + +#undef CHECK_FOLLOWS2 +#undef CHECK_FOLLOWS4 + +Map Context::GetInitialJSArrayMap(ElementsKind kind) const { + DCHECK(IsNativeContext()); + if (!IsFastElementsKind(kind)) return Map(); + DisallowHeapAllocation no_gc; + Object const initial_js_array_map = get(Context::ArrayMapIndex(kind)); + DCHECK(!initial_js_array_map.IsUndefined()); + return Map::cast(initial_js_array_map); +} + +MicrotaskQueue* NativeContext::microtask_queue() const { + return reinterpret_cast( + ReadField
(kMicrotaskQueueOffset)); +} + +void NativeContext::set_microtask_queue(MicrotaskQueue* microtask_queue) { + WriteField
(kMicrotaskQueueOffset, + reinterpret_cast
(microtask_queue)); +} + +OBJECT_CONSTRUCTORS_IMPL(NativeContext, Context) + +} // namespace internal +} // namespace v8 + +#include "src/objects/object-macros-undef.h" + +#endif // V8_OBJECTS_CONTEXTS_INL_H_ diff --git a/deps/v8/src/objects/contexts.cc b/deps/v8/src/objects/contexts.cc new file mode 100644 index 0000000000..cddbcb98c0 --- /dev/null +++ b/deps/v8/src/objects/contexts.cc @@ -0,0 +1,512 @@ +// Copyright 2011 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "src/objects/contexts.h" + +#include "src/ast/modules.h" +#include "src/debug/debug.h" +#include "src/execution/isolate-inl.h" +#include "src/init/bootstrapper.h" +#include "src/objects/module-inl.h" + +namespace v8 { +namespace internal { + +Handle ScriptContextTable::Extend( + Handle table, Handle script_context) { + Handle result; + int used = table->used(); + int length = table->length(); + CHECK(used >= 0 && length > 0 && used < length); + if (used + kFirstContextSlotIndex == length) { + CHECK(length < Smi::kMaxValue / 2); + Isolate* isolate = script_context->GetIsolate(); + Handle copy = + isolate->factory()->CopyFixedArrayAndGrow(table, length); + copy->set_map(ReadOnlyRoots(isolate).script_context_table_map()); + result = Handle::cast(copy); + } else { + result = table; + } + result->set_used(used + 1); + + DCHECK(script_context->IsScriptContext()); + result->set(used + kFirstContextSlotIndex, *script_context); + return result; +} + +bool ScriptContextTable::Lookup(Isolate* isolate, ScriptContextTable table, + String name, LookupResult* result) { + DisallowHeapAllocation no_gc; + for (int i = 0; i < table.used(); i++) { + Context context = table.get_context(i); + DCHECK(context.IsScriptContext()); + int slot_index = ScopeInfo::ContextSlotIndex( + context.scope_info(), name, &result->mode, &result->init_flag, + &result->maybe_assigned_flag); + + if (slot_index >= 0) { + 
result->context_index = i; + result->slot_index = slot_index; + return true; + } + } + return false; +} + +bool Context::is_declaration_context() { + if (IsFunctionContext() || IsNativeContext() || IsScriptContext() || + IsModuleContext()) { + return true; + } + if (IsEvalContext()) { + return scope_info().language_mode() == LanguageMode::kStrict; + } + if (!IsBlockContext()) return false; + return scope_info().is_declaration_scope(); +} + +Context Context::declaration_context() { + Context current = *this; + while (!current.is_declaration_context()) { + current = current.previous(); + } + return current; +} + +Context Context::closure_context() { + Context current = *this; + while (!current.IsFunctionContext() && !current.IsScriptContext() && + !current.IsModuleContext() && !current.IsNativeContext() && + !current.IsEvalContext()) { + current = current.previous(); + } + return current; +} + +JSObject Context::extension_object() { + DCHECK(IsNativeContext() || IsFunctionContext() || IsBlockContext() || + IsEvalContext() || IsCatchContext()); + HeapObject object = extension(); + if (object.IsTheHole()) return JSObject(); + DCHECK(object.IsJSContextExtensionObject() || + (IsNativeContext() && object.IsJSGlobalObject())); + return JSObject::cast(object); +} + +JSReceiver Context::extension_receiver() { + DCHECK(IsNativeContext() || IsWithContext() || IsEvalContext() || + IsFunctionContext() || IsBlockContext()); + return IsWithContext() ? 
JSReceiver::cast(extension()) : extension_object(); +} + +ScopeInfo Context::scope_info() { + return ScopeInfo::cast(get(SCOPE_INFO_INDEX)); +} + +Module Context::module() { + Context current = *this; + while (!current.IsModuleContext()) { + current = current.previous(); + } + return Module::cast(current.extension()); +} + +JSGlobalObject Context::global_object() { + return JSGlobalObject::cast(native_context().extension()); +} + +Context Context::script_context() { + Context current = *this; + while (!current.IsScriptContext()) { + current = current.previous(); + } + return current; +} + +JSGlobalProxy Context::global_proxy() { + return native_context().global_proxy_object(); +} + +void Context::set_global_proxy(JSGlobalProxy object) { + native_context().set_global_proxy_object(object); +} + +/** + * Lookups a property in an object environment, taking the unscopables into + * account. This is used For HasBinding spec algorithms for ObjectEnvironment. + */ +static Maybe UnscopableLookup(LookupIterator* it) { + Isolate* isolate = it->isolate(); + + Maybe found = JSReceiver::HasProperty(it); + if (found.IsNothing() || !found.FromJust()) return found; + + Handle unscopables; + ASSIGN_RETURN_ON_EXCEPTION_VALUE( + isolate, unscopables, + JSReceiver::GetProperty(isolate, + Handle::cast(it->GetReceiver()), + isolate->factory()->unscopables_symbol()), + Nothing()); + if (!unscopables->IsJSReceiver()) return Just(true); + Handle blacklist; + ASSIGN_RETURN_ON_EXCEPTION_VALUE( + isolate, blacklist, + JSReceiver::GetProperty(isolate, Handle::cast(unscopables), + it->name()), + Nothing()); + return Just(!blacklist->BooleanValue(isolate)); +} + +static PropertyAttributes GetAttributesForMode(VariableMode mode) { + DCHECK(IsDeclaredVariableMode(mode)); + return mode == VariableMode::kConst ? 
READ_ONLY : NONE; +} + +// static +Handle Context::Lookup(Handle context, Handle name, + ContextLookupFlags flags, int* index, + PropertyAttributes* attributes, + InitializationFlag* init_flag, + VariableMode* variable_mode, + bool* is_sloppy_function_name) { + Isolate* isolate = context->GetIsolate(); + + bool follow_context_chain = (flags & FOLLOW_CONTEXT_CHAIN) != 0; + bool failed_whitelist = false; + *index = kNotFound; + *attributes = ABSENT; + *init_flag = kCreatedInitialized; + *variable_mode = VariableMode::kVar; + if (is_sloppy_function_name != nullptr) { + *is_sloppy_function_name = false; + } + + if (FLAG_trace_contexts) { + PrintF("Context::Lookup("); + name->ShortPrint(); + PrintF(")\n"); + } + + do { + if (FLAG_trace_contexts) { + PrintF(" - looking in context %p", + reinterpret_cast(context->ptr())); + if (context->IsScriptContext()) PrintF(" (script context)"); + if (context->IsNativeContext()) PrintF(" (native context)"); + PrintF("\n"); + } + + // 1. Check global objects, subjects of with, and extension objects. + DCHECK_IMPLIES(context->IsEvalContext(), + context->extension().IsTheHole(isolate)); + if ((context->IsNativeContext() || context->IsWithContext() || + context->IsFunctionContext() || context->IsBlockContext()) && + !context->extension_receiver().is_null()) { + Handle object(context->extension_receiver(), isolate); + + if (context->IsNativeContext()) { + DisallowHeapAllocation no_gc; + if (FLAG_trace_contexts) { + PrintF(" - trying other script contexts\n"); + } + // Try other script contexts. 
+ ScriptContextTable script_contexts = + context->global_object().native_context().script_context_table(); + ScriptContextTable::LookupResult r; + if (ScriptContextTable::Lookup(isolate, script_contexts, *name, &r)) { + Context context = script_contexts.get_context(r.context_index); + if (FLAG_trace_contexts) { + PrintF("=> found property in script context %d: %p\n", + r.context_index, reinterpret_cast(context.ptr())); + } + *index = r.slot_index; + *variable_mode = r.mode; + *init_flag = r.init_flag; + *attributes = GetAttributesForMode(r.mode); + return handle(context, isolate); + } + } + + // Context extension objects needs to behave as if they have no + // prototype. So even if we want to follow prototype chains, we need + // to only do a local lookup for context extension objects. + Maybe maybe = Nothing(); + if ((flags & FOLLOW_PROTOTYPE_CHAIN) == 0 || + object->IsJSContextExtensionObject()) { + maybe = JSReceiver::GetOwnPropertyAttributes(object, name); + } else if (context->IsWithContext()) { + // A with context will never bind "this", but debug-eval may look into + // a with context when resolving "this". Other synthetic variables such + // as new.target may be resolved as VariableMode::kDynamicLocal due to + // bug v8:5405 , skipping them here serves as a workaround until a more + // thorough fix can be applied. + // TODO(v8:5405): Replace this check with a DCHECK when resolution of + // of synthetic variables does not go through this code path. + if (ScopeInfo::VariableIsSynthetic(*name)) { + maybe = Just(ABSENT); + } else { + LookupIterator it(object, name, object); + Maybe found = UnscopableLookup(&it); + if (found.IsNothing()) { + maybe = Nothing(); + } else { + // Luckily, consumers of |maybe| only care whether the property + // was absent or not, so we can return a dummy |NONE| value + // for its attributes when it was present. + maybe = Just(found.FromJust() ? 
NONE : ABSENT); + } + } + } else { + maybe = JSReceiver::GetPropertyAttributes(object, name); + } + + if (maybe.IsNothing()) return Handle<Object>(); + DCHECK(!isolate->has_pending_exception()); + *attributes = maybe.FromJust(); + + if (maybe.FromJust() != ABSENT) { + if (FLAG_trace_contexts) { + PrintF("=> found property in context object %p\n", + reinterpret_cast<void*>(object->ptr())); + } + return object; + } + } + + // 2. Check the context proper if it has slots. + if (context->IsFunctionContext() || context->IsBlockContext() || + context->IsScriptContext() || context->IsEvalContext() || + context->IsModuleContext() || context->IsCatchContext()) { + DisallowHeapAllocation no_gc; + // Use serialized scope information of functions and blocks to search + // for the context index. + ScopeInfo scope_info = context->scope_info(); + VariableMode mode; + InitializationFlag flag; + MaybeAssignedFlag maybe_assigned_flag; + int slot_index = ScopeInfo::ContextSlotIndex(scope_info, *name, &mode, + &flag, &maybe_assigned_flag); + DCHECK(slot_index < 0 || slot_index >= MIN_CONTEXT_SLOTS); + if (slot_index >= 0) { + if (FLAG_trace_contexts) { + PrintF("=> found local in context slot %d (mode = %hhu)\n", + slot_index, static_cast<uint8_t>(mode)); + } + *index = slot_index; + *variable_mode = mode; + *init_flag = flag; + *attributes = GetAttributesForMode(mode); + return context; + } + + // Check the slot corresponding to the intermediate context holding + // only the function name variable. It's conceptually (and spec-wise) + // in an outer scope of the function's declaration scope. 
+ if (follow_context_chain && context->IsFunctionContext()) { + int function_index = scope_info.FunctionContextSlotIndex(*name); + if (function_index >= 0) { + if (FLAG_trace_contexts) { + PrintF("=> found intermediate function in context slot %d\n", + function_index); + } + *index = function_index; + *attributes = READ_ONLY; + *init_flag = kCreatedInitialized; + *variable_mode = VariableMode::kConst; + if (is_sloppy_function_name != nullptr && + is_sloppy(scope_info.language_mode())) { + *is_sloppy_function_name = true; + } + return context; + } + } + + // Lookup variable in module imports and exports. + if (context->IsModuleContext()) { + VariableMode mode; + InitializationFlag flag; + MaybeAssignedFlag maybe_assigned_flag; + int cell_index = + scope_info.ModuleIndex(*name, &mode, &flag, &maybe_assigned_flag); + if (cell_index != 0) { + if (FLAG_trace_contexts) { + PrintF("=> found in module imports or exports\n"); + } + *index = cell_index; + *variable_mode = mode; + *init_flag = flag; + *attributes = ModuleDescriptor::GetCellIndexKind(cell_index) == + ModuleDescriptor::kExport + ? GetAttributesForMode(mode) + : READ_ONLY; + return handle(context->module(), isolate); + } + } + } else if (context->IsDebugEvaluateContext()) { + // Check materialized locals. + Object ext = context->get(EXTENSION_INDEX); + if (ext.IsJSReceiver()) { + Handle<JSReceiver> extension(JSReceiver::cast(ext), isolate); + LookupIterator it(extension, name, extension); + Maybe<bool> found = JSReceiver::HasProperty(&it); + if (found.FromMaybe(false)) { + *attributes = NONE; + return extension; + } + } + // Check the original context, but do not follow its context chain. + Object obj = context->get(WRAPPED_CONTEXT_INDEX); + if (obj.IsContext()) { + Handle<Context> context(Context::cast(obj), isolate); + Handle<Object> result = + Context::Lookup(context, name, DONT_FOLLOW_CHAINS, index, + attributes, init_flag, variable_mode); + if (!result.is_null()) return result; + } + // Check whitelist. 
Names that do not pass whitelist shall only resolve + // to with, script or native contexts up the context chain. + obj = context->get(WHITE_LIST_INDEX); + if (obj.IsStringSet()) { + failed_whitelist = + failed_whitelist || !StringSet::cast(obj).Has(isolate, name); + } + } + + // 3. Prepare to continue with the previous (next outermost) context. + if (context->IsNativeContext()) break; + + do { + context = Handle<Context>(context->previous(), isolate); + // If we come across a whitelist context, and the name is not + // whitelisted, then only consider with, script, module or native + // contexts. + } while (failed_whitelist && !context->IsScriptContext() && + !context->IsNativeContext() && !context->IsWithContext() && + !context->IsModuleContext()); + } while (follow_context_chain); + + if (FLAG_trace_contexts) { + PrintF("=> no property/slot found\n"); + } + return Handle<Object>::null(); +} + +void Context::AddOptimizedCode(Code code) { + DCHECK(IsNativeContext()); + DCHECK(code.kind() == Code::OPTIMIZED_FUNCTION); + DCHECK(code.next_code_link().IsUndefined()); + code.set_next_code_link(get(OPTIMIZED_CODE_LIST)); + set(OPTIMIZED_CODE_LIST, code, UPDATE_WEAK_WRITE_BARRIER); +} + +void Context::SetOptimizedCodeListHead(Object head) { + DCHECK(IsNativeContext()); + set(OPTIMIZED_CODE_LIST, head, UPDATE_WEAK_WRITE_BARRIER); +} + +Object Context::OptimizedCodeListHead() { + DCHECK(IsNativeContext()); + return get(OPTIMIZED_CODE_LIST); +} + +void Context::SetDeoptimizedCodeListHead(Object head) { + DCHECK(IsNativeContext()); + set(DEOPTIMIZED_CODE_LIST, head, UPDATE_WEAK_WRITE_BARRIER); +} + +Object Context::DeoptimizedCodeListHead() { + DCHECK(IsNativeContext()); + return get(DEOPTIMIZED_CODE_LIST); +} + +Handle<Object> Context::ErrorMessageForCodeGenerationFromStrings() { + Isolate* isolate = GetIsolate(); + Handle<Object> result(error_message_for_code_gen_from_strings(), isolate); + if (!result->IsUndefined(isolate)) return result; + return isolate->factory()->NewStringFromStaticChars( + "Code 
generation from strings disallowed for this context"); +} + +#define COMPARE_NAME(index, type, name) \ + if (string->IsOneByteEqualTo(StaticCharVector(#name))) return index; + +int Context::IntrinsicIndexForName(Handle<String> string) { + NATIVE_CONTEXT_INTRINSIC_FUNCTIONS(COMPARE_NAME); + return kNotFound; +} + +#undef COMPARE_NAME + +#define COMPARE_NAME(index, type, name) \ + if (strncmp(string, #name, length) == 0) return index; + +int Context::IntrinsicIndexForName(const unsigned char* unsigned_string, + int length) { + const char* string = reinterpret_cast<const char*>(unsigned_string); + NATIVE_CONTEXT_INTRINSIC_FUNCTIONS(COMPARE_NAME); + return kNotFound; +} + +#undef COMPARE_NAME + +#ifdef DEBUG + +bool Context::IsBootstrappingOrNativeContext(Isolate* isolate, Object object) { + // During bootstrapping we allow all objects to pass as global + // objects. This is necessary to fix circular dependencies. + return isolate->heap()->gc_state() != Heap::NOT_IN_GC || + isolate->bootstrapper()->IsActive() || object.IsNativeContext(); +} + +bool Context::IsBootstrappingOrValidParentContext(Object object, + Context child) { + // During bootstrapping we allow all objects to pass as + // contexts. This is necessary to fix circular dependencies. 
+ if (child.GetIsolate()->bootstrapper()->IsActive()) return true; + if (!object.IsContext()) return false; + Context context = Context::cast(object); + return context.IsNativeContext() || context.IsScriptContext() || + context.IsModuleContext() || !child.IsModuleContext(); +} + +#endif + +void Context::ResetErrorsThrown() { + DCHECK(IsNativeContext()); + set_errors_thrown(Smi::FromInt(0)); +} + +void Context::IncrementErrorsThrown() { + DCHECK(IsNativeContext()); + + int previous_value = errors_thrown().value(); + set_errors_thrown(Smi::FromInt(previous_value + 1)); +} + +int Context::GetErrorsThrown() { return errors_thrown().value(); } + +STATIC_ASSERT(Context::MIN_CONTEXT_SLOTS == 4); +STATIC_ASSERT(NativeContext::kScopeInfoOffset == + Context::OffsetOfElementAt(NativeContext::SCOPE_INFO_INDEX)); +STATIC_ASSERT(NativeContext::kPreviousOffset == + Context::OffsetOfElementAt(NativeContext::PREVIOUS_INDEX)); +STATIC_ASSERT(NativeContext::kExtensionOffset == + Context::OffsetOfElementAt(NativeContext::EXTENSION_INDEX)); +STATIC_ASSERT(NativeContext::kNativeContextOffset == + Context::OffsetOfElementAt(NativeContext::NATIVE_CONTEXT_INDEX)); + +STATIC_ASSERT(NativeContext::kStartOfStrongFieldsOffset == + Context::OffsetOfElementAt(-1)); +STATIC_ASSERT(NativeContext::kStartOfWeakFieldsOffset == + Context::OffsetOfElementAt(NativeContext::FIRST_WEAK_SLOT)); +STATIC_ASSERT(NativeContext::kMicrotaskQueueOffset == + Context::SizeFor(NativeContext::NATIVE_CONTEXT_SLOTS)); +STATIC_ASSERT(NativeContext::kSize == + (Context::SizeFor(NativeContext::NATIVE_CONTEXT_SLOTS) + + kSystemPointerSize)); + +} // namespace internal +} // namespace v8 diff --git a/deps/v8/src/objects/contexts.h b/deps/v8/src/objects/contexts.h new file mode 100644 index 0000000000..d83e351550 --- /dev/null +++ b/deps/v8/src/objects/contexts.h @@ -0,0 +1,720 @@ +// Copyright 2012 the V8 project authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef V8_OBJECTS_CONTEXTS_H_ +#define V8_OBJECTS_CONTEXTS_H_ + +#include "src/objects/fixed-array.h" +#include "src/objects/function-kind.h" +#include "torque-generated/field-offsets-tq.h" +// Has to be the last include (doesn't have include guards): +#include "src/objects/object-macros.h" + +namespace v8 { +namespace internal { + +class JSGlobalObject; +class JSGlobalProxy; +class MicrotaskQueue; +class NativeContext; +class RegExpMatchInfo; + +enum ContextLookupFlags { + FOLLOW_CONTEXT_CHAIN = 1 << 0, + FOLLOW_PROTOTYPE_CHAIN = 1 << 1, + + DONT_FOLLOW_CHAINS = 0, + FOLLOW_CHAINS = FOLLOW_CONTEXT_CHAIN | FOLLOW_PROTOTYPE_CHAIN, +}; + +// Heap-allocated activation contexts. +// +// Contexts are implemented as FixedArray-like objects having a fixed +// header with a set of common fields. +// +// Note: Context must have no virtual functions and Context objects +// must always be allocated via Heap::AllocateContext() or +// Factory::NewContext. 
+ +#define NATIVE_CONTEXT_INTRINSIC_FUNCTIONS(V) \ + V(ASYNC_FUNCTION_PROMISE_CREATE_INDEX, JSFunction, \ + async_function_promise_create) \ + V(IS_ARRAYLIKE, JSFunction, is_arraylike) \ + V(GENERATOR_NEXT_INTERNAL, JSFunction, generator_next_internal) \ + V(MAKE_ERROR_INDEX, JSFunction, make_error) \ + V(MAKE_RANGE_ERROR_INDEX, JSFunction, make_range_error) \ + V(MAKE_SYNTAX_ERROR_INDEX, JSFunction, make_syntax_error) \ + V(MAKE_TYPE_ERROR_INDEX, JSFunction, make_type_error) \ + V(MAKE_URI_ERROR_INDEX, JSFunction, make_uri_error) \ + V(OBJECT_CREATE, JSFunction, object_create) \ + V(OBJECT_DEFINE_PROPERTIES, JSFunction, object_define_properties) \ + V(OBJECT_DEFINE_PROPERTY, JSFunction, object_define_property) \ + V(OBJECT_GET_PROTOTYPE_OF, JSFunction, object_get_prototype_of) \ + V(OBJECT_IS_EXTENSIBLE, JSFunction, object_is_extensible) \ + V(OBJECT_IS_FROZEN, JSFunction, object_is_frozen) \ + V(OBJECT_IS_SEALED, JSFunction, object_is_sealed) \ + V(OBJECT_KEYS, JSFunction, object_keys) \ + V(REFLECT_APPLY_INDEX, JSFunction, reflect_apply) \ + V(REFLECT_CONSTRUCT_INDEX, JSFunction, reflect_construct) \ + V(REFLECT_DEFINE_PROPERTY_INDEX, JSFunction, reflect_define_property) \ + V(REFLECT_DELETE_PROPERTY_INDEX, JSFunction, reflect_delete_property) \ + V(MATH_FLOOR_INDEX, JSFunction, math_floor) \ + V(MATH_POW_INDEX, JSFunction, math_pow) \ + V(NEW_PROMISE_CAPABILITY_INDEX, JSFunction, new_promise_capability) \ + V(PROMISE_INTERNAL_CONSTRUCTOR_INDEX, JSFunction, \ + promise_internal_constructor) \ + V(IS_PROMISE_INDEX, JSFunction, is_promise) \ + V(PROMISE_THEN_INDEX, JSFunction, promise_then) + +#define NATIVE_CONTEXT_FIELDS(V) \ + V(GLOBAL_PROXY_INDEX, JSGlobalProxy, global_proxy_object) \ + /* TODO(ishell): Actually we store exactly EmbedderDataArray here but */ \ + /* it's already UBSan-fiendly and doesn't require a star... So declare */ \ + /* it as a HeapObject for now. 
*/ \ + V(EMBEDDER_DATA_INDEX, HeapObject, embedder_data) \ + /* Below is alpha-sorted */ \ + V(ACCESSOR_PROPERTY_DESCRIPTOR_MAP_INDEX, Map, \ + accessor_property_descriptor_map) \ + V(ALLOW_CODE_GEN_FROM_STRINGS_INDEX, Object, allow_code_gen_from_strings) \ + V(ARRAY_BUFFER_FUN_INDEX, JSFunction, array_buffer_fun) \ + V(ARRAY_BUFFER_MAP_INDEX, Map, array_buffer_map) \ + V(ARRAY_BUFFER_NOINIT_FUN_INDEX, JSFunction, array_buffer_noinit_fun) \ + V(ARRAY_FUNCTION_INDEX, JSFunction, array_function) \ + V(ARRAY_JOIN_STACK_INDEX, HeapObject, array_join_stack) \ + V(ASYNC_FROM_SYNC_ITERATOR_MAP_INDEX, Map, async_from_sync_iterator_map) \ + V(ASYNC_FUNCTION_AWAIT_REJECT_SHARED_FUN, SharedFunctionInfo, \ + async_function_await_reject_shared_fun) \ + V(ASYNC_FUNCTION_AWAIT_RESOLVE_SHARED_FUN, SharedFunctionInfo, \ + async_function_await_resolve_shared_fun) \ + V(ASYNC_FUNCTION_FUNCTION_INDEX, JSFunction, async_function_constructor) \ + V(ASYNC_FUNCTION_OBJECT_MAP_INDEX, Map, async_function_object_map) \ + V(ASYNC_GENERATOR_FUNCTION_FUNCTION_INDEX, JSFunction, \ + async_generator_function_function) \ + V(ASYNC_ITERATOR_VALUE_UNWRAP_SHARED_FUN, SharedFunctionInfo, \ + async_iterator_value_unwrap_shared_fun) \ + V(ASYNC_GENERATOR_AWAIT_REJECT_SHARED_FUN, SharedFunctionInfo, \ + async_generator_await_reject_shared_fun) \ + V(ASYNC_GENERATOR_AWAIT_RESOLVE_SHARED_FUN, SharedFunctionInfo, \ + async_generator_await_resolve_shared_fun) \ + V(ASYNC_GENERATOR_YIELD_RESOLVE_SHARED_FUN, SharedFunctionInfo, \ + async_generator_yield_resolve_shared_fun) \ + V(ASYNC_GENERATOR_RETURN_RESOLVE_SHARED_FUN, SharedFunctionInfo, \ + async_generator_return_resolve_shared_fun) \ + V(ASYNC_GENERATOR_RETURN_CLOSED_RESOLVE_SHARED_FUN, SharedFunctionInfo, \ + async_generator_return_closed_resolve_shared_fun) \ + V(ASYNC_GENERATOR_RETURN_CLOSED_REJECT_SHARED_FUN, SharedFunctionInfo, \ + async_generator_return_closed_reject_shared_fun) \ + V(ATOMICS_OBJECT, JSObject, atomics_object) \ + 
V(BIGINT_FUNCTION_INDEX, JSFunction, bigint_function) \ + V(BIGINT64_ARRAY_FUN_INDEX, JSFunction, bigint64_array_fun) \ + V(BIGUINT64_ARRAY_FUN_INDEX, JSFunction, biguint64_array_fun) \ + V(BOOLEAN_FUNCTION_INDEX, JSFunction, boolean_function) \ + V(BOUND_FUNCTION_WITH_CONSTRUCTOR_MAP_INDEX, Map, \ + bound_function_with_constructor_map) \ + V(BOUND_FUNCTION_WITHOUT_CONSTRUCTOR_MAP_INDEX, Map, \ + bound_function_without_constructor_map) \ + V(CALL_AS_CONSTRUCTOR_DELEGATE_INDEX, JSFunction, \ + call_as_constructor_delegate) \ + V(CALL_AS_FUNCTION_DELEGATE_INDEX, JSFunction, call_as_function_delegate) \ + V(CALLSITE_FUNCTION_INDEX, JSFunction, callsite_function) \ + V(CONTEXT_EXTENSION_FUNCTION_INDEX, JSFunction, context_extension_function) \ + V(DATA_PROPERTY_DESCRIPTOR_MAP_INDEX, Map, data_property_descriptor_map) \ + V(DATA_VIEW_FUN_INDEX, JSFunction, data_view_fun) \ + V(DATE_FUNCTION_INDEX, JSFunction, date_function) \ + V(DEBUG_CONTEXT_ID_INDEX, Object, debug_context_id) \ + V(EMPTY_FUNCTION_INDEX, JSFunction, empty_function) \ + V(ERROR_MESSAGE_FOR_CODE_GEN_FROM_STRINGS_INDEX, Object, \ + error_message_for_code_gen_from_strings) \ + V(ERRORS_THROWN_INDEX, Smi, errors_thrown) \ + V(EXTRAS_EXPORTS_OBJECT_INDEX, JSObject, extras_binding_object) \ + V(EXTRAS_UTILS_OBJECT_INDEX, Object, extras_utils_object) \ + V(FAST_ALIASED_ARGUMENTS_MAP_INDEX, Map, fast_aliased_arguments_map) \ + V(FAST_TEMPLATE_INSTANTIATIONS_CACHE_INDEX, FixedArray, \ + fast_template_instantiations_cache) \ + V(FLOAT32_ARRAY_FUN_INDEX, JSFunction, float32_array_fun) \ + V(FLOAT64_ARRAY_FUN_INDEX, JSFunction, float64_array_fun) \ + V(FUNCTION_FUNCTION_INDEX, JSFunction, function_function) \ + V(GENERATOR_FUNCTION_FUNCTION_INDEX, JSFunction, \ + generator_function_function) \ + V(GENERATOR_OBJECT_PROTOTYPE_MAP_INDEX, Map, generator_object_prototype_map) \ + V(ASYNC_GENERATOR_OBJECT_PROTOTYPE_MAP_INDEX, Map, \ + async_generator_object_prototype_map) \ + V(INITIAL_ARRAY_ITERATOR_MAP_INDEX, Map, 
initial_array_iterator_map) \ + V(INITIAL_ARRAY_ITERATOR_PROTOTYPE_INDEX, JSObject, \ + initial_array_iterator_prototype) \ + V(INITIAL_ARRAY_PROTOTYPE_INDEX, JSObject, initial_array_prototype) \ + V(INITIAL_ERROR_PROTOTYPE_INDEX, JSObject, initial_error_prototype) \ + V(INITIAL_GENERATOR_PROTOTYPE_INDEX, JSObject, initial_generator_prototype) \ + V(INITIAL_ASYNC_GENERATOR_PROTOTYPE_INDEX, JSObject, \ + initial_async_generator_prototype) \ + V(INITIAL_ITERATOR_PROTOTYPE_INDEX, JSObject, initial_iterator_prototype) \ + V(INITIAL_MAP_ITERATOR_PROTOTYPE_INDEX, JSObject, \ + initial_map_iterator_prototype) \ + V(INITIAL_MAP_PROTOTYPE_MAP_INDEX, Map, initial_map_prototype_map) \ + V(INITIAL_OBJECT_PROTOTYPE_INDEX, JSObject, initial_object_prototype) \ + V(INITIAL_SET_ITERATOR_PROTOTYPE_INDEX, JSObject, \ + initial_set_iterator_prototype) \ + V(INITIAL_SET_PROTOTYPE_INDEX, JSObject, initial_set_prototype) \ + V(INITIAL_SET_PROTOTYPE_MAP_INDEX, Map, initial_set_prototype_map) \ + V(INITIAL_STRING_ITERATOR_MAP_INDEX, Map, initial_string_iterator_map) \ + V(INITIAL_STRING_ITERATOR_PROTOTYPE_INDEX, JSObject, \ + initial_string_iterator_prototype) \ + V(INITIAL_STRING_PROTOTYPE_INDEX, JSObject, initial_string_prototype) \ + V(INITIAL_WEAKMAP_PROTOTYPE_MAP_INDEX, Map, initial_weakmap_prototype_map) \ + V(INITIAL_WEAKSET_PROTOTYPE_MAP_INDEX, Map, initial_weakset_prototype_map) \ + V(INT16_ARRAY_FUN_INDEX, JSFunction, int16_array_fun) \ + V(INT32_ARRAY_FUN_INDEX, JSFunction, int32_array_fun) \ + V(INT8_ARRAY_FUN_INDEX, JSFunction, int8_array_fun) \ + V(INTL_COLLATOR_FUNCTION_INDEX, JSFunction, intl_collator_function) \ + V(INTL_DATE_TIME_FORMAT_FUNCTION_INDEX, JSFunction, \ + intl_date_time_format_function) \ + V(INTL_NUMBER_FORMAT_FUNCTION_INDEX, JSFunction, \ + intl_number_format_function) \ + V(INTL_LOCALE_FUNCTION_INDEX, JSFunction, intl_locale_function) \ + V(INTL_SEGMENT_ITERATOR_MAP_INDEX, Map, intl_segment_iterator_map) \ + V(ITERATOR_RESULT_MAP_INDEX, Map, 
iterator_result_map) \ + V(JS_ARRAY_PACKED_SMI_ELEMENTS_MAP_INDEX, Map, \ + js_array_packed_smi_elements_map) \ + V(JS_ARRAY_HOLEY_SMI_ELEMENTS_MAP_INDEX, Map, \ + js_array_holey_smi_elements_map) \ + V(JS_ARRAY_PACKED_ELEMENTS_MAP_INDEX, Map, js_array_packed_elements_map) \ + V(JS_ARRAY_HOLEY_ELEMENTS_MAP_INDEX, Map, js_array_holey_elements_map) \ + V(JS_ARRAY_PACKED_DOUBLE_ELEMENTS_MAP_INDEX, Map, \ + js_array_packed_double_elements_map) \ + V(JS_ARRAY_HOLEY_DOUBLE_ELEMENTS_MAP_INDEX, Map, \ + js_array_holey_double_elements_map) \ + V(JS_MAP_FUN_INDEX, JSFunction, js_map_fun) \ + V(JS_MAP_MAP_INDEX, Map, js_map_map) \ + V(JS_MODULE_NAMESPACE_MAP, Map, js_module_namespace_map) \ + V(JS_SET_FUN_INDEX, JSFunction, js_set_fun) \ + V(JS_SET_MAP_INDEX, Map, js_set_map) \ + V(WEAK_CELL_MAP_INDEX, Map, weak_cell_map) \ + V(JS_FINALIZATION_GROUP_CLEANUP_ITERATOR_MAP_INDEX, Map, \ + js_finalization_group_cleanup_iterator_map) \ + V(JS_WEAK_MAP_FUN_INDEX, JSFunction, js_weak_map_fun) \ + V(JS_WEAK_REF_MAP_INDEX, Map, js_weak_ref_map) \ + V(JS_WEAK_SET_FUN_INDEX, JSFunction, js_weak_set_fun) \ + V(MAP_CACHE_INDEX, Object, map_cache) \ + V(MAP_KEY_ITERATOR_MAP_INDEX, Map, map_key_iterator_map) \ + V(MAP_KEY_VALUE_ITERATOR_MAP_INDEX, Map, map_key_value_iterator_map) \ + V(MAP_VALUE_ITERATOR_MAP_INDEX, Map, map_value_iterator_map) \ + V(MATH_RANDOM_INDEX_INDEX, Smi, math_random_index) \ + V(MATH_RANDOM_STATE_INDEX, ByteArray, math_random_state) \ + V(MATH_RANDOM_CACHE_INDEX, FixedDoubleArray, math_random_cache) \ + V(MESSAGE_LISTENERS_INDEX, TemplateList, message_listeners) \ + V(NORMALIZED_MAP_CACHE_INDEX, Object, normalized_map_cache) \ + V(NUMBER_FUNCTION_INDEX, JSFunction, number_function) \ + V(OBJECT_FUNCTION_INDEX, JSFunction, object_function) \ + V(OBJECT_FUNCTION_PROTOTYPE_MAP_INDEX, Map, object_function_prototype_map) \ + V(OPAQUE_REFERENCE_FUNCTION_INDEX, JSFunction, opaque_reference_function) \ + V(PROXY_CALLABLE_MAP_INDEX, Map, proxy_callable_map) \ + 
V(PROXY_CONSTRUCTOR_MAP_INDEX, Map, proxy_constructor_map) \ + V(PROXY_FUNCTION_INDEX, JSFunction, proxy_function) \ + V(PROXY_MAP_INDEX, Map, proxy_map) \ + V(PROXY_REVOCABLE_RESULT_MAP_INDEX, Map, proxy_revocable_result_map) \ + V(PROXY_REVOKE_SHARED_FUN, SharedFunctionInfo, proxy_revoke_shared_fun) \ + V(PROMISE_GET_CAPABILITIES_EXECUTOR_SHARED_FUN, SharedFunctionInfo, \ + promise_get_capabilities_executor_shared_fun) \ + V(PROMISE_CAPABILITY_DEFAULT_REJECT_SHARED_FUN_INDEX, SharedFunctionInfo, \ + promise_capability_default_reject_shared_fun) \ + V(PROMISE_CAPABILITY_DEFAULT_RESOLVE_SHARED_FUN_INDEX, SharedFunctionInfo, \ + promise_capability_default_resolve_shared_fun) \ + V(PROMISE_THEN_FINALLY_SHARED_FUN, SharedFunctionInfo, \ + promise_then_finally_shared_fun) \ + V(PROMISE_CATCH_FINALLY_SHARED_FUN, SharedFunctionInfo, \ + promise_catch_finally_shared_fun) \ + V(PROMISE_VALUE_THUNK_FINALLY_SHARED_FUN, SharedFunctionInfo, \ + promise_value_thunk_finally_shared_fun) \ + V(PROMISE_THROWER_FINALLY_SHARED_FUN, SharedFunctionInfo, \ + promise_thrower_finally_shared_fun) \ + V(PROMISE_ALL_RESOLVE_ELEMENT_SHARED_FUN, SharedFunctionInfo, \ + promise_all_resolve_element_shared_fun) \ + V(PROMISE_ALL_SETTLED_RESOLVE_ELEMENT_SHARED_FUN, SharedFunctionInfo, \ + promise_all_settled_resolve_element_shared_fun) \ + V(PROMISE_ALL_SETTLED_REJECT_ELEMENT_SHARED_FUN, SharedFunctionInfo, \ + promise_all_settled_reject_element_shared_fun) \ + V(PROMISE_PROTOTYPE_INDEX, JSObject, promise_prototype) \ + V(REGEXP_EXEC_FUNCTION_INDEX, JSFunction, regexp_exec_function) \ + V(REGEXP_FUNCTION_INDEX, JSFunction, regexp_function) \ + V(REGEXP_LAST_MATCH_INFO_INDEX, RegExpMatchInfo, regexp_last_match_info) \ + V(REGEXP_PROTOTYPE_MAP_INDEX, Map, regexp_prototype_map) \ + V(INITIAL_REGEXP_STRING_ITERATOR_PROTOTYPE_MAP_INDEX, Map, \ + initial_regexp_string_iterator_prototype_map) \ + V(REGEXP_RESULT_MAP_INDEX, Map, regexp_result_map) \ + V(REGEXP_PROTOTYPE_INDEX, JSObject, regexp_prototype) 
\ + V(SCRIPT_CONTEXT_TABLE_INDEX, ScriptContextTable, script_context_table) \ + V(SECURITY_TOKEN_INDEX, Object, security_token) \ + V(SERIALIZED_OBJECTS, FixedArray, serialized_objects) \ + V(SET_VALUE_ITERATOR_MAP_INDEX, Map, set_value_iterator_map) \ + V(SET_KEY_VALUE_ITERATOR_MAP_INDEX, Map, set_key_value_iterator_map) \ + V(SHARED_ARRAY_BUFFER_FUN_INDEX, JSFunction, shared_array_buffer_fun) \ + V(SLOPPY_ARGUMENTS_MAP_INDEX, Map, sloppy_arguments_map) \ + V(SLOW_ALIASED_ARGUMENTS_MAP_INDEX, Map, slow_aliased_arguments_map) \ + V(STRICT_ARGUMENTS_MAP_INDEX, Map, strict_arguments_map) \ + V(SLOW_OBJECT_WITH_NULL_PROTOTYPE_MAP, Map, \ + slow_object_with_null_prototype_map) \ + V(SLOW_OBJECT_WITH_OBJECT_PROTOTYPE_MAP, Map, \ + slow_object_with_object_prototype_map) \ + V(SLOW_TEMPLATE_INSTANTIATIONS_CACHE_INDEX, SimpleNumberDictionary, \ + slow_template_instantiations_cache) \ + /* All *_FUNCTION_MAP_INDEX definitions used by Context::FunctionMapIndex */ \ + /* must remain together. */ \ + V(SLOPPY_FUNCTION_MAP_INDEX, Map, sloppy_function_map) \ + V(SLOPPY_FUNCTION_WITH_NAME_MAP_INDEX, Map, sloppy_function_with_name_map) \ + V(SLOPPY_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX, Map, \ + sloppy_function_without_prototype_map) \ + V(SLOPPY_FUNCTION_WITH_READONLY_PROTOTYPE_MAP_INDEX, Map, \ + sloppy_function_with_readonly_prototype_map) \ + V(STRICT_FUNCTION_MAP_INDEX, Map, strict_function_map) \ + V(STRICT_FUNCTION_WITH_NAME_MAP_INDEX, Map, strict_function_with_name_map) \ + V(STRICT_FUNCTION_WITH_READONLY_PROTOTYPE_MAP_INDEX, Map, \ + strict_function_with_readonly_prototype_map) \ + V(STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX, Map, \ + strict_function_without_prototype_map) \ + V(METHOD_WITH_NAME_MAP_INDEX, Map, method_with_name_map) \ + V(METHOD_WITH_HOME_OBJECT_MAP_INDEX, Map, method_with_home_object_map) \ + V(METHOD_WITH_NAME_AND_HOME_OBJECT_MAP_INDEX, Map, \ + method_with_name_and_home_object_map) \ + V(ASYNC_FUNCTION_MAP_INDEX, Map, async_function_map) \ + 
V(ASYNC_FUNCTION_WITH_NAME_MAP_INDEX, Map, async_function_with_name_map) \ + V(ASYNC_FUNCTION_WITH_HOME_OBJECT_MAP_INDEX, Map, \ + async_function_with_home_object_map) \ + V(ASYNC_FUNCTION_WITH_NAME_AND_HOME_OBJECT_MAP_INDEX, Map, \ + async_function_with_name_and_home_object_map) \ + V(GENERATOR_FUNCTION_MAP_INDEX, Map, generator_function_map) \ + V(GENERATOR_FUNCTION_WITH_NAME_MAP_INDEX, Map, \ + generator_function_with_name_map) \ + V(GENERATOR_FUNCTION_WITH_HOME_OBJECT_MAP_INDEX, Map, \ + generator_function_with_home_object_map) \ + V(GENERATOR_FUNCTION_WITH_NAME_AND_HOME_OBJECT_MAP_INDEX, Map, \ + generator_function_with_name_and_home_object_map) \ + V(ASYNC_GENERATOR_FUNCTION_MAP_INDEX, Map, async_generator_function_map) \ + V(ASYNC_GENERATOR_FUNCTION_WITH_NAME_MAP_INDEX, Map, \ + async_generator_function_with_name_map) \ + V(ASYNC_GENERATOR_FUNCTION_WITH_HOME_OBJECT_MAP_INDEX, Map, \ + async_generator_function_with_home_object_map) \ + V(ASYNC_GENERATOR_FUNCTION_WITH_NAME_AND_HOME_OBJECT_MAP_INDEX, Map, \ + async_generator_function_with_name_and_home_object_map) \ + V(CLASS_FUNCTION_MAP_INDEX, Map, class_function_map) \ + V(STRING_FUNCTION_INDEX, JSFunction, string_function) \ + V(STRING_FUNCTION_PROTOTYPE_MAP_INDEX, Map, string_function_prototype_map) \ + V(SYMBOL_FUNCTION_INDEX, JSFunction, symbol_function) \ + V(WASM_EXPORTED_FUNCTION_MAP_INDEX, Map, wasm_exported_function_map) \ + V(WASM_EXCEPTION_CONSTRUCTOR_INDEX, JSFunction, wasm_exception_constructor) \ + V(WASM_FUNCTION_CONSTRUCTOR_INDEX, JSFunction, wasm_function_constructor) \ + V(WASM_GLOBAL_CONSTRUCTOR_INDEX, JSFunction, wasm_global_constructor) \ + V(WASM_INSTANCE_CONSTRUCTOR_INDEX, JSFunction, wasm_instance_constructor) \ + V(WASM_MEMORY_CONSTRUCTOR_INDEX, JSFunction, wasm_memory_constructor) \ + V(WASM_MODULE_CONSTRUCTOR_INDEX, JSFunction, wasm_module_constructor) \ + V(WASM_TABLE_CONSTRUCTOR_INDEX, JSFunction, wasm_table_constructor) \ + V(TEMPLATE_WEAKMAP_INDEX, HeapObject, template_weakmap) 
\ + V(TYPED_ARRAY_FUN_INDEX, JSFunction, typed_array_function) \ + V(TYPED_ARRAY_PROTOTYPE_INDEX, JSObject, typed_array_prototype) \ + V(UINT16_ARRAY_FUN_INDEX, JSFunction, uint16_array_fun) \ + V(UINT32_ARRAY_FUN_INDEX, JSFunction, uint32_array_fun) \ + V(UINT8_ARRAY_FUN_INDEX, JSFunction, uint8_array_fun) \ + V(UINT8_CLAMPED_ARRAY_FUN_INDEX, JSFunction, uint8_clamped_array_fun) \ + V(ARRAY_ENTRIES_ITERATOR_INDEX, JSFunction, array_entries_iterator) \ + V(ARRAY_FOR_EACH_ITERATOR_INDEX, JSFunction, array_for_each_iterator) \ + V(ARRAY_KEYS_ITERATOR_INDEX, JSFunction, array_keys_iterator) \ + V(ARRAY_VALUES_ITERATOR_INDEX, JSFunction, array_values_iterator) \ + V(ERROR_FUNCTION_INDEX, JSFunction, error_function) \ + V(ERROR_TO_STRING, JSFunction, error_to_string) \ + V(EVAL_ERROR_FUNCTION_INDEX, JSFunction, eval_error_function) \ + V(GLOBAL_EVAL_FUN_INDEX, JSFunction, global_eval_fun) \ + V(GLOBAL_PROXY_FUNCTION_INDEX, JSFunction, global_proxy_function) \ + V(MAP_DELETE_INDEX, JSFunction, map_delete) \ + V(MAP_GET_INDEX, JSFunction, map_get) \ + V(MAP_HAS_INDEX, JSFunction, map_has) \ + V(MAP_SET_INDEX, JSFunction, map_set) \ + V(FUNCTION_HAS_INSTANCE_INDEX, JSFunction, function_has_instance) \ + V(OBJECT_TO_STRING, JSFunction, object_to_string) \ + V(PROMISE_ALL_INDEX, JSFunction, promise_all) \ + V(PROMISE_CATCH_INDEX, JSFunction, promise_catch) \ + V(PROMISE_FUNCTION_INDEX, JSFunction, promise_function) \ + V(RANGE_ERROR_FUNCTION_INDEX, JSFunction, range_error_function) \ + V(REFERENCE_ERROR_FUNCTION_INDEX, JSFunction, reference_error_function) \ + V(SET_ADD_INDEX, JSFunction, set_add) \ + V(SET_DELETE_INDEX, JSFunction, set_delete) \ + V(SET_HAS_INDEX, JSFunction, set_has) \ + V(SYNTAX_ERROR_FUNCTION_INDEX, JSFunction, syntax_error_function) \ + V(TYPE_ERROR_FUNCTION_INDEX, JSFunction, type_error_function) \ + V(URI_ERROR_FUNCTION_INDEX, JSFunction, uri_error_function) \ + V(WASM_COMPILE_ERROR_FUNCTION_INDEX, JSFunction, \ + wasm_compile_error_function) \ + 
V(WASM_LINK_ERROR_FUNCTION_INDEX, JSFunction, wasm_link_error_function) \ + V(WASM_RUNTIME_ERROR_FUNCTION_INDEX, JSFunction, \ + wasm_runtime_error_function) \ + V(WEAKMAP_SET_INDEX, JSFunction, weakmap_set) \ + V(WEAKMAP_GET_INDEX, JSFunction, weakmap_get) \ + V(WEAKSET_ADD_INDEX, JSFunction, weakset_add) \ + NATIVE_CONTEXT_INTRINSIC_FUNCTIONS(V) + +// A table of all script contexts. Every loaded top-level script with top-level +// lexical declarations contributes its ScriptContext into this table. +// +// The table is a fixed array, its first slot is the current used count and +// the subsequent slots 1..used contain ScriptContexts. +class ScriptContextTable : public FixedArray { + public: + DECL_CAST(ScriptContextTable) + + struct LookupResult { + int context_index; + int slot_index; + VariableMode mode; + InitializationFlag init_flag; + MaybeAssignedFlag maybe_assigned_flag; + }; + + inline int used() const; + inline void set_used(int used); + + static inline Handle<Context> GetContext(Isolate* isolate, + Handle<ScriptContextTable> table, + int i); + inline Context get_context(int i) const; + + // Lookup a variable `name` in a ScriptContextTable. + // If it returns true, the variable is found and `result` contains + // valid information about its location. + // If it returns false, `result` is untouched. + V8_WARN_UNUSED_RESULT + V8_EXPORT_PRIVATE static bool Lookup(Isolate* isolate, + ScriptContextTable table, String name, + LookupResult* result); + + V8_WARN_UNUSED_RESULT + static Handle<ScriptContextTable> Extend(Handle<ScriptContextTable> table, + Handle<Context> script_context); + + static const int kUsedSlotIndex = 0; + static const int kFirstContextSlotIndex = 1; + static const int kMinLength = kFirstContextSlotIndex; + + OBJECT_CONSTRUCTORS(ScriptContextTable, FixedArray); +}; + +// JSFunctions are pairs (context, function code), sometimes also called +// closures. A Context object is used to represent function contexts and +// dynamically pushed 'with' contexts (or 'scopes' in ECMA-262 speak). 
+// +// At runtime, the contexts build a stack in parallel to the execution +// stack, with the top-most context being the current context. All contexts +// have the following slots: +// +// [ scope_info ] This is the scope info describing the current context. It +// contains the names of statically allocated context slots, +// and stack-allocated locals. The names are needed for +// dynamic lookups in the presence of 'with' or 'eval', and +// for the debugger. +// +// [ previous ] A pointer to the previous context. +// +// [ extension ] Additional data. +// +// For native contexts, it contains the global object. +// For module contexts, it contains the module object. +// For await contexts, it contains the generator object. +// For block contexts, it may contain an "extension object". +// For with contexts, it contains an "extension object". +// +// An "extension object" is used to dynamically extend a +// context with additional variables, namely in the +// implementation of the 'with' construct and the 'eval' +// construct. For instance, Context::Lookup also searches +// the extension object for properties. (Storing the +// extension object is the original purpose of this context +// slot, hence the name.) +// +// [ native_context ] A pointer to the native context. +// +// In addition, function contexts may have statically allocated context slots +// to store local variables/functions that are accessed from inner functions +// (via static context addresses) or through 'eval' (dynamic context lookups). +// The native context contains additional slots for fast access to native +// properties. +// +// Finally, with Harmony scoping, the JSFunction representing a top level +// script will have the ScriptContext rather than a FunctionContext. +// Script contexts from all top-level scripts are gathered in +// ScriptContextTable. + +class Context : public HeapObject { + public: + NEVER_READ_ONLY_SPACE + + DECL_CAST(Context) + + // [length]: length of the context. 
+ V8_INLINE int length() const; + V8_INLINE void set_length(int value); + + // Setter and getter for elements. + V8_INLINE Object get(int index) const; + V8_INLINE void set(int index, Object value); + // Setter with explicit barrier mode. + V8_INLINE void set(int index, Object value, WriteBarrierMode mode); + + DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize, + TORQUE_GENERATED_CONTEXT_FIELDS) + // TODO(v8:8989): [torque] Support marker constants. + /* TODO(ishell): remove this fixedArray-like header size. */ + static const int kHeaderSize = kScopeInfoOffset; + static const int kStartOfTaggedFieldsOffset = kScopeInfoOffset; + /* Header size. */ \ + /* TODO(ishell): use this as header size once MIN_CONTEXT_SLOTS */ \ + /* is removed in favour of offset-based access to common fields. */ \ + static const int kTodoHeaderSize = kSize; + + // Garbage collection support. + V8_INLINE static constexpr int SizeFor(int length) { + // TODO(ishell): switch to kTodoHeaderSize based approach once we no longer + // reference common Context fields via index + return kHeaderSize + length * kTaggedSize; + } + + // Code Generation support. + // Offset of the element from the beginning of object. + V8_INLINE static constexpr int OffsetOfElementAt(int index) { + return SizeFor(index); + } + // Offset of the element from the heap object pointer. + V8_INLINE static constexpr int SlotOffset(int index) { + return SizeFor(index) - kHeapObjectTag; + } + + // TODO(ishell): eventually migrate to the offset based access instead of + // index-based. + // The default context slot layout; indices are FixedArray slot indices. + enum Field { + // TODO(shell): use offset-based approach for accessing common values. + // These slots are in all contexts. + SCOPE_INFO_INDEX, + PREVIOUS_INDEX, + EXTENSION_INDEX, + NATIVE_CONTEXT_INDEX, + +// These slots are only in native contexts. 
+#define NATIVE_CONTEXT_SLOT(index, type, name) index, + NATIVE_CONTEXT_FIELDS(NATIVE_CONTEXT_SLOT) +#undef NATIVE_CONTEXT_SLOT + + // Properties from here are treated as weak references by the full GC. + // Scavenge treats them as strong references. + OPTIMIZED_CODE_LIST, // Weak. + DEOPTIMIZED_CODE_LIST, // Weak. + NEXT_CONTEXT_LINK, // Weak. + + // Total number of slots. + NATIVE_CONTEXT_SLOTS, + FIRST_WEAK_SLOT = OPTIMIZED_CODE_LIST, + FIRST_JS_ARRAY_MAP_SLOT = JS_ARRAY_PACKED_SMI_ELEMENTS_MAP_INDEX, + + // TODO(shell): Remove, once it becomes zero + MIN_CONTEXT_SLOTS = GLOBAL_PROXY_INDEX, + + // This slot holds the thrown value in catch contexts. + THROWN_OBJECT_INDEX = MIN_CONTEXT_SLOTS, + + // These slots hold values in debug evaluate contexts. + WRAPPED_CONTEXT_INDEX = MIN_CONTEXT_SLOTS, + WHITE_LIST_INDEX = MIN_CONTEXT_SLOTS + 1 + }; + + // A region of native context entries containing maps for functions created + // by Builtins::kFastNewClosure. + static const int FIRST_FUNCTION_MAP_INDEX = SLOPPY_FUNCTION_MAP_INDEX; + static const int LAST_FUNCTION_MAP_INDEX = CLASS_FUNCTION_MAP_INDEX; + + static const int kNoContext = 0; + static const int kInvalidContext = 1; + + void ResetErrorsThrown(); + void IncrementErrorsThrown(); + int GetErrorsThrown(); + + // Direct slot access. + inline void set_scope_info(ScopeInfo scope_info); + + inline Object unchecked_previous(); + inline Context previous(); + inline void set_previous(Context context); + + inline Object next_context_link(); + + inline bool has_extension(); + inline HeapObject extension(); + inline void set_extension(HeapObject object); + JSObject extension_object(); + JSReceiver extension_receiver(); + V8_EXPORT_PRIVATE ScopeInfo scope_info(); + + // Find the module context (assuming there is one) and return the associated + // module object. + Module module(); + + // Get the context where var declarations will be hoisted to, which + // may be the context itself. 
+ Context declaration_context(); + bool is_declaration_context(); + + // Get the next closure's context on the context chain. + Context closure_context(); + + // Returns a JSGlobalProxy object or null. + V8_EXPORT_PRIVATE JSGlobalProxy global_proxy(); + void set_global_proxy(JSGlobalProxy global); + + // Get the JSGlobalObject object. + V8_EXPORT_PRIVATE JSGlobalObject global_object(); + + // Get the script context by traversing the context chain. + Context script_context(); + + // Compute the native context. + inline NativeContext native_context() const; + inline void set_native_context(NativeContext context); + + // Predicates for context types. IsNativeContext is already defined on + // Object. + inline bool IsFunctionContext() const; + inline bool IsCatchContext() const; + inline bool IsWithContext() const; + inline bool IsDebugEvaluateContext() const; + inline bool IsAwaitContext() const; + inline bool IsBlockContext() const; + inline bool IsModuleContext() const; + inline bool IsEvalContext() const; + inline bool IsScriptContext() const; + + inline bool HasSameSecurityTokenAs(Context that) const; + + // The native context also stores a list of all optimized code and a + // list of all deoptimized code, which are needed by the deoptimizer. 
+ V8_EXPORT_PRIVATE void AddOptimizedCode(Code code); + void SetOptimizedCodeListHead(Object head); + Object OptimizedCodeListHead(); + void SetDeoptimizedCodeListHead(Object head); + Object DeoptimizedCodeListHead(); + + Handle ErrorMessageForCodeGenerationFromStrings(); + + static int IntrinsicIndexForName(Handle name); + static int IntrinsicIndexForName(const unsigned char* name, int length); + +#define NATIVE_CONTEXT_FIELD_ACCESSORS(index, type, name) \ + inline void set_##name(type value); \ + inline bool is_##name(type value) const; \ + inline type name() const; + NATIVE_CONTEXT_FIELDS(NATIVE_CONTEXT_FIELD_ACCESSORS) +#undef NATIVE_CONTEXT_FIELD_ACCESSORS + + // Lookup the slot called name, starting with the current context. + // There are four possibilities: + // + // 1) result->IsContext(): + // The binding was found in a context. *index is always the + // non-negative slot index. *attributes is NONE for var and let + // declarations, READ_ONLY for const declarations (never ABSENT). + // + // 2) result->IsJSObject(): + // The binding was found as a named property in a context extension + // object (i.e., was introduced via eval), as a property on the subject + // of with, or as a property of the global object. *index is -1 and + // *attributes is not ABSENT. + // + // 3) result->IsModule(): + // The binding was found in module imports or exports. + // *attributes is never ABSENT. imports are READ_ONLY. + // + // 4) result.is_null(): + // There was no binding found, *index is always -1 and *attributes is + // always ABSENT. 
+ static Handle Lookup(Handle context, Handle name, + ContextLookupFlags flags, int* index, + PropertyAttributes* attributes, + InitializationFlag* init_flag, + VariableMode* variable_mode, + bool* is_sloppy_function_name = nullptr); + + static inline int FunctionMapIndex(LanguageMode language_mode, + FunctionKind kind, bool has_shared_name, + bool needs_home_object); + + static int ArrayMapIndex(ElementsKind elements_kind) { + DCHECK(IsFastElementsKind(elements_kind)); + return elements_kind + FIRST_JS_ARRAY_MAP_SLOT; + } + + inline Map GetInitialJSArrayMap(ElementsKind kind) const; + + static const int kNotFound = -1; + + // Dispatched behavior. + DECL_PRINTER(Context) + DECL_VERIFIER(Context) + + using BodyDescriptor = FlexibleBodyDescriptor; + + private: +#ifdef DEBUG + // Bootstrapping-aware type checks. + V8_EXPORT_PRIVATE static bool IsBootstrappingOrNativeContext(Isolate* isolate, + Object object); + static bool IsBootstrappingOrValidParentContext(Object object, Context kid); +#endif + + OBJECT_CONSTRUCTORS(Context, HeapObject); +}; + +class NativeContext : public Context { + public: + DECL_CAST(NativeContext) + // TODO(neis): Move some stuff from Context here. + + // [microtask_queue]: pointer to the MicrotaskQueue object. + DECL_PRIMITIVE_ACCESSORS(microtask_queue, MicrotaskQueue*) + + // Dispatched behavior. + DECL_PRINTER(NativeContext) + DECL_VERIFIER(NativeContext) + + // Layout description. +#define NATIVE_CONTEXT_FIELDS_DEF(V) \ + /* TODO(ishell): move definition of common context offsets to Context. */ \ + V(kStartOfNativeContextFieldsOffset, \ + (FIRST_WEAK_SLOT - MIN_CONTEXT_SLOTS) * kTaggedSize) \ + V(kEndOfStrongFieldsOffset, 0) \ + V(kStartOfWeakFieldsOffset, \ + (NATIVE_CONTEXT_SLOTS - FIRST_WEAK_SLOT) * kTaggedSize) \ + V(kEndOfWeakFieldsOffset, 0) \ + V(kEndOfNativeContextFieldsOffset, 0) \ + V(kEndOfTaggedFieldsOffset, 0) \ + /* Raw data. */ \ + V(kMicrotaskQueueOffset, kSystemPointerSize) \ + /* Total size. 
*/ \ + V(kSize, 0) + + DEFINE_FIELD_OFFSET_CONSTANTS(Context::kTodoHeaderSize, + NATIVE_CONTEXT_FIELDS_DEF) +#undef NATIVE_CONTEXT_FIELDS_DEF + + class BodyDescriptor; + + private: + STATIC_ASSERT(OffsetOfElementAt(EMBEDDER_DATA_INDEX) == + Internals::kNativeContextEmbedderDataOffset); + + OBJECT_CONSTRUCTORS(NativeContext, Context); +}; + +using ContextField = Context::Field; + +} // namespace internal +} // namespace v8 + +#include "src/objects/object-macros-undef.h" + +#endif // V8_OBJECTS_CONTEXTS_H_ diff --git a/deps/v8/src/objects/data-handler-inl.h b/deps/v8/src/objects/data-handler-inl.h index 1be71ce8fa..f9496cc342 100644 --- a/deps/v8/src/objects/data-handler-inl.h +++ b/deps/v8/src/objects/data-handler-inl.h @@ -5,8 +5,8 @@ #ifndef V8_OBJECTS_DATA_HANDLER_INL_H_ #define V8_OBJECTS_DATA_HANDLER_INL_H_ -#include "src/objects-inl.h" // Needed for write barriers #include "src/objects/data-handler.h" +#include "src/objects/objects-inl.h" // Needed for write barriers // Has to be the last include (doesn't have include guards): #include "src/objects/object-macros.h" @@ -22,15 +22,15 @@ ACCESSORS(DataHandler, smi_handler, Object, kSmiHandlerOffset) ACCESSORS(DataHandler, validity_cell, Object, kValidityCellOffset) int DataHandler::data_field_count() const { - return (map()->instance_size() - kSizeWithData0) / kTaggedSize; + return (map().instance_size() - kSizeWithData0) / kTaggedSize; } WEAK_ACCESSORS_CHECKED(DataHandler, data1, kData1Offset, - map()->instance_size() >= kSizeWithData1) + map().instance_size() >= kSizeWithData1) WEAK_ACCESSORS_CHECKED(DataHandler, data2, kData2Offset, - map()->instance_size() >= kSizeWithData2) + map().instance_size() >= kSizeWithData2) WEAK_ACCESSORS_CHECKED(DataHandler, data3, kData3Offset, - map()->instance_size() >= kSizeWithData3) + map().instance_size() >= kSizeWithData3) } // namespace internal } // namespace v8 diff --git a/deps/v8/src/objects/debug-objects-inl.h b/deps/v8/src/objects/debug-objects-inl.h index 
d445174cbc..273f710c3b 100644 --- a/deps/v8/src/objects/debug-objects-inl.h +++ b/deps/v8/src/objects/debug-objects-inl.h @@ -8,8 +8,8 @@ #include "src/objects/debug-objects.h" #include "src/heap/heap-write-barrier-inl.h" -#include "src/objects-inl.h" #include "src/objects/code-inl.h" +#include "src/objects/objects-inl.h" #include "src/objects/shared-function-info.h" // Has to be the last include (doesn't have include guards): @@ -37,7 +37,7 @@ ACCESSORS(DebugInfo, script, Object, kScriptOffset) ACCESSORS(DebugInfo, original_bytecode_array, Object, kOriginalBytecodeArrayOffset) ACCESSORS(DebugInfo, debug_bytecode_array, Object, kDebugBytecodeArrayOffset) -ACCESSORS(DebugInfo, break_points, FixedArray, kBreakPointsStateOffset) +ACCESSORS(DebugInfo, break_points, FixedArray, kBreakPointsOffset) ACCESSORS(DebugInfo, coverage_info, Object, kCoverageInfoOffset) BIT_FIELD_ACCESSORS(DebugInfo, debugger_hints, side_effect_state, @@ -56,9 +56,9 @@ SMI_ACCESSORS(BreakPoint, id, kIdOffset) ACCESSORS(BreakPoint, condition, String, kConditionOffset) bool DebugInfo::HasInstrumentedBytecodeArray() { - DCHECK_EQ(debug_bytecode_array()->IsBytecodeArray(), - original_bytecode_array()->IsBytecodeArray()); - return debug_bytecode_array()->IsBytecodeArray(); + DCHECK_EQ(debug_bytecode_array().IsBytecodeArray(), + original_bytecode_array().IsBytecodeArray()); + return debug_bytecode_array().IsBytecodeArray(); } BytecodeArray DebugInfo::OriginalBytecodeArray() { @@ -68,7 +68,7 @@ BytecodeArray DebugInfo::OriginalBytecodeArray() { BytecodeArray DebugInfo::DebugBytecodeArray() { DCHECK(HasInstrumentedBytecodeArray()); - DCHECK_EQ(shared()->GetDebugBytecodeArray(), debug_bytecode_array()); + DCHECK_EQ(shared().GetDebugBytecodeArray(), debug_bytecode_array()); return BytecodeArray::cast(debug_bytecode_array()); } diff --git a/deps/v8/src/objects/debug-objects.cc b/deps/v8/src/objects/debug-objects.cc index 760edbfbcf..5087918e75 100644 --- a/deps/v8/src/objects/debug-objects.cc +++ 
b/deps/v8/src/objects/debug-objects.cc @@ -5,9 +5,9 @@ #include "src/objects/debug-objects.h" #include "src/debug/debug-evaluate.h" -#include "src/handles-inl.h" +#include "src/handles/handles-inl.h" #include "src/objects/debug-objects-inl.h" -#include "src/ostreams.h" +#include "src/utils/ostreams.h" namespace v8 { namespace internal { @@ -31,7 +31,7 @@ void DebugInfo::ClearBreakInfo(Isolate* isolate) { if (HasInstrumentedBytecodeArray()) { // Reset function's bytecode array field to point to the original bytecode // array. - shared()->SetDebugBytecodeArray(OriginalBytecodeArray()); + shared().SetDebugBytecodeArray(OriginalBytecodeArray()); // If the function is currently running on the stack, we need to update the // bytecode pointers on the stack so they point to the original @@ -80,19 +80,18 @@ bool DebugInfo::HasBreakPoint(Isolate* isolate, int source_position) { // If there is no break point info object or no break points in the break // point info object there is no break point at this code offset. - if (break_point_info->IsUndefined(isolate)) return false; - return BreakPointInfo::cast(break_point_info)->GetBreakPointCount(isolate) > - 0; + if (break_point_info.IsUndefined(isolate)) return false; + return BreakPointInfo::cast(break_point_info).GetBreakPointCount(isolate) > 0; } // Get the break point info object for this source position. 
Object DebugInfo::GetBreakPointInfo(Isolate* isolate, int source_position) { DCHECK(HasBreakInfo()); - for (int i = 0; i < break_points()->length(); i++) { - if (!break_points()->get(i)->IsUndefined(isolate)) { + for (int i = 0; i < break_points().length(); i++) { + if (!break_points().get(i).IsUndefined(isolate)) { BreakPointInfo break_point_info = - BreakPointInfo::cast(break_points()->get(i)); - if (break_point_info->source_position() == source_position) { + BreakPointInfo::cast(break_points().get(i)); + if (break_point_info.source_position() == source_position) { return break_point_info; } } @@ -103,10 +102,10 @@ Object DebugInfo::GetBreakPointInfo(Isolate* isolate, int source_position) { bool DebugInfo::ClearBreakPoint(Isolate* isolate, Handle debug_info, Handle break_point) { DCHECK(debug_info->HasBreakInfo()); - for (int i = 0; i < debug_info->break_points()->length(); i++) { - if (debug_info->break_points()->get(i)->IsUndefined(isolate)) continue; + for (int i = 0; i < debug_info->break_points().length(); i++) { + if (debug_info->break_points().get(i).IsUndefined(isolate)) continue; Handle break_point_info = Handle( - BreakPointInfo::cast(debug_info->break_points()->get(i)), isolate); + BreakPointInfo::cast(debug_info->break_points().get(i)), isolate); if (BreakPointInfo::HasBreakPoint(isolate, break_point_info, break_point)) { BreakPointInfo::ClearBreakPoint(isolate, break_point_info, break_point); return true; @@ -131,8 +130,8 @@ void DebugInfo::SetBreakPoint(Isolate* isolate, Handle debug_info, // break points before. Try to find a free slot. 
static const int kNoBreakPointInfo = -1; int index = kNoBreakPointInfo; - for (int i = 0; i < debug_info->break_points()->length(); i++) { - if (debug_info->break_points()->get(i)->IsUndefined(isolate)) { + for (int i = 0; i < debug_info->break_points().length(); i++) { + if (debug_info->break_points().get(i).IsUndefined(isolate)) { index = i; break; } @@ -157,7 +156,7 @@ void DebugInfo::SetBreakPoint(Isolate* isolate, Handle debug_info, Handle new_break_point_info = isolate->factory()->NewBreakPointInfo(source_position); BreakPointInfo::SetBreakPoint(isolate, new_break_point_info, break_point); - debug_info->break_points()->set(index, *new_break_point_info); + debug_info->break_points().set(index, *new_break_point_info); } // Get the break point objects for a source position. @@ -165,10 +164,10 @@ Handle DebugInfo::GetBreakPoints(Isolate* isolate, int source_position) { DCHECK(HasBreakInfo()); Object break_point_info = GetBreakPointInfo(isolate, source_position); - if (break_point_info->IsUndefined(isolate)) { + if (break_point_info.IsUndefined(isolate)) { return isolate->factory()->undefined_value(); } - return Handle(BreakPointInfo::cast(break_point_info)->break_points(), + return Handle(BreakPointInfo::cast(break_point_info).break_points(), isolate); } @@ -176,11 +175,11 @@ Handle DebugInfo::GetBreakPoints(Isolate* isolate, int DebugInfo::GetBreakPointCount(Isolate* isolate) { DCHECK(HasBreakInfo()); int count = 0; - for (int i = 0; i < break_points()->length(); i++) { - if (!break_points()->get(i)->IsUndefined(isolate)) { + for (int i = 0; i < break_points().length(); i++) { + if (!break_points().get(i).IsUndefined(isolate)) { BreakPointInfo break_point_info = - BreakPointInfo::cast(break_points()->get(i)); - count += break_point_info->GetBreakPointCount(isolate); + BreakPointInfo::cast(break_points().get(i)); + count += break_point_info.GetBreakPointCount(isolate); } } return count; @@ -190,10 +189,10 @@ Handle DebugInfo::FindBreakPointInfo(Isolate* isolate, 
Handle debug_info, Handle break_point) { DCHECK(debug_info->HasBreakInfo()); - for (int i = 0; i < debug_info->break_points()->length(); i++) { - if (!debug_info->break_points()->get(i)->IsUndefined(isolate)) { + for (int i = 0; i < debug_info->break_points().length(); i++) { + if (!debug_info->break_points().get(i).IsUndefined(isolate)) { Handle break_point_info = Handle( - BreakPointInfo::cast(debug_info->break_points()->get(i)), isolate); + BreakPointInfo::cast(debug_info->break_points().get(i)), isolate); if (BreakPointInfo::HasBreakPoint(isolate, break_point_info, break_point)) { return break_point_info; @@ -228,7 +227,7 @@ DebugInfo::SideEffectState DebugInfo::GetSideEffectState(Isolate* isolate) { namespace { bool IsEqual(BreakPoint break_point1, BreakPoint break_point2) { - return break_point1->id() == break_point2->id(); + return break_point1.id() == break_point2.id(); } } // namespace @@ -237,9 +236,9 @@ void BreakPointInfo::ClearBreakPoint(Isolate* isolate, Handle break_point_info, Handle break_point) { // If there are no break points just ignore. - if (break_point_info->break_points()->IsUndefined(isolate)) return; + if (break_point_info->break_points().IsUndefined(isolate)) return; // If there is a single break point clear it if it is the same. 
- if (!break_point_info->break_points()->IsFixedArray()) { + if (!break_point_info->break_points().IsFixedArray()) { if (IsEqual(BreakPoint::cast(break_point_info->break_points()), *break_point)) { break_point_info->set_break_points( @@ -248,7 +247,7 @@ void BreakPointInfo::ClearBreakPoint(Isolate* isolate, return; } // If there are multiple break points shrink the array - DCHECK(break_point_info->break_points()->IsFixedArray()); + DCHECK(break_point_info->break_points().IsFixedArray()); Handle old_array = Handle( FixedArray::cast(break_point_info->break_points()), isolate); Handle new_array = @@ -271,14 +270,14 @@ void BreakPointInfo::SetBreakPoint(Isolate* isolate, Handle break_point_info, Handle break_point) { // If there was no break point objects before just set it. - if (break_point_info->break_points()->IsUndefined(isolate)) { + if (break_point_info->break_points().IsUndefined(isolate)) { break_point_info->set_break_points(*break_point); return; } // If the break point object is the same as before just ignore. if (break_point_info->break_points() == *break_point) return; // If there was one break point object before replace with array. - if (!break_point_info->break_points()->IsFixedArray()) { + if (!break_point_info->break_points().IsFixedArray()) { Handle array = isolate->factory()->NewFixedArray(2); array->set(0, break_point_info->break_points()); array->set(1, *break_point); @@ -304,18 +303,18 @@ bool BreakPointInfo::HasBreakPoint(Isolate* isolate, Handle break_point_info, Handle break_point) { // No break point. - if (break_point_info->break_points()->IsUndefined(isolate)) { + if (break_point_info->break_points().IsUndefined(isolate)) { return false; } // Single break point. - if (!break_point_info->break_points()->IsFixedArray()) { + if (!break_point_info->break_points().IsFixedArray()) { return IsEqual(BreakPoint::cast(break_point_info->break_points()), *break_point); } // Multiple break points. 
FixedArray array = FixedArray::cast(break_point_info->break_points()); - for (int i = 0; i < array->length(); i++) { - if (IsEqual(BreakPoint::cast(array->get(i)), *break_point)) { + for (int i = 0; i < array.length(); i++) { + if (IsEqual(BreakPoint::cast(array.get(i)), *break_point)) { return true; } } @@ -325,11 +324,11 @@ bool BreakPointInfo::HasBreakPoint(Isolate* isolate, // Get the number of break points. int BreakPointInfo::GetBreakPointCount(Isolate* isolate) { // No break point. - if (break_points()->IsUndefined(isolate)) return 0; + if (break_points().IsUndefined(isolate)) return 0; // Single break point. - if (!break_points()->IsFixedArray()) return 1; + if (!break_points().IsFixedArray()) return 1; // Multiple break points. - return FixedArray::cast(break_points())->length(); + return FixedArray::cast(break_points()).length(); } int CoverageInfo::SlotCount() const { diff --git a/deps/v8/src/objects/debug-objects.h b/deps/v8/src/objects/debug-objects.h index 9839f405f6..243caaa526 100644 --- a/deps/v8/src/objects/debug-objects.h +++ b/deps/v8/src/objects/debug-objects.h @@ -5,8 +5,8 @@ #ifndef V8_OBJECTS_DEBUG_OBJECTS_H_ #define V8_OBJECTS_DEBUG_OBJECTS_H_ -#include "src/objects.h" #include "src/objects/fixed-array.h" +#include "src/objects/objects.h" #include "src/objects/struct.h" // Has to be the last include (doesn't have include guards): @@ -168,21 +168,9 @@ class DebugInfo : public Struct { DECL_PRINTER(DebugInfo) DECL_VERIFIER(DebugInfo) -// Layout description. -#define DEBUG_INFO_FIELDS(V) \ - V(kSharedFunctionInfoOffset, kTaggedSize) \ - V(kDebuggerHintsOffset, kTaggedSize) \ - V(kScriptOffset, kTaggedSize) \ - V(kOriginalBytecodeArrayOffset, kTaggedSize) \ - V(kDebugBytecodeArrayOffset, kTaggedSize) \ - V(kBreakPointsStateOffset, kTaggedSize) \ - V(kFlagsOffset, kTaggedSize) \ - V(kCoverageInfoOffset, kTaggedSize) \ - /* Total size. 
*/ \ - V(kSize, 0) - - DEFINE_FIELD_OFFSET_CONSTANTS(Struct::kHeaderSize, DEBUG_INFO_FIELDS) -#undef DEBUG_INFO_FIELDS + // Layout description. + DEFINE_FIELD_OFFSET_CONSTANTS(Struct::kHeaderSize, + TORQUE_GENERATED_DEBUG_INFO_FIELDS) static const int kEstimatedNofBreakPointsInFunction = 4; @@ -247,11 +235,6 @@ class CoverageInfo : public FixedArray { // Print debug info. void Print(std::unique_ptr function_name); - private: - static int FirstIndexForSlot(int slot_index) { - return kFirstSlotIndex + slot_index * kSlotIndexCount; - } - static const int kFirstSlotIndex = 0; // Each slot is assigned a group of indices starting at kFirstSlotIndex. @@ -259,7 +242,17 @@ class CoverageInfo : public FixedArray { static const int kSlotStartSourcePositionIndex = 0; static const int kSlotEndSourcePositionIndex = 1; static const int kSlotBlockCountIndex = 2; - static const int kSlotIndexCount = 3; + static const int kSlotPaddingIndex = 3; // Padding to make the index count 4. + static const int kSlotIndexCount = 4; + + static const int kSlotIndexCountLog2 = 2; + static const int kSlotIndexCountMask = (kSlotIndexCount - 1); + STATIC_ASSERT(1 << kSlotIndexCountLog2 == kSlotIndexCount); + + private: + static int FirstIndexForSlot(int slot_index) { + return kFirstSlotIndex + slot_index * kSlotIndexCount; + } OBJECT_CONSTRUCTORS(CoverageInfo, FixedArray); }; diff --git a/deps/v8/src/objects/descriptor-array-inl.h b/deps/v8/src/objects/descriptor-array-inl.h index a59d4e5a75..1cd64c1bf1 100644 --- a/deps/v8/src/objects/descriptor-array-inl.h +++ b/deps/v8/src/objects/descriptor-array-inl.h @@ -7,16 +7,16 @@ #include "src/objects/descriptor-array.h" -#include "src/field-type.h" +#include "src/execution/isolate.h" +#include "src/handles/maybe-handles-inl.h" #include "src/heap/heap-write-barrier.h" #include "src/heap/heap.h" -#include "src/isolate.h" -#include "src/lookup-cache-inl.h" -#include "src/maybe-handles-inl.h" +#include "src/objects/field-type.h" #include 
"src/objects/heap-object-inl.h" -#include "src/objects/maybe-object.h" +#include "src/objects/lookup-cache-inl.h" +#include "src/objects/maybe-object-inl.h" +#include "src/objects/property.h" #include "src/objects/struct-inl.h" -#include "src/property.h" // Has to be the last include (doesn't have include guards): #include "src/objects/object-macros.h" @@ -59,25 +59,25 @@ inline int16_t DescriptorArray::CompareAndSwapRawNumberOfMarkedDescriptors( } void DescriptorArray::CopyEnumCacheFrom(DescriptorArray array) { - set_enum_cache(array->enum_cache()); + set_enum_cache(array.enum_cache()); } int DescriptorArray::Search(Name name, int valid_descriptors) { - DCHECK(name->IsUniqueName()); + DCHECK(name.IsUniqueName()); return internal::Search(this, name, valid_descriptors, nullptr); } int DescriptorArray::Search(Name name, Map map) { - DCHECK(name->IsUniqueName()); - int number_of_own_descriptors = map->NumberOfOwnDescriptors(); + DCHECK(name.IsUniqueName()); + int number_of_own_descriptors = map.NumberOfOwnDescriptors(); if (number_of_own_descriptors == 0) return kNotFound; return Search(name, number_of_own_descriptors); } int DescriptorArray::SearchWithCache(Isolate* isolate, Name name, Map map) { - DCHECK(name->IsUniqueName()); - int number_of_own_descriptors = map->NumberOfOwnDescriptors(); + DCHECK(name.IsUniqueName()); + int number_of_own_descriptors = map.NumberOfOwnDescriptors(); if (number_of_own_descriptors == 0) return kNotFound; DescriptorLookupCache* cache = isolate->descriptor_lookup_cache(); @@ -92,7 +92,11 @@ int DescriptorArray::SearchWithCache(Isolate* isolate, Name name, Map map) { } ObjectSlot DescriptorArray::GetFirstPointerSlot() { - return RawField(DescriptorArray::kPointersStartOffset); + static_assert(kEndOfStrongFieldsOffset == kStartOfWeakFieldsOffset, + "Weak and strong fields are continuous."); + static_assert(kEndOfWeakFieldsOffset == kHeaderSize, + "Weak fields extend up to the end of the header."); + return 
RawField(DescriptorArray::kStartOfStrongFieldsOffset); } ObjectSlot DescriptorArray::GetDescriptorSlot(int descriptor) { @@ -105,7 +109,7 @@ ObjectSlot DescriptorArray::GetDescriptorSlot(int descriptor) { ObjectSlot DescriptorArray::GetKeySlot(int descriptor) { DCHECK_LE(descriptor, number_of_all_descriptors()); ObjectSlot slot = GetDescriptorSlot(descriptor) + kEntryKeyIndex; - DCHECK((*slot)->IsObject()); + DCHECK((*slot).IsObject()); return slot; } @@ -194,7 +198,7 @@ void DescriptorArray::Append(Descriptor* desc) { for (insertion = descriptor_number; insertion > 0; --insertion) { Name key = GetSortedKey(insertion - 1); - if (key->Hash() <= hash) break; + if (key.Hash() <= hash) break; SetSortedKey(insertion, GetSortedKeyIndex(insertion - 1)); } diff --git a/deps/v8/src/objects/descriptor-array.h b/deps/v8/src/objects/descriptor-array.h index 89350514b7..3c1fa98a37 100644 --- a/deps/v8/src/objects/descriptor-array.h +++ b/deps/v8/src/objects/descriptor-array.h @@ -5,10 +5,10 @@ #ifndef V8_OBJECTS_DESCRIPTOR_ARRAY_H_ #define V8_OBJECTS_DESCRIPTOR_ARRAY_H_ -#include "src/objects.h" #include "src/objects/fixed-array.h" +#include "src/objects/objects.h" #include "src/objects/struct.h" -#include "src/utils.h" +#include "src/utils/utils.h" // Has to be the last include (doesn't have include guards): #include "src/objects/object-macros.h" @@ -139,20 +139,9 @@ class DescriptorArray : public HeapObject { static const int kNotFound = -1; // Layout description. 
-#define DESCRIPTOR_ARRAY_FIELDS(V) \ - V(kNumberOfAllDescriptorsOffset, kUInt16Size) \ - V(kNumberOfDescriptorsOffset, kUInt16Size) \ - V(kRawNumberOfMarkedDescriptorsOffset, kUInt16Size) \ - V(kFiller16BitsOffset, kUInt16Size) \ - V(kPointersStartOffset, 0) \ - V(kEnumCacheOffset, kTaggedSize) \ - V(kHeaderSize, 0) - DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize, - DESCRIPTOR_ARRAY_FIELDS) -#undef DESCRIPTOR_ARRAY_FIELDS - - STATIC_ASSERT(IsAligned(kPointersStartOffset, kTaggedSize)); + TORQUE_GENERATED_DESCRIPTOR_ARRAY_FIELDS) + STATIC_ASSERT(IsAligned(kStartOfWeakFieldsOffset, kTaggedSize)); STATIC_ASSERT(IsAligned(kHeaderSize, kTaggedSize)); // Garbage collection support. @@ -174,7 +163,13 @@ class DescriptorArray : public HeapObject { inline ObjectSlot GetKeySlot(int descriptor); inline MaybeObjectSlot GetValueSlot(int descriptor); - using BodyDescriptor = FlexibleWeakBodyDescriptor; + static_assert(kEndOfStrongFieldsOffset == kStartOfWeakFieldsOffset, + "Weak fields follow strong fields."); + static_assert(kEndOfWeakFieldsOffset == kHeaderSize, + "Weak fields extend up to the end of the header."); + // We use this visitor to also visit the enum_cache, which is + // the only tagged field in the header, and placed at the end of the header. + using BodyDescriptor = FlexibleWeakBodyDescriptor; // Layout of descriptor. // Naming is consistent with Dictionary classes for easy templating. 
diff --git a/deps/v8/src/objects/dictionary-inl.h b/deps/v8/src/objects/dictionary-inl.h index caacde21fa..a1692978f3 100644 --- a/deps/v8/src/objects/dictionary-inl.h +++ b/deps/v8/src/objects/dictionary-inl.h @@ -7,7 +7,7 @@ #include "src/objects/dictionary.h" -#include "src/hash-seed-inl.h" +#include "src/numbers/hash-seed-inl.h" #include "src/objects/hash-table-inl.h" #include "src/objects/oddball.h" #include "src/objects/property-cell-inl.h" @@ -53,14 +53,14 @@ SimpleNumberDictionary::SimpleNumberDictionary(Address ptr) bool NumberDictionary::requires_slow_elements() { Object max_index_object = get(kMaxNumberKeyIndex); - if (!max_index_object->IsSmi()) return false; + if (!max_index_object.IsSmi()) return false; return 0 != (Smi::ToInt(max_index_object) & kRequiresSlowElementsMask); } uint32_t NumberDictionary::max_number_key() { DCHECK(!requires_slow_elements()); Object max_index_object = get(kMaxNumberKeyIndex); - if (!max_index_object->IsSmi()) return 0; + if (!max_index_object.IsSmi()) return 0; uint32_t value = static_cast(Smi::ToInt(max_index_object)); return value >> kRequiresSlowElementsTagSize; } @@ -73,7 +73,7 @@ template void Dictionary::ClearEntry(Isolate* isolate, int entry) { Object the_hole = this->GetReadOnlyRoots().the_hole_value(); PropertyDetails details = PropertyDetails::Empty(); - Derived::cast(*this)->SetEntry(isolate, entry, the_hole, the_hole, details); + Derived::cast(*this).SetEntry(isolate, entry, the_hole, the_hole, details); } template @@ -81,7 +81,7 @@ void Dictionary::SetEntry(Isolate* isolate, int entry, Object key, Object value, PropertyDetails details) { DCHECK(Dictionary::kEntrySize == 2 || Dictionary::kEntrySize == 3); - DCHECK(!key->IsName() || details.dictionary_index() > 0); + DCHECK(!key.IsName() || details.dictionary_index() > 0); int index = DerivedHashTable::EntryToIndex(entry); DisallowHeapAllocation no_gc; WriteBarrierMode mode = this->GetWriteBarrierMode(no_gc); @@ -91,7 +91,7 @@ void Dictionary::SetEntry(Isolate* 
isolate, int entry, } Object GlobalDictionaryShape::Unwrap(Object object) { - return PropertyCell::cast(object)->name(); + return PropertyCell::cast(object).name(); } RootIndex GlobalDictionaryShape::GetMapRootIndex() { @@ -105,7 +105,7 @@ RootIndex NameDictionaryShape::GetMapRootIndex() { } PropertyCell GlobalDictionary::CellAt(int entry) { - DCHECK(KeyAt(entry)->IsPropertyCell()); + DCHECK(KeyAt(entry).IsPropertyCell()); return PropertyCell::cast(KeyAt(entry)); } @@ -115,15 +115,15 @@ bool GlobalDictionaryShape::IsLive(ReadOnlyRoots roots, Object k) { } bool GlobalDictionaryShape::IsKey(ReadOnlyRoots roots, Object k) { - return IsLive(roots, k) && !PropertyCell::cast(k)->value()->IsTheHole(roots); + return IsLive(roots, k) && !PropertyCell::cast(k).value().IsTheHole(roots); } -Name GlobalDictionary::NameAt(int entry) { return CellAt(entry)->name(); } -Object GlobalDictionary::ValueAt(int entry) { return CellAt(entry)->value(); } +Name GlobalDictionary::NameAt(int entry) { return CellAt(entry).name(); } +Object GlobalDictionary::ValueAt(int entry) { return CellAt(entry).value(); } void GlobalDictionary::SetEntry(Isolate* isolate, int entry, Object key, Object value, PropertyDetails details) { - DCHECK_EQ(key, PropertyCell::cast(value)->name()); + DCHECK_EQ(key, PropertyCell::cast(value).name()); set(EntryToIndex(entry) + kEntryKeyIndex, value); DetailsAtPut(isolate, entry, details); } @@ -133,8 +133,8 @@ void GlobalDictionary::ValueAtPut(int entry, Object value) { } bool NumberDictionaryBaseShape::IsMatch(uint32_t key, Object other) { - DCHECK(other->IsNumber()); - return key == static_cast(other->Number()); + DCHECK(other.IsNumber()); + return key == static_cast(other.Number()); } uint32_t NumberDictionaryBaseShape::Hash(Isolate* isolate, uint32_t key) { @@ -143,8 +143,8 @@ uint32_t NumberDictionaryBaseShape::Hash(Isolate* isolate, uint32_t key) { uint32_t NumberDictionaryBaseShape::HashForObject(ReadOnlyRoots roots, Object other) { - DCHECK(other->IsNumber()); - 
return ComputeSeededHash(static_cast(other->Number()), + DCHECK(other.IsNumber()); + return ComputeSeededHash(static_cast(other.Number()), HashSeed(roots)); } @@ -162,7 +162,7 @@ RootIndex SimpleNumberDictionaryShape::GetMapRootIndex() { } bool NameDictionaryShape::IsMatch(Handle key, Object other) { - DCHECK(other->IsTheHole() || Name::cast(other)->IsUniqueName()); + DCHECK(other.IsTheHole() || Name::cast(other).IsUniqueName()); DCHECK(key->IsUniqueName()); return *key == other; } @@ -172,17 +172,17 @@ uint32_t NameDictionaryShape::Hash(Isolate* isolate, Handle key) { } uint32_t NameDictionaryShape::HashForObject(ReadOnlyRoots roots, Object other) { - return Name::cast(other)->Hash(); + return Name::cast(other).Hash(); } bool GlobalDictionaryShape::IsMatch(Handle key, Object other) { - DCHECK(PropertyCell::cast(other)->name()->IsUniqueName()); - return *key == PropertyCell::cast(other)->name(); + DCHECK(PropertyCell::cast(other).name().IsUniqueName()); + return *key == PropertyCell::cast(other).name(); } uint32_t GlobalDictionaryShape::HashForObject(ReadOnlyRoots roots, Object other) { - return PropertyCell::cast(other)->name()->Hash(); + return PropertyCell::cast(other).name().Hash(); } Handle NameDictionaryShape::AsHandle(Isolate* isolate, @@ -194,19 +194,19 @@ Handle NameDictionaryShape::AsHandle(Isolate* isolate, template PropertyDetails GlobalDictionaryShape::DetailsAt(Dictionary dict, int entry) { DCHECK_LE(0, entry); // Not found is -1, which is not caught by get(). - return dict->CellAt(entry)->property_details(); + return dict.CellAt(entry).property_details(); } template void GlobalDictionaryShape::DetailsAtPut(Isolate* isolate, Dictionary dict, int entry, PropertyDetails value) { DCHECK_LE(0, entry); // Not found is -1, which is not caught by get(). 
- PropertyCell cell = dict->CellAt(entry); - if (cell->property_details().IsReadOnly() != value.IsReadOnly()) { - cell->dependent_code()->DeoptimizeDependentCodeGroup( + PropertyCell cell = dict.CellAt(entry); + if (cell.property_details().IsReadOnly() != value.IsReadOnly()) { + cell.dependent_code().DeoptimizeDependentCodeGroup( isolate, DependentCode::kPropertyCellChangedGroup); } - cell->set_property_details(value); + cell.set_property_details(value); } } // namespace internal diff --git a/deps/v8/src/objects/dictionary.h b/deps/v8/src/objects/dictionary.h index 0bce08393f..ca709f34d8 100644 --- a/deps/v8/src/objects/dictionary.h +++ b/deps/v8/src/objects/dictionary.h @@ -6,11 +6,11 @@ #define V8_OBJECTS_DICTIONARY_H_ #include "src/base/export-template.h" -#include "src/globals.h" +#include "src/common/globals.h" #include "src/objects/hash-table.h" #include "src/objects/property-array.h" #include "src/objects/smi.h" -#include "src/roots.h" +#include "src/roots/roots.h" // Has to be the last include (doesn't have include guards): #include "src/objects/object-macros.h" @@ -99,16 +99,16 @@ class BaseDictionaryShape : public BaseShape { static inline PropertyDetails DetailsAt(Dictionary dict, int entry) { STATIC_ASSERT(Dictionary::kEntrySize == 3); DCHECK_GE(entry, 0); // Not found is -1, which is not caught by get(). 
- return PropertyDetails(Smi::cast(dict->get( - Dictionary::EntryToIndex(entry) + Dictionary::kEntryDetailsIndex))); + return PropertyDetails(Smi::cast(dict.get(Dictionary::EntryToIndex(entry) + + Dictionary::kEntryDetailsIndex))); } template static inline void DetailsAtPut(Isolate* isolate, Dictionary dict, int entry, PropertyDetails value) { STATIC_ASSERT(Dictionary::kEntrySize == 3); - dict->set(Dictionary::EntryToIndex(entry) + Dictionary::kEntryDetailsIndex, - value.AsSmi()); + dict.set(Dictionary::EntryToIndex(entry) + Dictionary::kEntryDetailsIndex, + value.AsSmi()); } }; @@ -341,10 +341,6 @@ class NumberDictionary static const int kMaxNumberKeyIndex = kPrefixStartIndex; void UpdateMaxNumberKey(uint32_t key, Handle dictionary_holder); - // Returns true if the dictionary contains any elements that are non-writable, - // non-configurable, non-enumerable, or have getters/setters. - bool HasComplexElements(); - // Sorting support void CopyValuesTo(FixedArray elements); diff --git a/deps/v8/src/objects/elements-inl.h b/deps/v8/src/objects/elements-inl.h new file mode 100644 index 0000000000..c4f2e2bf78 --- /dev/null +++ b/deps/v8/src/objects/elements-inl.h @@ -0,0 +1,38 @@ +// Copyright 2018 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#ifndef V8_OBJECTS_ELEMENTS_INL_H_ +#define V8_OBJECTS_ELEMENTS_INL_H_ + +#include "src/objects/elements.h" + +#include "src/handles/handles-inl.h" +#include "src/objects/objects-inl.h" + +namespace v8 { +namespace internal { + +inline void ElementsAccessor::CollectElementIndices(Handle object, + KeyAccumulator* keys) { + CollectElementIndices(object, handle(object->elements(), keys->isolate()), + keys); +} + +inline MaybeHandle ElementsAccessor::PrependElementIndices( + Handle object, Handle keys, GetKeysConversion convert, + PropertyFilter filter) { + return PrependElementIndices(object, + handle(object->elements(), object->GetIsolate()), + keys, convert, filter); +} + +inline bool ElementsAccessor::HasElement(JSObject holder, uint32_t index, + PropertyFilter filter) { + return HasElement(holder, index, holder.elements(), filter); +} + +} // namespace internal +} // namespace v8 + +#endif // V8_OBJECTS_ELEMENTS_INL_H_ diff --git a/deps/v8/src/objects/elements-kind.cc b/deps/v8/src/objects/elements-kind.cc new file mode 100644 index 0000000000..a819caf459 --- /dev/null +++ b/deps/v8/src/objects/elements-kind.cc @@ -0,0 +1,266 @@ +// Copyright 2012 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#include "src/objects/elements-kind.h" + +#include "src/base/lazy-instance.h" +#include "src/objects/elements.h" +#include "src/objects/objects-inl.h" +#include "src/objects/objects.h" + +namespace v8 { +namespace internal { + +int ElementsKindToShiftSize(ElementsKind elements_kind) { + switch (elements_kind) { + case UINT8_ELEMENTS: + case INT8_ELEMENTS: + case UINT8_CLAMPED_ELEMENTS: + return 0; + case UINT16_ELEMENTS: + case INT16_ELEMENTS: + return 1; + case UINT32_ELEMENTS: + case INT32_ELEMENTS: + case FLOAT32_ELEMENTS: + return 2; + case PACKED_DOUBLE_ELEMENTS: + case HOLEY_DOUBLE_ELEMENTS: + case FLOAT64_ELEMENTS: + case BIGINT64_ELEMENTS: + case BIGUINT64_ELEMENTS: + return 3; + case PACKED_SMI_ELEMENTS: + case PACKED_ELEMENTS: + case PACKED_FROZEN_ELEMENTS: + case PACKED_SEALED_ELEMENTS: + case HOLEY_SMI_ELEMENTS: + case HOLEY_ELEMENTS: + case HOLEY_FROZEN_ELEMENTS: + case HOLEY_SEALED_ELEMENTS: + case DICTIONARY_ELEMENTS: + case FAST_SLOPPY_ARGUMENTS_ELEMENTS: + case SLOW_SLOPPY_ARGUMENTS_ELEMENTS: + case FAST_STRING_WRAPPER_ELEMENTS: + case SLOW_STRING_WRAPPER_ELEMENTS: + return kTaggedSizeLog2; + case NO_ELEMENTS: + UNREACHABLE(); + } + UNREACHABLE(); +} + +int ElementsKindToByteSize(ElementsKind elements_kind) { + return 1 << ElementsKindToShiftSize(elements_kind); +} + +int GetDefaultHeaderSizeForElementsKind(ElementsKind elements_kind) { + STATIC_ASSERT(FixedArray::kHeaderSize == FixedDoubleArray::kHeaderSize); + + if (IsTypedArrayElementsKind(elements_kind)) { + return 0; + } else { + return FixedArray::kHeaderSize - kHeapObjectTag; + } +} + +const char* ElementsKindToString(ElementsKind kind) { + switch (kind) { + case PACKED_SMI_ELEMENTS: + return "PACKED_SMI_ELEMENTS"; + case HOLEY_SMI_ELEMENTS: + return "HOLEY_SMI_ELEMENTS"; + case PACKED_ELEMENTS: + return "PACKED_ELEMENTS"; + case HOLEY_ELEMENTS: + return "HOLEY_ELEMENTS"; + case PACKED_DOUBLE_ELEMENTS: + return "PACKED_DOUBLE_ELEMENTS"; + case HOLEY_DOUBLE_ELEMENTS: + return 
"HOLEY_DOUBLE_ELEMENTS"; + case PACKED_SEALED_ELEMENTS: + return "PACKED_SEALED_ELEMENTS"; + case HOLEY_SEALED_ELEMENTS: + return "HOLEY_SEALED_ELEMENTS"; + case PACKED_FROZEN_ELEMENTS: + return "PACKED_FROZEN_ELEMENTS"; + case HOLEY_FROZEN_ELEMENTS: + return "HOLEY_FROZEN_ELEMENTS"; + case DICTIONARY_ELEMENTS: + return "DICTIONARY_ELEMENTS"; + case FAST_SLOPPY_ARGUMENTS_ELEMENTS: + return "FAST_SLOPPY_ARGUMENTS_ELEMENTS"; + case SLOW_SLOPPY_ARGUMENTS_ELEMENTS: + return "SLOW_SLOPPY_ARGUMENTS_ELEMENTS"; + case FAST_STRING_WRAPPER_ELEMENTS: + return "FAST_STRING_WRAPPER_ELEMENTS"; + case SLOW_STRING_WRAPPER_ELEMENTS: + return "SLOW_STRING_WRAPPER_ELEMENTS"; + +#define PRINT_NAME(Type, type, TYPE, _) \ + case TYPE##_ELEMENTS: \ + return #TYPE "ELEMENTS"; + + TYPED_ARRAYS(PRINT_NAME); +#undef PRINT_NAME + case NO_ELEMENTS: + return "NO_ELEMENTS"; + } +} + +ElementsKind kFastElementsKindSequence[kFastElementsKindCount] = { + PACKED_SMI_ELEMENTS, // 0 + HOLEY_SMI_ELEMENTS, // 1 + PACKED_DOUBLE_ELEMENTS, // 2 + HOLEY_DOUBLE_ELEMENTS, // 3 + PACKED_ELEMENTS, // 4 + HOLEY_ELEMENTS // 5 +}; +STATIC_ASSERT(PACKED_SMI_ELEMENTS == FIRST_FAST_ELEMENTS_KIND); +// Verify that kFastElementsKindPackedToHoley is correct. 
+STATIC_ASSERT(PACKED_SMI_ELEMENTS + kFastElementsKindPackedToHoley == + HOLEY_SMI_ELEMENTS); +STATIC_ASSERT(PACKED_DOUBLE_ELEMENTS + kFastElementsKindPackedToHoley == + HOLEY_DOUBLE_ELEMENTS); +STATIC_ASSERT(PACKED_ELEMENTS + kFastElementsKindPackedToHoley == + HOLEY_ELEMENTS); + +ElementsKind GetFastElementsKindFromSequenceIndex(int sequence_number) { + DCHECK(sequence_number >= 0 && sequence_number < kFastElementsKindCount); + return kFastElementsKindSequence[sequence_number]; +} + +int GetSequenceIndexFromFastElementsKind(ElementsKind elements_kind) { + for (int i = 0; i < kFastElementsKindCount; ++i) { + if (kFastElementsKindSequence[i] == elements_kind) { + return i; + } + } + UNREACHABLE(); +} + +ElementsKind GetNextTransitionElementsKind(ElementsKind kind) { + int index = GetSequenceIndexFromFastElementsKind(kind); + return GetFastElementsKindFromSequenceIndex(index + 1); +} + +static inline bool IsFastTransitionTarget(ElementsKind elements_kind) { + return IsFastElementsKind(elements_kind) || + elements_kind == DICTIONARY_ELEMENTS; +} + +bool IsMoreGeneralElementsKindTransition(ElementsKind from_kind, + ElementsKind to_kind) { + if (!IsFastElementsKind(from_kind)) return false; + if (!IsFastTransitionTarget(to_kind)) return false; + DCHECK(!IsTypedArrayElementsKind(from_kind)); + DCHECK(!IsTypedArrayElementsKind(to_kind)); + switch (from_kind) { + case PACKED_SMI_ELEMENTS: + return to_kind != PACKED_SMI_ELEMENTS; + case HOLEY_SMI_ELEMENTS: + return to_kind != PACKED_SMI_ELEMENTS && to_kind != HOLEY_SMI_ELEMENTS; + case PACKED_DOUBLE_ELEMENTS: + return to_kind != PACKED_SMI_ELEMENTS && to_kind != HOLEY_SMI_ELEMENTS && + to_kind != PACKED_DOUBLE_ELEMENTS; + case HOLEY_DOUBLE_ELEMENTS: + return to_kind == PACKED_ELEMENTS || to_kind == HOLEY_ELEMENTS; + case PACKED_ELEMENTS: + return to_kind == HOLEY_ELEMENTS; + case HOLEY_ELEMENTS: + return false; + default: + return false; + } +} + +bool UnionElementsKindUptoSize(ElementsKind* a_out, ElementsKind b) { + // 
Assert that the union of two ElementKinds can be computed via std::max. + static_assert(PACKED_SMI_ELEMENTS < HOLEY_SMI_ELEMENTS, + "ElementsKind union not computable via std::max."); + static_assert(HOLEY_SMI_ELEMENTS < PACKED_ELEMENTS, + "ElementsKind union not computable via std::max."); + static_assert(PACKED_ELEMENTS < HOLEY_ELEMENTS, + "ElementsKind union not computable via std::max."); + static_assert(PACKED_DOUBLE_ELEMENTS < HOLEY_DOUBLE_ELEMENTS, + "ElementsKind union not computable via std::max."); + ElementsKind a = *a_out; + switch (a) { + case PACKED_SMI_ELEMENTS: + switch (b) { + case PACKED_SMI_ELEMENTS: + case HOLEY_SMI_ELEMENTS: + case PACKED_ELEMENTS: + case HOLEY_ELEMENTS: + *a_out = b; + return true; + default: + return false; + } + case HOLEY_SMI_ELEMENTS: + switch (b) { + case PACKED_SMI_ELEMENTS: + case HOLEY_SMI_ELEMENTS: + *a_out = HOLEY_SMI_ELEMENTS; + return true; + case PACKED_ELEMENTS: + case HOLEY_ELEMENTS: + *a_out = HOLEY_ELEMENTS; + return true; + default: + return false; + } + case PACKED_ELEMENTS: + switch (b) { + case PACKED_SMI_ELEMENTS: + case PACKED_ELEMENTS: + *a_out = PACKED_ELEMENTS; + return true; + case HOLEY_SMI_ELEMENTS: + case HOLEY_ELEMENTS: + *a_out = HOLEY_ELEMENTS; + return true; + default: + return false; + } + case HOLEY_ELEMENTS: + switch (b) { + case PACKED_SMI_ELEMENTS: + case HOLEY_SMI_ELEMENTS: + case PACKED_ELEMENTS: + case HOLEY_ELEMENTS: + *a_out = HOLEY_ELEMENTS; + return true; + default: + return false; + } + break; + case PACKED_DOUBLE_ELEMENTS: + switch (b) { + case PACKED_DOUBLE_ELEMENTS: + case HOLEY_DOUBLE_ELEMENTS: + *a_out = b; + return true; + default: + return false; + } + case HOLEY_DOUBLE_ELEMENTS: + switch (b) { + case PACKED_DOUBLE_ELEMENTS: + case HOLEY_DOUBLE_ELEMENTS: + *a_out = HOLEY_DOUBLE_ELEMENTS; + return true; + default: + return false; + } + + break; + default: + break; + } + return false; +} + +} // namespace internal +} // namespace v8 diff --git 
a/deps/v8/src/objects/elements-kind.h b/deps/v8/src/objects/elements-kind.h new file mode 100644 index 0000000000..3ed6ea66ec --- /dev/null +++ b/deps/v8/src/objects/elements-kind.h @@ -0,0 +1,317 @@ +// Copyright 2012 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef V8_OBJECTS_ELEMENTS_KIND_H_ +#define V8_OBJECTS_ELEMENTS_KIND_H_ + +#include "src/base/macros.h" +#include "src/common/checks.h" +#include "src/flags/flags.h" +#include "src/utils/utils.h" + +namespace v8 { +namespace internal { + +// V has parameters (Type, type, TYPE, C type) +#define TYPED_ARRAYS(V) \ + V(Uint8, uint8, UINT8, uint8_t) \ + V(Int8, int8, INT8, int8_t) \ + V(Uint16, uint16, UINT16, uint16_t) \ + V(Int16, int16, INT16, int16_t) \ + V(Uint32, uint32, UINT32, uint32_t) \ + V(Int32, int32, INT32, int32_t) \ + V(Float32, float32, FLOAT32, float) \ + V(Float64, float64, FLOAT64, double) \ + V(Uint8Clamped, uint8_clamped, UINT8_CLAMPED, uint8_t) \ + V(BigUint64, biguint64, BIGUINT64, uint64_t) \ + V(BigInt64, bigint64, BIGINT64, int64_t) + +enum ElementsKind : uint8_t { + // The "fast" kind for elements that only contain SMI values. Must be first + // to make it possible to efficiently check maps for this kind. + PACKED_SMI_ELEMENTS, + HOLEY_SMI_ELEMENTS, + + // The "fast" kind for tagged values. Must be second to make it possible to + // efficiently check maps for this and the PACKED_SMI_ELEMENTS kind + // together at once. + PACKED_ELEMENTS, + HOLEY_ELEMENTS, + + // The "fast" kind for unwrapped, non-tagged double values. + PACKED_DOUBLE_ELEMENTS, + HOLEY_DOUBLE_ELEMENTS, + + // The sealed kind for elements. + PACKED_SEALED_ELEMENTS, + HOLEY_SEALED_ELEMENTS, + + // The frozen kind for elements. + PACKED_FROZEN_ELEMENTS, + HOLEY_FROZEN_ELEMENTS, + + // The "slow" kind. + DICTIONARY_ELEMENTS, + + // Elements kind of the "arguments" object (only in sloppy mode). 
+ FAST_SLOPPY_ARGUMENTS_ELEMENTS, + SLOW_SLOPPY_ARGUMENTS_ELEMENTS, + + // For string wrapper objects ("new String('...')"), the string's characters + // are overlaid onto a regular elements backing store. + FAST_STRING_WRAPPER_ELEMENTS, + SLOW_STRING_WRAPPER_ELEMENTS, + +// Fixed typed arrays. +#define TYPED_ARRAY_ELEMENTS_KIND(Type, type, TYPE, ctype) TYPE##_ELEMENTS, + TYPED_ARRAYS(TYPED_ARRAY_ELEMENTS_KIND) +#undef TYPED_ARRAY_ELEMENTS_KIND + + // Sentinel ElementsKind for objects with no elements. + NO_ELEMENTS, + + // Derived constants from ElementsKind. + FIRST_ELEMENTS_KIND = PACKED_SMI_ELEMENTS, + LAST_ELEMENTS_KIND = BIGINT64_ELEMENTS, + FIRST_FAST_ELEMENTS_KIND = PACKED_SMI_ELEMENTS, + LAST_FAST_ELEMENTS_KIND = HOLEY_DOUBLE_ELEMENTS, + FIRST_FIXED_TYPED_ARRAY_ELEMENTS_KIND = UINT8_ELEMENTS, + LAST_FIXED_TYPED_ARRAY_ELEMENTS_KIND = BIGINT64_ELEMENTS, + TERMINAL_FAST_ELEMENTS_KIND = HOLEY_ELEMENTS, + LAST_FROZEN_ELEMENTS_KIND = HOLEY_FROZEN_ELEMENTS, + +// Alias for kSystemPointerSize-sized elements +#ifdef V8_COMPRESS_POINTERS + SYSTEM_POINTER_ELEMENTS = PACKED_DOUBLE_ELEMENTS, +#else + SYSTEM_POINTER_ELEMENTS = PACKED_ELEMENTS, +#endif +}; + +constexpr int kElementsKindCount = LAST_ELEMENTS_KIND - FIRST_ELEMENTS_KIND + 1; +constexpr int kFastElementsKindCount = + LAST_FAST_ELEMENTS_KIND - FIRST_FAST_ELEMENTS_KIND + 1; + +// The number to add to a packed elements kind to reach a holey elements kind +constexpr int kFastElementsKindPackedToHoley = + HOLEY_SMI_ELEMENTS - PACKED_SMI_ELEMENTS; + +V8_EXPORT_PRIVATE int ElementsKindToShiftSize(ElementsKind elements_kind); +V8_EXPORT_PRIVATE int ElementsKindToByteSize(ElementsKind elements_kind); +int GetDefaultHeaderSizeForElementsKind(ElementsKind elements_kind); +const char* ElementsKindToString(ElementsKind kind); + +inline ElementsKind GetInitialFastElementsKind() { return PACKED_SMI_ELEMENTS; } + +ElementsKind GetFastElementsKindFromSequenceIndex(int sequence_number); +int 
GetSequenceIndexFromFastElementsKind(ElementsKind elements_kind); + +ElementsKind GetNextTransitionElementsKind(ElementsKind elements_kind); + +inline bool IsDictionaryElementsKind(ElementsKind kind) { + return kind == DICTIONARY_ELEMENTS; +} + +inline bool IsSloppyArgumentsElementsKind(ElementsKind kind) { + return IsInRange(kind, FAST_SLOPPY_ARGUMENTS_ELEMENTS, + SLOW_SLOPPY_ARGUMENTS_ELEMENTS); +} + +inline bool IsStringWrapperElementsKind(ElementsKind kind) { + return IsInRange(kind, FAST_STRING_WRAPPER_ELEMENTS, + SLOW_STRING_WRAPPER_ELEMENTS); +} + +inline bool IsTypedArrayElementsKind(ElementsKind kind) { + return IsInRange(kind, FIRST_FIXED_TYPED_ARRAY_ELEMENTS_KIND, + LAST_FIXED_TYPED_ARRAY_ELEMENTS_KIND); +} + +inline bool IsTerminalElementsKind(ElementsKind kind) { + return kind == TERMINAL_FAST_ELEMENTS_KIND || IsTypedArrayElementsKind(kind); +} + +inline bool IsFastElementsKind(ElementsKind kind) { + STATIC_ASSERT(FIRST_FAST_ELEMENTS_KIND == 0); + return kind <= LAST_FAST_ELEMENTS_KIND; +} + +inline bool IsTransitionElementsKind(ElementsKind kind) { + return IsFastElementsKind(kind) || IsTypedArrayElementsKind(kind) || + kind == FAST_SLOPPY_ARGUMENTS_ELEMENTS || + kind == FAST_STRING_WRAPPER_ELEMENTS; +} + +inline bool IsDoubleElementsKind(ElementsKind kind) { + return IsInRange(kind, PACKED_DOUBLE_ELEMENTS, HOLEY_DOUBLE_ELEMENTS); +} + +inline bool IsFixedFloatElementsKind(ElementsKind kind) { + return kind == FLOAT32_ELEMENTS || kind == FLOAT64_ELEMENTS; +} + +inline bool IsDoubleOrFloatElementsKind(ElementsKind kind) { + return IsDoubleElementsKind(kind) || IsFixedFloatElementsKind(kind); +} + +// This predicate is used for disabling respective functionality in builtins. 
+inline bool IsFrozenOrSealedElementsKindUnchecked(ElementsKind kind) { + return IsInRange(kind, PACKED_SEALED_ELEMENTS, HOLEY_FROZEN_ELEMENTS); +} + +inline bool IsFrozenOrSealedElementsKind(ElementsKind kind) { + DCHECK_IMPLIES(IsFrozenOrSealedElementsKindUnchecked(kind), + FLAG_enable_sealed_frozen_elements_kind); + return IsFrozenOrSealedElementsKindUnchecked(kind); +} + +inline bool IsSealedElementsKind(ElementsKind kind) { + DCHECK_IMPLIES(IsInRange(kind, PACKED_SEALED_ELEMENTS, HOLEY_SEALED_ELEMENTS), + FLAG_enable_sealed_frozen_elements_kind); + return IsInRange(kind, PACKED_SEALED_ELEMENTS, HOLEY_SEALED_ELEMENTS); +} + +inline bool IsFrozenElementsKind(ElementsKind kind) { + DCHECK_IMPLIES(IsInRange(kind, PACKED_FROZEN_ELEMENTS, HOLEY_FROZEN_ELEMENTS), + FLAG_enable_sealed_frozen_elements_kind); + return IsInRange(kind, PACKED_FROZEN_ELEMENTS, HOLEY_FROZEN_ELEMENTS); +} + +inline bool IsSmiOrObjectElementsKind(ElementsKind kind) { + return IsInRange(kind, PACKED_SMI_ELEMENTS, HOLEY_ELEMENTS); +} + +inline bool IsSmiElementsKind(ElementsKind kind) { + return IsInRange(kind, PACKED_SMI_ELEMENTS, HOLEY_SMI_ELEMENTS); +} + +inline bool IsFastNumberElementsKind(ElementsKind kind) { + return IsSmiElementsKind(kind) || IsDoubleElementsKind(kind); +} + +inline bool IsObjectElementsKind(ElementsKind kind) { + return IsInRange(kind, PACKED_ELEMENTS, HOLEY_ELEMENTS); +} + +inline bool IsHoleyFrozenOrSealedElementsKind(ElementsKind kind) { + DCHECK_IMPLIES(kind == HOLEY_SEALED_ELEMENTS || kind == HOLEY_FROZEN_ELEMENTS, + FLAG_enable_sealed_frozen_elements_kind); + return kind == HOLEY_SEALED_ELEMENTS || kind == HOLEY_FROZEN_ELEMENTS; +} + +inline bool IsHoleyElementsKind(ElementsKind kind) { + return kind % 2 == 1 && kind <= HOLEY_DOUBLE_ELEMENTS; +} + +inline bool IsHoleyElementsKindForRead(ElementsKind kind) { + return kind % 2 == 1 && kind <= HOLEY_FROZEN_ELEMENTS; +} + +inline bool IsHoleyOrDictionaryElementsKind(ElementsKind kind) { + return 
IsHoleyElementsKindForRead(kind) || kind == DICTIONARY_ELEMENTS; +} + +inline bool IsFastPackedElementsKind(ElementsKind kind) { + return kind % 2 == 0 && kind <= PACKED_DOUBLE_ELEMENTS; +} + +inline ElementsKind GetPackedElementsKind(ElementsKind holey_kind) { + if (holey_kind == HOLEY_SMI_ELEMENTS) { + return PACKED_SMI_ELEMENTS; + } + if (holey_kind == HOLEY_DOUBLE_ELEMENTS) { + return PACKED_DOUBLE_ELEMENTS; + } + if (holey_kind == HOLEY_ELEMENTS) { + return PACKED_ELEMENTS; + } + return holey_kind; +} + +inline ElementsKind GetHoleyElementsKind(ElementsKind packed_kind) { + if (packed_kind == PACKED_SMI_ELEMENTS) { + return HOLEY_SMI_ELEMENTS; + } + if (packed_kind == PACKED_DOUBLE_ELEMENTS) { + return HOLEY_DOUBLE_ELEMENTS; + } + if (packed_kind == PACKED_ELEMENTS) { + return HOLEY_ELEMENTS; + } + return packed_kind; +} + +inline bool UnionElementsKindUptoPackedness(ElementsKind* a_out, + ElementsKind b) { + // Assert that the union of two ElementKinds can be computed via std::max. 
+ static_assert(PACKED_SMI_ELEMENTS < HOLEY_SMI_ELEMENTS, + "ElementsKind union not computable via std::max."); + static_assert(PACKED_ELEMENTS < HOLEY_ELEMENTS, + "ElementsKind union not computable via std::max."); + static_assert(PACKED_DOUBLE_ELEMENTS < HOLEY_DOUBLE_ELEMENTS, + "ElementsKind union not computable via std::max."); + ElementsKind a = *a_out; + switch (a) { + case HOLEY_SMI_ELEMENTS: + case PACKED_SMI_ELEMENTS: + if (b == PACKED_SMI_ELEMENTS || b == HOLEY_SMI_ELEMENTS) { + *a_out = std::max(a, b); + return true; + } + break; + case PACKED_ELEMENTS: + case HOLEY_ELEMENTS: + if (b == PACKED_ELEMENTS || b == HOLEY_ELEMENTS) { + *a_out = std::max(a, b); + return true; + } + break; + case PACKED_DOUBLE_ELEMENTS: + case HOLEY_DOUBLE_ELEMENTS: + if (b == PACKED_DOUBLE_ELEMENTS || b == HOLEY_DOUBLE_ELEMENTS) { + *a_out = std::max(a, b); + return true; + } + break; + default: + break; + } + return false; +} + +bool UnionElementsKindUptoSize(ElementsKind* a_out, ElementsKind b); + +inline ElementsKind FastSmiToObjectElementsKind(ElementsKind from_kind) { + DCHECK(IsSmiElementsKind(from_kind)); + return (from_kind == PACKED_SMI_ELEMENTS) ? 
PACKED_ELEMENTS : HOLEY_ELEMENTS; +} + +inline bool IsSimpleMapChangeTransition(ElementsKind from_kind, + ElementsKind to_kind) { + return (GetHoleyElementsKind(from_kind) == to_kind) || + (IsSmiElementsKind(from_kind) && IsObjectElementsKind(to_kind)); +} + +bool IsMoreGeneralElementsKindTransition(ElementsKind from_kind, + ElementsKind to_kind); + +inline ElementsKind GetMoreGeneralElementsKind(ElementsKind from_kind, + ElementsKind to_kind) { + if (IsMoreGeneralElementsKindTransition(from_kind, to_kind)) { + return to_kind; + } + return from_kind; +} + +inline bool IsTransitionableFastElementsKind(ElementsKind from_kind) { + return IsFastElementsKind(from_kind) && + from_kind != TERMINAL_FAST_ELEMENTS_KIND; +} + +inline bool ElementsKindEqual(ElementsKind a, ElementsKind b) { return a == b; } + +} // namespace internal +} // namespace v8 + +#endif // V8_OBJECTS_ELEMENTS_KIND_H_ diff --git a/deps/v8/src/objects/elements.cc b/deps/v8/src/objects/elements.cc new file mode 100644 index 0000000000..e1232a0d5b --- /dev/null +++ b/deps/v8/src/objects/elements.cc @@ -0,0 +1,4798 @@ +// Copyright 2012 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "src/objects/elements.h" + +#include "src/execution/arguments.h" +#include "src/execution/frames.h" +#include "src/execution/isolate-inl.h" +#include "src/execution/message-template.h" +#include "src/heap/factory.h" +#include "src/heap/heap-inl.h" // For MaxNumberToStringCacheSize. 
+#include "src/heap/heap-write-barrier-inl.h" +#include "src/numbers/conversions.h" +#include "src/objects/arguments-inl.h" +#include "src/objects/hash-table-inl.h" +#include "src/objects/js-array-buffer-inl.h" +#include "src/objects/js-array-inl.h" +#include "src/objects/keys.h" +#include "src/objects/objects-inl.h" +#include "src/objects/slots-atomic-inl.h" +#include "src/objects/slots.h" +#include "src/utils/utils.h" + +// Each concrete ElementsAccessor can handle exactly one ElementsKind, +// several abstract ElementsAccessor classes are used to allow sharing +// common code. +// +// Inheritance hierarchy: +// - ElementsAccessorBase (abstract) +// - FastElementsAccessor (abstract) +// - FastSmiOrObjectElementsAccessor +// - FastPackedSmiElementsAccessor +// - FastHoleySmiElementsAccessor +// - FastPackedObjectElementsAccessor +// - FastSealedObjectElementsAccessor: template +// - FastPackedSealedObjectElementsAccessor +// - FastHoleySealedObjectElementsAccessor +// - FastFrozenObjectElementsAccessor: template +// - FastPackedFrozenObjectElementsAccessor +// - FastHoleyFrozenObjectElementsAccessor +// - FastHoleyObjectElementsAccessor +// - FastDoubleElementsAccessor +// - FastPackedDoubleElementsAccessor +// - FastHoleyDoubleElementsAccessor +// - TypedElementsAccessor: template, with instantiations: +// - Uint8ElementsAccessor +// - Int8ElementsAccessor +// - Uint16ElementsAccessor +// - Int16ElementsAccessor +// - Uint32ElementsAccessor +// - Int32ElementsAccessor +// - Float32ElementsAccessor +// - Float64ElementsAccessor +// - Uint8ClampedElementsAccessor +// - BigUint64ElementsAccessor +// - BigInt64ElementsAccessor +// - DictionaryElementsAccessor +// - SloppyArgumentsElementsAccessor +// - FastSloppyArgumentsElementsAccessor +// - SlowSloppyArgumentsElementsAccessor +// - StringWrapperElementsAccessor +// - FastStringWrapperElementsAccessor +// - SlowStringWrapperElementsAccessor + +namespace v8 { +namespace internal { + +namespace { + +static const int 
kPackedSizeNotKnown = -1; + +enum Where { AT_START, AT_END }; + +// First argument in list is the accessor class, the second argument is the +// accessor ElementsKind, and the third is the backing store class. Use the +// fast element handler for smi-only arrays. The implementation is currently +// identical. Note that the order must match that of the ElementsKind enum for +// the |accessor_array[]| below to work. +#define ELEMENTS_LIST(V) \ + V(FastPackedSmiElementsAccessor, PACKED_SMI_ELEMENTS, FixedArray) \ + V(FastHoleySmiElementsAccessor, HOLEY_SMI_ELEMENTS, FixedArray) \ + V(FastPackedObjectElementsAccessor, PACKED_ELEMENTS, FixedArray) \ + V(FastHoleyObjectElementsAccessor, HOLEY_ELEMENTS, FixedArray) \ + V(FastPackedDoubleElementsAccessor, PACKED_DOUBLE_ELEMENTS, \ + FixedDoubleArray) \ + V(FastHoleyDoubleElementsAccessor, HOLEY_DOUBLE_ELEMENTS, FixedDoubleArray) \ + V(FastPackedSealedObjectElementsAccessor, PACKED_SEALED_ELEMENTS, \ + FixedArray) \ + V(FastHoleySealedObjectElementsAccessor, HOLEY_SEALED_ELEMENTS, FixedArray) \ + V(FastPackedFrozenObjectElementsAccessor, PACKED_FROZEN_ELEMENTS, \ + FixedArray) \ + V(FastHoleyFrozenObjectElementsAccessor, HOLEY_FROZEN_ELEMENTS, FixedArray) \ + V(DictionaryElementsAccessor, DICTIONARY_ELEMENTS, NumberDictionary) \ + V(FastSloppyArgumentsElementsAccessor, FAST_SLOPPY_ARGUMENTS_ELEMENTS, \ + FixedArray) \ + V(SlowSloppyArgumentsElementsAccessor, SLOW_SLOPPY_ARGUMENTS_ELEMENTS, \ + FixedArray) \ + V(FastStringWrapperElementsAccessor, FAST_STRING_WRAPPER_ELEMENTS, \ + FixedArray) \ + V(SlowStringWrapperElementsAccessor, SLOW_STRING_WRAPPER_ELEMENTS, \ + FixedArray) \ + V(Uint8ElementsAccessor, UINT8_ELEMENTS, ByteArray) \ + V(Int8ElementsAccessor, INT8_ELEMENTS, ByteArray) \ + V(Uint16ElementsAccessor, UINT16_ELEMENTS, ByteArray) \ + V(Int16ElementsAccessor, INT16_ELEMENTS, ByteArray) \ + V(Uint32ElementsAccessor, UINT32_ELEMENTS, ByteArray) \ + V(Int32ElementsAccessor, INT32_ELEMENTS, ByteArray) \ + 
V(Float32ElementsAccessor, FLOAT32_ELEMENTS, ByteArray) \ + V(Float64ElementsAccessor, FLOAT64_ELEMENTS, ByteArray) \ + V(Uint8ClampedElementsAccessor, UINT8_CLAMPED_ELEMENTS, ByteArray) \ + V(BigUint64ElementsAccessor, BIGUINT64_ELEMENTS, ByteArray) \ + V(BigInt64ElementsAccessor, BIGINT64_ELEMENTS, ByteArray) + +template +class ElementsKindTraits { + public: + using BackingStore = FixedArrayBase; +}; + +#define ELEMENTS_TRAITS(Class, KindParam, Store) \ + template <> \ + class ElementsKindTraits { \ + public: /* NOLINT */ \ + static constexpr ElementsKind Kind = KindParam; \ + using BackingStore = Store; \ + }; \ + constexpr ElementsKind ElementsKindTraits::Kind; +ELEMENTS_LIST(ELEMENTS_TRAITS) +#undef ELEMENTS_TRAITS + +V8_WARN_UNUSED_RESULT +MaybeHandle ThrowArrayLengthRangeError(Isolate* isolate) { + THROW_NEW_ERROR(isolate, NewRangeError(MessageTemplate::kInvalidArrayLength), + Object); +} + +WriteBarrierMode GetWriteBarrierMode(ElementsKind kind) { + if (IsSmiElementsKind(kind)) return SKIP_WRITE_BARRIER; + if (IsDoubleElementsKind(kind)) return SKIP_WRITE_BARRIER; + return UPDATE_WRITE_BARRIER; +} + +void CopyObjectToObjectElements(Isolate* isolate, FixedArrayBase from_base, + ElementsKind from_kind, uint32_t from_start, + FixedArrayBase to_base, ElementsKind to_kind, + uint32_t to_start, int raw_copy_size) { + ReadOnlyRoots roots(isolate); + DCHECK(to_base.map() != roots.fixed_cow_array_map()); + DisallowHeapAllocation no_allocation; + int copy_size = raw_copy_size; + if (raw_copy_size < 0) { + DCHECK(raw_copy_size == ElementsAccessor::kCopyToEnd || + raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole); + copy_size = + Min(from_base.length() - from_start, to_base.length() - to_start); + if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) { + int start = to_start + copy_size; + int length = to_base.length() - start; + if (length > 0) { + MemsetTagged(FixedArray::cast(to_base).RawFieldOfElementAt(start), + 
roots.the_hole_value(), length); + } + } + } + DCHECK((copy_size + static_cast(to_start)) <= to_base.length() && + (copy_size + static_cast(from_start)) <= from_base.length()); + if (copy_size == 0) return; + FixedArray from = FixedArray::cast(from_base); + FixedArray to = FixedArray::cast(to_base); + DCHECK(IsSmiOrObjectElementsKind(from_kind)); + DCHECK(IsSmiOrObjectElementsKind(to_kind)); + + WriteBarrierMode write_barrier_mode = + (IsObjectElementsKind(from_kind) && IsObjectElementsKind(to_kind)) + ? UPDATE_WRITE_BARRIER + : SKIP_WRITE_BARRIER; + to.CopyElements(isolate, to_start, from, from_start, copy_size, + write_barrier_mode); +} + +static void CopyDictionaryToObjectElements( + Isolate* isolate, FixedArrayBase from_base, uint32_t from_start, + FixedArrayBase to_base, ElementsKind to_kind, uint32_t to_start, + int raw_copy_size) { + DisallowHeapAllocation no_allocation; + NumberDictionary from = NumberDictionary::cast(from_base); + int copy_size = raw_copy_size; + if (raw_copy_size < 0) { + DCHECK(raw_copy_size == ElementsAccessor::kCopyToEnd || + raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole); + copy_size = from.max_number_key() + 1 - from_start; + if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) { + int start = to_start + copy_size; + int length = to_base.length() - start; + if (length > 0) { + MemsetTagged(FixedArray::cast(to_base).RawFieldOfElementAt(start), + ReadOnlyRoots(isolate).the_hole_value(), length); + } + } + } + DCHECK(to_base != from_base); + DCHECK(IsSmiOrObjectElementsKind(to_kind)); + if (copy_size == 0) return; + FixedArray to = FixedArray::cast(to_base); + uint32_t to_length = to.length(); + if (to_start + copy_size > to_length) { + copy_size = to_length - to_start; + } + WriteBarrierMode write_barrier_mode = GetWriteBarrierMode(to_kind); + for (int i = 0; i < copy_size; i++) { + int entry = from.FindEntry(isolate, i + from_start); + if (entry != NumberDictionary::kNotFound) { + Object value = 
from.ValueAt(entry); + DCHECK(!value.IsTheHole(isolate)); + to.set(i + to_start, value, write_barrier_mode); + } else { + to.set_the_hole(isolate, i + to_start); + } + } +} + +// NOTE: this method violates the handlified function signature convention: +// raw pointer parameters in the function that allocates. +// See ElementsAccessorBase::CopyElements() for details. +static void CopyDoubleToObjectElements(Isolate* isolate, + FixedArrayBase from_base, + uint32_t from_start, + FixedArrayBase to_base, + uint32_t to_start, int raw_copy_size) { + int copy_size = raw_copy_size; + if (raw_copy_size < 0) { + DisallowHeapAllocation no_allocation; + DCHECK(raw_copy_size == ElementsAccessor::kCopyToEnd || + raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole); + copy_size = + Min(from_base.length() - from_start, to_base.length() - to_start); + if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) { + // Also initialize the area that will be copied over since HeapNumber + // allocation below can cause an incremental marking step, requiring all + // existing heap objects to be propertly initialized. + int start = to_start; + int length = to_base.length() - start; + if (length > 0) { + MemsetTagged(FixedArray::cast(to_base).RawFieldOfElementAt(start), + ReadOnlyRoots(isolate).the_hole_value(), length); + } + } + } + + DCHECK((copy_size + static_cast(to_start)) <= to_base.length() && + (copy_size + static_cast(from_start)) <= from_base.length()); + if (copy_size == 0) return; + + // From here on, the code below could actually allocate. Therefore the raw + // values are wrapped into handles. + Handle from(FixedDoubleArray::cast(from_base), isolate); + Handle to(FixedArray::cast(to_base), isolate); + + // Use an outer loop to not waste too much time on creating HandleScopes. + // On the other hand we might overflow a single handle scope depending on + // the copy_size. 
+ int offset = 0; + while (offset < copy_size) { + HandleScope scope(isolate); + offset += 100; + for (int i = offset - 100; i < offset && i < copy_size; ++i) { + Handle value = + FixedDoubleArray::get(*from, i + from_start, isolate); + to->set(i + to_start, *value, UPDATE_WRITE_BARRIER); + } + } +} + +static void CopyDoubleToDoubleElements(FixedArrayBase from_base, + uint32_t from_start, + FixedArrayBase to_base, + uint32_t to_start, int raw_copy_size) { + DisallowHeapAllocation no_allocation; + int copy_size = raw_copy_size; + if (raw_copy_size < 0) { + DCHECK(raw_copy_size == ElementsAccessor::kCopyToEnd || + raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole); + copy_size = + Min(from_base.length() - from_start, to_base.length() - to_start); + if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) { + for (int i = to_start + copy_size; i < to_base.length(); ++i) { + FixedDoubleArray::cast(to_base).set_the_hole(i); + } + } + } + DCHECK((copy_size + static_cast(to_start)) <= to_base.length() && + (copy_size + static_cast(from_start)) <= from_base.length()); + if (copy_size == 0) return; + FixedDoubleArray from = FixedDoubleArray::cast(from_base); + FixedDoubleArray to = FixedDoubleArray::cast(to_base); + Address to_address = to.address() + FixedDoubleArray::kHeaderSize; + Address from_address = from.address() + FixedDoubleArray::kHeaderSize; + to_address += kDoubleSize * to_start; + from_address += kDoubleSize * from_start; +#ifdef V8_COMPRESS_POINTERS + // TODO(ishell, v8:8875): we use CopyTagged() in order to avoid unaligned + // access to double values in the arrays. This will no longed be necessary + // once the allocations alignment issue is fixed. 
+ int words_per_double = (kDoubleSize / kTaggedSize); + CopyTagged(to_address, from_address, + static_cast(words_per_double * copy_size)); +#else + int words_per_double = (kDoubleSize / kSystemPointerSize); + CopyWords(to_address, from_address, + static_cast(words_per_double * copy_size)); +#endif +} + +static void CopySmiToDoubleElements(FixedArrayBase from_base, + uint32_t from_start, FixedArrayBase to_base, + uint32_t to_start, int raw_copy_size) { + DisallowHeapAllocation no_allocation; + int copy_size = raw_copy_size; + if (raw_copy_size < 0) { + DCHECK(raw_copy_size == ElementsAccessor::kCopyToEnd || + raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole); + copy_size = from_base.length() - from_start; + if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) { + for (int i = to_start + copy_size; i < to_base.length(); ++i) { + FixedDoubleArray::cast(to_base).set_the_hole(i); + } + } + } + DCHECK((copy_size + static_cast(to_start)) <= to_base.length() && + (copy_size + static_cast(from_start)) <= from_base.length()); + if (copy_size == 0) return; + FixedArray from = FixedArray::cast(from_base); + FixedDoubleArray to = FixedDoubleArray::cast(to_base); + Object the_hole = from.GetReadOnlyRoots().the_hole_value(); + for (uint32_t from_end = from_start + static_cast(copy_size); + from_start < from_end; from_start++, to_start++) { + Object hole_or_smi = from.get(from_start); + if (hole_or_smi == the_hole) { + to.set_the_hole(to_start); + } else { + to.set(to_start, Smi::ToInt(hole_or_smi)); + } + } +} + +static void CopyPackedSmiToDoubleElements(FixedArrayBase from_base, + uint32_t from_start, + FixedArrayBase to_base, + uint32_t to_start, int packed_size, + int raw_copy_size) { + DisallowHeapAllocation no_allocation; + int copy_size = raw_copy_size; + uint32_t to_end; + if (raw_copy_size < 0) { + DCHECK(raw_copy_size == ElementsAccessor::kCopyToEnd || + raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole); + copy_size = 
packed_size - from_start; + if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) { + to_end = to_base.length(); + for (uint32_t i = to_start + copy_size; i < to_end; ++i) { + FixedDoubleArray::cast(to_base).set_the_hole(i); + } + } else { + to_end = to_start + static_cast(copy_size); + } + } else { + to_end = to_start + static_cast(copy_size); + } + DCHECK(static_cast(to_end) <= to_base.length()); + DCHECK(packed_size >= 0 && packed_size <= copy_size); + DCHECK((copy_size + static_cast(to_start)) <= to_base.length() && + (copy_size + static_cast(from_start)) <= from_base.length()); + if (copy_size == 0) return; + FixedArray from = FixedArray::cast(from_base); + FixedDoubleArray to = FixedDoubleArray::cast(to_base); + for (uint32_t from_end = from_start + static_cast(packed_size); + from_start < from_end; from_start++, to_start++) { + Object smi = from.get(from_start); + DCHECK(!smi.IsTheHole()); + to.set(to_start, Smi::ToInt(smi)); + } +} + +static void CopyObjectToDoubleElements(FixedArrayBase from_base, + uint32_t from_start, + FixedArrayBase to_base, + uint32_t to_start, int raw_copy_size) { + DisallowHeapAllocation no_allocation; + int copy_size = raw_copy_size; + if (raw_copy_size < 0) { + DCHECK(raw_copy_size == ElementsAccessor::kCopyToEnd || + raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole); + copy_size = from_base.length() - from_start; + if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) { + for (int i = to_start + copy_size; i < to_base.length(); ++i) { + FixedDoubleArray::cast(to_base).set_the_hole(i); + } + } + } + DCHECK((copy_size + static_cast(to_start)) <= to_base.length() && + (copy_size + static_cast(from_start)) <= from_base.length()); + if (copy_size == 0) return; + FixedArray from = FixedArray::cast(from_base); + FixedDoubleArray to = FixedDoubleArray::cast(to_base); + Object the_hole = from.GetReadOnlyRoots().the_hole_value(); + for (uint32_t from_end = from_start + copy_size; from_start < 
from_end; + from_start++, to_start++) { + Object hole_or_object = from.get(from_start); + if (hole_or_object == the_hole) { + to.set_the_hole(to_start); + } else { + to.set(to_start, hole_or_object.Number()); + } + } +} + +static void CopyDictionaryToDoubleElements( + Isolate* isolate, FixedArrayBase from_base, uint32_t from_start, + FixedArrayBase to_base, uint32_t to_start, int raw_copy_size) { + DisallowHeapAllocation no_allocation; + NumberDictionary from = NumberDictionary::cast(from_base); + int copy_size = raw_copy_size; + if (copy_size < 0) { + DCHECK(copy_size == ElementsAccessor::kCopyToEnd || + copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole); + copy_size = from.max_number_key() + 1 - from_start; + if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) { + for (int i = to_start + copy_size; i < to_base.length(); ++i) { + FixedDoubleArray::cast(to_base).set_the_hole(i); + } + } + } + if (copy_size == 0) return; + FixedDoubleArray to = FixedDoubleArray::cast(to_base); + uint32_t to_length = to.length(); + if (to_start + copy_size > to_length) { + copy_size = to_length - to_start; + } + for (int i = 0; i < copy_size; i++) { + int entry = from.FindEntry(isolate, i + from_start); + if (entry != NumberDictionary::kNotFound) { + to.set(i + to_start, from.ValueAt(entry).Number()); + } else { + to.set_the_hole(i + to_start); + } + } +} + +static void SortIndices(Isolate* isolate, Handle indices, + uint32_t sort_size) { + // Use AtomicSlot wrapper to ensure that std::sort uses atomic load and + // store operations that are safe for concurrent marking. 
+ AtomicSlot start(indices->GetFirstElementAddress()); + AtomicSlot end(start + sort_size); + std::sort(start, end, [isolate](Tagged_t elementA, Tagged_t elementB) { +#ifdef V8_COMPRESS_POINTERS + DEFINE_ROOT_VALUE(isolate); + Object a(DecompressTaggedAny(ROOT_VALUE, elementA)); + Object b(DecompressTaggedAny(ROOT_VALUE, elementB)); +#else + Object a(elementA); + Object b(elementB); +#endif + if (a.IsSmi() || !a.IsUndefined(isolate)) { + if (!b.IsSmi() && b.IsUndefined(isolate)) { + return true; + } + return a.Number() < b.Number(); + } + return !b.IsSmi() && b.IsUndefined(isolate); + }); + isolate->heap()->WriteBarrierForRange(*indices, ObjectSlot(start), + ObjectSlot(end)); +} + +static Maybe IncludesValueSlowPath(Isolate* isolate, + Handle receiver, + Handle value, + uint32_t start_from, uint32_t length) { + bool search_for_hole = value->IsUndefined(isolate); + for (uint32_t k = start_from; k < length; ++k) { + LookupIterator it(isolate, receiver, k); + if (!it.IsFound()) { + if (search_for_hole) return Just(true); + continue; + } + Handle element_k; + ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, element_k, + Object::GetProperty(&it), Nothing()); + + if (value->SameValueZero(*element_k)) return Just(true); + } + + return Just(false); +} + +static Maybe IndexOfValueSlowPath(Isolate* isolate, + Handle receiver, + Handle value, + uint32_t start_from, + uint32_t length) { + for (uint32_t k = start_from; k < length; ++k) { + LookupIterator it(isolate, receiver, k); + if (!it.IsFound()) { + continue; + } + Handle element_k; + ASSIGN_RETURN_ON_EXCEPTION_VALUE( + isolate, element_k, Object::GetProperty(&it), Nothing()); + + if (value->StrictEquals(*element_k)) return Just(k); + } + + return Just(-1); +} + +// The InternalElementsAccessor is a helper class to expose otherwise protected +// methods to its subclasses. Namely, we don't want to publicly expose methods +// that take an entry (instead of an index) as an argument. 
+class InternalElementsAccessor : public ElementsAccessor { + public: + uint32_t GetEntryForIndex(Isolate* isolate, JSObject holder, + FixedArrayBase backing_store, + uint32_t index) override = 0; + + PropertyDetails GetDetails(JSObject holder, uint32_t entry) override = 0; +}; + +// Base class for element handler implementations. Contains the +// the common logic for objects with different ElementsKinds. +// Subclasses must specialize method for which the element +// implementation differs from the base class implementation. +// +// This class is intended to be used in the following way: +// +// class SomeElementsAccessor : +// public ElementsAccessorBase { +// ... +// } +// +// This is an example of the Curiously Recurring Template Pattern (see +// http://en.wikipedia.org/wiki/Curiously_recurring_template_pattern). We use +// CRTP to guarantee aggressive compile time optimizations (i.e. inlining and +// specialization of SomeElementsAccessor methods). +template +class ElementsAccessorBase : public InternalElementsAccessor { + public: + ElementsAccessorBase() = default; + + using ElementsTraits = ElementsTraitsParam; + using BackingStore = typename ElementsTraitsParam::BackingStore; + + static ElementsKind kind() { return ElementsTraits::Kind; } + + static void ValidateContents(JSObject holder, int length) {} + + static void ValidateImpl(JSObject holder) { + FixedArrayBase fixed_array_base = holder.elements(); + if (!fixed_array_base.IsHeapObject()) return; + // Arrays that have been shifted in place can't be verified. + if (fixed_array_base.IsFiller()) return; + int length = 0; + if (holder.IsJSArray()) { + Object length_obj = JSArray::cast(holder).length(); + if (length_obj.IsSmi()) { + length = Smi::ToInt(length_obj); + } + } else if (holder.IsJSTypedArray()) { + // TODO(bmeurer, v8:4153): Change this to size_t later. 
+ length = static_cast(JSTypedArray::cast(holder).length()); + } else { + length = fixed_array_base.length(); + } + Subclass::ValidateContents(holder, length); + } + + void Validate(JSObject holder) final { + DisallowHeapAllocation no_gc; + Subclass::ValidateImpl(holder); + } + + static bool IsPackedImpl(JSObject holder, FixedArrayBase backing_store, + uint32_t start, uint32_t end) { + DisallowHeapAllocation no_gc; + if (IsFastPackedElementsKind(kind())) return true; + Isolate* isolate = holder.GetIsolate(); + for (uint32_t i = start; i < end; i++) { + if (!Subclass::HasElementImpl(isolate, holder, i, backing_store, + ALL_PROPERTIES)) { + return false; + } + } + return true; + } + + static void TryTransitionResultArrayToPacked(Handle array) { + if (!IsHoleyElementsKind(kind())) return; + Handle backing_store(array->elements(), + array->GetIsolate()); + int length = Smi::ToInt(array->length()); + if (!Subclass::IsPackedImpl(*array, *backing_store, 0, length)) return; + + ElementsKind packed_kind = GetPackedElementsKind(kind()); + Handle new_map = + JSObject::GetElementsTransitionMap(array, packed_kind); + JSObject::MigrateToMap(array, new_map); + if (FLAG_trace_elements_transitions) { + JSObject::PrintElementsTransition(stdout, array, kind(), backing_store, + packed_kind, backing_store); + } + } + + bool HasElement(JSObject holder, uint32_t index, FixedArrayBase backing_store, + PropertyFilter filter) final { + return Subclass::HasElementImpl(holder.GetIsolate(), holder, index, + backing_store, filter); + } + + static bool HasElementImpl(Isolate* isolate, JSObject holder, uint32_t index, + FixedArrayBase backing_store, + PropertyFilter filter = ALL_PROPERTIES) { + return Subclass::GetEntryForIndexImpl(isolate, holder, backing_store, index, + filter) != kMaxUInt32; + } + + bool HasEntry(JSObject holder, uint32_t entry) final { + return Subclass::HasEntryImpl(holder.GetIsolate(), holder.elements(), + entry); + } + + static bool HasEntryImpl(Isolate* isolate, 
FixedArrayBase backing_store, + uint32_t entry) { + UNIMPLEMENTED(); + } + + bool HasAccessors(JSObject holder) final { + return Subclass::HasAccessorsImpl(holder, holder.elements()); + } + + static bool HasAccessorsImpl(JSObject holder, FixedArrayBase backing_store) { + return false; + } + + Handle Get(Handle holder, uint32_t entry) final { + return Subclass::GetInternalImpl(holder, entry); + } + + static Handle GetInternalImpl(Handle holder, + uint32_t entry) { + return Subclass::GetImpl(holder->GetIsolate(), holder->elements(), entry); + } + + static Handle GetImpl(Isolate* isolate, FixedArrayBase backing_store, + uint32_t entry) { + uint32_t index = GetIndexForEntryImpl(backing_store, entry); + return handle(BackingStore::cast(backing_store).get(index), isolate); + } + + void Set(Handle holder, uint32_t entry, Object value) final { + Subclass::SetImpl(holder, entry, value); + } + + void Reconfigure(Handle object, Handle store, + uint32_t entry, Handle value, + PropertyAttributes attributes) final { + Subclass::ReconfigureImpl(object, store, entry, value, attributes); + } + + static void ReconfigureImpl(Handle object, + Handle store, uint32_t entry, + Handle value, + PropertyAttributes attributes) { + UNREACHABLE(); + } + + void Add(Handle object, uint32_t index, Handle value, + PropertyAttributes attributes, uint32_t new_capacity) final { + Subclass::AddImpl(object, index, value, attributes, new_capacity); + } + + static void AddImpl(Handle object, uint32_t index, + Handle value, PropertyAttributes attributes, + uint32_t new_capacity) { + UNREACHABLE(); + } + + uint32_t Push(Handle receiver, Arguments* args, + uint32_t push_size) final { + return Subclass::PushImpl(receiver, args, push_size); + } + + static uint32_t PushImpl(Handle receiver, Arguments* args, + uint32_t push_sized) { + UNREACHABLE(); + } + + uint32_t Unshift(Handle receiver, Arguments* args, + uint32_t unshift_size) final { + return Subclass::UnshiftImpl(receiver, args, unshift_size); + } + + 
static uint32_t UnshiftImpl(Handle receiver, Arguments* args, + uint32_t unshift_size) { + UNREACHABLE(); + } + + Handle Pop(Handle receiver) final { + return Subclass::PopImpl(receiver); + } + + static Handle PopImpl(Handle receiver) { UNREACHABLE(); } + + Handle Shift(Handle receiver) final { + return Subclass::ShiftImpl(receiver); + } + + static Handle ShiftImpl(Handle receiver) { UNREACHABLE(); } + + void SetLength(Handle array, uint32_t length) final { + Subclass::SetLengthImpl(array->GetIsolate(), array, length, + handle(array->elements(), array->GetIsolate())); + } + + static void SetLengthImpl(Isolate* isolate, Handle array, + uint32_t length, + Handle backing_store) { + DCHECK(!array->SetLengthWouldNormalize(length)); + DCHECK(IsFastElementsKind(array->GetElementsKind())); + uint32_t old_length = 0; + CHECK(array->length().ToArrayIndex(&old_length)); + + if (old_length < length) { + ElementsKind kind = array->GetElementsKind(); + if (!IsHoleyElementsKind(kind)) { + kind = GetHoleyElementsKind(kind); + JSObject::TransitionElementsKind(array, kind); + } + } + + // Check whether the backing store should be shrunk. + uint32_t capacity = backing_store->length(); + old_length = Min(old_length, capacity); + if (length == 0) { + array->initialize_elements(); + } else if (length <= capacity) { + if (IsSmiOrObjectElementsKind(kind())) { + JSObject::EnsureWritableFastElements(array); + if (array->elements() != *backing_store) { + backing_store = handle(array->elements(), isolate); + } + } + if (2 * length + JSObject::kMinAddedElementsCapacity <= capacity) { + // If more than half the elements won't be used, trim the array. + // Do not trim from short arrays to prevent frequent trimming on + // repeated pop operations. + // Leave some space to allow for subsequent push operations. + int elements_to_trim = length + 1 == old_length + ? 
(capacity - length) / 2 + : capacity - length; + isolate->heap()->RightTrimFixedArray(*backing_store, elements_to_trim); + // Fill the non-trimmed elements with holes. + BackingStore::cast(*backing_store) + .FillWithHoles(length, + std::min(old_length, capacity - elements_to_trim)); + } else { + // Otherwise, fill the unused tail with holes. + BackingStore::cast(*backing_store).FillWithHoles(length, old_length); + } + } else { + // Check whether the backing store should be expanded. + capacity = Max(length, JSObject::NewElementsCapacity(capacity)); + Subclass::GrowCapacityAndConvertImpl(array, capacity); + } + + array->set_length(Smi::FromInt(length)); + JSObject::ValidateElements(*array); + } + + uint32_t NumberOfElements(JSObject receiver) final { + return Subclass::NumberOfElementsImpl(receiver, receiver.elements()); + } + + static uint32_t NumberOfElementsImpl(JSObject receiver, + FixedArrayBase backing_store) { + UNREACHABLE(); + } + + static uint32_t GetMaxIndex(JSObject receiver, FixedArrayBase elements) { + if (receiver.IsJSArray()) { + DCHECK(JSArray::cast(receiver).length().IsSmi()); + return static_cast( + Smi::ToInt(JSArray::cast(receiver).length())); + } + return Subclass::GetCapacityImpl(receiver, elements); + } + + static uint32_t GetMaxNumberOfEntries(JSObject receiver, + FixedArrayBase elements) { + return Subclass::GetMaxIndex(receiver, elements); + } + + static Handle ConvertElementsWithCapacity( + Handle object, Handle old_elements, + ElementsKind from_kind, uint32_t capacity) { + return ConvertElementsWithCapacity( + object, old_elements, from_kind, capacity, 0, 0, + ElementsAccessor::kCopyToEndAndInitializeToHole); + } + + static Handle ConvertElementsWithCapacity( + Handle object, Handle old_elements, + ElementsKind from_kind, uint32_t capacity, int copy_size) { + return ConvertElementsWithCapacity(object, old_elements, from_kind, + capacity, 0, 0, copy_size); + } + + static Handle ConvertElementsWithCapacity( + Handle object, Handle 
old_elements, + ElementsKind from_kind, uint32_t capacity, uint32_t src_index, + uint32_t dst_index, int copy_size) { + Isolate* isolate = object->GetIsolate(); + Handle new_elements; + if (IsDoubleElementsKind(kind())) { + new_elements = isolate->factory()->NewFixedDoubleArray(capacity); + } else { + new_elements = isolate->factory()->NewUninitializedFixedArray(capacity); + } + + int packed_size = kPackedSizeNotKnown; + if (IsFastPackedElementsKind(from_kind) && object->IsJSArray()) { + packed_size = Smi::ToInt(JSArray::cast(*object).length()); + } + + Subclass::CopyElementsImpl(isolate, *old_elements, src_index, *new_elements, + from_kind, dst_index, packed_size, copy_size); + + return new_elements; + } + + static void TransitionElementsKindImpl(Handle object, + Handle to_map) { + Handle from_map = handle(object->map(), object->GetIsolate()); + ElementsKind from_kind = from_map->elements_kind(); + ElementsKind to_kind = to_map->elements_kind(); + if (IsHoleyElementsKind(from_kind)) { + to_kind = GetHoleyElementsKind(to_kind); + } + if (from_kind != to_kind) { + // This method should never be called for any other case. + DCHECK(IsFastElementsKind(from_kind)); + DCHECK(IsFastElementsKind(to_kind)); + DCHECK_NE(TERMINAL_FAST_ELEMENTS_KIND, from_kind); + + Handle from_elements(object->elements(), + object->GetIsolate()); + if (object->elements() == + object->GetReadOnlyRoots().empty_fixed_array() || + IsDoubleElementsKind(from_kind) == IsDoubleElementsKind(to_kind)) { + // No change is needed to the elements() buffer, the transition + // only requires a map change. 
+ JSObject::MigrateToMap(object, to_map); + } else { + DCHECK( + (IsSmiElementsKind(from_kind) && IsDoubleElementsKind(to_kind)) || + (IsDoubleElementsKind(from_kind) && IsObjectElementsKind(to_kind))); + uint32_t capacity = static_cast(object->elements().length()); + Handle elements = ConvertElementsWithCapacity( + object, from_elements, from_kind, capacity); + JSObject::SetMapAndElements(object, to_map, elements); + } + if (FLAG_trace_elements_transitions) { + JSObject::PrintElementsTransition( + stdout, object, from_kind, from_elements, to_kind, + handle(object->elements(), object->GetIsolate())); + } + } + } + + static void GrowCapacityAndConvertImpl(Handle object, + uint32_t capacity) { + ElementsKind from_kind = object->GetElementsKind(); + if (IsSmiOrObjectElementsKind(from_kind)) { + // Array optimizations rely on the prototype lookups of Array objects + // always returning undefined. If there is a store to the initial + // prototype object, make sure all of these optimizations are invalidated. + object->GetIsolate()->UpdateNoElementsProtectorOnSetLength(object); + } + Handle old_elements(object->elements(), + object->GetIsolate()); + // This method should only be called if there's a reason to update the + // elements. 
+ DCHECK(IsDoubleElementsKind(from_kind) != IsDoubleElementsKind(kind()) || + IsDictionaryElementsKind(from_kind) || + static_cast(old_elements->length()) < capacity); + Subclass::BasicGrowCapacityAndConvertImpl(object, old_elements, from_kind, + kind(), capacity); + } + + static void BasicGrowCapacityAndConvertImpl( + Handle object, Handle old_elements, + ElementsKind from_kind, ElementsKind to_kind, uint32_t capacity) { + Handle elements = + ConvertElementsWithCapacity(object, old_elements, from_kind, capacity); + + if (IsHoleyElementsKind(from_kind)) { + to_kind = GetHoleyElementsKind(to_kind); + } + Handle new_map = JSObject::GetElementsTransitionMap(object, to_kind); + JSObject::SetMapAndElements(object, new_map, elements); + + // Transition through the allocation site as well if present. + JSObject::UpdateAllocationSite(object, to_kind); + + if (FLAG_trace_elements_transitions) { + JSObject::PrintElementsTransition(stdout, object, from_kind, old_elements, + to_kind, elements); + } + } + + void TransitionElementsKind(Handle object, Handle map) final { + Subclass::TransitionElementsKindImpl(object, map); + } + + void GrowCapacityAndConvert(Handle object, + uint32_t capacity) final { + Subclass::GrowCapacityAndConvertImpl(object, capacity); + } + + bool GrowCapacity(Handle object, uint32_t index) final { + // This function is intended to be called from optimized code. We don't + // want to trigger lazy deopts there, so refuse to handle cases that would. + if (object->map().is_prototype_map() || + object->WouldConvertToSlowElements(index)) { + return false; + } + Handle old_elements(object->elements(), + object->GetIsolate()); + uint32_t new_capacity = JSObject::NewElementsCapacity(index + 1); + DCHECK(static_cast(old_elements->length()) < new_capacity); + Handle elements = + ConvertElementsWithCapacity(object, old_elements, kind(), new_capacity); + + DCHECK_EQ(object->GetElementsKind(), kind()); + // Transition through the allocation site as well if present. 
+ if (JSObject::UpdateAllocationSite( + object, kind())) { + return false; + } + + object->set_elements(*elements); + return true; + } + + void Delete(Handle obj, uint32_t entry) final { + Subclass::DeleteImpl(obj, entry); + } + + static void CopyElementsImpl(Isolate* isolate, FixedArrayBase from, + uint32_t from_start, FixedArrayBase to, + ElementsKind from_kind, uint32_t to_start, + int packed_size, int copy_size) { + UNREACHABLE(); + } + + void CopyElements(JSObject from_holder, uint32_t from_start, + ElementsKind from_kind, Handle to, + uint32_t to_start, int copy_size) final { + int packed_size = kPackedSizeNotKnown; + bool is_packed = + IsFastPackedElementsKind(from_kind) && from_holder.IsJSArray(); + if (is_packed) { + packed_size = Smi::ToInt(JSArray::cast(from_holder).length()); + if (copy_size >= 0 && packed_size > copy_size) { + packed_size = copy_size; + } + } + FixedArrayBase from = from_holder.elements(); + // NOTE: the Subclass::CopyElementsImpl() methods + // violate the handlified function signature convention: + // raw pointer parameters in the function that allocates. This is done + // intentionally to avoid ArrayConcat() builtin performance degradation. + // + // Details: The idea is that allocations actually happen only in case of + // copying from object with fast double elements to object with object + // elements. In all the other cases there are no allocations performed and + // handle creation causes noticeable performance degradation of the builtin. 
+ Subclass::CopyElementsImpl(from_holder.GetIsolate(), from, from_start, *to, + from_kind, to_start, packed_size, copy_size); + } + + void CopyElements(Isolate* isolate, Handle source, + ElementsKind source_kind, + Handle destination, int size) override { + Subclass::CopyElementsImpl(isolate, *source, 0, *destination, source_kind, + 0, kPackedSizeNotKnown, size); + } + + void CopyTypedArrayElementsSlice(JSTypedArray source, + JSTypedArray destination, size_t start, + size_t end) override { + Subclass::CopyTypedArrayElementsSliceImpl(source, destination, start, end); + } + + static void CopyTypedArrayElementsSliceImpl(JSTypedArray source, + JSTypedArray destination, + size_t start, size_t end) { + UNREACHABLE(); + } + + Object CopyElements(Handle source, Handle destination, + size_t length, uint32_t offset) final { + return Subclass::CopyElementsHandleImpl(source, destination, length, + offset); + } + + static Object CopyElementsHandleImpl(Handle source, + Handle destination, + size_t length, uint32_t offset) { + UNREACHABLE(); + } + + Handle Normalize(Handle object) final { + return Subclass::NormalizeImpl( + object, handle(object->elements(), object->GetIsolate())); + } + + static Handle NormalizeImpl( + Handle object, Handle elements) { + UNREACHABLE(); + } + + Maybe CollectValuesOrEntries(Isolate* isolate, Handle object, + Handle values_or_entries, + bool get_entries, int* nof_items, + PropertyFilter filter) override { + return Subclass::CollectValuesOrEntriesImpl( + isolate, object, values_or_entries, get_entries, nof_items, filter); + } + + static Maybe CollectValuesOrEntriesImpl( + Isolate* isolate, Handle object, + Handle values_or_entries, bool get_entries, int* nof_items, + PropertyFilter filter) { + DCHECK_EQ(*nof_items, 0); + KeyAccumulator accumulator(isolate, KeyCollectionMode::kOwnOnly, + ALL_PROPERTIES); + Subclass::CollectElementIndicesImpl( + object, handle(object->elements(), isolate), &accumulator); + Handle keys = accumulator.GetKeys(); + + int 
count = 0; + int i = 0; + ElementsKind original_elements_kind = object->GetElementsKind(); + + for (; i < keys->length(); ++i) { + Handle key(keys->get(i), isolate); + uint32_t index; + if (!key->ToUint32(&index)) continue; + + DCHECK_EQ(object->GetElementsKind(), original_elements_kind); + uint32_t entry = Subclass::GetEntryForIndexImpl( + isolate, *object, object->elements(), index, filter); + if (entry == kMaxUInt32) continue; + PropertyDetails details = Subclass::GetDetailsImpl(*object, entry); + + Handle value; + if (details.kind() == kData) { + value = Subclass::GetInternalImpl(object, entry); + } else { + // This might modify the elements and/or change the elements kind. + LookupIterator it(isolate, object, index, LookupIterator::OWN); + ASSIGN_RETURN_ON_EXCEPTION_VALUE( + isolate, value, Object::GetProperty(&it), Nothing()); + } + if (get_entries) value = MakeEntryPair(isolate, index, value); + values_or_entries->set(count++, *value); + if (object->GetElementsKind() != original_elements_kind) break; + } + + // Slow path caused by changes in elements kind during iteration. 
+ for (; i < keys->length(); i++) { + Handle key(keys->get(i), isolate); + uint32_t index; + if (!key->ToUint32(&index)) continue; + + if (filter & ONLY_ENUMERABLE) { + InternalElementsAccessor* accessor = + reinterpret_cast( + object->GetElementsAccessor()); + uint32_t entry = accessor->GetEntryForIndex(isolate, *object, + object->elements(), index); + if (entry == kMaxUInt32) continue; + PropertyDetails details = accessor->GetDetails(*object, entry); + if (!details.IsEnumerable()) continue; + } + + Handle value; + LookupIterator it(isolate, object, index, LookupIterator::OWN); + ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, value, Object::GetProperty(&it), + Nothing()); + + if (get_entries) value = MakeEntryPair(isolate, index, value); + values_or_entries->set(count++, *value); + } + + *nof_items = count; + return Just(true); + } + + void CollectElementIndices(Handle object, + Handle backing_store, + KeyAccumulator* keys) final { + if (keys->filter() & ONLY_ALL_CAN_READ) return; + Subclass::CollectElementIndicesImpl(object, backing_store, keys); + } + + static void CollectElementIndicesImpl(Handle object, + Handle backing_store, + KeyAccumulator* keys) { + DCHECK_NE(DICTIONARY_ELEMENTS, kind()); + // Non-dictionary elements can't have all-can-read accessors. 
+ uint32_t length = Subclass::GetMaxIndex(*object, *backing_store); + PropertyFilter filter = keys->filter(); + Isolate* isolate = keys->isolate(); + Factory* factory = isolate->factory(); + for (uint32_t i = 0; i < length; i++) { + if (Subclass::HasElementImpl(isolate, *object, i, *backing_store, + filter)) { + keys->AddKey(factory->NewNumberFromUint(i)); + } + } + } + + static Handle DirectCollectElementIndicesImpl( + Isolate* isolate, Handle object, + Handle backing_store, GetKeysConversion convert, + PropertyFilter filter, Handle list, uint32_t* nof_indices, + uint32_t insertion_index = 0) { + uint32_t length = Subclass::GetMaxIndex(*object, *backing_store); + uint32_t const kMaxStringTableEntries = + isolate->heap()->MaxNumberToStringCacheSize(); + for (uint32_t i = 0; i < length; i++) { + if (Subclass::HasElementImpl(isolate, *object, i, *backing_store, + filter)) { + if (convert == GetKeysConversion::kConvertToString) { + bool use_cache = i < kMaxStringTableEntries; + Handle index_string = + isolate->factory()->Uint32ToString(i, use_cache); + list->set(insertion_index, *index_string); + } else { + list->set(insertion_index, Smi::FromInt(i)); + } + insertion_index++; + } + } + *nof_indices = insertion_index; + return list; + } + + MaybeHandle PrependElementIndices( + Handle object, Handle backing_store, + Handle keys, GetKeysConversion convert, + PropertyFilter filter) final { + return Subclass::PrependElementIndicesImpl(object, backing_store, keys, + convert, filter); + } + + static MaybeHandle PrependElementIndicesImpl( + Handle object, Handle backing_store, + Handle keys, GetKeysConversion convert, + PropertyFilter filter) { + Isolate* isolate = object->GetIsolate(); + uint32_t nof_property_keys = keys->length(); + uint32_t initial_list_length = + Subclass::GetMaxNumberOfEntries(*object, *backing_store); + + initial_list_length += nof_property_keys; + if (initial_list_length > FixedArray::kMaxLength || + initial_list_length < nof_property_keys) { + return 
isolate->Throw(isolate->factory()->NewRangeError( + MessageTemplate::kInvalidArrayLength)); + } + + // Collect the element indices into a new list. + MaybeHandle raw_array = + isolate->factory()->TryNewFixedArray(initial_list_length); + Handle combined_keys; + + // If we have a holey backing store try to precisely estimate the backing + // store size as a last emergency measure if we cannot allocate the big + // array. + if (!raw_array.ToHandle(&combined_keys)) { + if (IsHoleyOrDictionaryElementsKind(kind())) { + // If we overestimate the result list size we might end up in the + // large-object space which doesn't free memory on shrinking the list. + // Hence we try to estimate the final size for holey backing stores more + // precisely here. + initial_list_length = + Subclass::NumberOfElementsImpl(*object, *backing_store); + initial_list_length += nof_property_keys; + } + combined_keys = isolate->factory()->NewFixedArray(initial_list_length); + } + + uint32_t nof_indices = 0; + bool needs_sorting = IsDictionaryElementsKind(kind()) || + IsSloppyArgumentsElementsKind(kind()); + combined_keys = Subclass::DirectCollectElementIndicesImpl( + isolate, object, backing_store, + needs_sorting ? GetKeysConversion::kKeepNumbers : convert, filter, + combined_keys, &nof_indices); + + if (needs_sorting) { + SortIndices(isolate, combined_keys, nof_indices); + // Indices from dictionary elements should only be converted after + // sorting. + if (convert == GetKeysConversion::kConvertToString) { + for (uint32_t i = 0; i < nof_indices; i++) { + Handle index_string = isolate->factory()->Uint32ToString( + combined_keys->get(i).Number()); + combined_keys->set(i, *index_string); + } + } + } + + // Copy over the passed-in property keys. 
+ CopyObjectToObjectElements(isolate, *keys, PACKED_ELEMENTS, 0, + *combined_keys, PACKED_ELEMENTS, nof_indices, + nof_property_keys); + + // For holey elements and arguments we might have to shrink the collected + // keys since the estimates might be off. + if (IsHoleyOrDictionaryElementsKind(kind()) || + IsSloppyArgumentsElementsKind(kind())) { + // Shrink combined_keys to the final size. + int final_size = nof_indices + nof_property_keys; + DCHECK_LE(final_size, combined_keys->length()); + return FixedArray::ShrinkOrEmpty(isolate, combined_keys, final_size); + } + + return combined_keys; + } + + void AddElementsToKeyAccumulator(Handle receiver, + KeyAccumulator* accumulator, + AddKeyConversion convert) final { + Subclass::AddElementsToKeyAccumulatorImpl(receiver, accumulator, convert); + } + + static uint32_t GetCapacityImpl(JSObject holder, + FixedArrayBase backing_store) { + return backing_store.length(); + } + + uint32_t GetCapacity(JSObject holder, FixedArrayBase backing_store) final { + return Subclass::GetCapacityImpl(holder, backing_store); + } + + static Object FillImpl(Handle receiver, Handle obj_value, + uint32_t start, uint32_t end) { + UNREACHABLE(); + } + + Object Fill(Handle receiver, Handle obj_value, + uint32_t start, uint32_t end) override { + return Subclass::FillImpl(receiver, obj_value, start, end); + } + + static Maybe IncludesValueImpl(Isolate* isolate, + Handle receiver, + Handle value, + uint32_t start_from, uint32_t length) { + return IncludesValueSlowPath(isolate, receiver, value, start_from, length); + } + + Maybe IncludesValue(Isolate* isolate, Handle receiver, + Handle value, uint32_t start_from, + uint32_t length) final { + return Subclass::IncludesValueImpl(isolate, receiver, value, start_from, + length); + } + + static Maybe IndexOfValueImpl(Isolate* isolate, + Handle receiver, + Handle value, + uint32_t start_from, uint32_t length) { + return IndexOfValueSlowPath(isolate, receiver, value, start_from, length); + } + + Maybe 
IndexOfValue(Isolate* isolate, Handle receiver, + Handle value, uint32_t start_from, + uint32_t length) final { + return Subclass::IndexOfValueImpl(isolate, receiver, value, start_from, + length); + } + + static Maybe LastIndexOfValueImpl(Handle receiver, + Handle value, + uint32_t start_from) { + UNREACHABLE(); + } + + Maybe LastIndexOfValue(Handle receiver, + Handle value, + uint32_t start_from) final { + return Subclass::LastIndexOfValueImpl(receiver, value, start_from); + } + + static void ReverseImpl(JSObject receiver) { UNREACHABLE(); } + + void Reverse(JSObject receiver) final { Subclass::ReverseImpl(receiver); } + + static uint32_t GetIndexForEntryImpl(FixedArrayBase backing_store, + uint32_t entry) { + return entry; + } + + static uint32_t GetEntryForIndexImpl(Isolate* isolate, JSObject holder, + FixedArrayBase backing_store, + uint32_t index, PropertyFilter filter) { + DCHECK(IsFastElementsKind(kind()) || IsFrozenOrSealedElementsKind(kind())); + uint32_t length = Subclass::GetMaxIndex(holder, backing_store); + if (IsHoleyElementsKindForRead(kind())) { + return index < length && !BackingStore::cast(backing_store) + .is_the_hole(isolate, index) + ? index + : kMaxUInt32; + } else { + return index < length ? 
index : kMaxUInt32; + } + } + + uint32_t GetEntryForIndex(Isolate* isolate, JSObject holder, + FixedArrayBase backing_store, + uint32_t index) final { + return Subclass::GetEntryForIndexImpl(isolate, holder, backing_store, index, + ALL_PROPERTIES); + } + + static PropertyDetails GetDetailsImpl(FixedArrayBase backing_store, + uint32_t entry) { + return PropertyDetails(kData, NONE, PropertyCellType::kNoCell); + } + + static PropertyDetails GetDetailsImpl(JSObject holder, uint32_t entry) { + return PropertyDetails(kData, NONE, PropertyCellType::kNoCell); + } + + PropertyDetails GetDetails(JSObject holder, uint32_t entry) final { + return Subclass::GetDetailsImpl(holder, entry); + } + + Handle CreateListFromArrayLike(Isolate* isolate, + Handle object, + uint32_t length) final { + return Subclass::CreateListFromArrayLikeImpl(isolate, object, length); + } + + static Handle CreateListFromArrayLikeImpl(Isolate* isolate, + Handle object, + uint32_t length) { + UNREACHABLE(); + } + + private: + DISALLOW_COPY_AND_ASSIGN(ElementsAccessorBase); +}; + +class DictionaryElementsAccessor + : public ElementsAccessorBase> { + public: + static uint32_t GetMaxIndex(JSObject receiver, FixedArrayBase elements) { + // We cannot properly estimate this for dictionaries. 
+ UNREACHABLE(); + } + + static uint32_t GetMaxNumberOfEntries(JSObject receiver, + FixedArrayBase backing_store) { + return NumberOfElementsImpl(receiver, backing_store); + } + + static uint32_t NumberOfElementsImpl(JSObject receiver, + FixedArrayBase backing_store) { + NumberDictionary dict = NumberDictionary::cast(backing_store); + return dict.NumberOfElements(); + } + + static void SetLengthImpl(Isolate* isolate, Handle array, + uint32_t length, + Handle backing_store) { + Handle dict = + Handle::cast(backing_store); + int capacity = dict->Capacity(); + uint32_t old_length = 0; + CHECK(array->length().ToArrayLength(&old_length)); + { + DisallowHeapAllocation no_gc; + ReadOnlyRoots roots(isolate); + if (length < old_length) { + if (dict->requires_slow_elements()) { + // Find last non-deletable element in range of elements to be + // deleted and adjust range accordingly. + for (int entry = 0; entry < capacity; entry++) { + Object index = dict->KeyAt(entry); + if (dict->IsKey(roots, index)) { + uint32_t number = static_cast(index.Number()); + if (length <= number && number < old_length) { + PropertyDetails details = dict->DetailsAt(entry); + if (!details.IsConfigurable()) length = number + 1; + } + } + } + } + + if (length == 0) { + // Flush the backing store. + array->initialize_elements(); + } else { + // Remove elements that should be deleted. + int removed_entries = 0; + for (int entry = 0; entry < capacity; entry++) { + Object index = dict->KeyAt(entry); + if (dict->IsKey(roots, index)) { + uint32_t number = static_cast(index.Number()); + if (length <= number && number < old_length) { + dict->ClearEntry(isolate, entry); + removed_entries++; + } + } + } + + if (removed_entries > 0) { + // Update the number of elements. 
+ dict->ElementsRemoved(removed_entries); + } + } + } + } + + Handle length_obj = isolate->factory()->NewNumberFromUint(length); + array->set_length(*length_obj); + } + + static void CopyElementsImpl(Isolate* isolate, FixedArrayBase from, + uint32_t from_start, FixedArrayBase to, + ElementsKind from_kind, uint32_t to_start, + int packed_size, int copy_size) { + UNREACHABLE(); + } + + static void DeleteImpl(Handle obj, uint32_t entry) { + Handle dict(NumberDictionary::cast(obj->elements()), + obj->GetIsolate()); + dict = NumberDictionary::DeleteEntry(obj->GetIsolate(), dict, entry); + obj->set_elements(*dict); + } + + static bool HasAccessorsImpl(JSObject holder, FixedArrayBase backing_store) { + DisallowHeapAllocation no_gc; + NumberDictionary dict = NumberDictionary::cast(backing_store); + if (!dict.requires_slow_elements()) return false; + int capacity = dict.Capacity(); + ReadOnlyRoots roots = holder.GetReadOnlyRoots(); + for (int i = 0; i < capacity; i++) { + Object key = dict.KeyAt(i); + if (!dict.IsKey(roots, key)) continue; + PropertyDetails details = dict.DetailsAt(i); + if (details.kind() == kAccessor) return true; + } + return false; + } + + static Object GetRaw(FixedArrayBase store, uint32_t entry) { + NumberDictionary backing_store = NumberDictionary::cast(store); + return backing_store.ValueAt(entry); + } + + static Handle GetImpl(Isolate* isolate, FixedArrayBase backing_store, + uint32_t entry) { + return handle(GetRaw(backing_store, entry), isolate); + } + + static inline void SetImpl(Handle holder, uint32_t entry, + Object value) { + SetImpl(holder->elements(), entry, value); + } + + static inline void SetImpl(FixedArrayBase backing_store, uint32_t entry, + Object value) { + NumberDictionary::cast(backing_store).ValueAtPut(entry, value); + } + + static void ReconfigureImpl(Handle object, + Handle store, uint32_t entry, + Handle value, + PropertyAttributes attributes) { + NumberDictionary dictionary = NumberDictionary::cast(*store); + if (attributes 
!= NONE) object->RequireSlowElements(dictionary); + dictionary.ValueAtPut(entry, *value); + PropertyDetails details = dictionary.DetailsAt(entry); + details = PropertyDetails(kData, attributes, PropertyCellType::kNoCell, + details.dictionary_index()); + + dictionary.DetailsAtPut(object->GetIsolate(), entry, details); + } + + static void AddImpl(Handle object, uint32_t index, + Handle value, PropertyAttributes attributes, + uint32_t new_capacity) { + PropertyDetails details(kData, attributes, PropertyCellType::kNoCell); + Handle dictionary = + object->HasFastElements() || object->HasFastStringWrapperElements() + ? JSObject::NormalizeElements(object) + : handle(NumberDictionary::cast(object->elements()), + object->GetIsolate()); + Handle new_dictionary = NumberDictionary::Add( + object->GetIsolate(), dictionary, index, value, details); + new_dictionary->UpdateMaxNumberKey(index, object); + if (attributes != NONE) object->RequireSlowElements(*new_dictionary); + if (dictionary.is_identical_to(new_dictionary)) return; + object->set_elements(*new_dictionary); + } + + static bool HasEntryImpl(Isolate* isolate, FixedArrayBase store, + uint32_t entry) { + DisallowHeapAllocation no_gc; + NumberDictionary dict = NumberDictionary::cast(store); + Object index = dict.KeyAt(entry); + return !index.IsTheHole(isolate); + } + + static uint32_t GetIndexForEntryImpl(FixedArrayBase store, uint32_t entry) { + DisallowHeapAllocation no_gc; + NumberDictionary dict = NumberDictionary::cast(store); + uint32_t result = 0; + CHECK(dict.KeyAt(entry).ToArrayIndex(&result)); + return result; + } + + static uint32_t GetEntryForIndexImpl(Isolate* isolate, JSObject holder, + FixedArrayBase store, uint32_t index, + PropertyFilter filter) { + DisallowHeapAllocation no_gc; + NumberDictionary dictionary = NumberDictionary::cast(store); + int entry = dictionary.FindEntry(isolate, index); + if (entry == NumberDictionary::kNotFound) return kMaxUInt32; + if (filter != ALL_PROPERTIES) { + PropertyDetails 
details = dictionary.DetailsAt(entry); + PropertyAttributes attr = details.attributes(); + if ((attr & filter) != 0) return kMaxUInt32; + } + return static_cast(entry); + } + + static PropertyDetails GetDetailsImpl(JSObject holder, uint32_t entry) { + return GetDetailsImpl(holder.elements(), entry); + } + + static PropertyDetails GetDetailsImpl(FixedArrayBase backing_store, + uint32_t entry) { + return NumberDictionary::cast(backing_store).DetailsAt(entry); + } + + static uint32_t FilterKey(Handle dictionary, int entry, + Object raw_key, PropertyFilter filter) { + DCHECK(raw_key.IsNumber()); + DCHECK_LE(raw_key.Number(), kMaxUInt32); + PropertyDetails details = dictionary->DetailsAt(entry); + PropertyAttributes attr = details.attributes(); + if ((attr & filter) != 0) return kMaxUInt32; + return static_cast(raw_key.Number()); + } + + static uint32_t GetKeyForEntryImpl(Isolate* isolate, + Handle dictionary, + int entry, PropertyFilter filter) { + DisallowHeapAllocation no_gc; + Object raw_key = dictionary->KeyAt(entry); + if (!dictionary->IsKey(ReadOnlyRoots(isolate), raw_key)) return kMaxUInt32; + return FilterKey(dictionary, entry, raw_key, filter); + } + + static void CollectElementIndicesImpl(Handle object, + Handle backing_store, + KeyAccumulator* keys) { + if (keys->filter() & SKIP_STRINGS) return; + Isolate* isolate = keys->isolate(); + Handle dictionary = + Handle::cast(backing_store); + int capacity = dictionary->Capacity(); + Handle elements = isolate->factory()->NewFixedArray( + GetMaxNumberOfEntries(*object, *backing_store)); + int insertion_index = 0; + PropertyFilter filter = keys->filter(); + ReadOnlyRoots roots(isolate); + for (int i = 0; i < capacity; i++) { + Object raw_key = dictionary->KeyAt(i); + if (!dictionary->IsKey(roots, raw_key)) continue; + uint32_t key = FilterKey(dictionary, i, raw_key, filter); + if (key == kMaxUInt32) { + keys->AddShadowingKey(raw_key); + continue; + } + elements->set(insertion_index, raw_key); + insertion_index++; + } 
+ SortIndices(isolate, elements, insertion_index); + for (int i = 0; i < insertion_index; i++) { + keys->AddKey(elements->get(i)); + } + } + + static Handle DirectCollectElementIndicesImpl( + Isolate* isolate, Handle object, + Handle backing_store, GetKeysConversion convert, + PropertyFilter filter, Handle list, uint32_t* nof_indices, + uint32_t insertion_index = 0) { + if (filter & SKIP_STRINGS) return list; + if (filter & ONLY_ALL_CAN_READ) return list; + + Handle dictionary = + Handle::cast(backing_store); + uint32_t capacity = dictionary->Capacity(); + for (uint32_t i = 0; i < capacity; i++) { + uint32_t key = GetKeyForEntryImpl(isolate, dictionary, i, filter); + if (key == kMaxUInt32) continue; + Handle index = isolate->factory()->NewNumberFromUint(key); + list->set(insertion_index, *index); + insertion_index++; + } + *nof_indices = insertion_index; + return list; + } + + static void AddElementsToKeyAccumulatorImpl(Handle receiver, + KeyAccumulator* accumulator, + AddKeyConversion convert) { + Isolate* isolate = accumulator->isolate(); + Handle dictionary( + NumberDictionary::cast(receiver->elements()), isolate); + int capacity = dictionary->Capacity(); + ReadOnlyRoots roots(isolate); + for (int i = 0; i < capacity; i++) { + Object k = dictionary->KeyAt(i); + if (!dictionary->IsKey(roots, k)) continue; + Object value = dictionary->ValueAt(i); + DCHECK(!value.IsTheHole(isolate)); + DCHECK(!value.IsAccessorPair()); + DCHECK(!value.IsAccessorInfo()); + accumulator->AddKey(value, convert); + } + } + + static bool IncludesValueFastPath(Isolate* isolate, Handle receiver, + Handle value, uint32_t start_from, + uint32_t length, Maybe* result) { + DisallowHeapAllocation no_gc; + NumberDictionary dictionary = NumberDictionary::cast(receiver->elements()); + int capacity = dictionary.Capacity(); + Object the_hole = ReadOnlyRoots(isolate).the_hole_value(); + Object undefined = ReadOnlyRoots(isolate).undefined_value(); + + // Scan for accessor properties. 
If accessors are present, then elements + // must be accessed in order via the slow path. + bool found = false; + for (int i = 0; i < capacity; ++i) { + Object k = dictionary.KeyAt(i); + if (k == the_hole) continue; + if (k == undefined) continue; + + uint32_t index; + if (!k.ToArrayIndex(&index) || index < start_from || index >= length) { + continue; + } + + if (dictionary.DetailsAt(i).kind() == kAccessor) { + // Restart from beginning in slow path, otherwise we may observably + // access getters out of order + return false; + } else if (!found) { + Object element_k = dictionary.ValueAt(i); + if (value->SameValueZero(element_k)) found = true; + } + } + + *result = Just(found); + return true; + } + + static Maybe IncludesValueImpl(Isolate* isolate, + Handle receiver, + Handle value, + uint32_t start_from, uint32_t length) { + DCHECK(JSObject::PrototypeHasNoElements(isolate, *receiver)); + bool search_for_hole = value->IsUndefined(isolate); + + if (!search_for_hole) { + Maybe result = Nothing(); + if (DictionaryElementsAccessor::IncludesValueFastPath( + isolate, receiver, value, start_from, length, &result)) { + return result; + } + } + ElementsKind original_elements_kind = receiver->GetElementsKind(); + USE(original_elements_kind); + Handle dictionary( + NumberDictionary::cast(receiver->elements()), isolate); + // Iterate through entire range, as accessing elements out of order is + // observable + for (uint32_t k = start_from; k < length; ++k) { + DCHECK_EQ(receiver->GetElementsKind(), original_elements_kind); + int entry = dictionary->FindEntry(isolate, k); + if (entry == NumberDictionary::kNotFound) { + if (search_for_hole) return Just(true); + continue; + } + + PropertyDetails details = GetDetailsImpl(*dictionary, entry); + switch (details.kind()) { + case kData: { + Object element_k = dictionary->ValueAt(entry); + if (value->SameValueZero(element_k)) return Just(true); + break; + } + case kAccessor: { + LookupIterator it(isolate, receiver, k, + 
LookupIterator::OWN_SKIP_INTERCEPTOR); + DCHECK(it.IsFound()); + DCHECK_EQ(it.state(), LookupIterator::ACCESSOR); + Handle element_k; + + ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, element_k, + Object::GetPropertyWithAccessor(&it), + Nothing()); + + if (value->SameValueZero(*element_k)) return Just(true); + + // Bailout to slow path if elements on prototype changed + if (!JSObject::PrototypeHasNoElements(isolate, *receiver)) { + return IncludesValueSlowPath(isolate, receiver, value, k + 1, + length); + } + + // Continue if elements unchanged + if (*dictionary == receiver->elements()) continue; + + // Otherwise, bailout or update elements + + // If switched to initial elements, return true if searching for + // undefined, and false otherwise. + if (receiver->map().GetInitialElements() == receiver->elements()) { + return Just(search_for_hole); + } + + // If switched to fast elements, continue with the correct accessor. + if (receiver->GetElementsKind() != DICTIONARY_ELEMENTS) { + ElementsAccessor* accessor = receiver->GetElementsAccessor(); + return accessor->IncludesValue(isolate, receiver, value, k + 1, + length); + } + dictionary = + handle(NumberDictionary::cast(receiver->elements()), isolate); + break; + } + } + } + return Just(false); + } + + static Maybe IndexOfValueImpl(Isolate* isolate, + Handle receiver, + Handle value, + uint32_t start_from, uint32_t length) { + DCHECK(JSObject::PrototypeHasNoElements(isolate, *receiver)); + + ElementsKind original_elements_kind = receiver->GetElementsKind(); + USE(original_elements_kind); + Handle dictionary( + NumberDictionary::cast(receiver->elements()), isolate); + // Iterate through entire range, as accessing elements out of order is + // observable. 
+ for (uint32_t k = start_from; k < length; ++k) { + DCHECK_EQ(receiver->GetElementsKind(), original_elements_kind); + int entry = dictionary->FindEntry(isolate, k); + if (entry == NumberDictionary::kNotFound) continue; + + PropertyDetails details = GetDetailsImpl(*dictionary, entry); + switch (details.kind()) { + case kData: { + Object element_k = dictionary->ValueAt(entry); + if (value->StrictEquals(element_k)) { + return Just(k); + } + break; + } + case kAccessor: { + LookupIterator it(isolate, receiver, k, + LookupIterator::OWN_SKIP_INTERCEPTOR); + DCHECK(it.IsFound()); + DCHECK_EQ(it.state(), LookupIterator::ACCESSOR); + Handle element_k; + + ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, element_k, + Object::GetPropertyWithAccessor(&it), + Nothing()); + + if (value->StrictEquals(*element_k)) return Just(k); + + // Bailout to slow path if elements on prototype changed. + if (!JSObject::PrototypeHasNoElements(isolate, *receiver)) { + return IndexOfValueSlowPath(isolate, receiver, value, k + 1, + length); + } + + // Continue if elements unchanged. + if (*dictionary == receiver->elements()) continue; + + // Otherwise, bailout or update elements. + if (receiver->GetElementsKind() != DICTIONARY_ELEMENTS) { + // Otherwise, switch to slow path. + return IndexOfValueSlowPath(isolate, receiver, value, k + 1, + length); + } + dictionary = + handle(NumberDictionary::cast(receiver->elements()), isolate); + break; + } + } + } + return Just(-1); + } + + static void ValidateContents(JSObject holder, int length) { + DisallowHeapAllocation no_gc; +#if DEBUG + DCHECK_EQ(holder.map().elements_kind(), DICTIONARY_ELEMENTS); + if (!FLAG_enable_slow_asserts) return; + ReadOnlyRoots roots = holder.GetReadOnlyRoots(); + NumberDictionary dictionary = NumberDictionary::cast(holder.elements()); + // Validate the requires_slow_elements and max_number_key values. 
+ int capacity = dictionary.Capacity(); + bool requires_slow_elements = false; + int max_key = 0; + for (int i = 0; i < capacity; ++i) { + Object k; + if (!dictionary.ToKey(roots, i, &k)) continue; + DCHECK_LE(0.0, k.Number()); + if (k.Number() > NumberDictionary::kRequiresSlowElementsLimit) { + requires_slow_elements = true; + } else { + max_key = Max(max_key, Smi::ToInt(k)); + } + } + if (requires_slow_elements) { + DCHECK(dictionary.requires_slow_elements()); + } else if (!dictionary.requires_slow_elements()) { + DCHECK_LE(max_key, dictionary.max_number_key()); + } +#endif + } +}; + +// Super class for all fast element arrays. +template +class FastElementsAccessor : public ElementsAccessorBase { + public: + using BackingStore = typename KindTraits::BackingStore; + + static Handle NormalizeImpl(Handle object, + Handle store) { + Isolate* isolate = object->GetIsolate(); + ElementsKind kind = Subclass::kind(); + + // Ensure that notifications fire if the array or object prototypes are + // normalizing. 
+ if (IsSmiOrObjectElementsKind(kind) || + kind == FAST_STRING_WRAPPER_ELEMENTS) { + isolate->UpdateNoElementsProtectorOnNormalizeElements(object); + } + + int capacity = object->GetFastElementsUsage(); + Handle dictionary = + NumberDictionary::New(isolate, capacity); + + PropertyDetails details = PropertyDetails::Empty(); + int j = 0; + int max_number_key = -1; + for (int i = 0; j < capacity; i++) { + if (IsHoleyElementsKindForRead(kind)) { + if (BackingStore::cast(*store).is_the_hole(isolate, i)) continue; + } + max_number_key = i; + Handle value = Subclass::GetImpl(isolate, *store, i); + dictionary = + NumberDictionary::Add(isolate, dictionary, i, value, details); + j++; + } + + if (max_number_key > 0) { + dictionary->UpdateMaxNumberKey(static_cast(max_number_key), + object); + } + return dictionary; + } + + static void DeleteAtEnd(Handle obj, + Handle backing_store, uint32_t entry) { + uint32_t length = static_cast(backing_store->length()); + Isolate* isolate = obj->GetIsolate(); + for (; entry > 0; entry--) { + if (!backing_store->is_the_hole(isolate, entry - 1)) break; + } + if (entry == 0) { + FixedArray empty = ReadOnlyRoots(isolate).empty_fixed_array(); + // Dynamically ask for the elements kind here since we manually redirect + // the operations for argument backing stores. 
+ if (obj->GetElementsKind() == FAST_SLOPPY_ARGUMENTS_ELEMENTS) { + SloppyArgumentsElements::cast(obj->elements()).set_arguments(empty); + } else { + obj->set_elements(empty); + } + return; + } + + isolate->heap()->RightTrimFixedArray(*backing_store, length - entry); + } + + static void DeleteCommon(Handle obj, uint32_t entry, + Handle store) { + DCHECK(obj->HasSmiOrObjectElements() || obj->HasDoubleElements() || + obj->HasFastArgumentsElements() || + obj->HasFastStringWrapperElements()); + Handle backing_store = Handle::cast(store); + if (!obj->IsJSArray() && + entry == static_cast(store->length()) - 1) { + DeleteAtEnd(obj, backing_store, entry); + return; + } + + Isolate* isolate = obj->GetIsolate(); + backing_store->set_the_hole(isolate, entry); + + // TODO(verwaest): Move this out of elements.cc. + // If an old space backing store is larger than a certain size and + // has too few used values, normalize it. + const int kMinLengthForSparsenessCheck = 64; + if (backing_store->length() < kMinLengthForSparsenessCheck) return; + // TODO(ulan): Check if it works with young large objects. + if (ObjectInYoungGeneration(*backing_store)) return; + uint32_t length = 0; + if (obj->IsJSArray()) { + JSArray::cast(*obj).length().ToArrayLength(&length); + } else { + length = static_cast(store->length()); + } + + // To avoid doing the check on every delete, use a counter-based heuristic. + const int kLengthFraction = 16; + // The above constant must be large enough to ensure that we check for + // normalization frequently enough. At a minimum, it should be large + // enough to reliably hit the "window" of remaining elements count where + // normalization would be beneficial. 
+ STATIC_ASSERT(kLengthFraction >= + NumberDictionary::kEntrySize * + NumberDictionary::kPreferFastElementsSizeFactor); + size_t current_counter = isolate->elements_deletion_counter(); + if (current_counter < length / kLengthFraction) { + isolate->set_elements_deletion_counter(current_counter + 1); + return; + } + // Reset the counter whenever the full check is performed. + isolate->set_elements_deletion_counter(0); + + if (!obj->IsJSArray()) { + uint32_t i; + for (i = entry + 1; i < length; i++) { + if (!backing_store->is_the_hole(isolate, i)) break; + } + if (i == length) { + DeleteAtEnd(obj, backing_store, entry); + return; + } + } + int num_used = 0; + for (int i = 0; i < backing_store->length(); ++i) { + if (!backing_store->is_the_hole(isolate, i)) { + ++num_used; + // Bail out if a number dictionary wouldn't be able to save much space. + if (NumberDictionary::kPreferFastElementsSizeFactor * + NumberDictionary::ComputeCapacity(num_used) * + NumberDictionary::kEntrySize > + static_cast(backing_store->length())) { + return; + } + } + } + JSObject::NormalizeElements(obj); + } + + static void ReconfigureImpl(Handle object, + Handle store, uint32_t entry, + Handle value, + PropertyAttributes attributes) { + Handle dictionary = JSObject::NormalizeElements(object); + entry = dictionary->FindEntry(object->GetIsolate(), entry); + DictionaryElementsAccessor::ReconfigureImpl(object, dictionary, entry, + value, attributes); + } + + static void AddImpl(Handle object, uint32_t index, + Handle value, PropertyAttributes attributes, + uint32_t new_capacity) { + DCHECK_EQ(NONE, attributes); + ElementsKind from_kind = object->GetElementsKind(); + ElementsKind to_kind = Subclass::kind(); + if (IsDictionaryElementsKind(from_kind) || + IsDoubleElementsKind(from_kind) != IsDoubleElementsKind(to_kind) || + Subclass::GetCapacityImpl(*object, object->elements()) != + new_capacity) { + Subclass::GrowCapacityAndConvertImpl(object, new_capacity); + } else { + if 
(IsFastElementsKind(from_kind) && from_kind != to_kind) { + JSObject::TransitionElementsKind(object, to_kind); + } + if (IsSmiOrObjectElementsKind(from_kind)) { + DCHECK(IsSmiOrObjectElementsKind(to_kind)); + JSObject::EnsureWritableFastElements(object); + } + } + Subclass::SetImpl(object, index, *value); + } + + static void DeleteImpl(Handle obj, uint32_t entry) { + ElementsKind kind = KindTraits::Kind; + if (IsFastPackedElementsKind(kind)) { + JSObject::TransitionElementsKind(obj, GetHoleyElementsKind(kind)); + } + if (IsSmiOrObjectElementsKind(KindTraits::Kind)) { + JSObject::EnsureWritableFastElements(obj); + } + DeleteCommon(obj, entry, handle(obj->elements(), obj->GetIsolate())); + } + + static bool HasEntryImpl(Isolate* isolate, FixedArrayBase backing_store, + uint32_t entry) { + return !BackingStore::cast(backing_store).is_the_hole(isolate, entry); + } + + static uint32_t NumberOfElementsImpl(JSObject receiver, + FixedArrayBase backing_store) { + uint32_t max_index = Subclass::GetMaxIndex(receiver, backing_store); + if (IsFastPackedElementsKind(Subclass::kind())) return max_index; + Isolate* isolate = receiver.GetIsolate(); + uint32_t count = 0; + for (uint32_t i = 0; i < max_index; i++) { + if (Subclass::HasEntryImpl(isolate, backing_store, i)) count++; + } + return count; + } + + static void AddElementsToKeyAccumulatorImpl(Handle receiver, + KeyAccumulator* accumulator, + AddKeyConversion convert) { + Isolate* isolate = accumulator->isolate(); + Handle elements(receiver->elements(), isolate); + uint32_t length = Subclass::GetMaxNumberOfEntries(*receiver, *elements); + for (uint32_t i = 0; i < length; i++) { + if (IsFastPackedElementsKind(KindTraits::Kind) || + HasEntryImpl(isolate, *elements, i)) { + accumulator->AddKey(Subclass::GetImpl(isolate, *elements, i), convert); + } + } + } + + static void ValidateContents(JSObject holder, int length) { +#if DEBUG + Isolate* isolate = holder.GetIsolate(); + Heap* heap = isolate->heap(); + FixedArrayBase elements 
= holder.elements(); + Map map = elements.map(); + if (IsSmiOrObjectElementsKind(KindTraits::Kind)) { + DCHECK_NE(map, ReadOnlyRoots(heap).fixed_double_array_map()); + } else if (IsDoubleElementsKind(KindTraits::Kind)) { + DCHECK_NE(map, ReadOnlyRoots(heap).fixed_cow_array_map()); + if (map == ReadOnlyRoots(heap).fixed_array_map()) DCHECK_EQ(0, length); + } else { + UNREACHABLE(); + } + if (length == 0) return; // nothing to do! +#if ENABLE_SLOW_DCHECKS + DisallowHeapAllocation no_gc; + BackingStore backing_store = BackingStore::cast(elements); + if (IsSmiElementsKind(KindTraits::Kind)) { + HandleScope scope(isolate); + for (int i = 0; i < length; i++) { + DCHECK(BackingStore::get(backing_store, i, isolate)->IsSmi() || + (IsHoleyElementsKind(KindTraits::Kind) && + backing_store.is_the_hole(isolate, i))); + } + } else if (KindTraits::Kind == PACKED_ELEMENTS || + KindTraits::Kind == PACKED_DOUBLE_ELEMENTS) { + for (int i = 0; i < length; i++) { + DCHECK(!backing_store.is_the_hole(isolate, i)); + } + } else { + DCHECK(IsHoleyElementsKind(KindTraits::Kind)); + } +#endif +#endif + } + + static Handle PopImpl(Handle receiver) { + return Subclass::RemoveElement(receiver, AT_END); + } + + static Handle ShiftImpl(Handle receiver) { + return Subclass::RemoveElement(receiver, AT_START); + } + + static uint32_t PushImpl(Handle receiver, Arguments* args, + uint32_t push_size) { + Handle backing_store(receiver->elements(), + receiver->GetIsolate()); + return Subclass::AddArguments(receiver, backing_store, args, push_size, + AT_END); + } + + static uint32_t UnshiftImpl(Handle receiver, Arguments* args, + uint32_t unshift_size) { + Handle backing_store(receiver->elements(), + receiver->GetIsolate()); + return Subclass::AddArguments(receiver, backing_store, args, unshift_size, + AT_START); + } + + static void MoveElements(Isolate* isolate, Handle receiver, + Handle backing_store, int dst_index, + int src_index, int len, int hole_start, + int hole_end) { + Handle dst_elms = 
Handle::cast(backing_store); + if (len > JSArray::kMaxCopyElements && dst_index == 0 && + isolate->heap()->CanMoveObjectStart(*dst_elms)) { + // Update all the copies of this backing_store handle. + *dst_elms.location() = + BackingStore::cast( + isolate->heap()->LeftTrimFixedArray(*dst_elms, src_index)) + .ptr(); + receiver->set_elements(*dst_elms); + // Adjust the hole offset as the array has been shrunk. + hole_end -= src_index; + DCHECK_LE(hole_start, backing_store->length()); + DCHECK_LE(hole_end, backing_store->length()); + } else if (len != 0) { + WriteBarrierMode mode = GetWriteBarrierMode(KindTraits::Kind); + dst_elms->MoveElements(isolate, dst_index, src_index, len, mode); + } + if (hole_start != hole_end) { + dst_elms->FillWithHoles(hole_start, hole_end); + } + } + + static Object FillImpl(Handle receiver, Handle obj_value, + uint32_t start, uint32_t end) { + // Ensure indexes are within array bounds + DCHECK_LE(0, start); + DCHECK_LE(start, end); + + // Make sure COW arrays are copied. + if (IsSmiOrObjectElementsKind(Subclass::kind())) { + JSObject::EnsureWritableFastElements(receiver); + } + + // Make sure we have enough space. 
+ uint32_t capacity = + Subclass::GetCapacityImpl(*receiver, receiver->elements()); + if (end > capacity) { + Subclass::GrowCapacityAndConvertImpl(receiver, end); + CHECK_EQ(Subclass::kind(), receiver->GetElementsKind()); + } + DCHECK_LE(end, Subclass::GetCapacityImpl(*receiver, receiver->elements())); + + for (uint32_t index = start; index < end; ++index) { + Subclass::SetImpl(receiver, index, *obj_value); + } + return *receiver; + } + + static Maybe IncludesValueImpl(Isolate* isolate, + Handle receiver, + Handle search_value, + uint32_t start_from, uint32_t length) { + DCHECK(JSObject::PrototypeHasNoElements(isolate, *receiver)); + DisallowHeapAllocation no_gc; + FixedArrayBase elements_base = receiver->elements(); + Object the_hole = ReadOnlyRoots(isolate).the_hole_value(); + Object undefined = ReadOnlyRoots(isolate).undefined_value(); + Object value = *search_value; + + if (start_from >= length) return Just(false); + + // Elements beyond the capacity of the backing store treated as undefined. + uint32_t elements_length = static_cast(elements_base.length()); + if (value == undefined && elements_length < length) return Just(true); + if (elements_length == 0) { + DCHECK_NE(value, undefined); + return Just(false); + } + + length = std::min(elements_length, length); + + if (!value.IsNumber()) { + if (value == undefined) { + // Search for `undefined` or The Hole. Even in the case of + // PACKED_DOUBLE_ELEMENTS or PACKED_SMI_ELEMENTS, we might encounter The + // Hole here, since the {length} used here can be larger than + // JSArray::length. 
+ if (IsSmiOrObjectElementsKind(Subclass::kind()) || + IsFrozenOrSealedElementsKind(Subclass::kind())) { + auto elements = FixedArray::cast(receiver->elements()); + + for (uint32_t k = start_from; k < length; ++k) { + Object element_k = elements.get(k); + + if (element_k == the_hole || element_k == undefined) { + return Just(true); + } + } + return Just(false); + } else { + // Search for The Hole in HOLEY_DOUBLE_ELEMENTS or + // PACKED_DOUBLE_ELEMENTS. + DCHECK(IsDoubleElementsKind(Subclass::kind())); + auto elements = FixedDoubleArray::cast(receiver->elements()); + + for (uint32_t k = start_from; k < length; ++k) { + if (elements.is_the_hole(k)) { + return Just(true); + } + } + return Just(false); + } + } else if (!IsObjectElementsKind(Subclass::kind()) && + !IsFrozenOrSealedElementsKind(Subclass::kind())) { + // Search for non-number, non-Undefined value, with either + // PACKED_SMI_ELEMENTS, PACKED_DOUBLE_ELEMENTS, HOLEY_SMI_ELEMENTS or + // HOLEY_DOUBLE_ELEMENTS. Guaranteed to return false, since these + // elements kinds can only contain Number values or undefined. + return Just(false); + } else { + // Search for non-number, non-Undefined value with either + // PACKED_ELEMENTS or HOLEY_ELEMENTS. + DCHECK(IsObjectElementsKind(Subclass::kind()) || + IsFrozenOrSealedElementsKind(Subclass::kind())); + auto elements = FixedArray::cast(receiver->elements()); + + for (uint32_t k = start_from; k < length; ++k) { + Object element_k = elements.get(k); + if (element_k == the_hole) { + continue; + } + + if (value.SameValueZero(element_k)) return Just(true); + } + return Just(false); + } + } else { + if (!value.IsNaN()) { + double search_value = value.Number(); + if (IsDoubleElementsKind(Subclass::kind())) { + // Search for non-NaN Number in PACKED_DOUBLE_ELEMENTS or + // HOLEY_DOUBLE_ELEMENTS --- Skip TheHole, and trust UCOMISD or + // similar operation for result. 
+ auto elements = FixedDoubleArray::cast(receiver->elements()); + + for (uint32_t k = start_from; k < length; ++k) { + if (elements.is_the_hole(k)) { + continue; + } + if (elements.get_scalar(k) == search_value) return Just(true); + } + return Just(false); + } else { + // Search for non-NaN Number in PACKED_ELEMENTS, HOLEY_ELEMENTS, + // PACKED_SMI_ELEMENTS or HOLEY_SMI_ELEMENTS --- Skip non-Numbers, + // and trust UCOMISD or similar operation for result + auto elements = FixedArray::cast(receiver->elements()); + + for (uint32_t k = start_from; k < length; ++k) { + Object element_k = elements.get(k); + if (element_k.IsNumber() && element_k.Number() == search_value) { + return Just(true); + } + } + return Just(false); + } + } else { + // Search for NaN --- NaN cannot be represented with Smi elements, so + // abort if ElementsKind is PACKED_SMI_ELEMENTS or HOLEY_SMI_ELEMENTS + if (IsSmiElementsKind(Subclass::kind())) return Just(false); + + if (IsDoubleElementsKind(Subclass::kind())) { + // Search for NaN in PACKED_DOUBLE_ELEMENTS or + // HOLEY_DOUBLE_ELEMENTS --- Skip The Hole and trust + // std::isnan(elementK) for result + auto elements = FixedDoubleArray::cast(receiver->elements()); + + for (uint32_t k = start_from; k < length; ++k) { + if (elements.is_the_hole(k)) { + continue; + } + if (std::isnan(elements.get_scalar(k))) return Just(true); + } + return Just(false); + } else { + // Search for NaN in PACKED_ELEMENTS, HOLEY_ELEMENTS, + // PACKED_SMI_ELEMENTS or HOLEY_SMI_ELEMENTS. 
Return true if + // elementK->IsHeapNumber() && std::isnan(elementK->Number()) + DCHECK(IsSmiOrObjectElementsKind(Subclass::kind()) || + IsFrozenOrSealedElementsKind(Subclass::kind())); + auto elements = FixedArray::cast(receiver->elements()); + + for (uint32_t k = start_from; k < length; ++k) { + if (elements.get(k).IsNaN()) return Just(true); + } + return Just(false); + } + } + } + } + + static Handle CreateListFromArrayLikeImpl(Isolate* isolate, + Handle object, + uint32_t length) { + Handle result = isolate->factory()->NewFixedArray(length); + Handle elements(object->elements(), isolate); + for (uint32_t i = 0; i < length; i++) { + if (!Subclass::HasElementImpl(isolate, *object, i, *elements)) continue; + Handle value; + value = Subclass::GetImpl(isolate, *elements, i); + if (value->IsName()) { + value = isolate->factory()->InternalizeName(Handle::cast(value)); + } + result->set(i, *value); + } + return result; + } + + static Handle RemoveElement(Handle receiver, + Where remove_position) { + Isolate* isolate = receiver->GetIsolate(); + ElementsKind kind = KindTraits::Kind; + if (IsSmiOrObjectElementsKind(kind)) { + HandleScope scope(isolate); + JSObject::EnsureWritableFastElements(receiver); + } + Handle backing_store(receiver->elements(), isolate); + uint32_t length = static_cast(Smi::ToInt(receiver->length())); + DCHECK_GT(length, 0); + int new_length = length - 1; + int remove_index = remove_position == AT_START ? 
0 : new_length; + Handle result = + Subclass::GetImpl(isolate, *backing_store, remove_index); + if (remove_position == AT_START) { + Subclass::MoveElements(isolate, receiver, backing_store, 0, 1, new_length, + 0, 0); + } + Subclass::SetLengthImpl(isolate, receiver, new_length, backing_store); + + if (IsHoleyElementsKind(kind) && result->IsTheHole(isolate)) { + return isolate->factory()->undefined_value(); + } + return result; + } + + static uint32_t AddArguments(Handle receiver, + Handle backing_store, + Arguments* args, uint32_t add_size, + Where add_position) { + uint32_t length = Smi::ToInt(receiver->length()); + DCHECK_LT(0, add_size); + uint32_t elms_len = backing_store->length(); + // Check we do not overflow the new_length. + DCHECK(add_size <= static_cast(Smi::kMaxValue - length)); + uint32_t new_length = length + add_size; + + if (new_length > elms_len) { + // New backing storage is needed. + uint32_t capacity = JSObject::NewElementsCapacity(new_length); + // If we add arguments to the start we have to shift the existing objects. + int copy_dst_index = add_position == AT_START ? add_size : 0; + // Copy over all objects to a new backing_store. + backing_store = Subclass::ConvertElementsWithCapacity( + receiver, backing_store, KindTraits::Kind, capacity, 0, + copy_dst_index, ElementsAccessor::kCopyToEndAndInitializeToHole); + receiver->set_elements(*backing_store); + } else if (add_position == AT_START) { + // If the backing store has enough capacity and we add elements to the + // start we have to shift the existing objects. + Isolate* isolate = receiver->GetIsolate(); + Subclass::MoveElements(isolate, receiver, backing_store, add_size, 0, + length, 0, 0); + } + + int insertion_index = add_position == AT_START ? 0 : length; + // Copy the arguments to the start. + Subclass::CopyArguments(args, backing_store, add_size, 1, insertion_index); + // Set the length. 
+ receiver->set_length(Smi::FromInt(new_length)); + return new_length; + } + + static void CopyArguments(Arguments* args, Handle dst_store, + uint32_t copy_size, uint32_t src_index, + uint32_t dst_index) { + // Add the provided values. + DisallowHeapAllocation no_gc; + FixedArrayBase raw_backing_store = *dst_store; + WriteBarrierMode mode = raw_backing_store.GetWriteBarrierMode(no_gc); + for (uint32_t i = 0; i < copy_size; i++) { + Object argument = (*args)[src_index + i]; + DCHECK(!argument.IsTheHole()); + Subclass::SetImpl(raw_backing_store, dst_index + i, argument, mode); + } + } +}; + +template +class FastSmiOrObjectElementsAccessor + : public FastElementsAccessor { + public: + static inline void SetImpl(Handle holder, uint32_t entry, + Object value) { + SetImpl(holder->elements(), entry, value); + } + + static inline void SetImpl(FixedArrayBase backing_store, uint32_t entry, + Object value) { + FixedArray::cast(backing_store).set(entry, value); + } + + static inline void SetImpl(FixedArrayBase backing_store, uint32_t entry, + Object value, WriteBarrierMode mode) { + FixedArray::cast(backing_store).set(entry, value, mode); + } + + static Object GetRaw(FixedArray backing_store, uint32_t entry) { + uint32_t index = Subclass::GetIndexForEntryImpl(backing_store, entry); + return backing_store.get(index); + } + + // NOTE: this method violates the handlified function signature convention: + // raw pointer parameters in the function that allocates. + // See ElementsAccessor::CopyElements() for details. + // This method could actually allocate if copying from double elements to + // object elements. 
+ static void CopyElementsImpl(Isolate* isolate, FixedArrayBase from, + uint32_t from_start, FixedArrayBase to, + ElementsKind from_kind, uint32_t to_start, + int packed_size, int copy_size) { + DisallowHeapAllocation no_gc; + ElementsKind to_kind = KindTraits::Kind; + switch (from_kind) { + case PACKED_SMI_ELEMENTS: + case HOLEY_SMI_ELEMENTS: + case PACKED_ELEMENTS: + case PACKED_FROZEN_ELEMENTS: + case PACKED_SEALED_ELEMENTS: + case HOLEY_ELEMENTS: + case HOLEY_FROZEN_ELEMENTS: + case HOLEY_SEALED_ELEMENTS: + CopyObjectToObjectElements(isolate, from, from_kind, from_start, to, + to_kind, to_start, copy_size); + break; + case PACKED_DOUBLE_ELEMENTS: + case HOLEY_DOUBLE_ELEMENTS: { + AllowHeapAllocation allow_allocation; + DCHECK(IsObjectElementsKind(to_kind)); + CopyDoubleToObjectElements(isolate, from, from_start, to, to_start, + copy_size); + break; + } + case DICTIONARY_ELEMENTS: + CopyDictionaryToObjectElements(isolate, from, from_start, to, to_kind, + to_start, copy_size); + break; + case FAST_SLOPPY_ARGUMENTS_ELEMENTS: + case SLOW_SLOPPY_ARGUMENTS_ELEMENTS: + case FAST_STRING_WRAPPER_ELEMENTS: + case SLOW_STRING_WRAPPER_ELEMENTS: +#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype) case TYPE##_ELEMENTS: + TYPED_ARRAYS(TYPED_ARRAY_CASE) +#undef TYPED_ARRAY_CASE + // This function is currently only used for JSArrays with non-zero + // length. + UNREACHABLE(); + case NO_ELEMENTS: + break; // Nothing to do. + } + } + + static Maybe CollectValuesOrEntriesImpl( + Isolate* isolate, Handle object, + Handle values_or_entries, bool get_entries, int* nof_items, + PropertyFilter filter) { + int count = 0; + if (get_entries) { + // Collecting entries needs to allocate, so this code must be handlified. 
+ Handle elements(FixedArray::cast(object->elements()), + isolate); + uint32_t length = elements->length(); + for (uint32_t index = 0; index < length; ++index) { + if (!Subclass::HasEntryImpl(isolate, *elements, index)) continue; + Handle value = Subclass::GetImpl(isolate, *elements, index); + value = MakeEntryPair(isolate, index, value); + values_or_entries->set(count++, *value); + } + } else { + // No allocations here, so we can avoid handlification overhead. + DisallowHeapAllocation no_gc; + FixedArray elements = FixedArray::cast(object->elements()); + uint32_t length = elements.length(); + for (uint32_t index = 0; index < length; ++index) { + if (!Subclass::HasEntryImpl(isolate, elements, index)) continue; + Object value = GetRaw(elements, index); + values_or_entries->set(count++, value); + } + } + *nof_items = count; + return Just(true); + } + + static Maybe IndexOfValueImpl(Isolate* isolate, + Handle receiver, + Handle search_value, + uint32_t start_from, uint32_t length) { + DCHECK(JSObject::PrototypeHasNoElements(isolate, *receiver)); + DisallowHeapAllocation no_gc; + FixedArrayBase elements_base = receiver->elements(); + Object value = *search_value; + + if (start_from >= length) return Just(-1); + + length = std::min(static_cast(elements_base.length()), length); + + // Only FAST_{,HOLEY_}ELEMENTS can store non-numbers. + if (!value.IsNumber() && !IsObjectElementsKind(Subclass::kind()) && + !IsFrozenOrSealedElementsKind(Subclass::kind())) { + return Just(-1); + } + // NaN can never be found by strict equality. + if (value.IsNaN()) return Just(-1); + + // k can be greater than receiver->length() below, but it is bounded by + // elements_base->length() so we never read out of bounds. This means that + // elements->get(k) can return the hole, for which the StrictEquals will + // always fail. 
+ FixedArray elements = FixedArray::cast(receiver->elements()); + for (uint32_t k = start_from; k < length; ++k) { + if (value.StrictEquals(elements.get(k))) return Just(k); + } + return Just(-1); + } +}; + +class FastPackedSmiElementsAccessor + : public FastSmiOrObjectElementsAccessor< + FastPackedSmiElementsAccessor, + ElementsKindTraits> {}; + +class FastHoleySmiElementsAccessor + : public FastSmiOrObjectElementsAccessor< + FastHoleySmiElementsAccessor, + ElementsKindTraits> {}; + +class FastPackedObjectElementsAccessor + : public FastSmiOrObjectElementsAccessor< + FastPackedObjectElementsAccessor, + ElementsKindTraits> {}; + +template +class FastSealedObjectElementsAccessor + : public FastSmiOrObjectElementsAccessor { + public: + using BackingStore = typename KindTraits::BackingStore; + + static Handle RemoveElement(Handle receiver, + Where remove_position) { + UNREACHABLE(); + } + + static void DeleteImpl(Handle obj, uint32_t entry) { + UNREACHABLE(); + } + + static void DeleteAtEnd(Handle obj, + Handle backing_store, uint32_t entry) { + UNREACHABLE(); + } + + static void DeleteCommon(Handle obj, uint32_t entry, + Handle store) { + UNREACHABLE(); + } + + static Handle PopImpl(Handle receiver) { UNREACHABLE(); } + + static uint32_t PushImpl(Handle receiver, Arguments* args, + uint32_t push_size) { + UNREACHABLE(); + } + + static void AddImpl(Handle object, uint32_t index, + Handle value, PropertyAttributes attributes, + uint32_t new_capacity) { + UNREACHABLE(); + } + + static void SetLengthImpl(Isolate* isolate, Handle array, + uint32_t length, + Handle backing_store) { + uint32_t old_length = 0; + CHECK(array->length().ToArrayIndex(&old_length)); + if (length == old_length) { + // Do nothing. + return; + } + + // Transition to DICTIONARY_ELEMENTS. + // Convert to dictionary mode + Handle new_element_dictionary = + old_length == 0 ? isolate->factory()->empty_slow_element_dictionary() + : array->GetElementsAccessor()->Normalize(array); + + // Migrate map. 
+ Handle new_map = Map::Copy(isolate, handle(array->map(), isolate), + "SlowCopyForSetLengthImpl"); + new_map->set_is_extensible(false); + new_map->set_elements_kind(DICTIONARY_ELEMENTS); + JSObject::MigrateToMap(array, new_map); + + if (!new_element_dictionary.is_null()) { + array->set_elements(*new_element_dictionary); + } + + if (array->elements() != + ReadOnlyRoots(isolate).empty_slow_element_dictionary()) { + Handle dictionary(array->element_dictionary(), isolate); + // Make sure we never go back to the fast case + array->RequireSlowElements(*dictionary); + JSObject::ApplyAttributesToDictionary(isolate, ReadOnlyRoots(isolate), + dictionary, + PropertyAttributes::SEALED); + } + + // Set length + Handle new_backing_store(array->elements(), isolate); + DictionaryElementsAccessor::SetLengthImpl(isolate, array, length, + new_backing_store); + } +}; + +class FastPackedSealedObjectElementsAccessor + : public FastSealedObjectElementsAccessor< + FastPackedSealedObjectElementsAccessor, + ElementsKindTraits> { +}; + +class FastHoleySealedObjectElementsAccessor + : public FastSealedObjectElementsAccessor< + FastHoleySealedObjectElementsAccessor, + ElementsKindTraits> { +}; + +template +class FastFrozenObjectElementsAccessor + : public FastSmiOrObjectElementsAccessor { + public: + using BackingStore = typename KindTraits::BackingStore; + + static inline void SetImpl(Handle holder, uint32_t entry, + Object value) { + UNREACHABLE(); + } + + static inline void SetImpl(FixedArrayBase backing_store, uint32_t entry, + Object value) { + UNREACHABLE(); + } + + static inline void SetImpl(FixedArrayBase backing_store, uint32_t entry, + Object value, WriteBarrierMode mode) { + UNREACHABLE(); + } + + static Handle RemoveElement(Handle receiver, + Where remove_position) { + UNREACHABLE(); + } + + static void DeleteImpl(Handle obj, uint32_t entry) { + UNREACHABLE(); + } + + static void DeleteAtEnd(Handle obj, + Handle backing_store, uint32_t entry) { + UNREACHABLE(); + } + + static void 
DeleteCommon(Handle obj, uint32_t entry, + Handle store) { + UNREACHABLE(); + } + + static Handle PopImpl(Handle receiver) { UNREACHABLE(); } + + static uint32_t PushImpl(Handle receiver, Arguments* args, + uint32_t push_size) { + UNREACHABLE(); + } + + static void AddImpl(Handle object, uint32_t index, + Handle value, PropertyAttributes attributes, + uint32_t new_capacity) { + UNREACHABLE(); + } + + static void SetLengthImpl(Isolate* isolate, Handle array, + uint32_t length, + Handle backing_store) { + UNREACHABLE(); + } + + static void ReconfigureImpl(Handle object, + Handle store, uint32_t entry, + Handle value, + PropertyAttributes attributes) { + UNREACHABLE(); + } +}; + +class FastPackedFrozenObjectElementsAccessor + : public FastFrozenObjectElementsAccessor< + FastPackedFrozenObjectElementsAccessor, + ElementsKindTraits> { +}; + +class FastHoleyFrozenObjectElementsAccessor + : public FastFrozenObjectElementsAccessor< + FastHoleyFrozenObjectElementsAccessor, + ElementsKindTraits> { +}; + +class FastHoleyObjectElementsAccessor + : public FastSmiOrObjectElementsAccessor< + FastHoleyObjectElementsAccessor, ElementsKindTraits> { +}; + +template +class FastDoubleElementsAccessor + : public FastElementsAccessor { + public: + static Handle GetImpl(Isolate* isolate, FixedArrayBase backing_store, + uint32_t entry) { + return FixedDoubleArray::get(FixedDoubleArray::cast(backing_store), entry, + isolate); + } + + static inline void SetImpl(Handle holder, uint32_t entry, + Object value) { + SetImpl(holder->elements(), entry, value); + } + + static inline void SetImpl(FixedArrayBase backing_store, uint32_t entry, + Object value) { + FixedDoubleArray::cast(backing_store).set(entry, value.Number()); + } + + static inline void SetImpl(FixedArrayBase backing_store, uint32_t entry, + Object value, WriteBarrierMode mode) { + FixedDoubleArray::cast(backing_store).set(entry, value.Number()); + } + + static void CopyElementsImpl(Isolate* isolate, FixedArrayBase from, + uint32_t 
from_start, FixedArrayBase to, + ElementsKind from_kind, uint32_t to_start, + int packed_size, int copy_size) { + DisallowHeapAllocation no_allocation; + switch (from_kind) { + case PACKED_SMI_ELEMENTS: + CopyPackedSmiToDoubleElements(from, from_start, to, to_start, + packed_size, copy_size); + break; + case HOLEY_SMI_ELEMENTS: + CopySmiToDoubleElements(from, from_start, to, to_start, copy_size); + break; + case PACKED_DOUBLE_ELEMENTS: + case HOLEY_DOUBLE_ELEMENTS: + CopyDoubleToDoubleElements(from, from_start, to, to_start, copy_size); + break; + case PACKED_ELEMENTS: + case PACKED_FROZEN_ELEMENTS: + case PACKED_SEALED_ELEMENTS: + case HOLEY_ELEMENTS: + case HOLEY_FROZEN_ELEMENTS: + case HOLEY_SEALED_ELEMENTS: + CopyObjectToDoubleElements(from, from_start, to, to_start, copy_size); + break; + case DICTIONARY_ELEMENTS: + CopyDictionaryToDoubleElements(isolate, from, from_start, to, to_start, + copy_size); + break; + case FAST_SLOPPY_ARGUMENTS_ELEMENTS: + case SLOW_SLOPPY_ARGUMENTS_ELEMENTS: + case FAST_STRING_WRAPPER_ELEMENTS: + case SLOW_STRING_WRAPPER_ELEMENTS: + case NO_ELEMENTS: +#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype) case TYPE##_ELEMENTS: + TYPED_ARRAYS(TYPED_ARRAY_CASE) +#undef TYPED_ARRAY_CASE + // This function is currently only used for JSArrays with non-zero + // length. 
+ UNREACHABLE(); + } + } + + static Maybe CollectValuesOrEntriesImpl( + Isolate* isolate, Handle object, + Handle values_or_entries, bool get_entries, int* nof_items, + PropertyFilter filter) { + Handle elements( + FixedDoubleArray::cast(object->elements()), isolate); + int count = 0; + uint32_t length = elements->length(); + for (uint32_t index = 0; index < length; ++index) { + if (!Subclass::HasEntryImpl(isolate, *elements, index)) continue; + Handle value = Subclass::GetImpl(isolate, *elements, index); + if (get_entries) { + value = MakeEntryPair(isolate, index, value); + } + values_or_entries->set(count++, *value); + } + *nof_items = count; + return Just(true); + } + + static Maybe IndexOfValueImpl(Isolate* isolate, + Handle receiver, + Handle search_value, + uint32_t start_from, uint32_t length) { + DCHECK(JSObject::PrototypeHasNoElements(isolate, *receiver)); + DisallowHeapAllocation no_gc; + FixedArrayBase elements_base = receiver->elements(); + Object value = *search_value; + + length = std::min(static_cast(elements_base.length()), length); + + if (start_from >= length) return Just(-1); + + if (!value.IsNumber()) { + return Just(-1); + } + if (value.IsNaN()) { + return Just(-1); + } + double numeric_search_value = value.Number(); + FixedDoubleArray elements = FixedDoubleArray::cast(receiver->elements()); + + for (uint32_t k = start_from; k < length; ++k) { + if (elements.is_the_hole(k)) { + continue; + } + if (elements.get_scalar(k) == numeric_search_value) { + return Just(k); + } + } + return Just(-1); + } +}; + +class FastPackedDoubleElementsAccessor + : public FastDoubleElementsAccessor< + FastPackedDoubleElementsAccessor, + ElementsKindTraits> {}; + +class FastHoleyDoubleElementsAccessor + : public FastDoubleElementsAccessor< + FastHoleyDoubleElementsAccessor, + ElementsKindTraits> {}; + +// Super class for all external element arrays. 
+template +class TypedElementsAccessor + : public ElementsAccessorBase, + ElementsKindTraits> { + public: + using BackingStore = typename ElementsKindTraits::BackingStore; + using AccessorClass = TypedElementsAccessor; + + // Conversions from (other) scalar values. + static ElementType FromScalar(int value) { + return static_cast(value); + } + static ElementType FromScalar(uint32_t value) { + return static_cast(value); + } + static ElementType FromScalar(double value) { + return FromScalar(DoubleToInt32(value)); + } + static ElementType FromScalar(int64_t value) { UNREACHABLE(); } + static ElementType FromScalar(uint64_t value) { UNREACHABLE(); } + + // Conversions from objects / handles. + static ElementType FromObject(Object value, bool* lossless = nullptr) { + if (value.IsSmi()) { + return FromScalar(Smi::ToInt(value)); + } else if (value.IsHeapNumber()) { + return FromScalar(HeapNumber::cast(value).value()); + } else { + // Clamp undefined here as well. All other types have been + // converted to a number type further up in the call chain. + DCHECK(value.IsUndefined()); + return FromScalar(Oddball::cast(value).to_number_raw()); + } + } + static ElementType FromHandle(Handle value, + bool* lossless = nullptr) { + return FromObject(*value, lossless); + } + + // Conversion of scalar value to handlified object. + static Handle ToHandle(Isolate* isolate, ElementType value); + + static void SetImpl(Handle holder, uint32_t entry, Object value) { + Handle typed_array = Handle::cast(holder); + DCHECK_LE(entry, typed_array->length()); + SetImpl(static_cast(typed_array->DataPtr()), entry, + FromObject(value)); + } + + static void SetImpl(ElementType* data_ptr, size_t entry, ElementType value) { + // The JavaScript memory model allows for racy reads and writes to a + // SharedArrayBuffer's backing store. ThreadSanitizer will catch these + // racy accesses and warn about them, so we disable TSAN for these reads + // and writes using annotations. 
+ // + // We don't use relaxed atomics here, as it is not a requirement of the + // JavaScript memory model to have tear-free reads of overlapping accesses, + // and using relaxed atomics may introduce overhead. + TSAN_ANNOTATE_IGNORE_WRITES_BEGIN; + if (COMPRESS_POINTERS_BOOL && alignof(ElementType) > kTaggedSize) { + // TODO(ishell, v8:8875): When pointer compression is enabled 8-byte size + // fields (external pointers, doubles and BigInt data) are only + // kTaggedSize aligned so we have to use unaligned pointer friendly way of + // accessing them in order to avoid undefined behavior in C++ code. + WriteUnalignedValue( + reinterpret_cast
(data_ptr + entry), value); + } else { + data_ptr[entry] = value; + } + TSAN_ANNOTATE_IGNORE_WRITES_END; + } + + static Handle GetInternalImpl(Handle holder, + uint32_t entry) { + Handle typed_array = Handle::cast(holder); + Isolate* isolate = typed_array->GetIsolate(); + DCHECK_LE(entry, typed_array->length()); + DCHECK(!typed_array->WasDetached()); + ElementType elem = + GetImpl(static_cast(typed_array->DataPtr()), entry); + return ToHandle(isolate, elem); + } + + static Handle GetImpl(Isolate* isolate, FixedArrayBase backing_store, + uint32_t entry) { + UNREACHABLE(); + } + + static ElementType GetImpl(ElementType* data_ptr, size_t entry) { + // The JavaScript memory model allows for racy reads and writes to a + // SharedArrayBuffer's backing store. ThreadSanitizer will catch these + // racy accesses and warn about them, so we disable TSAN for these reads + // and writes using annotations. + // + // We don't use relaxed atomics here, as it is not a requirement of the + // JavaScript memory model to have tear-free reads of overlapping accesses, + // and using relaxed atomics may introduce overhead. + TSAN_ANNOTATE_IGNORE_READS_BEGIN; + ElementType result; + if (COMPRESS_POINTERS_BOOL && alignof(ElementType) > kTaggedSize) { + // TODO(ishell, v8:8875): When pointer compression is enabled 8-byte size + // fields (external pointers, doubles and BigInt data) are only + // kTaggedSize aligned so we have to use unaligned pointer friendly way of + // accessing them in order to avoid undefined behavior in C++ code. + result = ReadUnalignedValue( + reinterpret_cast
(data_ptr + entry)); + } else { + result = data_ptr[entry]; + } + TSAN_ANNOTATE_IGNORE_READS_END; + return result; + } + + static PropertyDetails GetDetailsImpl(JSObject holder, uint32_t entry) { + return PropertyDetails(kData, DONT_DELETE, PropertyCellType::kNoCell); + } + + static PropertyDetails GetDetailsImpl(FixedArrayBase backing_store, + uint32_t entry) { + return PropertyDetails(kData, DONT_DELETE, PropertyCellType::kNoCell); + } + + static bool HasElementImpl(Isolate* isolate, JSObject holder, uint32_t index, + FixedArrayBase backing_store, + PropertyFilter filter) { + return index < AccessorClass::GetCapacityImpl(holder, backing_store); + } + + static bool HasAccessorsImpl(JSObject holder, FixedArrayBase backing_store) { + return false; + } + + static void SetLengthImpl(Isolate* isolate, Handle array, + uint32_t length, + Handle backing_store) { + // External arrays do not support changing their length. + UNREACHABLE(); + } + + static void DeleteImpl(Handle obj, uint32_t entry) { + UNREACHABLE(); + } + + static uint32_t GetIndexForEntryImpl(FixedArrayBase backing_store, + uint32_t entry) { + return entry; + } + + static uint32_t GetEntryForIndexImpl(Isolate* isolate, JSObject holder, + FixedArrayBase backing_store, + uint32_t index, PropertyFilter filter) { + return index < AccessorClass::GetCapacityImpl(holder, backing_store) + ? index + : kMaxUInt32; + } + + static uint32_t GetCapacityImpl(JSObject holder, + FixedArrayBase backing_store) { + JSTypedArray typed_array = JSTypedArray::cast(holder); + if (typed_array.WasDetached()) return 0; + // TODO(bmeurer, v8:4153): We need to support arbitrary size_t here. 
+ return static_cast(typed_array.length()); + } + + static uint32_t NumberOfElementsImpl(JSObject receiver, + FixedArrayBase backing_store) { + return AccessorClass::GetCapacityImpl(receiver, backing_store); + } + + static void AddElementsToKeyAccumulatorImpl(Handle receiver, + KeyAccumulator* accumulator, + AddKeyConversion convert) { + Isolate* isolate = receiver->GetIsolate(); + Handle elements(receiver->elements(), isolate); + uint32_t length = AccessorClass::GetCapacityImpl(*receiver, *elements); + for (uint32_t i = 0; i < length; i++) { + Handle value = AccessorClass::GetInternalImpl(receiver, i); + accumulator->AddKey(value, convert); + } + } + + static Maybe CollectValuesOrEntriesImpl( + Isolate* isolate, Handle object, + Handle values_or_entries, bool get_entries, int* nof_items, + PropertyFilter filter) { + int count = 0; + if ((filter & ONLY_CONFIGURABLE) == 0) { + Handle elements(object->elements(), isolate); + uint32_t length = AccessorClass::GetCapacityImpl(*object, *elements); + for (uint32_t index = 0; index < length; ++index) { + Handle value = AccessorClass::GetInternalImpl(object, index); + if (get_entries) { + value = MakeEntryPair(isolate, index, value); + } + values_or_entries->set(count++, *value); + } + } + *nof_items = count; + return Just(true); + } + + static Object FillImpl(Handle receiver, Handle value, + uint32_t start, uint32_t end) { + Handle typed_array = Handle::cast(receiver); + DCHECK(!typed_array->WasDetached()); + DCHECK_LE(0, start); + DCHECK_LE(start, end); + DCHECK_LE(end, typed_array->length()); + DisallowHeapAllocation no_gc; + ElementType scalar = FromHandle(value); + ElementType* data = static_cast(typed_array->DataPtr()); + if (COMPRESS_POINTERS_BOOL && alignof(ElementType) > kTaggedSize) { + // TODO(ishell, v8:8875): See UnalignedSlot for details. 
+ std::fill(UnalignedSlot(data + start), + UnalignedSlot(data + end), scalar); + } else { + std::fill(data + start, data + end, scalar); + } + return *typed_array; + } + + static Maybe IncludesValueImpl(Isolate* isolate, + Handle receiver, + Handle value, + uint32_t start_from, uint32_t length) { + DisallowHeapAllocation no_gc; + JSTypedArray typed_array = JSTypedArray::cast(*receiver); + + // TODO(caitp): return Just(false) here when implementing strict throwing on + // detached views. + if (typed_array.WasDetached()) { + return Just(value->IsUndefined(isolate) && length > start_from); + } + + if (value->IsUndefined(isolate) && length > typed_array.length()) { + return Just(true); + } + + // Prototype has no elements, and not searching for the hole --- limit + // search to backing store length. + if (typed_array.length() < length) { + // TODO(bmeurer, v8:4153): Don't cast to uint32_t here. + length = static_cast(typed_array.length()); + } + + ElementType typed_search_value; + ElementType* data_ptr = + reinterpret_cast(typed_array.DataPtr()); + if (Kind == BIGINT64_ELEMENTS || Kind == BIGUINT64_ELEMENTS) { + if (!value->IsBigInt()) return Just(false); + bool lossless; + typed_search_value = FromHandle(value, &lossless); + if (!lossless) return Just(false); + } else { + if (!value->IsNumber()) return Just(false); + double search_value = value->Number(); + if (!std::isfinite(search_value)) { + // Integral types cannot represent +Inf or NaN. + if (Kind < FLOAT32_ELEMENTS || Kind > FLOAT64_ELEMENTS) { + return Just(false); + } + if (std::isnan(search_value)) { + for (uint32_t k = start_from; k < length; ++k) { + double elem_k = + static_cast(AccessorClass::GetImpl(data_ptr, k)); + if (std::isnan(elem_k)) return Just(true); + } + return Just(false); + } + } else if (search_value < std::numeric_limits::lowest() || + search_value > std::numeric_limits::max()) { + // Return false if value can't be represented in this space. 
+ return Just(false); + } + typed_search_value = static_cast(search_value); + if (static_cast(typed_search_value) != search_value) { + return Just(false); // Loss of precision. + } + } + + for (uint32_t k = start_from; k < length; ++k) { + ElementType elem_k = AccessorClass::GetImpl(data_ptr, k); + if (elem_k == typed_search_value) return Just(true); + } + return Just(false); + } + + static Maybe IndexOfValueImpl(Isolate* isolate, + Handle receiver, + Handle value, + uint32_t start_from, uint32_t length) { + DisallowHeapAllocation no_gc; + JSTypedArray typed_array = JSTypedArray::cast(*receiver); + + if (typed_array.WasDetached()) return Just(-1); + + ElementType typed_search_value; + + ElementType* data_ptr = + reinterpret_cast(typed_array.DataPtr()); + if (Kind == BIGINT64_ELEMENTS || Kind == BIGUINT64_ELEMENTS) { + if (!value->IsBigInt()) return Just(-1); + bool lossless; + typed_search_value = FromHandle(value, &lossless); + if (!lossless) return Just(-1); + } else { + if (!value->IsNumber()) return Just(-1); + double search_value = value->Number(); + if (!std::isfinite(search_value)) { + // Integral types cannot represent +Inf or NaN. + if (Kind < FLOAT32_ELEMENTS || Kind > FLOAT64_ELEMENTS) { + return Just(-1); + } + if (std::isnan(search_value)) { + return Just(-1); + } + } else if (search_value < std::numeric_limits::lowest() || + search_value > std::numeric_limits::max()) { + // Return false if value can't be represented in this ElementsKind. + return Just(-1); + } + typed_search_value = static_cast(search_value); + if (static_cast(typed_search_value) != search_value) { + return Just(-1); // Loss of precision. + } + } + + // Prototype has no elements, and not searching for the hole --- limit + // search to backing store length. + if (typed_array.length() < length) { + // TODO(bmeurer, v8:4153): Don't cast to uint32_t here. 
+ length = static_cast(typed_array.length()); + } + + for (uint32_t k = start_from; k < length; ++k) { + ElementType elem_k = AccessorClass::GetImpl(data_ptr, k); + if (elem_k == typed_search_value) return Just(k); + } + return Just(-1); + } + + static Maybe LastIndexOfValueImpl(Handle receiver, + Handle value, + uint32_t start_from) { + DisallowHeapAllocation no_gc; + JSTypedArray typed_array = JSTypedArray::cast(*receiver); + + DCHECK(!typed_array.WasDetached()); + + ElementType typed_search_value; + + ElementType* data_ptr = + reinterpret_cast(typed_array.DataPtr()); + if (Kind == BIGINT64_ELEMENTS || Kind == BIGUINT64_ELEMENTS) { + if (!value->IsBigInt()) return Just(-1); + bool lossless; + typed_search_value = FromHandle(value, &lossless); + if (!lossless) return Just(-1); + } else { + if (!value->IsNumber()) return Just(-1); + double search_value = value->Number(); + if (!std::isfinite(search_value)) { + if (std::is_integral::value) { + // Integral types cannot represent +Inf or NaN. + return Just(-1); + } else if (std::isnan(search_value)) { + // Strict Equality Comparison of NaN is always false. + return Just(-1); + } + } else if (search_value < std::numeric_limits::lowest() || + search_value > std::numeric_limits::max()) { + // Return -1 if value can't be represented in this ElementsKind. + return Just(-1); + } + typed_search_value = static_cast(search_value); + if (static_cast(typed_search_value) != search_value) { + return Just(-1); // Loss of precision. 
+ } + } + + DCHECK_LT(start_from, typed_array.length()); + uint32_t k = start_from; + do { + ElementType elem_k = AccessorClass::GetImpl(data_ptr, k); + if (elem_k == typed_search_value) return Just(k); + } while (k-- != 0); + return Just(-1); + } + + static void ReverseImpl(JSObject receiver) { + DisallowHeapAllocation no_gc; + JSTypedArray typed_array = JSTypedArray::cast(receiver); + + DCHECK(!typed_array.WasDetached()); + + size_t len = typed_array.length(); + if (len == 0) return; + + ElementType* data = static_cast(typed_array.DataPtr()); + if (COMPRESS_POINTERS_BOOL && alignof(ElementType) > kTaggedSize) { + // TODO(ishell, v8:8875): See UnalignedSlot for details. + std::reverse(UnalignedSlot(data), + UnalignedSlot(data + len)); + } else { + std::reverse(data, data + len); + } + } + + static Handle CreateListFromArrayLikeImpl(Isolate* isolate, + Handle object, + uint32_t length) { + Handle typed_array = Handle::cast(object); + Handle result = isolate->factory()->NewFixedArray(length); + for (uint32_t i = 0; i < length; i++) { + Handle value = AccessorClass::GetInternalImpl(typed_array, i); + result->set(i, *value); + } + return result; + } + + static void CopyTypedArrayElementsSliceImpl(JSTypedArray source, + JSTypedArray destination, + size_t start, size_t end) { + DisallowHeapAllocation no_gc; + DCHECK_EQ(destination.GetElementsKind(), AccessorClass::kind()); + CHECK(!source.WasDetached()); + CHECK(!destination.WasDetached()); + DCHECK_LE(start, end); + DCHECK_LE(end, source.length()); + size_t count = end - start; + DCHECK_LE(count, destination.length()); + ElementType* dest_data = static_cast(destination.DataPtr()); + switch (source.GetElementsKind()) { +#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype) \ + case TYPE##_ELEMENTS: { \ + ctype* source_data = reinterpret_cast(source.DataPtr()) + start; \ + CopyBetweenBackingStores(source_data, dest_data, \ + count); \ + break; \ + } + TYPED_ARRAYS(TYPED_ARRAY_CASE) +#undef TYPED_ARRAY_CASE + default: + 
UNREACHABLE(); + break; + } + } + + static bool HasSimpleRepresentation(ExternalArrayType type) { + return !(type == kExternalFloat32Array || type == kExternalFloat64Array || + type == kExternalUint8ClampedArray); + } + + template + static void CopyBetweenBackingStores(SourceElementType* source_data_ptr, + ElementType* dest_data_ptr, + size_t length) { + DisallowHeapAllocation no_gc; + for (size_t i = 0; i < length; i++) { + // We use scalar accessors to avoid boxing/unboxing, so there are no + // allocations. + SourceElementType source_elem = + TypedElementsAccessor::GetImpl( + source_data_ptr, i); + ElementType dest_elem = FromScalar(source_elem); + SetImpl(dest_data_ptr, i, dest_elem); + } + } + + static void CopyElementsFromTypedArray(JSTypedArray source, + JSTypedArray destination, + size_t length, uint32_t offset) { + // The source is a typed array, so we know we don't need to do ToNumber + // side-effects, as the source elements will always be a number. + DisallowHeapAllocation no_gc; + + CHECK(!source.WasDetached()); + CHECK(!destination.WasDetached()); + + DCHECK_LE(offset, destination.length()); + DCHECK_LE(length, destination.length() - offset); + DCHECK_LE(length, source.length()); + + ExternalArrayType source_type = source.type(); + ExternalArrayType destination_type = destination.type(); + + bool same_type = source_type == destination_type; + bool same_size = source.element_size() == destination.element_size(); + bool both_are_simple = HasSimpleRepresentation(source_type) && + HasSimpleRepresentation(destination_type); + + uint8_t* source_data = static_cast(source.DataPtr()); + uint8_t* dest_data = static_cast(destination.DataPtr()); + size_t source_byte_length = source.byte_length(); + size_t dest_byte_length = destination.byte_length(); + + // We can simply copy the backing store if the types are the same, or if + // we are converting e.g. Uint8 <-> Int8, as the binary representation + // will be the same. 
This is not the case for floats or clamped Uint8, + // which have special conversion operations. + if (same_type || (same_size && both_are_simple)) { + size_t element_size = source.element_size(); + std::memmove(dest_data + offset * element_size, source_data, + length * element_size); + } else { + std::unique_ptr cloned_source_elements; + + // If the typedarrays are overlapped, clone the source. + if (dest_data + dest_byte_length > source_data && + source_data + source_byte_length > dest_data) { + cloned_source_elements.reset(new uint8_t[source_byte_length]); + std::memcpy(cloned_source_elements.get(), source_data, + source_byte_length); + source_data = cloned_source_elements.get(); + } + + switch (source.GetElementsKind()) { +#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype) \ + case TYPE##_ELEMENTS: \ + CopyBetweenBackingStores( \ + reinterpret_cast(source_data), \ + reinterpret_cast(dest_data) + offset, length); \ + break; + TYPED_ARRAYS(TYPED_ARRAY_CASE) + default: + UNREACHABLE(); + break; + } +#undef TYPED_ARRAY_CASE + } + } + + static bool HoleyPrototypeLookupRequired(Isolate* isolate, Context context, + JSArray source) { + DisallowHeapAllocation no_gc; + DisallowJavascriptExecution no_js(isolate); + +#ifdef V8_ENABLE_FORCE_SLOW_PATH + if (isolate->force_slow_path()) return true; +#endif + + Object source_proto = source.map().prototype(); + + // Null prototypes are OK - we don't need to do prototype chain lookups on + // them. 
+ if (source_proto.IsNull(isolate)) return false; + if (source_proto.IsJSProxy()) return true; + if (!context.native_context().is_initial_array_prototype( + JSObject::cast(source_proto))) { + return true; + } + + return !isolate->IsNoElementsProtectorIntact(context); + } + + static bool TryCopyElementsFastNumber(Context context, JSArray source, + JSTypedArray destination, size_t length, + uint32_t offset) { + if (Kind == BIGINT64_ELEMENTS || Kind == BIGUINT64_ELEMENTS) return false; + Isolate* isolate = source.GetIsolate(); + DisallowHeapAllocation no_gc; + DisallowJavascriptExecution no_js(isolate); + + CHECK(!destination.WasDetached()); + + size_t current_length; + DCHECK(source.length().IsNumber() && + TryNumberToSize(source.length(), ¤t_length) && + length <= current_length); + USE(current_length); + + size_t dest_length = destination.length(); + DCHECK(length + offset <= dest_length); + USE(dest_length); + + ElementsKind kind = source.GetElementsKind(); + + // When we find the hole, we normally have to look up the element on the + // prototype chain, which is not handled here and we return false instead. + // When the array has the original array prototype, and that prototype has + // not been changed in a way that would affect lookups, we can just convert + // the hole into undefined. + if (HoleyPrototypeLookupRequired(isolate, context, source)) return false; + + Oddball undefined = ReadOnlyRoots(isolate).undefined_value(); + ElementType* dest_data = + reinterpret_cast(destination.DataPtr()) + offset; + + // Fast-path for packed Smi kind. 
+ if (kind == PACKED_SMI_ELEMENTS) { + FixedArray source_store = FixedArray::cast(source.elements()); + + for (uint32_t i = 0; i < length; i++) { + Object elem = source_store.get(i); + SetImpl(dest_data, i, FromScalar(Smi::ToInt(elem))); + } + return true; + } else if (kind == HOLEY_SMI_ELEMENTS) { + FixedArray source_store = FixedArray::cast(source.elements()); + for (uint32_t i = 0; i < length; i++) { + if (source_store.is_the_hole(isolate, i)) { + SetImpl(dest_data, i, FromObject(undefined)); + } else { + Object elem = source_store.get(i); + SetImpl(dest_data, i, FromScalar(Smi::ToInt(elem))); + } + } + return true; + } else if (kind == PACKED_DOUBLE_ELEMENTS) { + // Fast-path for packed double kind. We avoid boxing and then immediately + // unboxing the double here by using get_scalar. + FixedDoubleArray source_store = FixedDoubleArray::cast(source.elements()); + + for (uint32_t i = 0; i < length; i++) { + // Use the from_double conversion for this specific TypedArray type, + // rather than relying on C++ to convert elem. 
+ double elem = source_store.get_scalar(i); + SetImpl(dest_data, i, FromScalar(elem)); + } + return true; + } else if (kind == HOLEY_DOUBLE_ELEMENTS) { + FixedDoubleArray source_store = FixedDoubleArray::cast(source.elements()); + for (uint32_t i = 0; i < length; i++) { + if (source_store.is_the_hole(i)) { + SetImpl(dest_data, i, FromObject(undefined)); + } else { + double elem = source_store.get_scalar(i); + SetImpl(dest_data, i, FromScalar(elem)); + } + } + return true; + } + return false; + } + + static Object CopyElementsHandleSlow(Handle source, + Handle destination, + size_t length, uint32_t offset) { + Isolate* isolate = destination->GetIsolate(); + for (size_t i = 0; i < length; i++) { + Handle elem; + if (i <= kMaxUInt32) { + LookupIterator it(isolate, source, static_cast(i)); + ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, elem, + Object::GetProperty(&it)); + } else { + char buffer[kDoubleToCStringMinBufferSize]; + Vector string(buffer, arraysize(buffer)); + DoubleToCString(static_cast(i), string); + Handle name = isolate->factory()->InternalizeUtf8String(string); + LookupIterator it(isolate, source, name); + ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, elem, + Object::GetProperty(&it)); + } + if (Kind == BIGINT64_ELEMENTS || Kind == BIGUINT64_ELEMENTS) { + ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, elem, + BigInt::FromObject(isolate, elem)); + } else { + ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, elem, + Object::ToNumber(isolate, elem)); + } + + if (V8_UNLIKELY(destination->WasDetached())) { + const char* op = "set"; + const MessageTemplate message = MessageTemplate::kDetachedOperation; + Handle operation = + isolate->factory()->NewStringFromAsciiChecked(op); + THROW_NEW_ERROR_RETURN_FAILURE(isolate, + NewTypeError(message, operation)); + } + // The spec says we store the length, then get each element, so we don't + // need to check changes to length. + // TODO(bmeurer, v8:4153): Remove this static_cast. 
+ SetImpl(destination, static_cast(offset + i), *elem); + } + return *isolate->factory()->undefined_value(); + } + + // This doesn't guarantee that the destination array will be completely + // filled. The caller must do this by passing a source with equal length, if + // that is required. + static Object CopyElementsHandleImpl(Handle source, + Handle destination, + size_t length, uint32_t offset) { + Isolate* isolate = destination->GetIsolate(); + Handle destination_ta = + Handle::cast(destination); + DCHECK_LE(offset + length, destination_ta->length()); + CHECK(!destination_ta->WasDetached()); + + if (length == 0) return *isolate->factory()->undefined_value(); + + // All conversions from TypedArrays can be done without allocation. + if (source->IsJSTypedArray()) { + Handle source_ta = Handle::cast(source); + ElementsKind source_kind = source_ta->GetElementsKind(); + bool source_is_bigint = + source_kind == BIGINT64_ELEMENTS || source_kind == BIGUINT64_ELEMENTS; + bool target_is_bigint = + Kind == BIGINT64_ELEMENTS || Kind == BIGUINT64_ELEMENTS; + // If we have to copy more elements than we have in the source, we need to + // do special handling and conversion; that happens in the slow case. + if (source_is_bigint == target_is_bigint && !source_ta->WasDetached() && + length + offset <= source_ta->length()) { + CopyElementsFromTypedArray(*source_ta, *destination_ta, length, offset); + return *isolate->factory()->undefined_value(); + } + } else if (source->IsJSArray()) { + // Fast cases for packed numbers kinds where we don't need to allocate. 
+ Handle source_js_array = Handle::cast(source); + size_t current_length; + if (source_js_array->length().IsNumber() && + TryNumberToSize(source_js_array->length(), ¤t_length)) { + if (length <= current_length) { + Handle source_array = Handle::cast(source); + if (TryCopyElementsFastNumber(isolate->context(), *source_array, + *destination_ta, length, offset)) { + return *isolate->factory()->undefined_value(); + } + } + } + } + // Final generic case that handles prototype chain lookups, getters, proxies + // and observable side effects via valueOf, etc. + return CopyElementsHandleSlow(source, destination_ta, length, offset); + } +}; + +// static +template <> +Handle TypedElementsAccessor::ToHandle( + Isolate* isolate, int8_t value) { + return handle(Smi::FromInt(value), isolate); +} + +// static +template <> +Handle TypedElementsAccessor::ToHandle( + Isolate* isolate, uint8_t value) { + return handle(Smi::FromInt(value), isolate); +} + +// static +template <> +Handle TypedElementsAccessor::ToHandle( + Isolate* isolate, int16_t value) { + return handle(Smi::FromInt(value), isolate); +} + +// static +template <> +Handle TypedElementsAccessor::ToHandle( + Isolate* isolate, uint16_t value) { + return handle(Smi::FromInt(value), isolate); +} + +// static +template <> +Handle TypedElementsAccessor::ToHandle( + Isolate* isolate, int32_t value) { + return isolate->factory()->NewNumberFromInt(value); +} + +// static +template <> +Handle TypedElementsAccessor::ToHandle( + Isolate* isolate, uint32_t value) { + return isolate->factory()->NewNumberFromUint(value); +} + +// static +template <> +float TypedElementsAccessor::FromScalar(double value) { + using limits = std::numeric_limits; + if (value > limits::max()) return limits::infinity(); + if (value < limits::lowest()) return -limits::infinity(); + return static_cast(value); +} + +// static +template <> +Handle TypedElementsAccessor::ToHandle( + Isolate* isolate, float value) { + return isolate->factory()->NewNumber(value); 
+} + +// static +template <> +double TypedElementsAccessor::FromScalar( + double value) { + return value; +} + +// static +template <> +Handle TypedElementsAccessor::ToHandle( + Isolate* isolate, double value) { + return isolate->factory()->NewNumber(value); +} + +// static +template <> +uint8_t TypedElementsAccessor::FromScalar( + int value) { + if (value < 0x00) return 0x00; + if (value > 0xFF) return 0xFF; + return static_cast(value); +} + +// static +template <> +uint8_t TypedElementsAccessor::FromScalar( + uint32_t value) { + // We need this special case for Uint32 -> Uint8Clamped, because the highest + // Uint32 values will be negative as an int, clamping to 0, rather than 255. + if (value > 0xFF) return 0xFF; + return static_cast(value); +} + +// static +template <> +uint8_t TypedElementsAccessor::FromScalar( + double value) { + // Handle NaNs and less than zero values which clamp to zero. + if (!(value > 0)) return 0; + if (value > 0xFF) return 0xFF; + return static_cast(lrint(value)); +} + +// static +template <> +Handle TypedElementsAccessor::ToHandle( + Isolate* isolate, uint8_t value) { + return handle(Smi::FromInt(value), isolate); +} + +// static +template <> +int64_t TypedElementsAccessor::FromScalar( + int value) { + UNREACHABLE(); +} + +// static +template <> +int64_t TypedElementsAccessor::FromScalar( + uint32_t value) { + UNREACHABLE(); +} + +// static +template <> +int64_t TypedElementsAccessor::FromScalar( + double value) { + UNREACHABLE(); +} + +// static +template <> +int64_t TypedElementsAccessor::FromScalar( + int64_t value) { + return value; +} + +// static +template <> +int64_t TypedElementsAccessor::FromScalar( + uint64_t value) { + return static_cast(value); +} + +// static +template <> +int64_t TypedElementsAccessor::FromObject( + Object value, bool* lossless) { + return BigInt::cast(value).AsInt64(lossless); +} + +// static +template <> +Handle TypedElementsAccessor::ToHandle( + Isolate* isolate, int64_t value) { + return 
BigInt::FromInt64(isolate, value); +} + +// static +template <> +uint64_t TypedElementsAccessor::FromScalar( + int value) { + UNREACHABLE(); +} + +// static +template <> +uint64_t TypedElementsAccessor::FromScalar( + uint32_t value) { + UNREACHABLE(); +} + +// static +template <> +uint64_t TypedElementsAccessor::FromScalar( + double value) { + UNREACHABLE(); +} + +// static +template <> +uint64_t TypedElementsAccessor::FromScalar( + int64_t value) { + return static_cast(value); +} + +// static +template <> +uint64_t TypedElementsAccessor::FromScalar( + uint64_t value) { + return value; +} + +// static +template <> +uint64_t TypedElementsAccessor::FromObject( + Object value, bool* lossless) { + return BigInt::cast(value).AsUint64(lossless); +} + +// static +template <> +Handle TypedElementsAccessor::ToHandle( + Isolate* isolate, uint64_t value) { + return BigInt::FromUint64(isolate, value); +} + +#define FIXED_ELEMENTS_ACCESSOR(Type, type, TYPE, ctype) \ + using Type##ElementsAccessor = TypedElementsAccessor; +TYPED_ARRAYS(FIXED_ELEMENTS_ACCESSOR) +#undef FIXED_ELEMENTS_ACCESSOR + +template +class SloppyArgumentsElementsAccessor + : public ElementsAccessorBase { + public: + static void ConvertArgumentsStoreResult( + Handle elements, Handle result) { + UNREACHABLE(); + } + + static Handle GetImpl(Isolate* isolate, FixedArrayBase parameters, + uint32_t entry) { + Handle elements( + SloppyArgumentsElements::cast(parameters), isolate); + uint32_t length = elements->parameter_map_length(); + if (entry < length) { + // Read context mapped entry. + DisallowHeapAllocation no_gc; + Object probe = elements->get_mapped_entry(entry); + DCHECK(!probe.IsTheHole(isolate)); + Context context = elements->context(); + int context_entry = Smi::ToInt(probe); + DCHECK(!context.get(context_entry).IsTheHole(isolate)); + return handle(context.get(context_entry), isolate); + } else { + // Entry is not context mapped, defer to the arguments. 
+ Handle result = ArgumentsAccessor::GetImpl( + isolate, elements->arguments(), entry - length); + return Subclass::ConvertArgumentsStoreResult(isolate, elements, result); + } + } + + static void TransitionElementsKindImpl(Handle object, + Handle map) { + UNREACHABLE(); + } + + static void GrowCapacityAndConvertImpl(Handle object, + uint32_t capacity) { + UNREACHABLE(); + } + + static inline void SetImpl(Handle holder, uint32_t entry, + Object value) { + SetImpl(holder->elements(), entry, value); + } + + static inline void SetImpl(FixedArrayBase store, uint32_t entry, + Object value) { + SloppyArgumentsElements elements = SloppyArgumentsElements::cast(store); + uint32_t length = elements.parameter_map_length(); + if (entry < length) { + // Store context mapped entry. + DisallowHeapAllocation no_gc; + Object probe = elements.get_mapped_entry(entry); + DCHECK(!probe.IsTheHole()); + Context context = elements.context(); + int context_entry = Smi::ToInt(probe); + DCHECK(!context.get(context_entry).IsTheHole()); + context.set(context_entry, value); + } else { + // Entry is not context mapped defer to arguments. + FixedArray arguments = elements.arguments(); + Object current = ArgumentsAccessor::GetRaw(arguments, entry - length); + if (current.IsAliasedArgumentsEntry()) { + AliasedArgumentsEntry alias = AliasedArgumentsEntry::cast(current); + Context context = elements.context(); + int context_entry = alias.aliased_context_slot(); + DCHECK(!context.get(context_entry).IsTheHole()); + context.set(context_entry, value); + } else { + ArgumentsAccessor::SetImpl(arguments, entry - length, value); + } + } + } + + static void SetLengthImpl(Isolate* isolate, Handle array, + uint32_t length, + Handle parameter_map) { + // Sloppy arguments objects are not arrays. 
+ UNREACHABLE(); + } + + static uint32_t GetCapacityImpl(JSObject holder, FixedArrayBase store) { + SloppyArgumentsElements elements = SloppyArgumentsElements::cast(store); + FixedArray arguments = elements.arguments(); + return elements.parameter_map_length() + + ArgumentsAccessor::GetCapacityImpl(holder, arguments); + } + + static uint32_t GetMaxNumberOfEntries(JSObject holder, + FixedArrayBase backing_store) { + SloppyArgumentsElements elements = + SloppyArgumentsElements::cast(backing_store); + FixedArrayBase arguments = elements.arguments(); + return elements.parameter_map_length() + + ArgumentsAccessor::GetMaxNumberOfEntries(holder, arguments); + } + + static uint32_t NumberOfElementsImpl(JSObject receiver, + FixedArrayBase backing_store) { + Isolate* isolate = receiver.GetIsolate(); + SloppyArgumentsElements elements = + SloppyArgumentsElements::cast(backing_store); + FixedArrayBase arguments = elements.arguments(); + uint32_t nof_elements = 0; + uint32_t length = elements.parameter_map_length(); + for (uint32_t entry = 0; entry < length; entry++) { + if (HasParameterMapArg(isolate, elements, entry)) nof_elements++; + } + return nof_elements + + ArgumentsAccessor::NumberOfElementsImpl(receiver, arguments); + } + + static void AddElementsToKeyAccumulatorImpl(Handle receiver, + KeyAccumulator* accumulator, + AddKeyConversion convert) { + Isolate* isolate = accumulator->isolate(); + Handle elements(receiver->elements(), isolate); + uint32_t length = GetCapacityImpl(*receiver, *elements); + for (uint32_t entry = 0; entry < length; entry++) { + if (!HasEntryImpl(isolate, *elements, entry)) continue; + Handle value = GetImpl(isolate, *elements, entry); + accumulator->AddKey(value, convert); + } + } + + static bool HasEntryImpl(Isolate* isolate, FixedArrayBase parameters, + uint32_t entry) { + SloppyArgumentsElements elements = + SloppyArgumentsElements::cast(parameters); + uint32_t length = elements.parameter_map_length(); + if (entry < length) { + return 
HasParameterMapArg(isolate, elements, entry); + } + FixedArrayBase arguments = elements.arguments(); + return ArgumentsAccessor::HasEntryImpl(isolate, arguments, entry - length); + } + + static bool HasAccessorsImpl(JSObject holder, FixedArrayBase backing_store) { + SloppyArgumentsElements elements = + SloppyArgumentsElements::cast(backing_store); + FixedArray arguments = elements.arguments(); + return ArgumentsAccessor::HasAccessorsImpl(holder, arguments); + } + + static uint32_t GetIndexForEntryImpl(FixedArrayBase parameters, + uint32_t entry) { + SloppyArgumentsElements elements = + SloppyArgumentsElements::cast(parameters); + uint32_t length = elements.parameter_map_length(); + if (entry < length) return entry; + FixedArray arguments = elements.arguments(); + return ArgumentsAccessor::GetIndexForEntryImpl(arguments, entry - length); + } + + static uint32_t GetEntryForIndexImpl(Isolate* isolate, JSObject holder, + FixedArrayBase parameters, + uint32_t index, PropertyFilter filter) { + SloppyArgumentsElements elements = + SloppyArgumentsElements::cast(parameters); + if (HasParameterMapArg(isolate, elements, index)) return index; + FixedArray arguments = elements.arguments(); + uint32_t entry = ArgumentsAccessor::GetEntryForIndexImpl( + isolate, holder, arguments, index, filter); + if (entry == kMaxUInt32) return kMaxUInt32; + // Arguments entries could overlap with the dictionary entries, hence offset + // them by the number of context mapped entries. 
+ return elements.parameter_map_length() + entry; + } + + static PropertyDetails GetDetailsImpl(JSObject holder, uint32_t entry) { + SloppyArgumentsElements elements = + SloppyArgumentsElements::cast(holder.elements()); + uint32_t length = elements.parameter_map_length(); + if (entry < length) { + return PropertyDetails(kData, NONE, PropertyCellType::kNoCell); + } + FixedArray arguments = elements.arguments(); + return ArgumentsAccessor::GetDetailsImpl(arguments, entry - length); + } + + static bool HasParameterMapArg(Isolate* isolate, + SloppyArgumentsElements elements, + uint32_t index) { + uint32_t length = elements.parameter_map_length(); + if (index >= length) return false; + return !elements.get_mapped_entry(index).IsTheHole(isolate); + } + + static void DeleteImpl(Handle obj, uint32_t entry) { + Handle elements( + SloppyArgumentsElements::cast(obj->elements()), obj->GetIsolate()); + uint32_t length = elements->parameter_map_length(); + uint32_t delete_or_entry = entry; + if (entry < length) { + delete_or_entry = kMaxUInt32; + } + Subclass::SloppyDeleteImpl(obj, elements, delete_or_entry); + // SloppyDeleteImpl allocates a new dictionary elements store. For making + // heap verification happy we postpone clearing out the mapped entry. + if (entry < length) { + elements->set_mapped_entry(entry, + obj->GetReadOnlyRoots().the_hole_value()); + } + } + + static void SloppyDeleteImpl(Handle obj, + Handle elements, + uint32_t entry) { + // Implemented in subclasses. 
+ UNREACHABLE(); + } + + static void CollectElementIndicesImpl(Handle object, + Handle backing_store, + KeyAccumulator* keys) { + Isolate* isolate = keys->isolate(); + uint32_t nof_indices = 0; + Handle indices = isolate->factory()->NewFixedArray( + GetCapacityImpl(*object, *backing_store)); + DirectCollectElementIndicesImpl(isolate, object, backing_store, + GetKeysConversion::kKeepNumbers, + ENUMERABLE_STRINGS, indices, &nof_indices); + SortIndices(isolate, indices, nof_indices); + for (uint32_t i = 0; i < nof_indices; i++) { + keys->AddKey(indices->get(i)); + } + } + + static Handle DirectCollectElementIndicesImpl( + Isolate* isolate, Handle object, + Handle backing_store, GetKeysConversion convert, + PropertyFilter filter, Handle list, uint32_t* nof_indices, + uint32_t insertion_index = 0) { + Handle elements = + Handle::cast(backing_store); + uint32_t length = elements->parameter_map_length(); + + for (uint32_t i = 0; i < length; ++i) { + if (elements->get_mapped_entry(i).IsTheHole(isolate)) continue; + if (convert == GetKeysConversion::kConvertToString) { + Handle index_string = isolate->factory()->Uint32ToString(i); + list->set(insertion_index, *index_string); + } else { + list->set(insertion_index, Smi::FromInt(i)); + } + insertion_index++; + } + + Handle store(elements->arguments(), isolate); + return ArgumentsAccessor::DirectCollectElementIndicesImpl( + isolate, object, store, convert, filter, list, nof_indices, + insertion_index); + } + + static Maybe IncludesValueImpl(Isolate* isolate, + Handle object, + Handle value, + uint32_t start_from, uint32_t length) { + DCHECK(JSObject::PrototypeHasNoElements(isolate, *object)); + Handle original_map(object->map(), isolate); + Handle elements( + SloppyArgumentsElements::cast(object->elements()), isolate); + bool search_for_hole = value->IsUndefined(isolate); + + for (uint32_t k = start_from; k < length; ++k) { + DCHECK_EQ(object->map(), *original_map); + uint32_t entry = + GetEntryForIndexImpl(isolate, *object, 
*elements, k, ALL_PROPERTIES); + if (entry == kMaxUInt32) { + if (search_for_hole) return Just(true); + continue; + } + + Handle element_k = Subclass::GetImpl(isolate, *elements, entry); + + if (element_k->IsAccessorPair()) { + LookupIterator it(isolate, object, k, LookupIterator::OWN); + DCHECK(it.IsFound()); + DCHECK_EQ(it.state(), LookupIterator::ACCESSOR); + ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, element_k, + Object::GetPropertyWithAccessor(&it), + Nothing()); + + if (value->SameValueZero(*element_k)) return Just(true); + + if (object->map() != *original_map) { + // Some mutation occurred in accessor. Abort "fast" path + return IncludesValueSlowPath(isolate, object, value, k + 1, length); + } + } else if (value->SameValueZero(*element_k)) { + return Just(true); + } + } + return Just(false); + } + + static Maybe IndexOfValueImpl(Isolate* isolate, + Handle object, + Handle value, + uint32_t start_from, uint32_t length) { + DCHECK(JSObject::PrototypeHasNoElements(isolate, *object)); + Handle original_map(object->map(), isolate); + Handle elements( + SloppyArgumentsElements::cast(object->elements()), isolate); + + for (uint32_t k = start_from; k < length; ++k) { + DCHECK_EQ(object->map(), *original_map); + uint32_t entry = + GetEntryForIndexImpl(isolate, *object, *elements, k, ALL_PROPERTIES); + if (entry == kMaxUInt32) { + continue; + } + + Handle element_k = Subclass::GetImpl(isolate, *elements, entry); + + if (element_k->IsAccessorPair()) { + LookupIterator it(isolate, object, k, LookupIterator::OWN); + DCHECK(it.IsFound()); + DCHECK_EQ(it.state(), LookupIterator::ACCESSOR); + ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, element_k, + Object::GetPropertyWithAccessor(&it), + Nothing()); + + if (value->StrictEquals(*element_k)) { + return Just(k); + } + + if (object->map() != *original_map) { + // Some mutation occurred in accessor. Abort "fast" path. 
+ return IndexOfValueSlowPath(isolate, object, value, k + 1, length); + } + } else if (value->StrictEquals(*element_k)) { + return Just(k); + } + } + return Just(-1); + } +}; + +class SlowSloppyArgumentsElementsAccessor + : public SloppyArgumentsElementsAccessor< + SlowSloppyArgumentsElementsAccessor, DictionaryElementsAccessor, + ElementsKindTraits> { + public: + static Handle ConvertArgumentsStoreResult( + Isolate* isolate, Handle elements, + Handle result) { + // Elements of the arguments object in slow mode might be slow aliases. + if (result->IsAliasedArgumentsEntry()) { + DisallowHeapAllocation no_gc; + AliasedArgumentsEntry alias = AliasedArgumentsEntry::cast(*result); + Context context = elements->context(); + int context_entry = alias.aliased_context_slot(); + DCHECK(!context.get(context_entry).IsTheHole(isolate)); + return handle(context.get(context_entry), isolate); + } + return result; + } + static void SloppyDeleteImpl(Handle obj, + Handle elements, + uint32_t entry) { + // No need to delete a context mapped entry from the arguments elements. + if (entry == kMaxUInt32) return; + Isolate* isolate = obj->GetIsolate(); + Handle dict(NumberDictionary::cast(elements->arguments()), + isolate); + int length = elements->parameter_map_length(); + dict = NumberDictionary::DeleteEntry(isolate, dict, entry - length); + elements->set_arguments(*dict); + } + static void AddImpl(Handle object, uint32_t index, + Handle value, PropertyAttributes attributes, + uint32_t new_capacity) { + Isolate* isolate = object->GetIsolate(); + Handle elements( + SloppyArgumentsElements::cast(object->elements()), isolate); + Handle old_arguments( + FixedArrayBase::cast(elements->arguments()), isolate); + Handle dictionary = + old_arguments->IsNumberDictionary() + ? 
Handle::cast(old_arguments) + : JSObject::NormalizeElements(object); + PropertyDetails details(kData, attributes, PropertyCellType::kNoCell); + Handle new_dictionary = + NumberDictionary::Add(isolate, dictionary, index, value, details); + if (attributes != NONE) object->RequireSlowElements(*new_dictionary); + if (*dictionary != *new_dictionary) { + elements->set_arguments(*new_dictionary); + } + } + + static void ReconfigureImpl(Handle object, + Handle store, uint32_t entry, + Handle value, + PropertyAttributes attributes) { + Isolate* isolate = object->GetIsolate(); + Handle elements = + Handle::cast(store); + uint32_t length = elements->parameter_map_length(); + if (entry < length) { + Object probe = elements->get_mapped_entry(entry); + DCHECK(!probe.IsTheHole(isolate)); + Context context = elements->context(); + int context_entry = Smi::ToInt(probe); + DCHECK(!context.get(context_entry).IsTheHole(isolate)); + context.set(context_entry, *value); + + // Redefining attributes of an aliased element destroys fast aliasing. + elements->set_mapped_entry(entry, + ReadOnlyRoots(isolate).the_hole_value()); + // For elements that are still writable we re-establish slow aliasing. + if ((attributes & READ_ONLY) == 0) { + value = isolate->factory()->NewAliasedArgumentsEntry(context_entry); + } + + PropertyDetails details(kData, attributes, PropertyCellType::kNoCell); + Handle arguments( + NumberDictionary::cast(elements->arguments()), isolate); + arguments = + NumberDictionary::Add(isolate, arguments, entry, value, details); + // If the attributes were NONE, we would have called set rather than + // reconfigure. 
+ DCHECK_NE(NONE, attributes); + object->RequireSlowElements(*arguments); + elements->set_arguments(*arguments); + } else { + Handle arguments(elements->arguments(), isolate); + DictionaryElementsAccessor::ReconfigureImpl( + object, arguments, entry - length, value, attributes); + } + } +}; + +class FastSloppyArgumentsElementsAccessor + : public SloppyArgumentsElementsAccessor< + FastSloppyArgumentsElementsAccessor, FastHoleyObjectElementsAccessor, + ElementsKindTraits> { + public: + static Handle ConvertArgumentsStoreResult( + Isolate* isolate, Handle paramtere_map, + Handle result) { + DCHECK(!result->IsAliasedArgumentsEntry()); + return result; + } + + static Handle GetArguments(Isolate* isolate, + FixedArrayBase store) { + SloppyArgumentsElements elements = SloppyArgumentsElements::cast(store); + return Handle(elements.arguments(), isolate); + } + + static Handle NormalizeImpl( + Handle object, Handle elements) { + Handle arguments = + GetArguments(object->GetIsolate(), *elements); + return FastHoleyObjectElementsAccessor::NormalizeImpl(object, arguments); + } + + static Handle NormalizeArgumentsElements( + Handle object, Handle elements, + uint32_t* entry) { + Handle dictionary = JSObject::NormalizeElements(object); + elements->set_arguments(*dictionary); + // kMaxUInt32 indicates that a context mapped element got deleted. In this + // case we only normalize the elements (aka. migrate to SLOW_SLOPPY). + if (*entry == kMaxUInt32) return dictionary; + uint32_t length = elements->parameter_map_length(); + if (*entry >= length) { + *entry = + dictionary->FindEntry(object->GetIsolate(), *entry - length) + length; + } + return dictionary; + } + + static void SloppyDeleteImpl(Handle obj, + Handle elements, + uint32_t entry) { + // Always normalize element on deleting an entry. 
+ NormalizeArgumentsElements(obj, elements, &entry); + SlowSloppyArgumentsElementsAccessor::SloppyDeleteImpl(obj, elements, entry); + } + + static void AddImpl(Handle object, uint32_t index, + Handle value, PropertyAttributes attributes, + uint32_t new_capacity) { + DCHECK_EQ(NONE, attributes); + Isolate* isolate = object->GetIsolate(); + Handle elements( + SloppyArgumentsElements::cast(object->elements()), isolate); + Handle old_arguments(elements->arguments(), isolate); + if (old_arguments->IsNumberDictionary() || + static_cast(old_arguments->length()) < new_capacity) { + GrowCapacityAndConvertImpl(object, new_capacity); + } + FixedArray arguments = elements->arguments(); + // For fast holey objects, the entry equals the index. The code above made + // sure that there's enough space to store the value. We cannot convert + // index to entry explicitly since the slot still contains the hole, so the + // current EntryForIndex would indicate that it is "absent" by returning + // kMaxUInt32. 
+ FastHoleyObjectElementsAccessor::SetImpl(arguments, index, *value); + } + + static void ReconfigureImpl(Handle object, + Handle store, uint32_t entry, + Handle value, + PropertyAttributes attributes) { + DCHECK_EQ(object->elements(), *store); + Handle elements( + SloppyArgumentsElements::cast(*store), object->GetIsolate()); + NormalizeArgumentsElements(object, elements, &entry); + SlowSloppyArgumentsElementsAccessor::ReconfigureImpl(object, store, entry, + value, attributes); + } + + static void CopyElementsImpl(Isolate* isolate, FixedArrayBase from, + uint32_t from_start, FixedArrayBase to, + ElementsKind from_kind, uint32_t to_start, + int packed_size, int copy_size) { + DCHECK(!to.IsNumberDictionary()); + if (from_kind == SLOW_SLOPPY_ARGUMENTS_ELEMENTS) { + CopyDictionaryToObjectElements(isolate, from, from_start, to, + HOLEY_ELEMENTS, to_start, copy_size); + } else { + DCHECK_EQ(FAST_SLOPPY_ARGUMENTS_ELEMENTS, from_kind); + CopyObjectToObjectElements(isolate, from, HOLEY_ELEMENTS, from_start, to, + HOLEY_ELEMENTS, to_start, copy_size); + } + } + + static void GrowCapacityAndConvertImpl(Handle object, + uint32_t capacity) { + Isolate* isolate = object->GetIsolate(); + Handle elements( + SloppyArgumentsElements::cast(object->elements()), isolate); + Handle old_arguments(FixedArray::cast(elements->arguments()), + isolate); + ElementsKind from_kind = object->GetElementsKind(); + // This method should only be called if there's a reason to update the + // elements. 
+ DCHECK(from_kind == SLOW_SLOPPY_ARGUMENTS_ELEMENTS || + static_cast(old_arguments->length()) < capacity); + Handle arguments = + ConvertElementsWithCapacity(object, old_arguments, from_kind, capacity); + Handle new_map = JSObject::GetElementsTransitionMap( + object, FAST_SLOPPY_ARGUMENTS_ELEMENTS); + JSObject::MigrateToMap(object, new_map); + elements->set_arguments(FixedArray::cast(*arguments)); + JSObject::ValidateElements(*object); + } +}; + +template +class StringWrapperElementsAccessor + : public ElementsAccessorBase { + public: + static Handle GetInternalImpl(Handle holder, + uint32_t entry) { + return GetImpl(holder, entry); + } + + static Handle GetImpl(Handle holder, uint32_t entry) { + Isolate* isolate = holder->GetIsolate(); + Handle string(GetString(*holder), isolate); + uint32_t length = static_cast(string->length()); + if (entry < length) { + return isolate->factory()->LookupSingleCharacterStringFromCode( + String::Flatten(isolate, string)->Get(entry)); + } + return BackingStoreAccessor::GetImpl(isolate, holder->elements(), + entry - length); + } + + static Handle GetImpl(Isolate* isolate, FixedArrayBase elements, + uint32_t entry) { + UNREACHABLE(); + } + + static PropertyDetails GetDetailsImpl(JSObject holder, uint32_t entry) { + uint32_t length = static_cast(GetString(holder).length()); + if (entry < length) { + PropertyAttributes attributes = + static_cast(READ_ONLY | DONT_DELETE); + return PropertyDetails(kData, attributes, PropertyCellType::kNoCell); + } + return BackingStoreAccessor::GetDetailsImpl(holder, entry - length); + } + + static uint32_t GetEntryForIndexImpl(Isolate* isolate, JSObject holder, + FixedArrayBase backing_store, + uint32_t index, PropertyFilter filter) { + uint32_t length = static_cast(GetString(holder).length()); + if (index < length) return index; + uint32_t backing_store_entry = BackingStoreAccessor::GetEntryForIndexImpl( + isolate, holder, backing_store, index, filter); + if (backing_store_entry == kMaxUInt32) return 
kMaxUInt32; + DCHECK(backing_store_entry < kMaxUInt32 - length); + return backing_store_entry + length; + } + + static void DeleteImpl(Handle holder, uint32_t entry) { + uint32_t length = static_cast(GetString(*holder).length()); + if (entry < length) { + return; // String contents can't be deleted. + } + BackingStoreAccessor::DeleteImpl(holder, entry - length); + } + + static void SetImpl(Handle holder, uint32_t entry, Object value) { + uint32_t length = static_cast(GetString(*holder).length()); + if (entry < length) { + return; // String contents are read-only. + } + BackingStoreAccessor::SetImpl(holder->elements(), entry - length, value); + } + + static void AddImpl(Handle object, uint32_t index, + Handle value, PropertyAttributes attributes, + uint32_t new_capacity) { + DCHECK(index >= static_cast(GetString(*object).length())); + // Explicitly grow fast backing stores if needed. Dictionaries know how to + // extend their capacity themselves. + if (KindTraits::Kind == FAST_STRING_WRAPPER_ELEMENTS && + (object->GetElementsKind() == SLOW_STRING_WRAPPER_ELEMENTS || + BackingStoreAccessor::GetCapacityImpl(*object, object->elements()) != + new_capacity)) { + GrowCapacityAndConvertImpl(object, new_capacity); + } + BackingStoreAccessor::AddImpl(object, index, value, attributes, + new_capacity); + } + + static void ReconfigureImpl(Handle object, + Handle store, uint32_t entry, + Handle value, + PropertyAttributes attributes) { + uint32_t length = static_cast(GetString(*object).length()); + if (entry < length) { + return; // String contents can't be reconfigured. 
+ } + BackingStoreAccessor::ReconfigureImpl(object, store, entry - length, value, + attributes); + } + + static void AddElementsToKeyAccumulatorImpl(Handle receiver, + KeyAccumulator* accumulator, + AddKeyConversion convert) { + Isolate* isolate = receiver->GetIsolate(); + Handle string(GetString(*receiver), isolate); + string = String::Flatten(isolate, string); + uint32_t length = static_cast(string->length()); + for (uint32_t i = 0; i < length; i++) { + accumulator->AddKey( + isolate->factory()->LookupSingleCharacterStringFromCode( + string->Get(i)), + convert); + } + BackingStoreAccessor::AddElementsToKeyAccumulatorImpl(receiver, accumulator, + convert); + } + + static void CollectElementIndicesImpl(Handle object, + Handle backing_store, + KeyAccumulator* keys) { + uint32_t length = GetString(*object).length(); + Factory* factory = keys->isolate()->factory(); + for (uint32_t i = 0; i < length; i++) { + keys->AddKey(factory->NewNumberFromUint(i)); + } + BackingStoreAccessor::CollectElementIndicesImpl(object, backing_store, + keys); + } + + static void GrowCapacityAndConvertImpl(Handle object, + uint32_t capacity) { + Handle old_elements(object->elements(), + object->GetIsolate()); + ElementsKind from_kind = object->GetElementsKind(); + if (from_kind == FAST_STRING_WRAPPER_ELEMENTS) { + // The optimizing compiler relies on the prototype lookups of String + // objects always returning undefined. If there's a store to the + // initial String.prototype object, make sure all the optimizations + // are invalidated. + object->GetIsolate()->UpdateNoElementsProtectorOnSetLength(object); + } + // This method should only be called if there's a reason to update the + // elements. 
+ DCHECK(from_kind == SLOW_STRING_WRAPPER_ELEMENTS || + static_cast(old_elements->length()) < capacity); + Subclass::BasicGrowCapacityAndConvertImpl(object, old_elements, from_kind, + FAST_STRING_WRAPPER_ELEMENTS, + capacity); + } + + static void CopyElementsImpl(Isolate* isolate, FixedArrayBase from, + uint32_t from_start, FixedArrayBase to, + ElementsKind from_kind, uint32_t to_start, + int packed_size, int copy_size) { + DCHECK(!to.IsNumberDictionary()); + if (from_kind == SLOW_STRING_WRAPPER_ELEMENTS) { + CopyDictionaryToObjectElements(isolate, from, from_start, to, + HOLEY_ELEMENTS, to_start, copy_size); + } else { + DCHECK_EQ(FAST_STRING_WRAPPER_ELEMENTS, from_kind); + CopyObjectToObjectElements(isolate, from, HOLEY_ELEMENTS, from_start, to, + HOLEY_ELEMENTS, to_start, copy_size); + } + } + + static uint32_t NumberOfElementsImpl(JSObject object, + FixedArrayBase backing_store) { + uint32_t length = GetString(object).length(); + return length + + BackingStoreAccessor::NumberOfElementsImpl(object, backing_store); + } + + private: + static String GetString(JSObject holder) { + DCHECK(holder.IsJSValue()); + JSValue js_value = JSValue::cast(holder); + DCHECK(js_value.value().IsString()); + return String::cast(js_value.value()); + } +}; + +class FastStringWrapperElementsAccessor + : public StringWrapperElementsAccessor< + FastStringWrapperElementsAccessor, FastHoleyObjectElementsAccessor, + ElementsKindTraits> { + public: + static Handle NormalizeImpl( + Handle object, Handle elements) { + return FastHoleyObjectElementsAccessor::NormalizeImpl(object, elements); + } +}; + +class SlowStringWrapperElementsAccessor + : public StringWrapperElementsAccessor< + SlowStringWrapperElementsAccessor, DictionaryElementsAccessor, + ElementsKindTraits> { + public: + static bool HasAccessorsImpl(JSObject holder, FixedArrayBase backing_store) { + return DictionaryElementsAccessor::HasAccessorsImpl(holder, backing_store); + } +}; + +} // namespace + +MaybeHandle 
ArrayConstructInitializeElements(Handle array, + Arguments* args) { + if (args->length() == 0) { + // Optimize the case where there are no parameters passed. + JSArray::Initialize(array, JSArray::kPreallocatedArrayElements); + return array; + + } else if (args->length() == 1 && args->at(0)->IsNumber()) { + uint32_t length; + if (!args->at(0)->ToArrayLength(&length)) { + return ThrowArrayLengthRangeError(array->GetIsolate()); + } + + // Optimize the case where there is one argument and the argument is a small + // smi. + if (length > 0 && length < JSArray::kInitialMaxFastElementArray) { + ElementsKind elements_kind = array->GetElementsKind(); + JSArray::Initialize(array, length, length); + + if (!IsHoleyElementsKind(elements_kind)) { + elements_kind = GetHoleyElementsKind(elements_kind); + JSObject::TransitionElementsKind(array, elements_kind); + } + } else if (length == 0) { + JSArray::Initialize(array, JSArray::kPreallocatedArrayElements); + } else { + // Take the argument as the length. + JSArray::Initialize(array, 0); + JSArray::SetLength(array, length); + } + return array; + } + + Factory* factory = array->GetIsolate()->factory(); + + // Set length and elements on the array. + int number_of_elements = args->length(); + JSObject::EnsureCanContainElements(array, args, 0, number_of_elements, + ALLOW_CONVERTED_DOUBLE_ELEMENTS); + + // Allocate an appropriately typed elements array. 
+ ElementsKind elements_kind = array->GetElementsKind(); + Handle elms; + if (IsDoubleElementsKind(elements_kind)) { + elms = Handle::cast( + factory->NewFixedDoubleArray(number_of_elements)); + } else { + elms = Handle::cast( + factory->NewFixedArrayWithHoles(number_of_elements)); + } + + // Fill in the content + switch (elements_kind) { + case HOLEY_SMI_ELEMENTS: + case PACKED_SMI_ELEMENTS: { + Handle smi_elms = Handle::cast(elms); + for (int entry = 0; entry < number_of_elements; entry++) { + smi_elms->set(entry, (*args)[entry], SKIP_WRITE_BARRIER); + } + break; + } + case HOLEY_ELEMENTS: + case PACKED_ELEMENTS: { + DisallowHeapAllocation no_gc; + WriteBarrierMode mode = elms->GetWriteBarrierMode(no_gc); + Handle object_elms = Handle::cast(elms); + for (int entry = 0; entry < number_of_elements; entry++) { + object_elms->set(entry, (*args)[entry], mode); + } + break; + } + case HOLEY_DOUBLE_ELEMENTS: + case PACKED_DOUBLE_ELEMENTS: { + Handle double_elms = + Handle::cast(elms); + for (int entry = 0; entry < number_of_elements; entry++) { + double_elms->set(entry, (*args)[entry].Number()); + } + break; + } + default: + UNREACHABLE(); + } + + array->set_elements(*elms); + array->set_length(Smi::FromInt(number_of_elements)); + return array; +} + +void CopyFastNumberJSArrayElementsToTypedArray(Address raw_context, + Address raw_source, + Address raw_destination, + uintptr_t length, + uintptr_t offset) { + Context context = Context::cast(Object(raw_context)); + JSArray source = JSArray::cast(Object(raw_source)); + JSTypedArray destination = JSTypedArray::cast(Object(raw_destination)); + + switch (destination.GetElementsKind()) { +#define TYPED_ARRAYS_CASE(Type, type, TYPE, ctype) \ + case TYPE##_ELEMENTS: \ + CHECK(Type##ElementsAccessor::TryCopyElementsFastNumber( \ + context, source, destination, length, static_cast(offset))); \ + break; + TYPED_ARRAYS(TYPED_ARRAYS_CASE) +#undef TYPED_ARRAYS_CASE + default: + UNREACHABLE(); + } +} + +void 
CopyTypedArrayElementsToTypedArray(Address raw_source, + Address raw_destination, + uintptr_t length, uintptr_t offset) { + JSTypedArray source = JSTypedArray::cast(Object(raw_source)); + JSTypedArray destination = JSTypedArray::cast(Object(raw_destination)); + + switch (destination.GetElementsKind()) { +#define TYPED_ARRAYS_CASE(Type, type, TYPE, ctype) \ + case TYPE##_ELEMENTS: \ + Type##ElementsAccessor::CopyElementsFromTypedArray( \ + source, destination, length, static_cast(offset)); \ + break; + TYPED_ARRAYS(TYPED_ARRAYS_CASE) +#undef TYPED_ARRAYS_CASE + default: + UNREACHABLE(); + } +} + +void CopyTypedArrayElementsSlice(Address raw_source, Address raw_destination, + uintptr_t start, uintptr_t end) { + JSTypedArray source = JSTypedArray::cast(Object(raw_source)); + JSTypedArray destination = JSTypedArray::cast(Object(raw_destination)); + + destination.GetElementsAccessor()->CopyTypedArrayElementsSlice( + source, destination, start, end); +} + +void ElementsAccessor::InitializeOncePerProcess() { + static ElementsAccessor* accessor_array[] = { +#define ACCESSOR_ARRAY(Class, Kind, Store) new Class(), + ELEMENTS_LIST(ACCESSOR_ARRAY) +#undef ACCESSOR_ARRAY + }; + + STATIC_ASSERT((sizeof(accessor_array) / sizeof(*accessor_array)) == + kElementsKindCount); + + elements_accessors_ = accessor_array; +} + +void ElementsAccessor::TearDown() { + if (elements_accessors_ == nullptr) return; +#define ACCESSOR_DELETE(Class, Kind, Store) delete elements_accessors_[Kind]; + ELEMENTS_LIST(ACCESSOR_DELETE) +#undef ACCESSOR_DELETE + elements_accessors_ = nullptr; +} + +Handle ElementsAccessor::Concat(Isolate* isolate, Arguments* args, + uint32_t concat_size, + uint32_t result_len) { + ElementsKind result_elements_kind = GetInitialFastElementsKind(); + bool has_raw_doubles = false; + { + DisallowHeapAllocation no_gc; + bool is_holey = false; + for (uint32_t i = 0; i < concat_size; i++) { + Object arg = (*args)[i]; + ElementsKind arg_kind = JSArray::cast(arg).GetElementsKind(); + 
has_raw_doubles = has_raw_doubles || IsDoubleElementsKind(arg_kind); + is_holey = is_holey || IsHoleyElementsKind(arg_kind); + result_elements_kind = + GetMoreGeneralElementsKind(result_elements_kind, arg_kind); + } + if (is_holey) { + result_elements_kind = GetHoleyElementsKind(result_elements_kind); + } + } + + // If a double array is concatted into a fast elements array, the fast + // elements array needs to be initialized to contain proper holes, since + // boxing doubles may cause incremental marking. + bool requires_double_boxing = + has_raw_doubles && !IsDoubleElementsKind(result_elements_kind); + ArrayStorageAllocationMode mode = requires_double_boxing + ? INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE + : DONT_INITIALIZE_ARRAY_ELEMENTS; + Handle result_array = isolate->factory()->NewJSArray( + result_elements_kind, result_len, result_len, mode); + if (result_len == 0) return result_array; + + uint32_t insertion_index = 0; + Handle storage(result_array->elements(), isolate); + ElementsAccessor* accessor = ElementsAccessor::ForKind(result_elements_kind); + for (uint32_t i = 0; i < concat_size; i++) { + // It is crucial to keep |array| in a raw pointer form to avoid + // performance degradation. + JSArray array = JSArray::cast((*args)[i]); + uint32_t len = 0; + array.length().ToArrayLength(&len); + if (len == 0) continue; + ElementsKind from_kind = array.GetElementsKind(); + accessor->CopyElements(array, 0, from_kind, storage, insertion_index, len); + insertion_index += len; + } + + DCHECK_EQ(insertion_index, result_len); + return result_array; +} + +ElementsAccessor** ElementsAccessor::elements_accessors_ = nullptr; + +#undef ELEMENTS_LIST +} // namespace internal +} // namespace v8 diff --git a/deps/v8/src/objects/elements.h b/deps/v8/src/objects/elements.h new file mode 100644 index 0000000000..844cd2ed94 --- /dev/null +++ b/deps/v8/src/objects/elements.h @@ -0,0 +1,241 @@ +// Copyright 2012 the V8 project authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef V8_OBJECTS_ELEMENTS_H_ +#define V8_OBJECTS_ELEMENTS_H_ + +#include "src/objects/elements-kind.h" +#include "src/objects/keys.h" +#include "src/objects/objects.h" + +namespace v8 { +namespace internal { + +class JSTypedArray; + +// Abstract base class for handles that can operate on objects with differing +// ElementsKinds. +class ElementsAccessor { + public: + ElementsAccessor() = default; + virtual ~ElementsAccessor() = default; + + // Returns a shared ElementsAccessor for the specified ElementsKind. + static ElementsAccessor* ForKind(ElementsKind elements_kind) { + DCHECK_LT(static_cast(elements_kind), kElementsKindCount); + return elements_accessors_[elements_kind]; + } + + // Checks the elements of an object for consistency, asserting when a problem + // is found. + virtual void Validate(JSObject obj) = 0; + + // Returns true if a holder contains an element with the specified index + // without iterating up the prototype chain. The caller can optionally pass + // in the backing store to use for the check, which must be compatible with + // the ElementsKind of the ElementsAccessor. If backing_store is nullptr, the + // holder->elements() is used as the backing store. If a |filter| is + // specified the PropertyAttributes of the element at the given index + // are compared to the given |filter|. If they match/overlap the given + // index is ignored. Note that only Dictionary elements have custom + // PropertyAttributes associated, hence the |filter| argument is ignored for + // all but DICTIONARY_ELEMENTS and SLOW_SLOPPY_ARGUMENTS_ELEMENTS. 
+ virtual bool HasElement(JSObject holder, uint32_t index, + FixedArrayBase backing_store, + PropertyFilter filter = ALL_PROPERTIES) = 0; + + inline bool HasElement(JSObject holder, uint32_t index, + PropertyFilter filter = ALL_PROPERTIES); + + // Note: this is currently not implemented for string wrapper and + // typed array elements. + virtual bool HasEntry(JSObject holder, uint32_t entry) = 0; + + // TODO(cbruni): HasEntry and Get should not be exposed publicly with the + // entry parameter. + virtual Handle Get(Handle holder, uint32_t entry) = 0; + + virtual bool HasAccessors(JSObject holder) = 0; + virtual uint32_t NumberOfElements(JSObject holder) = 0; + + // Modifies the length data property as specified for JSArrays and resizes the + // underlying backing store accordingly. The method honors the semantics of + // changing array sizes as defined in EcmaScript 5.1 15.4.5.2, i.e. array that + // have non-deletable elements can only be shrunk to the size of highest + // element that is non-deletable. + virtual void SetLength(Handle holder, uint32_t new_length) = 0; + + // If kCopyToEnd is specified as the copy_size to CopyElements, it copies all + // of elements from source after source_start to the destination array. + static const int kCopyToEnd = -1; + // If kCopyToEndAndInitializeToHole is specified as the copy_size to + // CopyElements, it copies all of elements from source after source_start to + // destination array, padding any remaining uninitialized elements in the + // destination array with the hole. + static const int kCopyToEndAndInitializeToHole = -2; + + // Copy all indices that have elements from |object| into the given + // KeyAccumulator. For Dictionary-based element-kinds we filter out elements + // whose PropertyAttribute match |filter|. 
+ virtual void CollectElementIndices(Handle object, + Handle backing_store, + KeyAccumulator* keys) = 0; + + inline void CollectElementIndices(Handle object, + KeyAccumulator* keys); + + virtual Maybe CollectValuesOrEntries( + Isolate* isolate, Handle object, + Handle values_or_entries, bool get_entries, int* nof_items, + PropertyFilter filter = ALL_PROPERTIES) = 0; + + virtual MaybeHandle PrependElementIndices( + Handle object, Handle backing_store, + Handle keys, GetKeysConversion convert, + PropertyFilter filter = ALL_PROPERTIES) = 0; + + inline MaybeHandle PrependElementIndices( + Handle object, Handle keys, + GetKeysConversion convert, PropertyFilter filter = ALL_PROPERTIES); + + virtual void AddElementsToKeyAccumulator(Handle receiver, + KeyAccumulator* accumulator, + AddKeyConversion convert) = 0; + + virtual void TransitionElementsKind(Handle object, + Handle map) = 0; + virtual void GrowCapacityAndConvert(Handle object, + uint32_t capacity) = 0; + // Unlike GrowCapacityAndConvert do not attempt to convert the backing store + // and simply return false in this case. 
+ virtual bool GrowCapacity(Handle object, uint32_t index) = 0; + + static void InitializeOncePerProcess(); + static void TearDown(); + + virtual void Set(Handle holder, uint32_t entry, Object value) = 0; + + virtual void Add(Handle object, uint32_t index, + Handle value, PropertyAttributes attributes, + uint32_t new_capacity) = 0; + + static Handle Concat(Isolate* isolate, Arguments* args, + uint32_t concat_size, uint32_t result_length); + + virtual uint32_t Push(Handle receiver, Arguments* args, + uint32_t push_size) = 0; + + virtual uint32_t Unshift(Handle receiver, Arguments* args, + uint32_t unshift_size) = 0; + + virtual Handle Pop(Handle receiver) = 0; + + virtual Handle Shift(Handle receiver) = 0; + + virtual Handle Normalize(Handle object) = 0; + + virtual uint32_t GetCapacity(JSObject holder, + FixedArrayBase backing_store) = 0; + + virtual Object Fill(Handle receiver, Handle obj_value, + uint32_t start, uint32_t end) = 0; + + // Check an Object's own elements for an element (using SameValueZero + // semantics) + virtual Maybe IncludesValue(Isolate* isolate, Handle receiver, + Handle value, uint32_t start, + uint32_t length) = 0; + + // Check an Object's own elements for the index of an element (using SameValue + // semantics) + virtual Maybe IndexOfValue(Isolate* isolate, + Handle receiver, + Handle value, uint32_t start, + uint32_t length) = 0; + + virtual Maybe LastIndexOfValue(Handle receiver, + Handle value, + uint32_t start) = 0; + + virtual void Reverse(JSObject receiver) = 0; + + virtual void CopyElements(Isolate* isolate, Handle source, + ElementsKind source_kind, + Handle destination, int size) = 0; + + virtual Object CopyElements(Handle source, + Handle destination, size_t length, + uint32_t offset = 0) = 0; + + virtual Handle CreateListFromArrayLike(Isolate* isolate, + Handle object, + uint32_t length) = 0; + + virtual void CopyTypedArrayElementsSlice(JSTypedArray source, + JSTypedArray destination, + size_t start, size_t end) = 0; + + 
protected: + friend class LookupIterator; + + // Element handlers distinguish between entries and indices when they + // manipulate elements. Entries refer to elements in terms of their location + // in the underlying storage's backing store representation, and are between 0 + // and GetCapacity. Indices refer to elements in terms of the value that would + // be specified in JavaScript to access the element. In most implementations, + // indices are equivalent to entries. In the NumberDictionary + // ElementsAccessor, entries are mapped to an index using the KeyAt method on + // the NumberDictionary. + virtual uint32_t GetEntryForIndex(Isolate* isolate, JSObject holder, + FixedArrayBase backing_store, + uint32_t index) = 0; + + virtual PropertyDetails GetDetails(JSObject holder, uint32_t entry) = 0; + virtual void Reconfigure(Handle object, + Handle backing_store, uint32_t entry, + Handle value, + PropertyAttributes attributes) = 0; + + // Deletes an element in an object. + virtual void Delete(Handle holder, uint32_t entry) = 0; + + // NOTE: this method violates the handlified function signature convention: + // raw pointer parameter |source_holder| in the function that allocates. + // This is done intentionally to avoid ArrayConcat() builtin performance + // degradation. + virtual void CopyElements(JSObject source_holder, uint32_t source_start, + ElementsKind source_kind, + Handle destination, + uint32_t destination_start, int copy_size) = 0; + + private: + static ElementsAccessor** elements_accessors_; + + DISALLOW_COPY_AND_ASSIGN(ElementsAccessor); +}; + +V8_WARN_UNUSED_RESULT MaybeHandle ArrayConstructInitializeElements( + Handle array, Arguments* args); + +// Called directly from CSA. +// {raw_context}: Context pointer. +// {raw_source}: JSArray pointer. +// {raw_destination}: JSTypedArray pointer. 
+void CopyFastNumberJSArrayElementsToTypedArray(Address raw_context, + Address raw_source, + Address raw_destination, + uintptr_t length, + uintptr_t offset); +// {raw_source}, {raw_destination}: JSTypedArray pointers. +void CopyTypedArrayElementsToTypedArray(Address raw_source, + Address raw_destination, + uintptr_t length, uintptr_t offset); +// {raw_source}, {raw_destination}: JSTypedArray pointers. +void CopyTypedArrayElementsSlice(Address raw_source, Address raw_destination, + uintptr_t start, uintptr_t end); + +} // namespace internal +} // namespace v8 + +#endif // V8_OBJECTS_ELEMENTS_H_ diff --git a/deps/v8/src/objects/embedder-data-array.cc b/deps/v8/src/objects/embedder-data-array.cc index c85e0b9f31..ba3e92c33c 100644 --- a/deps/v8/src/objects/embedder-data-array.cc +++ b/deps/v8/src/objects/embedder-data-array.cc @@ -4,7 +4,7 @@ #include "src/objects/embedder-data-array.h" -#include "src/isolate.h" +#include "src/execution/isolate.h" #include "src/objects/embedder-data-array-inl.h" namespace v8 { diff --git a/deps/v8/src/objects/embedder-data-array.h b/deps/v8/src/objects/embedder-data-array.h index f5ab2fa7ee..ba4fe25465 100644 --- a/deps/v8/src/objects/embedder-data-array.h +++ b/deps/v8/src/objects/embedder-data-array.h @@ -5,10 +5,10 @@ #ifndef V8_OBJECTS_EMBEDDER_DATA_ARRAY_H_ #define V8_OBJECTS_EMBEDDER_DATA_ARRAY_H_ -#include "src/globals.h" -#include "src/maybe-handles.h" +#include "src/common/globals.h" +#include "src/handles/maybe-handles.h" #include "src/objects/heap-object.h" -#include "torque-generated/class-definitions-from-dsl.h" +#include "torque-generated/field-offsets-tq.h" // Has to be the last include (doesn't have include guards): #include "src/objects/object-macros.h" diff --git a/deps/v8/src/objects/embedder-data-slot-inl.h b/deps/v8/src/objects/embedder-data-slot-inl.h index b87f31ac7d..6830a4d22e 100644 --- a/deps/v8/src/objects/embedder-data-slot-inl.h +++ b/deps/v8/src/objects/embedder-data-slot-inl.h @@ -7,11 +7,11 @@ 
#include "src/objects/embedder-data-slot.h" +#include "src/common/v8memory.h" #include "src/heap/heap-write-barrier-inl.h" -#include "src/objects-inl.h" #include "src/objects/embedder-data-array.h" #include "src/objects/js-objects-inl.h" -#include "src/v8memory.h" +#include "src/objects/objects-inl.h" // Has to be the last include (doesn't have include guards): #include "src/objects/object-macros.h" @@ -25,7 +25,7 @@ EmbedderDataSlot::EmbedderDataSlot(EmbedderDataArray array, int entry_index) EmbedderDataSlot::EmbedderDataSlot(JSObject object, int embedder_field_index) : SlotBase(FIELD_ADDR( - object, object->GetEmbedderFieldOffset(embedder_field_index))) {} + object, object.GetEmbedderFieldOffset(embedder_field_index))) {} Object EmbedderDataSlot::load_tagged() const { return ObjectSlot(address() + kTaggedPayloadOffset).Relaxed_Load(); @@ -56,7 +56,7 @@ void EmbedderDataSlot::store_tagged(EmbedderDataArray array, int entry_index, // static void EmbedderDataSlot::store_tagged(JSObject object, int embedder_field_index, Object value) { - int slot_offset = object->GetEmbedderFieldOffset(embedder_field_index); + int slot_offset = object.GetEmbedderFieldOffset(embedder_field_index); ObjectSlot(FIELD_ADDR(object, slot_offset + kTaggedPayloadOffset)) .Relaxed_Store(value); WRITE_BARRIER(object, slot_offset + kTaggedPayloadOffset, value); diff --git a/deps/v8/src/objects/embedder-data-slot.h b/deps/v8/src/objects/embedder-data-slot.h index 6cebf28f2d..dee8c3ec56 100644 --- a/deps/v8/src/objects/embedder-data-slot.h +++ b/deps/v8/src/objects/embedder-data-slot.h @@ -7,8 +7,8 @@ #include -#include "src/assert-scope.h" -#include "src/globals.h" +#include "src/common/assert-scope.h" +#include "src/common/globals.h" #include "src/objects/slots.h" // Has to be the last include (doesn't have include guards): diff --git a/deps/v8/src/objects/feedback-cell-inl.h b/deps/v8/src/objects/feedback-cell-inl.h index c3902ca9aa..e06cfce7de 100644 --- 
a/deps/v8/src/objects/feedback-cell-inl.h +++ b/deps/v8/src/objects/feedback-cell-inl.h @@ -8,7 +8,7 @@ #include "src/objects/feedback-cell.h" #include "src/heap/heap-write-barrier-inl.h" -#include "src/objects-inl.h" +#include "src/objects/objects-inl.h" #include "src/objects/struct-inl.h" // Has to be the last include (doesn't have include guards): @@ -25,10 +25,15 @@ ACCESSORS(FeedbackCell, value, HeapObject, kValueOffset) INT32_ACCESSORS(FeedbackCell, interrupt_budget, kInterruptBudgetOffset) void FeedbackCell::clear_padding() { - if (FeedbackCell::kSize == FeedbackCell::kUnalignedSize) return; - DCHECK_GE(FeedbackCell::kSize, FeedbackCell::kUnalignedSize); + if (FeedbackCell::kAlignedSize == FeedbackCell::kUnalignedSize) return; + DCHECK_GE(FeedbackCell::kAlignedSize, FeedbackCell::kUnalignedSize); memset(reinterpret_cast(address() + FeedbackCell::kUnalignedSize), 0, - FeedbackCell::kSize - FeedbackCell::kUnalignedSize); + FeedbackCell::kAlignedSize - FeedbackCell::kUnalignedSize); +} + +void FeedbackCell::reset() { + set_value(GetReadOnlyRoots().undefined_value()); + set_interrupt_budget(FeedbackCell::GetInitialInterruptBudget()); } } // namespace internal diff --git a/deps/v8/src/objects/feedback-cell.h b/deps/v8/src/objects/feedback-cell.h index a708f4cb92..3c085f72d9 100644 --- a/deps/v8/src/objects/feedback-cell.h +++ b/deps/v8/src/objects/feedback-cell.h @@ -37,23 +37,18 @@ class FeedbackCell : public Struct { DECL_PRINTER(FeedbackCell) DECL_VERIFIER(FeedbackCell) -// Layout description. -#define FEEDBACK_CELL_FIELDS(V) \ - V(kValueOffset, kTaggedSize) \ - /* Non-pointer fields */ \ - V(kInterruptBudgetOffset, kInt32Size) \ - /* Total size. */ \ - V(kUnalignedSize, 0) + // Layout description. 
+ DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize, + TORQUE_GENERATED_FEEDBACK_CELL_FIELDS) - DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize, FEEDBACK_CELL_FIELDS) -#undef FEEDBACK_CELL_FIELDS - - static const int kSize = RoundUp(int{kUnalignedSize}); + static const int kUnalignedSize = kSize; + static const int kAlignedSize = RoundUp(int{kSize}); inline void clear_padding(); + inline void reset(); using BodyDescriptor = - FixedBodyDescriptor; + FixedBodyDescriptor; OBJECT_CONSTRUCTORS(FeedbackCell, Struct); }; diff --git a/deps/v8/src/objects/feedback-vector-inl.h b/deps/v8/src/objects/feedback-vector-inl.h new file mode 100644 index 0000000000..6b1fdcc1e5 --- /dev/null +++ b/deps/v8/src/objects/feedback-vector-inl.h @@ -0,0 +1,354 @@ +// Copyright 2012 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef V8_OBJECTS_FEEDBACK_VECTOR_INL_H_ +#define V8_OBJECTS_FEEDBACK_VECTOR_INL_H_ + +#include "src/objects/feedback-vector.h" + +#include "src/common/globals.h" +#include "src/heap/factory-inl.h" +#include "src/heap/heap-write-barrier-inl.h" +#include "src/objects/code-inl.h" +#include "src/objects/maybe-object-inl.h" +#include "src/objects/shared-function-info.h" +#include "src/objects/smi.h" + +// Has to be the last include (doesn't have include guards): +#include "src/objects/object-macros.h" + +namespace v8 { +namespace internal { + +OBJECT_CONSTRUCTORS_IMPL(FeedbackVector, HeapObject) +OBJECT_CONSTRUCTORS_IMPL(FeedbackMetadata, HeapObject) +OBJECT_CONSTRUCTORS_IMPL(ClosureFeedbackCellArray, FixedArray) + +NEVER_READ_ONLY_SPACE_IMPL(FeedbackVector) +NEVER_READ_ONLY_SPACE_IMPL(ClosureFeedbackCellArray) + +CAST_ACCESSOR(FeedbackVector) +CAST_ACCESSOR(FeedbackMetadata) +CAST_ACCESSOR(ClosureFeedbackCellArray) + +INT32_ACCESSORS(FeedbackMetadata, slot_count, kSlotCountOffset) + +INT32_ACCESSORS(FeedbackMetadata, closure_feedback_cell_count, + 
kFeedbackCellCountOffset) + +int32_t FeedbackMetadata::synchronized_slot_count() const { + return base::Acquire_Load(reinterpret_cast( + FIELD_ADDR(*this, kSlotCountOffset))); +} + +int32_t FeedbackMetadata::get(int index) const { + DCHECK(index >= 0 && index < length()); + int offset = kHeaderSize + index * kInt32Size; + return ReadField(offset); +} + +void FeedbackMetadata::set(int index, int32_t value) { + DCHECK(index >= 0 && index < length()); + int offset = kHeaderSize + index * kInt32Size; + WriteField(offset, value); +} + +bool FeedbackMetadata::is_empty() const { return slot_count() == 0; } + +int FeedbackMetadata::length() const { + return FeedbackMetadata::length(slot_count()); +} + +int FeedbackMetadata::GetSlotSize(FeedbackSlotKind kind) { + switch (kind) { + case FeedbackSlotKind::kForIn: + case FeedbackSlotKind::kInstanceOf: + case FeedbackSlotKind::kCompareOp: + case FeedbackSlotKind::kBinaryOp: + case FeedbackSlotKind::kLiteral: + case FeedbackSlotKind::kTypeProfile: + return 1; + + case FeedbackSlotKind::kCall: + case FeedbackSlotKind::kCloneObject: + case FeedbackSlotKind::kLoadProperty: + case FeedbackSlotKind::kLoadGlobalInsideTypeof: + case FeedbackSlotKind::kLoadGlobalNotInsideTypeof: + case FeedbackSlotKind::kLoadKeyed: + case FeedbackSlotKind::kHasKeyed: + case FeedbackSlotKind::kStoreNamedSloppy: + case FeedbackSlotKind::kStoreNamedStrict: + case FeedbackSlotKind::kStoreOwnNamed: + case FeedbackSlotKind::kStoreGlobalSloppy: + case FeedbackSlotKind::kStoreGlobalStrict: + case FeedbackSlotKind::kStoreKeyedSloppy: + case FeedbackSlotKind::kStoreKeyedStrict: + case FeedbackSlotKind::kStoreInArrayLiteral: + case FeedbackSlotKind::kStoreDataPropertyInLiteral: + return 2; + + case FeedbackSlotKind::kInvalid: + case FeedbackSlotKind::kKindsNumber: + UNREACHABLE(); + } + return 1; +} + +Handle ClosureFeedbackCellArray::GetFeedbackCell(int index) { + return handle(FeedbackCell::cast(get(index)), GetIsolate()); +} + +ACCESSORS(FeedbackVector, 
shared_function_info, SharedFunctionInfo, + kSharedFunctionInfoOffset) +WEAK_ACCESSORS(FeedbackVector, optimized_code_weak_or_smi, + kOptimizedCodeWeakOrSmiOffset) +ACCESSORS(FeedbackVector, closure_feedback_cell_array, ClosureFeedbackCellArray, + kClosureFeedbackCellArrayOffset) +INT32_ACCESSORS(FeedbackVector, length, kLengthOffset) +INT32_ACCESSORS(FeedbackVector, invocation_count, kInvocationCountOffset) +INT32_ACCESSORS(FeedbackVector, profiler_ticks, kProfilerTicksOffset) + +void FeedbackVector::clear_padding() { + if (FIELD_SIZE(kPaddingOffset) == 0) return; + DCHECK_EQ(4, FIELD_SIZE(kPaddingOffset)); + memset(reinterpret_cast(address() + kPaddingOffset), 0, + FIELD_SIZE(kPaddingOffset)); +} + +bool FeedbackVector::is_empty() const { return length() == 0; } + +FeedbackMetadata FeedbackVector::metadata() const { + return shared_function_info().feedback_metadata(); +} + +void FeedbackVector::clear_invocation_count() { set_invocation_count(0); } + +Code FeedbackVector::optimized_code() const { + MaybeObject slot = optimized_code_weak_or_smi(); + DCHECK(slot->IsSmi() || slot->IsWeakOrCleared()); + HeapObject heap_object; + return slot->GetHeapObject(&heap_object) ? Code::cast(heap_object) : Code(); +} + +OptimizationMarker FeedbackVector::optimization_marker() const { + MaybeObject slot = optimized_code_weak_or_smi(); + Smi value; + if (!slot->ToSmi(&value)) return OptimizationMarker::kNone; + return static_cast(value.value()); +} + +bool FeedbackVector::has_optimized_code() const { + return !optimized_code().is_null(); +} + +bool FeedbackVector::has_optimization_marker() const { + return optimization_marker() != OptimizationMarker::kLogFirstExecution && + optimization_marker() != OptimizationMarker::kNone; +} + +// Conversion from an integer index to either a slot or an ic slot. 
+// static +FeedbackSlot FeedbackVector::ToSlot(int index) { + DCHECK_GE(index, 0); + return FeedbackSlot(index); +} + +MaybeObject FeedbackVector::Get(FeedbackSlot slot) const { + return get(GetIndex(slot)); +} + +MaybeObject FeedbackVector::get(int index) const { + DCHECK_GE(index, 0); + DCHECK_LT(index, this->length()); + int offset = kFeedbackSlotsOffset + index * kTaggedSize; + return RELAXED_READ_WEAK_FIELD(*this, offset); +} + +Handle FeedbackVector::GetClosureFeedbackCell(int index) const { + DCHECK_GE(index, 0); + ClosureFeedbackCellArray cell_array = + ClosureFeedbackCellArray::cast(closure_feedback_cell_array()); + return cell_array.GetFeedbackCell(index); +} + +void FeedbackVector::Set(FeedbackSlot slot, MaybeObject value, + WriteBarrierMode mode) { + set(GetIndex(slot), value, mode); +} + +void FeedbackVector::set(int index, MaybeObject value, WriteBarrierMode mode) { + DCHECK_GE(index, 0); + DCHECK_LT(index, this->length()); + int offset = kFeedbackSlotsOffset + index * kTaggedSize; + RELAXED_WRITE_WEAK_FIELD(*this, offset, value); + CONDITIONAL_WEAK_WRITE_BARRIER(*this, offset, value, mode); +} + +void FeedbackVector::Set(FeedbackSlot slot, Object value, + WriteBarrierMode mode) { + set(GetIndex(slot), MaybeObject::FromObject(value), mode); +} + +void FeedbackVector::set(int index, Object value, WriteBarrierMode mode) { + set(index, MaybeObject::FromObject(value), mode); +} + +inline MaybeObjectSlot FeedbackVector::slots_start() { + return RawMaybeWeakField(kFeedbackSlotsOffset); +} + +// Helper function to transform the feedback to BinaryOperationHint. 
+BinaryOperationHint BinaryOperationHintFromFeedback(int type_feedback) { + switch (type_feedback) { + case BinaryOperationFeedback::kNone: + return BinaryOperationHint::kNone; + case BinaryOperationFeedback::kSignedSmall: + return BinaryOperationHint::kSignedSmall; + case BinaryOperationFeedback::kSignedSmallInputs: + return BinaryOperationHint::kSignedSmallInputs; + case BinaryOperationFeedback::kNumber: + return BinaryOperationHint::kNumber; + case BinaryOperationFeedback::kNumberOrOddball: + return BinaryOperationHint::kNumberOrOddball; + case BinaryOperationFeedback::kString: + return BinaryOperationHint::kString; + case BinaryOperationFeedback::kBigInt: + return BinaryOperationHint::kBigInt; + default: + return BinaryOperationHint::kAny; + } + UNREACHABLE(); +} + +// Helper function to transform the feedback to CompareOperationHint. +CompareOperationHint CompareOperationHintFromFeedback(int type_feedback) { + switch (type_feedback) { + case CompareOperationFeedback::kNone: + return CompareOperationHint::kNone; + case CompareOperationFeedback::kSignedSmall: + return CompareOperationHint::kSignedSmall; + case CompareOperationFeedback::kNumber: + return CompareOperationHint::kNumber; + case CompareOperationFeedback::kNumberOrOddball: + return CompareOperationHint::kNumberOrOddball; + case CompareOperationFeedback::kInternalizedString: + return CompareOperationHint::kInternalizedString; + case CompareOperationFeedback::kString: + return CompareOperationHint::kString; + case CompareOperationFeedback::kSymbol: + return CompareOperationHint::kSymbol; + case CompareOperationFeedback::kBigInt: + return CompareOperationHint::kBigInt; + case CompareOperationFeedback::kReceiver: + return CompareOperationHint::kReceiver; + case CompareOperationFeedback::kReceiverOrNullOrUndefined: + return CompareOperationHint::kReceiverOrNullOrUndefined; + default: + return CompareOperationHint::kAny; + } + UNREACHABLE(); +} + +// Helper function to transform the feedback to ForInHint. 
+ForInHint ForInHintFromFeedback(int type_feedback) { + switch (type_feedback) { + case ForInFeedback::kNone: + return ForInHint::kNone; + case ForInFeedback::kEnumCacheKeys: + return ForInHint::kEnumCacheKeys; + case ForInFeedback::kEnumCacheKeysAndIndices: + return ForInHint::kEnumCacheKeysAndIndices; + default: + return ForInHint::kAny; + } + UNREACHABLE(); +} + +Handle FeedbackVector::UninitializedSentinel(Isolate* isolate) { + return isolate->factory()->uninitialized_symbol(); +} + +Handle FeedbackVector::GenericSentinel(Isolate* isolate) { + return isolate->factory()->generic_symbol(); +} + +Handle FeedbackVector::MegamorphicSentinel(Isolate* isolate) { + return isolate->factory()->megamorphic_symbol(); +} + +Handle FeedbackVector::PremonomorphicSentinel(Isolate* isolate) { + return isolate->factory()->premonomorphic_symbol(); +} + +Symbol FeedbackVector::RawUninitializedSentinel(Isolate* isolate) { + return ReadOnlyRoots(isolate).uninitialized_symbol(); +} + +bool FeedbackMetadataIterator::HasNext() const { + return next_slot_.ToInt() < metadata().slot_count(); +} + +FeedbackSlot FeedbackMetadataIterator::Next() { + DCHECK(HasNext()); + cur_slot_ = next_slot_; + slot_kind_ = metadata().GetKind(cur_slot_); + next_slot_ = FeedbackSlot(next_slot_.ToInt() + entry_size()); + return cur_slot_; +} + +int FeedbackMetadataIterator::entry_size() const { + return FeedbackMetadata::GetSlotSize(kind()); +} + +MaybeObject FeedbackNexus::GetFeedback() const { + MaybeObject feedback = vector().Get(slot()); + FeedbackVector::AssertNoLegacyTypes(feedback); + return feedback; +} + +MaybeObject FeedbackNexus::GetFeedbackExtra() const { +#ifdef DEBUG + FeedbackSlotKind kind = vector().GetKind(slot()); + DCHECK_LT(1, FeedbackMetadata::GetSlotSize(kind)); +#endif + int extra_index = vector().GetIndex(slot()) + 1; + return vector().get(extra_index); +} + +void FeedbackNexus::SetFeedback(Object feedback, WriteBarrierMode mode) { + SetFeedback(MaybeObject::FromObject(feedback)); +} + 
+void FeedbackNexus::SetFeedback(MaybeObject feedback, WriteBarrierMode mode) { + FeedbackVector::AssertNoLegacyTypes(feedback); + vector().Set(slot(), feedback, mode); +} + +void FeedbackNexus::SetFeedbackExtra(Object feedback_extra, + WriteBarrierMode mode) { +#ifdef DEBUG + FeedbackSlotKind kind = vector().GetKind(slot()); + DCHECK_LT(1, FeedbackMetadata::GetSlotSize(kind)); + FeedbackVector::AssertNoLegacyTypes(MaybeObject::FromObject(feedback_extra)); +#endif + int index = vector().GetIndex(slot()) + 1; + vector().set(index, MaybeObject::FromObject(feedback_extra), mode); +} + +void FeedbackNexus::SetFeedbackExtra(MaybeObject feedback_extra, + WriteBarrierMode mode) { +#ifdef DEBUG + FeedbackVector::AssertNoLegacyTypes(feedback_extra); +#endif + int index = vector().GetIndex(slot()) + 1; + vector().set(index, feedback_extra, mode); +} + +Isolate* FeedbackNexus::GetIsolate() const { return vector().GetIsolate(); } +} // namespace internal +} // namespace v8 + +#include "src/objects/object-macros-undef.h" + +#endif // V8_OBJECTS_FEEDBACK_VECTOR_INL_H_ diff --git a/deps/v8/src/objects/feedback-vector.cc b/deps/v8/src/objects/feedback-vector.cc new file mode 100644 index 0000000000..0393a55f69 --- /dev/null +++ b/deps/v8/src/objects/feedback-vector.cc @@ -0,0 +1,1420 @@ +// Copyright 2014 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#include "src/objects/feedback-vector.h" +#include "src/ic/handler-configuration-inl.h" +#include "src/ic/ic-inl.h" +#include "src/objects/data-handler-inl.h" +#include "src/objects/feedback-vector-inl.h" +#include "src/objects/hash-table-inl.h" +#include "src/objects/map-inl.h" +#include "src/objects/object-macros.h" +#include "src/objects/objects.h" + +namespace v8 { +namespace internal { + +FeedbackSlot FeedbackVectorSpec::AddSlot(FeedbackSlotKind kind) { + int slot = slots(); + int entries_per_slot = FeedbackMetadata::GetSlotSize(kind); + append(kind); + for (int i = 1; i < entries_per_slot; i++) { + append(FeedbackSlotKind::kInvalid); + } + return FeedbackSlot(slot); +} + +FeedbackSlot FeedbackVectorSpec::AddTypeProfileSlot() { + FeedbackSlot slot = AddSlot(FeedbackSlotKind::kTypeProfile); + CHECK_EQ(FeedbackVectorSpec::kTypeProfileSlotIndex, + FeedbackVector::GetIndex(slot)); + return slot; +} + +bool FeedbackVectorSpec::HasTypeProfileSlot() const { + FeedbackSlot slot = + FeedbackVector::ToSlot(FeedbackVectorSpec::kTypeProfileSlotIndex); + if (slots() <= slot.ToInt()) { + return false; + } + return GetKind(slot) == FeedbackSlotKind::kTypeProfile; +} + +static bool IsPropertyNameFeedback(MaybeObject feedback) { + HeapObject heap_object; + if (!feedback->GetHeapObjectIfStrong(&heap_object)) return false; + if (heap_object.IsString()) { + DCHECK(heap_object.IsInternalizedString()); + return true; + } + if (!heap_object.IsSymbol()) return false; + Symbol symbol = Symbol::cast(heap_object); + ReadOnlyRoots roots = symbol.GetReadOnlyRoots(); + return symbol != roots.uninitialized_symbol() && + symbol != roots.premonomorphic_symbol() && + symbol != roots.megamorphic_symbol(); +} + +std::ostream& operator<<(std::ostream& os, FeedbackSlotKind kind) { + return os << FeedbackMetadata::Kind2String(kind); +} + +FeedbackSlotKind FeedbackMetadata::GetKind(FeedbackSlot slot) const { + int index = VectorICComputer::index(0, slot.ToInt()); + int data = get(index); + return 
VectorICComputer::decode(data, slot.ToInt()); +} + +void FeedbackMetadata::SetKind(FeedbackSlot slot, FeedbackSlotKind kind) { + int index = VectorICComputer::index(0, slot.ToInt()); + int data = get(index); + int new_data = VectorICComputer::encode(data, slot.ToInt(), kind); + set(index, new_data); +} + +// static +Handle FeedbackMetadata::New(Isolate* isolate, + const FeedbackVectorSpec* spec) { + Factory* factory = isolate->factory(); + + const int slot_count = spec == nullptr ? 0 : spec->slots(); + const int closure_feedback_cell_count = + spec == nullptr ? 0 : spec->closure_feedback_cells(); + if (slot_count == 0 && closure_feedback_cell_count == 0) { + return factory->empty_feedback_metadata(); + } +#ifdef DEBUG + for (int i = 0; i < slot_count;) { + DCHECK(spec); + FeedbackSlotKind kind = spec->GetKind(FeedbackSlot(i)); + int entry_size = FeedbackMetadata::GetSlotSize(kind); + for (int j = 1; j < entry_size; j++) { + FeedbackSlotKind kind = spec->GetKind(FeedbackSlot(i + j)); + DCHECK_EQ(FeedbackSlotKind::kInvalid, kind); + } + i += entry_size; + } +#endif + + Handle metadata = + factory->NewFeedbackMetadata(slot_count, closure_feedback_cell_count); + + // Initialize the slots. The raw data section has already been pre-zeroed in + // NewFeedbackMetadata. 
+ for (int i = 0; i < slot_count; i++) { + DCHECK(spec); + FeedbackSlot slot(i); + FeedbackSlotKind kind = spec->GetKind(slot); + metadata->SetKind(slot, kind); + } + + return metadata; +} + +bool FeedbackMetadata::SpecDiffersFrom( + const FeedbackVectorSpec* other_spec) const { + if (other_spec->slots() != slot_count()) { + return true; + } + + int slots = slot_count(); + for (int i = 0; i < slots;) { + FeedbackSlot slot(i); + FeedbackSlotKind kind = GetKind(slot); + int entry_size = FeedbackMetadata::GetSlotSize(kind); + + if (kind != other_spec->GetKind(slot)) { + return true; + } + i += entry_size; + } + return false; +} + +const char* FeedbackMetadata::Kind2String(FeedbackSlotKind kind) { + switch (kind) { + case FeedbackSlotKind::kInvalid: + return "Invalid"; + case FeedbackSlotKind::kCall: + return "Call"; + case FeedbackSlotKind::kLoadProperty: + return "LoadProperty"; + case FeedbackSlotKind::kLoadGlobalInsideTypeof: + return "LoadGlobalInsideTypeof"; + case FeedbackSlotKind::kLoadGlobalNotInsideTypeof: + return "LoadGlobalNotInsideTypeof"; + case FeedbackSlotKind::kLoadKeyed: + return "LoadKeyed"; + case FeedbackSlotKind::kHasKeyed: + return "HasKeyed"; + case FeedbackSlotKind::kStoreNamedSloppy: + return "StoreNamedSloppy"; + case FeedbackSlotKind::kStoreNamedStrict: + return "StoreNamedStrict"; + case FeedbackSlotKind::kStoreOwnNamed: + return "StoreOwnNamed"; + case FeedbackSlotKind::kStoreGlobalSloppy: + return "StoreGlobalSloppy"; + case FeedbackSlotKind::kStoreGlobalStrict: + return "StoreGlobalStrict"; + case FeedbackSlotKind::kStoreKeyedSloppy: + return "StoreKeyedSloppy"; + case FeedbackSlotKind::kStoreKeyedStrict: + return "StoreKeyedStrict"; + case FeedbackSlotKind::kStoreInArrayLiteral: + return "StoreInArrayLiteral"; + case FeedbackSlotKind::kBinaryOp: + return "BinaryOp"; + case FeedbackSlotKind::kCompareOp: + return "CompareOp"; + case FeedbackSlotKind::kStoreDataPropertyInLiteral: + return "StoreDataPropertyInLiteral"; + case 
FeedbackSlotKind::kLiteral: + return "Literal"; + case FeedbackSlotKind::kTypeProfile: + return "TypeProfile"; + case FeedbackSlotKind::kForIn: + return "ForIn"; + case FeedbackSlotKind::kInstanceOf: + return "InstanceOf"; + case FeedbackSlotKind::kCloneObject: + return "CloneObject"; + case FeedbackSlotKind::kKindsNumber: + break; + } + UNREACHABLE(); +} + +bool FeedbackMetadata::HasTypeProfileSlot() const { + FeedbackSlot slot = + FeedbackVector::ToSlot(FeedbackVectorSpec::kTypeProfileSlotIndex); + return slot.ToInt() < slot_count() && + GetKind(slot) == FeedbackSlotKind::kTypeProfile; +} + +FeedbackSlotKind FeedbackVector::GetKind(FeedbackSlot slot) const { + DCHECK(!is_empty()); + return metadata().GetKind(slot); +} + +FeedbackSlot FeedbackVector::GetTypeProfileSlot() const { + DCHECK(metadata().HasTypeProfileSlot()); + FeedbackSlot slot = + FeedbackVector::ToSlot(FeedbackVectorSpec::kTypeProfileSlotIndex); + DCHECK_EQ(FeedbackSlotKind::kTypeProfile, GetKind(slot)); + return slot; +} + +// static +Handle ClosureFeedbackCellArray::New( + Isolate* isolate, Handle shared) { + Factory* factory = isolate->factory(); + + int num_feedback_cells = + shared->feedback_metadata().closure_feedback_cell_count(); + + Handle feedback_cell_array = + factory->NewClosureFeedbackCellArray(num_feedback_cells); + + for (int i = 0; i < num_feedback_cells; i++) { + Handle cell = + factory->NewNoClosuresCell(factory->undefined_value()); + feedback_cell_array->set(i, *cell); + } + return feedback_cell_array; +} + +// static +Handle FeedbackVector::New( + Isolate* isolate, Handle shared, + Handle closure_feedback_cell_array) { + Factory* factory = isolate->factory(); + + const int slot_count = shared->feedback_metadata().slot_count(); + + Handle vector = factory->NewFeedbackVector( + shared, closure_feedback_cell_array, AllocationType::kOld); + + DCHECK_EQ(vector->length(), slot_count); + + DCHECK_EQ(vector->shared_function_info(), *shared); + DCHECK_EQ( + 
vector->optimized_code_weak_or_smi(), + MaybeObject::FromSmi(Smi::FromEnum( + FLAG_log_function_events ? OptimizationMarker::kLogFirstExecution + : OptimizationMarker::kNone))); + DCHECK_EQ(vector->invocation_count(), 0); + DCHECK_EQ(vector->profiler_ticks(), 0); + + // Ensure we can skip the write barrier + Handle uninitialized_sentinel = UninitializedSentinel(isolate); + DCHECK_EQ(ReadOnlyRoots(isolate).uninitialized_symbol(), + *uninitialized_sentinel); + for (int i = 0; i < slot_count;) { + FeedbackSlot slot(i); + FeedbackSlotKind kind = shared->feedback_metadata().GetKind(slot); + int index = FeedbackVector::GetIndex(slot); + int entry_size = FeedbackMetadata::GetSlotSize(kind); + + Object extra_value = *uninitialized_sentinel; + switch (kind) { + case FeedbackSlotKind::kLoadGlobalInsideTypeof: + case FeedbackSlotKind::kLoadGlobalNotInsideTypeof: + case FeedbackSlotKind::kStoreGlobalSloppy: + case FeedbackSlotKind::kStoreGlobalStrict: + vector->set(index, HeapObjectReference::ClearedValue(isolate), + SKIP_WRITE_BARRIER); + break; + case FeedbackSlotKind::kForIn: + case FeedbackSlotKind::kCompareOp: + case FeedbackSlotKind::kBinaryOp: + vector->set(index, Smi::kZero, SKIP_WRITE_BARRIER); + break; + case FeedbackSlotKind::kLiteral: + vector->set(index, Smi::kZero, SKIP_WRITE_BARRIER); + break; + case FeedbackSlotKind::kCall: + vector->set(index, *uninitialized_sentinel, SKIP_WRITE_BARRIER); + extra_value = Smi::kZero; + break; + case FeedbackSlotKind::kCloneObject: + case FeedbackSlotKind::kLoadProperty: + case FeedbackSlotKind::kLoadKeyed: + case FeedbackSlotKind::kHasKeyed: + case FeedbackSlotKind::kStoreNamedSloppy: + case FeedbackSlotKind::kStoreNamedStrict: + case FeedbackSlotKind::kStoreOwnNamed: + case FeedbackSlotKind::kStoreKeyedSloppy: + case FeedbackSlotKind::kStoreKeyedStrict: + case FeedbackSlotKind::kStoreInArrayLiteral: + case FeedbackSlotKind::kStoreDataPropertyInLiteral: + case FeedbackSlotKind::kTypeProfile: + case 
FeedbackSlotKind::kInstanceOf: + vector->set(index, *uninitialized_sentinel, SKIP_WRITE_BARRIER); + break; + + case FeedbackSlotKind::kInvalid: + case FeedbackSlotKind::kKindsNumber: + UNREACHABLE(); + break; + } + for (int j = 1; j < entry_size; j++) { + vector->set(index + j, extra_value, SKIP_WRITE_BARRIER); + } + i += entry_size; + } + + Handle result = Handle::cast(vector); + if (!isolate->is_best_effort_code_coverage() || + isolate->is_collecting_type_profile()) { + AddToVectorsForProfilingTools(isolate, result); + } + return result; +} + +// static +void FeedbackVector::AddToVectorsForProfilingTools( + Isolate* isolate, Handle vector) { + DCHECK(!isolate->is_best_effort_code_coverage() || + isolate->is_collecting_type_profile()); + if (!vector->shared_function_info().IsSubjectToDebugging()) return; + Handle list = Handle::cast( + isolate->factory()->feedback_vectors_for_profiling_tools()); + list = ArrayList::Add(isolate, list, vector); + isolate->SetFeedbackVectorsForProfilingTools(*list); +} + +// static +void FeedbackVector::SetOptimizedCode(Handle vector, + Handle code) { + DCHECK_EQ(code->kind(), Code::OPTIMIZED_FUNCTION); + vector->set_optimized_code_weak_or_smi(HeapObjectReference::Weak(*code)); +} + +void FeedbackVector::ClearOptimizedCode() { + DCHECK(has_optimized_code()); + SetOptimizationMarker(OptimizationMarker::kNone); +} + +void FeedbackVector::ClearOptimizationMarker() { + DCHECK(!has_optimized_code()); + SetOptimizationMarker(OptimizationMarker::kNone); +} + +void FeedbackVector::SetOptimizationMarker(OptimizationMarker marker) { + set_optimized_code_weak_or_smi(MaybeObject::FromSmi(Smi::FromEnum(marker))); +} + +void FeedbackVector::EvictOptimizedCodeMarkedForDeoptimization( + SharedFunctionInfo shared, const char* reason) { + MaybeObject slot = optimized_code_weak_or_smi(); + if (slot->IsSmi()) { + return; + } + + if (slot->IsCleared()) { + ClearOptimizationMarker(); + return; + } + + Code code = Code::cast(slot->GetHeapObject()); + if 
(code.marked_for_deoptimization()) { + if (FLAG_trace_deopt) { + PrintF("[evicting optimizing code marked for deoptimization (%s) for ", + reason); + shared.ShortPrint(); + PrintF("]\n"); + } + if (!code.deopt_already_counted()) { + code.set_deopt_already_counted(true); + } + ClearOptimizedCode(); + } +} + +bool FeedbackVector::ClearSlots(Isolate* isolate) { + MaybeObject uninitialized_sentinel = MaybeObject::FromObject( + FeedbackVector::RawUninitializedSentinel(isolate)); + + bool feedback_updated = false; + FeedbackMetadataIterator iter(metadata()); + while (iter.HasNext()) { + FeedbackSlot slot = iter.Next(); + + MaybeObject obj = Get(slot); + if (obj != uninitialized_sentinel) { + FeedbackNexus nexus(*this, slot); + feedback_updated |= nexus.Clear(); + } + } + return feedback_updated; +} + +void FeedbackVector::AssertNoLegacyTypes(MaybeObject object) { +#ifdef DEBUG + HeapObject heap_object; + if (object->GetHeapObject(&heap_object)) { + // Instead of FixedArray, the Feedback and the Extra should contain + // WeakFixedArrays. The only allowed FixedArray subtype is HashTable. 
+ DCHECK_IMPLIES(heap_object.IsFixedArray(), heap_object.IsHashTable()); + } +#endif +} + +Handle FeedbackNexus::EnsureArrayOfSize(int length) { + Isolate* isolate = GetIsolate(); + HeapObject heap_object; + if (GetFeedback()->GetHeapObjectIfStrong(&heap_object) && + heap_object.IsWeakFixedArray() && + WeakFixedArray::cast(heap_object).length() == length) { + return handle(WeakFixedArray::cast(heap_object), isolate); + } + Handle array = isolate->factory()->NewWeakFixedArray(length); + SetFeedback(*array); + return array; +} + +Handle FeedbackNexus::EnsureExtraArrayOfSize(int length) { + Isolate* isolate = GetIsolate(); + HeapObject heap_object; + if (GetFeedbackExtra()->GetHeapObjectIfStrong(&heap_object) && + heap_object.IsWeakFixedArray() && + WeakFixedArray::cast(heap_object).length() == length) { + return handle(WeakFixedArray::cast(heap_object), isolate); + } + Handle array = isolate->factory()->NewWeakFixedArray(length); + SetFeedbackExtra(*array); + return array; +} + +void FeedbackNexus::ConfigureUninitialized() { + Isolate* isolate = GetIsolate(); + switch (kind()) { + case FeedbackSlotKind::kStoreGlobalSloppy: + case FeedbackSlotKind::kStoreGlobalStrict: + case FeedbackSlotKind::kLoadGlobalNotInsideTypeof: + case FeedbackSlotKind::kLoadGlobalInsideTypeof: { + SetFeedback(HeapObjectReference::ClearedValue(isolate), + SKIP_WRITE_BARRIER); + SetFeedbackExtra(*FeedbackVector::UninitializedSentinel(isolate), + SKIP_WRITE_BARRIER); + break; + } + case FeedbackSlotKind::kCloneObject: + case FeedbackSlotKind::kCall: { + SetFeedback(*FeedbackVector::UninitializedSentinel(isolate), + SKIP_WRITE_BARRIER); + SetFeedbackExtra(Smi::kZero, SKIP_WRITE_BARRIER); + break; + } + case FeedbackSlotKind::kInstanceOf: { + SetFeedback(*FeedbackVector::UninitializedSentinel(isolate), + SKIP_WRITE_BARRIER); + break; + } + case FeedbackSlotKind::kStoreNamedSloppy: + case FeedbackSlotKind::kStoreNamedStrict: + case FeedbackSlotKind::kStoreKeyedSloppy: + case 
FeedbackSlotKind::kStoreKeyedStrict: + case FeedbackSlotKind::kStoreInArrayLiteral: + case FeedbackSlotKind::kStoreOwnNamed: + case FeedbackSlotKind::kLoadProperty: + case FeedbackSlotKind::kLoadKeyed: + case FeedbackSlotKind::kHasKeyed: + case FeedbackSlotKind::kStoreDataPropertyInLiteral: { + SetFeedback(*FeedbackVector::UninitializedSentinel(isolate), + SKIP_WRITE_BARRIER); + SetFeedbackExtra(*FeedbackVector::UninitializedSentinel(isolate), + SKIP_WRITE_BARRIER); + break; + } + default: + UNREACHABLE(); + } +} + +bool FeedbackNexus::Clear() { + bool feedback_updated = false; + + switch (kind()) { + case FeedbackSlotKind::kTypeProfile: + // We don't clear these kinds ever. + break; + + case FeedbackSlotKind::kCompareOp: + case FeedbackSlotKind::kForIn: + case FeedbackSlotKind::kBinaryOp: + // We don't clear these, either. + break; + + case FeedbackSlotKind::kLiteral: + SetFeedback(Smi::kZero, SKIP_WRITE_BARRIER); + feedback_updated = true; + break; + + case FeedbackSlotKind::kStoreNamedSloppy: + case FeedbackSlotKind::kStoreNamedStrict: + case FeedbackSlotKind::kStoreKeyedSloppy: + case FeedbackSlotKind::kStoreKeyedStrict: + case FeedbackSlotKind::kStoreInArrayLiteral: + case FeedbackSlotKind::kStoreOwnNamed: + case FeedbackSlotKind::kLoadProperty: + case FeedbackSlotKind::kLoadKeyed: + case FeedbackSlotKind::kHasKeyed: + case FeedbackSlotKind::kStoreGlobalSloppy: + case FeedbackSlotKind::kStoreGlobalStrict: + case FeedbackSlotKind::kLoadGlobalNotInsideTypeof: + case FeedbackSlotKind::kLoadGlobalInsideTypeof: + case FeedbackSlotKind::kCall: + case FeedbackSlotKind::kInstanceOf: + case FeedbackSlotKind::kStoreDataPropertyInLiteral: + case FeedbackSlotKind::kCloneObject: + if (!IsCleared()) { + ConfigureUninitialized(); + feedback_updated = true; + } + break; + + case FeedbackSlotKind::kInvalid: + case FeedbackSlotKind::kKindsNumber: + UNREACHABLE(); + } + return feedback_updated; +} + +void FeedbackNexus::ConfigurePremonomorphic(Handle receiver_map) { + 
SetFeedback(*FeedbackVector::PremonomorphicSentinel(GetIsolate()), + SKIP_WRITE_BARRIER); + SetFeedbackExtra(HeapObjectReference::Weak(*receiver_map)); +} + +bool FeedbackNexus::ConfigureMegamorphic() { + DisallowHeapAllocation no_gc; + Isolate* isolate = GetIsolate(); + MaybeObject sentinel = + MaybeObject::FromObject(*FeedbackVector::MegamorphicSentinel(isolate)); + if (GetFeedback() != sentinel) { + SetFeedback(sentinel, SKIP_WRITE_BARRIER); + SetFeedbackExtra(HeapObjectReference::ClearedValue(isolate)); + return true; + } + + return false; +} + +bool FeedbackNexus::ConfigureMegamorphic(IcCheckType property_type) { + DisallowHeapAllocation no_gc; + Isolate* isolate = GetIsolate(); + bool changed = false; + MaybeObject sentinel = + MaybeObject::FromObject(*FeedbackVector::MegamorphicSentinel(isolate)); + if (GetFeedback() != sentinel) { + SetFeedback(sentinel, SKIP_WRITE_BARRIER); + changed = true; + } + + Smi extra = Smi::FromInt(static_cast(property_type)); + if (changed || GetFeedbackExtra() != MaybeObject::FromSmi(extra)) { + SetFeedbackExtra(extra, SKIP_WRITE_BARRIER); + changed = true; + } + return changed; +} + +Map FeedbackNexus::GetFirstMap() const { + MapHandles maps; + ExtractMaps(&maps); + if (!maps.empty()) return *maps.at(0); + return Map(); +} + +InlineCacheState FeedbackNexus::ic_state() const { + Isolate* isolate = GetIsolate(); + MaybeObject feedback = GetFeedback(); + + switch (kind()) { + case FeedbackSlotKind::kLiteral: + if (feedback->IsSmi()) return UNINITIALIZED; + return MONOMORPHIC; + + case FeedbackSlotKind::kStoreGlobalSloppy: + case FeedbackSlotKind::kStoreGlobalStrict: + case FeedbackSlotKind::kLoadGlobalNotInsideTypeof: + case FeedbackSlotKind::kLoadGlobalInsideTypeof: { + if (feedback->IsSmi()) return MONOMORPHIC; + + if (feedback == MaybeObject::FromObject( + *FeedbackVector::PremonomorphicSentinel(isolate))) { + DCHECK(kind() == FeedbackSlotKind::kStoreGlobalSloppy || + kind() == FeedbackSlotKind::kStoreGlobalStrict); + return 
PREMONOMORPHIC; + } + + DCHECK(feedback->IsWeakOrCleared()); + MaybeObject extra = GetFeedbackExtra(); + if (!feedback->IsCleared() || + extra != MaybeObject::FromObject( + *FeedbackVector::UninitializedSentinel(isolate))) { + return MONOMORPHIC; + } + return UNINITIALIZED; + } + + case FeedbackSlotKind::kStoreNamedSloppy: + case FeedbackSlotKind::kStoreNamedStrict: + case FeedbackSlotKind::kStoreKeyedSloppy: + case FeedbackSlotKind::kStoreKeyedStrict: + case FeedbackSlotKind::kStoreInArrayLiteral: + case FeedbackSlotKind::kStoreOwnNamed: + case FeedbackSlotKind::kLoadProperty: + case FeedbackSlotKind::kLoadKeyed: + case FeedbackSlotKind::kHasKeyed: { + if (feedback == MaybeObject::FromObject( + *FeedbackVector::UninitializedSentinel(isolate))) { + return UNINITIALIZED; + } + if (feedback == MaybeObject::FromObject( + *FeedbackVector::MegamorphicSentinel(isolate))) { + return MEGAMORPHIC; + } + if (feedback == MaybeObject::FromObject( + *FeedbackVector::PremonomorphicSentinel(isolate))) { + return PREMONOMORPHIC; + } + if (feedback->IsWeakOrCleared()) { + // Don't check if the map is cleared. + return MONOMORPHIC; + } + HeapObject heap_object; + if (feedback->GetHeapObjectIfStrong(&heap_object)) { + if (heap_object.IsWeakFixedArray()) { + // Determine state purely by our structure, don't check if the maps + // are cleared. + return POLYMORPHIC; + } + if (heap_object.IsName()) { + DCHECK(IsKeyedLoadICKind(kind()) || IsKeyedStoreICKind(kind()) || + IsKeyedHasICKind(kind())); + Object extra = GetFeedbackExtra()->GetHeapObjectAssumeStrong(); + WeakFixedArray extra_array = WeakFixedArray::cast(extra); + return extra_array.length() > 2 ? 
POLYMORPHIC : MONOMORPHIC; + } + } + UNREACHABLE(); + } + case FeedbackSlotKind::kCall: { + HeapObject heap_object; + if (feedback == MaybeObject::FromObject( + *FeedbackVector::MegamorphicSentinel(isolate))) { + return GENERIC; + } else if (feedback->IsWeakOrCleared() || + (feedback->GetHeapObjectIfStrong(&heap_object) && + heap_object.IsAllocationSite())) { + return MONOMORPHIC; + } + + CHECK_EQ(feedback, MaybeObject::FromObject( + *FeedbackVector::UninitializedSentinel(isolate))); + return UNINITIALIZED; + } + case FeedbackSlotKind::kBinaryOp: { + BinaryOperationHint hint = GetBinaryOperationFeedback(); + if (hint == BinaryOperationHint::kNone) { + return UNINITIALIZED; + } else if (hint == BinaryOperationHint::kAny) { + return GENERIC; + } + + return MONOMORPHIC; + } + case FeedbackSlotKind::kCompareOp: { + CompareOperationHint hint = GetCompareOperationFeedback(); + if (hint == CompareOperationHint::kNone) { + return UNINITIALIZED; + } else if (hint == CompareOperationHint::kAny) { + return GENERIC; + } + + return MONOMORPHIC; + } + case FeedbackSlotKind::kForIn: { + ForInHint hint = GetForInFeedback(); + if (hint == ForInHint::kNone) { + return UNINITIALIZED; + } else if (hint == ForInHint::kAny) { + return GENERIC; + } + return MONOMORPHIC; + } + case FeedbackSlotKind::kInstanceOf: { + if (feedback == MaybeObject::FromObject( + *FeedbackVector::UninitializedSentinel(isolate))) { + return UNINITIALIZED; + } else if (feedback == + MaybeObject::FromObject( + *FeedbackVector::MegamorphicSentinel(isolate))) { + return MEGAMORPHIC; + } + return MONOMORPHIC; + } + case FeedbackSlotKind::kStoreDataPropertyInLiteral: { + if (feedback == MaybeObject::FromObject( + *FeedbackVector::UninitializedSentinel(isolate))) { + return UNINITIALIZED; + } else if (feedback->IsWeakOrCleared()) { + // Don't check if the map is cleared. 
+ return MONOMORPHIC; + } + + return MEGAMORPHIC; + } + case FeedbackSlotKind::kTypeProfile: { + if (feedback == MaybeObject::FromObject( + *FeedbackVector::UninitializedSentinel(isolate))) { + return UNINITIALIZED; + } + return MONOMORPHIC; + } + + case FeedbackSlotKind::kCloneObject: { + if (feedback == MaybeObject::FromObject( + *FeedbackVector::UninitializedSentinel(isolate))) { + return UNINITIALIZED; + } + if (feedback == MaybeObject::FromObject( + *FeedbackVector::MegamorphicSentinel(isolate))) { + return MEGAMORPHIC; + } + if (feedback->IsWeakOrCleared()) { + return MONOMORPHIC; + } + + DCHECK(feedback->GetHeapObjectAssumeStrong().IsWeakFixedArray()); + return POLYMORPHIC; + } + + case FeedbackSlotKind::kInvalid: + case FeedbackSlotKind::kKindsNumber: + UNREACHABLE(); + } + return UNINITIALIZED; +} + +void FeedbackNexus::ConfigurePropertyCellMode(Handle cell) { + DCHECK(IsGlobalICKind(kind())); + Isolate* isolate = GetIsolate(); + SetFeedback(HeapObjectReference::Weak(*cell)); + SetFeedbackExtra(*FeedbackVector::UninitializedSentinel(isolate), + SKIP_WRITE_BARRIER); +} + +bool FeedbackNexus::ConfigureLexicalVarMode(int script_context_index, + int context_slot_index, + bool immutable) { + DCHECK(IsGlobalICKind(kind())); + DCHECK_LE(0, script_context_index); + DCHECK_LE(0, context_slot_index); + if (!ContextIndexBits::is_valid(script_context_index) || + !SlotIndexBits::is_valid(context_slot_index) || + !ImmutabilityBit::is_valid(immutable)) { + return false; + } + int config = ContextIndexBits::encode(script_context_index) | + SlotIndexBits::encode(context_slot_index) | + ImmutabilityBit::encode(immutable); + + SetFeedback(Smi::From31BitPattern(config)); + Isolate* isolate = GetIsolate(); + SetFeedbackExtra(*FeedbackVector::UninitializedSentinel(isolate), + SKIP_WRITE_BARRIER); + return true; +} + +void FeedbackNexus::ConfigureHandlerMode(const MaybeObjectHandle& handler) { + DCHECK(IsGlobalICKind(kind())); + DCHECK(IC::IsHandler(*handler)); + 
SetFeedback(HeapObjectReference::ClearedValue(GetIsolate())); + SetFeedbackExtra(*handler); +} + +void FeedbackNexus::ConfigureCloneObject(Handle source_map, + Handle result_map) { + Isolate* isolate = GetIsolate(); + MaybeObject maybe_feedback = GetFeedback(); + Handle feedback(maybe_feedback->IsStrongOrWeak() + ? maybe_feedback->GetHeapObject() + : HeapObject(), + isolate); + switch (ic_state()) { + case UNINITIALIZED: + // Cache the first map seen which meets the fast case requirements. + SetFeedback(HeapObjectReference::Weak(*source_map)); + SetFeedbackExtra(*result_map); + break; + case MONOMORPHIC: + if (maybe_feedback->IsCleared() || feedback.is_identical_to(source_map) || + Map::cast(*feedback).is_deprecated()) { + // Remain in MONOMORPHIC state if previous feedback has been collected. + SetFeedback(HeapObjectReference::Weak(*source_map)); + SetFeedbackExtra(*result_map); + } else { + // Transition to POLYMORPHIC. + Handle array = + EnsureArrayOfSize(2 * kCloneObjectPolymorphicEntrySize); + array->Set(0, maybe_feedback); + array->Set(1, GetFeedbackExtra()); + array->Set(2, HeapObjectReference::Weak(*source_map)); + array->Set(3, MaybeObject::FromObject(*result_map)); + SetFeedbackExtra(HeapObjectReference::ClearedValue(isolate)); + } + break; + case POLYMORPHIC: { + const int kMaxElements = + FLAG_max_polymorphic_map_count * kCloneObjectPolymorphicEntrySize; + Handle array = Handle::cast(feedback); + int i = 0; + for (; i < array->length(); i += kCloneObjectPolymorphicEntrySize) { + MaybeObject feedback = array->Get(i); + if (feedback->IsCleared()) break; + Handle cached_map(Map::cast(feedback->GetHeapObject()), isolate); + if (cached_map.is_identical_to(source_map) || + cached_map->is_deprecated()) + break; + } + + if (i >= array->length()) { + if (i == kMaxElements) { + // Transition to MEGAMORPHIC. 
+ MaybeObject sentinel = MaybeObject::FromObject( + *FeedbackVector::MegamorphicSentinel(isolate)); + SetFeedback(sentinel, SKIP_WRITE_BARRIER); + SetFeedbackExtra(HeapObjectReference::ClearedValue(isolate)); + break; + } + + // Grow polymorphic feedback array. + Handle new_array = EnsureArrayOfSize( + array->length() + kCloneObjectPolymorphicEntrySize); + for (int j = 0; j < array->length(); ++j) { + new_array->Set(j, array->Get(j)); + } + array = new_array; + } + + array->Set(i, HeapObjectReference::Weak(*source_map)); + array->Set(i + 1, MaybeObject::FromObject(*result_map)); + break; + } + + default: + UNREACHABLE(); + } +} + +int FeedbackNexus::GetCallCount() { + DCHECK(IsCallICKind(kind())); + + Object call_count = GetFeedbackExtra()->cast(); + CHECK(call_count.IsSmi()); + uint32_t value = static_cast(Smi::ToInt(call_count)); + return CallCountField::decode(value); +} + +void FeedbackNexus::SetSpeculationMode(SpeculationMode mode) { + DCHECK(IsCallICKind(kind())); + + Object call_count = GetFeedbackExtra()->cast(); + CHECK(call_count.IsSmi()); + uint32_t count = static_cast(Smi::ToInt(call_count)); + uint32_t value = CallCountField::encode(CallCountField::decode(count)); + int result = static_cast(value | SpeculationModeField::encode(mode)); + SetFeedbackExtra(Smi::FromInt(result), SKIP_WRITE_BARRIER); +} + +SpeculationMode FeedbackNexus::GetSpeculationMode() { + DCHECK(IsCallICKind(kind())); + + Object call_count = GetFeedbackExtra()->cast(); + CHECK(call_count.IsSmi()); + uint32_t value = static_cast(Smi::ToInt(call_count)); + return SpeculationModeField::decode(value); +} + +float FeedbackNexus::ComputeCallFrequency() { + DCHECK(IsCallICKind(kind())); + + double const invocation_count = vector().invocation_count(); + double const call_count = GetCallCount(); + if (invocation_count == 0) { + // Prevent division by 0. 
+ return 0.0f; + } + return static_cast(call_count / invocation_count); +} + +void FeedbackNexus::ConfigureMonomorphic(Handle name, + Handle receiver_map, + const MaybeObjectHandle& handler) { + DCHECK(handler.is_null() || IC::IsHandler(*handler)); + if (kind() == FeedbackSlotKind::kStoreDataPropertyInLiteral) { + SetFeedback(HeapObjectReference::Weak(*receiver_map)); + SetFeedbackExtra(*name); + } else { + if (name.is_null()) { + SetFeedback(HeapObjectReference::Weak(*receiver_map)); + SetFeedbackExtra(*handler); + } else { + Handle array = EnsureExtraArrayOfSize(2); + SetFeedback(*name); + array->Set(0, HeapObjectReference::Weak(*receiver_map)); + array->Set(1, *handler); + } + } +} + +void FeedbackNexus::ConfigurePolymorphic(Handle name, + MapHandles const& maps, + MaybeObjectHandles* handlers) { + DCHECK_EQ(handlers->size(), maps.size()); + int receiver_count = static_cast(maps.size()); + DCHECK_GT(receiver_count, 1); + Handle array; + if (name.is_null()) { + array = EnsureArrayOfSize(receiver_count * 2); + SetFeedbackExtra(*FeedbackVector::UninitializedSentinel(GetIsolate()), + SKIP_WRITE_BARRIER); + } else { + array = EnsureExtraArrayOfSize(receiver_count * 2); + SetFeedback(*name); + } + + for (int current = 0; current < receiver_count; ++current) { + Handle map = maps[current]; + array->Set(current * 2, HeapObjectReference::Weak(*map)); + DCHECK(IC::IsHandler(*handlers->at(current))); + array->Set(current * 2 + 1, *handlers->at(current)); + } +} + +int FeedbackNexus::ExtractMaps(MapHandles* maps) const { + DCHECK(IsLoadICKind(kind()) || IsStoreICKind(kind()) || + IsKeyedLoadICKind(kind()) || IsKeyedStoreICKind(kind()) || + IsStoreOwnICKind(kind()) || IsStoreDataPropertyInLiteralKind(kind()) || + IsStoreInArrayLiteralICKind(kind()) || IsKeyedHasICKind(kind())); + + Isolate* isolate = GetIsolate(); + MaybeObject feedback = GetFeedback(); + bool is_named_feedback = IsPropertyNameFeedback(feedback); + HeapObject heap_object; + if 
((feedback->GetHeapObjectIfStrong(&heap_object) && + heap_object.IsWeakFixedArray()) || + is_named_feedback) { + int found = 0; + WeakFixedArray array; + if (is_named_feedback) { + array = + WeakFixedArray::cast(GetFeedbackExtra()->GetHeapObjectAssumeStrong()); + } else { + array = WeakFixedArray::cast(heap_object); + } + const int increment = 2; + HeapObject heap_object; + for (int i = 0; i < array.length(); i += increment) { + DCHECK(array.Get(i)->IsWeakOrCleared()); + if (array.Get(i)->GetHeapObjectIfWeak(&heap_object)) { + Map map = Map::cast(heap_object); + maps->push_back(handle(map, isolate)); + found++; + } + } + return found; + } else if (feedback->GetHeapObjectIfWeak(&heap_object)) { + Map map = Map::cast(heap_object); + maps->push_back(handle(map, isolate)); + return 1; + } else if (feedback->GetHeapObjectIfStrong(&heap_object) && + heap_object == + heap_object.GetReadOnlyRoots().premonomorphic_symbol()) { + if (GetFeedbackExtra()->GetHeapObjectIfWeak(&heap_object)) { + Map map = Map::cast(heap_object); + maps->push_back(handle(map, isolate)); + return 1; + } + } + + return 0; +} + +MaybeObjectHandle FeedbackNexus::FindHandlerForMap(Handle map) const { + DCHECK(IsLoadICKind(kind()) || IsStoreICKind(kind()) || + IsKeyedLoadICKind(kind()) || IsKeyedStoreICKind(kind()) || + IsStoreOwnICKind(kind()) || IsStoreDataPropertyInLiteralKind(kind()) || + IsKeyedHasICKind(kind())); + + MaybeObject feedback = GetFeedback(); + Isolate* isolate = GetIsolate(); + bool is_named_feedback = IsPropertyNameFeedback(feedback); + HeapObject heap_object; + if ((feedback->GetHeapObjectIfStrong(&heap_object) && + heap_object.IsWeakFixedArray()) || + is_named_feedback) { + WeakFixedArray array; + if (is_named_feedback) { + array = + WeakFixedArray::cast(GetFeedbackExtra()->GetHeapObjectAssumeStrong()); + } else { + array = WeakFixedArray::cast(heap_object); + } + const int increment = 2; + HeapObject heap_object; + for (int i = 0; i < array.length(); i += increment) { + 
DCHECK(array.Get(i)->IsWeakOrCleared()); + if (array.Get(i)->GetHeapObjectIfWeak(&heap_object)) { + Map array_map = Map::cast(heap_object); + if (array_map == *map && !array.Get(i + increment - 1)->IsCleared()) { + MaybeObject handler = array.Get(i + increment - 1); + DCHECK(IC::IsHandler(handler)); + return handle(handler, isolate); + } + } + } + } else if (feedback->GetHeapObjectIfWeak(&heap_object)) { + Map cell_map = Map::cast(heap_object); + if (cell_map == *map && !GetFeedbackExtra()->IsCleared()) { + MaybeObject handler = GetFeedbackExtra(); + DCHECK(IC::IsHandler(handler)); + return handle(handler, isolate); + } + } + + return MaybeObjectHandle(); +} + +bool FeedbackNexus::FindHandlers(MaybeObjectHandles* code_list, + int length) const { + DCHECK(IsLoadICKind(kind()) || IsStoreICKind(kind()) || + IsKeyedLoadICKind(kind()) || IsKeyedStoreICKind(kind()) || + IsStoreOwnICKind(kind()) || IsStoreDataPropertyInLiteralKind(kind()) || + IsStoreInArrayLiteralICKind(kind()) || IsKeyedHasICKind(kind())); + + MaybeObject feedback = GetFeedback(); + Isolate* isolate = GetIsolate(); + int count = 0; + bool is_named_feedback = IsPropertyNameFeedback(feedback); + HeapObject heap_object; + if ((feedback->GetHeapObjectIfStrong(&heap_object) && + heap_object.IsWeakFixedArray()) || + is_named_feedback) { + WeakFixedArray array; + if (is_named_feedback) { + array = + WeakFixedArray::cast(GetFeedbackExtra()->GetHeapObjectAssumeStrong()); + } else { + array = WeakFixedArray::cast(heap_object); + } + const int increment = 2; + HeapObject heap_object; + for (int i = 0; i < array.length(); i += increment) { + // Be sure to skip handlers whose maps have been cleared. 
+ DCHECK(array.Get(i)->IsWeakOrCleared()); + if (array.Get(i)->GetHeapObjectIfWeak(&heap_object) && + !array.Get(i + increment - 1)->IsCleared()) { + MaybeObject handler = array.Get(i + increment - 1); + DCHECK(IC::IsHandler(handler)); + code_list->push_back(handle(handler, isolate)); + count++; + } + } + } else if (feedback->GetHeapObjectIfWeak(&heap_object)) { + MaybeObject extra = GetFeedbackExtra(); + if (!extra->IsCleared()) { + DCHECK(IC::IsHandler(extra)); + code_list->push_back(handle(extra, isolate)); + count++; + } + } + return count == length; +} + +Name FeedbackNexus::GetName() const { + if (IsKeyedStoreICKind(kind()) || IsKeyedLoadICKind(kind()) || + IsKeyedHasICKind(kind())) { + MaybeObject feedback = GetFeedback(); + if (IsPropertyNameFeedback(feedback)) { + return Name::cast(feedback->GetHeapObjectAssumeStrong()); + } + } + return Name(); +} + +KeyedAccessLoadMode FeedbackNexus::GetKeyedAccessLoadMode() const { + DCHECK(IsKeyedLoadICKind(kind()) || IsKeyedHasICKind(kind())); + MapHandles maps; + MaybeObjectHandles handlers; + + if (GetKeyType() == PROPERTY) return STANDARD_LOAD; + + ExtractMaps(&maps); + FindHandlers(&handlers, static_cast(maps.size())); + for (MaybeObjectHandle const& handler : handlers) { + KeyedAccessLoadMode mode = LoadHandler::GetKeyedAccessLoadMode(*handler); + if (mode != STANDARD_LOAD) return mode; + } + + return STANDARD_LOAD; +} + +namespace { + +bool BuiltinHasKeyedAccessStoreMode(int builtin_index) { + DCHECK(Builtins::IsBuiltinId(builtin_index)); + switch (builtin_index) { + case Builtins::kKeyedStoreIC_SloppyArguments_Standard: + case Builtins::kKeyedStoreIC_SloppyArguments_GrowNoTransitionHandleCOW: + case Builtins::kKeyedStoreIC_SloppyArguments_NoTransitionIgnoreOOB: + case Builtins::kKeyedStoreIC_SloppyArguments_NoTransitionHandleCOW: + case Builtins::kStoreFastElementIC_Standard: + case Builtins::kStoreFastElementIC_GrowNoTransitionHandleCOW: + case Builtins::kStoreFastElementIC_NoTransitionIgnoreOOB: + case 
Builtins::kStoreFastElementIC_NoTransitionHandleCOW: + case Builtins::kStoreInArrayLiteralIC_Slow_Standard: + case Builtins::kStoreInArrayLiteralIC_Slow_GrowNoTransitionHandleCOW: + case Builtins::kStoreInArrayLiteralIC_Slow_NoTransitionIgnoreOOB: + case Builtins::kStoreInArrayLiteralIC_Slow_NoTransitionHandleCOW: + case Builtins::kKeyedStoreIC_Slow_Standard: + case Builtins::kKeyedStoreIC_Slow_GrowNoTransitionHandleCOW: + case Builtins::kKeyedStoreIC_Slow_NoTransitionIgnoreOOB: + case Builtins::kKeyedStoreIC_Slow_NoTransitionHandleCOW: + case Builtins::kElementsTransitionAndStore_Standard: + case Builtins::kElementsTransitionAndStore_GrowNoTransitionHandleCOW: + case Builtins::kElementsTransitionAndStore_NoTransitionIgnoreOOB: + case Builtins::kElementsTransitionAndStore_NoTransitionHandleCOW: + return true; + default: + return false; + } + UNREACHABLE(); +} + +KeyedAccessStoreMode KeyedAccessStoreModeForBuiltin(int builtin_index) { + DCHECK(BuiltinHasKeyedAccessStoreMode(builtin_index)); + switch (builtin_index) { + case Builtins::kKeyedStoreIC_SloppyArguments_Standard: + case Builtins::kStoreInArrayLiteralIC_Slow_Standard: + case Builtins::kKeyedStoreIC_Slow_Standard: + case Builtins::kStoreFastElementIC_Standard: + case Builtins::kElementsTransitionAndStore_Standard: + return STANDARD_STORE; + case Builtins::kKeyedStoreIC_SloppyArguments_GrowNoTransitionHandleCOW: + case Builtins::kStoreInArrayLiteralIC_Slow_GrowNoTransitionHandleCOW: + case Builtins::kKeyedStoreIC_Slow_GrowNoTransitionHandleCOW: + case Builtins::kStoreFastElementIC_GrowNoTransitionHandleCOW: + case Builtins::kElementsTransitionAndStore_GrowNoTransitionHandleCOW: + return STORE_AND_GROW_HANDLE_COW; + case Builtins::kKeyedStoreIC_SloppyArguments_NoTransitionIgnoreOOB: + case Builtins::kStoreInArrayLiteralIC_Slow_NoTransitionIgnoreOOB: + case Builtins::kKeyedStoreIC_Slow_NoTransitionIgnoreOOB: + case Builtins::kStoreFastElementIC_NoTransitionIgnoreOOB: + case 
Builtins::kElementsTransitionAndStore_NoTransitionIgnoreOOB: + return STORE_IGNORE_OUT_OF_BOUNDS; + case Builtins::kKeyedStoreIC_SloppyArguments_NoTransitionHandleCOW: + case Builtins::kStoreInArrayLiteralIC_Slow_NoTransitionHandleCOW: + case Builtins::kKeyedStoreIC_Slow_NoTransitionHandleCOW: + case Builtins::kStoreFastElementIC_NoTransitionHandleCOW: + case Builtins::kElementsTransitionAndStore_NoTransitionHandleCOW: + return STORE_HANDLE_COW; + default: + UNREACHABLE(); + } +} + +} // namespace + +KeyedAccessStoreMode FeedbackNexus::GetKeyedAccessStoreMode() const { + DCHECK(IsKeyedStoreICKind(kind()) || IsStoreInArrayLiteralICKind(kind())); + KeyedAccessStoreMode mode = STANDARD_STORE; + MapHandles maps; + MaybeObjectHandles handlers; + + if (GetKeyType() == PROPERTY) return mode; + + ExtractMaps(&maps); + FindHandlers(&handlers, static_cast(maps.size())); + for (const MaybeObjectHandle& maybe_code_handler : handlers) { + // The first handler that isn't the slow handler will have the bits we need. + Handle handler; + if (maybe_code_handler.object()->IsStoreHandler()) { + Handle data_handler = + Handle::cast(maybe_code_handler.object()); + handler = handle(Code::cast(data_handler->smi_handler()), + vector().GetIsolate()); + } else if (maybe_code_handler.object()->IsSmi()) { + // Skip proxy handlers. + DCHECK_EQ(*(maybe_code_handler.object()), + *StoreHandler::StoreProxy(GetIsolate())); + continue; + } else { + // Element store without prototype chain check. 
+ handler = Handle::cast(maybe_code_handler.object()); + } + + if (handler->is_builtin()) { + const int builtin_index = handler->builtin_index(); + if (!BuiltinHasKeyedAccessStoreMode(builtin_index)) continue; + + mode = KeyedAccessStoreModeForBuiltin(builtin_index); + break; + } + } + + return mode; +} + +IcCheckType FeedbackNexus::GetKeyType() const { + DCHECK(IsKeyedStoreICKind(kind()) || IsKeyedLoadICKind(kind()) || + IsStoreInArrayLiteralICKind(kind()) || IsKeyedHasICKind(kind())); + MaybeObject feedback = GetFeedback(); + if (feedback == MaybeObject::FromObject( + *FeedbackVector::MegamorphicSentinel(GetIsolate()))) { + return static_cast( + Smi::ToInt(GetFeedbackExtra()->cast())); + } + return IsPropertyNameFeedback(feedback) ? PROPERTY : ELEMENT; +} + +BinaryOperationHint FeedbackNexus::GetBinaryOperationFeedback() const { + DCHECK_EQ(kind(), FeedbackSlotKind::kBinaryOp); + int feedback = GetFeedback().ToSmi().value(); + return BinaryOperationHintFromFeedback(feedback); +} + +CompareOperationHint FeedbackNexus::GetCompareOperationFeedback() const { + DCHECK_EQ(kind(), FeedbackSlotKind::kCompareOp); + int feedback = GetFeedback().ToSmi().value(); + return CompareOperationHintFromFeedback(feedback); +} + +ForInHint FeedbackNexus::GetForInFeedback() const { + DCHECK_EQ(kind(), FeedbackSlotKind::kForIn); + int feedback = GetFeedback().ToSmi().value(); + return ForInHintFromFeedback(feedback); +} + +MaybeHandle FeedbackNexus::GetConstructorFeedback() const { + DCHECK_EQ(kind(), FeedbackSlotKind::kInstanceOf); + Isolate* isolate = GetIsolate(); + MaybeObject feedback = GetFeedback(); + HeapObject heap_object; + if (feedback->GetHeapObjectIfWeak(&heap_object)) { + return handle(JSObject::cast(heap_object), isolate); + } + return MaybeHandle(); +} + +namespace { + +bool InList(Handle types, Handle type) { + for (int i = 0; i < types->Length(); i++) { + Object obj = types->Get(i); + if (String::cast(obj).Equals(*type)) { + return true; + } + } + return false; +} +} 
// anonymous namespace + +void FeedbackNexus::Collect(Handle type, int position) { + DCHECK(IsTypeProfileKind(kind())); + DCHECK_GE(position, 0); + Isolate* isolate = GetIsolate(); + + MaybeObject const feedback = GetFeedback(); + + // Map source position to collection of types + Handle types; + + if (feedback == MaybeObject::FromObject( + *FeedbackVector::UninitializedSentinel(isolate))) { + types = SimpleNumberDictionary::New(isolate, 1); + } else { + types = handle( + SimpleNumberDictionary::cast(feedback->GetHeapObjectAssumeStrong()), + isolate); + } + + Handle position_specific_types; + + int entry = types->FindEntry(isolate, position); + if (entry == SimpleNumberDictionary::kNotFound) { + position_specific_types = ArrayList::New(isolate, 1); + types = SimpleNumberDictionary::Set( + isolate, types, position, + ArrayList::Add(isolate, position_specific_types, type)); + } else { + DCHECK(types->ValueAt(entry).IsArrayList()); + position_specific_types = + handle(ArrayList::cast(types->ValueAt(entry)), isolate); + if (!InList(position_specific_types, type)) { // Add type + types = SimpleNumberDictionary::Set( + isolate, types, position, + ArrayList::Add(isolate, position_specific_types, type)); + } + } + SetFeedback(*types); +} + +std::vector FeedbackNexus::GetSourcePositions() const { + DCHECK(IsTypeProfileKind(kind())); + std::vector source_positions; + Isolate* isolate = GetIsolate(); + + MaybeObject const feedback = GetFeedback(); + + if (feedback == MaybeObject::FromObject( + *FeedbackVector::UninitializedSentinel(isolate))) { + return source_positions; + } + + Handle types( + SimpleNumberDictionary::cast(feedback->GetHeapObjectAssumeStrong()), + isolate); + + for (int index = SimpleNumberDictionary::kElementsStartIndex; + index < types->length(); index += SimpleNumberDictionary::kEntrySize) { + int key_index = index + SimpleNumberDictionary::kEntryKeyIndex; + Object key = types->get(key_index); + if (key.IsSmi()) { + int position = Smi::cast(key).value(); + 
source_positions.push_back(position); + } + } + return source_positions; +} + +std::vector> FeedbackNexus::GetTypesForSourcePositions( + uint32_t position) const { + DCHECK(IsTypeProfileKind(kind())); + Isolate* isolate = GetIsolate(); + + MaybeObject const feedback = GetFeedback(); + std::vector> types_for_position; + if (feedback == MaybeObject::FromObject( + *FeedbackVector::UninitializedSentinel(isolate))) { + return types_for_position; + } + + Handle types( + SimpleNumberDictionary::cast(feedback->GetHeapObjectAssumeStrong()), + isolate); + + int entry = types->FindEntry(isolate, position); + if (entry == SimpleNumberDictionary::kNotFound) { + return types_for_position; + } + DCHECK(types->ValueAt(entry).IsArrayList()); + Handle position_specific_types = + Handle(ArrayList::cast(types->ValueAt(entry)), isolate); + for (int i = 0; i < position_specific_types->Length(); i++) { + Object t = position_specific_types->Get(i); + types_for_position.push_back(Handle(String::cast(t), isolate)); + } + + return types_for_position; +} + +namespace { + +Handle ConvertToJSObject(Isolate* isolate, + Handle feedback) { + Handle type_profile = + isolate->factory()->NewJSObject(isolate->object_function()); + + for (int index = SimpleNumberDictionary::kElementsStartIndex; + index < feedback->length(); + index += SimpleNumberDictionary::kEntrySize) { + int key_index = index + SimpleNumberDictionary::kEntryKeyIndex; + Object key = feedback->get(key_index); + if (key.IsSmi()) { + int value_index = index + SimpleNumberDictionary::kEntryValueIndex; + + Handle position_specific_types( + ArrayList::cast(feedback->get(value_index)), isolate); + + int position = Smi::ToInt(key); + JSObject::AddDataElement( + type_profile, position, + isolate->factory()->NewJSArrayWithElements( + ArrayList::Elements(isolate, position_specific_types)), + PropertyAttributes::NONE); + } + } + return type_profile; +} +} // namespace + +JSObject FeedbackNexus::GetTypeProfile() const { + 
DCHECK(IsTypeProfileKind(kind())); + Isolate* isolate = GetIsolate(); + + MaybeObject const feedback = GetFeedback(); + + if (feedback == MaybeObject::FromObject( + *FeedbackVector::UninitializedSentinel(isolate))) { + return *isolate->factory()->NewJSObject(isolate->object_function()); + } + + return *ConvertToJSObject(isolate, + handle(SimpleNumberDictionary::cast( + feedback->GetHeapObjectAssumeStrong()), + isolate)); +} + +void FeedbackNexus::ResetTypeProfile() { + DCHECK(IsTypeProfileKind(kind())); + SetFeedback(*FeedbackVector::UninitializedSentinel(GetIsolate())); +} + +} // namespace internal +} // namespace v8 diff --git a/deps/v8/src/objects/feedback-vector.h b/deps/v8/src/objects/feedback-vector.h new file mode 100644 index 0000000000..89e0b9e6aa --- /dev/null +++ b/deps/v8/src/objects/feedback-vector.h @@ -0,0 +1,772 @@ +// Copyright 2014 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef V8_OBJECTS_FEEDBACK_VECTOR_H_ +#define V8_OBJECTS_FEEDBACK_VECTOR_H_ + +#include + +#include "src/base/logging.h" +#include "src/base/macros.h" +#include "src/common/globals.h" +#include "src/objects/elements-kind.h" +#include "src/objects/map.h" +#include "src/objects/name.h" +#include "src/objects/type-hints.h" +#include "src/zone/zone-containers.h" + +// Has to be the last include (doesn't have include guards): +#include "src/objects/object-macros.h" + +namespace v8 { +namespace internal { + +enum class FeedbackSlotKind { + // This kind means that the slot points to the middle of other slot + // which occupies more than one feedback vector element. + // There must be no such slots in the system. + kInvalid, + + // Sloppy kinds come first, for easy language mode testing. + kStoreGlobalSloppy, + kStoreNamedSloppy, + kStoreKeyedSloppy, + kLastSloppyKind = kStoreKeyedSloppy, + + // Strict and language mode unaware kinds. 
+ kCall, + kLoadProperty, + kLoadGlobalNotInsideTypeof, + kLoadGlobalInsideTypeof, + kLoadKeyed, + kHasKeyed, + kStoreGlobalStrict, + kStoreNamedStrict, + kStoreOwnNamed, + kStoreKeyedStrict, + kStoreInArrayLiteral, + kBinaryOp, + kCompareOp, + kStoreDataPropertyInLiteral, + kTypeProfile, + kLiteral, + kForIn, + kInstanceOf, + kCloneObject, + + kKindsNumber // Last value indicating number of kinds. +}; + +inline bool IsCallICKind(FeedbackSlotKind kind) { + return kind == FeedbackSlotKind::kCall; +} + +inline bool IsLoadICKind(FeedbackSlotKind kind) { + return kind == FeedbackSlotKind::kLoadProperty; +} + +inline bool IsLoadGlobalICKind(FeedbackSlotKind kind) { + return kind == FeedbackSlotKind::kLoadGlobalNotInsideTypeof || + kind == FeedbackSlotKind::kLoadGlobalInsideTypeof; +} + +inline bool IsKeyedLoadICKind(FeedbackSlotKind kind) { + return kind == FeedbackSlotKind::kLoadKeyed; +} + +inline bool IsKeyedHasICKind(FeedbackSlotKind kind) { + return kind == FeedbackSlotKind::kHasKeyed; +} + +inline bool IsStoreGlobalICKind(FeedbackSlotKind kind) { + return kind == FeedbackSlotKind::kStoreGlobalSloppy || + kind == FeedbackSlotKind::kStoreGlobalStrict; +} + +inline bool IsStoreICKind(FeedbackSlotKind kind) { + return kind == FeedbackSlotKind::kStoreNamedSloppy || + kind == FeedbackSlotKind::kStoreNamedStrict; +} + +inline bool IsStoreOwnICKind(FeedbackSlotKind kind) { + return kind == FeedbackSlotKind::kStoreOwnNamed; +} + +inline bool IsStoreDataPropertyInLiteralKind(FeedbackSlotKind kind) { + return kind == FeedbackSlotKind::kStoreDataPropertyInLiteral; +} + +inline bool IsKeyedStoreICKind(FeedbackSlotKind kind) { + return kind == FeedbackSlotKind::kStoreKeyedSloppy || + kind == FeedbackSlotKind::kStoreKeyedStrict; +} + +inline bool IsStoreInArrayLiteralICKind(FeedbackSlotKind kind) { + return kind == FeedbackSlotKind::kStoreInArrayLiteral; +} + +inline bool IsGlobalICKind(FeedbackSlotKind kind) { + return IsLoadGlobalICKind(kind) || IsStoreGlobalICKind(kind); +} + 
+inline bool IsTypeProfileKind(FeedbackSlotKind kind) { + return kind == FeedbackSlotKind::kTypeProfile; +} + +inline bool IsCloneObjectKind(FeedbackSlotKind kind) { + return kind == FeedbackSlotKind::kCloneObject; +} + +inline TypeofMode GetTypeofModeFromSlotKind(FeedbackSlotKind kind) { + DCHECK(IsLoadGlobalICKind(kind)); + return (kind == FeedbackSlotKind::kLoadGlobalInsideTypeof) + ? INSIDE_TYPEOF + : NOT_INSIDE_TYPEOF; +} + +inline LanguageMode GetLanguageModeFromSlotKind(FeedbackSlotKind kind) { + DCHECK(IsStoreICKind(kind) || IsStoreOwnICKind(kind) || + IsStoreGlobalICKind(kind) || IsKeyedStoreICKind(kind)); + STATIC_ASSERT(FeedbackSlotKind::kStoreGlobalSloppy <= + FeedbackSlotKind::kLastSloppyKind); + STATIC_ASSERT(FeedbackSlotKind::kStoreKeyedSloppy <= + FeedbackSlotKind::kLastSloppyKind); + STATIC_ASSERT(FeedbackSlotKind::kStoreNamedSloppy <= + FeedbackSlotKind::kLastSloppyKind); + return (kind <= FeedbackSlotKind::kLastSloppyKind) ? LanguageMode::kSloppy + : LanguageMode::kStrict; +} + +V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os, + FeedbackSlotKind kind); + +using MaybeObjectHandles = std::vector; + +class FeedbackMetadata; + +// ClosureFeedbackCellArray is a FixedArray that contains feedback cells used +// when creating closures from a function. Along with the feedback +// cells, the first slot (slot 0) is used to hold a budget to measure the +// hotness of the function. This is created once the function is compiled and is +// either held by the feedback vector (if allocated) or by the FeedbackCell of +// the closure. 
+class ClosureFeedbackCellArray : public FixedArray { + public: + NEVER_READ_ONLY_SPACE + + DECL_CAST(ClosureFeedbackCellArray) + + V8_EXPORT_PRIVATE static Handle New( + Isolate* isolate, Handle shared); + inline Handle GetFeedbackCell(int index); + + DECL_VERIFIER(ClosureFeedbackCellArray) + DECL_PRINTER(ClosureFeedbackCellArray) + + private: + OBJECT_CONSTRUCTORS(ClosureFeedbackCellArray, FixedArray); +}; + +// A FeedbackVector has a fixed header with: +// - shared function info (which includes feedback metadata) +// - invocation count +// - runtime profiler ticks +// - optimized code cell (weak cell or Smi marker) +// followed by an array of feedback slots, of length determined by the feedback +// metadata. +class FeedbackVector : public HeapObject { + public: + NEVER_READ_ONLY_SPACE + + DECL_CAST(FeedbackVector) + + inline bool is_empty() const; + + inline FeedbackMetadata metadata() const; + + // [shared_function_info]: The shared function info for the function with this + // feedback vector. + DECL_ACCESSORS(shared_function_info, SharedFunctionInfo) + + // [optimized_code_weak_or_smi]: weak reference to optimized code or a Smi + // marker defining optimization behaviour. + DECL_ACCESSORS(optimized_code_weak_or_smi, MaybeObject) + + // [feedback_cell_array]: The FixedArray to hold the feedback cells for any + // closures created by this function. + DECL_ACCESSORS(closure_feedback_cell_array, ClosureFeedbackCellArray) + + // [length]: The length of the feedback vector (not including the header, i.e. + // the number of feedback slots). + DECL_INT32_ACCESSORS(length) + + // [invocation_count]: The number of times this function has been invoked. + DECL_INT32_ACCESSORS(invocation_count) + + // [profiler_ticks]: The number of times this function has been seen by the + // runtime profiler. + DECL_INT32_ACCESSORS(profiler_ticks) + + // Initialize the padding if necessary. 
+ inline void clear_padding(); + + inline void clear_invocation_count(); + + inline Code optimized_code() const; + inline OptimizationMarker optimization_marker() const; + inline bool has_optimized_code() const; + inline bool has_optimization_marker() const; + void ClearOptimizedCode(); + void EvictOptimizedCodeMarkedForDeoptimization(SharedFunctionInfo shared, + const char* reason); + static void SetOptimizedCode(Handle vector, + Handle code); + void SetOptimizationMarker(OptimizationMarker marker); + + // Clears the optimization marker in the feedback vector. + void ClearOptimizationMarker(); + + // Conversion from a slot to an integer index to the underlying array. + static int GetIndex(FeedbackSlot slot) { return slot.ToInt(); } + + // Conversion from an integer index to the underlying array to a slot. + static inline FeedbackSlot ToSlot(int index); + inline MaybeObject Get(FeedbackSlot slot) const; + inline MaybeObject get(int index) const; + inline void Set(FeedbackSlot slot, MaybeObject value, + WriteBarrierMode mode = UPDATE_WRITE_BARRIER); + inline void set(int index, MaybeObject value, + WriteBarrierMode mode = UPDATE_WRITE_BARRIER); + inline void Set(FeedbackSlot slot, Object value, + WriteBarrierMode mode = UPDATE_WRITE_BARRIER); + inline void set(int index, Object value, + WriteBarrierMode mode = UPDATE_WRITE_BARRIER); + + // Returns the feedback cell at |index| that is used to create the + // closure. + inline Handle GetClosureFeedbackCell(int index) const; + + // Gives access to raw memory which stores the array's data. + inline MaybeObjectSlot slots_start(); + + // Returns slot kind for given slot. 
+ V8_EXPORT_PRIVATE FeedbackSlotKind GetKind(FeedbackSlot slot) const; + + FeedbackSlot GetTypeProfileSlot() const; + + V8_EXPORT_PRIVATE static Handle New( + Isolate* isolate, Handle shared, + Handle closure_feedback_cell_array); + +#define DEFINE_SLOT_KIND_PREDICATE(Name) \ + bool Name(FeedbackSlot slot) const { return Name##Kind(GetKind(slot)); } + + DEFINE_SLOT_KIND_PREDICATE(IsCallIC) + DEFINE_SLOT_KIND_PREDICATE(IsGlobalIC) + DEFINE_SLOT_KIND_PREDICATE(IsLoadIC) + DEFINE_SLOT_KIND_PREDICATE(IsLoadGlobalIC) + DEFINE_SLOT_KIND_PREDICATE(IsKeyedLoadIC) + DEFINE_SLOT_KIND_PREDICATE(IsStoreIC) + DEFINE_SLOT_KIND_PREDICATE(IsStoreOwnIC) + DEFINE_SLOT_KIND_PREDICATE(IsStoreGlobalIC) + DEFINE_SLOT_KIND_PREDICATE(IsKeyedStoreIC) + DEFINE_SLOT_KIND_PREDICATE(IsTypeProfile) +#undef DEFINE_SLOT_KIND_PREDICATE + + // Returns typeof mode encoded into kind of given slot. + inline TypeofMode GetTypeofMode(FeedbackSlot slot) const { + return GetTypeofModeFromSlotKind(GetKind(slot)); + } + + // Returns language mode encoded into kind of given slot. + inline LanguageMode GetLanguageMode(FeedbackSlot slot) const { + return GetLanguageModeFromSlotKind(GetKind(slot)); + } + + V8_EXPORT_PRIVATE static void AssertNoLegacyTypes(MaybeObject object); + + DECL_PRINTER(FeedbackVector) + DECL_VERIFIER(FeedbackVector) + + void FeedbackSlotPrint(std::ostream& os, FeedbackSlot slot); // NOLINT + + // Clears the vector slots. Return true if feedback has changed. + bool ClearSlots(Isolate* isolate); + + // The object that indicates an uninitialized cache. + static inline Handle UninitializedSentinel(Isolate* isolate); + + // The object that indicates a generic state. + static inline Handle GenericSentinel(Isolate* isolate); + + // The object that indicates a megamorphic state. + static inline Handle MegamorphicSentinel(Isolate* isolate); + + // The object that indicates a premonomorphic state. 
+ static inline Handle PremonomorphicSentinel(Isolate* isolate); + + // A raw version of the uninitialized sentinel that's safe to read during + // garbage collection (e.g., for patching the cache). + static inline Symbol RawUninitializedSentinel(Isolate* isolate); + + // Layout description. + DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize, + TORQUE_GENERATED_FEEDBACK_VECTOR_FIELDS) + + static const int kHeaderSize = kSize; + + static_assert(kSize % kObjectAlignment == 0, + "Header must be padded for alignment"); + static const int kFeedbackSlotsOffset = kHeaderSize; + + class BodyDescriptor; + + // Garbage collection support. + static constexpr int SizeFor(int length) { + return kFeedbackSlotsOffset + length * kTaggedSize; + } + + private: + static void AddToVectorsForProfilingTools(Isolate* isolate, + Handle vector); + + OBJECT_CONSTRUCTORS(FeedbackVector, HeapObject); +}; + +class V8_EXPORT_PRIVATE FeedbackVectorSpec { + public: + explicit FeedbackVectorSpec(Zone* zone) + : slot_kinds_(zone), num_closure_feedback_cells_(0) { + slot_kinds_.reserve(16); + } + + int slots() const { return static_cast(slot_kinds_.size()); } + int closure_feedback_cells() const { return num_closure_feedback_cells_; } + + int AddFeedbackCellForCreateClosure() { + return num_closure_feedback_cells_++; + } + + FeedbackSlotKind GetKind(FeedbackSlot slot) const { + return static_cast(slot_kinds_.at(slot.ToInt())); + } + + bool HasTypeProfileSlot() const; + + // If used, the TypeProfileSlot is always added as the first slot and its + // index is constant. If other slots are added before the TypeProfileSlot, + // this number changes. + static const int kTypeProfileSlotIndex = 0; + + FeedbackSlot AddCallICSlot() { return AddSlot(FeedbackSlotKind::kCall); } + + FeedbackSlot AddLoadICSlot() { + return AddSlot(FeedbackSlotKind::kLoadProperty); + } + + FeedbackSlot AddLoadGlobalICSlot(TypeofMode typeof_mode) { + return AddSlot(typeof_mode == INSIDE_TYPEOF + ? 
FeedbackSlotKind::kLoadGlobalInsideTypeof + : FeedbackSlotKind::kLoadGlobalNotInsideTypeof); + } + + FeedbackSlot AddKeyedLoadICSlot() { + return AddSlot(FeedbackSlotKind::kLoadKeyed); + } + + FeedbackSlot AddKeyedHasICSlot() { + return AddSlot(FeedbackSlotKind::kHasKeyed); + } + + FeedbackSlotKind GetStoreICSlot(LanguageMode language_mode) { + STATIC_ASSERT(LanguageModeSize == 2); + return is_strict(language_mode) ? FeedbackSlotKind::kStoreNamedStrict + : FeedbackSlotKind::kStoreNamedSloppy; + } + + FeedbackSlot AddStoreICSlot(LanguageMode language_mode) { + return AddSlot(GetStoreICSlot(language_mode)); + } + + FeedbackSlot AddStoreOwnICSlot() { + return AddSlot(FeedbackSlotKind::kStoreOwnNamed); + } + + FeedbackSlot AddStoreGlobalICSlot(LanguageMode language_mode) { + STATIC_ASSERT(LanguageModeSize == 2); + return AddSlot(is_strict(language_mode) + ? FeedbackSlotKind::kStoreGlobalStrict + : FeedbackSlotKind::kStoreGlobalSloppy); + } + + FeedbackSlotKind GetKeyedStoreICSlotKind(LanguageMode language_mode) { + STATIC_ASSERT(LanguageModeSize == 2); + return is_strict(language_mode) ? 
FeedbackSlotKind::kStoreKeyedStrict + : FeedbackSlotKind::kStoreKeyedSloppy; + } + + FeedbackSlot AddKeyedStoreICSlot(LanguageMode language_mode) { + return AddSlot(GetKeyedStoreICSlotKind(language_mode)); + } + + FeedbackSlot AddStoreInArrayLiteralICSlot() { + return AddSlot(FeedbackSlotKind::kStoreInArrayLiteral); + } + + FeedbackSlot AddBinaryOpICSlot() { + return AddSlot(FeedbackSlotKind::kBinaryOp); + } + + FeedbackSlot AddCompareICSlot() { + return AddSlot(FeedbackSlotKind::kCompareOp); + } + + FeedbackSlot AddForInSlot() { return AddSlot(FeedbackSlotKind::kForIn); } + + FeedbackSlot AddInstanceOfSlot() { + return AddSlot(FeedbackSlotKind::kInstanceOf); + } + + FeedbackSlot AddLiteralSlot() { return AddSlot(FeedbackSlotKind::kLiteral); } + + FeedbackSlot AddStoreDataPropertyInLiteralICSlot() { + return AddSlot(FeedbackSlotKind::kStoreDataPropertyInLiteral); + } + + FeedbackSlot AddTypeProfileSlot(); + + FeedbackSlot AddCloneObjectSlot() { + return AddSlot(FeedbackSlotKind::kCloneObject); + } + +#ifdef OBJECT_PRINT + // For gdb debugging. + void Print(); +#endif // OBJECT_PRINT + + DECL_PRINTER(FeedbackVectorSpec) + + private: + FeedbackSlot AddSlot(FeedbackSlotKind kind); + + void append(FeedbackSlotKind kind) { + slot_kinds_.push_back(static_cast(kind)); + } + + ZoneVector slot_kinds_; + unsigned int num_closure_feedback_cells_; + + friend class SharedFeedbackSlot; +}; + +// Helper class that creates a feedback slot on-demand. +class SharedFeedbackSlot { + public: + // FeedbackSlot default constructor constructs an invalid slot. + SharedFeedbackSlot(FeedbackVectorSpec* spec, FeedbackSlotKind kind) + : kind_(kind), spec_(spec) {} + + FeedbackSlot Get() { + if (slot_.IsInvalid()) slot_ = spec_->AddSlot(kind_); + return slot_; + } + + private: + FeedbackSlotKind kind_; + FeedbackSlot slot_; + FeedbackVectorSpec* spec_; +}; + +// FeedbackMetadata is an array-like object with a slot count (indicating how +// many slots are stored). 
We save space by packing several slots into an array +// of int32 data. The length is never stored - it is always calculated from +// slot_count. All instances are created through the static New function, and +// the number of slots is static once an instance is created. +class FeedbackMetadata : public HeapObject { + public: + DECL_CAST(FeedbackMetadata) + + // The number of slots that this metadata contains. Stored as an int32. + DECL_INT32_ACCESSORS(slot_count) + + // The number of feedback cells required for create closures. Stored as an + // int32. + // TODO(mythria): Consider using 16 bits for this and slot_count so that we + // can save 4 bytes. + DECL_INT32_ACCESSORS(closure_feedback_cell_count) + + // Get slot_count using an acquire load. + inline int32_t synchronized_slot_count() const; + + // Returns number of feedback vector elements used by given slot kind. + static inline int GetSlotSize(FeedbackSlotKind kind); + + bool SpecDiffersFrom(const FeedbackVectorSpec* other_spec) const; + + inline bool is_empty() const; + + // Returns slot kind for given slot. + V8_EXPORT_PRIVATE FeedbackSlotKind GetKind(FeedbackSlot slot) const; + + // If {spec} is null, then it is considered empty. + V8_EXPORT_PRIVATE static Handle New( + Isolate* isolate, const FeedbackVectorSpec* spec = nullptr); + + DECL_PRINTER(FeedbackMetadata) + DECL_VERIFIER(FeedbackMetadata) + + static const char* Kind2String(FeedbackSlotKind kind); + bool HasTypeProfileSlot() const; + + // Garbage collection support. + // This includes any necessary padding at the end of the object for pointer + // size alignment. 
+ static int SizeFor(int slot_count) { + return OBJECT_POINTER_ALIGN(kHeaderSize + length(slot_count) * kInt32Size); + } + + static const int kSlotCountOffset = HeapObject::kHeaderSize; + static const int kFeedbackCellCountOffset = kSlotCountOffset + kInt32Size; + static const int kHeaderSize = kFeedbackCellCountOffset + kInt32Size; + + class BodyDescriptor; + + private: + friend class AccessorAssembler; + + // Raw accessors to the encoded slot data. + inline int32_t get(int index) const; + inline void set(int index, int32_t value); + + // The number of int32 data fields needed to store {slot_count} slots. + // Does not include any extra padding for pointer size alignment. + static int length(int slot_count) { + return VectorICComputer::word_count(slot_count); + } + inline int length() const; + + static const int kFeedbackSlotKindBits = 5; + STATIC_ASSERT(static_cast(FeedbackSlotKind::kKindsNumber) < + (1 << kFeedbackSlotKindBits)); + + void SetKind(FeedbackSlot slot, FeedbackSlotKind kind); + + using VectorICComputer = + BitSetComputer; + + OBJECT_CONSTRUCTORS(FeedbackMetadata, HeapObject); +}; + +// Verify that an empty hash field looks like a tagged object, but can't +// possibly be confused with a pointer. +STATIC_ASSERT((Name::kEmptyHashField & kHeapObjectTag) == kHeapObjectTag); +STATIC_ASSERT(Name::kEmptyHashField == 0x3); +// Verify that a set hash field will not look like a tagged object. +STATIC_ASSERT(Name::kHashNotComputedMask == kHeapObjectTag); + +class FeedbackMetadataIterator { + public: + explicit FeedbackMetadataIterator(Handle metadata) + : metadata_handle_(metadata), + next_slot_(FeedbackSlot(0)), + slot_kind_(FeedbackSlotKind::kInvalid) {} + + explicit FeedbackMetadataIterator(FeedbackMetadata metadata) + : metadata_(metadata), + next_slot_(FeedbackSlot(0)), + slot_kind_(FeedbackSlotKind::kInvalid) {} + + inline bool HasNext() const; + + inline FeedbackSlot Next(); + + // Returns slot kind of the last slot returned by Next(). 
+ FeedbackSlotKind kind() const { + DCHECK_NE(FeedbackSlotKind::kInvalid, slot_kind_); + DCHECK_NE(FeedbackSlotKind::kKindsNumber, slot_kind_); + return slot_kind_; + } + + // Returns entry size of the last slot returned by Next(). + inline int entry_size() const; + + private: + FeedbackMetadata metadata() const { + return !metadata_handle_.is_null() ? *metadata_handle_ : metadata_; + } + + // The reason for having a handle and a raw pointer to the meta data is + // to have a single iterator implementation for both "handlified" and raw + // pointer use cases. + Handle metadata_handle_; + FeedbackMetadata metadata_; + FeedbackSlot cur_slot_; + FeedbackSlot next_slot_; + FeedbackSlotKind slot_kind_; +}; + +// A FeedbackNexus is the combination of a FeedbackVector and a slot. +class V8_EXPORT_PRIVATE FeedbackNexus final { + public: + FeedbackNexus(Handle vector, FeedbackSlot slot) + : vector_handle_(vector), slot_(slot) { + kind_ = + (vector.is_null()) ? FeedbackSlotKind::kInvalid : vector->GetKind(slot); + } + FeedbackNexus(FeedbackVector vector, FeedbackSlot slot) + : vector_(vector), slot_(slot) { + kind_ = + (vector.is_null()) ? FeedbackSlotKind::kInvalid : vector.GetKind(slot); + } + + Handle vector_handle() const { + DCHECK(vector_.is_null()); + return vector_handle_; + } + FeedbackVector vector() const { + return vector_handle_.is_null() ? vector_ : *vector_handle_; + } + FeedbackSlot slot() const { return slot_; } + FeedbackSlotKind kind() const { return kind_; } + + inline LanguageMode GetLanguageMode() const { + return vector().GetLanguageMode(slot()); + } + + InlineCacheState ic_state() const; + bool IsUninitialized() const { return ic_state() == UNINITIALIZED; } + bool IsMegamorphic() const { return ic_state() == MEGAMORPHIC; } + bool IsGeneric() const { return ic_state() == GENERIC; } + + void Print(std::ostream& os); // NOLINT + + // For map-based ICs (load, keyed-load, store, keyed-store). 
+ Map GetFirstMap() const; + + int ExtractMaps(MapHandles* maps) const; + MaybeObjectHandle FindHandlerForMap(Handle map) const; + bool FindHandlers(MaybeObjectHandles* code_list, int length = -1) const; + + bool IsCleared() const { + InlineCacheState state = ic_state(); + return !FLAG_use_ic || state == UNINITIALIZED || state == PREMONOMORPHIC; + } + + // Clear() returns true if the state of the underlying vector was changed. + bool Clear(); + void ConfigureUninitialized(); + void ConfigurePremonomorphic(Handle receiver_map); + // ConfigureMegamorphic() returns true if the state of the underlying vector + // was changed. Extra feedback is cleared if the 0 parameter version is used. + bool ConfigureMegamorphic(); + bool ConfigureMegamorphic(IcCheckType property_type); + + inline MaybeObject GetFeedback() const; + inline MaybeObject GetFeedbackExtra() const; + + inline Isolate* GetIsolate() const; + + void ConfigureMonomorphic(Handle name, Handle receiver_map, + const MaybeObjectHandle& handler); + + void ConfigurePolymorphic(Handle name, MapHandles const& maps, + MaybeObjectHandles* handlers); + + BinaryOperationHint GetBinaryOperationFeedback() const; + CompareOperationHint GetCompareOperationFeedback() const; + ForInHint GetForInFeedback() const; + + // For KeyedLoad ICs. + KeyedAccessLoadMode GetKeyedAccessLoadMode() const; + + // For KeyedStore ICs. + KeyedAccessStoreMode GetKeyedAccessStoreMode() const; + + // For KeyedLoad and KeyedStore ICs. + IcCheckType GetKeyType() const; + Name GetName() const; + + // For Call ICs. + int GetCallCount(); + void SetSpeculationMode(SpeculationMode mode); + SpeculationMode GetSpeculationMode(); + + // Compute the call frequency based on the call count and the invocation + // count (taken from the type feedback vector). + float ComputeCallFrequency(); + + using SpeculationModeField = BitField; + using CallCountField = BitField; + + // For InstanceOf ICs. 
+ MaybeHandle GetConstructorFeedback() const; + + // For Global Load and Store ICs. + void ConfigurePropertyCellMode(Handle cell); + // Returns false if given combination of indices is not allowed. + bool ConfigureLexicalVarMode(int script_context_index, int context_slot_index, + bool immutable); + void ConfigureHandlerMode(const MaybeObjectHandle& handler); + + // For CloneObject ICs + static constexpr int kCloneObjectPolymorphicEntrySize = 2; + void ConfigureCloneObject(Handle source_map, Handle result_map); + +// Bit positions in a smi that encodes lexical environment variable access. +#define LEXICAL_MODE_BIT_FIELDS(V, _) \ + V(ContextIndexBits, unsigned, 12, _) \ + V(SlotIndexBits, unsigned, 18, _) \ + V(ImmutabilityBit, bool, 1, _) + + DEFINE_BIT_FIELDS(LEXICAL_MODE_BIT_FIELDS) +#undef LEXICAL_MODE_BIT_FIELDS + + // Make sure we don't overflow the smi. + STATIC_ASSERT(LEXICAL_MODE_BIT_FIELDS_Ranges::kBitsCount <= kSmiValueSize); + + // For TypeProfile feedback vector slots. + // ResetTypeProfile will always reset type profile information. + void ResetTypeProfile(); + + // Add a type to the list of types for source position . + void Collect(Handle type, int position); + JSObject GetTypeProfile() const; + + std::vector GetSourcePositions() const; + std::vector> GetTypesForSourcePositions(uint32_t pos) const; + + inline void SetFeedback(Object feedback, + WriteBarrierMode mode = UPDATE_WRITE_BARRIER); + inline void SetFeedback(MaybeObject feedback, + WriteBarrierMode mode = UPDATE_WRITE_BARRIER); + inline void SetFeedbackExtra(Object feedback_extra, + WriteBarrierMode mode = UPDATE_WRITE_BARRIER); + inline void SetFeedbackExtra(MaybeObject feedback_extra, + WriteBarrierMode mode = UPDATE_WRITE_BARRIER); + + Handle EnsureArrayOfSize(int length); + Handle EnsureExtraArrayOfSize(int length); + + private: + // The reason for having a vector handle and a raw pointer is that we can and + // should use handles during IC miss, but not during GC when we clear ICs. 
If + // you have a handle to the vector that is better because more operations can + // be done, like allocation. + Handle vector_handle_; + FeedbackVector vector_; + FeedbackSlot slot_; + FeedbackSlotKind kind_; +}; + +inline BinaryOperationHint BinaryOperationHintFromFeedback(int type_feedback); +inline CompareOperationHint CompareOperationHintFromFeedback(int type_feedback); +inline ForInHint ForInHintFromFeedback(int type_feedback); + +} // namespace internal +} // namespace v8 + +#include "src/objects/object-macros-undef.h" + +#endif // V8_OBJECTS_FEEDBACK_VECTOR_H_ diff --git a/deps/v8/src/objects/field-index-inl.h b/deps/v8/src/objects/field-index-inl.h new file mode 100644 index 0000000000..be60fb54a2 --- /dev/null +++ b/deps/v8/src/objects/field-index-inl.h @@ -0,0 +1,73 @@ +// Copyright 2014 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef V8_OBJECTS_FIELD_INDEX_INL_H_ +#define V8_OBJECTS_FIELD_INDEX_INL_H_ + +#include "src/objects/descriptor-array-inl.h" +#include "src/objects/field-index.h" +#include "src/objects/objects-inl.h" + +namespace v8 { +namespace internal { + +FieldIndex FieldIndex::ForInObjectOffset(int offset, Encoding encoding) { + DCHECK_IMPLIES(encoding == kWord32, IsAligned(offset, kInt32Size)); + DCHECK_IMPLIES(encoding == kTagged, IsAligned(offset, kTaggedSize)); + DCHECK_IMPLIES(encoding == kDouble, IsAligned(offset, kDoubleSize)); + return FieldIndex(true, offset, encoding, 0, 0); +} + +FieldIndex FieldIndex::ForPropertyIndex(const Map map, int property_index, + Representation representation) { + DCHECK(map.instance_type() >= FIRST_NONSTRING_TYPE); + int inobject_properties = map.GetInObjectProperties(); + bool is_inobject = property_index < inobject_properties; + int first_inobject_offset; + int offset; + if (is_inobject) { + first_inobject_offset = map.GetInObjectPropertyOffset(0); + offset = 
map.GetInObjectPropertyOffset(property_index); + } else { + first_inobject_offset = FixedArray::kHeaderSize; + property_index -= inobject_properties; + offset = PropertyArray::OffsetOfElementAt(property_index); + } + Encoding encoding = FieldEncoding(representation); + return FieldIndex(is_inobject, offset, encoding, inobject_properties, + first_inobject_offset); +} + +// Returns the index format accepted by the HLoadFieldByIndex instruction. +// (In-object: zero-based from (object start + JSObject::kHeaderSize), +// out-of-object: zero-based from FixedArray::kHeaderSize.) +int FieldIndex::GetLoadByFieldIndex() const { + // For efficiency, the LoadByFieldIndex instruction takes an index that is + // optimized for quick access. If the property is inline, the index is + // positive. If it's out-of-line, the encoded index is -raw_index - 1 to + // disambiguate the zero out-of-line index from the zero inobject case. + // The index itself is shifted up by one bit, the lower-most bit + // signifying if the field is a mutable double box (1) or not (0). + int result = index(); + if (is_inobject()) { + result -= JSObject::kHeaderSize / kTaggedSize; + } else { + result -= FixedArray::kHeaderSize / kTaggedSize; + result = -result - 1; + } + result = static_cast(result) << 1; + return is_double() ? (result | 1) : result; +} + +FieldIndex FieldIndex::ForDescriptor(const Map map, int descriptor_index) { + PropertyDetails details = + map.instance_descriptors().GetDetails(descriptor_index); + int field_index = details.field_index(); + return ForPropertyIndex(map, field_index, details.representation()); +} + +} // namespace internal +} // namespace v8 + +#endif // V8_OBJECTS_FIELD_INDEX_INL_H_ diff --git a/deps/v8/src/objects/field-index.h b/deps/v8/src/objects/field-index.h new file mode 100644 index 0000000000..f352ef6800 --- /dev/null +++ b/deps/v8/src/objects/field-index.h @@ -0,0 +1,127 @@ +// Copyright 2014 the V8 project authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef V8_OBJECTS_FIELD_INDEX_H_ +#define V8_OBJECTS_FIELD_INDEX_H_ + +#include "src/objects/property-details.h" +#include "src/utils/utils.h" + +namespace v8 { +namespace internal { + +class Map; + +// Wrapper class to hold a field index, usually but not necessarily generated +// from a property index. When available, the wrapper class captures additional +// information to allow the field index to be translated back into the property +// index it was originally generated from. +class FieldIndex final { + public: + enum Encoding { kTagged, kDouble, kWord32 }; + + FieldIndex() : bit_field_(0) {} + + static inline FieldIndex ForPropertyIndex( + const Map map, int index, + Representation representation = Representation::Tagged()); + static inline FieldIndex ForInObjectOffset(int offset, Encoding encoding); + static inline FieldIndex ForDescriptor(const Map map, int descriptor_index); + + inline int GetLoadByFieldIndex() const; + + bool is_inobject() const { return IsInObjectBits::decode(bit_field_); } + + bool is_double() const { return EncodingBits::decode(bit_field_) == kDouble; } + + int offset() const { return OffsetBits::decode(bit_field_); } + + // Zero-indexed from beginning of the object. + int index() const { + DCHECK(IsAligned(offset(), kTaggedSize)); + return offset() / kTaggedSize; + } + + int outobject_array_index() const { + DCHECK(!is_inobject()); + return index() - first_inobject_property_offset() / kTaggedSize; + } + + // Zero-based from the first inobject property. Overflows to out-of-object + // properties. 
+ int property_index() const { + int result = index() - first_inobject_property_offset() / kTaggedSize; + if (!is_inobject()) { + result += InObjectPropertyBits::decode(bit_field_); + } + return result; + } + + int GetFieldAccessStubKey() const { + return bit_field_ & + (IsInObjectBits::kMask | EncodingBits::kMask | OffsetBits::kMask); + } + + bool operator==(FieldIndex const& other) const { + return bit_field_ == other.bit_field_; + } + bool operator!=(FieldIndex const& other) const { return !(*this == other); } + + private: + FieldIndex(bool is_inobject, int offset, Encoding encoding, + int inobject_properties, int first_inobject_property_offset) { + DCHECK(IsAligned(first_inobject_property_offset, kTaggedSize)); + bit_field_ = IsInObjectBits::encode(is_inobject) | + EncodingBits::encode(encoding) | + FirstInobjectPropertyOffsetBits::encode( + first_inobject_property_offset) | + OffsetBits::encode(offset) | + InObjectPropertyBits::encode(inobject_properties); + } + + static Encoding FieldEncoding(Representation representation) { + switch (representation.kind()) { + case Representation::kNone: + case Representation::kSmi: + case Representation::kHeapObject: + case Representation::kTagged: + return kTagged; + case Representation::kDouble: + return kDouble; + default: + break; + } + PrintF("%s\n", representation.Mnemonic()); + UNREACHABLE(); + return kTagged; + } + + int first_inobject_property_offset() const { + return FirstInobjectPropertyOffsetBits::decode(bit_field_); + } + + static const int kOffsetBitsSize = + (kDescriptorIndexBitCount + 1 + kTaggedSizeLog2); + + // Index from beginning of object. + class OffsetBits : public BitField64 {}; + class IsInObjectBits : public BitField64 {}; + class EncodingBits : public BitField64 {}; + // Number of inobject properties. + class InObjectPropertyBits + : public BitField64 { + }; + // Offset of first inobject property from beginning of object. 
+ class FirstInobjectPropertyOffsetBits + : public BitField64 {}; + STATIC_ASSERT(FirstInobjectPropertyOffsetBits::kNext <= 64); + + uint64_t bit_field_; +}; + +} // namespace internal +} // namespace v8 + +#endif // V8_OBJECTS_FIELD_INDEX_H_ diff --git a/deps/v8/src/objects/field-type.cc b/deps/v8/src/objects/field-type.cc new file mode 100644 index 0000000000..5c771c4ffa --- /dev/null +++ b/deps/v8/src/objects/field-type.cc @@ -0,0 +1,87 @@ +// Copyright 2016 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "src/objects/field-type.h" + +#include "src/handles/handles-inl.h" +#include "src/objects/objects-inl.h" +#include "src/objects/smi.h" +#include "src/utils/ostreams.h" + +namespace v8 { +namespace internal { + +// static +FieldType FieldType::None() { return FieldType(Smi::FromInt(2).ptr()); } + +// static +FieldType FieldType::Any() { return FieldType(Smi::FromInt(1).ptr()); } + +// static +Handle FieldType::None(Isolate* isolate) { + return handle(None(), isolate); +} + +// static +Handle FieldType::Any(Isolate* isolate) { + return handle(Any(), isolate); +} + +// static +FieldType FieldType::Class(Map map) { return FieldType::cast(map); } + +// static +Handle FieldType::Class(Handle map, Isolate* isolate) { + return handle(Class(*map), isolate); +} + +// static +FieldType FieldType::cast(Object object) { + DCHECK(object == None() || object == Any() || object.IsMap()); + return FieldType(object.ptr()); +} + +bool FieldType::IsClass() const { return this->IsMap(); } + +Map FieldType::AsClass() const { + DCHECK(IsClass()); + return Map::cast(*this); +} + +bool FieldType::NowStable() const { + return !this->IsClass() || AsClass().is_stable(); +} + +bool FieldType::NowIs(FieldType other) const { + if (other.IsAny()) return true; + if (IsNone()) return true; + if (other.IsNone()) return false; + if (IsAny()) return false; + DCHECK(IsClass()); + 
DCHECK(other.IsClass()); + return *this == other; +} + +bool FieldType::NowIs(Handle other) const { return NowIs(*other); } + +void FieldType::PrintTo(std::ostream& os) const { + if (IsAny()) { + os << "Any"; + } else if (IsNone()) { + os << "None"; + } else { + DCHECK(IsClass()); + os << "Class(" << reinterpret_cast(AsClass().ptr()) << ")"; + } +} + +bool FieldType::NowContains(Object value) const { + if (*this == Any()) return true; + if (*this == None()) return false; + if (!value.IsHeapObject()) return false; + return HeapObject::cast(value).map() == Map::cast(*this); +} + +} // namespace internal +} // namespace v8 diff --git a/deps/v8/src/objects/field-type.h b/deps/v8/src/objects/field-type.h new file mode 100644 index 0000000000..3c22692307 --- /dev/null +++ b/deps/v8/src/objects/field-type.h @@ -0,0 +1,56 @@ +// Copyright 2016 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef V8_OBJECTS_FIELD_TYPE_H_ +#define V8_OBJECTS_FIELD_TYPE_H_ + +#include "src/objects/heap-object.h" +#include "src/objects/map.h" +#include "src/objects/objects.h" + +namespace v8 { +namespace internal { + +template +class Handle; + +class FieldType : public Object { + public: + static FieldType None(); + static FieldType Any(); + V8_EXPORT_PRIVATE static Handle None(Isolate* isolate); + V8_EXPORT_PRIVATE static Handle Any(Isolate* isolate); + V8_EXPORT_PRIVATE static FieldType Class(Map map); + V8_EXPORT_PRIVATE static Handle Class(Handle map, + Isolate* isolate); + V8_EXPORT_PRIVATE static FieldType cast(Object object); + static FieldType unchecked_cast(Object object) { + return FieldType(object.ptr()); + } + + bool NowContains(Object value) const; + + bool NowContains(Handle value) const { return NowContains(*value); } + + bool IsClass() const; + Map AsClass() const; + bool IsNone() const { return *this == None(); } + bool IsAny() const { return *this == Any(); } + bool 
NowStable() const; + bool NowIs(FieldType other) const; + bool NowIs(Handle other) const; + + V8_EXPORT_PRIVATE void PrintTo(std::ostream& os) const; + + FieldType* operator->() { return this; } + const FieldType* operator->() const { return this; } + + private: + explicit constexpr FieldType(Address ptr) : Object(ptr) {} +}; + +} // namespace internal +} // namespace v8 + +#endif // V8_OBJECTS_FIELD_TYPE_H_ diff --git a/deps/v8/src/objects/fixed-array-inl.h b/deps/v8/src/objects/fixed-array-inl.h index d494f8d15b..6d2b42edbf 100644 --- a/deps/v8/src/objects/fixed-array-inl.h +++ b/deps/v8/src/objects/fixed-array-inl.h @@ -7,19 +7,19 @@ #include "src/objects/fixed-array.h" -#include "src/base/tsan.h" -#include "src/conversions.h" -#include "src/handles-inl.h" +#include "src/handles/handles-inl.h" #include "src/heap/heap-write-barrier-inl.h" -#include "src/objects-inl.h" +#include "src/numbers/conversions.h" #include "src/objects/bigint.h" #include "src/objects/compressed-slots.h" #include "src/objects/heap-number-inl.h" #include "src/objects/map.h" #include "src/objects/maybe-object-inl.h" +#include "src/objects/objects-inl.h" #include "src/objects/oddball.h" #include "src/objects/slots.h" -#include "src/roots-inl.h" +#include "src/roots/roots-inl.h" +#include "src/sanitizer/tsan.h" // Has to be the last include (doesn't have include guards): #include "src/objects/object-macros.h" @@ -30,7 +30,6 @@ namespace internal { OBJECT_CONSTRUCTORS_IMPL(FixedArrayBase, HeapObject) OBJECT_CONSTRUCTORS_IMPL(FixedArray, FixedArrayBase) OBJECT_CONSTRUCTORS_IMPL(FixedDoubleArray, FixedArrayBase) -OBJECT_CONSTRUCTORS_IMPL(FixedTypedArrayBase, FixedArrayBase) OBJECT_CONSTRUCTORS_IMPL(ArrayList, FixedArray) OBJECT_CONSTRUCTORS_IMPL(ByteArray, FixedArrayBase) OBJECT_CONSTRUCTORS_IMPL(TemplateList, FixedArray) @@ -58,13 +57,13 @@ CAST_ACCESSOR(ByteArray) CAST_ACCESSOR(FixedArray) CAST_ACCESSOR(FixedArrayBase) CAST_ACCESSOR(FixedDoubleArray) -CAST_ACCESSOR(FixedTypedArrayBase) 
CAST_ACCESSOR(TemplateList) CAST_ACCESSOR(WeakFixedArray) CAST_ACCESSOR(WeakArrayList) SMI_ACCESSORS(FixedArrayBase, length, kLengthOffset) SYNCHRONIZED_SMI_ACCESSORS(FixedArrayBase, length, kLengthOffset) + SMI_ACCESSORS(WeakFixedArray, length, kLengthOffset) SYNCHRONIZED_SMI_ACCESSORS(WeakFixedArray, length, kLengthOffset) @@ -76,8 +75,6 @@ Object FixedArrayBase::unchecked_synchronized_length() const { return ACQUIRE_READ_FIELD(*this, kLengthOffset); } -ACCESSORS(FixedTypedArrayBase, base_pointer, Object, kBasePointerOffset) - ObjectSlot FixedArray::GetFirstElementAddress() { return RawField(OffsetOfElementAt(0)); } @@ -87,7 +84,7 @@ bool FixedArray::ContainsOnlySmisOrHoles() { ObjectSlot current = GetFirstElementAddress(); for (int i = 0; i < length(); ++i, ++current) { Object candidate = *current; - if (!candidate->IsSmi() && candidate != the_hole) return false; + if (!candidate.IsSmi() && candidate != the_hole) return false; } return true; } @@ -98,25 +95,11 @@ Object FixedArray::get(int index) const { } Handle FixedArray::get(FixedArray array, int index, Isolate* isolate) { - return handle(array->get(index), isolate); -} - -template -MaybeHandle FixedArray::GetValue(Isolate* isolate, int index) const { - Object obj = get(index); - if (obj->IsUndefined(isolate)) return MaybeHandle(); - return Handle(T::cast(obj), isolate); -} - -template -Handle FixedArray::GetValueChecked(Isolate* isolate, int index) const { - Object obj = get(index); - CHECK(!obj->IsUndefined(isolate)); - return Handle(T::cast(obj), isolate); + return handle(array.get(index), isolate); } bool FixedArray::is_the_hole(Isolate* isolate, int index) { - return get(index)->IsTheHole(isolate); + return get(index).IsTheHole(isolate); } void FixedArray::set(int index, Smi value) { @@ -147,9 +130,9 @@ void FixedArray::set(int index, Object value, WriteBarrierMode mode) { } void FixedArray::NoWriteBarrierSet(FixedArray array, int index, Object value) { - DCHECK_NE(array->map(), 
array->GetReadOnlyRoots().fixed_cow_array_map()); + DCHECK_NE(array.map(), array.GetReadOnlyRoots().fixed_cow_array_map()); DCHECK_GE(index, 0); - DCHECK_LT(index, array->length()); + DCHECK_LT(index, array.length()); DCHECK(!ObjectInYoungGeneration(value)); RELAXED_WRITE_FIELD(array, kHeaderSize + index * kTaggedSize, value); } @@ -202,16 +185,27 @@ ObjectSlot FixedArray::RawFieldOfElementAt(int index) { return RawField(OffsetOfElementAt(index)); } -void FixedArray::MoveElements(Heap* heap, int dst_index, int src_index, int len, - WriteBarrierMode mode) { +void FixedArray::MoveElements(Isolate* isolate, int dst_index, int src_index, + int len, WriteBarrierMode mode) { + if (len == 0) return; + DCHECK_LE(dst_index + len, length()); + DCHECK_LE(src_index + len, length()); DisallowHeapAllocation no_gc; - heap->MoveElements(*this, dst_index, src_index, len, mode); + ObjectSlot dst_slot(RawFieldOfElementAt(dst_index)); + ObjectSlot src_slot(RawFieldOfElementAt(src_index)); + isolate->heap()->MoveRange(*this, dst_slot, src_slot, len, mode); } -void FixedArray::CopyElements(Heap* heap, int dst_index, FixedArray src, +void FixedArray::CopyElements(Isolate* isolate, int dst_index, FixedArray src, int src_index, int len, WriteBarrierMode mode) { + if (len == 0) return; + DCHECK_LE(dst_index + len, length()); + DCHECK_LE(src_index + len, src.length()); DisallowHeapAllocation no_gc; - heap->CopyElements(*this, src, dst_index, src_index, len, mode); + + ObjectSlot dst_slot(RawFieldOfElementAt(dst_index)); + ObjectSlot src_slot(src.RawFieldOfElementAt(src_index)); + isolate->heap()->CopyRange(*this, dst_slot, src_slot, len, mode); } // Perform a binary search in a fixed array. 
@@ -221,7 +215,7 @@ int BinarySearch(T* array, Name name, int valid_entries, DCHECK(search_mode == ALL_ENTRIES || out_insertion_index == nullptr); int low = 0; int high = array->number_of_entries() - 1; - uint32_t hash = name->hash_field(); + uint32_t hash = name.hash_field(); int limit = high; DCHECK(low <= high); @@ -229,7 +223,7 @@ int BinarySearch(T* array, Name name, int valid_entries, while (low != high) { int mid = low + (high - low) / 2; Name mid_name = array->GetSortedKey(mid); - uint32_t mid_hash = mid_name->hash_field(); + uint32_t mid_hash = mid_name.hash_field(); if (mid_hash >= hash) { high = mid; @@ -241,7 +235,7 @@ int BinarySearch(T* array, Name name, int valid_entries, for (; low <= limit; ++low) { int sort_index = array->GetSortedKeyIndex(low); Name entry = array->GetKey(sort_index); - uint32_t current_hash = entry->hash_field(); + uint32_t current_hash = entry.hash_field(); if (current_hash != hash) { if (search_mode == ALL_ENTRIES && out_insertion_index != nullptr) { *out_insertion_index = sort_index + (current_hash > hash ? 
0 : 1); @@ -268,12 +262,12 @@ template int LinearSearch(T* array, Name name, int valid_entries, int* out_insertion_index) { if (search_mode == ALL_ENTRIES && out_insertion_index != nullptr) { - uint32_t hash = name->hash_field(); + uint32_t hash = name.hash_field(); int len = array->number_of_entries(); for (int number = 0; number < len; number++) { int sorted_index = array->GetSortedKeyIndex(number); Name entry = array->GetKey(sorted_index); - uint32_t current_hash = entry->hash_field(); + uint32_t current_hash = entry.hash_field(); if (current_hash > hash) { *out_insertion_index = sorted_index; return T::kNotFound; @@ -320,7 +314,7 @@ double FixedDoubleArray::get_scalar(int index) { map() != GetReadOnlyRoots().fixed_array_map()); DCHECK(index >= 0 && index < this->length()); DCHECK(!is_the_hole(index)); - return READ_DOUBLE_FIELD(*this, kHeaderSize + index * kDoubleSize); + return ReadField(kHeaderSize + index * kDoubleSize); } uint64_t FixedDoubleArray::get_representation(int index) { @@ -328,15 +322,16 @@ uint64_t FixedDoubleArray::get_representation(int index) { map() != GetReadOnlyRoots().fixed_array_map()); DCHECK(index >= 0 && index < this->length()); int offset = kHeaderSize + index * kDoubleSize; - return READ_UINT64_FIELD(*this, offset); + // Bug(v8:8875): Doubles may be unaligned. 
+ return ReadUnalignedValue(field_address(offset)); } Handle FixedDoubleArray::get(FixedDoubleArray array, int index, Isolate* isolate) { - if (array->is_the_hole(index)) { + if (array.is_the_hole(index)) { return ReadOnlyRoots(isolate).the_hole_value_handle(); } else { - return isolate->factory()->NewNumber(array->get_scalar(index)); + return isolate->factory()->NewNumber(array.get_scalar(index)); } } @@ -345,9 +340,9 @@ void FixedDoubleArray::set(int index, double value) { map() != GetReadOnlyRoots().fixed_array_map()); int offset = kHeaderSize + index * kDoubleSize; if (std::isnan(value)) { - WRITE_DOUBLE_FIELD(*this, offset, std::numeric_limits::quiet_NaN()); + WriteField(offset, std::numeric_limits::quiet_NaN()); } else { - WRITE_DOUBLE_FIELD(*this, offset, value); + WriteField(offset, value); } DCHECK(!is_the_hole(index)); } @@ -360,7 +355,7 @@ void FixedDoubleArray::set_the_hole(int index) { DCHECK(map() != GetReadOnlyRoots().fixed_cow_array_map() && map() != GetReadOnlyRoots().fixed_array_map()); int offset = kHeaderSize + index * kDoubleSize; - WRITE_UINT64_FIELD(*this, offset, kHoleNanInt64); + WriteUnalignedValue(field_address(offset), kHoleNanInt64); } bool FixedDoubleArray::is_the_hole(Isolate* isolate, int index) { @@ -371,8 +366,9 @@ bool FixedDoubleArray::is_the_hole(int index) { return get_representation(index) == kHoleNanInt64; } -void FixedDoubleArray::MoveElements(Heap* heap, int dst_index, int src_index, - int len, WriteBarrierMode mode) { +void FixedDoubleArray::MoveElements(Isolate* isolate, int dst_index, + int src_index, int len, + WriteBarrierMode mode) { DCHECK_EQ(SKIP_WRITE_BARRIER, mode); double* data_start = reinterpret_cast(FIELD_ADDR(*this, kHeaderSize)); @@ -414,6 +410,19 @@ MaybeObjectSlot WeakFixedArray::RawFieldOfElementAt(int index) { return RawMaybeWeakField(OffsetOfElementAt(index)); } +void WeakFixedArray::CopyElements(Isolate* isolate, int dst_index, + WeakFixedArray src, int src_index, int len, + WriteBarrierMode mode) { + 
if (len == 0) return; + DCHECK_LE(dst_index + len, length()); + DCHECK_LE(src_index + len, src.length()); + DisallowHeapAllocation no_gc; + + MaybeObjectSlot dst_slot(data_start() + dst_index); + MaybeObjectSlot src_slot(src.data_start() + src_index); + isolate->heap()->CopyRange(*this, dst_slot, src_slot, len, mode); +} + MaybeObject WeakArrayList::Get(int index) const { DCHECK(index >= 0 && index < this->capacity()); return RELAXED_READ_WEAK_FIELD(*this, OffsetOfElementAt(index)); @@ -431,10 +440,23 @@ MaybeObjectSlot WeakArrayList::data_start() { return RawMaybeWeakField(kHeaderSize); } +void WeakArrayList::CopyElements(Isolate* isolate, int dst_index, + WeakArrayList src, int src_index, int len, + WriteBarrierMode mode) { + if (len == 0) return; + DCHECK_LE(dst_index + len, capacity()); + DCHECK_LE(src_index + len, src.capacity()); + DisallowHeapAllocation no_gc; + + MaybeObjectSlot dst_slot(data_start() + dst_index); + MaybeObjectSlot src_slot(src.data_start() + src_index); + isolate->heap()->CopyRange(*this, dst_slot, src_slot, len, mode); +} + HeapObject WeakArrayList::Iterator::Next() { if (!array_.is_null()) { - while (index_ < array_->length()) { - MaybeObject item = array_->Get(index_++); + while (index_ < array_.length()) { + MaybeObject item = array_.Get(index_++); DCHECK(item->IsWeakOrCleared()); if (!item->IsCleared()) return item->GetHeapObjectAssumeWeak(); } @@ -444,16 +466,16 @@ HeapObject WeakArrayList::Iterator::Next() { } int ArrayList::Length() const { - if (FixedArray::cast(*this)->length() == 0) return 0; - return Smi::ToInt(FixedArray::cast(*this)->get(kLengthIndex)); + if (FixedArray::cast(*this).length() == 0) return 0; + return Smi::ToInt(FixedArray::cast(*this).get(kLengthIndex)); } void ArrayList::SetLength(int length) { - return FixedArray::cast(*this)->set(kLengthIndex, Smi::FromInt(length)); + return FixedArray::cast(*this).set(kLengthIndex, Smi::FromInt(length)); } Object ArrayList::Get(int index) const { - return 
FixedArray::cast(*this)->get(kFirstIndex + index); + return FixedArray::cast(*this).get(kFirstIndex + index); } ObjectSlot ArrayList::Slot(int index) { @@ -461,25 +483,25 @@ ObjectSlot ArrayList::Slot(int index) { } void ArrayList::Set(int index, Object obj, WriteBarrierMode mode) { - FixedArray::cast(*this)->set(kFirstIndex + index, obj, mode); + FixedArray::cast(*this).set(kFirstIndex + index, obj, mode); } void ArrayList::Clear(int index, Object undefined) { - DCHECK(undefined->IsUndefined()); - FixedArray::cast(*this)->set(kFirstIndex + index, undefined, - SKIP_WRITE_BARRIER); + DCHECK(undefined.IsUndefined()); + FixedArray::cast(*this).set(kFirstIndex + index, undefined, + SKIP_WRITE_BARRIER); } int ByteArray::Size() { return RoundUp(length() + kHeaderSize, kTaggedSize); } byte ByteArray::get(int index) const { DCHECK(index >= 0 && index < this->length()); - return READ_BYTE_FIELD(*this, kHeaderSize + index * kCharSize); + return ReadField(kHeaderSize + index * kCharSize); } void ByteArray::set(int index, byte value) { DCHECK(index >= 0 && index < this->length()); - WRITE_BYTE_FIELD(*this, kHeaderSize + index * kCharSize, value); + WriteField(kHeaderSize + index * kCharSize, value); } void ByteArray::copy_in(int index, const byte* buffer, int length) { @@ -498,22 +520,22 @@ void ByteArray::copy_out(int index, byte* buffer, int length) { int ByteArray::get_int(int index) const { DCHECK(index >= 0 && index < this->length() / kIntSize); - return READ_INT_FIELD(*this, kHeaderSize + index * kIntSize); + return ReadField(kHeaderSize + index * kIntSize); } void ByteArray::set_int(int index, int value) { DCHECK(index >= 0 && index < this->length() / kIntSize); - WRITE_INT_FIELD(*this, kHeaderSize + index * kIntSize, value); + WriteField(kHeaderSize + index * kIntSize, value); } uint32_t ByteArray::get_uint32(int index) const { DCHECK(index >= 0 && index < this->length() / kUInt32Size); - return READ_UINT32_FIELD(*this, kHeaderSize + index * kUInt32Size); + return 
ReadField(kHeaderSize + index * kUInt32Size); } void ByteArray::set_uint32(int index, uint32_t value) { DCHECK(index >= 0 && index < this->length() / kUInt32Size); - WRITE_UINT32_FIELD(*this, kHeaderSize + index * kUInt32Size, value); + WriteField(kHeaderSize + index * kUInt32Size, value); } void ByteArray::clear_padding() { @@ -559,385 +581,16 @@ int PodArray::length() const { return ByteArray::length() / sizeof(T); } -void* FixedTypedArrayBase::external_pointer() const { - intptr_t ptr = READ_INTPTR_FIELD(*this, kExternalPointerOffset); - return reinterpret_cast(ptr); -} - -void FixedTypedArrayBase::set_external_pointer(void* value) { - intptr_t ptr = reinterpret_cast(value); - WRITE_INTPTR_FIELD(*this, kExternalPointerOffset, ptr); -} - -void* FixedTypedArrayBase::DataPtr() { - return reinterpret_cast( - base_pointer()->ptr() + reinterpret_cast(external_pointer())); -} - -int FixedTypedArrayBase::ElementSize(InstanceType type) { - int element_size; - switch (type) { -#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype) \ - case FIXED_##TYPE##_ARRAY_TYPE: \ - element_size = sizeof(ctype); \ - break; - - TYPED_ARRAYS(TYPED_ARRAY_CASE) -#undef TYPED_ARRAY_CASE - default: - UNREACHABLE(); - } - return element_size; -} - -int FixedTypedArrayBase::DataSize(InstanceType type) const { - if (base_pointer() == Smi::kZero) return 0; - return length() * ElementSize(type); -} - -int FixedTypedArrayBase::DataSize() const { - return DataSize(map()->instance_type()); -} - -size_t FixedTypedArrayBase::ByteLength() const { - return static_cast(length()) * - static_cast(ElementSize(map()->instance_type())); -} - -int FixedTypedArrayBase::size() const { - return OBJECT_POINTER_ALIGN(kDataOffset + DataSize()); -} - -int FixedTypedArrayBase::TypedArraySize(InstanceType type) const { - return OBJECT_POINTER_ALIGN(kDataOffset + DataSize(type)); -} - -// static -int FixedTypedArrayBase::TypedArraySize(InstanceType type, int length) { - return OBJECT_POINTER_ALIGN(kDataOffset + length * 
ElementSize(type)); -} - -uint8_t Uint8ArrayTraits::defaultValue() { return 0; } - -uint8_t Uint8ClampedArrayTraits::defaultValue() { return 0; } - -int8_t Int8ArrayTraits::defaultValue() { return 0; } - -uint16_t Uint16ArrayTraits::defaultValue() { return 0; } - -int16_t Int16ArrayTraits::defaultValue() { return 0; } - -uint32_t Uint32ArrayTraits::defaultValue() { return 0; } - -int32_t Int32ArrayTraits::defaultValue() { return 0; } - -float Float32ArrayTraits::defaultValue() { - return std::numeric_limits::quiet_NaN(); -} - -double Float64ArrayTraits::defaultValue() { - return std::numeric_limits::quiet_NaN(); -} - -template -typename Traits::ElementType FixedTypedArray::get_scalar(int index) { - DCHECK((index >= 0) && (index < this->length())); - return FixedTypedArray::get_scalar_from_data_ptr(DataPtr(), index); -} - -// static -template -typename Traits::ElementType FixedTypedArray::get_scalar_from_data_ptr( - void* data_ptr, int index) { - typename Traits::ElementType* ptr = reinterpret_cast(data_ptr); - // The JavaScript memory model allows for racy reads and writes to a - // SharedArrayBuffer's backing store, which will always be a FixedTypedArray. - // ThreadSanitizer will catch these racy accesses and warn about them, so we - // disable TSAN for these reads and writes using annotations. - // - // We don't use relaxed atomics here, as it is not a requirement of the - // JavaScript memory model to have tear-free reads of overlapping accesses, - // and using relaxed atomics may introduce overhead. - TSAN_ANNOTATE_IGNORE_READS_BEGIN; - ElementType result; - if (COMPRESS_POINTERS_BOOL && alignof(ElementType) > kTaggedSize) { - // TODO(ishell, v8:8875): When pointer compression is enabled 8-byte size - // fields (external pointers, doubles and BigInt data) are only kTaggedSize - // aligned so we have to use unaligned pointer friendly way of accessing - // them in order to avoid undefined behavior in C++ code. - result = ReadUnalignedValue(reinterpret_cast
(ptr) + - index * sizeof(ElementType)); - } else { - result = ptr[index]; - } - TSAN_ANNOTATE_IGNORE_READS_END; - return result; -} - -template -void FixedTypedArray::set(int index, ElementType value) { - CHECK((index >= 0) && (index < this->length())); - // See the comment in FixedTypedArray::get_scalar. - auto* ptr = reinterpret_cast(DataPtr()); - TSAN_ANNOTATE_IGNORE_WRITES_BEGIN; - if (COMPRESS_POINTERS_BOOL && alignof(ElementType) > kTaggedSize) { - // TODO(ishell, v8:8875): When pointer compression is enabled 8-byte size - // fields (external pointers, doubles and BigInt data) are only kTaggedSize - // aligned so we have to use unaligned pointer friendly way of accessing - // them in order to avoid undefined behavior in C++ code. - WriteUnalignedValue( - reinterpret_cast
(ptr) + index * sizeof(ElementType), value); - } else { - ptr[index] = value; - } - TSAN_ANNOTATE_IGNORE_WRITES_END; -} - -template -typename Traits::ElementType FixedTypedArray::from(int value) { - return static_cast(value); -} - -template <> -inline uint8_t FixedTypedArray::from(int value) { - if (value < 0) return 0; - if (value > 0xFF) return 0xFF; - return static_cast(value); -} - -template <> -inline int64_t FixedTypedArray::from(int value) { - UNREACHABLE(); -} - -template <> -inline uint64_t FixedTypedArray::from(int value) { - UNREACHABLE(); -} - -template -typename Traits::ElementType FixedTypedArray::from(uint32_t value) { - return static_cast(value); -} - -template <> -inline uint8_t FixedTypedArray::from(uint32_t value) { - // We need this special case for Uint32 -> Uint8Clamped, because the highest - // Uint32 values will be negative as an int, clamping to 0, rather than 255. - if (value > 0xFF) return 0xFF; - return static_cast(value); -} - -template <> -inline int64_t FixedTypedArray::from(uint32_t value) { - UNREACHABLE(); -} - -template <> -inline uint64_t FixedTypedArray::from(uint32_t value) { - UNREACHABLE(); -} - -template -typename Traits::ElementType FixedTypedArray::from(double value) { - return static_cast(DoubleToInt32(value)); -} - -template <> -inline uint8_t FixedTypedArray::from(double value) { - // Handle NaNs and less than zero values which clamp to zero. 
- if (!(value > 0)) return 0; - if (value > 0xFF) return 0xFF; - return static_cast(lrint(value)); -} - -template <> -inline int64_t FixedTypedArray::from(double value) { - UNREACHABLE(); -} - -template <> -inline uint64_t FixedTypedArray::from(double value) { - UNREACHABLE(); -} - -template <> -inline float FixedTypedArray::from(double value) { - using limits = std::numeric_limits; - if (value > limits::max()) return limits::infinity(); - if (value < limits::lowest()) return -limits::infinity(); - return static_cast(value); -} - -template <> -inline double FixedTypedArray::from(double value) { - return value; -} - -template -typename Traits::ElementType FixedTypedArray::from(int64_t value) { - UNREACHABLE(); -} - -template -typename Traits::ElementType FixedTypedArray::from(uint64_t value) { - UNREACHABLE(); -} - -template <> -inline int64_t FixedTypedArray::from(int64_t value) { - return value; -} - -template <> -inline uint64_t FixedTypedArray::from(uint64_t value) { - return value; -} - -template <> -inline uint64_t FixedTypedArray::from(int64_t value) { - return static_cast(value); -} - -template <> -inline int64_t FixedTypedArray::from(uint64_t value) { - return static_cast(value); -} - -template -typename Traits::ElementType FixedTypedArray::FromHandle( - Handle value, bool* lossless) { - if (value->IsSmi()) { - return from(Smi::ToInt(*value)); - } - DCHECK(value->IsHeapNumber()); - return from(HeapNumber::cast(*value)->value()); -} - -template <> -inline int64_t FixedTypedArray::FromHandle( - Handle value, bool* lossless) { - DCHECK(value->IsBigInt()); - return BigInt::cast(*value)->AsInt64(lossless); -} - -template <> -inline uint64_t FixedTypedArray::FromHandle( - Handle value, bool* lossless) { - DCHECK(value->IsBigInt()); - return BigInt::cast(*value)->AsUint64(lossless); -} - -template -Handle FixedTypedArray::get(Isolate* isolate, - FixedTypedArray array, - int index) { - return Traits::ToHandle(isolate, array->get_scalar(index)); -} - -template -void 
FixedTypedArray::SetValue(uint32_t index, Object value) { - ElementType cast_value = Traits::defaultValue(); - if (value->IsSmi()) { - int int_value = Smi::ToInt(value); - cast_value = from(int_value); - } else if (value->IsHeapNumber()) { - double double_value = HeapNumber::cast(value)->value(); - cast_value = from(double_value); - } else { - // Clamp undefined to the default value. All other types have been - // converted to a number type further up in the call chain. - DCHECK(value->IsUndefined()); - } - set(index, cast_value); -} - -template <> -inline void FixedTypedArray::SetValue(uint32_t index, - Object value) { - DCHECK(value->IsBigInt()); - set(index, BigInt::cast(value)->AsInt64()); -} - -template <> -inline void FixedTypedArray::SetValue(uint32_t index, - Object value) { - DCHECK(value->IsBigInt()); - set(index, BigInt::cast(value)->AsUint64()); -} - -Handle Uint8ArrayTraits::ToHandle(Isolate* isolate, uint8_t scalar) { - return handle(Smi::FromInt(scalar), isolate); -} - -Handle Uint8ClampedArrayTraits::ToHandle(Isolate* isolate, - uint8_t scalar) { - return handle(Smi::FromInt(scalar), isolate); -} - -Handle Int8ArrayTraits::ToHandle(Isolate* isolate, int8_t scalar) { - return handle(Smi::FromInt(scalar), isolate); -} - -Handle Uint16ArrayTraits::ToHandle(Isolate* isolate, uint16_t scalar) { - return handle(Smi::FromInt(scalar), isolate); -} - -Handle Int16ArrayTraits::ToHandle(Isolate* isolate, int16_t scalar) { - return handle(Smi::FromInt(scalar), isolate); -} - -Handle Uint32ArrayTraits::ToHandle(Isolate* isolate, uint32_t scalar) { - return isolate->factory()->NewNumberFromUint(scalar); -} - -Handle Int32ArrayTraits::ToHandle(Isolate* isolate, int32_t scalar) { - return isolate->factory()->NewNumberFromInt(scalar); -} - -Handle Float32ArrayTraits::ToHandle(Isolate* isolate, float scalar) { - return isolate->factory()->NewNumber(scalar); -} - -Handle Float64ArrayTraits::ToHandle(Isolate* isolate, double scalar) { - return 
isolate->factory()->NewNumber(scalar); -} - -Handle BigInt64ArrayTraits::ToHandle(Isolate* isolate, int64_t scalar) { - return BigInt::FromInt64(isolate, scalar); -} - -Handle BigUint64ArrayTraits::ToHandle(Isolate* isolate, - uint64_t scalar) { - return BigInt::FromUint64(isolate, scalar); -} - -// static -template -STATIC_CONST_MEMBER_DEFINITION const InstanceType - FixedTypedArray::kInstanceType; - -template -FixedTypedArray::FixedTypedArray(Address ptr) - : FixedTypedArrayBase(ptr) { - DCHECK(IsHeapObject() && map()->instance_type() == Traits::kInstanceType); -} - -template -FixedTypedArray FixedTypedArray::cast(Object object) { - return FixedTypedArray(object.ptr()); -} - int TemplateList::length() const { - return Smi::ToInt(FixedArray::cast(*this)->get(kLengthIndex)); + return Smi::ToInt(FixedArray::cast(*this).get(kLengthIndex)); } Object TemplateList::get(int index) const { - return FixedArray::cast(*this)->get(kFirstElementIndex + index); + return FixedArray::cast(*this).get(kFirstElementIndex + index); } void TemplateList::set(int index, Object value) { - FixedArray::cast(*this)->set(kFirstElementIndex + index, value); + FixedArray::cast(*this).set(kFirstElementIndex + index, value); } } // namespace internal diff --git a/deps/v8/src/objects/fixed-array.h b/deps/v8/src/objects/fixed-array.h index e3ab45ba0e..02f26502b2 100644 --- a/deps/v8/src/objects/fixed-array.h +++ b/deps/v8/src/objects/fixed-array.h @@ -5,10 +5,10 @@ #ifndef V8_OBJECTS_FIXED_ARRAY_H_ #define V8_OBJECTS_FIXED_ARRAY_H_ -#include "src/maybe-handles.h" +#include "src/handles/maybe-handles.h" #include "src/objects/instance-type.h" #include "src/objects/smi.h" -#include "torque-generated/class-definitions-from-dsl.h" +#include "torque-generated/field-offsets-tq.h" // Has to be the last include (doesn't have include guards): #include "src/objects/object-macros.h" @@ -100,8 +100,6 @@ class FixedArrayBase : public HeapObject { DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize, 
TORQUE_GENERATED_FIXED_ARRAY_BASE_FIELDS) - static const int kHeaderSize = kSize; - protected: // Special-purpose constructor for subclasses that have fast paths where // their ptr() is a Smi. @@ -117,11 +115,6 @@ class FixedArray : public FixedArrayBase { inline Object get(int index) const; static inline Handle get(FixedArray array, int index, Isolate* isolate); - template - MaybeHandle GetValue(Isolate* isolate, int index) const; - - template - Handle GetValueChecked(Isolate* isolate, int index) const; // Return a grown copy if the index is bigger than the array's length. V8_EXPORT_PRIVATE static Handle SetAndGrow( @@ -147,16 +140,14 @@ class FixedArray : public FixedArrayBase { inline ObjectSlot GetFirstElementAddress(); inline bool ContainsOnlySmisOrHoles(); - // Returns true iff the elements are Numbers and sorted ascending. - bool ContainsSortedNumbers(); // Gives access to raw memory which stores the array's data. inline ObjectSlot data_start(); - inline void MoveElements(Heap* heap, int dst_index, int src_index, int len, - WriteBarrierMode mode); + inline void MoveElements(Isolate* isolate, int dst_index, int src_index, + int len, WriteBarrierMode mode); - inline void CopyElements(Heap* heap, int dst_index, FixedArray src, + inline void CopyElements(Isolate* isolate, int dst_index, FixedArray src, int src_index, int len, WriteBarrierMode mode); inline void FillWithHoles(int from, int to); @@ -201,6 +192,8 @@ class FixedArray : public FixedArrayBase { using BodyDescriptor = FlexibleBodyDescriptor; + static constexpr int kObjectsOffset = kHeaderSize; + protected: // Set operation on FixedArray without using write barriers. Can // only be used for storing old space objects or smis. 
@@ -243,8 +236,8 @@ class FixedDoubleArray : public FixedArrayBase { return kHeaderSize + length * kDoubleSize; } - inline void MoveElements(Heap* heap, int dst_index, int src_index, int len, - WriteBarrierMode mode); + inline void MoveElements(Isolate* isolate, int dst_index, int src_index, + int len, WriteBarrierMode mode); inline void FillWithHoles(int from, int to); @@ -296,6 +289,9 @@ class WeakFixedArray : public HeapObject { inline MaybeObjectSlot RawFieldOfElementAt(int index); + inline void CopyElements(Isolate* isolate, int dst_index, WeakFixedArray src, + int src_index, int len, WriteBarrierMode mode); + DECL_PRINTER(WeakFixedArray) DECL_VERIFIER(WeakFixedArray) @@ -354,6 +350,9 @@ class WeakArrayList : public HeapObject { // Gives access to raw memory which stores the array's data. inline MaybeObjectSlot data_start(); + inline void CopyElements(Isolate* isolate, int dst_index, WeakArrayList src, + int src_index, int len, WriteBarrierMode mode); + V8_EXPORT_PRIVATE bool IsFull(); DECL_INT_ACCESSORS(capacity) @@ -577,128 +576,6 @@ class PodArray : public ByteArray { OBJECT_CONSTRUCTORS(PodArray, ByteArray); }; -class FixedTypedArrayBase : public FixedArrayBase { - public: - // [base_pointer]: Either points to the FixedTypedArrayBase itself or nullptr. - DECL_ACCESSORS(base_pointer, Object) - - // [external_pointer]: Contains the offset between base_pointer and the start - // of the data. If the base_pointer is a nullptr, the external_pointer - // therefore points to the actual backing store. - DECL_PRIMITIVE_ACCESSORS(external_pointer, void*) - - // Dispatched behavior. 
- DECL_CAST(FixedTypedArrayBase) - - DEFINE_FIELD_OFFSET_CONSTANTS(FixedArrayBase::kHeaderSize, - TORQUE_GENERATED_FIXED_TYPED_ARRAY_BASE_FIELDS) - static const int kHeaderSize = kSize; - -#ifdef V8_COMPRESS_POINTERS - // TODO(ishell, v8:8875): When pointer compression is enabled the kHeaderSize - // is only kTaggedSize aligned but we can keep using unaligned access since - // both x64 and arm64 architectures (where pointer compression supported) - // allow unaligned access to doubles. - STATIC_ASSERT(IsAligned(kHeaderSize, kTaggedSize)); -#else - STATIC_ASSERT(IsAligned(kHeaderSize, kDoubleAlignment)); -#endif - - static const int kDataOffset = kHeaderSize; - - static const int kMaxElementSize = 8; - -#ifdef V8_HOST_ARCH_32_BIT - static const size_t kMaxByteLength = std::numeric_limits::max(); -#else - static const size_t kMaxByteLength = - static_cast(Smi::kMaxValue) * kMaxElementSize; -#endif // V8_HOST_ARCH_32_BIT - - static const size_t kMaxLength = Smi::kMaxValue; - - class BodyDescriptor; - - inline int size() const; - - static inline int TypedArraySize(InstanceType type, int length); - inline int TypedArraySize(InstanceType type) const; - - // Use with care: returns raw pointer into heap. 
- inline void* DataPtr(); - - inline int DataSize() const; - - inline size_t ByteLength() const; - - static inline intptr_t ExternalPointerValueForOnHeapArray() { - return FixedTypedArrayBase::kDataOffset - kHeapObjectTag; - } - - static inline void* ExternalPointerPtrForOnHeapArray() { - return reinterpret_cast(ExternalPointerValueForOnHeapArray()); - } - - private: - static inline int ElementSize(InstanceType type); - - inline int DataSize(InstanceType type) const; - - OBJECT_CONSTRUCTORS(FixedTypedArrayBase, FixedArrayBase); -}; - -template -class FixedTypedArray : public FixedTypedArrayBase { - public: - using ElementType = typename Traits::ElementType; - static const InstanceType kInstanceType = Traits::kInstanceType; - - DECL_CAST(FixedTypedArray) - - static inline ElementType get_scalar_from_data_ptr(void* data_ptr, int index); - inline ElementType get_scalar(int index); - static inline Handle get(Isolate* isolate, FixedTypedArray array, - int index); - inline void set(int index, ElementType value); - - static inline ElementType from(int value); - static inline ElementType from(uint32_t value); - static inline ElementType from(double value); - static inline ElementType from(int64_t value); - static inline ElementType from(uint64_t value); - - static inline ElementType FromHandle(Handle value, - bool* lossless = nullptr); - - // This accessor applies the correct conversion from Smi, HeapNumber - // and undefined. 
- inline void SetValue(uint32_t index, Object value); - - DECL_PRINTER(FixedTypedArray) - DECL_VERIFIER(FixedTypedArray) - - private: - OBJECT_CONSTRUCTORS(FixedTypedArray, FixedTypedArrayBase); -}; - -#define FIXED_TYPED_ARRAY_TRAITS(Type, type, TYPE, elementType) \ - STATIC_ASSERT(sizeof(elementType) <= FixedTypedArrayBase::kMaxElementSize); \ - class Type##ArrayTraits { \ - public: /* NOLINT */ \ - using ElementType = elementType; \ - static const InstanceType kInstanceType = FIXED_##TYPE##_ARRAY_TYPE; \ - static const char* ArrayTypeName() { return "Fixed" #Type "Array"; } \ - static inline Handle ToHandle(Isolate* isolate, \ - elementType scalar); \ - static inline elementType defaultValue(); \ - }; \ - \ - using Fixed##Type##Array = FixedTypedArray; - -TYPED_ARRAYS(FIXED_TYPED_ARRAY_TRAITS) - -#undef FIXED_TYPED_ARRAY_TRAITS - class TemplateList : public FixedArray { public: static Handle New(Isolate* isolate, int size); diff --git a/deps/v8/src/objects/foreign-inl.h b/deps/v8/src/objects/foreign-inl.h index 0ac9f652bb..fc93b66a0a 100644 --- a/deps/v8/src/objects/foreign-inl.h +++ b/deps/v8/src/objects/foreign-inl.h @@ -8,7 +8,7 @@ #include "src/objects/foreign.h" #include "src/heap/heap-write-barrier-inl.h" -#include "src/objects-inl.h" +#include "src/objects/objects-inl.h" // Has to be the last include (doesn't have include guards): #include "src/objects/object-macros.h" @@ -23,15 +23,15 @@ CAST_ACCESSOR(Foreign) // static bool Foreign::IsNormalized(Object value) { if (value == Smi::kZero) return true; - return Foreign::cast(value)->foreign_address() != kNullAddress; + return Foreign::cast(value).foreign_address() != kNullAddress; } Address Foreign::foreign_address() { - return READ_UINTPTR_FIELD(*this, kForeignAddressOffset); + return ReadField
(kForeignAddressOffset); } void Foreign::set_foreign_address(Address value) { - WRITE_UINTPTR_FIELD(*this, kForeignAddressOffset, value); + WriteField
(kForeignAddressOffset, value); } } // namespace internal diff --git a/deps/v8/src/objects/foreign.h b/deps/v8/src/objects/foreign.h index 629d549b6d..617ca0e34f 100644 --- a/deps/v8/src/objects/foreign.h +++ b/deps/v8/src/objects/foreign.h @@ -6,7 +6,7 @@ #define V8_OBJECTS_FOREIGN_H_ #include "src/objects/heap-object.h" -#include "torque-generated/class-definitions-from-dsl.h" +#include "torque-generated/field-offsets-tq.h" // Has to be the last include (doesn't have include guards): #include "src/objects/object-macros.h" diff --git a/deps/v8/src/objects/frame-array-inl.h b/deps/v8/src/objects/frame-array-inl.h index 78d08da00f..5b342c64c0 100644 --- a/deps/v8/src/objects/frame-array-inl.h +++ b/deps/v8/src/objects/frame-array-inl.h @@ -33,17 +33,17 @@ FRAME_ARRAY_FIELD_LIST(DEFINE_FRAME_ARRAY_ACCESSORS) #undef DEFINE_FRAME_ARRAY_ACCESSORS bool FrameArray::IsWasmFrame(int frame_ix) const { - const int flags = Flags(frame_ix)->value(); + const int flags = Flags(frame_ix).value(); return (flags & kIsWasmFrame) != 0; } bool FrameArray::IsWasmInterpretedFrame(int frame_ix) const { - const int flags = Flags(frame_ix)->value(); + const int flags = Flags(frame_ix).value(); return (flags & kIsWasmInterpretedFrame) != 0; } bool FrameArray::IsAsmJsWasmFrame(int frame_ix) const { - const int flags = Flags(frame_ix)->value(); + const int flags = Flags(frame_ix).value(); return (flags & kIsAsmJsWasmFrame) != 0; } diff --git a/deps/v8/src/objects/frame-array.h b/deps/v8/src/objects/frame-array.h index 438718e25f..42750cf69c 100644 --- a/deps/v8/src/objects/frame-array.h +++ b/deps/v8/src/objects/frame-array.h @@ -5,7 +5,7 @@ #ifndef V8_OBJECTS_FRAME_ARRAY_H_ #define V8_OBJECTS_FRAME_ARRAY_H_ -#include "src/objects.h" +#include "src/objects/objects.h" #include "src/wasm/wasm-objects.h" // Has to be the last include (doesn't have include guards): diff --git a/deps/v8/src/objects/free-space-inl.h b/deps/v8/src/objects/free-space-inl.h index b36c4e154f..bea8257515 100644 --- 
a/deps/v8/src/objects/free-space-inl.h +++ b/deps/v8/src/objects/free-space-inl.h @@ -7,10 +7,10 @@ #include "src/objects/free-space.h" +#include "src/execution/isolate.h" #include "src/heap/heap-write-barrier-inl.h" #include "src/heap/heap.h" -#include "src/isolate.h" -#include "src/objects-inl.h" +#include "src/objects/objects-inl.h" // Has to be the last include (doesn't have include guards): #include "src/objects/object-macros.h" @@ -30,7 +30,7 @@ FreeSpace FreeSpace::next() { Heap* heap = GetHeapFromWritableObject(*this); Object free_space_map = Isolate::FromHeap(heap)->root(RootIndex::kFreeSpaceMap); - DCHECK_IMPLIES(!map_slot().contains_value(free_space_map->ptr()), + DCHECK_IMPLIES(!map_slot().contains_value(free_space_map.ptr()), !heap->deserialization_complete() && map_slot().contains_value(kNullAddress)); #endif @@ -43,7 +43,7 @@ void FreeSpace::set_next(FreeSpace next) { Heap* heap = GetHeapFromWritableObject(*this); Object free_space_map = Isolate::FromHeap(heap)->root(RootIndex::kFreeSpaceMap); - DCHECK_IMPLIES(!map_slot().contains_value(free_space_map->ptr()), + DCHECK_IMPLIES(!map_slot().contains_value(free_space_map.ptr()), !heap->deserialization_complete() && map_slot().contains_value(kNullAddress)); #endif @@ -53,7 +53,7 @@ void FreeSpace::set_next(FreeSpace next) { FreeSpace FreeSpace::cast(HeapObject o) { SLOW_DCHECK(!GetHeapFromWritableObject(o)->deserialization_complete() || - o->IsFreeSpace()); + o.IsFreeSpace()); return bit_cast(o); } diff --git a/deps/v8/src/objects/free-space.h b/deps/v8/src/objects/free-space.h index f1f7bb56c5..38f5794646 100644 --- a/deps/v8/src/objects/free-space.h +++ b/deps/v8/src/objects/free-space.h @@ -6,7 +6,7 @@ #define V8_OBJECTS_FREE_SPACE_H_ #include "src/objects/heap-object.h" -#include "torque-generated/class-definitions-from-dsl.h" +#include "torque-generated/field-offsets-tq.h" // Has to be the last include (doesn't have include guards): #include "src/objects/object-macros.h" diff --git 
a/deps/v8/src/objects/function-kind.h b/deps/v8/src/objects/function-kind.h new file mode 100644 index 0000000000..4a1819813c --- /dev/null +++ b/deps/v8/src/objects/function-kind.h @@ -0,0 +1,194 @@ + +// Copyright 2019 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef V8_OBJECTS_FUNCTION_KIND_H_ +#define V8_OBJECTS_FUNCTION_KIND_H_ + +#include "src/utils/utils.h" + +namespace v8 { +namespace internal { + +enum FunctionKind : uint8_t { + // BEGIN constructable functions + kNormalFunction, + kModule, + // BEGIN class constructors + // BEGIN base constructors + kBaseConstructor, + // BEGIN default constructors + kDefaultBaseConstructor, + // END base constructors + // BEGIN derived constructors + kDefaultDerivedConstructor, + // END default constructors + kDerivedConstructor, + // END derived constructors + // END class constructors + // END constructable functions. + // BEGIN accessors + kGetterFunction, + kSetterFunction, + // END accessors + // BEGIN arrow functions + kArrowFunction, + // BEGIN async functions + kAsyncArrowFunction, + // END arrow functions + kAsyncFunction, + // BEGIN concise methods 1 + kAsyncConciseMethod, + // BEGIN generators + kAsyncConciseGeneratorMethod, + // END concise methods 1 + kAsyncGeneratorFunction, + // END async functions + kGeneratorFunction, + // BEGIN concise methods 2 + kConciseGeneratorMethod, + // END generators + kConciseMethod, + kClassMembersInitializerFunction, + // END concise methods 2 + + kLastFunctionKind = kClassMembersInitializerFunction, +}; + +inline bool IsArrowFunction(FunctionKind kind) { + return IsInRange(kind, FunctionKind::kArrowFunction, + FunctionKind::kAsyncArrowFunction); +} + +inline bool IsModule(FunctionKind kind) { + return kind == FunctionKind::kModule; +} + +inline bool IsAsyncGeneratorFunction(FunctionKind kind) { + return IsInRange(kind, FunctionKind::kAsyncConciseGeneratorMethod, 
FunctionKind::kAsyncGeneratorFunction); +} + +inline bool IsGeneratorFunction(FunctionKind kind) { + return IsInRange(kind, FunctionKind::kAsyncConciseGeneratorMethod, + FunctionKind::kConciseGeneratorMethod); +} + +inline bool IsAsyncFunction(FunctionKind kind) { + return IsInRange(kind, FunctionKind::kAsyncArrowFunction, + FunctionKind::kAsyncGeneratorFunction); +} + +inline bool IsResumableFunction(FunctionKind kind) { + return IsGeneratorFunction(kind) || IsAsyncFunction(kind) || IsModule(kind); +} + +inline bool IsConciseMethod(FunctionKind kind) { + return IsInRange(kind, FunctionKind::kAsyncConciseMethod, + FunctionKind::kAsyncConciseGeneratorMethod) || + IsInRange(kind, FunctionKind::kConciseGeneratorMethod, + FunctionKind::kClassMembersInitializerFunction); +} + +inline bool IsStrictFunctionWithoutPrototype(FunctionKind kind) { + return IsInRange(kind, FunctionKind::kGetterFunction, + FunctionKind::kAsyncArrowFunction) || + IsInRange(kind, FunctionKind::kAsyncConciseMethod, + FunctionKind::kAsyncConciseGeneratorMethod) || + IsInRange(kind, FunctionKind::kConciseGeneratorMethod, + FunctionKind::kClassMembersInitializerFunction); +} + +inline bool IsGetterFunction(FunctionKind kind) { + return kind == FunctionKind::kGetterFunction; +} + +inline bool IsSetterFunction(FunctionKind kind) { + return kind == FunctionKind::kSetterFunction; +} + +inline bool IsAccessorFunction(FunctionKind kind) { + return IsInRange(kind, FunctionKind::kGetterFunction, + FunctionKind::kSetterFunction); +} + +inline bool IsDefaultConstructor(FunctionKind kind) { + return IsInRange(kind, FunctionKind::kDefaultBaseConstructor, + FunctionKind::kDefaultDerivedConstructor); +} + +inline bool IsBaseConstructor(FunctionKind kind) { + return IsInRange(kind, FunctionKind::kBaseConstructor, + FunctionKind::kDefaultBaseConstructor); +} + +inline bool IsDerivedConstructor(FunctionKind kind) { + return IsInRange(kind, FunctionKind::kDefaultDerivedConstructor, + 
FunctionKind::kDerivedConstructor); +} + +inline bool IsClassConstructor(FunctionKind kind) { + return IsInRange(kind, FunctionKind::kBaseConstructor, + FunctionKind::kDerivedConstructor); +} + +inline bool IsClassMembersInitializerFunction(FunctionKind kind) { + return kind == FunctionKind::kClassMembersInitializerFunction; +} + +inline bool IsConstructable(FunctionKind kind) { + return IsInRange(kind, FunctionKind::kNormalFunction, + FunctionKind::kDerivedConstructor); +} + +inline const char* FunctionKind2String(FunctionKind kind) { + switch (kind) { + case FunctionKind::kNormalFunction: + return "NormalFunction"; + case FunctionKind::kArrowFunction: + return "ArrowFunction"; + case FunctionKind::kGeneratorFunction: + return "GeneratorFunction"; + case FunctionKind::kConciseMethod: + return "ConciseMethod"; + case FunctionKind::kDerivedConstructor: + return "DerivedConstructor"; + case FunctionKind::kBaseConstructor: + return "BaseConstructor"; + case FunctionKind::kGetterFunction: + return "GetterFunction"; + case FunctionKind::kSetterFunction: + return "SetterFunction"; + case FunctionKind::kAsyncFunction: + return "AsyncFunction"; + case FunctionKind::kModule: + return "Module"; + case FunctionKind::kClassMembersInitializerFunction: + return "ClassMembersInitializerFunction"; + case FunctionKind::kDefaultBaseConstructor: + return "DefaultBaseConstructor"; + case FunctionKind::kDefaultDerivedConstructor: + return "DefaultDerivedConstructor"; + case FunctionKind::kAsyncArrowFunction: + return "AsyncArrowFunction"; + case FunctionKind::kAsyncConciseMethod: + return "AsyncConciseMethod"; + case FunctionKind::kConciseGeneratorMethod: + return "ConciseGeneratorMethod"; + case FunctionKind::kAsyncConciseGeneratorMethod: + return "AsyncConciseGeneratorMethod"; + case FunctionKind::kAsyncGeneratorFunction: + return "AsyncGeneratorFunction"; + } + UNREACHABLE(); +} + +inline std::ostream& operator<<(std::ostream& os, FunctionKind kind) { + return os << 
FunctionKind2String(kind); +} + +} // namespace internal +} // namespace v8 + +#endif // V8_OBJECTS_FUNCTION_KIND_H_ diff --git a/deps/v8/src/objects/hash-table-inl.h b/deps/v8/src/objects/hash-table-inl.h index d65d9de083..77453721ae 100644 --- a/deps/v8/src/objects/hash-table-inl.h +++ b/deps/v8/src/objects/hash-table-inl.h @@ -8,10 +8,10 @@ #include "src/objects/hash-table.h" #include "src/heap/heap.h" -#include "src/objects-inl.h" #include "src/objects/fixed-array-inl.h" #include "src/objects/heap-object-inl.h" -#include "src/roots-inl.h" +#include "src/objects/objects-inl.h" +#include "src/roots/roots-inl.h" // Has to be the last include (doesn't have include guards): #include "src/objects/object-macros.h" @@ -193,7 +193,7 @@ bool ObjectHashSet::Has(Isolate* isolate, Handle key, int32_t hash) { bool ObjectHashSet::Has(Isolate* isolate, Handle key) { Object hash = key->GetHash(); - if (!hash->IsSmi()) return false; + if (!hash.IsSmi()) return false; return FindEntry(ReadOnlyRoots(isolate), key, Smi::ToInt(hash)) != kNotFound; } @@ -207,7 +207,7 @@ uint32_t ObjectHashTableShape::Hash(Isolate* isolate, Handle key) { uint32_t ObjectHashTableShape::HashForObject(ReadOnlyRoots roots, Object other) { - return Smi::ToInt(other->GetHash()); + return Smi::ToInt(other.GetHash()); } } // namespace internal diff --git a/deps/v8/src/objects/hash-table.h b/deps/v8/src/objects/hash-table.h index 0c83d01b42..610dc9d28e 100644 --- a/deps/v8/src/objects/hash-table.h +++ b/deps/v8/src/objects/hash-table.h @@ -8,10 +8,10 @@ #include "src/base/compiler-specific.h" #include "src/base/export-template.h" #include "src/base/macros.h" -#include "src/globals.h" +#include "src/common/globals.h" #include "src/objects/fixed-array.h" #include "src/objects/smi.h" -#include "src/roots.h" +#include "src/roots/roots.h" // Has to be the last include (doesn't have include guards): #include "src/objects/object-macros.h" @@ -150,7 +150,7 @@ class EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE) HashTable 
// Find entry for key otherwise return kNotFound. inline int FindEntry(ReadOnlyRoots roots, Key key, int32_t hash); - int FindEntry(Isolate* isolate, Key key); + inline int FindEntry(Isolate* isolate, Key key); // Rehashes the table in-place. void Rehash(ReadOnlyRoots roots); diff --git a/deps/v8/src/objects/heap-number-inl.h b/deps/v8/src/objects/heap-number-inl.h index ad82296bce..3986e9146c 100644 --- a/deps/v8/src/objects/heap-number-inl.h +++ b/deps/v8/src/objects/heap-number-inl.h @@ -7,8 +7,8 @@ #include "src/objects/heap-number.h" -#include "src/objects-inl.h" #include "src/objects/heap-object-inl.h" +#include "src/objects/objects-inl.h" // Has to be the last include (doesn't have include guards): #include "src/objects/object-macros.h" @@ -23,30 +23,28 @@ OBJECT_CONSTRUCTORS_IMPL(MutableHeapNumber, HeapNumberBase) CAST_ACCESSOR(HeapNumber) CAST_ACCESSOR(MutableHeapNumber) -double HeapNumberBase::value() const { - return READ_DOUBLE_FIELD(*this, kValueOffset); -} +double HeapNumberBase::value() const { return ReadField(kValueOffset); } void HeapNumberBase::set_value(double value) { - WRITE_DOUBLE_FIELD(*this, kValueOffset, value); + WriteField(kValueOffset, value); } uint64_t HeapNumberBase::value_as_bits() const { - return READ_UINT64_FIELD(*this, kValueOffset); + // Bug(v8:8875): HeapNumber's double may be unaligned. 
+ return ReadUnalignedValue(field_address(kValueOffset)); } void HeapNumberBase::set_value_as_bits(uint64_t bits) { - WRITE_UINT64_FIELD(*this, kValueOffset, bits); + WriteUnalignedValue(field_address(kValueOffset), bits); } int HeapNumberBase::get_exponent() { - return ((READ_INT_FIELD(*this, kExponentOffset) & kExponentMask) >> - kExponentShift) - + return ((ReadField(kExponentOffset) & kExponentMask) >> kExponentShift) - kExponentBias; } int HeapNumberBase::get_sign() { - return READ_INT_FIELD(*this, kExponentOffset) & kSignMask; + return ReadField(kExponentOffset) & kSignMask; } } // namespace internal diff --git a/deps/v8/src/objects/heap-object-inl.h b/deps/v8/src/objects/heap-object-inl.h index be97f8bb79..3d5deeff63 100644 --- a/deps/v8/src/objects/heap-object-inl.h +++ b/deps/v8/src/objects/heap-object-inl.h @@ -9,7 +9,7 @@ #include "src/heap/heap-write-barrier-inl.h" // TODO(jkummerow): Get rid of this by moving NROSO::GetIsolate elsewhere. -#include "src/isolate.h" +#include "src/execution/isolate.h" // Has to be the last include (doesn't have include guards): #include "src/objects/object-macros.h" @@ -17,9 +17,6 @@ namespace v8 { namespace internal { -OBJECT_CONSTRUCTORS_IMPL(HeapObject, Object) -CAST_ACCESSOR(HeapObject) - HeapObject::HeapObject(Address ptr, AllowInlineSmiStorage allow_smi) : Object(ptr) { SLOW_DCHECK( @@ -27,12 +24,6 @@ HeapObject::HeapObject(Address ptr, AllowInlineSmiStorage allow_smi) IsHeapObject()); } -// static -HeapObject HeapObject::FromAddress(Address address) { - DCHECK_TAG_ALIGNED(address); - return HeapObject(address + kHeapObjectTag); -} - // static Heap* NeverReadOnlySpaceObject::GetHeap(const HeapObject object) { return GetHeapFromWritableObject(object); diff --git a/deps/v8/src/objects/heap-object.h b/deps/v8/src/objects/heap-object.h index f42dc05b81..9ca51bdda1 100644 --- a/deps/v8/src/objects/heap-object.h +++ b/deps/v8/src/objects/heap-object.h @@ -5,10 +5,10 @@ #ifndef V8_OBJECTS_HEAP_OBJECT_H_ #define 
V8_OBJECTS_HEAP_OBJECT_H_ -#include "src/globals.h" -#include "src/roots.h" +#include "src/common/globals.h" +#include "src/roots/roots.h" -#include "src/objects.h" +#include "src/objects/objects.h" // Has to be the last include (doesn't have include guards): #include "src/objects/object-macros.h" @@ -85,7 +85,10 @@ class HeapObject : public Object { #undef DECL_STRUCT_PREDICATE // Converts an address to a HeapObject pointer. - static inline HeapObject FromAddress(Address address); + static inline HeapObject FromAddress(Address address) { + DCHECK_TAG_ALIGNED(address); + return HeapObject(address + kHeapObjectTag); + } // Returns the address of this HeapObject. inline Address address() const { return ptr() - kHeapObjectTag; } @@ -197,6 +200,9 @@ class HeapObject : public Object { OBJECT_CONSTRUCTORS(HeapObject, Object); }; +OBJECT_CONSTRUCTORS_IMPL(HeapObject, Object) +CAST_ACCESSOR(HeapObject) + // Helper class for objects that can never be in RO space. class NeverReadOnlySpaceObject { public: diff --git a/deps/v8/src/objects/instance-type-inl.h b/deps/v8/src/objects/instance-type-inl.h index 5925c6aa92..2f867411f2 100644 --- a/deps/v8/src/objects/instance-type-inl.h +++ b/deps/v8/src/objects/instance-type-inl.h @@ -6,7 +6,7 @@ #define V8_OBJECTS_INSTANCE_TYPE_INL_H_ #include "src/objects/map-inl.h" -#include "src/utils.h" +#include "src/utils/utils.h" // Has to be the last include (doesn't have include guards): #include "src/objects/object-macros.h" @@ -19,11 +19,6 @@ namespace InstanceTypeChecker { // Define type checkers for classes with single instance type. 
INSTANCE_TYPE_CHECKERS_SINGLE(INSTANCE_TYPE_CHECKER) -#define TYPED_ARRAY_INSTANCE_TYPE_CHECKER(Type, type, TYPE, ctype) \ - INSTANCE_TYPE_CHECKER(Fixed##Type##Array, FIXED_##TYPE##_ARRAY_TYPE) -TYPED_ARRAYS(TYPED_ARRAY_INSTANCE_TYPE_CHECKER) -#undef TYPED_ARRAY_INSTANCE_TYPE_CHECKER - #define STRUCT_INSTANCE_TYPE_CHECKER(TYPE, Name, name) \ INSTANCE_TYPE_CHECKER(Name, TYPE) STRUCT_LIST(STRUCT_INSTANCE_TYPE_CHECKER) @@ -40,8 +35,7 @@ INSTANCE_TYPE_CHECKERS_RANGE(INSTANCE_TYPE_CHECKER_RANGE) V8_INLINE bool IsFixedArrayBase(InstanceType instance_type) { return IsFixedArray(instance_type) || IsFixedDoubleArray(instance_type) || - IsFixedTypedArrayBase(instance_type) || IsByteArray(instance_type) || - IsBytecodeArray(instance_type); + IsByteArray(instance_type) || IsBytecodeArray(instance_type); } V8_INLINE bool IsHeapObject(InstanceType instance_type) { return true; } @@ -69,11 +63,6 @@ V8_INLINE bool IsJSReceiver(InstanceType instance_type) { // pointer rather than looking up the instance type. 
INSTANCE_TYPE_CHECKERS(TYPE_CHECKER) -#define TYPED_ARRAY_TYPE_CHECKER(Type, type, TYPE, ctype) \ - TYPE_CHECKER(Fixed##Type##Array) -TYPED_ARRAYS(TYPED_ARRAY_TYPE_CHECKER) -#undef TYPED_ARRAY_TYPE_CHECKER - } // namespace internal } // namespace v8 diff --git a/deps/v8/src/objects/instance-type.h b/deps/v8/src/objects/instance-type.h index edbc428a5d..559ed34784 100644 --- a/deps/v8/src/objects/instance-type.h +++ b/deps/v8/src/objects/instance-type.h @@ -5,8 +5,8 @@ #ifndef V8_OBJECTS_INSTANCE_TYPE_H_ #define V8_OBJECTS_INSTANCE_TYPE_H_ -#include "src/elements-kind.h" -#include "src/objects-definitions.h" +#include "src/objects/elements-kind.h" +#include "src/objects/objects-definitions.h" // Has to be the last include (doesn't have include guards): #include "src/objects/object-macros.h" @@ -131,17 +131,6 @@ enum InstanceType : uint16_t { BYTE_ARRAY_TYPE, BYTECODE_ARRAY_TYPE, FREE_SPACE_TYPE, - FIXED_INT8_ARRAY_TYPE, // FIRST_FIXED_TYPED_ARRAY_TYPE - FIXED_UINT8_ARRAY_TYPE, - FIXED_INT16_ARRAY_TYPE, - FIXED_UINT16_ARRAY_TYPE, - FIXED_INT32_ARRAY_TYPE, - FIXED_UINT32_ARRAY_TYPE, - FIXED_FLOAT32_ARRAY_TYPE, - FIXED_FLOAT64_ARRAY_TYPE, - FIXED_UINT8_CLAMPED_ARRAY_TYPE, - FIXED_BIGINT64_ARRAY_TYPE, - FIXED_BIGUINT64_ARRAY_TYPE, // LAST_FIXED_TYPED_ARRAY_TYPE FIXED_DOUBLE_ARRAY_TYPE, FEEDBACK_METADATA_TYPE, FILLER_TYPE, // LAST_DATA_TYPE @@ -168,14 +157,18 @@ enum InstanceType : uint16_t { PROMISE_REACTION_TYPE, PROTOTYPE_INFO_TYPE, SCRIPT_TYPE, + SOURCE_POSITION_TABLE_WITH_FRAME_CACHE_TYPE, STACK_FRAME_INFO_TYPE, STACK_TRACE_FRAME_TYPE, + TEMPLATE_OBJECT_DESCRIPTION_TYPE, TUPLE2_TYPE, TUPLE3_TYPE, ARRAY_BOILERPLATE_DESCRIPTION_TYPE, + WASM_CAPI_FUNCTION_DATA_TYPE, WASM_DEBUG_INFO_TYPE, WASM_EXCEPTION_TAG_TYPE, WASM_EXPORTED_FUNCTION_DATA_TYPE, + WASM_JS_FUNCTION_DATA_TYPE, CALLABLE_TASK_TYPE, // FIRST_MICROTASK_TYPE CALLBACK_TASK_TYPE, @@ -190,14 +183,14 @@ enum InstanceType : uint16_t { FIXED_ARRAY_TYPE, // FIRST_FIXED_ARRAY_TYPE OBJECT_BOILERPLATE_DESCRIPTION_TYPE, 
CLOSURE_FEEDBACK_CELL_ARRAY_TYPE, - HASH_TABLE_TYPE, // FIRST_HASH_TABLE_TYPE - ORDERED_HASH_MAP_TYPE, // FIRST_DICTIONARY_TYPE + HASH_TABLE_TYPE, // FIRST_HASH_TABLE_TYPE + ORDERED_HASH_MAP_TYPE, ORDERED_HASH_SET_TYPE, ORDERED_NAME_DICTIONARY_TYPE, NAME_DICTIONARY_TYPE, GLOBAL_DICTIONARY_TYPE, NUMBER_DICTIONARY_TYPE, - SIMPLE_NUMBER_DICTIONARY_TYPE, // LAST_DICTIONARY_TYPE + SIMPLE_NUMBER_DICTIONARY_TYPE, STRING_TABLE_TYPE, EPHEMERON_HASH_TABLE_TYPE, // LAST_HASH_TABLE_TYPE SCOPE_INFO_TYPE, @@ -330,9 +323,6 @@ enum InstanceType : uint16_t { // Boundaries for testing if given HeapObject is a subclass of HashTable FIRST_HASH_TABLE_TYPE = HASH_TABLE_TYPE, LAST_HASH_TABLE_TYPE = EPHEMERON_HASH_TABLE_TYPE, - // Boundaries for testing if given HeapObject is a subclass of Dictionary - FIRST_DICTIONARY_TYPE = ORDERED_HASH_MAP_TYPE, - LAST_DICTIONARY_TYPE = SIMPLE_NUMBER_DICTIONARY_TYPE, // Boundaries for testing if given HeapObject is a subclass of WeakFixedArray. FIRST_WEAK_FIXED_ARRAY_TYPE = WEAK_FIXED_ARRAY_TYPE, LAST_WEAK_FIXED_ARRAY_TYPE = TRANSITION_ARRAY_TYPE, @@ -342,9 +332,6 @@ enum InstanceType : uint16_t { // Boundaries for testing if given HeapObject is a subclass of Microtask. FIRST_MICROTASK_TYPE = CALLABLE_TASK_TYPE, LAST_MICROTASK_TYPE = FINALIZATION_GROUP_CLEANUP_JOB_TASK_TYPE, - // Boundaries for testing for a fixed typed array. - FIRST_FIXED_TYPED_ARRAY_TYPE = FIXED_INT8_ARRAY_TYPE, - LAST_FIXED_TYPED_ARRAY_TYPE = FIXED_BIGUINT64_ARRAY_TYPE, // Boundary for promotion to old space. LAST_DATA_TYPE = FILLER_TYPE, // Boundary for objects represented as JSReceiver (i.e. JSObject or JSProxy). @@ -385,6 +372,10 @@ STATIC_ASSERT(FIRST_NONSTRING_TYPE == Internals::kFirstNonstringType); STATIC_ASSERT(ODDBALL_TYPE == Internals::kOddballType); STATIC_ASSERT(FOREIGN_TYPE == Internals::kForeignType); +// Make sure it doesn't matter whether we sign-extend or zero-extend these +// values, because Torque treats InstanceType as signed. 
+STATIC_ASSERT(LAST_TYPE < 1 << 15); + V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os, InstanceType instance_type); @@ -471,11 +462,9 @@ V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os, V(SmallOrderedHashMap, SMALL_ORDERED_HASH_MAP_TYPE) \ V(SmallOrderedHashSet, SMALL_ORDERED_HASH_SET_TYPE) \ V(SmallOrderedNameDictionary, SMALL_ORDERED_NAME_DICTIONARY_TYPE) \ - V(SourcePositionTableWithFrameCache, TUPLE2_TYPE) \ V(StoreHandler, STORE_HANDLER_TYPE) \ V(StringTable, STRING_TABLE_TYPE) \ V(Symbol, SYMBOL_TYPE) \ - V(TemplateObjectDescription, TUPLE2_TYPE) \ V(TransitionArray, TRANSITION_ARRAY_TYPE) \ V(UncompiledDataWithoutPreparseData, \ UNCOMPILED_DATA_WITHOUT_PREPARSE_DATA_TYPE) \ @@ -511,10 +500,7 @@ V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os, #define INSTANCE_TYPE_CHECKERS_RANGE(V) \ V(Context, FIRST_CONTEXT_TYPE, LAST_CONTEXT_TYPE) \ - V(Dictionary, FIRST_DICTIONARY_TYPE, LAST_DICTIONARY_TYPE) \ V(FixedArray, FIRST_FIXED_ARRAY_TYPE, LAST_FIXED_ARRAY_TYPE) \ - V(FixedTypedArrayBase, FIRST_FIXED_TYPED_ARRAY_TYPE, \ - LAST_FIXED_TYPED_ARRAY_TYPE) \ V(HashTable, FIRST_HASH_TABLE_TYPE, LAST_HASH_TABLE_TYPE) \ V(JSMapIterator, FIRST_MAP_ITERATOR_TYPE, LAST_MAP_ITERATOR_TYPE) \ V(JSSetIterator, FIRST_SET_ITERATOR_TYPE, LAST_SET_ITERATOR_TYPE) \ diff --git a/deps/v8/src/objects/intl-objects.cc b/deps/v8/src/objects/intl-objects.cc index 8a43f36245..f2bc87ebac 100644 --- a/deps/v8/src/objects/intl-objects.cc +++ b/deps/v8/src/objects/intl-objects.cc @@ -13,24 +13,25 @@ #include #include -#include "src/api-inl.h" -#include "src/global-handles.h" +#include "src/api/api-inl.h" +#include "src/execution/isolate.h" +#include "src/handles/global-handles.h" #include "src/heap/factory.h" -#include "src/isolate.h" -#include "src/objects-inl.h" #include "src/objects/js-collator-inl.h" #include "src/objects/js-date-time-format-inl.h" #include "src/objects/js-locale-inl.h" #include "src/objects/js-number-format-inl.h" +#include 
"src/objects/objects-inl.h" +#include "src/objects/property-descriptor.h" #include "src/objects/string.h" -#include "src/property-descriptor.h" -#include "src/string-case.h" +#include "src/strings/string-case.h" #include "unicode/basictz.h" #include "unicode/brkiter.h" #include "unicode/calendar.h" #include "unicode/coll.h" #include "unicode/datefmt.h" #include "unicode/decimfmt.h" +#include "unicode/formattedvalue.h" #include "unicode/locid.h" #include "unicode/normalizer2.h" #include "unicode/numfmt.h" @@ -153,7 +154,7 @@ void ToUpperWithSharpS(const Vector& src, inline int FindFirstUpperOrNonAscii(String s, int length) { for (int index = 0; index < length; ++index) { - uint16_t ch = s->Get(index); + uint16_t ch = s.Get(index); if (V8_UNLIKELY(IsASCIIUpper(ch) || ch & ~0x7F)) { return index; } @@ -168,11 +169,11 @@ const UChar* GetUCharBufferFromFlat(const String::FlatContent& flat, if (flat.IsOneByte()) { if (!*dest) { dest->reset(NewArray(length)); - CopyChars(dest->get(), flat.ToOneByteVector().start(), length); + CopyChars(dest->get(), flat.ToOneByteVector().begin(), length); } return reinterpret_cast(dest->get()); } else { - return reinterpret_cast(flat.ToUC16Vector().start()); + return reinterpret_cast(flat.ToUC16Vector().begin()); } } @@ -192,15 +193,23 @@ const uint8_t* Intl::ToLatin1LowerTable() { return &kToLower[0]; } icu::UnicodeString Intl::ToICUUnicodeString(Isolate* isolate, Handle string) { - string = String::Flatten(isolate, string); - { - DisallowHeapAllocation no_gc; - std::unique_ptr sap; - return icu::UnicodeString( - GetUCharBufferFromFlat(string->GetFlatContent(no_gc), &sap, - string->length()), - string->length()); + DCHECK(string->IsFlat()); + DisallowHeapAllocation no_gc; + std::unique_ptr sap; + // Short one-byte strings can be expanded on the stack to avoid allocating a + // temporary buffer. 
+ constexpr int kShortStringSize = 80; + UChar short_string_buffer[kShortStringSize]; + const UChar* uchar_buffer = nullptr; + const String::FlatContent& flat = string->GetFlatContent(no_gc); + int32_t length = string->length(); + if (flat.IsOneByte() && length <= kShortStringSize) { + CopyChars(short_string_buffer, flat.ToOneByteVector().begin(), length); + uchar_buffer = short_string_buffer; + } else { + uchar_buffer = GetUCharBufferFromFlat(flat, &sap, length); } + return icu::UnicodeString(uchar_buffer, length); } namespace { @@ -254,19 +263,19 @@ MaybeHandle LocaleConvertCase(Isolate* isolate, Handle s, // one-byte sliced string with a two-byte parent string. // Called from TF builtins. String Intl::ConvertOneByteToLower(String src, String dst) { - DCHECK_EQ(src->length(), dst->length()); - DCHECK(src->IsOneByteRepresentation()); - DCHECK(src->IsFlat()); - DCHECK(dst->IsSeqOneByteString()); + DCHECK_EQ(src.length(), dst.length()); + DCHECK(src.IsOneByteRepresentation()); + DCHECK(src.IsFlat()); + DCHECK(dst.IsSeqOneByteString()); DisallowHeapAllocation no_gc; - const int length = src->length(); - String::FlatContent src_flat = src->GetFlatContent(no_gc); - uint8_t* dst_data = SeqOneByteString::cast(dst)->GetChars(no_gc); + const int length = src.length(); + String::FlatContent src_flat = src.GetFlatContent(no_gc); + uint8_t* dst_data = SeqOneByteString::cast(dst).GetChars(no_gc); if (src_flat.IsOneByte()) { - const uint8_t* src_data = src_flat.ToOneByteVector().start(); + const uint8_t* src_data = src_flat.ToOneByteVector().begin(); bool has_changed_character = false; int index_to_first_unprocessed = @@ -288,7 +297,7 @@ String Intl::ConvertOneByteToLower(String src, String dst) { int index_to_first_unprocessed = FindFirstUpperOrNonAscii(src, length); if (index_to_first_unprocessed == length) return src; - const uint16_t* src_data = src_flat.ToUC16Vector().start(); + const uint16_t* src_data = src_flat.ToUC16Vector().begin(); CopyChars(dst_data, src_data, 
index_to_first_unprocessed); for (int index = index_to_first_unprocessed; index < length; ++index) { dst_data[index] = ToLatin1Lower(static_cast(src_data[index])); @@ -347,7 +356,7 @@ MaybeHandle Intl::ConvertToUpper(Isolate* isolate, Handle s) { bool has_changed_character = false; int index_to_first_unprocessed = FastAsciiConvert( reinterpret_cast(result->GetChars(no_gc)), - reinterpret_cast(src.start()), length, + reinterpret_cast(src.begin()), length, &has_changed_character); if (index_to_first_unprocessed == length) { return has_changed_character ? result : s; @@ -968,7 +977,7 @@ MaybeHandle Intl::StringLocaleCompare(Isolate* isolate, Handle constructor = Handle( JSFunction::cast( - isolate->context()->native_context()->intl_collator_function()), + isolate->context().native_context().intl_collator_function()), isolate); Handle collator; @@ -978,10 +987,9 @@ MaybeHandle Intl::StringLocaleCompare(Isolate* isolate, if (can_cache) { isolate->set_icu_object_in_cache( Isolate::ICUObjectCacheType::kDefaultCollator, - std::static_pointer_cast( - collator->icu_collator()->get())); + std::static_pointer_cast(collator->icu_collator().get())); } - icu::Collator* icu_collator = collator->icu_collator()->raw(); + icu::Collator* icu_collator = collator->icu_collator().raw(); return Intl::CompareStrings(isolate, *icu_collator, string1, string2); } @@ -992,6 +1000,21 @@ Handle Intl::CompareStrings(Isolate* isolate, Handle string2) { Factory* factory = isolate->factory(); + // Early return for identical strings. + if (string1.is_identical_to(string2)) { + return factory->NewNumberFromInt(UCollationResult::UCOL_EQUAL); + } + + // Early return for empty strings. + if (string1->length() == 0) { + return factory->NewNumberFromInt(string2->length() == 0 + ? 
UCollationResult::UCOL_EQUAL + : UCollationResult::UCOL_LESS); + } + if (string2->length() == 0) { + return factory->NewNumberFromInt(UCollationResult::UCOL_GREATER); + } + string1 = String::Flatten(isolate, string1); string2 = String::Flatten(isolate, string2); @@ -1025,9 +1048,10 @@ MaybeHandle Intl::NumberToLocaleString(Isolate* isolate, bool can_cache = locales->IsUndefined(isolate) && options->IsUndefined(isolate); if (can_cache) { - icu::NumberFormat* cached_number_format = - static_cast(isolate->get_cached_icu_object( - Isolate::ICUObjectCacheType::kDefaultNumberFormat)); + icu::number::LocalizedNumberFormatter* cached_number_format = + static_cast( + isolate->get_cached_icu_object( + Isolate::ICUObjectCacheType::kDefaultNumberFormat)); // We may use the cached icu::NumberFormat for a fast path. if (cached_number_format != nullptr) { return JSNumberFormat::FormatNumeric(isolate, *cached_number_format, @@ -1037,7 +1061,7 @@ MaybeHandle Intl::NumberToLocaleString(Isolate* isolate, Handle constructor = Handle( JSFunction::cast( - isolate->context()->native_context()->intl_number_format_function()), + isolate->context().native_context().intl_number_format_function()), isolate); Handle number_format; // 2. Let numberFormat be ? Construct(%NumberFormat%, « locales, options »). @@ -1048,13 +1072,13 @@ MaybeHandle Intl::NumberToLocaleString(Isolate* isolate, if (can_cache) { isolate->set_icu_object_in_cache( Isolate::ICUObjectCacheType::kDefaultNumberFormat, - std::static_pointer_cast( - number_format->icu_number_format()->get())); + std::static_pointer_cast( + number_format->icu_number_formatter().get())); } // Return FormatNumber(numberFormat, x). 
- icu::NumberFormat* icu_number_format = - number_format->icu_number_format()->raw(); + icu::number::LocalizedNumberFormatter* icu_number_format = + number_format->icu_number_formatter().raw(); return JSNumberFormat::FormatNumeric(isolate, *icu_number_format, numeric_obj); } @@ -1116,19 +1140,17 @@ Maybe GetNumberOption(Isolate* isolate, Handle options, } // namespace -Maybe Intl::SetNumberFormatDigitOptions(Isolate* isolate, - icu::DecimalFormat* number_format, - Handle options, - int mnfd_default, - int mxfd_default) { - CHECK_NOT_NULL(number_format); +Maybe Intl::SetNumberFormatDigitOptions( + Isolate* isolate, Handle options, int mnfd_default, + int mxfd_default) { + Intl::NumberFormatDigitOptions digit_options; // 5. Let mnid be ? GetNumberOption(options, "minimumIntegerDigits,", 1, 21, // 1). int mnid; if (!GetNumberOption(isolate, options, "minimumIntegerDigits", 1, 21, 1) .To(&mnid)) { - return Nothing(); + return Nothing(); } // 6. Let mnfd be ? GetNumberOption(options, "minimumFractionDigits", 0, 20, @@ -1137,7 +1159,7 @@ Maybe Intl::SetNumberFormatDigitOptions(Isolate* isolate, if (!GetNumberOption(isolate, options, "minimumFractionDigits", 0, 20, mnfd_default) .To(&mnfd)) { - return Nothing(); + return Nothing(); } // 7. Let mxfdActualDefault be max( mnfd, mxfdDefault ). @@ -1149,7 +1171,7 @@ Maybe Intl::SetNumberFormatDigitOptions(Isolate* isolate, if (!GetNumberOption(isolate, options, "maximumFractionDigits", mnfd, 20, mxfd_actual_default) .To(&mxfd)) { - return Nothing(); + return Nothing(); } // 9. Let mnsd be ? Get(options, "minimumSignificantDigits"). @@ -1158,7 +1180,7 @@ Maybe Intl::SetNumberFormatDigitOptions(Isolate* isolate, isolate->factory()->minimumSignificantDigits_string(); ASSIGN_RETURN_ON_EXCEPTION_VALUE( isolate, mnsd_obj, JSReceiver::GetProperty(isolate, options, mnsd_str), - Nothing()); + Nothing()); // 10. Let mxsd be ? Get(options, "maximumSignificantDigits"). 
Handle mxsd_obj; @@ -1166,45 +1188,43 @@ Maybe Intl::SetNumberFormatDigitOptions(Isolate* isolate, isolate->factory()->maximumSignificantDigits_string(); ASSIGN_RETURN_ON_EXCEPTION_VALUE( isolate, mxsd_obj, JSReceiver::GetProperty(isolate, options, mxsd_str), - Nothing()); + Nothing()); // 11. Set intlObj.[[MinimumIntegerDigits]] to mnid. - number_format->setMinimumIntegerDigits(mnid); + digit_options.minimum_integer_digits = mnid; // 12. Set intlObj.[[MinimumFractionDigits]] to mnfd. - number_format->setMinimumFractionDigits(mnfd); + digit_options.minimum_fraction_digits = mnfd; // 13. Set intlObj.[[MaximumFractionDigits]] to mxfd. - number_format->setMaximumFractionDigits(mxfd); + digit_options.maximum_fraction_digits = mxfd; - bool significant_digits_used = false; // 14. If mnsd is not undefined or mxsd is not undefined, then if (!mnsd_obj->IsUndefined(isolate) || !mxsd_obj->IsUndefined(isolate)) { // 14. a. Let mnsd be ? DefaultNumberOption(mnsd, 1, 21, 1). int mnsd; if (!DefaultNumberOption(isolate, mnsd_obj, 1, 21, 1, mnsd_str).To(&mnsd)) { - return Nothing(); + return Nothing(); } // 14. b. Let mxsd be ? DefaultNumberOption(mxsd, mnsd, 21, 21). int mxsd; if (!DefaultNumberOption(isolate, mxsd_obj, mnsd, 21, 21, mxsd_str) .To(&mxsd)) { - return Nothing(); + return Nothing(); } - significant_digits_used = true; - // 14. c. Set intlObj.[[MinimumSignificantDigits]] to mnsd. - number_format->setMinimumSignificantDigits(mnsd); + digit_options.minimum_significant_digits = mnsd; // 14. d. Set intlObj.[[MaximumSignificantDigits]] to mxsd. 
- number_format->setMaximumSignificantDigits(mxsd); + digit_options.maximum_significant_digits = mxsd; + } else { + digit_options.minimum_significant_digits = 0; + digit_options.maximum_significant_digits = 0; } - number_format->setSignificantDigitsUsed(significant_digits_used); - number_format->setRoundingMode(icu::DecimalFormat::kRoundHalfUp); - return Just(true); + return Just(digit_options); } namespace { @@ -1447,36 +1467,47 @@ MaybeHandle Intl::SupportedLocalesOf( } namespace { + template bool IsValidExtension(const icu::Locale& locale, const char* key, const std::string& value) { + const char* legacy_type = uloc_toLegacyType(key, value.c_str()); + if (legacy_type == nullptr) { + return false; + } UErrorCode status = U_ZERO_ERROR; std::unique_ptr enumeration( T::getKeywordValuesForLocale(key, icu::Locale(locale.getBaseName()), false, status)); - if (U_SUCCESS(status)) { - int32_t length; - std::string legacy_type(uloc_toLegacyType(key, value.c_str())); - for (const char* item = enumeration->next(&length, status); item != nullptr; - item = enumeration->next(&length, status)) { - if (U_SUCCESS(status) && legacy_type == item) { - return true; - } + if (U_FAILURE(status)) { + return false; + } + int32_t length; + for (const char* item = enumeration->next(&length, status); + U_SUCCESS(status) && item != nullptr; + item = enumeration->next(&length, status)) { + if (strcmp(legacy_type, item) == 0) { + return true; } } return false; } -bool IsValidCalendar(const icu::Locale& locale, const std::string& value) { - return IsValidExtension(locale, "calendar", value); -} - bool IsValidCollation(const icu::Locale& locale, const std::string& value) { std::set invalid_values = {"standard", "search"}; if (invalid_values.find(value) != invalid_values.end()) return false; return IsValidExtension(locale, "collation", value); } +} // namespace + +bool Intl::IsValidCalendar(const icu::Locale& locale, + const std::string& value) { + return IsValidExtension(locale, "calendar", 
value); +} + +namespace { + bool IsValidNumberingSystem(const std::string& value) { std::set invalid_values = {"native", "traditio", "finance"}; if (invalid_values.find(value) != invalid_values.end()) return false; @@ -1527,7 +1558,7 @@ std::map LookupAndValidateUnicodeExtensions( bool is_valid_value = false; // 8.h.ii.1.a If keyLocaleData contains requestedValue, then if (strcmp("ca", bcp47_key) == 0) { - is_valid_value = IsValidCalendar(*icu_locale, bcp47_value); + is_valid_value = Intl::IsValidCalendar(*icu_locale, bcp47_value); } else if (strcmp("co", bcp47_key) == 0) { is_valid_value = IsValidCollation(*icu_locale, bcp47_value); } else if (strcmp("hc", bcp47_key) == 0) { @@ -1649,6 +1680,7 @@ Intl::ResolvedLocale Intl::ResolveLocale( Managed Intl::SetTextToBreakIterator( Isolate* isolate, Handle text, icu::BreakIterator* break_iterator) { + text = String::Flatten(isolate, text); icu::UnicodeString* u_text = (icu::UnicodeString*)(Intl::ToICUUnicodeString(isolate, text).clone()); @@ -1858,6 +1890,29 @@ Maybe Intl::GetLocaleMatcher(Isolate* isolate, Intl::MatcherOption::kLookup); } +Maybe Intl::GetNumberingSystem(Isolate* isolate, + Handle options, + const char* method, + std::unique_ptr* result) { + const std::vector empty_values = {}; + Maybe maybe = Intl::GetStringOption(isolate, options, "numberingSystem", + empty_values, method, result); + MAYBE_RETURN(maybe, Nothing()); + if (maybe.FromJust() && *result != nullptr) { + if (!IsValidNumberingSystem(result->get())) { + THROW_NEW_ERROR_RETURN_VALUE( + isolate, + NewRangeError( + MessageTemplate::kInvalid, + isolate->factory()->numberingSystem_string(), + isolate->factory()->NewStringFromAsciiChecked(result->get())), + Nothing()); + } + return Just(true); + } + return Just(false); +} + Intl::HourCycle Intl::ToHourCycle(const std::string& hc) { if (hc == "h11") return Intl::HourCycle::kH11; if (hc == "h12") return Intl::HourCycle::kH12; @@ -1928,11 +1983,27 @@ Handle Intl::NumberFieldToType(Isolate* isolate, 
UNREACHABLE(); return Handle(); + case UNUM_COMPACT_FIELD: + return isolate->factory()->compact_string(); + case UNUM_MEASURE_UNIT_FIELD: + return isolate->factory()->unit_string(); + default: UNREACHABLE(); return Handle(); } } +// A helper function to convert the FormattedValue for several Intl objects. +MaybeHandle Intl::FormattedToString( + Isolate* isolate, const icu::FormattedValue& formatted) { + UErrorCode status = U_ZERO_ERROR; + icu::UnicodeString result = formatted.toString(status); + if (U_FAILURE(status)) { + THROW_NEW_ERROR(isolate, NewTypeError(MessageTemplate::kIcuError), String); + } + return Intl::ToString(isolate, result); +} + } // namespace internal } // namespace v8 diff --git a/deps/v8/src/objects/intl-objects.h b/deps/v8/src/objects/intl-objects.h index 5adb6fa2c8..1274fa0549 100644 --- a/deps/v8/src/objects/intl-objects.h +++ b/deps/v8/src/objects/intl-objects.h @@ -14,9 +14,9 @@ #include #include "src/base/timezone-cache.h" -#include "src/contexts.h" -#include "src/objects.h" +#include "src/objects/contexts.h" #include "src/objects/managed.h" +#include "src/objects/objects.h" #include "unicode/locid.h" #include "unicode/uversion.h" @@ -25,10 +25,10 @@ namespace U_ICU_NAMESPACE { class BreakIterator; class Collator; -class DecimalFormat; +class FormattedValue; class SimpleDateFormat; class UnicodeString; -} +} // namespace U_ICU_NAMESPACE namespace v8 { namespace internal { @@ -171,9 +171,16 @@ class Intl { Handle options); // ecma402/#sec-setnfdigitoptions - V8_WARN_UNUSED_RESULT static Maybe SetNumberFormatDigitOptions( - Isolate* isolate, icu::DecimalFormat* number_format, - Handle options, int mnfd_default, int mxfd_default); + struct NumberFormatDigitOptions { + int minimum_integer_digits; + int minimum_fraction_digits; + int maximum_fraction_digits; + int minimum_significant_digits; + int maximum_significant_digits; + }; + V8_WARN_UNUSED_RESULT static Maybe + SetNumberFormatDigitOptions(Isolate* isolate, Handle options, + int 
mnfd_default, int mxfd_default); static icu::Locale CreateICULocale(const std::string& bcp47_locale); @@ -186,6 +193,10 @@ class Intl { Isolate* isolate, const icu::UnicodeString& string, int32_t begin, int32_t end); + // Helper function to convert a FormattedValue to String + V8_WARN_UNUSED_RESULT static MaybeHandle FormattedToString( + Isolate* isolate, const icu::FormattedValue& formatted); + // Helper function to convert number field id to type string. static Handle NumberFieldToType(Isolate* isolate, Handle numeric_obj, @@ -244,6 +255,15 @@ class Intl { V8_WARN_UNUSED_RESULT static Maybe GetLocaleMatcher( Isolate* isolate, Handle options, const char* method); + // Shared function to read the "numberingSystem" option. + V8_WARN_UNUSED_RESULT static Maybe GetNumberingSystem( + Isolate* isolate, Handle options, const char* method, + std::unique_ptr* result); + + // Check the calendar is valid or not for that locale. + static bool IsValidCalendar(const icu::Locale& locale, + const std::string& value); + struct ResolvedLocale { std::string locale; icu::Locale icu_locale; diff --git a/deps/v8/src/objects/intl-objects.tq b/deps/v8/src/objects/intl-objects.tq new file mode 100644 index 0000000000..67d8537feb --- /dev/null +++ b/deps/v8/src/objects/intl-objects.tq @@ -0,0 +1,64 @@ +// Copyright 2019 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#include 'src/objects/js-number-format.h' +#include 'src/objects/js-objects.h' +#include 'src/objects/js-plural-rules.h' +#include 'src/objects/js-relative-time-format.h' +#include 'src/objects/js-date-time-format.h' +#include 'src/objects/js-list-format.h' +#include 'src/objects/js-locale.h' +#include 'src/objects/js-segment-iterator.h' +#include 'src/objects/js-segmenter.h' + +extern class JSDateTimeFormat extends JSObject { + icu_locale: Foreign; // Managed + icu_simple_date_format: Foreign; // Managed + icu_date_interval_format: Foreign; // Managed + bound_format: JSFunction | Undefined; + flags: Smi; +} + +extern class JSListFormat extends JSObject { + locale: String; + icu_formatter: Foreign; // Managed + flags: Smi; +} + +extern class JSNumberFormat extends JSObject { + locale: String; + icu_number_formatter: + Foreign; // Managed + bound_format: JSFunction | Undefined; + flags: Smi; +} + +extern class JSPluralRules extends JSObject { + locale: String; + flags: Smi; + icu_plural_rules: Foreign; // Managed + icu_decimal_format: Foreign; // Managed +} + +extern class JSRelativeTimeFormat extends JSObject { + locale: String; + icu_formatter: Foreign; // Managed + flags: Smi; +} + +extern class JSLocale extends JSObject { + icu_locale: Foreign; // Managed +} + +extern class JSSegmenter extends JSObject { + locale: String; + icu_break_iterator: Foreign; // Managed + flags: Smi; +} + +extern class JSSegmentIterator extends JSObject { + icu_break_iterator: Foreign; // Managed + unicode_string: Foreign; // Managed + flags: Smi; +} diff --git a/deps/v8/src/objects/js-array-buffer-inl.h b/deps/v8/src/objects/js-array-buffer-inl.h index 39677093c2..061fec10f7 100644 --- a/deps/v8/src/objects/js-array-buffer-inl.h +++ b/deps/v8/src/objects/js-array-buffer-inl.h @@ -8,8 +8,8 @@ #include "src/objects/js-array-buffer.h" #include "src/heap/heap-write-barrier-inl.h" -#include "src/objects-inl.h" #include "src/objects/js-objects-inl.h" +#include "src/objects/objects-inl.h" 
#include "src/wasm/wasm-engine.h" // Has to be the last include (doesn't have include guards): @@ -29,21 +29,19 @@ CAST_ACCESSOR(JSTypedArray) CAST_ACCESSOR(JSDataView) size_t JSArrayBuffer::byte_length() const { - return READ_UINTPTR_FIELD(*this, kByteLengthOffset); + return ReadField(kByteLengthOffset); } void JSArrayBuffer::set_byte_length(size_t value) { - WRITE_UINTPTR_FIELD(*this, kByteLengthOffset, value); + WriteField(kByteLengthOffset, value); } void* JSArrayBuffer::backing_store() const { - intptr_t ptr = READ_INTPTR_FIELD(*this, kBackingStoreOffset); - return reinterpret_cast(ptr); + return reinterpret_cast(ReadField
(kBackingStoreOffset)); } void JSArrayBuffer::set_backing_store(void* value, WriteBarrierMode mode) { - intptr_t ptr = reinterpret_cast(value); - WRITE_INTPTR_FIELD(*this, kBackingStoreOffset, ptr); + WriteField
(kBackingStoreOffset, reinterpret_cast
(value)); } size_t JSArrayBuffer::allocation_length() const { @@ -93,11 +91,11 @@ void JSArrayBuffer::clear_padding() { } void JSArrayBuffer::set_bit_field(uint32_t bits) { - WRITE_UINT32_FIELD(*this, kBitFieldOffset, bits); + WriteField(kBitFieldOffset, bits); } uint32_t JSArrayBuffer::bit_field() const { - return READ_UINT32_FIELD(*this, kBitFieldOffset); + return ReadField(kBitFieldOffset); } // |bit_field| fields. @@ -111,49 +109,58 @@ BIT_FIELD_ACCESSORS(JSArrayBuffer, bit_field, is_shared, JSArrayBuffer::IsSharedBit) size_t JSArrayBufferView::byte_offset() const { - return READ_UINTPTR_FIELD(*this, kByteOffsetOffset); + return ReadField(kByteOffsetOffset); } void JSArrayBufferView::set_byte_offset(size_t value) { - WRITE_UINTPTR_FIELD(*this, kByteOffsetOffset, value); + WriteField(kByteOffsetOffset, value); } size_t JSArrayBufferView::byte_length() const { - return READ_UINTPTR_FIELD(*this, kByteLengthOffset); + return ReadField(kByteLengthOffset); } void JSArrayBufferView::set_byte_length(size_t value) { - WRITE_UINTPTR_FIELD(*this, kByteLengthOffset, value); + WriteField(kByteLengthOffset, value); } ACCESSORS(JSArrayBufferView, buffer, Object, kBufferOffset) bool JSArrayBufferView::WasDetached() const { - return JSArrayBuffer::cast(buffer())->was_detached(); + return JSArrayBuffer::cast(buffer()).was_detached(); } -Object JSTypedArray::length() const { return READ_FIELD(*this, kLengthOffset); } +size_t JSTypedArray::length() const { return ReadField(kLengthOffset); } -size_t JSTypedArray::length_value() const { - double val = length()->Number(); - DCHECK_LE(val, kMaxSafeInteger); // 2^53-1 - DCHECK_GE(val, -kMaxSafeInteger); // -2^53+1 - DCHECK_LE(val, std::numeric_limits::max()); - DCHECK_GE(val, std::numeric_limits::min()); - return static_cast(val); +void JSTypedArray::set_length(size_t value) { + WriteField(kLengthOffset, value); } -void JSTypedArray::set_length(Object value, WriteBarrierMode mode) { - WRITE_FIELD(*this, kLengthOffset, value); - 
CONDITIONAL_WRITE_BARRIER(*this, kLengthOffset, value, mode); +void* JSTypedArray::external_pointer() const { + return reinterpret_cast(ReadField
(kExternalPointerOffset)); +} + +void JSTypedArray::set_external_pointer(void* value) { + WriteField
(kExternalPointerOffset, reinterpret_cast
(value)); +} + +ACCESSORS(JSTypedArray, base_pointer, Object, kBasePointerOffset) + +void* JSTypedArray::DataPtr() { + return reinterpret_cast( + base_pointer().ptr() + reinterpret_cast(external_pointer())); } bool JSTypedArray::is_on_heap() const { DisallowHeapAllocation no_gc; // Checking that buffer()->backing_store() is not nullptr is not sufficient; // it will be nullptr when byte_length is 0 as well. - FixedTypedArrayBase fta = FixedTypedArrayBase::cast(elements()); - return fta->base_pointer()->ptr() == fta.ptr(); + return base_pointer().ptr() == elements().ptr(); +} + +// static +void* JSTypedArray::ExternalPointerForOnHeapArray() { + return reinterpret_cast(ByteArray::kHeaderSize - kHeapObjectTag); } // static @@ -178,9 +185,13 @@ MaybeHandle JSTypedArray::Validate(Isolate* isolate, return array; } -#ifdef VERIFY_HEAP -ACCESSORS(JSTypedArray, raw_length, Object, kLengthOffset) -#endif +void* JSDataView::data_pointer() const { + return reinterpret_cast(ReadField
(kDataPointerOffset)); +} + +void JSDataView::set_data_pointer(void* value) { + WriteField
(kDataPointerOffset, reinterpret_cast
(value)); +} } // namespace internal } // namespace v8 diff --git a/deps/v8/src/objects/js-array-buffer.cc b/deps/v8/src/objects/js-array-buffer.cc index f96ae7e752..a506920f95 100644 --- a/deps/v8/src/objects/js-array-buffer.cc +++ b/deps/v8/src/objects/js-array-buffer.cc @@ -5,8 +5,8 @@ #include "src/objects/js-array-buffer.h" #include "src/objects/js-array-buffer-inl.h" -#include "src/counters.h" -#include "src/property-descriptor.h" +#include "src/logging/counters.h" +#include "src/objects/property-descriptor.h" namespace v8 { namespace internal { @@ -69,7 +69,7 @@ void JSArrayBuffer::FreeBackingStore(Isolate* isolate, Allocation allocation) { if (allocation.is_wasm_memory) { wasm::WasmMemoryTracker* memory_tracker = isolate->wasm_engine()->memory_tracker(); - memory_tracker->FreeMemoryIfIsWasmMemory(isolate, allocation.backing_store); + memory_tracker->FreeWasmMemory(isolate, allocation.backing_store); } else { isolate->array_buffer_allocator()->Free(allocation.allocation_base, allocation.length); @@ -150,10 +150,7 @@ Handle JSTypedArray::MaterializeArrayBuffer( Isolate* isolate = typed_array->GetIsolate(); - DCHECK(IsFixedTypedArrayElementsKind(typed_array->GetElementsKind())); - - Handle fixed_typed_array( - FixedTypedArrayBase::cast(typed_array->elements()), isolate); + DCHECK(IsTypedArrayElementsKind(typed_array->GetElementsKind())); Handle buffer(JSArrayBuffer::cast(typed_array->buffer()), isolate); @@ -162,14 +159,13 @@ Handle JSTypedArray::MaterializeArrayBuffer( void* backing_store = isolate->array_buffer_allocator()->AllocateUninitialized( - fixed_typed_array->DataSize()); + typed_array->byte_length()); if (backing_store == nullptr) { isolate->heap()->FatalProcessOutOfMemory( "JSTypedArray::MaterializeArrayBuffer"); } buffer->set_is_external(false); - DCHECK_EQ(buffer->byte_length(), - static_cast(fixed_typed_array->DataSize())); + DCHECK_EQ(buffer->byte_length(), typed_array->byte_length()); // Initialize backing store at last to avoid handling of 
|JSArrayBuffers| that // are currently being constructed in the |ArrayBufferTracker|. The // registration method below handles the case of registering a buffer that has @@ -177,14 +173,12 @@ Handle JSTypedArray::MaterializeArrayBuffer( buffer->set_backing_store(backing_store); // RegisterNewArrayBuffer expects a valid length for adjusting counters. isolate->heap()->RegisterNewArrayBuffer(*buffer); - memcpy(buffer->backing_store(), fixed_typed_array->DataPtr(), - fixed_typed_array->DataSize()); - Handle new_elements = - isolate->factory()->NewFixedTypedArrayWithExternalPointer( - fixed_typed_array->length(), typed_array->type(), - static_cast(buffer->backing_store())); - - typed_array->set_elements(*new_elements); + memcpy(buffer->backing_store(), typed_array->DataPtr(), + typed_array->byte_length()); + + typed_array->set_elements(ReadOnlyRoots(isolate).empty_byte_array()); + typed_array->set_external_pointer(backing_store); + typed_array->set_base_pointer(Smi::kZero); DCHECK(!typed_array->is_on_heap()); return buffer; @@ -226,7 +220,7 @@ Maybe JSTypedArray::DefineOwnProperty(Isolate* isolate, NewTypeError(MessageTemplate::kInvalidTypedArrayIndex)); } // 3b iv. Let length be O.[[ArrayLength]]. - size_t length = o->length_value(); + size_t length = o->length(); // 3b v. If numericIndex ≥ length, return false. 
if (o->WasDetached() || index >= length) { RETURN_FAILURE(isolate, GetShouldThrow(isolate, should_throw), @@ -271,13 +265,13 @@ Maybe JSTypedArray::DefineOwnProperty(Isolate* isolate, } ExternalArrayType JSTypedArray::type() { - switch (elements()->map()->instance_type()) { -#define INSTANCE_TYPE_TO_ARRAY_TYPE(Type, type, TYPE, ctype) \ - case FIXED_##TYPE##_ARRAY_TYPE: \ + switch (map().elements_kind()) { +#define ELEMENTS_KIND_TO_ARRAY_TYPE(Type, type, TYPE, ctype) \ + case TYPE##_ELEMENTS: \ return kExternal##Type##Array; - TYPED_ARRAYS(INSTANCE_TYPE_TO_ARRAY_TYPE) -#undef INSTANCE_TYPE_TO_ARRAY_TYPE + TYPED_ARRAYS(ELEMENTS_KIND_TO_ARRAY_TYPE) +#undef ELEMENTS_KIND_TO_ARRAY_TYPE default: UNREACHABLE(); @@ -285,13 +279,13 @@ ExternalArrayType JSTypedArray::type() { } size_t JSTypedArray::element_size() { - switch (elements()->map()->instance_type()) { -#define INSTANCE_TYPE_TO_ELEMENT_SIZE(Type, type, TYPE, ctype) \ - case FIXED_##TYPE##_ARRAY_TYPE: \ + switch (map().elements_kind()) { +#define ELEMENTS_KIND_TO_ELEMENT_SIZE(Type, type, TYPE, ctype) \ + case TYPE##_ELEMENTS: \ return sizeof(ctype); - TYPED_ARRAYS(INSTANCE_TYPE_TO_ELEMENT_SIZE) -#undef INSTANCE_TYPE_TO_ELEMENT_SIZE + TYPED_ARRAYS(ELEMENTS_KIND_TO_ELEMENT_SIZE) +#undef ELEMENTS_KIND_TO_ELEMENT_SIZE default: UNREACHABLE(); diff --git a/deps/v8/src/objects/js-array-buffer.h b/deps/v8/src/objects/js-array-buffer.h index b77d1c9877..b22001f04a 100644 --- a/deps/v8/src/objects/js-array-buffer.h +++ b/deps/v8/src/objects/js-array-buffer.h @@ -172,16 +172,26 @@ class JSArrayBufferView : public JSObject { JS_ARRAY_BUFFER_VIEW_FIELDS) #undef JS_ARRAY_BUFFER_VIEW_FIELDS - class BodyDescriptor; + STATIC_ASSERT(IsAligned(kByteOffsetOffset, kUIntptrSize)); + STATIC_ASSERT(IsAligned(kByteLengthOffset, kUIntptrSize)); OBJECT_CONSTRUCTORS(JSArrayBufferView, JSObject); }; class JSTypedArray : public JSArrayBufferView { public: + // TODO(v8:4153): This should be equal to JSArrayBuffer::kMaxByteLength + // eventually. 
+ static constexpr size_t kMaxLength = v8::TypedArray::kMaxLength; + // [length]: length of typed array in elements. - DECL_ACCESSORS(length, Object) - inline size_t length_value() const; + DECL_PRIMITIVE_ACCESSORS(length, size_t) + + // [external_pointer]: TODO(v8:4153) + DECL_PRIMITIVE_ACCESSORS(external_pointer, void*) + + // [base_pointer]: TODO(v8:4153) + DECL_ACCESSORS(base_pointer, Object) // ES6 9.4.5.3 V8_WARN_UNUSED_RESULT static Maybe DefineOwnProperty( @@ -195,9 +205,14 @@ class JSTypedArray : public JSArrayBufferView { V8_EXPORT_PRIVATE Handle GetBuffer(); + // Use with care: returns raw pointer into heap. + inline void* DataPtr(); + // Whether the buffer's backing store is on-heap or off-heap. inline bool is_on_heap() const; + static inline void* ExternalPointerForOnHeapArray(); + static inline MaybeHandle Validate(Isolate* isolate, Handle receiver, const char* method_name); @@ -207,32 +222,39 @@ class JSTypedArray : public JSArrayBufferView { DECL_VERIFIER(JSTypedArray) // Layout description. -#define JS_TYPED_ARRAY_FIELDS(V) \ - /* Raw data fields. */ \ - V(kLengthOffset, kTaggedSize) \ - /* Header size. */ \ +#define JS_TYPED_ARRAY_FIELDS(V) \ + /* Raw data fields. */ \ + V(kLengthOffset, kUIntptrSize) \ + V(kExternalPointerOffset, kSystemPointerSize) \ + V(kBasePointerOffset, kTaggedSize) \ + /* Header size. 
*/ \ V(kHeaderSize, 0) DEFINE_FIELD_OFFSET_CONSTANTS(JSArrayBufferView::kHeaderSize, JS_TYPED_ARRAY_FIELDS) #undef JS_TYPED_ARRAY_FIELDS + STATIC_ASSERT(IsAligned(kLengthOffset, kUIntptrSize)); + STATIC_ASSERT(IsAligned(kExternalPointerOffset, kSystemPointerSize)); + static const int kSizeWithEmbedderFields = kHeaderSize + v8::ArrayBufferView::kEmbedderFieldCount * kEmbedderDataSlotSize; + class BodyDescriptor; + private: static Handle MaterializeArrayBuffer( Handle typed_array); -#ifdef VERIFY_HEAP - DECL_ACCESSORS(raw_length, Object) -#endif OBJECT_CONSTRUCTORS(JSTypedArray, JSArrayBufferView); }; class JSDataView : public JSArrayBufferView { public: + // [data_pointer]: pointer to the actual data. + DECL_PRIMITIVE_ACCESSORS(data_pointer, void*) + DECL_CAST(JSDataView) // Dispatched behavior. @@ -240,10 +262,24 @@ class JSDataView : public JSArrayBufferView { DECL_VERIFIER(JSDataView) // Layout description. +#define JS_DATA_VIEW_FIELDS(V) \ + /* Raw data fields. */ \ + V(kDataPointerOffset, kIntptrSize) \ + /* Header size. 
*/ \ + V(kHeaderSize, 0) + + DEFINE_FIELD_OFFSET_CONSTANTS(JSArrayBufferView::kHeaderSize, + JS_DATA_VIEW_FIELDS) +#undef JS_DATA_VIEW_FIELDS + + STATIC_ASSERT(IsAligned(kDataPointerOffset, kUIntptrSize)); + static const int kSizeWithEmbedderFields = kHeaderSize + v8::ArrayBufferView::kEmbedderFieldCount * kEmbedderDataSlotSize; + class BodyDescriptor; + OBJECT_CONSTRUCTORS(JSDataView, JSArrayBufferView); }; diff --git a/deps/v8/src/objects/js-array-inl.h b/deps/v8/src/objects/js-array-inl.h index 31c8735f62..335fabba86 100644 --- a/deps/v8/src/objects/js-array-inl.h +++ b/deps/v8/src/objects/js-array-inl.h @@ -7,7 +7,7 @@ #include "src/objects/js-array.h" -#include "src/objects-inl.h" // Needed for write barriers +#include "src/objects/objects-inl.h" // Needed for write barriers // Has to be the last include (doesn't have include guards): #include "src/objects/object-macros.h" @@ -33,8 +33,8 @@ bool JSArray::SetLengthWouldNormalize(Heap* heap, uint32_t new_length) { } bool JSArray::AllowsSetLength() { - bool result = elements()->IsFixedArray() || elements()->IsFixedDoubleArray(); - DCHECK(result == !HasFixedTypedArrayElements()); + bool result = elements().IsFixedArray() || elements().IsFixedDoubleArray(); + DCHECK(result == !HasTypedArrayElements()); return result; } @@ -55,7 +55,7 @@ void JSArray::SetContent(Handle array, } bool JSArray::HasArrayPrototype(Isolate* isolate) { - return map()->prototype() == *isolate->initial_array_prototype(); + return map().prototype() == *isolate->initial_array_prototype(); } ACCESSORS(JSArrayIterator, iterated_object, Object, kIteratedObjectOffset) @@ -63,7 +63,7 @@ ACCESSORS(JSArrayIterator, next_index, Object, kNextIndexOffset) IterationKind JSArrayIterator::kind() const { return static_cast( - Smi::cast(READ_FIELD(*this, kKindOffset))->value()); + Smi::cast(READ_FIELD(*this, kKindOffset)).value()); } void JSArrayIterator::set_kind(IterationKind kind) { diff --git a/deps/v8/src/objects/js-array.h 
b/deps/v8/src/objects/js-array.h index 23d62c810e..4bc296e31e 100644 --- a/deps/v8/src/objects/js-array.h +++ b/deps/v8/src/objects/js-array.h @@ -8,7 +8,7 @@ #include "src/objects/allocation-site.h" #include "src/objects/fixed-array.h" #include "src/objects/js-objects.h" -#include "torque-generated/class-definitions-from-dsl.h" +#include "torque-generated/field-offsets-tq.h" // Has to be the last include (doesn't have include guards): #include "src/objects/object-macros.h" diff --git a/deps/v8/src/objects/js-break-iterator-inl.h b/deps/v8/src/objects/js-break-iterator-inl.h index 7c22be25f6..177d9d352b 100644 --- a/deps/v8/src/objects/js-break-iterator-inl.h +++ b/deps/v8/src/objects/js-break-iterator-inl.h @@ -9,8 +9,8 @@ #ifndef V8_OBJECTS_JS_BREAK_ITERATOR_INL_H_ #define V8_OBJECTS_JS_BREAK_ITERATOR_INL_H_ -#include "src/objects-inl.h" #include "src/objects/js-break-iterator.h" +#include "src/objects/objects-inl.h" // Has to be the last include (doesn't have include guards): #include "src/objects/object-macros.h" diff --git a/deps/v8/src/objects/js-break-iterator.cc b/deps/v8/src/objects/js-break-iterator.cc index 4516b34aac..4879fb41a4 100644 --- a/deps/v8/src/objects/js-break-iterator.cc +++ b/deps/v8/src/objects/js-break-iterator.cc @@ -124,7 +124,7 @@ void JSV8BreakIterator::AdoptText( Isolate* isolate, Handle break_iterator_holder, Handle text) { icu::BreakIterator* break_iterator = - break_iterator_holder->break_iterator()->raw(); + break_iterator_holder->break_iterator().raw(); CHECK_NOT_NULL(break_iterator); Managed unicode_string = Intl::SetTextToBreakIterator(isolate, text, break_iterator); @@ -149,24 +149,24 @@ Handle JSV8BreakIterator::TypeAsString() const { Handle JSV8BreakIterator::Current( Isolate* isolate, Handle break_iterator) { return isolate->factory()->NewNumberFromInt( - break_iterator->break_iterator()->raw()->current()); + break_iterator->break_iterator().raw()->current()); } Handle JSV8BreakIterator::First( Isolate* isolate, Handle 
break_iterator) { return isolate->factory()->NewNumberFromInt( - break_iterator->break_iterator()->raw()->first()); + break_iterator->break_iterator().raw()->first()); } Handle JSV8BreakIterator::Next( Isolate* isolate, Handle break_iterator) { return isolate->factory()->NewNumberFromInt( - break_iterator->break_iterator()->raw()->next()); + break_iterator->break_iterator().raw()->next()); } String JSV8BreakIterator::BreakType(Isolate* isolate, Handle break_iterator) { - int32_t status = break_iterator->break_iterator()->raw()->getRuleStatus(); + int32_t status = break_iterator->break_iterator().raw()->getRuleStatus(); // Keep return values in sync with JavaScript BreakType enum. if (status >= UBRK_WORD_NONE && status < UBRK_WORD_NONE_LIMIT) { return ReadOnlyRoots(isolate).none_string(); diff --git a/deps/v8/src/objects/js-break-iterator.h b/deps/v8/src/objects/js-break-iterator.h index 3eff347485..fe94c177c4 100644 --- a/deps/v8/src/objects/js-break-iterator.h +++ b/deps/v8/src/objects/js-break-iterator.h @@ -12,9 +12,9 @@ #include #include -#include "src/objects.h" #include "src/objects/intl-objects.h" #include "src/objects/managed.h" +#include "src/objects/objects.h" // Has to be the last include (doesn't have include guards): #include "src/objects/object-macros.h" diff --git a/deps/v8/src/objects/js-collator-inl.h b/deps/v8/src/objects/js-collator-inl.h index b6fa239c31..e82351993d 100644 --- a/deps/v8/src/objects/js-collator-inl.h +++ b/deps/v8/src/objects/js-collator-inl.h @@ -9,8 +9,8 @@ #ifndef V8_OBJECTS_JS_COLLATOR_INL_H_ #define V8_OBJECTS_JS_COLLATOR_INL_H_ -#include "src/objects-inl.h" #include "src/objects/js-collator.h" +#include "src/objects/objects-inl.h" // Has to be the last include (doesn't have include guards): #include "src/objects/object-macros.h" diff --git a/deps/v8/src/objects/js-collator.cc b/deps/v8/src/objects/js-collator.cc index b75468c6f3..4a1e857403 100644 --- a/deps/v8/src/objects/js-collator.cc +++ 
b/deps/v8/src/objects/js-collator.cc @@ -8,9 +8,9 @@ #include "src/objects/js-collator.h" -#include "src/isolate.h" -#include "src/objects-inl.h" +#include "src/execution/isolate.h" #include "src/objects/js-collator-inl.h" +#include "src/objects/objects-inl.h" #include "unicode/coll.h" #include "unicode/locid.h" #include "unicode/strenum.h" @@ -68,7 +68,7 @@ Handle JSCollator::ResolvedOptions(Isolate* isolate, Handle options = isolate->factory()->NewJSObject(isolate->object_function()); - icu::Collator* icu_collator = collator->icu_collator()->raw(); + icu::Collator* icu_collator = collator->icu_collator().raw(); CHECK_NOT_NULL(icu_collator); UErrorCode status = U_ZERO_ERROR; diff --git a/deps/v8/src/objects/js-collator.h b/deps/v8/src/objects/js-collator.h index e5d223aa24..2bedbf811a 100644 --- a/deps/v8/src/objects/js-collator.h +++ b/deps/v8/src/objects/js-collator.h @@ -12,12 +12,12 @@ #include #include +#include "src/execution/isolate.h" #include "src/heap/factory.h" -#include "src/isolate.h" -#include "src/objects.h" #include "src/objects/intl-objects.h" #include "src/objects/js-objects.h" #include "src/objects/managed.h" +#include "src/objects/objects.h" // Has to be the last include (doesn't have include guards): #include "src/objects/object-macros.h" diff --git a/deps/v8/src/objects/js-collection-inl.h b/deps/v8/src/objects/js-collection-inl.h index 78b6cc5db3..fb0cf1652e 100644 --- a/deps/v8/src/objects/js-collection-inl.h +++ b/deps/v8/src/objects/js-collection-inl.h @@ -8,10 +8,10 @@ #include "src/objects/js-collection.h" #include "src/heap/heap-write-barrier-inl.h" -#include "src/objects-inl.h" #include "src/objects/heap-object-inl.h" +#include "src/objects/objects-inl.h" #include "src/objects/ordered-hash-table-inl.h" -#include "src/roots-inl.h" +#include "src/roots/roots-inl.h" // Has to be the last include (doesn't have include guards): #include "src/objects/object-macros.h" @@ -63,8 +63,8 @@ CAST_ACCESSOR(JSWeakSet) Object 
JSMapIterator::CurrentValue() { OrderedHashMap table = OrderedHashMap::cast(this->table()); int index = Smi::ToInt(this->index()); - Object value = table->ValueAt(index); - DCHECK(!value->IsTheHole()); + Object value = table.ValueAt(index); + DCHECK(!value.IsTheHole()); return value; } diff --git a/deps/v8/src/objects/js-collection-iterator.h b/deps/v8/src/objects/js-collection-iterator.h index f25753738b..4952f04a72 100644 --- a/deps/v8/src/objects/js-collection-iterator.h +++ b/deps/v8/src/objects/js-collection-iterator.h @@ -5,9 +5,9 @@ #ifndef V8_OBJECTS_JS_COLLECTION_ITERATOR_H_ #define V8_OBJECTS_JS_COLLECTION_ITERATOR_H_ -#include "src/globals.h" -#include "src/objects.h" +#include "src/common/globals.h" #include "src/objects/js-objects.h" +#include "src/objects/objects.h" #include "src/objects/smi.h" // Has to be the last include (doesn't have include guards): diff --git a/deps/v8/src/objects/js-collection.h b/deps/v8/src/objects/js-collection.h index 0450de8fb1..6dfde352ca 100644 --- a/deps/v8/src/objects/js-collection.h +++ b/deps/v8/src/objects/js-collection.h @@ -5,8 +5,8 @@ #ifndef V8_OBJECTS_JS_COLLECTION_H_ #define V8_OBJECTS_JS_COLLECTION_H_ -#include "src/objects.h" #include "src/objects/js-collection-iterator.h" +#include "src/objects/objects.h" // Has to be the last include (doesn't have include guards): #include "src/objects/object-macros.h" @@ -44,6 +44,8 @@ class JSSet : public JSCollection { // Dispatched behavior. DECL_PRINTER(JSSet) DECL_VERIFIER(JSSet) + DEFINE_FIELD_OFFSET_CONSTANTS(JSCollection::kHeaderSize, + TORQUE_GENERATED_JSWEAK_SET_FIELDS) OBJECT_CONSTRUCTORS(JSSet, JSCollection); }; @@ -72,6 +74,8 @@ class JSMap : public JSCollection { // Dispatched behavior. DECL_PRINTER(JSMap) DECL_VERIFIER(JSMap) + DEFINE_FIELD_OFFSET_CONSTANTS(JSCollection::kHeaderSize, + TORQUE_GENERATED_JSWEAK_MAP_FIELDS) OBJECT_CONSTRUCTORS(JSMap, JSCollection); }; @@ -121,6 +125,8 @@ class JSWeakCollection : public JSObject { // Visit the whole object. 
using BodyDescriptor = BodyDescriptorImpl; + static const int kSizeOfAllWeakCollections = kHeaderSize; + OBJECT_CONSTRUCTORS(JSWeakCollection, JSObject); }; @@ -133,6 +139,9 @@ class JSWeakMap : public JSWeakCollection { DECL_PRINTER(JSWeakMap) DECL_VERIFIER(JSWeakMap) + DEFINE_FIELD_OFFSET_CONSTANTS(JSWeakCollection::kHeaderSize, + TORQUE_GENERATED_JSWEAK_MAP_FIELDS) + STATIC_ASSERT(kSize == kSizeOfAllWeakCollections); OBJECT_CONSTRUCTORS(JSWeakMap, JSWeakCollection); }; @@ -144,6 +153,9 @@ class JSWeakSet : public JSWeakCollection { // Dispatched behavior. DECL_PRINTER(JSWeakSet) DECL_VERIFIER(JSWeakSet) + DEFINE_FIELD_OFFSET_CONSTANTS(JSWeakCollection::kHeaderSize, + TORQUE_GENERATED_JSWEAK_SET_FIELDS) + STATIC_ASSERT(kSize == kSizeOfAllWeakCollections); OBJECT_CONSTRUCTORS(JSWeakSet, JSWeakCollection); }; diff --git a/deps/v8/src/objects/js-date-time-format-inl.h b/deps/v8/src/objects/js-date-time-format-inl.h index 1657241b07..8947ea7b4c 100644 --- a/deps/v8/src/objects/js-date-time-format-inl.h +++ b/deps/v8/src/objects/js-date-time-format-inl.h @@ -9,8 +9,8 @@ #ifndef V8_OBJECTS_JS_DATE_TIME_FORMAT_INL_H_ #define V8_OBJECTS_JS_DATE_TIME_FORMAT_INL_H_ -#include "src/objects-inl.h" #include "src/objects/js-date-time-format.h" +#include "src/objects/objects-inl.h" // Has to be the last include (doesn't have include guards): #include "src/objects/object-macros.h" @@ -20,11 +20,11 @@ namespace internal { OBJECT_CONSTRUCTORS_IMPL(JSDateTimeFormat, JSObject) -ACCESSORS(JSDateTimeFormat, icu_locale, Managed, kICULocaleOffset) +ACCESSORS(JSDateTimeFormat, icu_locale, Managed, kIcuLocaleOffset) ACCESSORS(JSDateTimeFormat, icu_simple_date_format, - Managed, kICUSimpleDateFormatOffset) + Managed, kIcuSimpleDateFormatOffset) ACCESSORS(JSDateTimeFormat, icu_date_interval_format, - Managed, kICUDateIntervalFormatOffset) + Managed, kIcuDateIntervalFormatOffset) ACCESSORS(JSDateTimeFormat, bound_format, Object, kBoundFormatOffset) SMI_ACCESSORS(JSDateTimeFormat, flags, 
kFlagsOffset) diff --git a/deps/v8/src/objects/js-date-time-format.cc b/deps/v8/src/objects/js-date-time-format.cc index eda95f8773..8730e0a39b 100644 --- a/deps/v8/src/objects/js-date-time-format.cc +++ b/deps/v8/src/objects/js-date-time-format.cc @@ -12,9 +12,9 @@ #include #include -#include "src/date.h" +#include "src/date/date.h" +#include "src/execution/isolate.h" #include "src/heap/factory.h" -#include "src/isolate.h" #include "src/objects/intl-objects.h" #include "src/objects/js-date-time-format-inl.h" @@ -360,8 +360,8 @@ MaybeHandle JSDateTimeFormat::ResolvedOptions( Handle resolved_obj; CHECK(!date_time_format->icu_locale().is_null()); - CHECK_NOT_NULL(date_time_format->icu_locale()->raw()); - icu::Locale* icu_locale = date_time_format->icu_locale()->raw(); + CHECK_NOT_NULL(date_time_format->icu_locale().raw()); + icu::Locale* icu_locale = date_time_format->icu_locale().raw(); Maybe maybe_locale_str = Intl::ToLanguageTag(*icu_locale); MAYBE_RETURN(maybe_locale_str, MaybeHandle()); std::string locale_str = maybe_locale_str.FromJust(); @@ -369,7 +369,7 @@ MaybeHandle JSDateTimeFormat::ResolvedOptions( factory->NewStringFromAsciiChecked(locale_str.c_str()); icu::SimpleDateFormat* icu_simple_date_format = - date_time_format->icu_simple_date_format()->raw(); + date_time_format->icu_simple_date_format().raw(); // calendar const icu::Calendar* calendar = icu_simple_date_format->getCalendar(); // getType() returns legacy calendar type name instead of LDML/BCP47 calendar @@ -580,7 +580,7 @@ MaybeHandle JSDateTimeFormat::DateTimeFormat( } // 5. Return FormatDateTime(dtf, x). icu::SimpleDateFormat* format = - date_time_format->icu_simple_date_format()->raw(); + date_time_format->icu_simple_date_format().raw(); return FormatDateTime(isolate, *format, x); } @@ -612,7 +612,7 @@ MaybeHandle JSDateTimeFormat::ToLocaleDateTime( String); } - double const x = Handle::cast(date)->value()->Number(); + double const x = Handle::cast(date)->value().Number(); // 2. 
If x is NaN, return "Invalid Date" if (std::isnan(x)) { return factory->Invalid_Date_string(); @@ -640,9 +640,8 @@ MaybeHandle JSDateTimeFormat::ToLocaleDateTime( // 4. Let dateFormat be ? Construct(%DateTimeFormat%, « locales, options »). Handle constructor = Handle( - JSFunction::cast(isolate->context() - ->native_context() - ->intl_date_time_format_function()), + JSFunction::cast( + isolate->context().native_context().intl_date_time_format_function()), isolate); Handle obj; ASSIGN_RETURN_ON_EXCEPTION( @@ -658,12 +657,12 @@ MaybeHandle JSDateTimeFormat::ToLocaleDateTime( if (can_cache) { isolate->set_icu_object_in_cache( - cache_type, std::static_pointer_cast( - date_time_format->icu_simple_date_format()->get())); + cache_type, std::static_pointer_cast( + date_time_format->icu_simple_date_format().get())); } // 5. Return FormatDateTime(dateFormat, x). icu::SimpleDateFormat* format = - date_time_format->icu_simple_date_format()->raw(); + date_time_format->icu_simple_date_format().raw(); return FormatDateTime(isolate, *format, x); } @@ -779,7 +778,7 @@ MaybeHandle JSDateTimeFormat::ToDateTimeOptions( MaybeHandle JSDateTimeFormat::UnwrapDateTimeFormat( Isolate* isolate, Handle format_holder) { Handle native_context = - Handle(isolate->context()->native_context(), isolate); + Handle(isolate->context().native_context(), isolate); Handle constructor = Handle( JSFunction::cast(native_context->intl_date_time_format_function()), isolate); @@ -959,14 +958,41 @@ std::unique_ptr CreateICUDateFormatFromCache( cache.Pointer()->Create(icu_locale, skeleton, generator)); } -std::unique_ptr CreateICUDateIntervalFormat( - const icu::Locale& icu_locale, const icu::UnicodeString& skeleton) { +icu::UnicodeString SkeletonFromDateFormat( + const icu::SimpleDateFormat& icu_date_format) { + icu::UnicodeString pattern; + pattern = icu_date_format.toPattern(pattern); + + UErrorCode status = U_ZERO_ERROR; + icu::UnicodeString skeleton = + 
icu::DateTimePatternGenerator::staticGetSkeleton(pattern, status); + CHECK(U_SUCCESS(status)); + return skeleton; +} + +icu::DateIntervalFormat* LazyCreateDateIntervalFormat( + Isolate* isolate, Handle date_time_format) { + Managed managed_format = + date_time_format->icu_date_interval_format(); + if (managed_format.get()) { + return managed_format.raw(); + } + icu::SimpleDateFormat* icu_simple_date_format = + date_time_format->icu_simple_date_format().raw(); UErrorCode status = U_ZERO_ERROR; std::unique_ptr date_interval_format( - icu::DateIntervalFormat::createInstance(skeleton, icu_locale, status)); - if (U_FAILURE(status)) return std::unique_ptr(); - CHECK_NOT_NULL(date_interval_format.get()); - return date_interval_format; + icu::DateIntervalFormat::createInstance( + SkeletonFromDateFormat(*icu_simple_date_format), + *(date_time_format->icu_locale().raw()), status)); + if (U_FAILURE(status)) { + return nullptr; + } + date_interval_format->setTimeZone(icu_simple_date_format->getTimeZone()); + Handle> managed_interval_format = + Managed::FromUniquePtr( + isolate, 0, std::move(date_interval_format)); + date_time_format->set_icu_date_interval_format(*managed_interval_format); + return (*managed_interval_format).raw(); } Intl::HourCycle HourCycleFromPattern(const icu::UnicodeString pattern) { @@ -1103,18 +1129,6 @@ std::unique_ptr DateTimeStylePattern( generator); } -icu::UnicodeString SkeletonFromDateFormat( - const icu::SimpleDateFormat& icu_date_format) { - icu::UnicodeString pattern; - pattern = icu_date_format.toPattern(pattern); - - UErrorCode status = U_ZERO_ERROR; - icu::UnicodeString skeleton = - icu::DateTimePatternGenerator::staticGetSkeleton(pattern, status); - CHECK(U_SUCCESS(status)); - return skeleton; -} - class DateTimePatternGeneratorCache { public: // Return a clone copy that the caller have to free. 
@@ -1146,6 +1160,7 @@ MaybeHandle JSDateTimeFormat::Initialize( Isolate* isolate, Handle date_time_format, Handle locales, Handle input_options) { date_time_format->set_flags(0); + Factory* factory = isolate->factory(); // 1. Let requestedLocales be ? CanonicalizeLocaleList(locales). Maybe> maybe_requested_locales = Intl::CanonicalizeLocaleList(isolate, locales); @@ -1163,6 +1178,36 @@ MaybeHandle JSDateTimeFormat::Initialize( // 4. Let matcher be ? GetOption(options, "localeMatcher", "string", // « "lookup", "best fit" », "best fit"). // 5. Set opt.[[localeMatcher]] to matcher. + + std::unique_ptr calendar_str = nullptr; + std::unique_ptr numbering_system_str = nullptr; + if (FLAG_harmony_intl_add_calendar_numbering_system) { + const std::vector empty_values = {}; + // 6. Let calendar be ? GetOption(options, "calendar", + // "string", undefined, undefined). + Maybe maybe_calendar = + Intl::GetStringOption(isolate, options, "calendar", empty_values, + "Intl.NumberFormat", &calendar_str); + MAYBE_RETURN(maybe_calendar, MaybeHandle()); + if (maybe_calendar.FromJust() && calendar_str != nullptr) { + icu::Locale default_locale; + if (!Intl::IsValidCalendar(default_locale, calendar_str.get())) { + THROW_NEW_ERROR( + isolate, + NewRangeError( + MessageTemplate::kInvalid, factory->calendar_string(), + factory->NewStringFromAsciiChecked(calendar_str.get())), + JSDateTimeFormat); + } + } + + // 8. Let numberingSystem be ? GetOption(options, "numberingSystem", + // "string", undefined, undefined). 
+ Maybe maybe_numberingSystem = Intl::GetNumberingSystem( + isolate, options, "Intl.NumberFormat", &numbering_system_str); + MAYBE_RETURN(maybe_numberingSystem, MaybeHandle()); + } + Maybe maybe_locale_matcher = Intl::GetLocaleMatcher(isolate, options, "Intl.DateTimeFormat"); MAYBE_RETURN(maybe_locale_matcher, MaybeHandle()); @@ -1206,6 +1251,17 @@ MaybeHandle JSDateTimeFormat::Initialize( icu::Locale icu_locale = r.icu_locale; DCHECK(!icu_locale.isBogus()); + UErrorCode status = U_ZERO_ERROR; + if (calendar_str != nullptr) { + icu_locale.setUnicodeKeywordValue("ca", calendar_str.get(), status); + CHECK(U_SUCCESS(status)); + } + + if (numbering_system_str != nullptr) { + icu_locale.setUnicodeKeywordValue("nu", numbering_system_str.get(), status); + CHECK(U_SUCCESS(status)); + } + // 17. Let timeZone be ? Get(options, "timeZone"). const std::vector empty_values; std::unique_ptr timezone = nullptr; @@ -1216,11 +1272,11 @@ MaybeHandle JSDateTimeFormat::Initialize( std::unique_ptr tz = CreateTimeZone(isolate, timezone.get()); if (tz.get() == nullptr) { - THROW_NEW_ERROR(isolate, - NewRangeError(MessageTemplate::kInvalidTimeZone, - isolate->factory()->NewStringFromAsciiChecked( - timezone.get())), - JSDateTimeFormat); + THROW_NEW_ERROR( + isolate, + NewRangeError(MessageTemplate::kInvalidTimeZone, + factory->NewStringFromAsciiChecked(timezone.get())), + JSDateTimeFormat); } std::unique_ptr calendar( @@ -1229,11 +1285,11 @@ MaybeHandle JSDateTimeFormat::Initialize( // 18.b If the result of IsValidTimeZoneName(timeZone) is false, then // i. Throw a RangeError exception. 
if (calendar.get() == nullptr) { - THROW_NEW_ERROR(isolate, - NewRangeError(MessageTemplate::kInvalidTimeZone, - isolate->factory()->NewStringFromAsciiChecked( - timezone.get())), - JSDateTimeFormat); + THROW_NEW_ERROR( + isolate, + NewRangeError(MessageTemplate::kInvalidTimeZone, + factory->NewStringFromAsciiChecked(timezone.get())), + JSDateTimeFormat); } static base::LazyInstance::type @@ -1243,7 +1299,6 @@ MaybeHandle JSDateTimeFormat::Initialize( generator_cache.Pointer()->CreateGenerator(icu_locale)); // 15.Let hcDefault be dataLocaleData.[[hourCycle]]. - UErrorCode status = U_ZERO_ERROR; icu::UnicodeString hour_pattern = generator->getBestPattern("jjmm", status); CHECK(U_SUCCESS(status)); Intl::HourCycle hc_default = HourCycleFromPattern(hour_pattern); @@ -1297,7 +1352,6 @@ MaybeHandle JSDateTimeFormat::Initialize( DateTimeStyle date_style = DateTimeStyle::kUndefined; DateTimeStyle time_style = DateTimeStyle::kUndefined; std::unique_ptr icu_date_format; - std::unique_ptr icu_date_interval_format; if (FLAG_harmony_intl_datetime_style) { // 28. Let dateStyle be ? GetOption(options, "dateStyle", "string", « @@ -1340,10 +1394,6 @@ MaybeHandle JSDateTimeFormat::Initialize( time_style != DateTimeStyle::kUndefined) { icu_date_format = DateTimeStylePattern(date_style, time_style, icu_locale, hc, *generator); - if (FLAG_harmony_intl_date_format_range) { - icu_date_interval_format = CreateICUDateIntervalFormat( - icu_locale, SkeletonFromDateFormat(*icu_date_format)); - } } } // 33. Else, @@ -1397,10 +1447,6 @@ MaybeHandle JSDateTimeFormat::Initialize( FATAL("Failed to create ICU date format, are ICU data files missing?"); } } - if (FLAG_harmony_intl_date_format_range) { - icu_date_interval_format = - CreateICUDateIntervalFormat(icu_locale, skeleton_ustr); - } // g. 
If dateTimeFormat.[[Hour]] is not undefined, then if (!has_hour_option) { @@ -1449,12 +1495,10 @@ MaybeHandle JSDateTimeFormat::Initialize( Managed::FromUniquePtr(isolate, 0, std::move(icu_date_format)); date_time_format->set_icu_simple_date_format(*managed_format); - if (FLAG_harmony_intl_date_format_range) { - Handle> managed_interval_format = - Managed::FromUniquePtr( - isolate, 0, std::move(icu_date_interval_format)); - date_time_format->set_icu_date_interval_format(*managed_interval_format); - } + + Handle> managed_interval_format = + Managed::FromRawPtr(isolate, 0, nullptr); + date_time_format->set_icu_date_interval_format(*managed_interval_format); return date_time_format; } @@ -1518,7 +1562,7 @@ MaybeHandle JSDateTimeFormat::FormatToParts( double date_value) { Factory* factory = isolate->factory(); icu::SimpleDateFormat* format = - date_time_format->icu_simple_date_format()->raw(); + date_time_format->icu_simple_date_format().raw(); CHECK_NOT_NULL(format); icu::UnicodeString formatted; @@ -1591,75 +1635,176 @@ Handle JSDateTimeFormat::HourCycleAsString() const { } } -MaybeHandle JSDateTimeFormat::FormatRange( - Isolate* isolate, Handle date_time_format, double x, - double y) { - // TODO(ftang): Merge the following with FormatRangeToParts after - // the landing of ICU64 to make it cleaner. +enum Source { kShared, kStartRange, kEndRange }; - // #sec-partitiondatetimerangepattern - // 1. Let x be TimeClip(x). - x = DateCache::TimeClip(x); - // 2. If x is NaN, throw a RangeError exception. - if (std::isnan(x)) { - THROW_NEW_ERROR(isolate, NewRangeError(MessageTemplate::kInvalidTimeValue), - String); +namespace { + +class SourceTracker { + public: + SourceTracker() { start_[0] = start_[1] = limit_[0] = limit_[1] = 0; } + void Add(int32_t field, int32_t start, int32_t limit) { + CHECK_LT(field, 2); + start_[field] = start; + limit_[field] = limit; } - // 3. Let y be TimeClip(y). - y = DateCache::TimeClip(y); - // 4. If y is NaN, throw a RangeError exception. 
- if (std::isnan(y)) { - THROW_NEW_ERROR(isolate, NewRangeError(MessageTemplate::kInvalidTimeValue), - String); + + Source GetSource(int32_t start, int32_t limit) const { + Source source = Source::kShared; + if (FieldContains(0, start, limit)) { + source = Source::kStartRange; + } else if (FieldContains(1, start, limit)) { + source = Source::kEndRange; + } + return source; } - icu::DateIntervalFormat* date_interval_format = - date_time_format->icu_date_interval_format()->raw(); - CHECK_NOT_NULL(date_interval_format); - icu::DateInterval interval(x, y); + private: + int32_t start_[2]; + int32_t limit_[2]; - icu::UnicodeString result; - icu::FieldPosition fpos; + bool FieldContains(int32_t field, int32_t start, int32_t limit) const { + CHECK_LT(field, 2); + return (start_[field] <= start) && (start <= limit_[field]) && + (start_[field] <= limit) && (limit <= limit_[field]); + } +}; + +Handle SourceString(Isolate* isolate, Source source) { + switch (source) { + case Source::kShared: + return ReadOnlyRoots(isolate).shared_string_handle(); + case Source::kStartRange: + return ReadOnlyRoots(isolate).startRange_string_handle(); + case Source::kEndRange: + return ReadOnlyRoots(isolate).endRange_string_handle(); + UNREACHABLE(); + } +} + +Maybe AddPartForFormatRange(Isolate* isolate, Handle array, + const icu::UnicodeString& string, + int32_t index, int32_t field, int32_t start, + int32_t end, const SourceTracker& tracker) { + Handle substring; + ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, substring, + Intl::ToString(isolate, string, start, end), + Nothing()); + Intl::AddElement(isolate, array, index, + IcuDateFieldIdToDateType(field, isolate), substring, + isolate->factory()->source_string(), + SourceString(isolate, tracker.GetSource(start, end))); + return Just(true); +} + +// A helper function to convert the FormattedDateInterval to a +// MaybeHandle for the implementation of formatRangeToParts. 
+MaybeHandle FormattedDateIntervalToJSArray( + Isolate* isolate, const icu::FormattedValue& formatted) { UErrorCode status = U_ZERO_ERROR; - date_interval_format->format(&interval, result, fpos, status); - CHECK(U_SUCCESS(status)); + icu::UnicodeString result = formatted.toString(status); - return Intl::ToString(isolate, result); + Factory* factory = isolate->factory(); + Handle array = factory->NewJSArray(0); + icu::ConstrainedFieldPosition cfpos; + int index = 0; + int32_t previous_end_pos = 0; + SourceTracker tracker; + while (formatted.nextPosition(cfpos, status)) { + int32_t category = cfpos.getCategory(); + int32_t field = cfpos.getField(); + int32_t start = cfpos.getStart(); + int32_t limit = cfpos.getLimit(); + + if (category == UFIELD_CATEGORY_DATE_INTERVAL_SPAN) { + CHECK_LE(field, 2); + tracker.Add(field, start, limit); + } else { + CHECK(category == UFIELD_CATEGORY_DATE); + if (start > previous_end_pos) { + // Add "literal" from the previous end position to the start if + // necessary. + Maybe maybe_added = + AddPartForFormatRange(isolate, array, result, index, -1, + previous_end_pos, start, tracker); + MAYBE_RETURN(maybe_added, Handle()); + previous_end_pos = start; + index++; + } + Maybe maybe_added = AddPartForFormatRange( + isolate, array, result, index, field, start, limit, tracker); + MAYBE_RETURN(maybe_added, Handle()); + previous_end_pos = limit; + ++index; + } + } + int32_t end = result.length(); + // Add "literal" in the end if necessary. 
+ if (end > previous_end_pos) { + Maybe maybe_added = AddPartForFormatRange( + isolate, array, result, index, -1, previous_end_pos, end, tracker); + MAYBE_RETURN(maybe_added, Handle()); + } + + if (U_FAILURE(status)) { + THROW_NEW_ERROR(isolate, NewTypeError(MessageTemplate::kIcuError), JSArray); + } + + JSObject::ValidateElements(*array); + return array; } -MaybeHandle JSDateTimeFormat::FormatRangeToParts( +// The shared code between formatRange and formatRangeToParts +template +MaybeHandle FormatRangeCommon( Isolate* isolate, Handle date_time_format, double x, - double y) { - // TODO(ftang): Merge the following with FormatRangeToParts after - // the landing of ICU64 to make it cleaner. - + double y, + MaybeHandle (*formatToResult)(Isolate*, const icu::FormattedValue&)) { // #sec-partitiondatetimerangepattern // 1. Let x be TimeClip(x). x = DateCache::TimeClip(x); // 2. If x is NaN, throw a RangeError exception. if (std::isnan(x)) { THROW_NEW_ERROR(isolate, NewRangeError(MessageTemplate::kInvalidTimeValue), - JSArray); + T); } // 3. Let y be TimeClip(y). y = DateCache::TimeClip(y); // 4. If y is NaN, throw a RangeError exception. if (std::isnan(y)) { THROW_NEW_ERROR(isolate, NewRangeError(MessageTemplate::kInvalidTimeValue), - JSArray); + T); } + icu::DateInterval interval(x, y); - icu::DateIntervalFormat* date_interval_format = - date_time_format->icu_date_interval_format()->raw(); - CHECK_NOT_NULL(date_interval_format); - Factory* factory = isolate->factory(); - Handle result = factory->NewJSArray(0); + icu::DateIntervalFormat* format = + LazyCreateDateIntervalFormat(isolate, date_time_format); + if (format == nullptr) { + THROW_NEW_ERROR(isolate, NewTypeError(MessageTemplate::kIcuError), T); + } - // TODO(ftang) To be implemented after ICU64 landed that support - // DateIntervalFormat::formatToValue() and FormattedDateInterval. 
+ UErrorCode status = U_ZERO_ERROR; + icu::FormattedDateInterval formatted = + format->formatToValue(interval, status); + if (U_FAILURE(status)) { + THROW_NEW_ERROR(isolate, NewTypeError(MessageTemplate::kIcuError), T); + } + return formatToResult(isolate, formatted); +} - JSObject::ValidateElements(*result); - return result; +} // namespace + +MaybeHandle JSDateTimeFormat::FormatRange( + Isolate* isolate, Handle date_time_format, double x, + double y) { + return FormatRangeCommon(isolate, date_time_format, x, y, + Intl::FormattedToString); +} + +MaybeHandle JSDateTimeFormat::FormatRangeToParts( + Isolate* isolate, Handle date_time_format, double x, + double y) { + return FormatRangeCommon(isolate, date_time_format, x, y, + FormattedDateIntervalToJSArray); } } // namespace internal diff --git a/deps/v8/src/objects/js-date-time-format.h b/deps/v8/src/objects/js-date-time-format.h index cf73af2aa8..664ccdcdf7 100644 --- a/deps/v8/src/objects/js-date-time-format.h +++ b/deps/v8/src/objects/js-date-time-format.h @@ -12,9 +12,10 @@ #include #include -#include "src/isolate.h" +#include "src/execution/isolate.h" #include "src/objects/intl-objects.h" #include "src/objects/managed.h" +#include "torque-generated/field-offsets-tq.h" #include "unicode/uversion.h" // Has to be the last include (doesn't have include guards): @@ -92,18 +93,8 @@ class JSDateTimeFormat : public JSObject { enum class DateTimeStyle { kUndefined, kFull, kLong, kMedium, kShort }; // Layout description. -#define JS_DATE_TIME_FORMAT_FIELDS(V) \ - V(kICULocaleOffset, kTaggedSize) \ - V(kICUSimpleDateFormatOffset, kTaggedSize) \ - V(kICUDateIntervalFormatOffset, kTaggedSize) \ - V(kBoundFormatOffset, kTaggedSize) \ - V(kFlagsOffset, kTaggedSize) \ - /* Total size. 
*/ \ - V(kSize, 0) - DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize, - JS_DATE_TIME_FORMAT_FIELDS) -#undef JS_DATE_TIME_FORMAT_FIELDS + TORQUE_GENERATED_JSDATE_TIME_FORMAT_FIELDS) inline void set_hour_cycle(Intl::HourCycle hour_cycle); inline Intl::HourCycle hour_cycle() const; diff --git a/deps/v8/src/objects/js-generator-inl.h b/deps/v8/src/objects/js-generator-inl.h index c2895e29f9..d0fe2cd90e 100644 --- a/deps/v8/src/objects/js-generator-inl.h +++ b/deps/v8/src/objects/js-generator-inl.h @@ -8,7 +8,7 @@ #include "src/objects/js-generator.h" #include "src/objects/js-promise-inl.h" -#include "src/objects-inl.h" // Needed for write barriers +#include "src/objects/objects-inl.h" // Needed for write barriers // Has to be the last include (doesn't have include guards): #include "src/objects/object-macros.h" diff --git a/deps/v8/src/objects/js-list-format-inl.h b/deps/v8/src/objects/js-list-format-inl.h index dd3f4dceb9..96e61c2205 100644 --- a/deps/v8/src/objects/js-list-format-inl.h +++ b/deps/v8/src/objects/js-list-format-inl.h @@ -9,8 +9,8 @@ #ifndef V8_OBJECTS_JS_LIST_FORMAT_INL_H_ #define V8_OBJECTS_JS_LIST_FORMAT_INL_H_ -#include "src/objects-inl.h" #include "src/objects/js-list-format.h" +#include "src/objects/objects-inl.h" // Has to be the last include (doesn't have include guards): #include "src/objects/object-macros.h" @@ -23,7 +23,7 @@ OBJECT_CONSTRUCTORS_IMPL(JSListFormat, JSObject) // Base list format accessors. 
ACCESSORS(JSListFormat, locale, String, kLocaleOffset) ACCESSORS(JSListFormat, icu_formatter, Managed, - kICUFormatterOffset) + kIcuFormatterOffset) SMI_ACCESSORS(JSListFormat, flags, kFlagsOffset) inline void JSListFormat::set_style(Style style) { diff --git a/deps/v8/src/objects/js-list-format.cc b/deps/v8/src/objects/js-list-format.cc index c4329401a4..84691194ec 100644 --- a/deps/v8/src/objects/js-list-format.cc +++ b/deps/v8/src/objects/js-list-format.cc @@ -11,15 +11,15 @@ #include #include -#include "src/elements-inl.h" -#include "src/elements.h" +#include "src/execution/isolate.h" #include "src/heap/factory.h" -#include "src/isolate.h" -#include "src/objects-inl.h" +#include "src/objects/elements-inl.h" +#include "src/objects/elements.h" #include "src/objects/intl-objects.h" #include "src/objects/js-array-inl.h" #include "src/objects/js-list-format-inl.h" #include "src/objects/managed.h" +#include "src/objects/objects-inl.h" #include "unicode/fieldpos.h" #include "unicode/fpositer.h" #include "unicode/listformatter.h" @@ -286,8 +286,9 @@ Maybe> ToUnicodeStringArray( factory->NewNumber(i), factory->String_string()), Nothing>()); } - result.push_back( - Intl::ToICUUnicodeString(isolate, Handle::cast(item))); + Handle item_str = Handle::cast(item); + if (!item_str->IsFlat()) item_str = String::Flatten(isolate, item_str); + result.push_back(Intl::ToICUUnicodeString(isolate, item_str)); } DCHECK(!array->HasDictionaryElements()); return Just(result); @@ -296,7 +297,7 @@ Maybe> ToUnicodeStringArray( template MaybeHandle FormatListCommon( Isolate* isolate, Handle format, Handle list, - MaybeHandle (*formatToResult)(Isolate*, const icu::FormattedList&)) { + MaybeHandle (*formatToResult)(Isolate*, const icu::FormattedValue&)) { DCHECK(!list->IsUndefined()); // ecma402 #sec-createpartsfromlist // 2. 
If list contains any element value such that Type(value) is not String, @@ -306,7 +307,7 @@ MaybeHandle FormatListCommon( MAYBE_RETURN(maybe_array, Handle()); std::vector array = maybe_array.FromJust(); - icu::ListFormatter* formatter = format->icu_formatter()->raw(); + icu::ListFormatter* formatter = format->icu_formatter().raw(); CHECK_NOT_NULL(formatter); UErrorCode status = U_ZERO_ERROR; @@ -318,18 +319,6 @@ MaybeHandle FormatListCommon( return formatToResult(isolate, formatted); } -// A helper function to convert the FormattedList to a -// MaybeHandle for the implementation of format. -MaybeHandle FormattedToString(Isolate* isolate, - const icu::FormattedList& formatted) { - UErrorCode status = U_ZERO_ERROR; - icu::UnicodeString result = formatted.toString(status); - if (U_FAILURE(status)) { - THROW_NEW_ERROR(isolate, NewTypeError(MessageTemplate::kIcuError), String); - } - return Intl::ToString(isolate, result); -} - Handle IcuFieldIdToType(Isolate* isolate, int32_t field_id) { switch (field_id) { case ULISTFMT_LITERAL_FIELD: @@ -345,8 +334,8 @@ Handle IcuFieldIdToType(Isolate* isolate, int32_t field_id) { // A helper function to convert the FormattedList to a // MaybeHandle for the implementation of formatToParts. 
-MaybeHandle FormattedToJSArray(Isolate* isolate, - const icu::FormattedList& formatted) { +MaybeHandle FormattedListToJSArray( + Isolate* isolate, const icu::FormattedValue& formatted) { Handle array = isolate->factory()->NewJSArray(0); icu::ConstrainedFieldPosition cfpos; cfpos.constrainCategory(UFIELD_CATEGORY_LIST); @@ -375,13 +364,15 @@ MaybeHandle FormattedToJSArray(Isolate* isolate, MaybeHandle JSListFormat::FormatList(Isolate* isolate, Handle format, Handle list) { - return FormatListCommon(isolate, format, list, FormattedToString); + return FormatListCommon(isolate, format, list, + Intl::FormattedToString); } // ecma42 #sec-formatlisttoparts MaybeHandle JSListFormat::FormatListToParts( Isolate* isolate, Handle format, Handle list) { - return FormatListCommon(isolate, format, list, FormattedToJSArray); + return FormatListCommon(isolate, format, list, + FormattedListToJSArray); } const std::set& JSListFormat::GetAvailableLocales() { diff --git a/deps/v8/src/objects/js-list-format.h b/deps/v8/src/objects/js-list-format.h index ee576b3ff2..0284d05d42 100644 --- a/deps/v8/src/objects/js-list-format.h +++ b/deps/v8/src/objects/js-list-format.h @@ -12,10 +12,10 @@ #include #include +#include "src/execution/isolate.h" #include "src/heap/factory.h" -#include "src/isolate.h" -#include "src/objects.h" #include "src/objects/managed.h" +#include "src/objects/objects.h" #include "unicode/uversion.h" // Has to be the last include (doesn't have include guards): @@ -105,15 +105,8 @@ class JSListFormat : public JSObject { DECL_VERIFIER(JSListFormat) // Layout description. -#define JS_LIST_FORMAT_FIELDS(V) \ - V(kLocaleOffset, kTaggedSize) \ - V(kICUFormatterOffset, kTaggedSize) \ - V(kFlagsOffset, kTaggedSize) \ - /* Header size. 
*/ \ - V(kSize, 0) - - DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize, JS_LIST_FORMAT_FIELDS) -#undef JS_LIST_FORMAT_FIELDS + DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize, + TORQUE_GENERATED_JSLIST_FORMAT_FIELDS) OBJECT_CONSTRUCTORS(JSListFormat, JSObject); }; diff --git a/deps/v8/src/objects/js-locale-inl.h b/deps/v8/src/objects/js-locale-inl.h index 44e223ef06..17859ea6ab 100644 --- a/deps/v8/src/objects/js-locale-inl.h +++ b/deps/v8/src/objects/js-locale-inl.h @@ -9,9 +9,9 @@ #ifndef V8_OBJECTS_JS_LOCALE_INL_H_ #define V8_OBJECTS_JS_LOCALE_INL_H_ -#include "src/api-inl.h" -#include "src/objects-inl.h" +#include "src/api/api-inl.h" #include "src/objects/js-locale.h" +#include "src/objects/objects-inl.h" // Has to be the last include (doesn't have include guards): #include "src/objects/object-macros.h" @@ -21,7 +21,7 @@ namespace internal { OBJECT_CONSTRUCTORS_IMPL(JSLocale, JSObject) -ACCESSORS(JSLocale, icu_locale, Managed, kICULocaleOffset) +ACCESSORS(JSLocale, icu_locale, Managed, kIcuLocaleOffset) CAST_ACCESSOR(JSLocale) diff --git a/deps/v8/src/objects/js-locale.cc b/deps/v8/src/objects/js-locale.cc index 4e35c16b0f..509f9a3069 100644 --- a/deps/v8/src/objects/js-locale.cc +++ b/deps/v8/src/objects/js-locale.cc @@ -13,14 +13,15 @@ #include #include -#include "src/api.h" -#include "src/global-handles.h" +#include "src/api/api.h" +#include "src/execution/isolate.h" +#include "src/handles/global-handles.h" #include "src/heap/factory.h" -#include "src/isolate.h" -#include "src/objects-inl.h" #include "src/objects/intl-objects.h" #include "src/objects/js-locale-inl.h" +#include "src/objects/objects-inl.h" #include "unicode/char16ptr.h" +#include "unicode/localebuilder.h" #include "unicode/locid.h" #include "unicode/uloc.h" #include "unicode/unistr.h" @@ -30,21 +31,6 @@ namespace internal { namespace { -// Helper function to check a locale is valid. It will return false if -// the length of the extension fields are incorrect. 
For example, en-u-a or -// en-u-co-b will return false. -bool IsValidLocale(const icu::Locale& locale) { - // icu::Locale::toLanguageTag won't return U_STRING_NOT_TERMINATED_WARNING for - // incorrect locale yet. So we still need the following uloc_toLanguageTag - // TODO(ftang): Change to use icu::Locale::toLanguageTag once it indicate - // error. - char result[ULOC_FULLNAME_CAPACITY]; - UErrorCode status = U_ZERO_ERROR; - uloc_toLanguageTag(locale.getName(), result, ULOC_FULLNAME_CAPACITY, true, - &status); - return U_SUCCESS(status) && status != U_STRING_NOT_TERMINATED_WARNING; -} - struct OptionData { const char* name; const char* key; @@ -55,9 +41,8 @@ struct OptionData { // Inserts tags from options into locale string. Maybe InsertOptionsIntoLocale(Isolate* isolate, Handle options, - icu::Locale* icu_locale) { + icu::LocaleBuilder* builder) { CHECK(isolate); - CHECK(!icu_locale->isBogus()); const std::vector hour_cycle_values = {"h11", "h12", "h23", "h24"}; @@ -75,7 +60,6 @@ Maybe InsertOptionsIntoLocale(Isolate* isolate, // TODO(cira): Pass in values as per the spec to make this to be // spec compliant. - UErrorCode status = U_ZERO_ERROR; for (const auto& option_to_bcp47 : kOptionToUnicodeTagMap) { std::unique_ptr value_str = nullptr; bool value_bool = false; @@ -99,32 +83,18 @@ Maybe InsertOptionsIntoLocale(Isolate* isolate, DCHECK_NOT_NULL(value_str.get()); // Overwrite existing, or insert new key-value to the locale string. - if (uloc_toLegacyType(uloc_toLegacyKey(option_to_bcp47.key), - value_str.get())) { - // Only call setUnicodeKeywordValue if that value is a valid one. 
- icu_locale->setUnicodeKeywordValue(option_to_bcp47.key, value_str.get(), - status); - if (U_FAILURE(status)) { - return Just(false); - } - } else { + if (!uloc_toLegacyType(uloc_toLegacyKey(option_to_bcp47.key), + value_str.get())) { return Just(false); } + builder->setUnicodeLocaleKeyword(option_to_bcp47.key, value_str.get()); } - - // Check all the unicode extension fields are in the right length. - if (!IsValidLocale(*icu_locale)) { - THROW_NEW_ERROR_RETURN_VALUE( - isolate, NewRangeError(MessageTemplate::kLocaleBadParameters), - Nothing()); - } - return Just(true); } Handle UnicodeKeywordValue(Isolate* isolate, Handle locale, const char* key) { - icu::Locale* icu_locale = locale->icu_locale()->raw(); + icu::Locale* icu_locale = locale->icu_locale().raw(); UErrorCode status = U_ZERO_ERROR; std::string value = icu_locale->getUnicodeKeywordValue(key, status); @@ -237,32 +207,29 @@ bool StartsWithUnicodeLanguageId(const std::string& value) { return true; } -Maybe ApplyOptionsToTag(Isolate* isolate, Handle tag, - Handle options) { +Maybe ApplyOptionsToTag(Isolate* isolate, Handle tag, + Handle options, + icu::LocaleBuilder* builder) { v8::Isolate* v8_isolate = reinterpret_cast(isolate); if (tag->length() == 0) { THROW_NEW_ERROR_RETURN_VALUE( isolate, NewRangeError(MessageTemplate::kLocaleNotEmpty), - Nothing()); + Nothing()); } v8::String::Utf8Value bcp47_tag(v8_isolate, v8::Utils::ToLocal(tag)); + builder->setLanguageTag({*bcp47_tag, bcp47_tag.length()}); CHECK_LT(0, bcp47_tag.length()); CHECK_NOT_NULL(*bcp47_tag); // 2. If IsStructurallyValidLanguageTag(tag) is false, throw a RangeError // exception. 
if (!StartsWithUnicodeLanguageId(*bcp47_tag)) { - THROW_NEW_ERROR_RETURN_VALUE( - isolate, NewRangeError(MessageTemplate::kLocaleBadParameters), - Nothing()); + return Just(false); } UErrorCode status = U_ZERO_ERROR; - icu::Locale icu_locale = - icu::Locale::forLanguageTag({*bcp47_tag, bcp47_tag.length()}, status); + builder->build(status); if (U_FAILURE(status)) { - THROW_NEW_ERROR_RETURN_VALUE( - isolate, NewRangeError(MessageTemplate::kLocaleBadParameters), - Nothing()); + return Just(false); } // 3. Let language be ? GetOption(options, "language", "string", undefined, @@ -272,15 +239,16 @@ Maybe ApplyOptionsToTag(Isolate* isolate, Handle tag, Maybe maybe_language = Intl::GetStringOption(isolate, options, "language", empty_values, "ApplyOptionsToTag", &language_str); - MAYBE_RETURN(maybe_language, Nothing()); + MAYBE_RETURN(maybe_language, Nothing()); // 4. If language is not undefined, then if (maybe_language.FromJust()) { + builder->setLanguage(language_str.get()); + builder->build(status); // a. If language does not match the unicode_language_subtag production, // throw a RangeError exception. - if (!IsUnicodeLanguageSubtag(language_str.get())) { - THROW_NEW_ERROR_RETURN_VALUE( - isolate, NewRangeError(MessageTemplate::kLocaleBadParameters), - Nothing()); + if (U_FAILURE(status) || language_str[0] == '\0' || + IsAlpha(language_str.get(), 4, 4)) { + return Just(false); } } // 5. Let script be ? GetOption(options, "script", "string", undefined, @@ -289,15 +257,15 @@ Maybe ApplyOptionsToTag(Isolate* isolate, Handle tag, Maybe maybe_script = Intl::GetStringOption(isolate, options, "script", empty_values, "ApplyOptionsToTag", &script_str); - MAYBE_RETURN(maybe_script, Nothing()); + MAYBE_RETURN(maybe_script, Nothing()); // 6. If script is not undefined, then if (maybe_script.FromJust()) { + builder->setScript(script_str.get()); + builder->build(status); // a. If script does not match the unicode_script_subtag production, throw // a RangeError exception. 
- if (!IsUnicodeScriptSubtag(script_str.get())) { - THROW_NEW_ERROR_RETURN_VALUE( - isolate, NewRangeError(MessageTemplate::kLocaleBadParameters), - Nothing()); + if (U_FAILURE(status) || script_str[0] == '\0') { + return Just(false); } } // 7. Let region be ? GetOption(options, "region", "string", undefined, @@ -306,85 +274,41 @@ Maybe ApplyOptionsToTag(Isolate* isolate, Handle tag, Maybe maybe_region = Intl::GetStringOption(isolate, options, "region", empty_values, "ApplyOptionsToTag", ®ion_str); - MAYBE_RETURN(maybe_region, Nothing()); + MAYBE_RETURN(maybe_region, Nothing()); // 8. If region is not undefined, then if (maybe_region.FromJust()) { // a. If region does not match the region production, throw a RangeError // exception. - if (!IsUnicodeRegionSubtag(region_str.get())) { - THROW_NEW_ERROR_RETURN_VALUE( - isolate, NewRangeError(MessageTemplate::kLocaleBadParameters), - Nothing()); + builder->setRegion(region_str.get()); + builder->build(status); + if (U_FAILURE(status) || region_str[0] == '\0') { + return Just(false); } } - // 9. Set tag to CanonicalizeLanguageTag(tag). - Maybe maybe_canonicalized = - Intl::CanonicalizeLanguageTag(isolate, tag); - MAYBE_RETURN(maybe_canonicalized, Nothing()); - - std::vector tokens; - std::string token; - std::istringstream token_stream(maybe_canonicalized.FromJust()); - while (std::getline(token_stream, token, '-')) { - tokens.push_back(token); - } + // 9. Set tag to CanonicalizeLanguageTag(tag). // 10. If language is not undefined, - std::string locale_str; - if (maybe_language.FromJust()) { - // a. Assert: tag matches the unicode_locale_id production. - // b. Set tag to tag with the substring corresponding to the - // unicode_language_subtag production replaced by the string language. - tokens[0] = language_str.get(); - } - + // a. Assert: tag matches the unicode_locale_id production. + // b. Set tag to tag with the substring corresponding to the + // unicode_language_subtag production replaced by the string language. 
// 11. If script is not undefined, then - if (maybe_script.FromJust()) { - // a. If tag does not contain a unicode_script_subtag production, then - if (tokens.size() < 2 || !IsUnicodeScriptSubtag(tokens[1])) { - // i. Set tag to the concatenation of the unicode_language_subtag - // production of tag, "-", script, and the rest of tag. - tokens.insert(tokens.begin() + 1, script_str.get()); - // b. Else, - } else { - // i. Set tag to tag with the substring corresponding to the - // unicode_script_subtag production replaced by the string script. - tokens[1] = script_str.get(); - } - } + // a. If tag does not contain a unicode_script_subtag production, then + // i. Set tag to the concatenation of the unicode_language_subtag + // production of tag, "-", script, and the rest of tag. + // b. Else, + // i. Set tag to tag with the substring corresponding to the + // unicode_script_subtag production replaced by the string script. // 12. If region is not undefined, then - if (maybe_region.FromJust()) { - // a. If tag does not contain a unicode_region_subtag production, then - // i. Set tag to the concatenation of the unicode_language_subtag - // production of tag, the substring corresponding to the "-" - // unicode_script_subtag production if present, "-", region, and - // the rest of tag. - // b. Else, - // i. Set tag to tag with the substring corresponding to the - // unicode_region_subtag production replaced by the string region. 
- if (tokens.size() > 1 && IsUnicodeRegionSubtag(tokens[1])) { - tokens[1] = region_str.get(); - } else if (tokens.size() > 1 && IsUnicodeScriptSubtag(tokens[1])) { - if (tokens.size() > 2 && IsUnicodeRegionSubtag(tokens[2])) { - tokens[2] = region_str.get(); - } else { - tokens.insert(tokens.begin() + 2, region_str.get()); - } - } else { - tokens.insert(tokens.begin() + 1, region_str.get()); - } - } - - std::string replaced; - for (auto it = tokens.begin(); it != tokens.end(); it++) { - replaced += *it; - if (it + 1 != tokens.end()) { - replaced += '-'; - } - } - + // a. If tag does not contain a unicode_region_subtag production, then + // i. Set tag to the concatenation of the unicode_language_subtag + // production of tag, the substring corresponding to the "-" + // unicode_script_subtag production if present, "-", region, and + // the rest of tag. + // b. Else, + // i. Set tag to tag with the substring corresponding to the + // unicode_region_subtag production replaced by the string region. // 13. Return CanonicalizeLanguageTag(tag). 
- return Intl::CanonicalizeLanguageTag(isolate, replaced); + return Just(true); } } // namespace @@ -393,21 +317,22 @@ MaybeHandle JSLocale::Initialize(Isolate* isolate, Handle locale, Handle locale_str, Handle options) { - Maybe maybe_locale = - ApplyOptionsToTag(isolate, locale_str, options); - MAYBE_RETURN(maybe_locale, MaybeHandle()); - UErrorCode status = U_ZERO_ERROR; - icu::Locale icu_locale = - icu::Locale::forLanguageTag(maybe_locale.FromJust().c_str(), status); - if (U_FAILURE(status)) { + icu::LocaleBuilder builder; + Maybe maybe_apply = + ApplyOptionsToTag(isolate, locale_str, options, &builder); + MAYBE_RETURN(maybe_apply, MaybeHandle()); + if (!maybe_apply.FromJust()) { THROW_NEW_ERROR(isolate, NewRangeError(MessageTemplate::kLocaleBadParameters), JSLocale); } - Maybe error = InsertOptionsIntoLocale(isolate, options, &icu_locale); - MAYBE_RETURN(error, MaybeHandle()); - if (!error.FromJust()) { + Maybe maybe_insert = + InsertOptionsIntoLocale(isolate, options, &builder); + MAYBE_RETURN(maybe_insert, MaybeHandle()); + UErrorCode status = U_ZERO_ERROR; + icu::Locale icu_locale = builder.build(status); + if (!maybe_insert.FromJust() || U_FAILURE(status)) { THROW_NEW_ERROR(isolate, NewRangeError(MessageTemplate::kLocaleBadParameters), JSLocale); @@ -458,28 +383,28 @@ Handle JSLocale::Minimize(Isolate* isolate, String locale) { Handle JSLocale::Language(Isolate* isolate, Handle locale) { Factory* factory = isolate->factory(); - const char* language = locale->icu_locale()->raw()->getLanguage(); + const char* language = locale->icu_locale().raw()->getLanguage(); if (strlen(language) == 0) return factory->undefined_value(); return factory->NewStringFromAsciiChecked(language); } Handle JSLocale::Script(Isolate* isolate, Handle locale) { Factory* factory = isolate->factory(); - const char* script = locale->icu_locale()->raw()->getScript(); + const char* script = locale->icu_locale().raw()->getScript(); if (strlen(script) == 0) return factory->undefined_value(); 
return factory->NewStringFromAsciiChecked(script); } Handle JSLocale::Region(Isolate* isolate, Handle locale) { Factory* factory = isolate->factory(); - const char* region = locale->icu_locale()->raw()->getCountry(); + const char* region = locale->icu_locale().raw()->getCountry(); if (strlen(region) == 0) return factory->undefined_value(); return factory->NewStringFromAsciiChecked(region); } Handle JSLocale::BaseName(Isolate* isolate, Handle locale) { icu::Locale icu_locale = - icu::Locale::createFromName(locale->icu_locale()->raw()->getBaseName()); + icu::Locale::createFromName(locale->icu_locale().raw()->getBaseName()); std::string base_name = Intl::ToLanguageTag(icu_locale).FromJust(); return isolate->factory()->NewStringFromAsciiChecked(base_name.c_str()); } @@ -502,7 +427,7 @@ Handle JSLocale::HourCycle(Isolate* isolate, Handle locale) { Handle JSLocale::Numeric(Isolate* isolate, Handle locale) { Factory* factory = isolate->factory(); - icu::Locale* icu_locale = locale->icu_locale()->raw(); + icu::Locale* icu_locale = locale->icu_locale().raw(); UErrorCode status = U_ZERO_ERROR; std::string numeric = icu_locale->getUnicodeKeywordValue("kn", status); @@ -515,7 +440,7 @@ Handle JSLocale::NumberingSystem(Isolate* isolate, } std::string JSLocale::ToString(Handle locale) { - icu::Locale* icu_locale = locale->icu_locale()->raw(); + icu::Locale* icu_locale = locale->icu_locale().raw(); return Intl::ToLanguageTag(*icu_locale).FromJust(); } diff --git a/deps/v8/src/objects/js-locale.h b/deps/v8/src/objects/js-locale.h index 120ddeb965..1a833e0e18 100644 --- a/deps/v8/src/objects/js-locale.h +++ b/deps/v8/src/objects/js-locale.h @@ -9,11 +9,11 @@ #ifndef V8_OBJECTS_JS_LOCALE_H_ #define V8_OBJECTS_JS_LOCALE_H_ -#include "src/global-handles.h" +#include "src/execution/isolate.h" +#include "src/handles/global-handles.h" #include "src/heap/factory.h" -#include "src/isolate.h" -#include "src/objects.h" #include "src/objects/managed.h" +#include "src/objects/objects.h" // Has 
to be the last include (doesn't have include guards): #include "src/objects/object-macros.h" @@ -58,12 +58,8 @@ class JSLocale : public JSObject { DECL_VERIFIER(JSLocale) // Layout description. -#define JS_LOCALE_FIELDS(V) \ - V(kICULocaleOffset, kTaggedSize) \ - V(kSize, 0) - - DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize, JS_LOCALE_FIELDS) -#undef JS_LOCALE_FIELDS + DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize, + TORQUE_GENERATED_JSLOCALE_FIELDS) OBJECT_CONSTRUCTORS(JSLocale, JSObject); }; diff --git a/deps/v8/src/objects/js-number-format-inl.h b/deps/v8/src/objects/js-number-format-inl.h index 3edf6f1ea3..bd76dfe556 100644 --- a/deps/v8/src/objects/js-number-format-inl.h +++ b/deps/v8/src/objects/js-number-format-inl.h @@ -9,8 +9,8 @@ #ifndef V8_OBJECTS_JS_NUMBER_FORMAT_INL_H_ #define V8_OBJECTS_JS_NUMBER_FORMAT_INL_H_ -#include "src/objects-inl.h" #include "src/objects/js-number-format.h" +#include "src/objects/objects-inl.h" // Has to be the last include (doesn't have include guards): #include "src/objects/object-macros.h" @@ -21,35 +21,51 @@ namespace internal { OBJECT_CONSTRUCTORS_IMPL(JSNumberFormat, JSObject) ACCESSORS(JSNumberFormat, locale, String, kLocaleOffset) -ACCESSORS(JSNumberFormat, icu_number_format, Managed, - kICUNumberFormatOffset) +ACCESSORS(JSNumberFormat, icu_number_formatter, + Managed, + kIcuNumberFormatterOffset) ACCESSORS(JSNumberFormat, bound_format, Object, kBoundFormatOffset) + +// Currenct ECMA 402 spec mandate to record (Min|Max)imumFractionDigits +// uncondictionally while the unified number proposal eventually will only +// record either (Min|Max)imumFractionDigits or (Min|Max)imumSignaficantDigits +// Since LocalizedNumberFormatter can only remember one set, and during +// 2019-1-17 ECMA402 meeting that the committee decide not to take a PR to +// address that prior to the unified number proposal, we have to add these two +// 5 bits int into flags to remember the (Min|Max)imumFractionDigits while +// 
(Min|Max)imumSignaficantDigits is present. +// TODO(ftang) remove the following once we ship int-number-format-unified +// * SMI_ACCESSORS of flags +// * Four inline functions: (set_)?(min|max)imum_fraction_digits + SMI_ACCESSORS(JSNumberFormat, flags, kFlagsOffset) -inline void JSNumberFormat::set_style(Style style) { - DCHECK_LT(style, Style::COUNT); +inline int JSNumberFormat::minimum_fraction_digits() const { + return MinimumFractionDigitsBits::decode(flags()); +} + +inline void JSNumberFormat::set_minimum_fraction_digits(int digits) { + DCHECK_GE(MinimumFractionDigitsBits::kMax, digits); + DCHECK_LE(0, digits); + DCHECK_GE(20, digits); int hints = flags(); - hints = StyleBits::update(hints, style); + hints = MinimumFractionDigitsBits::update(hints, digits); set_flags(hints); } -inline JSNumberFormat::Style JSNumberFormat::style() const { - return StyleBits::decode(flags()); +inline int JSNumberFormat::maximum_fraction_digits() const { + return MaximumFractionDigitsBits::decode(flags()); } -inline void JSNumberFormat::set_currency_display( - CurrencyDisplay currency_display) { - DCHECK_LT(currency_display, CurrencyDisplay::COUNT); +inline void JSNumberFormat::set_maximum_fraction_digits(int digits) { + DCHECK_GE(MaximumFractionDigitsBits::kMax, digits); + DCHECK_LE(0, digits); + DCHECK_GE(20, digits); int hints = flags(); - hints = CurrencyDisplayBits::update(hints, currency_display); + hints = MaximumFractionDigitsBits::update(hints, digits); set_flags(hints); } -inline JSNumberFormat::CurrencyDisplay JSNumberFormat::currency_display() - const { - return CurrencyDisplayBits::decode(flags()); -} - CAST_ACCESSOR(JSNumberFormat) } // namespace internal diff --git a/deps/v8/src/objects/js-number-format.cc b/deps/v8/src/objects/js-number-format.cc index c490eeef57..67d545e0be 100644 --- a/deps/v8/src/objects/js-number-format.cc +++ b/deps/v8/src/objects/js-number-format.cc @@ -11,34 +11,269 @@ #include #include -#include "src/isolate.h" -#include 
"src/objects-inl.h" +#include "src/execution/isolate.h" #include "src/objects/intl-objects.h" #include "src/objects/js-number-format-inl.h" +#include "src/objects/objects-inl.h" +#include "unicode/currunit.h" #include "unicode/decimfmt.h" #include "unicode/locid.h" +#include "unicode/nounit.h" +#include "unicode/numberformatter.h" #include "unicode/numfmt.h" +#include "unicode/ucurr.h" #include "unicode/uloc.h" +#include "unicode/unumberformatter.h" +#include "unicode/uvernum.h" // for U_ICU_VERSION_MAJOR_NUM namespace v8 { namespace internal { namespace { -UNumberFormatStyle ToNumberFormatStyle( - JSNumberFormat::CurrencyDisplay currency_display) { +// [[Style]] is one of the values "decimal", "percent", "currency", +// or "unit" identifying the style of the number format. +// Note: "unit" is added in proposal-unified-intl-numberformat +enum class Style { + DECIMAL, + PERCENT, + CURRENCY, + UNIT, +}; + +// [[CurrencyDisplay]] is one of the values "code", "symbol", "name", +// or "narrow-symbol" identifying the display of the currency number format. +// Note: "narrow-symbol" is added in proposal-unified-intl-numberformat +enum class CurrencyDisplay { + CODE, + SYMBOL, + NAME, + NARROW_SYMBOL, +}; + +// [[CurrencySign]] is one of the String values "standard" or "accounting", +// specifying whether to render negative numbers in accounting format, often +// signified by parenthesis. It is only used when [[Style]] has the value +// "currency" and when [[SignDisplay]] is not "never". +enum class CurrencySign { + STANDARD, + ACCOUNTING, +}; + +// [[UnitDisplay]] is one of the String values "short", "narrow", or "long", +// specifying whether to display the unit as a symbol, narrow symbol, or +// localized long name if formatting with the "unit" or "percent" style. It is +// only used when [[Style]] has the value "unit" or "percent". 
+enum class UnitDisplay { + SHORT, + NARROW, + LONG, +}; + +// [[Notation]] is one of the String values "standard", "scientific", +// "engineering", or "compact", specifying whether the number should be +// displayed without scaling, scaled to the units place with the power of ten +// in scientific notation, scaled to the nearest thousand with the power of +// ten in scientific notation, or scaled to the nearest locale-dependent +// compact decimal notation power of ten with the corresponding compact +// decimal notation affix. + +enum class Notation { + STANDARD, + SCIENTIFIC, + ENGINEERING, + COMPACT, +}; + +// [[CompactDisplay]] is one of the String values "short" or "long", +// specifying whether to display compact notation affixes in short form ("5K") +// or long form ("5 thousand") if formatting with the "compact" notation. It +// is only used when [[Notation]] has the value "compact". +enum class CompactDisplay { + SHORT, + LONG, +}; + +// [[SignDisplay]] is one of the String values "auto", "always", "never", or +// "except-zero", specifying whether to show the sign on negative numbers +// only, positive and negative numbers including zero, neither positive nor +// negative numbers, or positive and negative numbers but not zero. 
+enum class SignDisplay { + AUTO, + ALWAYS, + NEVER, + EXCEPT_ZERO, +}; + +UNumberUnitWidth ToUNumberUnitWidth(CurrencyDisplay currency_display) { switch (currency_display) { - case JSNumberFormat::CurrencyDisplay::SYMBOL: - return UNUM_CURRENCY; - case JSNumberFormat::CurrencyDisplay::CODE: - return UNUM_CURRENCY_ISO; - case JSNumberFormat::CurrencyDisplay::NAME: - return UNUM_CURRENCY_PLURAL; - case JSNumberFormat::CurrencyDisplay::COUNT: - UNREACHABLE(); + case CurrencyDisplay::SYMBOL: + return UNumberUnitWidth::UNUM_UNIT_WIDTH_SHORT; + case CurrencyDisplay::CODE: + return UNumberUnitWidth::UNUM_UNIT_WIDTH_ISO_CODE; + case CurrencyDisplay::NAME: + return UNumberUnitWidth::UNUM_UNIT_WIDTH_FULL_NAME; + case CurrencyDisplay::NARROW_SYMBOL: + return UNumberUnitWidth::UNUM_UNIT_WIDTH_NARROW; + } +} + +UNumberUnitWidth ToUNumberUnitWidth(UnitDisplay unit_display) { + switch (unit_display) { + case UnitDisplay::SHORT: + return UNumberUnitWidth::UNUM_UNIT_WIDTH_SHORT; + case UnitDisplay::LONG: + return UNumberUnitWidth::UNUM_UNIT_WIDTH_FULL_NAME; + case UnitDisplay::NARROW: + return UNumberUnitWidth::UNUM_UNIT_WIDTH_NARROW; + } +} + +UNumberSignDisplay ToUNumberSignDisplay(SignDisplay sign_display, + CurrencySign currency_sign) { + switch (sign_display) { + case SignDisplay::AUTO: + if (currency_sign == CurrencySign::ACCOUNTING) { + return UNumberSignDisplay::UNUM_SIGN_ACCOUNTING; + } + DCHECK(currency_sign == CurrencySign::STANDARD); + return UNumberSignDisplay::UNUM_SIGN_AUTO; + case SignDisplay::NEVER: + return UNumberSignDisplay::UNUM_SIGN_NEVER; + case SignDisplay::ALWAYS: + if (currency_sign == CurrencySign::ACCOUNTING) { + return UNumberSignDisplay::UNUM_SIGN_ACCOUNTING_ALWAYS; + } + DCHECK(currency_sign == CurrencySign::STANDARD); + return UNumberSignDisplay::UNUM_SIGN_ALWAYS; + case SignDisplay::EXCEPT_ZERO: + if (currency_sign == CurrencySign::ACCOUNTING) { + return UNumberSignDisplay::UNUM_SIGN_ACCOUNTING_EXCEPT_ZERO; + } + DCHECK(currency_sign == 
CurrencySign::STANDARD); + return UNumberSignDisplay::UNUM_SIGN_EXCEPT_ZERO; + } +} + +icu::number::Notation ToICUNotation(Notation notation, + CompactDisplay compact_display) { + switch (notation) { + case Notation::STANDARD: + return icu::number::Notation::simple(); + case Notation::SCIENTIFIC: + return icu::number::Notation::scientific(); + case Notation::ENGINEERING: + return icu::number::Notation::engineering(); + case Notation::COMPACT: + if (compact_display == CompactDisplay::SHORT) { + return icu::number::Notation::compactShort(); + } + DCHECK(compact_display == CompactDisplay::LONG); + return icu::number::Notation::compactLong(); } } +std::map CreateUnitMap() { + UErrorCode status = U_ZERO_ERROR; + int32_t total = icu::MeasureUnit::getAvailable(nullptr, 0, status); + CHECK(U_FAILURE(status)); + status = U_ZERO_ERROR; + // See the list in ecma402 #sec-issanctionedsimpleunitidentifier + std::set sanctioned( + {"acre", "bit", "byte", "celsius", + "centimeter", "day", "degree", "fahrenheit", + "foot", "gigabit", "gigabyte", "gram", + "hectare", "hour", "inch", "kilobit", + "kilobyte", "kilogram", "kilometer", "megabit", + "megabyte", "meter", "mile", "mile-scandinavian", + "millimeter", "millisecond", "minute", "month", + "ounce", "percent", "petabyte", "pound", + "second", "stone", "terabit", "terabyte", + "week", "yard", "year"}); + std::vector units(total); + total = icu::MeasureUnit::getAvailable(units.data(), total, status); + CHECK(U_SUCCESS(status)); + std::map map; + for (auto it = units.begin(); it != units.end(); ++it) { + if (sanctioned.count(it->getSubtype()) > 0) { + map[it->getSubtype()] = *it; + } + } + return map; +} + +class UnitFactory { + public: + UnitFactory() : map_(CreateUnitMap()) {} + virtual ~UnitFactory() {} + + // ecma402 #sec-issanctionedsimpleunitidentifier + icu::MeasureUnit create(const std::string& unitIdentifier) { + // 1. If unitIdentifier is in the following list, return true. 
+ auto found = map_.find(unitIdentifier); + if (found != map_.end()) { + return found->second; + } + // 2. Return false. + return icu::NoUnit::base(); + } + + private: + std::map map_; +}; + +// ecma402 #sec-issanctionedsimpleunitidentifier +icu::MeasureUnit IsSanctionedUnitIdentifier(const std::string& unit) { + static base::LazyInstance::type factory = + LAZY_INSTANCE_INITIALIZER; + return factory.Pointer()->create(unit); +} + +// ecma402 #sec-iswellformedunitidentifier +Maybe> IsWellFormedUnitIdentifier( + Isolate* isolate, const std::string& unit) { + icu::MeasureUnit result = IsSanctionedUnitIdentifier(unit); + icu::MeasureUnit none = icu::NoUnit::base(); + // 1. If the result of IsSanctionedUnitIdentifier(unitIdentifier) is true, + // then + if (result != none) { + // a. Return true. + std::pair pair(result, none); + return Just(pair); + } + // 2. If the substring "-per-" does not occur exactly once in unitIdentifier, + // then + size_t first_per = unit.find("-per-"); + if (first_per == std::string::npos || + unit.find("-per-", first_per + 5) != std::string::npos) { + // a. Return false. + return Nothing>(); + } + // 3. Let numerator be the substring of unitIdentifier from the beginning to + // just before "-per-". + std::string numerator = unit.substr(0, first_per); + + // 4. If the result of IsSanctionedUnitIdentifier(numerator) is false, then + result = IsSanctionedUnitIdentifier(numerator); + if (result == none) { + // a. Return false. + return Nothing>(); + } + // 5. Let denominator be the substring of unitIdentifier from just after + // "-per-" to the end. + std::string denominator = unit.substr(first_per + 5); + + // 6. If the result of IsSanctionedUnitIdentifier(denominator) is false, then + icu::MeasureUnit den_result = IsSanctionedUnitIdentifier(denominator); + if (den_result == none) { + // a. Return false. + return Nothing>(); + } + // 7. Return true. 
+ std::pair pair(result, den_result); + return Just(pair); +} + // ecma-402/#sec-currencydigits // The currency is expected to an all upper case string value. int CurrencyDigits(const icu::UnicodeString& currency) { @@ -69,23 +304,351 @@ bool IsWellFormedCurrencyCode(const std::string& currency) { return (IsAToZ(currency[0]) && IsAToZ(currency[1]) && IsAToZ(currency[2])); } +// Parse the 'style' from the skeleton. +Style StyleFromSkeleton(const icu::UnicodeString& skeleton) { + // Ex: skeleton as + // "percent precision-integer rounding-mode-half-up scale/100" + if (skeleton.indexOf("percent") >= 0 && skeleton.indexOf("scale/100") >= 0) { + return Style::PERCENT; + } + // Ex: skeleton as "currency/TWD .00 rounding-mode-half-up" + if (skeleton.indexOf("currency") >= 0) { + return Style::CURRENCY; + } + // Ex: skeleton as + // "measure-unit/length-meter .### rounding-mode-half-up unit-width-narrow" + // or special case for "percent .### rounding-mode-half-up" + if (skeleton.indexOf("measure-unit") >= 0 || + skeleton.indexOf("percent") >= 0) { + return Style::UNIT; + } + // Ex: skeleton as ".### rounding-mode-half-up" + return Style::DECIMAL; +} + +// Return the style as a String. +Handle StyleAsString(Isolate* isolate, Style style) { + switch (style) { + case Style::PERCENT: + return ReadOnlyRoots(isolate).percent_string_handle(); + case Style::CURRENCY: + return ReadOnlyRoots(isolate).currency_string_handle(); + case Style::UNIT: + return ReadOnlyRoots(isolate).unit_string_handle(); + case Style::DECIMAL: + return ReadOnlyRoots(isolate).decimal_string_handle(); + } + UNREACHABLE(); +} + +// Parse the 'currencyDisplay' from the skeleton. 
+Handle CurrencyDisplayString(Isolate* isolate, + const icu::UnicodeString& skeleton) { + // Ex: skeleton as + // "currency/TWD .00 rounding-mode-half-up unit-width-iso-code" + if (skeleton.indexOf("unit-width-iso-code") >= 0) { + return ReadOnlyRoots(isolate).code_string_handle(); + } + // Ex: skeleton as + // "currency/TWD .00 rounding-mode-half-up unit-width-full-name;" + if (skeleton.indexOf("unit-width-full-name") >= 0) { + return ReadOnlyRoots(isolate).name_string_handle(); + } + // Ex: skeleton as + // "currency/TWD .00 rounding-mode-half-up unit-width-narrow; + if (skeleton.indexOf("unit-width-narrow") >= 0) { + return ReadOnlyRoots(isolate).narrow_symbol_string_handle(); + } + // Ex: skeleton as "currency/TWD .00 rounding-mode-half-up" + return ReadOnlyRoots(isolate).symbol_string_handle(); +} + +// Return true if there are no "group-off" in the skeleton. +bool UseGroupingFromSkeleton(const icu::UnicodeString& skeleton) { + return skeleton.indexOf("group-off") == -1; +} + +// Parse currency code from skeleton. For example, skeleton as +// "currency/TWD .00 rounding-mode-half-up unit-width-full-name;" +std::string CurrencyFromSkeleton(const icu::UnicodeString& skeleton) { + std::string str; + str = skeleton.toUTF8String(str); + std::string search("currency/"); + size_t index = str.find(search); + if (index == str.npos) return ""; + return str.substr(index + search.size(), 3); +} + +// Return CurrencySign as string based on skeleton. +Handle CurrencySignString(Isolate* isolate, + const icu::UnicodeString& skeleton) { + // Ex: skeleton as + // "currency/TWD .00 rounding-mode-half-up sign-accounting-always" OR + // "currency/TWD .00 rounding-mode-half-up sign-accounting-except-zero" + if (skeleton.indexOf("sign-accounting") >= 0) { + return ReadOnlyRoots(isolate).accounting_string_handle(); + } + return ReadOnlyRoots(isolate).standard_string_handle(); +} + +// Return UnitDisplay as string based on skeleton. 
+Handle UnitDisplayString(Isolate* isolate, + const icu::UnicodeString& skeleton) { + // Ex: skeleton as + // "measure-unit/length-meter .### rounding-mode-half-up unit-width-full-name" + if (skeleton.indexOf("unit-width-full-name") >= 0) { + return ReadOnlyRoots(isolate).long_string_handle(); + } + // Ex: skeleton as + // "measure-unit/length-meter .### rounding-mode-half-up unit-width-narrow". + if (skeleton.indexOf("unit-width-narrow") >= 0) { + return ReadOnlyRoots(isolate).narrow_string_handle(); + } + // Ex: skeleton as + // "measure-unit/length-foot .### rounding-mode-half-up" + return ReadOnlyRoots(isolate).short_string_handle(); +} + +// Parse Notation from skeleton. +Notation NotationFromSkeleton(const icu::UnicodeString& skeleton) { + // Ex: skeleton as + // "scientific .### rounding-mode-half-up" + if (skeleton.indexOf("scientific") >= 0) { + return Notation::SCIENTIFIC; + } + // Ex: skeleton as + // "engineering .### rounding-mode-half-up" + if (skeleton.indexOf("engineering") >= 0) { + return Notation::ENGINEERING; + } + // Ex: skeleton as + // "compact-short .### rounding-mode-half-up" or + // "compact-long .### rounding-mode-half-up + if (skeleton.indexOf("compact-") >= 0) { + return Notation::COMPACT; + } + // Ex: skeleton as + // "measure-unit/length-foot .### rounding-mode-half-up" + return Notation::STANDARD; +} + +Handle NotationAsString(Isolate* isolate, Notation notation) { + switch (notation) { + case Notation::SCIENTIFIC: + return ReadOnlyRoots(isolate).scientific_string_handle(); + case Notation::ENGINEERING: + return ReadOnlyRoots(isolate).engineering_string_handle(); + case Notation::COMPACT: + return ReadOnlyRoots(isolate).compact_string_handle(); + case Notation::STANDARD: + return ReadOnlyRoots(isolate).standard_string_handle(); + } + UNREACHABLE(); +} + +// Return CompactString as string based on skeleton. 
+Handle CompactDisplayString(Isolate* isolate, + const icu::UnicodeString& skeleton) { + // Ex: skeleton as + // "compact-long .### rounding-mode-half-up" + if (skeleton.indexOf("compact-long") >= 0) { + return ReadOnlyRoots(isolate).long_string_handle(); + } + // Ex: skeleton as + // "compact-short .### rounding-mode-half-up" + DCHECK_GE(skeleton.indexOf("compact-short"), 0); + return ReadOnlyRoots(isolate).short_string_handle(); +} + +// Return SignDisplay as string based on skeleton. +Handle SignDisplayString(Isolate* isolate, + const icu::UnicodeString& skeleton) { + // Ex: skeleton as + // "currency/TWD .00 rounding-mode-half-up sign-never" + if (skeleton.indexOf("sign-never") >= 0) { + return ReadOnlyRoots(isolate).never_string_handle(); + } + // Ex: skeleton as + // ".### rounding-mode-half-up sign-always" or + // "currency/TWD .00 rounding-mode-half-up sign-accounting-always" + if (skeleton.indexOf("sign-always") >= 0 || + skeleton.indexOf("sign-accounting-always") >= 0) { + return ReadOnlyRoots(isolate).always_string_handle(); + } + // Ex: skeleton as + // "currency/TWD .00 rounding-mode-half-up sign-accounting-except-zero" or + // "currency/TWD .00 rounding-mode-half-up sign-except-zero" + if (skeleton.indexOf("sign-accounting-except-zero") >= 0 || + skeleton.indexOf("sign-except-zero") >= 0) { + return ReadOnlyRoots(isolate).except_zero_string_handle(); + } + return ReadOnlyRoots(isolate).auto_string_handle(); +} + +// Return the minimum integer digits by counting the number of '0' after +// "integer-width/+" in the skeleton. +// Ex: Return 15 for skeleton as +// “currency/TWD .00 rounding-mode-half-up integer-width/+000000000000000” +// 1 +// 123456789012345 +// Return default value as 1 if there are no "integer-width/+". 
+int32_t MinimumIntegerDigitsFromSkeleton(const icu::UnicodeString& skeleton) { + // count the number of 0 after "integer-width/+" + icu::UnicodeString search("integer-width/+"); + int32_t index = skeleton.indexOf(search); + if (index < 0) return 1; // return 1 if cannot find it. + index += search.length(); + int32_t matched = 0; + while (index < skeleton.length() && skeleton[index] == '0') { + matched++; + index++; + } + CHECK_GT(matched, 0); + return matched; +} + +// Return true if there are fraction digits, false if not. +// The minimum fraction digits is the number of '0' after '.' in the skeleton +// The maximum fraction digits is the number of '#' after the above '0's plus +// the minimum fraction digits. +// For example, as skeleton “.000#### rounding-mode-half-up” +// 123 +// 4567 +// Set The minimum as 3 and maximum as 7. +bool FractionDigitsFromSkeleton(const icu::UnicodeString& skeleton, + int32_t* minimum, int32_t* maximum) { + icu::UnicodeString search("."); + int32_t index = skeleton.indexOf(search); + if (index < 0) return false; + *minimum = 0; + index++; // skip the '.' + while (index < skeleton.length() && skeleton[index] == '0') { + (*minimum)++; + index++; + } + *maximum = *minimum; + while (index < skeleton.length() && skeleton[index] == '#') { + (*maximum)++; + index++; + } + return true; +} + +// Return true if there are significant digits, false if not. +// The minimum significant digits is the number of '@' in the skeleton +// The maximum significant digits is the number of '#' after these '@'s plus +// the minimum significant digits. +// Ex: Skeleton as "@@@@@####### rounding-mode-half-up" +// 12345 +// 6789012 +// Set The minimum as 5 and maximum as 12. 
+bool SignificantDigitsFromSkeleton(const icu::UnicodeString& skeleton, + int32_t* minimum, int32_t* maximum) { + icu::UnicodeString search("@"); + int32_t index = skeleton.indexOf(search); + if (index < 0) return false; + *minimum = 1; + index++; // skip the first '@' + while (index < skeleton.length() && skeleton[index] == '@') { + (*minimum)++; + index++; + } + *maximum = *minimum; + while (index < skeleton.length() && skeleton[index] == '#') { + (*maximum)++; + index++; + } + return true; +} + +// Ex: percent .### rounding-mode-half-up +// Special case for "percent" +// Ex: "measure-unit/length-kilometer per-measure-unit/duration-hour .### +// rounding-mode-half-up" should return "kilometer-per-unit". +// Ex: "measure-unit/duration-year .### rounding-mode-half-up" should return +// "year". +std::string UnitFromSkeleton(const icu::UnicodeString& skeleton) { + std::string str; + str = skeleton.toUTF8String(str); + // Special case for "percent" first. + if (str.find("percent") != str.npos) { + return "percent"; + } + std::string search("measure-unit/"); + size_t begin = str.find(search); + if (begin == str.npos) { + return ""; + } + // Skip the type (ex: "length"). + // "measure-unit/length-kilometer per-measure-unit/duration-hour" + // b + begin = str.find("-", begin + search.size()); + if (begin == str.npos) { + return ""; + } + begin++; // Skip the '-'. + // Find the end of the subtype. 
+ size_t end = str.find(" ", begin); + // "measure-unit/length-kilometer per-measure-unit/duration-hour" + // b e + if (end == str.npos) { + end = str.size(); + return str.substr(begin, end - begin); + } + // "measure-unit/length-kilometer per-measure-unit/duration-hour" + // b e + // [result ] + std::string result = str.substr(begin, end - begin); + begin = end + 1; + // "measure-unit/length-kilometer per-measure-unit/duration-hour" + // [result ]eb + std::string search_per("per-measure-unit/"); + begin = str.find(search_per, begin); + // "measure-unit/length-kilometer per-measure-unit/duration-hour" + // [result ]e b + if (begin == str.npos) { + return result; + } + // Skip the type (ex: "duration"). + begin = str.find("-", begin + search_per.size()); + // "measure-unit/length-kilometer per-measure-unit/duration-hour" + // [result ]e b + if (begin == str.npos) { + return result; + } + begin++; // Skip the '-'. + // "measure-unit/length-kilometer per-measure-unit/duration-hour" + // [result ]e b + end = str.find(" ", begin); + if (end == str.npos) { + end = str.size(); + } + // "measure-unit/length-kilometer per-measure-unit/duration-hour" + // [result ] b e + return result + "-per-" + str.substr(begin, end - begin); +} + } // anonymous namespace // static // ecma402 #sec-intl.numberformat.prototype.resolvedoptions Handle JSNumberFormat::ResolvedOptions( - Isolate* isolate, Handle number_format_holder) { + Isolate* isolate, Handle number_format) { Factory* factory = isolate->factory(); + UErrorCode status = U_ZERO_ERROR; + icu::number::LocalizedNumberFormatter* icu_number_formatter = + number_format->icu_number_formatter().raw(); + icu::UnicodeString skeleton = icu_number_formatter->toSkeleton(status); + CHECK(U_SUCCESS(status)); + + std::string s_str; + s_str = skeleton.toUTF8String(s_str); + // 4. Let options be ! ObjectCreate(%ObjectPrototype%). 
Handle options = factory->NewJSObject(isolate->object_function()); - icu::NumberFormat* number_format = - number_format_holder->icu_number_format()->raw(); - CHECK_NOT_NULL(number_format); - - Handle locale = - Handle(number_format_holder->locale(), isolate); + Handle locale = Handle(number_format->locale(), isolate); std::unique_ptr locale_str = locale->ToCString(); icu::Locale icu_locale = Intl::CreateICULocale(locale_str.get()); @@ -117,69 +680,120 @@ Handle JSNumberFormat::ResolvedOptions( Just(kDontThrow)) .FromJust()); } + Style style = StyleFromSkeleton(skeleton); CHECK(JSReceiver::CreateDataProperty( isolate, options, factory->style_string(), - number_format_holder->StyleAsString(), Just(kDontThrow)) + StyleAsString(isolate, style), Just(kDontThrow)) .FromJust()); - if (number_format_holder->style() == Style::CURRENCY) { - icu::UnicodeString currency(number_format->getCurrency()); - DCHECK(!currency.isEmpty()); + std::string currency = CurrencyFromSkeleton(skeleton); + if (!currency.empty()) { CHECK(JSReceiver::CreateDataProperty( isolate, options, factory->currency_string(), - factory - ->NewStringFromTwoByte(Vector( - reinterpret_cast(currency.getBuffer()), - currency.length())) - .ToHandleChecked(), + factory->NewStringFromAsciiChecked(currency.c_str()), Just(kDontThrow)) .FromJust()); CHECK(JSReceiver::CreateDataProperty( isolate, options, factory->currencyDisplay_string(), - number_format_holder->CurrencyDisplayAsString(), Just(kDontThrow)) + CurrencyDisplayString(isolate, skeleton), Just(kDontThrow)) .FromJust()); + if (FLAG_harmony_intl_numberformat_unified) { + CHECK(JSReceiver::CreateDataProperty( + isolate, options, factory->currencySign_string(), + CurrencySignString(isolate, skeleton), Just(kDontThrow)) + .FromJust()); + } } - CHECK(JSReceiver::CreateDataProperty( - isolate, options, factory->minimumIntegerDigits_string(), - factory->NewNumberFromInt(number_format->getMinimumIntegerDigits()), - Just(kDontThrow)) - .FromJust()); - CHECK( - 
JSReceiver::CreateDataProperty( - isolate, options, factory->minimumFractionDigits_string(), - factory->NewNumberFromInt(number_format->getMinimumFractionDigits()), - Just(kDontThrow)) - .FromJust()); + + if (FLAG_harmony_intl_numberformat_unified) { + std::string unit = UnitFromSkeleton(skeleton); + if (!unit.empty()) { + CHECK(JSReceiver::CreateDataProperty( + isolate, options, factory->unit_string(), + isolate->factory()->NewStringFromAsciiChecked(unit.c_str()), + Just(kDontThrow)) + .FromJust()); + } + if (style == Style::UNIT || style == Style::PERCENT) { + CHECK(JSReceiver::CreateDataProperty( + isolate, options, factory->unitDisplay_string(), + UnitDisplayString(isolate, skeleton), Just(kDontThrow)) + .FromJust()); + } + } + CHECK( JSReceiver::CreateDataProperty( - isolate, options, factory->maximumFractionDigits_string(), - factory->NewNumberFromInt(number_format->getMaximumFractionDigits()), + isolate, options, factory->minimumIntegerDigits_string(), + factory->NewNumberFromInt(MinimumIntegerDigitsFromSkeleton(skeleton)), Just(kDontThrow)) .FromJust()); - CHECK(number_format->getDynamicClassID() == - icu::DecimalFormat::getStaticClassID()); - icu::DecimalFormat* decimal_format = - static_cast(number_format); - CHECK_NOT_NULL(decimal_format); - if (decimal_format->areSignificantDigitsUsed()) { + int32_t minimum = 0, maximum = 0; + bool output_fraction = + FractionDigitsFromSkeleton(skeleton, &minimum, &maximum); + + if (!FLAG_harmony_intl_numberformat_unified && !output_fraction) { + // Currenct ECMA 402 spec mandate to record (Min|Max)imumFractionDigits + // uncondictionally while the unified number proposal eventually will only + // record either (Min|Max)imumFractionDigits or + // (Min|Max)imumSignaficantDigits Since LocalizedNumberFormatter can only + // remember one set, and during 2019-1-17 ECMA402 meeting that the committee + // decide not to take a PR to address that prior to the unified number + // proposal, we have to add these two 5 bits int into 
flags to remember the + // (Min|Max)imumFractionDigits while (Min|Max)imumSignaficantDigits is + // present. + // TODO(ftang) remove the following two lines once we ship + // int-number-format-unified + output_fraction = true; + minimum = number_format->minimum_fraction_digits(); + maximum = number_format->maximum_fraction_digits(); + } + if (output_fraction) { + CHECK(JSReceiver::CreateDataProperty( + isolate, options, factory->minimumFractionDigits_string(), + factory->NewNumberFromInt(minimum), Just(kDontThrow)) + .FromJust()); + CHECK(JSReceiver::CreateDataProperty( + isolate, options, factory->maximumFractionDigits_string(), + factory->NewNumberFromInt(maximum), Just(kDontThrow)) + .FromJust()); + } + minimum = 0; + maximum = 0; + if (SignificantDigitsFromSkeleton(skeleton, &minimum, &maximum)) { CHECK(JSReceiver::CreateDataProperty( isolate, options, factory->minimumSignificantDigits_string(), - factory->NewNumberFromInt( - decimal_format->getMinimumSignificantDigits()), - Just(kDontThrow)) + factory->NewNumberFromInt(minimum), Just(kDontThrow)) .FromJust()); CHECK(JSReceiver::CreateDataProperty( isolate, options, factory->maximumSignificantDigits_string(), - factory->NewNumberFromInt( - decimal_format->getMaximumSignificantDigits()), - Just(kDontThrow)) + factory->NewNumberFromInt(maximum), Just(kDontThrow)) .FromJust()); } + CHECK(JSReceiver::CreateDataProperty( isolate, options, factory->useGrouping_string(), - factory->ToBoolean((number_format->isGroupingUsed() == TRUE)), + factory->ToBoolean(UseGroupingFromSkeleton(skeleton)), Just(kDontThrow)) .FromJust()); + if (FLAG_harmony_intl_numberformat_unified) { + Notation notation = NotationFromSkeleton(skeleton); + CHECK(JSReceiver::CreateDataProperty( + isolate, options, factory->notation_string(), + NotationAsString(isolate, notation), Just(kDontThrow)) + .FromJust()); + // Only output compactDisplay when notation is compact. 
+ if (notation == Notation::COMPACT) { + CHECK(JSReceiver::CreateDataProperty( + isolate, options, factory->compactDisplay_string(), + CompactDisplayString(isolate, skeleton), Just(kDontThrow)) + .FromJust()); + } + CHECK(JSReceiver::CreateDataProperty( + isolate, options, factory->signDisplay_string(), + SignDisplayString(isolate, skeleton), Just(kDontThrow)) + .FromJust()); + } return options; } @@ -189,7 +803,7 @@ MaybeHandle JSNumberFormat::UnwrapNumberFormat( // old code copy from NumberFormat::Unwrap that has no spec comment and // compiled but fail unit tests. Handle native_context = - Handle(isolate->context()->native_context(), isolate); + Handle(isolate->context().native_context(), isolate); Handle constructor = Handle( JSFunction::cast(native_context->intl_number_format_function()), isolate); Handle object; @@ -216,7 +830,6 @@ MaybeHandle JSNumberFormat::UnwrapNumberFormat( MaybeHandle JSNumberFormat::Initialize( Isolate* isolate, Handle number_format, Handle locales, Handle options_obj) { - // set the flags to 0 ASAP. number_format->set_flags(0); Factory* factory = isolate->factory(); @@ -252,6 +865,19 @@ MaybeHandle JSNumberFormat::Initialize( MAYBE_RETURN(maybe_locale_matcher, MaybeHandle()); Intl::MatcherOption matcher = maybe_locale_matcher.FromJust(); + std::unique_ptr numbering_system_str = nullptr; + if (FLAG_harmony_intl_add_calendar_numbering_system) { + // 7. Let _numberingSystem_ be ? GetOption(_options_, `"numberingSystem"`, + // `"string"`, *undefined*, *undefined*). + Maybe maybe_numberingSystem = Intl::GetNumberingSystem( + isolate, options, "Intl.RelativeTimeFormat", &numbering_system_str); + // 8. If _numberingSystem_ is not *undefined*, then + // a. If _numberingSystem_ does not match the + // `(3*8alphanum) *("-" (3*8alphanum))` sequence, throw a *RangeError* + // exception. + MAYBE_RETURN(maybe_numberingSystem, MaybeHandle()); + } + // 7. Let localeData be %NumberFormat%.[[LocaleData]]. // 8. 
Let r be ResolveLocale(%NumberFormat%.[[AvailableLocales]], // requestedLocales, opt, %NumberFormat%.[[RelevantExtensionKeys]], @@ -261,24 +887,43 @@ MaybeHandle JSNumberFormat::Initialize( Intl::ResolveLocale(isolate, JSNumberFormat::GetAvailableLocales(), requested_locales, matcher, relevant_extension_keys); + UErrorCode status = U_ZERO_ERROR; + if (numbering_system_str != nullptr) { + r.icu_locale.setUnicodeKeywordValue("nu", numbering_system_str.get(), + status); + CHECK(U_SUCCESS(status)); + r.locale = Intl::ToLanguageTag(r.icu_locale).FromJust(); + } + // 9. Set numberFormat.[[Locale]] to r.[[locale]]. Handle locale_str = isolate->factory()->NewStringFromAsciiChecked(r.locale.c_str()); number_format->set_locale(*locale_str); // 11. Let dataLocale be r.[[dataLocale]]. - // + + icu::number::LocalizedNumberFormatter icu_number_formatter = + icu::number::NumberFormatter::withLocale(r.icu_locale) + .roundingMode(UNUM_ROUND_HALFUP); + // 12. Let style be ? GetOption(options, "style", "string", « "decimal", // "percent", "currency" », "decimal"). const char* service = "Intl.NumberFormat"; + + std::vector style_str_values({"decimal", "percent", "currency"}); + std::vector