author     Michaël Zasso <targos@protonmail.com>  2019-08-01 08:38:30 +0200
committer  Michaël Zasso <targos@protonmail.com>  2019-08-01 12:53:56 +0200
commit     2dcc3665abf57c3607cebffdeeca062f5894885d (patch)
tree       4f560748132edcfb4c22d6f967a7e80d23d7ea2c /deps/v8/src/objects
parent     1ee47d550c6de132f06110aa13eceb7551d643b3 (diff)
deps: update V8 to 7.6.303.28
PR-URL: https://github.com/nodejs/node/pull/28016
Reviewed-By: Colin Ihrig <cjihrig@gmail.com>
Reviewed-By: Refael Ackermann (רפאל פלחי) <refack@gmail.com>
Reviewed-By: Rich Trott <rtrott@gmail.com>
Reviewed-By: Michael Dawson <michael_dawson@ca.ibm.com>
Reviewed-By: Jiawen Geng <technicalcute@gmail.com>
Diffstat (limited to 'deps/v8/src/objects')
-rw-r--r--  deps/v8/src/objects/allocation-site-inl.h  22
-rw-r--r--  deps/v8/src/objects/allocation-site-scopes-inl.h  59
-rw-r--r--  deps/v8/src/objects/allocation-site-scopes.h  69
-rw-r--r--  deps/v8/src/objects/allocation-site.h  45
-rw-r--r--  deps/v8/src/objects/api-callbacks-inl.h  10
-rw-r--r--  deps/v8/src/objects/arguments-inl.h  20
-rw-r--r--  deps/v8/src/objects/arguments.h  2
-rw-r--r--  deps/v8/src/objects/bigint.cc  64
-rw-r--r--  deps/v8/src/objects/bigint.h  8
-rw-r--r--  deps/v8/src/objects/cell-inl.h  2
-rw-r--r--  deps/v8/src/objects/cell.h  2
-rw-r--r--  deps/v8/src/objects/code-inl.h  207
-rw-r--r--  deps/v8/src/objects/code.cc  143
-rw-r--r--  deps/v8/src/objects/code.h  27
-rw-r--r--  deps/v8/src/objects/compilation-cache-inl.h  32
-rw-r--r--  deps/v8/src/objects/compilation-cache.h  2
-rw-r--r--  deps/v8/src/objects/compressed-slots-inl.h  28
-rw-r--r--  deps/v8/src/objects/contexts-inl.h  256
-rw-r--r--  deps/v8/src/objects/contexts.cc  512
-rw-r--r--  deps/v8/src/objects/contexts.h  720
-rw-r--r--  deps/v8/src/objects/data-handler-inl.h  10
-rw-r--r--  deps/v8/src/objects/debug-objects-inl.h  12
-rw-r--r--  deps/v8/src/objects/debug-objects.cc  75
-rw-r--r--  deps/v8/src/objects/debug-objects.h  37
-rw-r--r--  deps/v8/src/objects/descriptor-array-inl.h  34
-rw-r--r--  deps/v8/src/objects/descriptor-array.h  27
-rw-r--r--  deps/v8/src/objects/dictionary-inl.h  50
-rw-r--r--  deps/v8/src/objects/dictionary.h  16
-rw-r--r--  deps/v8/src/objects/elements-inl.h  38
-rw-r--r--  deps/v8/src/objects/elements-kind.cc  266
-rw-r--r--  deps/v8/src/objects/elements-kind.h  317
-rw-r--r--  deps/v8/src/objects/elements.cc  4798
-rw-r--r--  deps/v8/src/objects/elements.h  241
-rw-r--r--  deps/v8/src/objects/embedder-data-array.cc  2
-rw-r--r--  deps/v8/src/objects/embedder-data-array.h  6
-rw-r--r--  deps/v8/src/objects/embedder-data-slot-inl.h  8
-rw-r--r--  deps/v8/src/objects/embedder-data-slot.h  4
-rw-r--r--  deps/v8/src/objects/feedback-cell-inl.h  13
-rw-r--r--  deps/v8/src/objects/feedback-cell.h  19
-rw-r--r--  deps/v8/src/objects/feedback-vector-inl.h  354
-rw-r--r--  deps/v8/src/objects/feedback-vector.cc  1420
-rw-r--r--  deps/v8/src/objects/feedback-vector.h  772
-rw-r--r--  deps/v8/src/objects/field-index-inl.h  73
-rw-r--r--  deps/v8/src/objects/field-index.h  127
-rw-r--r--  deps/v8/src/objects/field-type.cc  87
-rw-r--r--  deps/v8/src/objects/field-type.h  56
-rw-r--r--  deps/v8/src/objects/fixed-array-inl.h  523
-rw-r--r--  deps/v8/src/objects/fixed-array.h  153
-rw-r--r--  deps/v8/src/objects/foreign-inl.h  8
-rw-r--r--  deps/v8/src/objects/foreign.h  2
-rw-r--r--  deps/v8/src/objects/frame-array-inl.h  6
-rw-r--r--  deps/v8/src/objects/frame-array.h  2
-rw-r--r--  deps/v8/src/objects/free-space-inl.h  10
-rw-r--r--  deps/v8/src/objects/free-space.h  2
-rw-r--r--  deps/v8/src/objects/function-kind.h  194
-rw-r--r--  deps/v8/src/objects/hash-table-inl.h  8
-rw-r--r--  deps/v8/src/objects/hash-table.h  6
-rw-r--r--  deps/v8/src/objects/heap-number-inl.h  18
-rw-r--r--  deps/v8/src/objects/heap-object-inl.h  11
-rw-r--r--  deps/v8/src/objects/heap-object.h  14
-rw-r--r--  deps/v8/src/objects/instance-type-inl.h  15
-rw-r--r--  deps/v8/src/objects/instance-type.h  40
-rw-r--r--  deps/v8/src/objects/intl-objects.cc  223
-rw-r--r--  deps/v8/src/objects/intl-objects.h  34
-rw-r--r--  deps/v8/src/objects/intl-objects.tq  64
-rw-r--r--  deps/v8/src/objects/js-array-buffer-inl.h  71
-rw-r--r--  deps/v8/src/objects/js-array-buffer.cc  52
-rw-r--r--  deps/v8/src/objects/js-array-buffer.h  56
-rw-r--r--  deps/v8/src/objects/js-array-inl.h  10
-rw-r--r--  deps/v8/src/objects/js-array.h  2
-rw-r--r--  deps/v8/src/objects/js-break-iterator-inl.h  2
-rw-r--r--  deps/v8/src/objects/js-break-iterator.cc  10
-rw-r--r--  deps/v8/src/objects/js-break-iterator.h  2
-rw-r--r--  deps/v8/src/objects/js-collator-inl.h  2
-rw-r--r--  deps/v8/src/objects/js-collator.cc  6
-rw-r--r--  deps/v8/src/objects/js-collator.h  4
-rw-r--r--  deps/v8/src/objects/js-collection-inl.h  8
-rw-r--r--  deps/v8/src/objects/js-collection-iterator.h  4
-rw-r--r--  deps/v8/src/objects/js-collection.h  14
-rw-r--r--  deps/v8/src/objects/js-date-time-format-inl.h  8
-rw-r--r--  deps/v8/src/objects/js-date-time-format.cc  349
-rw-r--r--  deps/v8/src/objects/js-date-time-format.h  15
-rw-r--r--  deps/v8/src/objects/js-generator-inl.h  2
-rw-r--r--  deps/v8/src/objects/js-list-format-inl.h  4
-rw-r--r--  deps/v8/src/objects/js-list-format.cc  39
-rw-r--r--  deps/v8/src/objects/js-list-format.h  15
-rw-r--r--  deps/v8/src/objects/js-locale-inl.h  6
-rw-r--r--  deps/v8/src/objects/js-locale.cc  215
-rw-r--r--  deps/v8/src/objects/js-locale.h  14
-rw-r--r--  deps/v8/src/objects/js-number-format-inl.h  50
-rw-r--r--  deps/v8/src/objects/js-number-format.cc  1151
-rw-r--r--  deps/v8/src/objects/js-number-format.h  101
-rw-r--r--  deps/v8/src/objects/js-objects-inl.h  288
-rw-r--r--  deps/v8/src/objects/js-objects.cc  926
-rw-r--r--  deps/v8/src/objects/js-objects.h  81
-rw-r--r--  deps/v8/src/objects/js-plural-rules-inl.h  8
-rw-r--r--  deps/v8/src/objects/js-plural-rules.cc  31
-rw-r--r--  deps/v8/src/objects/js-plural-rules.h  17
-rw-r--r--  deps/v8/src/objects/js-promise-inl.h  4
-rw-r--r--  deps/v8/src/objects/js-proxy-inl.h  4
-rw-r--r--  deps/v8/src/objects/js-proxy.h  2
-rw-r--r--  deps/v8/src/objects/js-regexp-inl.h  32
-rw-r--r--  deps/v8/src/objects/js-regexp-string-iterator-inl.h  2
-rw-r--r--  deps/v8/src/objects/js-regexp.h  8
-rw-r--r--  deps/v8/src/objects/js-relative-time-format-inl.h  4
-rw-r--r--  deps/v8/src/objects/js-relative-time-format.cc  56
-rw-r--r--  deps/v8/src/objects/js-relative-time-format.h  14
-rw-r--r--  deps/v8/src/objects/js-segment-iterator-inl.h  4
-rw-r--r--  deps/v8/src/objects/js-segment-iterator.cc  16
-rw-r--r--  deps/v8/src/objects/js-segment-iterator.h  16
-rw-r--r--  deps/v8/src/objects/js-segmenter-inl.h  4
-rw-r--r--  deps/v8/src/objects/js-segmenter.cc  4
-rw-r--r--  deps/v8/src/objects/js-segmenter.h  16
-rw-r--r--  deps/v8/src/objects/js-weak-refs-inl.h  100
-rw-r--r--  deps/v8/src/objects/keys.cc  1017
-rw-r--r--  deps/v8/src/objects/keys.h  175
-rw-r--r--  deps/v8/src/objects/layout-descriptor-inl.h  253
-rw-r--r--  deps/v8/src/objects/layout-descriptor.cc  288
-rw-r--r--  deps/v8/src/objects/layout-descriptor.h  175
-rw-r--r--  deps/v8/src/objects/literal-objects-inl.h  4
-rw-r--r--  deps/v8/src/objects/literal-objects.cc  50
-rw-r--r--  deps/v8/src/objects/lookup-cache-inl.h  43
-rw-r--r--  deps/v8/src/objects/lookup-cache.cc  15
-rw-r--r--  deps/v8/src/objects/lookup-cache.h  60
-rw-r--r--  deps/v8/src/objects/lookup-inl.h  194
-rw-r--r--  deps/v8/src/objects/lookup.cc  1215
-rw-r--r--  deps/v8/src/objects/lookup.h  281
-rw-r--r--  deps/v8/src/objects/managed.h  8
-rw-r--r--  deps/v8/src/objects/map-inl.h  142
-rw-r--r--  deps/v8/src/objects/map-updater.cc  805
-rw-r--r--  deps/v8/src/objects/map-updater.h  205
-rw-r--r--  deps/v8/src/objects/map.cc  672
-rw-r--r--  deps/v8/src/objects/map.h  83
-rw-r--r--  deps/v8/src/objects/maybe-object-inl.h  123
-rw-r--r--  deps/v8/src/objects/maybe-object.h  126
-rw-r--r--  deps/v8/src/objects/microtask-inl.h  2
-rw-r--r--  deps/v8/src/objects/microtask.h  2
-rw-r--r--  deps/v8/src/objects/module-inl.h  16
-rw-r--r--  deps/v8/src/objects/module.cc  102
-rw-r--r--  deps/v8/src/objects/module.h  2
-rw-r--r--  deps/v8/src/objects/name-inl.h  24
-rw-r--r--  deps/v8/src/objects/name.h  6
-rw-r--r--  deps/v8/src/objects/object-list-macros.h  270
-rw-r--r--  deps/v8/src/objects/object-macros-undef.h  32
-rw-r--r--  deps/v8/src/objects/object-macros.h  248
-rw-r--r--  deps/v8/src/objects/objects-body-descriptors-inl.h  1116
-rw-r--r--  deps/v8/src/objects/objects-body-descriptors.h  186
-rw-r--r--  deps/v8/src/objects/objects-definitions.h  400
-rw-r--r--  deps/v8/src/objects/objects-inl.h  1039
-rw-r--r--  deps/v8/src/objects/objects.cc  8200
-rw-r--r--  deps/v8/src/objects/objects.h  836
-rw-r--r--  deps/v8/src/objects/oddball-inl.h  29
-rw-r--r--  deps/v8/src/objects/oddball.h  37
-rw-r--r--  deps/v8/src/objects/ordered-hash-table-inl.h  10
-rw-r--r--  deps/v8/src/objects/ordered-hash-table.cc  204
-rw-r--r--  deps/v8/src/objects/ordered-hash-table.h  10
-rw-r--r--  deps/v8/src/objects/promise.h  12
-rw-r--r--  deps/v8/src/objects/property-array-inl.h  13
-rw-r--r--  deps/v8/src/objects/property-array.h  6
-rw-r--r--  deps/v8/src/objects/property-cell.h  2
-rw-r--r--  deps/v8/src/objects/property-descriptor-object-inl.h  2
-rw-r--r--  deps/v8/src/objects/property-descriptor-object.h  2
-rw-r--r--  deps/v8/src/objects/property-descriptor.cc  370
-rw-r--r--  deps/v8/src/objects/property-descriptor.h  134
-rw-r--r--  deps/v8/src/objects/property-details.h  409
-rw-r--r--  deps/v8/src/objects/property.cc  146
-rw-r--r--  deps/v8/src/objects/property.h  75
-rw-r--r--  deps/v8/src/objects/prototype-info-inl.h  10
-rw-r--r--  deps/v8/src/objects/prototype-info.h  2
-rw-r--r--  deps/v8/src/objects/prototype-inl.h  144
-rw-r--r--  deps/v8/src/objects/prototype.h  89
-rw-r--r--  deps/v8/src/objects/regexp-match-info.h  2
-rw-r--r--  deps/v8/src/objects/scope-info.cc  152
-rw-r--r--  deps/v8/src/objects/scope-info.h  15
-rw-r--r--  deps/v8/src/objects/script-inl.h  14
-rw-r--r--  deps/v8/src/objects/script.h  2
-rw-r--r--  deps/v8/src/objects/shared-function-info-inl.h  186
-rw-r--r--  deps/v8/src/objects/shared-function-info.h  64
-rw-r--r--  deps/v8/src/objects/slots-atomic-inl.h  1
-rw-r--r--  deps/v8/src/objects/slots-inl.h  19
-rw-r--r--  deps/v8/src/objects/slots.h  4
-rw-r--r--  deps/v8/src/objects/smi-inl.h  4
-rw-r--r--  deps/v8/src/objects/smi.h  8
-rw-r--r--  deps/v8/src/objects/stack-frame-info-inl.h  8
-rw-r--r--  deps/v8/src/objects/stack-frame-info.cc  36
-rw-r--r--  deps/v8/src/objects/stack-frame-info.h  16
-rw-r--r--  deps/v8/src/objects/string-comparator.cc  2
-rw-r--r--  deps/v8/src/objects/string-comparator.h  4
-rw-r--r--  deps/v8/src/objects/string-inl.h  265
-rw-r--r--  deps/v8/src/objects/string-table-inl.h  31
-rw-r--r--  deps/v8/src/objects/string-table.h  29
-rw-r--r--  deps/v8/src/objects/string.cc  349
-rw-r--r--  deps/v8/src/objects/string.h  126
-rw-r--r--  deps/v8/src/objects/struct-inl.h  25
-rw-r--r--  deps/v8/src/objects/struct.h  37
-rw-r--r--  deps/v8/src/objects/tagged-impl-inl.h  257
-rw-r--r--  deps/v8/src/objects/tagged-impl.cc  39
-rw-r--r--  deps/v8/src/objects/tagged-impl.h  181
-rw-r--r--  deps/v8/src/objects/tagged-value-inl.h  39
-rw-r--r--  deps/v8/src/objects/tagged-value.h  42
-rw-r--r--  deps/v8/src/objects/template-objects-inl.h  2
-rw-r--r--  deps/v8/src/objects/template-objects.cc  16
-rw-r--r--  deps/v8/src/objects/template-objects.h  13
-rw-r--r--  deps/v8/src/objects/templates-inl.h  54
-rw-r--r--  deps/v8/src/objects/templates.h  18
-rw-r--r--  deps/v8/src/objects/transitions-inl.h  321
-rw-r--r--  deps/v8/src/objects/transitions.cc  657
-rw-r--r--  deps/v8/src/objects/transitions.h  350
-rw-r--r--  deps/v8/src/objects/type-hints.cc  89
-rw-r--r--  deps/v8/src/objects/type-hints.h  77
-rw-r--r--  deps/v8/src/objects/value-serializer.cc  2213
-rw-r--r--  deps/v8/src/objects/value-serializer.h  314
-rw-r--r--  deps/v8/src/objects/visitors.cc  33
-rw-r--r--  deps/v8/src/objects/visitors.h  157
214 files changed, 38146 insertions(+), 4520 deletions(-)
diff --git a/deps/v8/src/objects/allocation-site-inl.h b/deps/v8/src/objects/allocation-site-inl.h
index ac0a16c944..aaf0105e51 100644
--- a/deps/v8/src/objects/allocation-site-inl.h
+++ b/deps/v8/src/objects/allocation-site-inl.h
@@ -46,7 +46,7 @@ void AllocationSite::set_boilerplate(JSObject object, WriteBarrierMode mode) {
int AllocationSite::transition_info() const {
DCHECK(!PointsToLiteral());
- return Smi::cast(transition_info_or_boilerplate())->value();
+ return Smi::cast(transition_info_or_boilerplate()).value();
}
void AllocationSite::set_transition_info(int value) {
@@ -105,9 +105,9 @@ void AllocationSite::SetDoNotInlineCall() {
bool AllocationSite::PointsToLiteral() const {
Object raw_value = transition_info_or_boilerplate();
- DCHECK_EQ(!raw_value->IsSmi(),
- raw_value->IsJSArray() || raw_value->IsJSObject());
- return !raw_value->IsSmi();
+ DCHECK_EQ(!raw_value.IsSmi(),
+ raw_value.IsJSArray() || raw_value.IsJSObject());
+ return !raw_value.IsSmi();
}
// Heuristic: We only need to create allocation site info if the boilerplate
@@ -181,8 +181,8 @@ inline void AllocationSite::IncrementMementoCreateCount() {
}
bool AllocationMemento::IsValid() const {
- return allocation_site()->IsAllocationSite() &&
- !AllocationSite::cast(allocation_site())->IsZombie();
+ return allocation_site().IsAllocationSite() &&
+ !AllocationSite::cast(allocation_site()).IsZombie();
}
AllocationSite AllocationMemento::GetAllocationSite() const {
@@ -191,7 +191,7 @@ AllocationSite AllocationMemento::GetAllocationSite() const {
}
Address AllocationMemento::GetAllocationSiteUnchecked() const {
- return allocation_site()->ptr();
+ return allocation_site().ptr();
}
template <AllocationSiteUpdateMode update_or_check>
@@ -200,7 +200,7 @@ bool AllocationSite::DigestTransitionFeedback(Handle<AllocationSite> site,
Isolate* isolate = site->GetIsolate();
bool result = false;
- if (site->PointsToLiteral() && site->boilerplate()->IsJSArray()) {
+ if (site->PointsToLiteral() && site->boilerplate().IsJSArray()) {
Handle<JSArray> boilerplate(JSArray::cast(site->boilerplate()), isolate);
ElementsKind kind = boilerplate->GetElementsKind();
// if kind is holey ensure that to_kind is as well.
@@ -211,7 +211,7 @@ bool AllocationSite::DigestTransitionFeedback(Handle<AllocationSite> site,
// If the array is huge, it's not likely to be defined in a local
// function, so we shouldn't make new instances of it very often.
uint32_t length = 0;
- CHECK(boilerplate->length()->ToArrayLength(&length));
+ CHECK(boilerplate->length().ToArrayLength(&length));
if (length <= kMaximumArrayBytesToPretransition) {
if (update_or_check == AllocationSiteUpdateMode::kCheckOnly) {
return true;
@@ -224,7 +224,7 @@ bool AllocationSite::DigestTransitionFeedback(Handle<AllocationSite> site,
ElementsKindToString(to_kind));
}
JSObject::TransitionElementsKind(boilerplate, to_kind);
- site->dependent_code()->DeoptimizeDependentCodeGroup(
+ site->dependent_code().DeoptimizeDependentCodeGroup(
isolate, DependentCode::kAllocationSiteTransitionChangedGroup);
result = true;
}
@@ -244,7 +244,7 @@ bool AllocationSite::DigestTransitionFeedback(Handle<AllocationSite> site,
ElementsKindToString(to_kind));
}
site->SetElementsKind(to_kind);
- site->dependent_code()->DeoptimizeDependentCodeGroup(
+ site->dependent_code().DeoptimizeDependentCodeGroup(
isolate, DependentCode::kAllocationSiteTransitionChangedGroup);
result = true;
}
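
The bulk of this update follows one mechanical pattern, visible in the hunks above: in V8 7.6, Object and its subclasses are value types wrapping a tagged word rather than pointer types, so member access on a cast result changes from -> to . (e.g. Smi::cast(x)->value() becomes Smi::cast(x).value()). A minimal sketch of the idea, using a hypothetical class rather than the real V8 hierarchy:

#include <cstdint>

// Hypothetical value-type handle: the object *is* the tagged word, so
// copying the handle copies the word, and accessors are plain member
// functions reached with '.' instead of '->'.
class TaggedValue {
 public:
  explicit TaggedValue(uintptr_t ptr) : ptr_(ptr) {}
  uintptr_t ptr() const { return ptr_; }
  // Assumes V8's Smi tagging convention (low bit clear for Smis).
  bool IsSmi() const { return (ptr_ & 1) == 0; }

 private:
  uintptr_t ptr_;  // tagged pointer held by value, not pointed to
};

Under that scheme the call sites change exactly as shown throughout this diff, with no behavioral difference.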
diff --git a/deps/v8/src/objects/allocation-site-scopes-inl.h b/deps/v8/src/objects/allocation-site-scopes-inl.h
new file mode 100644
index 0000000000..350b243e46
--- /dev/null
+++ b/deps/v8/src/objects/allocation-site-scopes-inl.h
@@ -0,0 +1,59 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_ALLOCATION_SITE_SCOPES_INL_H_
+#define V8_OBJECTS_ALLOCATION_SITE_SCOPES_INL_H_
+
+#include "src/objects/allocation-site-scopes.h"
+
+#include "src/objects/allocation-site-inl.h"
+
+namespace v8 {
+namespace internal {
+
+void AllocationSiteContext::InitializeTraversal(Handle<AllocationSite> site) {
+ top_ = site;
+ // {current_} is updated in place to not create unnecessary Handles, hence
+ // we initially need a separate handle.
+ current_ = Handle<AllocationSite>::New(*top_, isolate());
+}
+
+Handle<AllocationSite> AllocationSiteUsageContext::EnterNewScope() {
+ if (top().is_null()) {
+ InitializeTraversal(top_site_);
+ } else {
+ // Advance current site
+ Object nested_site = current()->nested_site();
+ // Something is wrong if we advance to the end of the list here.
+ update_current_site(AllocationSite::cast(nested_site));
+ }
+ return Handle<AllocationSite>(*current(), isolate());
+}
+
+void AllocationSiteUsageContext::ExitScope(Handle<AllocationSite> scope_site,
+ Handle<JSObject> object) {
+ // This assert ensures that we are pointing at the right sub-object in a
+ // recursive walk of a nested literal.
+ DCHECK(object.is_null() || *object == scope_site->boilerplate());
+}
+
+bool AllocationSiteUsageContext::ShouldCreateMemento(Handle<JSObject> object) {
+ if (activated_ && AllocationSite::CanTrack(object->map().instance_type())) {
+ if (FLAG_allocation_site_pretenuring ||
+ AllocationSite::ShouldTrack(object->GetElementsKind())) {
+ if (FLAG_trace_creation_allocation_sites) {
+ PrintF("*** Creating Memento for %s %p\n",
+ object->IsJSArray() ? "JSArray" : "JSObject",
+ reinterpret_cast<void*>(object->ptr()));
+ }
+ return true;
+ }
+ }
+ return false;
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_OBJECTS_ALLOCATION_SITE_SCOPES_INL_H_
diff --git a/deps/v8/src/objects/allocation-site-scopes.h b/deps/v8/src/objects/allocation-site-scopes.h
new file mode 100644
index 0000000000..8f5fb42986
--- /dev/null
+++ b/deps/v8/src/objects/allocation-site-scopes.h
@@ -0,0 +1,69 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_ALLOCATION_SITE_SCOPES_H_
+#define V8_OBJECTS_ALLOCATION_SITE_SCOPES_H_
+
+#include "src/handles/handles.h"
+#include "src/objects/allocation-site.h"
+#include "src/objects/map.h"
+#include "src/objects/objects.h"
+
+namespace v8 {
+namespace internal {
+
+// AllocationSiteContext is the base class for walking and copying a nested
+// boilerplate with AllocationSite and AllocationMemento support.
+class AllocationSiteContext {
+ public:
+ explicit AllocationSiteContext(Isolate* isolate) { isolate_ = isolate; }
+
+ Handle<AllocationSite> top() { return top_; }
+ Handle<AllocationSite> current() { return current_; }
+
+ bool ShouldCreateMemento(Handle<JSObject> object) { return false; }
+
+ Isolate* isolate() { return isolate_; }
+
+ protected:
+ void update_current_site(AllocationSite site) {
+ *(current_.location()) = site.ptr();
+ }
+
+ inline void InitializeTraversal(Handle<AllocationSite> site);
+
+ private:
+ Isolate* isolate_;
+ Handle<AllocationSite> top_;
+ Handle<AllocationSite> current_;
+};
+
+// AllocationSiteUsageContext aids in the creation of AllocationMementos placed
+// behind some/all components of a copied object literal.
+class AllocationSiteUsageContext : public AllocationSiteContext {
+ public:
+ AllocationSiteUsageContext(Isolate* isolate, Handle<AllocationSite> site,
+ bool activated)
+ : AllocationSiteContext(isolate),
+ top_site_(site),
+ activated_(activated) {}
+
+ inline Handle<AllocationSite> EnterNewScope();
+
+ inline void ExitScope(Handle<AllocationSite> scope_site,
+ Handle<JSObject> object);
+
+ inline bool ShouldCreateMemento(Handle<JSObject> object);
+
+ static const bool kCopying = true;
+
+ private:
+ Handle<AllocationSite> top_site_;
+ bool activated_;
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_OBJECTS_ALLOCATION_SITE_SCOPES_H_
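
The new header above only declares the scope-walking API; its comments describe AllocationSiteUsageContext as a helper for placing AllocationMementos behind components of a copied object literal. As a rough usage sketch (the driver function and recursion shape here are hypothetical, not code from this commit), a boilerplate copier would pair the scope calls like this:

// Hypothetical driver for a nested-literal walk.
void CopyBoilerplate(Isolate* isolate, Handle<AllocationSite> site,
                     Handle<JSObject> boilerplate) {
  AllocationSiteUsageContext context(isolate, site, /*activated=*/true);
  Handle<AllocationSite> current = context.EnterNewScope();
  if (context.ShouldCreateMemento(boilerplate)) {
    // ... allocate an AllocationMemento behind the copied object ...
  }
  // ... recurse into nested literals, entering a new scope for each ...
  context.ExitScope(current, boilerplate);  // DCHECKs the walk stayed in sync
}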
diff --git a/deps/v8/src/objects/allocation-site.h b/deps/v8/src/objects/allocation-site.h
index b221bd02dd..9289a83f70 100644
--- a/deps/v8/src/objects/allocation-site.h
+++ b/deps/v8/src/objects/allocation-site.h
@@ -5,7 +5,7 @@
#ifndef V8_OBJECTS_ALLOCATION_SITE_H_
#define V8_OBJECTS_ALLOCATION_SITE_H_
-#include "src/objects.h"
+#include "src/objects/objects.h"
#include "src/objects/struct.h"
// Has to be the last include (doesn't have include guards):
@@ -134,25 +134,25 @@ class AllocationSite : public Struct {
static bool ShouldTrack(ElementsKind from, ElementsKind to);
static inline bool CanTrack(InstanceType type);
-// Layout description.
-// AllocationSite has to start with TransitionInfoOrboilerPlateOffset
-// and end with WeakNext field.
-#define ALLOCATION_SITE_FIELDS(V) \
- V(kStartOffset, 0) \
- V(kTransitionInfoOrBoilerplateOffset, kTaggedSize) \
- V(kNestedSiteOffset, kTaggedSize) \
- V(kDependentCodeOffset, kTaggedSize) \
- V(kCommonPointerFieldEndOffset, 0) \
- V(kPretenureDataOffset, kInt32Size) \
- V(kPretenureCreateCountOffset, kInt32Size) \
- /* Size of AllocationSite without WeakNext field */ \
- V(kSizeWithoutWeakNext, 0) \
- V(kWeakNextOffset, kTaggedSize) \
- /* Size of AllocationSite with WeakNext field */ \
- V(kSizeWithWeakNext, 0)
+ // Layout description.
+ // AllocationSite has to start with TransitionInfoOrboilerPlateOffset
+ // and end with WeakNext field.
+ #define ALLOCATION_SITE_FIELDS(V) \
+ V(kStartOffset, 0) \
+ V(kTransitionInfoOrBoilerplateOffset, kTaggedSize) \
+ V(kNestedSiteOffset, kTaggedSize) \
+ V(kDependentCodeOffset, kTaggedSize) \
+ V(kCommonPointerFieldEndOffset, 0) \
+ V(kPretenureDataOffset, kInt32Size) \
+ V(kPretenureCreateCountOffset, kInt32Size) \
+ /* Size of AllocationSite without WeakNext field */ \
+ V(kSizeWithoutWeakNext, 0) \
+ V(kWeakNextOffset, kTaggedSize) \
+ /* Size of AllocationSite with WeakNext field */ \
+ V(kSizeWithWeakNext, 0)
DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize, ALLOCATION_SITE_FIELDS)
-#undef ALLOCATION_SITE_FIELDS
+ #undef ALLOCATION_SITE_FIELDS
class BodyDescriptor;
@@ -164,14 +164,9 @@ class AllocationSite : public Struct {
class AllocationMemento : public Struct {
public:
-// Layout description.
-#define ALLOCATION_MEMENTO_FIELDS(V) \
- V(kAllocationSiteOffset, kTaggedSize) \
- V(kSize, 0)
-
+ // Layout description.
DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize,
- ALLOCATION_MEMENTO_FIELDS)
-#undef ALLOCATION_MEMENTO_FIELDS
+ TORQUE_GENERATED_ALLOCATION_MEMENTO_FIELDS)
DECL_ACCESSORS(allocation_site, Object)
diff --git a/deps/v8/src/objects/api-callbacks-inl.h b/deps/v8/src/objects/api-callbacks-inl.h
index 7cd08f7052..041247637a 100644
--- a/deps/v8/src/objects/api-callbacks-inl.h
+++ b/deps/v8/src/objects/api-callbacks-inl.h
@@ -46,7 +46,7 @@ bool AccessorInfo::has_getter() {
bool result = getter() != Smi::kZero;
DCHECK_EQ(result,
getter() != Smi::kZero &&
- Foreign::cast(getter())->foreign_address() != kNullAddress);
+ Foreign::cast(getter()).foreign_address() != kNullAddress);
return result;
}
@@ -54,7 +54,7 @@ bool AccessorInfo::has_setter() {
bool result = setter() != Smi::kZero;
DCHECK_EQ(result,
setter() != Smi::kZero &&
- Foreign::cast(setter())->foreign_address() != kNullAddress);
+ Foreign::cast(setter()).foreign_address() != kNullAddress);
return result;
}
@@ -88,13 +88,13 @@ BIT_FIELD_ACCESSORS(AccessorInfo, flags, initial_property_attributes,
bool AccessorInfo::IsCompatibleReceiver(Object receiver) {
if (!HasExpectedReceiverType()) return true;
- if (!receiver->IsJSObject()) return false;
+ if (!receiver.IsJSObject()) return false;
return FunctionTemplateInfo::cast(expected_receiver_type())
- ->IsTemplateFor(JSObject::cast(receiver)->map());
+ .IsTemplateFor(JSObject::cast(receiver).map());
}
bool AccessorInfo::HasExpectedReceiverType() {
- return expected_receiver_type()->IsFunctionTemplateInfo();
+ return expected_receiver_type().IsFunctionTemplateInfo();
}
ACCESSORS(AccessCheckInfo, callback, Object, kCallbackOffset)
diff --git a/deps/v8/src/objects/arguments-inl.h b/deps/v8/src/objects/arguments-inl.h
index 4132aec04d..c2ef59a896 100644
--- a/deps/v8/src/objects/arguments-inl.h
+++ b/deps/v8/src/objects/arguments-inl.h
@@ -7,10 +7,10 @@
#include "src/objects/arguments.h"
-#include "src/contexts-inl.h"
-#include "src/isolate-inl.h"
-#include "src/objects-inl.h"
+#include "src/execution/isolate-inl.h"
+#include "src/objects/contexts-inl.h"
#include "src/objects/fixed-array-inl.h"
+#include "src/objects/objects-inl.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -62,23 +62,23 @@ bool JSSloppyArgumentsObject::GetSloppyArgumentsLength(Isolate* isolate,
int* out) {
Context context = *isolate->native_context();
Map map = object->map();
- if (map != context->sloppy_arguments_map() &&
- map != context->strict_arguments_map() &&
- map != context->fast_aliased_arguments_map()) {
+ if (map != context.sloppy_arguments_map() &&
+ map != context.strict_arguments_map() &&
+ map != context.fast_aliased_arguments_map()) {
return false;
}
DCHECK(object->HasFastElements() || object->HasFastArgumentsElements());
Object len_obj =
object->InObjectPropertyAt(JSArgumentsObjectWithLength::kLengthIndex);
- if (!len_obj->IsSmi()) return false;
+ if (!len_obj.IsSmi()) return false;
*out = Max(0, Smi::ToInt(len_obj));
FixedArray parameters = FixedArray::cast(object->elements());
if (object->HasSloppyArgumentsElements()) {
- FixedArray arguments = FixedArray::cast(parameters->get(1));
- return *out <= arguments->length();
+ FixedArray arguments = FixedArray::cast(parameters.get(1));
+ return *out <= arguments.length();
}
- return *out <= parameters->length();
+ return *out <= parameters.length();
}
} // namespace internal
diff --git a/deps/v8/src/objects/arguments.h b/deps/v8/src/objects/arguments.h
index 720820268c..a1d39f1f36 100644
--- a/deps/v8/src/objects/arguments.h
+++ b/deps/v8/src/objects/arguments.h
@@ -8,7 +8,7 @@
#include "src/objects/fixed-array.h"
#include "src/objects/js-objects.h"
#include "src/objects/struct.h"
-#include "torque-generated/class-definitions-from-dsl.h"
+#include "torque-generated/field-offsets-tq.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
diff --git a/deps/v8/src/objects/bigint.cc b/deps/v8/src/objects/bigint.cc
index 7b67aa3ffb..92b78f8821 100644
--- a/deps/v8/src/objects/bigint.cc
+++ b/deps/v8/src/objects/bigint.cc
@@ -19,14 +19,14 @@
#include "src/objects/bigint.h"
-#include "src/conversions.h"
-#include "src/double.h"
+#include "src/execution/isolate-inl.h"
#include "src/heap/factory.h"
#include "src/heap/heap-write-barrier-inl.h"
-#include "src/isolate-inl.h"
-#include "src/objects-inl.h"
+#include "src/numbers/conversions.h"
+#include "src/numbers/double.h"
#include "src/objects/heap-number-inl.h"
#include "src/objects/instance-type-inl.h"
+#include "src/objects/objects-inl.h"
#include "src/objects/smi.h"
namespace v8 {
@@ -202,11 +202,11 @@ class MutableBigInt : public FreshlyAllocatedBigInt {
}
inline void initialize_bitfield(bool sign, int length) {
int32_t bitfield = LengthBits::encode(length) | SignBits::encode(sign);
- WRITE_INT32_FIELD(*this, kBitfieldOffset, bitfield);
+ WriteField<int32_t>(kBitfieldOffset, bitfield);
}
inline void set_digit(int n, digit_t value) {
SLOW_DCHECK(0 <= n && n < length());
- WRITE_UINTPTR_FIELD(*this, kDigitsOffset + n * kDigitSize, value);
+ WriteField<digit_t>(kDigitsOffset + n * kDigitSize, value);
}
void set_64_bits(uint64_t bits);
@@ -498,7 +498,7 @@ MaybeHandle<BigInt> BigInt::Multiply(Isolate* isolate, Handle<BigInt> x,
work_estimate = 0;
StackLimitCheck interrupt_check(isolate);
if (interrupt_check.InterruptRequested() &&
- isolate->stack_guard()->HandleInterrupts()->IsException(isolate)) {
+ isolate->stack_guard()->HandleInterrupts().IsException(isolate)) {
return MaybeHandle<BigInt>();
}
}
@@ -655,10 +655,10 @@ ComparisonResult BigInt::CompareToBigInt(Handle<BigInt> x, Handle<BigInt> y) {
}
bool BigInt::EqualToBigInt(BigInt x, BigInt y) {
- if (x->sign() != y->sign()) return false;
- if (x->length() != y->length()) return false;
- for (int i = 0; i < x->length(); i++) {
- if (x->digit(i) != y->digit(i)) return false;
+ if (x.sign() != y.sign()) return false;
+ if (x.length() != y.length()) return false;
+ for (int i = 0; i < x.length(); i++) {
+ if (x.digit(i) != y.digit(i)) return false;
}
return true;
}
@@ -979,7 +979,7 @@ MaybeHandle<BigInt> BigInt::FromNumber(Isolate* isolate,
if (number->IsSmi()) {
return MutableBigInt::NewFromInt(isolate, Smi::ToInt(*number));
}
- double value = HeapNumber::cast(*number)->value();
+ double value = HeapNumber::cast(*number).value();
if (!std::isfinite(value) || (DoubleToInteger(value) != value)) {
THROW_NEW_ERROR(isolate,
NewRangeError(MessageTemplate::kBigIntFromNumber, number),
@@ -1311,8 +1311,8 @@ inline Handle<MutableBigInt> MutableBigInt::AbsoluteBitwiseOp(
if (result_storage.is_null()) {
result = New(isolate, result_length).ToHandleChecked();
} else {
- DCHECK(result_storage->length() >= result_length);
- result_length = result_storage->length();
+ DCHECK(result_storage.length() >= result_length);
+ result_length = result_storage.length();
}
int i = 0;
for (; i < num_pairs; i++) {
@@ -1428,12 +1428,12 @@ void MutableBigInt::MultiplyAccumulate(Handle<BigIntBase> multiplicand,
void MutableBigInt::InternalMultiplyAdd(BigIntBase source, digit_t factor,
digit_t summand, int n,
MutableBigInt result) {
- DCHECK(source->length() >= n);
- DCHECK(result->length() >= n);
+ DCHECK(source.length() >= n);
+ DCHECK(result.length() >= n);
digit_t carry = summand;
digit_t high = 0;
for (int i = 0; i < n; i++) {
- digit_t current = source->digit(i);
+ digit_t current = source.digit(i);
digit_t new_carry = 0;
// Compute this round's multiplication.
digit_t new_high = 0;
@@ -1442,15 +1442,15 @@ void MutableBigInt::InternalMultiplyAdd(BigIntBase source, digit_t factor,
current = digit_add(current, high, &new_carry);
current = digit_add(current, carry, &new_carry);
// Store result and prepare for next round.
- result->set_digit(i, current);
+ result.set_digit(i, current);
carry = new_carry;
high = new_high;
}
- if (result->length() > n) {
- result->set_digit(n++, carry + high);
+ if (result.length() > n) {
+ result.set_digit(n++, carry + high);
// Current callers don't pass in such large results, but let's be robust.
- while (n < result->length()) {
- result->set_digit(n++, 0);
+ while (n < result.length()) {
+ result.set_digit(n++, 0);
}
} else {
CHECK_EQ(carry + high, 0);
@@ -1601,7 +1601,7 @@ bool MutableBigInt::AbsoluteDivLarge(Isolate* isolate,
work_estimate = 0;
StackLimitCheck interrupt_check(isolate);
if (interrupt_check.InterruptRequested() &&
- isolate->stack_guard()->HandleInterrupts()->IsException(isolate)) {
+ isolate->stack_guard()->HandleInterrupts().IsException(isolate)) {
return false;
}
}
@@ -1949,14 +1949,14 @@ MaybeHandle<BigInt> BigInt::FromSerializedDigits(
void* digits =
reinterpret_cast<void*>(result->ptr() + kDigitsOffset - kHeapObjectTag);
#if defined(V8_TARGET_LITTLE_ENDIAN)
- memcpy(digits, digits_storage.start(), bytelength);
+ memcpy(digits, digits_storage.begin(), bytelength);
void* padding_start =
reinterpret_cast<void*>(reinterpret_cast<Address>(digits) + bytelength);
memset(padding_start, 0, length * kDigitSize - bytelength);
#elif defined(V8_TARGET_BIG_ENDIAN)
digit_t* digit = reinterpret_cast<digit_t*>(digits);
const digit_t* digit_storage =
- reinterpret_cast<const digit_t*>(digits_storage.start());
+ reinterpret_cast<const digit_t*>(digits_storage.begin());
for (int i = 0; i < bytelength / kDigitSize; i++) {
*digit = ByteReverse(*digit_storage);
digit_storage++;
@@ -2146,7 +2146,7 @@ MaybeHandle<String> MutableBigInt::ToStringGeneric(Isolate* isolate,
if (interrupt_check.InterruptRequested()) {
{
AllowHeapAllocation might_throw;
- if (isolate->stack_guard()->HandleInterrupts()->IsException(
+ if (isolate->stack_guard()->HandleInterrupts().IsException(
isolate)) {
return MaybeHandle<String>();
}
@@ -2156,7 +2156,7 @@ MaybeHandle<String> MutableBigInt::ToStringGeneric(Isolate* isolate,
chars = result->GetChars(no_gc);
}
if (interrupt_check.InterruptRequested() &&
- isolate->stack_guard()->HandleInterrupts()->IsException(isolate)) {
+ isolate->stack_guard()->HandleInterrupts().IsException(isolate)) {
return MaybeHandle<String>();
}
}
@@ -2457,16 +2457,16 @@ void BigInt::ToWordsArray64(int* sign_bit, int* words64_count,
uint64_t MutableBigInt::GetRawBits(BigIntBase x, bool* lossless) {
if (lossless != nullptr) *lossless = true;
- if (x->is_zero()) return 0;
- int len = x->length();
+ if (x.is_zero()) return 0;
+ int len = x.length();
STATIC_ASSERT(kDigitBits == 64 || kDigitBits == 32);
if (lossless != nullptr && len > 64 / kDigitBits) *lossless = false;
- uint64_t raw = static_cast<uint64_t>(x->digit(0));
+ uint64_t raw = static_cast<uint64_t>(x.digit(0));
if (kDigitBits == 32 && len > 1) {
- raw |= static_cast<uint64_t>(x->digit(1)) << 32;
+ raw |= static_cast<uint64_t>(x.digit(1)) << 32;
}
// Simulate two's complement. MSVC dislikes "-raw".
- return x->sign() ? ((~raw) + 1u) : raw;
+ return x.sign() ? ((~raw) + 1u) : raw;
}
int64_t BigInt::AsInt64(bool* lossless) {
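
The GetRawBits hunk above keeps the comment "Simulate two's complement. MSVC dislikes -raw": (~raw) + 1u is the two's-complement negation of raw, written without unary minus because -raw on an unsigned operand triggers MSVC warning C4146. A self-contained check of the identity:

#include <cassert>
#include <cstdint>

int main() {
  uint64_t raw = 42;
  // Two's-complement negation without unary minus: ~x + 1 == -x (mod 2^64).
  uint64_t negated = (~raw) + 1u;
  assert(raw + negated == 0);  // the pair sums to zero modulo 2^64
  // Matches signed negation for values that fit in int64_t.
  assert(negated == static_cast<uint64_t>(-static_cast<int64_t>(raw)));
  return 0;
}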
diff --git a/deps/v8/src/objects/bigint.h b/deps/v8/src/objects/bigint.h
index e59c7d6982..3f5d35878b 100644
--- a/deps/v8/src/objects/bigint.h
+++ b/deps/v8/src/objects/bigint.h
@@ -5,10 +5,10 @@
#ifndef V8_OBJECTS_BIGINT_H_
#define V8_OBJECTS_BIGINT_H_
-#include "src/globals.h"
-#include "src/objects.h"
+#include "src/common/globals.h"
#include "src/objects/heap-object.h"
-#include "src/utils.h"
+#include "src/objects/objects.h"
+#include "src/utils/utils.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -87,7 +87,7 @@ class BigIntBase : public HeapObject {
inline digit_t digit(int n) const {
SLOW_DCHECK(0 <= n && n < length());
- return READ_UINTPTR_FIELD(*this, kDigitsOffset + n * kDigitSize);
+ return ReadField<digit_t>(kDigitsOffset + n * kDigitSize);
}
bool is_zero() const { return length() == 0; }
diff --git a/deps/v8/src/objects/cell-inl.h b/deps/v8/src/objects/cell-inl.h
index c48a82fd31..90266b7599 100644
--- a/deps/v8/src/objects/cell-inl.h
+++ b/deps/v8/src/objects/cell-inl.h
@@ -8,7 +8,7 @@
#include "src/objects/cell.h"
#include "src/heap/heap-write-barrier-inl.h"
-#include "src/objects-inl.h"
+#include "src/objects/objects-inl.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
diff --git a/deps/v8/src/objects/cell.h b/deps/v8/src/objects/cell.h
index c15b31a61c..9c77f5d332 100644
--- a/deps/v8/src/objects/cell.h
+++ b/deps/v8/src/objects/cell.h
@@ -6,7 +6,7 @@
#define V8_OBJECTS_CELL_H_
#include "src/objects/heap-object.h"
-#include "torque-generated/class-definitions-from-dsl.h"
+#include "torque-generated/field-offsets-tq.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
diff --git a/deps/v8/src/objects/code-inl.h b/deps/v8/src/objects/code-inl.h
index a0dc3b3ae1..0877746d11 100644
--- a/deps/v8/src/objects/code-inl.h
+++ b/deps/v8/src/objects/code-inl.h
@@ -7,16 +7,16 @@
#include "src/objects/code.h"
-#include "src/code-desc.h"
+#include "src/codegen/code-desc.h"
+#include "src/common/v8memory.h"
+#include "src/execution/isolate.h"
#include "src/interpreter/bytecode-register.h"
-#include "src/isolate.h"
#include "src/objects/dictionary.h"
#include "src/objects/instance-type-inl.h"
#include "src/objects/map-inl.h"
#include "src/objects/maybe-object-inl.h"
#include "src/objects/oddball.h"
#include "src/objects/smi-inl.h"
-#include "src/v8memory.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -29,7 +29,7 @@ OBJECT_CONSTRUCTORS_IMPL(BytecodeArray, FixedArrayBase)
OBJECT_CONSTRUCTORS_IMPL(AbstractCode, HeapObject)
OBJECT_CONSTRUCTORS_IMPL(DependentCode, WeakFixedArray)
OBJECT_CONSTRUCTORS_IMPL(CodeDataContainer, HeapObject)
-OBJECT_CONSTRUCTORS_IMPL(SourcePositionTableWithFrameCache, Tuple2)
+OBJECT_CONSTRUCTORS_IMPL(SourcePositionTableWithFrameCache, Struct)
NEVER_READ_ONLY_SPACE_IMPL(AbstractCode)
@@ -42,94 +42,94 @@ CAST_ACCESSOR(DeoptimizationData)
CAST_ACCESSOR(SourcePositionTableWithFrameCache)
ACCESSORS(SourcePositionTableWithFrameCache, source_position_table, ByteArray,
- kSourcePositionTableIndex)
+ kSourcePositionTableOffset)
ACCESSORS(SourcePositionTableWithFrameCache, stack_frame_cache,
- SimpleNumberDictionary, kStackFrameCacheIndex)
+ SimpleNumberDictionary, kStackFrameCacheOffset)
int AbstractCode::raw_instruction_size() {
if (IsCode()) {
- return GetCode()->raw_instruction_size();
+ return GetCode().raw_instruction_size();
} else {
- return GetBytecodeArray()->length();
+ return GetBytecodeArray().length();
}
}
int AbstractCode::InstructionSize() {
if (IsCode()) {
- return GetCode()->InstructionSize();
+ return GetCode().InstructionSize();
} else {
- return GetBytecodeArray()->length();
+ return GetBytecodeArray().length();
}
}
ByteArray AbstractCode::source_position_table() {
if (IsCode()) {
- return GetCode()->SourcePositionTable();
+ return GetCode().SourcePositionTable();
} else {
- return GetBytecodeArray()->SourcePositionTable();
+ return GetBytecodeArray().SourcePositionTable();
}
}
Object AbstractCode::stack_frame_cache() {
Object maybe_table;
if (IsCode()) {
- maybe_table = GetCode()->source_position_table();
+ maybe_table = GetCode().source_position_table();
} else {
- maybe_table = GetBytecodeArray()->source_position_table();
+ maybe_table = GetBytecodeArray().source_position_table();
}
- if (maybe_table->IsSourcePositionTableWithFrameCache()) {
+ if (maybe_table.IsSourcePositionTableWithFrameCache()) {
return SourcePositionTableWithFrameCache::cast(maybe_table)
- ->stack_frame_cache();
+ .stack_frame_cache();
}
return Smi::kZero;
}
int AbstractCode::SizeIncludingMetadata() {
if (IsCode()) {
- return GetCode()->SizeIncludingMetadata();
+ return GetCode().SizeIncludingMetadata();
} else {
- return GetBytecodeArray()->SizeIncludingMetadata();
+ return GetBytecodeArray().SizeIncludingMetadata();
}
}
int AbstractCode::ExecutableSize() {
if (IsCode()) {
- return GetCode()->ExecutableSize();
+ return GetCode().ExecutableSize();
} else {
- return GetBytecodeArray()->BytecodeArraySize();
+ return GetBytecodeArray().BytecodeArraySize();
}
}
Address AbstractCode::raw_instruction_start() {
if (IsCode()) {
- return GetCode()->raw_instruction_start();
+ return GetCode().raw_instruction_start();
} else {
- return GetBytecodeArray()->GetFirstBytecodeAddress();
+ return GetBytecodeArray().GetFirstBytecodeAddress();
}
}
Address AbstractCode::InstructionStart() {
if (IsCode()) {
- return GetCode()->InstructionStart();
+ return GetCode().InstructionStart();
} else {
- return GetBytecodeArray()->GetFirstBytecodeAddress();
+ return GetBytecodeArray().GetFirstBytecodeAddress();
}
}
Address AbstractCode::raw_instruction_end() {
if (IsCode()) {
- return GetCode()->raw_instruction_end();
+ return GetCode().raw_instruction_end();
} else {
- return GetBytecodeArray()->GetFirstBytecodeAddress() +
- GetBytecodeArray()->length();
+ return GetBytecodeArray().GetFirstBytecodeAddress() +
+ GetBytecodeArray().length();
}
}
Address AbstractCode::InstructionEnd() {
if (IsCode()) {
- return GetCode()->InstructionEnd();
+ return GetCode().InstructionEnd();
} else {
- return GetBytecodeArray()->GetFirstBytecodeAddress() +
- GetBytecodeArray()->length();
+ return GetBytecodeArray().GetFirstBytecodeAddress() +
+ GetBytecodeArray().length();
}
}
@@ -139,7 +139,7 @@ bool AbstractCode::contains(Address inner_pointer) {
AbstractCode::Kind AbstractCode::kind() {
if (IsCode()) {
- return static_cast<AbstractCode::Kind>(GetCode()->kind());
+ return static_cast<AbstractCode::Kind>(GetCode().kind());
} else {
return INTERPRETED_FUNCTION;
}
@@ -236,26 +236,26 @@ void Code::clear_padding() {
ByteArray Code::SourcePositionTableIfCollected() const {
ReadOnlyRoots roots = GetReadOnlyRoots();
Object maybe_table = source_position_table();
- if (maybe_table->IsUndefined(roots) || maybe_table->IsException(roots))
+ if (maybe_table.IsUndefined(roots) || maybe_table.IsException(roots))
return roots.empty_byte_array();
return SourcePositionTable();
}
ByteArray Code::SourcePositionTable() const {
Object maybe_table = source_position_table();
- DCHECK(!maybe_table->IsUndefined() && !maybe_table->IsException());
- if (maybe_table->IsByteArray()) return ByteArray::cast(maybe_table);
- DCHECK(maybe_table->IsSourcePositionTableWithFrameCache());
+ DCHECK(!maybe_table.IsUndefined() && !maybe_table.IsException());
+ if (maybe_table.IsByteArray()) return ByteArray::cast(maybe_table);
+ DCHECK(maybe_table.IsSourcePositionTableWithFrameCache());
return SourcePositionTableWithFrameCache::cast(maybe_table)
- ->source_position_table();
+ .source_position_table();
}
Object Code::next_code_link() const {
- return code_data_container()->next_code_link();
+ return code_data_container().next_code_link();
}
void Code::set_next_code_link(Object value) {
- code_data_container()->set_next_code_link(value);
+ code_data_container().set_next_code_link(value);
}
int Code::InstructionSize() const {
@@ -297,13 +297,12 @@ int Code::GetUnwindingInfoSizeOffset() const {
int Code::unwinding_info_size() const {
DCHECK(has_unwinding_info());
- return static_cast<int>(
- READ_UINT64_FIELD(*this, GetUnwindingInfoSizeOffset()));
+ return static_cast<int>(ReadField<uint64_t>(GetUnwindingInfoSizeOffset()));
}
void Code::set_unwinding_info_size(int value) {
DCHECK(has_unwinding_info());
- WRITE_UINT64_FIELD(*this, GetUnwindingInfoSizeOffset(), value);
+ WriteField<uint64_t>(GetUnwindingInfoSizeOffset(), value);
}
Address Code::unwinding_info_start() const {
@@ -326,8 +325,8 @@ int Code::body_size() const {
int Code::SizeIncludingMetadata() const {
int size = CodeSize();
- size += relocation_info()->Size();
- size += deoptimization_data()->Size();
+ size += relocation_info().Size();
+ size += deoptimization_data().Size();
return size;
}
@@ -336,15 +335,15 @@ ByteArray Code::unchecked_relocation_info() const {
}
byte* Code::relocation_start() const {
- return unchecked_relocation_info()->GetDataStartAddress();
+ return unchecked_relocation_info().GetDataStartAddress();
}
byte* Code::relocation_end() const {
- return unchecked_relocation_info()->GetDataEndAddress();
+ return unchecked_relocation_info().GetDataEndAddress();
}
int Code::relocation_size() const {
- return unchecked_relocation_info()->length();
+ return unchecked_relocation_info().length();
}
Address Code::entry() const { return raw_instruction_start(); }
@@ -369,8 +368,8 @@ int Code::ExecutableSize() const {
// static
void Code::CopyRelocInfoToByteArray(ByteArray dest, const CodeDesc& desc) {
- DCHECK_EQ(dest->length(), desc.reloc_size);
- CopyBytes(dest->GetDataStartAddress(),
+ DCHECK_EQ(dest.length(), desc.reloc_size);
+ CopyBytes(dest.GetDataStartAddress(),
desc.buffer + desc.buffer_size - desc.reloc_size,
static_cast<size_t>(desc.reloc_size));
}
@@ -378,7 +377,7 @@ void Code::CopyRelocInfoToByteArray(ByteArray dest, const CodeDesc& desc) {
int Code::CodeSize() const { return SizeFor(body_size()); }
Code::Kind Code::kind() const {
- return KindField::decode(READ_UINT32_FIELD(*this, kFlagsOffset));
+ return KindField::decode(ReadField<uint32_t>(kFlagsOffset));
}
void Code::initialize_flags(Kind kind, bool has_unwinding_info,
@@ -391,7 +390,7 @@ void Code::initialize_flags(Kind kind, bool has_unwinding_info,
IsTurbofannedField::encode(is_turbofanned) |
StackSlotsField::encode(stack_slots) |
IsOffHeapTrampoline::encode(is_off_heap_trampoline);
- WRITE_UINT32_FIELD(*this, kFlagsOffset, flags);
+ WriteField<uint32_t>(kFlagsOffset, flags);
DCHECK_IMPLIES(stack_slots != 0, has_safepoint_info());
}
@@ -417,54 +416,54 @@ inline bool Code::has_tagged_params() const {
}
inline bool Code::has_unwinding_info() const {
- return HasUnwindingInfoField::decode(READ_UINT32_FIELD(*this, kFlagsOffset));
+ return HasUnwindingInfoField::decode(ReadField<uint32_t>(kFlagsOffset));
}
inline bool Code::is_turbofanned() const {
- return IsTurbofannedField::decode(READ_UINT32_FIELD(*this, kFlagsOffset));
+ return IsTurbofannedField::decode(ReadField<uint32_t>(kFlagsOffset));
}
inline bool Code::can_have_weak_objects() const {
DCHECK(kind() == OPTIMIZED_FUNCTION);
- int32_t flags = code_data_container()->kind_specific_flags();
+ int32_t flags = code_data_container().kind_specific_flags();
return CanHaveWeakObjectsField::decode(flags);
}
inline void Code::set_can_have_weak_objects(bool value) {
DCHECK(kind() == OPTIMIZED_FUNCTION);
- int32_t previous = code_data_container()->kind_specific_flags();
+ int32_t previous = code_data_container().kind_specific_flags();
int32_t updated = CanHaveWeakObjectsField::update(previous, value);
- code_data_container()->set_kind_specific_flags(updated);
+ code_data_container().set_kind_specific_flags(updated);
}
inline bool Code::is_promise_rejection() const {
DCHECK(kind() == BUILTIN);
- int32_t flags = code_data_container()->kind_specific_flags();
+ int32_t flags = code_data_container().kind_specific_flags();
return IsPromiseRejectionField::decode(flags);
}
inline void Code::set_is_promise_rejection(bool value) {
DCHECK(kind() == BUILTIN);
- int32_t previous = code_data_container()->kind_specific_flags();
+ int32_t previous = code_data_container().kind_specific_flags();
int32_t updated = IsPromiseRejectionField::update(previous, value);
- code_data_container()->set_kind_specific_flags(updated);
+ code_data_container().set_kind_specific_flags(updated);
}
inline bool Code::is_exception_caught() const {
DCHECK(kind() == BUILTIN);
- int32_t flags = code_data_container()->kind_specific_flags();
+ int32_t flags = code_data_container().kind_specific_flags();
return IsExceptionCaughtField::decode(flags);
}
inline void Code::set_is_exception_caught(bool value) {
DCHECK(kind() == BUILTIN);
- int32_t previous = code_data_container()->kind_specific_flags();
+ int32_t previous = code_data_container().kind_specific_flags();
int32_t updated = IsExceptionCaughtField::update(previous, value);
- code_data_container()->set_kind_specific_flags(updated);
+ code_data_container().set_kind_specific_flags(updated);
}
inline bool Code::is_off_heap_trampoline() const {
- return IsOffHeapTrampoline::decode(READ_UINT32_FIELD(*this, kFlagsOffset));
+ return IsOffHeapTrampoline::decode(ReadField<uint32_t>(kFlagsOffset));
}
inline HandlerTable::CatchPrediction Code::GetBuiltinCatchPrediction() {
@@ -474,14 +473,14 @@ inline HandlerTable::CatchPrediction Code::GetBuiltinCatchPrediction() {
}
int Code::builtin_index() const {
- int index = READ_INT_FIELD(*this, kBuiltinIndexOffset);
+ int index = ReadField<int>(kBuiltinIndexOffset);
DCHECK(index == -1 || Builtins::IsBuiltinId(index));
return index;
}
void Code::set_builtin_index(int index) {
DCHECK(index == -1 || Builtins::IsBuiltinId(index));
- WRITE_INT_FIELD(*this, kBuiltinIndexOffset, index);
+ WriteField<int>(kBuiltinIndexOffset, index);
}
bool Code::is_builtin() const { return builtin_index() != -1; }
@@ -492,49 +491,49 @@ bool Code::has_safepoint_info() const {
int Code::stack_slots() const {
DCHECK(has_safepoint_info());
- return StackSlotsField::decode(READ_UINT32_FIELD(*this, kFlagsOffset));
+ return StackSlotsField::decode(ReadField<uint32_t>(kFlagsOffset));
}
bool Code::marked_for_deoptimization() const {
DCHECK(kind() == OPTIMIZED_FUNCTION);
- int32_t flags = code_data_container()->kind_specific_flags();
+ int32_t flags = code_data_container().kind_specific_flags();
return MarkedForDeoptimizationField::decode(flags);
}
void Code::set_marked_for_deoptimization(bool flag) {
DCHECK(kind() == OPTIMIZED_FUNCTION);
DCHECK_IMPLIES(flag, AllowDeoptimization::IsAllowed(GetIsolate()));
- int32_t previous = code_data_container()->kind_specific_flags();
+ int32_t previous = code_data_container().kind_specific_flags();
int32_t updated = MarkedForDeoptimizationField::update(previous, flag);
- code_data_container()->set_kind_specific_flags(updated);
+ code_data_container().set_kind_specific_flags(updated);
}
bool Code::embedded_objects_cleared() const {
DCHECK(kind() == OPTIMIZED_FUNCTION);
- int32_t flags = code_data_container()->kind_specific_flags();
+ int32_t flags = code_data_container().kind_specific_flags();
return EmbeddedObjectsClearedField::decode(flags);
}
void Code::set_embedded_objects_cleared(bool flag) {
DCHECK(kind() == OPTIMIZED_FUNCTION);
DCHECK_IMPLIES(flag, marked_for_deoptimization());
- int32_t previous = code_data_container()->kind_specific_flags();
+ int32_t previous = code_data_container().kind_specific_flags();
int32_t updated = EmbeddedObjectsClearedField::update(previous, flag);
- code_data_container()->set_kind_specific_flags(updated);
+ code_data_container().set_kind_specific_flags(updated);
}
bool Code::deopt_already_counted() const {
DCHECK(kind() == OPTIMIZED_FUNCTION);
- int32_t flags = code_data_container()->kind_specific_flags();
+ int32_t flags = code_data_container().kind_specific_flags();
return DeoptAlreadyCountedField::decode(flags);
}
void Code::set_deopt_already_counted(bool flag) {
DCHECK(kind() == OPTIMIZED_FUNCTION);
DCHECK_IMPLIES(flag, AllowDeoptimization::IsAllowed(GetIsolate()));
- int32_t previous = code_data_container()->kind_specific_flags();
+ int32_t previous = code_data_container().kind_specific_flags();
int32_t updated = DeoptAlreadyCountedField::update(previous, flag);
- code_data_container()->set_kind_specific_flags(updated);
+ code_data_container().set_kind_specific_flags(updated);
}
bool Code::is_optimized_code() const { return kind() == OPTIMIZED_FUNCTION; }
@@ -542,13 +541,13 @@ bool Code::is_wasm_code() const { return kind() == WASM_FUNCTION; }
int Code::constant_pool_offset() const {
if (!FLAG_enable_embedded_constant_pool) return code_comments_offset();
- return READ_INT_FIELD(*this, kConstantPoolOffsetOffset);
+ return ReadField<int>(kConstantPoolOffsetOffset);
}
void Code::set_constant_pool_offset(int value) {
if (!FLAG_enable_embedded_constant_pool) return;
DCHECK_LE(value, InstructionSize());
- WRITE_INT_FIELD(*this, kConstantPoolOffsetOffset, value);
+ WriteField<int>(kConstantPoolOffsetOffset, value);
}
Address Code::constant_pool() const {
@@ -592,10 +591,10 @@ bool Code::IsWeakObject(HeapObject object) {
}
bool Code::IsWeakObjectInOptimizedCode(HeapObject object) {
- Map map = object->synchronized_map();
- InstanceType instance_type = map->instance_type();
+ Map map = object.synchronized_map();
+ InstanceType instance_type = map.instance_type();
if (InstanceTypeChecker::IsMap(instance_type)) {
- return Map::cast(object)->CanTransition();
+ return Map::cast(object).CanTransition();
}
return InstanceTypeChecker::IsPropertyCell(instance_type) ||
InstanceTypeChecker::IsJSReceiver(instance_type) ||
@@ -615,22 +614,22 @@ void CodeDataContainer::clear_padding() {
byte BytecodeArray::get(int index) const {
DCHECK(index >= 0 && index < this->length());
- return READ_BYTE_FIELD(*this, kHeaderSize + index * kCharSize);
+ return ReadField<byte>(kHeaderSize + index * kCharSize);
}
void BytecodeArray::set(int index, byte value) {
DCHECK(index >= 0 && index < this->length());
- WRITE_BYTE_FIELD(*this, kHeaderSize + index * kCharSize, value);
+ WriteField<byte>(kHeaderSize + index * kCharSize, value);
}
void BytecodeArray::set_frame_size(int frame_size) {
DCHECK_GE(frame_size, 0);
DCHECK(IsAligned(frame_size, kSystemPointerSize));
- WRITE_INT_FIELD(*this, kFrameSizeOffset, frame_size);
+ WriteField<int>(kFrameSizeOffset, frame_size);
}
int BytecodeArray::frame_size() const {
- return READ_INT_FIELD(*this, kFrameSizeOffset);
+ return ReadField<int>(kFrameSizeOffset);
}
int BytecodeArray::register_count() const {
@@ -641,14 +640,14 @@ void BytecodeArray::set_parameter_count(int number_of_parameters) {
DCHECK_GE(number_of_parameters, 0);
// Parameter count is stored as the size on stack of the parameters to allow
// it to be used directly by generated code.
- WRITE_INT_FIELD(*this, kParameterSizeOffset,
+ WriteField<int>(kParameterSizeOffset,
(number_of_parameters << kSystemPointerSizeLog2));
}
interpreter::Register BytecodeArray::incoming_new_target_or_generator_register()
const {
int register_operand =
- READ_INT_FIELD(*this, kIncomingNewTargetOrGeneratorRegisterOffset);
+ ReadField<int>(kIncomingNewTargetOrGeneratorRegisterOffset);
if (register_operand == 0) {
return interpreter::Register::invalid_value();
} else {
@@ -659,24 +658,24 @@ interpreter::Register BytecodeArray::incoming_new_target_or_generator_register()
void BytecodeArray::set_incoming_new_target_or_generator_register(
interpreter::Register incoming_new_target_or_generator_register) {
if (!incoming_new_target_or_generator_register.is_valid()) {
- WRITE_INT_FIELD(*this, kIncomingNewTargetOrGeneratorRegisterOffset, 0);
+ WriteField<int>(kIncomingNewTargetOrGeneratorRegisterOffset, 0);
} else {
DCHECK(incoming_new_target_or_generator_register.index() <
register_count());
DCHECK_NE(0, incoming_new_target_or_generator_register.ToOperand());
- WRITE_INT_FIELD(*this, kIncomingNewTargetOrGeneratorRegisterOffset,
+ WriteField<int>(kIncomingNewTargetOrGeneratorRegisterOffset,
incoming_new_target_or_generator_register.ToOperand());
}
}
int BytecodeArray::osr_loop_nesting_level() const {
- return READ_INT8_FIELD(*this, kOSRNestingLevelOffset);
+ return ReadField<int8_t>(kOSRNestingLevelOffset);
}
void BytecodeArray::set_osr_loop_nesting_level(int depth) {
DCHECK(0 <= depth && depth <= AbstractCode::kMaxLoopNestingMarker);
STATIC_ASSERT(AbstractCode::kMaxLoopNestingMarker < kMaxInt8);
- WRITE_INT8_FIELD(*this, kOSRNestingLevelOffset, depth);
+ WriteField<int8_t>(kOSRNestingLevelOffset, depth);
}
BytecodeArray::Age BytecodeArray::bytecode_age() const {
@@ -695,7 +694,7 @@ void BytecodeArray::set_bytecode_age(BytecodeArray::Age age) {
int BytecodeArray::parameter_count() const {
// Parameter count is stored as the size on stack of the parameters to allow
// it to be used directly by generated code.
- return READ_INT_FIELD(*this, kParameterSizeOffset) >> kSystemPointerSizeLog2;
+ return ReadField<int>(kParameterSizeOffset) >> kSystemPointerSizeLog2;
}
ACCESSORS(BytecodeArray, constant_pool, FixedArray, kConstantPoolOffset)
@@ -715,11 +714,11 @@ Address BytecodeArray::GetFirstBytecodeAddress() {
bool BytecodeArray::HasSourcePositionTable() const {
Object maybe_table = source_position_table();
- return !(maybe_table->IsUndefined() || DidSourcePositionGenerationFail());
+ return !(maybe_table.IsUndefined() || DidSourcePositionGenerationFail());
}
bool BytecodeArray::DidSourcePositionGenerationFail() const {
- return source_position_table()->IsException();
+ return source_position_table().IsException();
}
void BytecodeArray::SetSourcePositionsFailedToCollect() {
@@ -728,14 +727,14 @@ void BytecodeArray::SetSourcePositionsFailedToCollect() {
ByteArray BytecodeArray::SourcePositionTable() const {
Object maybe_table = source_position_table();
- if (maybe_table->IsByteArray()) return ByteArray::cast(maybe_table);
+ if (maybe_table.IsByteArray()) return ByteArray::cast(maybe_table);
ReadOnlyRoots roots = GetReadOnlyRoots();
- if (maybe_table->IsException(roots)) return roots.empty_byte_array();
+ if (maybe_table.IsException(roots)) return roots.empty_byte_array();
- DCHECK(!maybe_table->IsUndefined(roots));
- DCHECK(maybe_table->IsSourcePositionTableWithFrameCache());
+ DCHECK(!maybe_table.IsUndefined(roots));
+ DCHECK(maybe_table.IsSourcePositionTableWithFrameCache());
return SourcePositionTableWithFrameCache::cast(maybe_table)
- ->source_position_table();
+ .source_position_table();
}
ByteArray BytecodeArray::SourcePositionTableIfCollected() const {
@@ -746,20 +745,20 @@ ByteArray BytecodeArray::SourcePositionTableIfCollected() const {
void BytecodeArray::ClearFrameCacheFromSourcePositionTable() {
Object maybe_table = source_position_table();
- if (maybe_table->IsUndefined() || maybe_table->IsByteArray()) return;
- DCHECK(maybe_table->IsSourcePositionTableWithFrameCache());
+ if (maybe_table.IsUndefined() || maybe_table.IsByteArray()) return;
+ DCHECK(maybe_table.IsSourcePositionTableWithFrameCache());
set_source_position_table(SourcePositionTableWithFrameCache::cast(maybe_table)
- ->source_position_table());
+ .source_position_table());
}
int BytecodeArray::BytecodeArraySize() { return SizeFor(this->length()); }
int BytecodeArray::SizeIncludingMetadata() {
int size = BytecodeArraySize();
- size += constant_pool()->Size();
- size += handler_table()->Size();
+ size += constant_pool().Size();
+ size += handler_table().Size();
if (HasSourcePositionTable()) {
- size += SourcePositionTable()->Size();
+ size += SourcePositionTable().Size();
}
return size;
}
@@ -777,7 +776,7 @@ DEFINE_DEOPT_ENTRY_ACCESSORS(TranslationIndex, Smi)
DEFINE_DEOPT_ENTRY_ACCESSORS(Pc, Smi)
BailoutId DeoptimizationData::BytecodeOffset(int i) {
- return BailoutId(BytecodeOffsetRaw(i)->value());
+ return BailoutId(BytecodeOffsetRaw(i).value());
}
void DeoptimizationData::SetBytecodeOffset(int i, BailoutId value) {
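
code-inl.h also shows the other recurring substitution in this update: the READ_*_FIELD/WRITE_*_FIELD macro family gives way to templated ReadField<T>/WriteField<T> member functions, collapsing the per-type macros into one typed accessor. A simplified illustration of the shape of such accessors (assumed layout and names, not the V8 definitions):

#include <cstdint>
#include <cstring>

// Stand-in for a heap object whose fields live at byte offsets from a base.
class FakeHeapObject {
 public:
  explicit FakeHeapObject(uint8_t* base) : base_(base) {}

  template <typename T>
  T ReadField(int offset) const {  // typed read at a byte offset
    T value;
    std::memcpy(&value, base_ + offset, sizeof(T));  // alignment-safe
    return value;
  }

  template <typename T>
  void WriteField(int offset, T value) {  // typed write at a byte offset
    std::memcpy(base_ + offset, &value, sizeof(T));
  }

 private:
  uint8_t* base_;
};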
diff --git a/deps/v8/src/objects/code.cc b/deps/v8/src/objects/code.cc
index ab929db8a7..89180693a5 100644
--- a/deps/v8/src/objects/code.cc
+++ b/deps/v8/src/objects/code.cc
@@ -6,24 +6,24 @@
#include "src/objects/code.h"
-#include "src/assembler-inl.h"
-#include "src/cpu-features.h"
-#include "src/deoptimizer.h"
+#include "src/codegen/assembler-inl.h"
+#include "src/codegen/cpu-features.h"
+#include "src/codegen/reloc-info.h"
+#include "src/codegen/safepoint-table.h"
+#include "src/deoptimizer/deoptimizer.h"
#include "src/interpreter/bytecode-array-iterator.h"
#include "src/interpreter/bytecode-decoder.h"
#include "src/interpreter/interpreter.h"
#include "src/objects/allocation-site-inl.h"
-#include "src/ostreams.h"
-#include "src/reloc-info.h"
-#include "src/roots-inl.h"
-#include "src/safepoint-table.h"
-#include "src/snapshot/embedded-data.h"
+#include "src/roots/roots-inl.h"
+#include "src/snapshot/embedded/embedded-data.h"
+#include "src/utils/ostreams.h"
#ifdef ENABLE_DISASSEMBLER
-#include "src/code-comments.h"
-#include "src/disasm.h"
-#include "src/disassembler.h"
-#include "src/eh-frame.h"
+#include "src/codegen/code-comments.h"
+#include "src/diagnostics/disasm.h"
+#include "src/diagnostics/disassembler.h"
+#include "src/diagnostics/eh-frame.h"
#endif
namespace v8 {
@@ -63,12 +63,10 @@ int Code::ExecutableInstructionSize() const { return safepoint_table_offset(); }
void Code::ClearEmbeddedObjects(Heap* heap) {
HeapObject undefined = ReadOnlyRoots(heap).undefined_value();
- int mode_mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
+ int mode_mask = RelocInfo::EmbeddedObjectModeMask();
for (RelocIterator it(*this, mode_mask); !it.done(); it.next()) {
- RelocInfo::Mode mode = it.rinfo()->rmode();
- if (mode == RelocInfo::EMBEDDED_OBJECT) {
- it.rinfo()->set_target_object(heap, undefined, SKIP_WRITE_BARRIER);
- }
+ DCHECK(RelocInfo::IsEmbeddedObjectMode(it.rinfo()->rmode()));
+ it.rinfo()->set_target_object(heap, undefined, SKIP_WRITE_BARRIER);
}
set_embedded_objects_cleared(true);
}
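// The old single EMBEDDED_OBJECT reloc mode was split for pointer
// compression, so the mask above has to cover both variants. Presumably
// EmbeddedObjectModeMask() expands to something along these lines, matching
// the mode list asserted in IsIsolateIndependent() below:
//
//   RelocInfo::ModeMask(RelocInfo::COMPRESSED_EMBEDDED_OBJECT) |
//       RelocInfo::ModeMask(RelocInfo::FULL_EMBEDDED_OBJECT)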
@@ -107,7 +105,7 @@ void Code::CopyFromNoFlush(Heap* heap, const CodeDesc& desc) {
const int mode_mask = RelocInfo::PostCodegenRelocationMask();
for (RelocIterator it(*this, mode_mask); !it.done(); it.next()) {
RelocInfo::Mode mode = it.rinfo()->rmode();
- if (mode == RelocInfo::EMBEDDED_OBJECT) {
+ if (RelocInfo::IsEmbeddedObjectMode(mode)) {
Handle<HeapObject> p = it.rinfo()->target_object_handle(origin);
it.rinfo()->set_target_object(heap, *p, UPDATE_WRITE_BARRIER,
SKIP_ICACHE_FLUSH);
@@ -116,7 +114,7 @@ void Code::CopyFromNoFlush(Heap* heap, const CodeDesc& desc) {
// code object.
Handle<Object> p = it.rinfo()->target_object_handle(origin);
Code code = Code::cast(*p);
- it.rinfo()->set_target_address(code->raw_instruction_start(),
+ it.rinfo()->set_target_address(code.raw_instruction_start(),
UPDATE_WRITE_BARRIER, SKIP_ICACHE_FLUSH);
} else if (RelocInfo::IsRuntimeEntry(mode)) {
Address p = it.rinfo()->target_runtime_entry(origin);
@@ -194,12 +192,12 @@ void AbstractCode::SetStackFrameCache(Handle<AbstractCode> abstract_code,
namespace {
template <typename Code>
void DropStackFrameCacheCommon(Code code) {
- i::Object maybe_table = code->source_position_table();
- if (maybe_table->IsUndefined() || maybe_table->IsByteArray()) return;
- DCHECK(maybe_table->IsSourcePositionTableWithFrameCache());
- code->set_source_position_table(
+ i::Object maybe_table = code.source_position_table();
+ if (maybe_table.IsUndefined() || maybe_table.IsByteArray()) return;
+ DCHECK(maybe_table.IsSourcePositionTableWithFrameCache());
+ code.set_source_position_table(
i::SourcePositionTableWithFrameCache::cast(maybe_table)
- ->source_position_table());
+ .source_position_table());
}
} // namespace
@@ -213,7 +211,7 @@ void AbstractCode::DropStackFrameCache() {
int AbstractCode::SourcePosition(int offset) {
Object maybe_table = source_position_table();
- if (maybe_table->IsException()) return kNoSourcePosition;
+ if (maybe_table.IsException()) return kNoSourcePosition;
ByteArray source_position_table = ByteArray::cast(maybe_table);
int position = 0;
@@ -259,10 +257,10 @@ bool Code::CanDeoptAt(Address pc) {
DeoptimizationData deopt_data =
DeoptimizationData::cast(deoptimization_data());
Address code_start_address = InstructionStart();
- for (int i = 0; i < deopt_data->DeoptCount(); i++) {
- if (deopt_data->Pc(i)->value() == -1) continue;
- Address address = code_start_address + deopt_data->Pc(i)->value();
- if (address == pc && deopt_data->BytecodeOffset(i) != BailoutId::None()) {
+ for (int i = 0; i < deopt_data.DeoptCount(); i++) {
+ if (deopt_data.Pc(i).value() == -1) continue;
+ Address address = code_start_address + deopt_data.Pc(i).value();
+ if (address == pc && deopt_data.BytecodeOffset(i) != BailoutId::None()) {
return true;
}
}
@@ -302,7 +300,8 @@ bool Code::IsIsolateIndependent(Isolate* isolate) {
STATIC_ASSERT(mode_mask ==
(RelocInfo::ModeMask(RelocInfo::CODE_TARGET) |
RelocInfo::ModeMask(RelocInfo::RELATIVE_CODE_TARGET) |
- RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
+ RelocInfo::ModeMask(RelocInfo::COMPRESSED_EMBEDDED_OBJECT) |
+ RelocInfo::ModeMask(RelocInfo::FULL_EMBEDDED_OBJECT) |
RelocInfo::ModeMask(RelocInfo::EXTERNAL_REFERENCE) |
RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE) |
RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE_ENCODED) |
@@ -324,7 +323,7 @@ bool Code::IsIsolateIndependent(Isolate* isolate) {
if (InstructionStream::PcIsOffHeap(isolate, target_address)) continue;
Code target = Code::GetCodeFromTargetAddress(target_address);
- CHECK(target->IsCode());
+ CHECK(target.IsCode());
if (Builtins::IsIsolateIndependentBuiltin(target)) continue;
}
#endif
@@ -340,12 +339,12 @@ bool Code::Inlines(SharedFunctionInfo sfi) {
DisallowHeapAllocation no_gc;
DeoptimizationData const data =
DeoptimizationData::cast(deoptimization_data());
- if (data->length() == 0) return false;
- if (data->SharedFunctionInfo() == sfi) return true;
- FixedArray const literals = data->LiteralArray();
- int const inlined_count = data->InlinedFunctionCount()->value();
+ if (data.length() == 0) return false;
+ if (data.SharedFunctionInfo() == sfi) return true;
+ FixedArray const literals = data.LiteralArray();
+ int const inlined_count = data.InlinedFunctionCount().value();
for (int i = 0; i < inlined_count; ++i) {
- if (SharedFunctionInfo::cast(literals->get(i)) == sfi) return true;
+ if (SharedFunctionInfo::cast(literals.get(i)) == sfi) return true;
}
return false;
}
@@ -353,7 +352,7 @@ bool Code::Inlines(SharedFunctionInfo sfi) {
Code::OptimizedCodeIterator::OptimizedCodeIterator(Isolate* isolate) {
isolate_ = isolate;
Object list = isolate->heap()->native_contexts_list();
- next_context_ = list->IsUndefined(isolate_) ? Context() : Context::cast(list);
+ next_context_ = list.IsUndefined(isolate_) ? Context() : Context::cast(list);
}
Code Code::OptimizedCodeIterator::Next() {
@@ -361,21 +360,21 @@ Code Code::OptimizedCodeIterator::Next() {
Object next;
if (!current_code_.is_null()) {
// Get next code in the linked list.
- next = current_code_->next_code_link();
+ next = current_code_.next_code_link();
} else if (!next_context_.is_null()) {
// Linked list of code exhausted. Get list of next context.
- next = next_context_->OptimizedCodeListHead();
- Object next_context = next_context_->next_context_link();
- next_context_ = next_context->IsUndefined(isolate_)
+ next = next_context_.OptimizedCodeListHead();
+ Object next_context = next_context_.next_context_link();
+ next_context_ = next_context.IsUndefined(isolate_)
? Context()
: Context::cast(next_context);
} else {
// Exhausted contexts.
return Code();
}
- current_code_ = next->IsUndefined(isolate_) ? Code() : Code::cast(next);
+ current_code_ = next.IsUndefined(isolate_) ? Code() : Code::cast(next);
} while (current_code_.is_null());
- DCHECK_EQ(Code::OPTIMIZED_FUNCTION, current_code_->kind());
+ DCHECK_EQ(Code::OPTIMIZED_FUNCTION, current_code_.kind());
return current_code_;
}
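// A usage sketch, assuming an Isolate* in scope: Next() walks the
// per-native-context linked lists of optimized code and returns a null Code
// once every context's list is exhausted.
//
//   Code::OptimizedCodeIterator it(isolate);
//   for (Code code = it.Next(); !code.is_null(); code = it.Next()) {
//     // Each result has kind() == Code::OPTIMIZED_FUNCTION (DCHECK above).
//   }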
@@ -395,7 +394,7 @@ SharedFunctionInfo DeoptimizationData::GetInlinedFunction(int index) {
if (index == -1) {
return SharedFunctionInfo::cast(SharedFunctionInfo());
} else {
- return SharedFunctionInfo::cast(LiteralArray()->get(index));
+ return SharedFunctionInfo::cast(LiteralArray().get(index));
}
}
@@ -428,10 +427,10 @@ void DeoptimizationData::DeoptimizationDataPrint(std::ostream& os) { // NOLINT
}
disasm::NameConverter converter;
- int const inlined_function_count = InlinedFunctionCount()->value();
+ int const inlined_function_count = InlinedFunctionCount().value();
os << "Inlined functions (count = " << inlined_function_count << ")\n";
for (int id = 0; id < inlined_function_count; ++id) {
- Object info = LiteralArray()->get(id);
+ Object info = LiteralArray().get(id);
os << " " << Brief(SharedFunctionInfo::cast(info)) << "\n";
}
os << "\n";
@@ -445,7 +444,7 @@ void DeoptimizationData::DeoptimizationDataPrint(std::ostream& os) { // NOLINT
for (int i = 0; i < deopt_count; i++) {
os << std::setw(6) << i << " " << std::setw(15)
<< BytecodeOffset(i).ToInt() << " " << std::setw(4);
- print_pc(os, Pc(i)->value());
+ print_pc(os, Pc(i).value());
os << std::setw(2);
if (!FLAG_print_code_verbose) {
@@ -454,7 +453,7 @@ void DeoptimizationData::DeoptimizationDataPrint(std::ostream& os) { // NOLINT
}
// Print details of the frame translation.
- int translation_index = TranslationIndex(i)->value();
+ int translation_index = TranslationIndex(i).value();
TranslationIterator iterator(TranslationByteArray(), translation_index);
Translation::Opcode opcode =
static_cast<Translation::Opcode>(iterator.Next());
@@ -483,9 +482,9 @@ void DeoptimizationData::DeoptimizationDataPrint(std::ostream& os) { // NOLINT
unsigned height = iterator.Next();
int return_value_offset = iterator.Next();
int return_value_count = iterator.Next();
- Object shared_info = LiteralArray()->get(shared_info_id);
+ Object shared_info = LiteralArray().get(shared_info_id);
os << "{bytecode_offset=" << bytecode_offset << ", function="
- << Brief(SharedFunctionInfo::cast(shared_info)->DebugName())
+ << Brief(SharedFunctionInfo::cast(shared_info).DebugName())
<< ", height=" << height << ", retval=@" << return_value_offset
<< "(#" << return_value_count << ")}";
break;
@@ -494,10 +493,10 @@ void DeoptimizationData::DeoptimizationDataPrint(std::ostream& os) { // NOLINT
case Translation::CONSTRUCT_STUB_FRAME: {
int bailout_id = iterator.Next();
int shared_info_id = iterator.Next();
- Object shared_info = LiteralArray()->get(shared_info_id);
+ Object shared_info = LiteralArray().get(shared_info_id);
unsigned height = iterator.Next();
os << "{bailout_id=" << bailout_id << ", function="
- << Brief(SharedFunctionInfo::cast(shared_info)->DebugName())
+ << Brief(SharedFunctionInfo::cast(shared_info).DebugName())
<< ", height=" << height << "}";
break;
}
@@ -507,20 +506,20 @@ void DeoptimizationData::DeoptimizationDataPrint(std::ostream& os) { // NOLINT
case Translation::JAVA_SCRIPT_BUILTIN_CONTINUATION_WITH_CATCH_FRAME: {
int bailout_id = iterator.Next();
int shared_info_id = iterator.Next();
- Object shared_info = LiteralArray()->get(shared_info_id);
+ Object shared_info = LiteralArray().get(shared_info_id);
unsigned height = iterator.Next();
os << "{bailout_id=" << bailout_id << ", function="
- << Brief(SharedFunctionInfo::cast(shared_info)->DebugName())
+ << Brief(SharedFunctionInfo::cast(shared_info).DebugName())
<< ", height=" << height << "}";
break;
}
case Translation::ARGUMENTS_ADAPTOR_FRAME: {
int shared_info_id = iterator.Next();
- Object shared_info = LiteralArray()->get(shared_info_id);
+ Object shared_info = LiteralArray().get(shared_info_id);
unsigned height = iterator.Next();
os << "{function="
- << Brief(SharedFunctionInfo::cast(shared_info)->DebugName())
+ << Brief(SharedFunctionInfo::cast(shared_info).DebugName())
<< ", height=" << height << "}";
break;
}
@@ -610,7 +609,7 @@ void DeoptimizationData::DeoptimizationDataPrint(std::ostream& os) { // NOLINT
case Translation::LITERAL: {
int literal_index = iterator.Next();
- Object literal_value = LiteralArray()->get(literal_index);
+ Object literal_value = LiteralArray().get(literal_index);
os << "{literal_id=" << literal_index << " (" << Brief(literal_value)
<< ")}";
break;
@@ -705,7 +704,7 @@ void Code::Disassemble(const char* name, std::ostream& os, Address current_pc) {
constant_pool_offset());
for (int i = 0; i < pool_size; i += kSystemPointerSize, ptr++) {
SNPrintF(buf, "%4d %08" V8PRIxPTR, i, *ptr);
- os << static_cast<const void*>(ptr) << " " << buf.start() << "\n";
+ os << static_cast<const void*>(ptr) << " " << buf.begin() << "\n";
}
}
}
@@ -744,7 +743,7 @@ void Code::Disassemble(const char* name, std::ostream& os, Address current_pc) {
if (kind() == OPTIMIZED_FUNCTION) {
DeoptimizationData data =
DeoptimizationData::cast(this->deoptimization_data());
- data->DeoptimizationDataPrint(os);
+ data.DeoptimizationDataPrint(os);
}
os << "\n";
@@ -854,16 +853,16 @@ void BytecodeArray::Disassemble(std::ostream& os) {
iterator.Advance();
}
- os << "Constant pool (size = " << constant_pool()->length() << ")\n";
+ os << "Constant pool (size = " << constant_pool().length() << ")\n";
#ifdef OBJECT_PRINT
- if (constant_pool()->length() > 0) {
- constant_pool()->Print();
+ if (constant_pool().length() > 0) {
+ constant_pool().Print();
}
#endif
- os << "Handler Table (size = " << handler_table()->length() << ")\n";
+ os << "Handler Table (size = " << handler_table().length() << ")\n";
#ifdef ENABLE_DISASSEMBLER
- if (handler_table()->length() > 0) {
+ if (handler_table().length() > 0) {
HandlerTable table(*this);
table.HandlerTableRangePrint(os);
}
@@ -872,10 +871,10 @@ void BytecodeArray::Disassemble(std::ostream& os) {
void BytecodeArray::CopyBytecodesTo(BytecodeArray to) {
BytecodeArray from = *this;
- DCHECK_EQ(from->length(), to->length());
- CopyBytes(reinterpret_cast<byte*>(to->GetFirstBytecodeAddress()),
- reinterpret_cast<byte*>(from->GetFirstBytecodeAddress()),
- from->length());
+ DCHECK_EQ(from.length(), to.length());
+ CopyBytes(reinterpret_cast<byte*>(to.GetFirstBytecodeAddress()),
+ reinterpret_cast<byte*>(from.GetFirstBytecodeAddress()),
+ from.length());
}
void BytecodeArray::MakeOlder() {
@@ -1018,7 +1017,7 @@ bool DependentCode::MarkCodeForDeoptimization(
}
if (this->group() < group) {
// The group comes later in the list.
- return next_link()->MarkCodeForDeoptimization(isolate, group);
+ return next_link().MarkCodeForDeoptimization(isolate, group);
}
DCHECK_EQ(group, this->group());
DisallowHeapAllocation no_allocation_scope;
@@ -1029,8 +1028,8 @@ bool DependentCode::MarkCodeForDeoptimization(
MaybeObject obj = object_at(i);
if (obj->IsCleared()) continue;
Code code = Code::cast(obj->GetHeapObjectAssumeWeak());
- if (!code->marked_for_deoptimization()) {
- code->SetMarkedForDeoptimization(DependencyGroupName(group));
+ if (!code.marked_for_deoptimization()) {
+ code.SetMarkedForDeoptimization(DependencyGroupName(group));
marked = true;
}
}
@@ -1061,7 +1060,7 @@ void Code::SetMarkedForDeoptimization(const char* reason) {
PrintF(scope.file(),
"[marking dependent code " V8PRIxPTR_FMT
" (opt #%d) for deoptimization, reason: %s]\n",
- ptr(), deopt_data->OptimizationId()->value(), reason);
+ ptr(), deopt_data.OptimizationId().value(), reason);
}
}
diff --git a/deps/v8/src/objects/code.h b/deps/v8/src/objects/code.h
index 647cfebe69..a950261103 100644
--- a/deps/v8/src/objects/code.h
+++ b/deps/v8/src/objects/code.h
@@ -5,11 +5,11 @@
#ifndef V8_OBJECTS_CODE_H_
#define V8_OBJECTS_CODE_H_
-#include "src/contexts.h"
-#include "src/handler-table.h"
-#include "src/objects.h"
+#include "src/codegen/handler-table.h"
+#include "src/objects/contexts.h"
#include "src/objects/fixed-array.h"
#include "src/objects/heap-object.h"
+#include "src/objects/objects.h"
#include "src/objects/struct.h"
// Has to be the last include (doesn't have include guards):
@@ -22,7 +22,6 @@ class ByteArray;
class BytecodeArray;
class CodeDataContainer;
class CodeDesc;
-class MaybeObject;
namespace interpreter {
class Register;
@@ -43,6 +42,7 @@ class Code : public HeapObject {
V(BUILTIN) \
V(REGEXP) \
V(WASM_FUNCTION) \
+ V(WASM_TO_CAPI_FUNCTION) \
V(WASM_TO_JS_FUNCTION) \
V(JS_TO_WASM_FUNCTION) \
V(WASM_INTERPRETER_ENTRY) \
@@ -948,25 +948,22 @@ class DeoptimizationData : public FixedArray {
OBJECT_CONSTRUCTORS(DeoptimizationData, FixedArray);
};
-class SourcePositionTableWithFrameCache : public Tuple2 {
+class SourcePositionTableWithFrameCache : public Struct {
public:
DECL_ACCESSORS(source_position_table, ByteArray)
DECL_ACCESSORS(stack_frame_cache, SimpleNumberDictionary)
DECL_CAST(SourcePositionTableWithFrameCache)
-// Layout description.
-#define SOURCE_POSITION_TABLE_WITH_FRAME_FIELDS(V) \
- V(kSourcePositionTableIndex, kTaggedSize) \
- V(kStackFrameCacheIndex, kTaggedSize) \
- /* Total size. */ \
- V(kSize, 0)
+ DECL_PRINTER(SourcePositionTableWithFrameCache)
+ DECL_VERIFIER(SourcePositionTableWithFrameCache)
- DEFINE_FIELD_OFFSET_CONSTANTS(Struct::kHeaderSize,
- SOURCE_POSITION_TABLE_WITH_FRAME_FIELDS)
-#undef SOURCE_POSITION_TABLE_WITH_FRAME_FIELDS
+ // Layout description.
+ DEFINE_FIELD_OFFSET_CONSTANTS(
+ Struct::kHeaderSize,
+ TORQUE_GENERATED_SOURCE_POSITION_TABLE_WITH_FRAME_CACHE_FIELDS)
- OBJECT_CONSTRUCTORS(SourcePositionTableWithFrameCache, Tuple2);
+ OBJECT_CONSTRUCTORS(SourcePositionTableWithFrameCache, Struct);
};
} // namespace internal
diff --git a/deps/v8/src/objects/compilation-cache-inl.h b/deps/v8/src/objects/compilation-cache-inl.h
index 18491118ad..81b953a589 100644
--- a/deps/v8/src/objects/compilation-cache-inl.h
+++ b/deps/v8/src/objects/compilation-cache-inl.h
@@ -28,22 +28,22 @@ NEVER_READ_ONLY_SPACE_IMPL(CompilationCacheTable)
CAST_ACCESSOR(CompilationCacheTable)
uint32_t CompilationCacheShape::RegExpHash(String string, Smi flags) {
- return string->Hash() + flags->value();
+ return string.Hash() + flags.value();
}
uint32_t CompilationCacheShape::StringSharedHash(String source,
SharedFunctionInfo shared,
LanguageMode language_mode,
int position) {
- uint32_t hash = source->Hash();
- if (shared->HasSourceCode()) {
+ uint32_t hash = source.Hash();
+ if (shared.HasSourceCode()) {
// Instead of using the SharedFunctionInfo pointer in the hash
// code computation, we use a combination of the hash of the
// script source code and the start position of the calling scope.
// We do this to ensure that the cache entries can survive garbage
// collection.
- Script script(Script::cast(shared->script()));
- hash ^= String::cast(script->source())->Hash();
+ Script script(Script::cast(shared.script()));
+ hash ^= String::cast(script.source()).Hash();
STATIC_ASSERT(LanguageModeSize == 2);
if (is_strict(language_mode)) hash ^= 0x8000;
hash += position;
@@ -53,27 +53,27 @@ uint32_t CompilationCacheShape::StringSharedHash(String source,
uint32_t CompilationCacheShape::HashForObject(ReadOnlyRoots roots,
Object object) {
- if (object->IsNumber()) return static_cast<uint32_t>(object->Number());
+ if (object.IsNumber()) return static_cast<uint32_t>(object.Number());
FixedArray val = FixedArray::cast(object);
- if (val->map() == roots.fixed_cow_array_map()) {
- DCHECK_EQ(4, val->length());
- SharedFunctionInfo shared = SharedFunctionInfo::cast(val->get(0));
- String source = String::cast(val->get(1));
- int language_unchecked = Smi::ToInt(val->get(2));
+ if (val.map() == roots.fixed_cow_array_map()) {
+ DCHECK_EQ(4, val.length());
+ SharedFunctionInfo shared = SharedFunctionInfo::cast(val.get(0));
+ String source = String::cast(val.get(1));
+ int language_unchecked = Smi::ToInt(val.get(2));
DCHECK(is_valid_language_mode(language_unchecked));
LanguageMode language_mode = static_cast<LanguageMode>(language_unchecked);
- int position = Smi::ToInt(val->get(3));
+ int position = Smi::ToInt(val.get(3));
return StringSharedHash(source, shared, language_mode, position);
}
- DCHECK_LT(2, val->length());
- return RegExpHash(String::cast(val->get(JSRegExp::kSourceIndex)),
- Smi::cast(val->get(JSRegExp::kFlagsIndex)));
+ DCHECK_LT(2, val.length());
+ return RegExpHash(String::cast(val.get(JSRegExp::kSourceIndex)),
+ Smi::cast(val.get(JSRegExp::kFlagsIndex)));
}
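// For reference, the two entry layouts distinguished above: entries carrying
// the copy-on-write fixed-array map are script/eval entries of the form
// [shared, source, language_mode, position] (hence the length-4 DCHECK),
// while every other entry is a RegExp entry hashed from its source string
// and flags Smi at JSRegExp::kSourceIndex and JSRegExp::kFlagsIndex.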
InfoCellPair::InfoCellPair(SharedFunctionInfo shared,
FeedbackCell feedback_cell)
- : is_compiled_scope_(!shared.is_null() ? shared->is_compiled_scope()
+ : is_compiled_scope_(!shared.is_null() ? shared.is_compiled_scope()
: IsCompiledScope()),
shared_(shared),
feedback_cell_(feedback_cell) {}
diff --git a/deps/v8/src/objects/compilation-cache.h b/deps/v8/src/objects/compilation-cache.h
index d3feb1b233..2072339c5e 100644
--- a/deps/v8/src/objects/compilation-cache.h
+++ b/deps/v8/src/objects/compilation-cache.h
@@ -9,7 +9,7 @@
#include "src/objects/hash-table.h"
#include "src/objects/js-regexp.h"
#include "src/objects/shared-function-info.h"
-#include "src/roots.h"
+#include "src/roots/roots.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
diff --git a/deps/v8/src/objects/compressed-slots-inl.h b/deps/v8/src/objects/compressed-slots-inl.h
index 9c55de9ae6..b08bc938e5 100644
--- a/deps/v8/src/objects/compressed-slots-inl.h
+++ b/deps/v8/src/objects/compressed-slots-inl.h
@@ -7,10 +7,10 @@
#ifdef V8_COMPRESS_POINTERS
+#include "src/common/ptr-compr-inl.h"
#include "src/objects/compressed-slots.h"
#include "src/objects/heap-object-inl.h"
#include "src/objects/maybe-object-inl.h"
-#include "src/ptr-compr-inl.h"
namespace v8 {
namespace internal {
@@ -28,7 +28,7 @@ Object CompressedObjectSlot::operator*() const {
}
void CompressedObjectSlot::store(Object value) const {
- *location() = CompressTagged(value->ptr());
+ *location() = CompressTagged(value.ptr());
}
Object CompressedObjectSlot::Acquire_Load() const {
@@ -42,19 +42,19 @@ Object CompressedObjectSlot::Relaxed_Load() const {
}
void CompressedObjectSlot::Relaxed_Store(Object value) const {
- Tagged_t ptr = CompressTagged(value->ptr());
+ Tagged_t ptr = CompressTagged(value.ptr());
AsAtomicTagged::Relaxed_Store(location(), ptr);
}
void CompressedObjectSlot::Release_Store(Object value) const {
- Tagged_t ptr = CompressTagged(value->ptr());
+ Tagged_t ptr = CompressTagged(value.ptr());
AsAtomicTagged::Release_Store(location(), ptr);
}
Object CompressedObjectSlot::Release_CompareAndSwap(Object old,
Object target) const {
- Tagged_t old_ptr = CompressTagged(old->ptr());
- Tagged_t target_ptr = CompressTagged(target->ptr());
+ Tagged_t old_ptr = CompressTagged(old.ptr());
+ Tagged_t target_ptr = CompressTagged(target.ptr());
Tagged_t result =
AsAtomicTagged::Release_CompareAndSwap(location(), old_ptr, target_ptr);
return Object(DecompressTaggedAny(address(), result));
@@ -95,14 +95,14 @@ Object CompressedMapWordSlot::Acquire_Load() const {
}
void CompressedMapWordSlot::Release_Store(Object value) const {
- Tagged_t ptr = CompressTagged(value->ptr());
+ Tagged_t ptr = CompressTagged(value.ptr());
AsAtomicTagged::Release_Store(location(), ptr);
}
Object CompressedMapWordSlot::Release_CompareAndSwap(Object old,
Object target) const {
- Tagged_t old_ptr = CompressTagged(old->ptr());
- Tagged_t target_ptr = CompressTagged(target->ptr());
+ Tagged_t old_ptr = CompressTagged(old.ptr());
+ Tagged_t target_ptr = CompressTagged(target.ptr());
Tagged_t result =
AsAtomicTagged::Release_CompareAndSwap(location(), old_ptr, target_ptr);
return Object(DecompressTaggedPointer(address(), result));
@@ -118,7 +118,7 @@ MaybeObject CompressedMaybeObjectSlot::operator*() const {
}
void CompressedMaybeObjectSlot::store(MaybeObject value) const {
- *location() = CompressTagged(value->ptr());
+ *location() = CompressTagged(value.ptr());
}
MaybeObject CompressedMaybeObjectSlot::Relaxed_Load() const {
@@ -127,14 +127,14 @@ MaybeObject CompressedMaybeObjectSlot::Relaxed_Load() const {
}
void CompressedMaybeObjectSlot::Relaxed_Store(MaybeObject value) const {
- Tagged_t ptr = CompressTagged(value->ptr());
+ Tagged_t ptr = CompressTagged(value.ptr());
AsAtomicTagged::Relaxed_Store(location(), ptr);
}
void CompressedMaybeObjectSlot::Release_CompareAndSwap(
MaybeObject old, MaybeObject target) const {
- Tagged_t old_ptr = CompressTagged(old->ptr());
- Tagged_t target_ptr = CompressTagged(target->ptr());
+ Tagged_t old_ptr = CompressTagged(old.ptr());
+ Tagged_t target_ptr = CompressTagged(target.ptr());
AsAtomicTagged::Release_CompareAndSwap(location(), old_ptr, target_ptr);
}
@@ -158,7 +158,7 @@ HeapObject CompressedHeapObjectSlot::ToHeapObject() const {
}
void CompressedHeapObjectSlot::StoreHeapObject(HeapObject value) const {
- *location() = CompressTagged(value->ptr());
+ *location() = CompressTagged(value.ptr());
}
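// The pattern throughout this file: stores compress the full tagged word,
// value.ptr(), to a 32-bit Tagged_t before writing, and loads decompress
// against the slot's own address as the base, e.g.:
//
//   Tagged_t compressed = CompressTagged(value.ptr());            // store
//   Object restored(DecompressTaggedAny(address(), compressed));  // load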
} // namespace internal
diff --git a/deps/v8/src/objects/contexts-inl.h b/deps/v8/src/objects/contexts-inl.h
new file mode 100644
index 0000000000..bb861a1d1e
--- /dev/null
+++ b/deps/v8/src/objects/contexts-inl.h
@@ -0,0 +1,256 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_CONTEXTS_INL_H_
+#define V8_OBJECTS_CONTEXTS_INL_H_
+
+#include "src/objects/contexts.h"
+
+#include "src/heap/heap-write-barrier.h"
+#include "src/objects/dictionary-inl.h"
+#include "src/objects/fixed-array-inl.h"
+#include "src/objects/js-objects-inl.h"
+#include "src/objects/map-inl.h"
+#include "src/objects/objects-inl.h"
+#include "src/objects/regexp-match-info.h"
+#include "src/objects/scope-info.h"
+#include "src/objects/shared-function-info.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+OBJECT_CONSTRUCTORS_IMPL(ScriptContextTable, FixedArray)
+CAST_ACCESSOR(ScriptContextTable)
+
+int ScriptContextTable::used() const { return Smi::ToInt(get(kUsedSlotIndex)); }
+
+void ScriptContextTable::set_used(int used) {
+ set(kUsedSlotIndex, Smi::FromInt(used));
+}
+
+// static
+Handle<Context> ScriptContextTable::GetContext(Isolate* isolate,
+ Handle<ScriptContextTable> table,
+ int i) {
+ return handle(table->get_context(i), isolate);
+}
+
+Context ScriptContextTable::get_context(int i) const {
+ DCHECK_LT(i, used());
+ return Context::cast(this->get(i + kFirstContextSlotIndex));
+}
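+// A sketch of the layout assumed by used() and get_context() above: slot
+// kUsedSlotIndex holds the live context count as a Smi, and the script
+// contexts themselves occupy the slots that follow, so context i lives at
+// fixed-array slot (i + kFirstContextSlotIndex):
+//
+//   Context c = Context::cast(table.get(i + kFirstContextSlotIndex));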
+
+OBJECT_CONSTRUCTORS_IMPL(Context, HeapObject)
+NEVER_READ_ONLY_SPACE_IMPL(Context)
+CAST_ACCESSOR(Context)
+SMI_ACCESSORS(Context, length, kLengthOffset)
+
+CAST_ACCESSOR(NativeContext)
+
+Object Context::get(int index) const {
+ DCHECK_LT(static_cast<unsigned>(index),
+ static_cast<unsigned>(this->length()));
+ return RELAXED_READ_FIELD(*this, OffsetOfElementAt(index));
+}
+
+void Context::set(int index, Object value) {
+ DCHECK_LT(static_cast<unsigned>(index),
+ static_cast<unsigned>(this->length()));
+ int offset = OffsetOfElementAt(index);
+ RELAXED_WRITE_FIELD(*this, offset, value);
+ WRITE_BARRIER(*this, offset, value);
+}
+
+void Context::set(int index, Object value, WriteBarrierMode mode) {
+ DCHECK_LT(static_cast<unsigned>(index),
+ static_cast<unsigned>(this->length()));
+ int offset = OffsetOfElementAt(index);
+ RELAXED_WRITE_FIELD(*this, offset, value);
+ CONDITIONAL_WRITE_BARRIER(*this, offset, value, mode);
+}
+
+void Context::set_scope_info(ScopeInfo scope_info) {
+ set(SCOPE_INFO_INDEX, scope_info);
+}
+
+Object Context::unchecked_previous() { return get(PREVIOUS_INDEX); }
+
+Context Context::previous() {
+ Object result = get(PREVIOUS_INDEX);
+ DCHECK(IsBootstrappingOrValidParentContext(result, *this));
+ return Context::unchecked_cast(result);
+}
+void Context::set_previous(Context context) { set(PREVIOUS_INDEX, context); }
+
+Object Context::next_context_link() { return get(Context::NEXT_CONTEXT_LINK); }
+
+bool Context::has_extension() { return !extension().IsTheHole(); }
+HeapObject Context::extension() {
+ return HeapObject::cast(get(EXTENSION_INDEX));
+}
+void Context::set_extension(HeapObject object) { set(EXTENSION_INDEX, object); }
+
+NativeContext Context::native_context() const {
+ Object result = get(NATIVE_CONTEXT_INDEX);
+ DCHECK(IsBootstrappingOrNativeContext(this->GetIsolate(), result));
+ return NativeContext::unchecked_cast(result);
+}
+
+void Context::set_native_context(NativeContext context) {
+ set(NATIVE_CONTEXT_INDEX, context);
+}
+
+bool Context::IsFunctionContext() const {
+ return map().instance_type() == FUNCTION_CONTEXT_TYPE;
+}
+
+bool Context::IsCatchContext() const {
+ return map().instance_type() == CATCH_CONTEXT_TYPE;
+}
+
+bool Context::IsWithContext() const {
+ return map().instance_type() == WITH_CONTEXT_TYPE;
+}
+
+bool Context::IsDebugEvaluateContext() const {
+ return map().instance_type() == DEBUG_EVALUATE_CONTEXT_TYPE;
+}
+
+bool Context::IsAwaitContext() const {
+ return map().instance_type() == AWAIT_CONTEXT_TYPE;
+}
+
+bool Context::IsBlockContext() const {
+ return map().instance_type() == BLOCK_CONTEXT_TYPE;
+}
+
+bool Context::IsModuleContext() const {
+ return map().instance_type() == MODULE_CONTEXT_TYPE;
+}
+
+bool Context::IsEvalContext() const {
+ return map().instance_type() == EVAL_CONTEXT_TYPE;
+}
+
+bool Context::IsScriptContext() const {
+ return map().instance_type() == SCRIPT_CONTEXT_TYPE;
+}
+
+bool Context::HasSameSecurityTokenAs(Context that) const {
+ return this->native_context().security_token() ==
+ that.native_context().security_token();
+}
+
+#define NATIVE_CONTEXT_FIELD_ACCESSORS(index, type, name) \
+ void Context::set_##name(type value) { \
+ DCHECK(IsNativeContext()); \
+ set(index, value); \
+ } \
+ bool Context::is_##name(type value) const { \
+ DCHECK(IsNativeContext()); \
+ return type::cast(get(index)) == value; \
+ } \
+ type Context::name() const { \
+ DCHECK(IsNativeContext()); \
+ return type::cast(get(index)); \
+ }
+NATIVE_CONTEXT_FIELDS(NATIVE_CONTEXT_FIELD_ACCESSORS)
+#undef NATIVE_CONTEXT_FIELD_ACCESSORS
+
+#define CHECK_FOLLOWS2(v1, v2) STATIC_ASSERT((v1 + 1) == (v2))
+#define CHECK_FOLLOWS4(v1, v2, v3, v4) \
+ CHECK_FOLLOWS2(v1, v2); \
+ CHECK_FOLLOWS2(v2, v3); \
+ CHECK_FOLLOWS2(v3, v4)
+
+int Context::FunctionMapIndex(LanguageMode language_mode, FunctionKind kind,
+ bool has_shared_name, bool needs_home_object) {
+ if (IsClassConstructor(kind)) {
+ // Like the strict function map, but with no 'name' accessor. 'name'
+ // needs to be the last property and it is added during instantiation,
+    // in case a static property with the same name exists.
+ return CLASS_FUNCTION_MAP_INDEX;
+ }
+
+ int base = 0;
+ if (IsGeneratorFunction(kind)) {
+ CHECK_FOLLOWS4(GENERATOR_FUNCTION_MAP_INDEX,
+ GENERATOR_FUNCTION_WITH_NAME_MAP_INDEX,
+ GENERATOR_FUNCTION_WITH_HOME_OBJECT_MAP_INDEX,
+ GENERATOR_FUNCTION_WITH_NAME_AND_HOME_OBJECT_MAP_INDEX);
+ CHECK_FOLLOWS4(
+ ASYNC_GENERATOR_FUNCTION_MAP_INDEX,
+ ASYNC_GENERATOR_FUNCTION_WITH_NAME_MAP_INDEX,
+ ASYNC_GENERATOR_FUNCTION_WITH_HOME_OBJECT_MAP_INDEX,
+ ASYNC_GENERATOR_FUNCTION_WITH_NAME_AND_HOME_OBJECT_MAP_INDEX);
+
+ base = IsAsyncFunction(kind) ? ASYNC_GENERATOR_FUNCTION_MAP_INDEX
+ : GENERATOR_FUNCTION_MAP_INDEX;
+
+ } else if (IsAsyncFunction(kind)) {
+ CHECK_FOLLOWS4(ASYNC_FUNCTION_MAP_INDEX, ASYNC_FUNCTION_WITH_NAME_MAP_INDEX,
+ ASYNC_FUNCTION_WITH_HOME_OBJECT_MAP_INDEX,
+ ASYNC_FUNCTION_WITH_NAME_AND_HOME_OBJECT_MAP_INDEX);
+
+ base = ASYNC_FUNCTION_MAP_INDEX;
+
+ } else if (IsStrictFunctionWithoutPrototype(kind)) {
+ DCHECK_IMPLIES(IsArrowFunction(kind), !needs_home_object);
+ CHECK_FOLLOWS4(STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX,
+ METHOD_WITH_NAME_MAP_INDEX,
+ METHOD_WITH_HOME_OBJECT_MAP_INDEX,
+ METHOD_WITH_NAME_AND_HOME_OBJECT_MAP_INDEX);
+
+ base = STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX;
+
+ } else {
+ DCHECK(!needs_home_object);
+ CHECK_FOLLOWS2(SLOPPY_FUNCTION_MAP_INDEX,
+ SLOPPY_FUNCTION_WITH_NAME_MAP_INDEX);
+ CHECK_FOLLOWS2(STRICT_FUNCTION_MAP_INDEX,
+ STRICT_FUNCTION_WITH_NAME_MAP_INDEX);
+
+ base = is_strict(language_mode) ? STRICT_FUNCTION_MAP_INDEX
+ : SLOPPY_FUNCTION_MAP_INDEX;
+ }
+ int offset = static_cast<int>(!has_shared_name) |
+ (static_cast<int>(needs_home_object) << 1);
+ DCHECK_EQ(0, offset & ~3);
+
+ return base + offset;
+}
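+// A worked example of the base + offset scheme above: for a generator
+// function that has a statically known name and needs a home object,
+// has_shared_name == true and needs_home_object == true, so
+// offset == static_cast<int>(false) | (1 << 1) == 2, and the result is
+// GENERATOR_FUNCTION_WITH_HOME_OBJECT_MAP_INDEX, the third entry in the
+// CHECK_FOLLOWS4 ordering (plain, WITH_NAME, WITH_HOME_OBJECT,
+// WITH_NAME_AND_HOME_OBJECT).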
+
+#undef CHECK_FOLLOWS2
+#undef CHECK_FOLLOWS4
+
+Map Context::GetInitialJSArrayMap(ElementsKind kind) const {
+ DCHECK(IsNativeContext());
+ if (!IsFastElementsKind(kind)) return Map();
+ DisallowHeapAllocation no_gc;
+ Object const initial_js_array_map = get(Context::ArrayMapIndex(kind));
+ DCHECK(!initial_js_array_map.IsUndefined());
+ return Map::cast(initial_js_array_map);
+}
+
+MicrotaskQueue* NativeContext::microtask_queue() const {
+ return reinterpret_cast<MicrotaskQueue*>(
+ ReadField<Address>(kMicrotaskQueueOffset));
+}
+
+void NativeContext::set_microtask_queue(MicrotaskQueue* microtask_queue) {
+ WriteField<Address>(kMicrotaskQueueOffset,
+ reinterpret_cast<Address>(microtask_queue));
+}
+
+OBJECT_CONSTRUCTORS_IMPL(NativeContext, Context)
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_CONTEXTS_INL_H_
diff --git a/deps/v8/src/objects/contexts.cc b/deps/v8/src/objects/contexts.cc
new file mode 100644
index 0000000000..cddbcb98c0
--- /dev/null
+++ b/deps/v8/src/objects/contexts.cc
@@ -0,0 +1,512 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/objects/contexts.h"
+
+#include "src/ast/modules.h"
+#include "src/debug/debug.h"
+#include "src/execution/isolate-inl.h"
+#include "src/init/bootstrapper.h"
+#include "src/objects/module-inl.h"
+
+namespace v8 {
+namespace internal {
+
+Handle<ScriptContextTable> ScriptContextTable::Extend(
+ Handle<ScriptContextTable> table, Handle<Context> script_context) {
+ Handle<ScriptContextTable> result;
+ int used = table->used();
+ int length = table->length();
+ CHECK(used >= 0 && length > 0 && used < length);
+ if (used + kFirstContextSlotIndex == length) {
+ CHECK(length < Smi::kMaxValue / 2);
+ Isolate* isolate = script_context->GetIsolate();
+ Handle<FixedArray> copy =
+ isolate->factory()->CopyFixedArrayAndGrow(table, length);
+ copy->set_map(ReadOnlyRoots(isolate).script_context_table_map());
+ result = Handle<ScriptContextTable>::cast(copy);
+ } else {
+ result = table;
+ }
+ result->set_used(used + 1);
+
+ DCHECK(script_context->IsScriptContext());
+ result->set(used + kFirstContextSlotIndex, *script_context);
+ return result;
+}
+
+bool ScriptContextTable::Lookup(Isolate* isolate, ScriptContextTable table,
+ String name, LookupResult* result) {
+ DisallowHeapAllocation no_gc;
+ for (int i = 0; i < table.used(); i++) {
+ Context context = table.get_context(i);
+ DCHECK(context.IsScriptContext());
+ int slot_index = ScopeInfo::ContextSlotIndex(
+ context.scope_info(), name, &result->mode, &result->init_flag,
+ &result->maybe_assigned_flag);
+
+ if (slot_index >= 0) {
+ result->context_index = i;
+ result->slot_index = slot_index;
+ return true;
+ }
+ }
+ return false;
+}
+
+bool Context::is_declaration_context() {
+ if (IsFunctionContext() || IsNativeContext() || IsScriptContext() ||
+ IsModuleContext()) {
+ return true;
+ }
+ if (IsEvalContext()) {
+ return scope_info().language_mode() == LanguageMode::kStrict;
+ }
+ if (!IsBlockContext()) return false;
+ return scope_info().is_declaration_scope();
+}
+
+Context Context::declaration_context() {
+ Context current = *this;
+ while (!current.is_declaration_context()) {
+ current = current.previous();
+ }
+ return current;
+}
+
+Context Context::closure_context() {
+ Context current = *this;
+ while (!current.IsFunctionContext() && !current.IsScriptContext() &&
+ !current.IsModuleContext() && !current.IsNativeContext() &&
+ !current.IsEvalContext()) {
+ current = current.previous();
+ }
+ return current;
+}
+
+JSObject Context::extension_object() {
+ DCHECK(IsNativeContext() || IsFunctionContext() || IsBlockContext() ||
+ IsEvalContext() || IsCatchContext());
+ HeapObject object = extension();
+ if (object.IsTheHole()) return JSObject();
+ DCHECK(object.IsJSContextExtensionObject() ||
+ (IsNativeContext() && object.IsJSGlobalObject()));
+ return JSObject::cast(object);
+}
+
+JSReceiver Context::extension_receiver() {
+ DCHECK(IsNativeContext() || IsWithContext() || IsEvalContext() ||
+ IsFunctionContext() || IsBlockContext());
+ return IsWithContext() ? JSReceiver::cast(extension()) : extension_object();
+}
+
+ScopeInfo Context::scope_info() {
+ return ScopeInfo::cast(get(SCOPE_INFO_INDEX));
+}
+
+Module Context::module() {
+ Context current = *this;
+ while (!current.IsModuleContext()) {
+ current = current.previous();
+ }
+ return Module::cast(current.extension());
+}
+
+JSGlobalObject Context::global_object() {
+ return JSGlobalObject::cast(native_context().extension());
+}
+
+Context Context::script_context() {
+ Context current = *this;
+ while (!current.IsScriptContext()) {
+ current = current.previous();
+ }
+ return current;
+}
+
+JSGlobalProxy Context::global_proxy() {
+ return native_context().global_proxy_object();
+}
+
+void Context::set_global_proxy(JSGlobalProxy object) {
+ native_context().set_global_proxy_object(object);
+}
+
+/**
+ * Looks up a property in an object environment, taking the unscopables into
+ * account. This is used for the HasBinding spec algorithm for ObjectEnvironment.
+ */
+static Maybe<bool> UnscopableLookup(LookupIterator* it) {
+ Isolate* isolate = it->isolate();
+
+ Maybe<bool> found = JSReceiver::HasProperty(it);
+ if (found.IsNothing() || !found.FromJust()) return found;
+
+ Handle<Object> unscopables;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, unscopables,
+ JSReceiver::GetProperty(isolate,
+ Handle<JSReceiver>::cast(it->GetReceiver()),
+ isolate->factory()->unscopables_symbol()),
+ Nothing<bool>());
+ if (!unscopables->IsJSReceiver()) return Just(true);
+ Handle<Object> blacklist;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, blacklist,
+ JSReceiver::GetProperty(isolate, Handle<JSReceiver>::cast(unscopables),
+ it->name()),
+ Nothing<bool>());
+ return Just(!blacklist->BooleanValue(isolate));
+}
+
+static PropertyAttributes GetAttributesForMode(VariableMode mode) {
+ DCHECK(IsDeclaredVariableMode(mode));
+ return mode == VariableMode::kConst ? READ_ONLY : NONE;
+}
+
+// static
+Handle<Object> Context::Lookup(Handle<Context> context, Handle<String> name,
+ ContextLookupFlags flags, int* index,
+ PropertyAttributes* attributes,
+ InitializationFlag* init_flag,
+ VariableMode* variable_mode,
+ bool* is_sloppy_function_name) {
+ Isolate* isolate = context->GetIsolate();
+
+ bool follow_context_chain = (flags & FOLLOW_CONTEXT_CHAIN) != 0;
+ bool failed_whitelist = false;
+ *index = kNotFound;
+ *attributes = ABSENT;
+ *init_flag = kCreatedInitialized;
+ *variable_mode = VariableMode::kVar;
+ if (is_sloppy_function_name != nullptr) {
+ *is_sloppy_function_name = false;
+ }
+
+ if (FLAG_trace_contexts) {
+ PrintF("Context::Lookup(");
+ name->ShortPrint();
+ PrintF(")\n");
+ }
+
+ do {
+ if (FLAG_trace_contexts) {
+ PrintF(" - looking in context %p",
+ reinterpret_cast<void*>(context->ptr()));
+ if (context->IsScriptContext()) PrintF(" (script context)");
+ if (context->IsNativeContext()) PrintF(" (native context)");
+ PrintF("\n");
+ }
+
+ // 1. Check global objects, subjects of with, and extension objects.
+ DCHECK_IMPLIES(context->IsEvalContext(),
+ context->extension().IsTheHole(isolate));
+ if ((context->IsNativeContext() || context->IsWithContext() ||
+ context->IsFunctionContext() || context->IsBlockContext()) &&
+ !context->extension_receiver().is_null()) {
+ Handle<JSReceiver> object(context->extension_receiver(), isolate);
+
+ if (context->IsNativeContext()) {
+ DisallowHeapAllocation no_gc;
+ if (FLAG_trace_contexts) {
+ PrintF(" - trying other script contexts\n");
+ }
+ // Try other script contexts.
+ ScriptContextTable script_contexts =
+ context->global_object().native_context().script_context_table();
+ ScriptContextTable::LookupResult r;
+ if (ScriptContextTable::Lookup(isolate, script_contexts, *name, &r)) {
+ Context context = script_contexts.get_context(r.context_index);
+ if (FLAG_trace_contexts) {
+ PrintF("=> found property in script context %d: %p\n",
+ r.context_index, reinterpret_cast<void*>(context.ptr()));
+ }
+ *index = r.slot_index;
+ *variable_mode = r.mode;
+ *init_flag = r.init_flag;
+ *attributes = GetAttributesForMode(r.mode);
+ return handle(context, isolate);
+ }
+ }
+
+      // Context extension objects need to behave as if they have no
+ // prototype. So even if we want to follow prototype chains, we need
+ // to only do a local lookup for context extension objects.
+ Maybe<PropertyAttributes> maybe = Nothing<PropertyAttributes>();
+ if ((flags & FOLLOW_PROTOTYPE_CHAIN) == 0 ||
+ object->IsJSContextExtensionObject()) {
+ maybe = JSReceiver::GetOwnPropertyAttributes(object, name);
+ } else if (context->IsWithContext()) {
+ // A with context will never bind "this", but debug-eval may look into
+ // a with context when resolving "this". Other synthetic variables such
+ // as new.target may be resolved as VariableMode::kDynamicLocal due to
+        // bug v8:5405; skipping them here serves as a workaround until a more
+        // thorough fix can be applied.
+        // TODO(v8:5405): Replace this check with a DCHECK when resolution
+        // of synthetic variables does not go through this code path.
+ if (ScopeInfo::VariableIsSynthetic(*name)) {
+ maybe = Just(ABSENT);
+ } else {
+ LookupIterator it(object, name, object);
+ Maybe<bool> found = UnscopableLookup(&it);
+ if (found.IsNothing()) {
+ maybe = Nothing<PropertyAttributes>();
+ } else {
+ // Luckily, consumers of |maybe| only care whether the property
+ // was absent or not, so we can return a dummy |NONE| value
+ // for its attributes when it was present.
+ maybe = Just(found.FromJust() ? NONE : ABSENT);
+ }
+ }
+ } else {
+ maybe = JSReceiver::GetPropertyAttributes(object, name);
+ }
+
+ if (maybe.IsNothing()) return Handle<Object>();
+ DCHECK(!isolate->has_pending_exception());
+ *attributes = maybe.FromJust();
+
+ if (maybe.FromJust() != ABSENT) {
+ if (FLAG_trace_contexts) {
+ PrintF("=> found property in context object %p\n",
+ reinterpret_cast<void*>(object->ptr()));
+ }
+ return object;
+ }
+ }
+
+ // 2. Check the context proper if it has slots.
+ if (context->IsFunctionContext() || context->IsBlockContext() ||
+ context->IsScriptContext() || context->IsEvalContext() ||
+ context->IsModuleContext() || context->IsCatchContext()) {
+ DisallowHeapAllocation no_gc;
+ // Use serialized scope information of functions and blocks to search
+ // for the context index.
+ ScopeInfo scope_info = context->scope_info();
+ VariableMode mode;
+ InitializationFlag flag;
+ MaybeAssignedFlag maybe_assigned_flag;
+ int slot_index = ScopeInfo::ContextSlotIndex(scope_info, *name, &mode,
+ &flag, &maybe_assigned_flag);
+ DCHECK(slot_index < 0 || slot_index >= MIN_CONTEXT_SLOTS);
+ if (slot_index >= 0) {
+ if (FLAG_trace_contexts) {
+ PrintF("=> found local in context slot %d (mode = %hhu)\n",
+ slot_index, static_cast<uint8_t>(mode));
+ }
+ *index = slot_index;
+ *variable_mode = mode;
+ *init_flag = flag;
+ *attributes = GetAttributesForMode(mode);
+ return context;
+ }
+
+ // Check the slot corresponding to the intermediate context holding
+ // only the function name variable. It's conceptually (and spec-wise)
+ // in an outer scope of the function's declaration scope.
+ if (follow_context_chain && context->IsFunctionContext()) {
+ int function_index = scope_info.FunctionContextSlotIndex(*name);
+ if (function_index >= 0) {
+ if (FLAG_trace_contexts) {
+ PrintF("=> found intermediate function in context slot %d\n",
+ function_index);
+ }
+ *index = function_index;
+ *attributes = READ_ONLY;
+ *init_flag = kCreatedInitialized;
+ *variable_mode = VariableMode::kConst;
+ if (is_sloppy_function_name != nullptr &&
+ is_sloppy(scope_info.language_mode())) {
+ *is_sloppy_function_name = true;
+ }
+ return context;
+ }
+ }
+
+ // Lookup variable in module imports and exports.
+ if (context->IsModuleContext()) {
+ VariableMode mode;
+ InitializationFlag flag;
+ MaybeAssignedFlag maybe_assigned_flag;
+ int cell_index =
+ scope_info.ModuleIndex(*name, &mode, &flag, &maybe_assigned_flag);
+ if (cell_index != 0) {
+ if (FLAG_trace_contexts) {
+ PrintF("=> found in module imports or exports\n");
+ }
+ *index = cell_index;
+ *variable_mode = mode;
+ *init_flag = flag;
+ *attributes = ModuleDescriptor::GetCellIndexKind(cell_index) ==
+ ModuleDescriptor::kExport
+ ? GetAttributesForMode(mode)
+ : READ_ONLY;
+ return handle(context->module(), isolate);
+ }
+ }
+ } else if (context->IsDebugEvaluateContext()) {
+ // Check materialized locals.
+ Object ext = context->get(EXTENSION_INDEX);
+ if (ext.IsJSReceiver()) {
+ Handle<JSReceiver> extension(JSReceiver::cast(ext), isolate);
+ LookupIterator it(extension, name, extension);
+ Maybe<bool> found = JSReceiver::HasProperty(&it);
+ if (found.FromMaybe(false)) {
+ *attributes = NONE;
+ return extension;
+ }
+ }
+ // Check the original context, but do not follow its context chain.
+ Object obj = context->get(WRAPPED_CONTEXT_INDEX);
+ if (obj.IsContext()) {
+ Handle<Context> context(Context::cast(obj), isolate);
+ Handle<Object> result =
+ Context::Lookup(context, name, DONT_FOLLOW_CHAINS, index,
+ attributes, init_flag, variable_mode);
+ if (!result.is_null()) return result;
+ }
+      // Check the whitelist. Names that do not pass the whitelist may only
+      // resolve to with, script or native contexts up the context chain.
+ obj = context->get(WHITE_LIST_INDEX);
+ if (obj.IsStringSet()) {
+ failed_whitelist =
+ failed_whitelist || !StringSet::cast(obj).Has(isolate, name);
+ }
+ }
+
+ // 3. Prepare to continue with the previous (next outermost) context.
+ if (context->IsNativeContext()) break;
+
+ do {
+ context = Handle<Context>(context->previous(), isolate);
+ // If we come across a whitelist context, and the name is not
+ // whitelisted, then only consider with, script, module or native
+ // contexts.
+ } while (failed_whitelist && !context->IsScriptContext() &&
+ !context->IsNativeContext() && !context->IsWithContext() &&
+ !context->IsModuleContext());
+ } while (follow_context_chain);
+
+ if (FLAG_trace_contexts) {
+ PrintF("=> no property/slot found\n");
+ }
+ return Handle<Object>::null();
+}
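+// A call sketch for the lookup above; the variable names are illustrative
+// only. A null handle means no binding was found. Otherwise the result is
+// the holder: a Context (with *index set to the slot), the module (for
+// module imports and exports), or the extension receiver carrying the
+// property.
+//
+//   int index;
+//   PropertyAttributes attributes;
+//   InitializationFlag init_flag;
+//   VariableMode mode;
+//   Handle<Object> holder =
+//       Context::Lookup(context, name, FOLLOW_CHAINS, &index, &attributes,
+//                       &init_flag, &mode);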
+
+void Context::AddOptimizedCode(Code code) {
+ DCHECK(IsNativeContext());
+ DCHECK(code.kind() == Code::OPTIMIZED_FUNCTION);
+ DCHECK(code.next_code_link().IsUndefined());
+ code.set_next_code_link(get(OPTIMIZED_CODE_LIST));
+ set(OPTIMIZED_CODE_LIST, code, UPDATE_WEAK_WRITE_BARRIER);
+}
+
+void Context::SetOptimizedCodeListHead(Object head) {
+ DCHECK(IsNativeContext());
+ set(OPTIMIZED_CODE_LIST, head, UPDATE_WEAK_WRITE_BARRIER);
+}
+
+Object Context::OptimizedCodeListHead() {
+ DCHECK(IsNativeContext());
+ return get(OPTIMIZED_CODE_LIST);
+}
+
+void Context::SetDeoptimizedCodeListHead(Object head) {
+ DCHECK(IsNativeContext());
+ set(DEOPTIMIZED_CODE_LIST, head, UPDATE_WEAK_WRITE_BARRIER);
+}
+
+Object Context::DeoptimizedCodeListHead() {
+ DCHECK(IsNativeContext());
+ return get(DEOPTIMIZED_CODE_LIST);
+}
+
+Handle<Object> Context::ErrorMessageForCodeGenerationFromStrings() {
+ Isolate* isolate = GetIsolate();
+ Handle<Object> result(error_message_for_code_gen_from_strings(), isolate);
+ if (!result->IsUndefined(isolate)) return result;
+ return isolate->factory()->NewStringFromStaticChars(
+ "Code generation from strings disallowed for this context");
+}
+
+#define COMPARE_NAME(index, type, name) \
+ if (string->IsOneByteEqualTo(StaticCharVector(#name))) return index;
+
+int Context::IntrinsicIndexForName(Handle<String> string) {
+ NATIVE_CONTEXT_INTRINSIC_FUNCTIONS(COMPARE_NAME);
+ return kNotFound;
+}
+
+#undef COMPARE_NAME
+
+#define COMPARE_NAME(index, type, name) \
+ if (strncmp(string, #name, length) == 0) return index;
+
+int Context::IntrinsicIndexForName(const unsigned char* unsigned_string,
+ int length) {
+ const char* string = reinterpret_cast<const char*>(unsigned_string);
+ NATIVE_CONTEXT_INTRINSIC_FUNCTIONS(COMPARE_NAME);
+ return kNotFound;
+}
+
+#undef COMPARE_NAME
+
+#ifdef DEBUG
+
+bool Context::IsBootstrappingOrNativeContext(Isolate* isolate, Object object) {
+ // During bootstrapping we allow all objects to pass as global
+ // objects. This is necessary to fix circular dependencies.
+ return isolate->heap()->gc_state() != Heap::NOT_IN_GC ||
+ isolate->bootstrapper()->IsActive() || object.IsNativeContext();
+}
+
+bool Context::IsBootstrappingOrValidParentContext(Object object,
+ Context child) {
+ // During bootstrapping we allow all objects to pass as
+ // contexts. This is necessary to fix circular dependencies.
+ if (child.GetIsolate()->bootstrapper()->IsActive()) return true;
+ if (!object.IsContext()) return false;
+ Context context = Context::cast(object);
+ return context.IsNativeContext() || context.IsScriptContext() ||
+ context.IsModuleContext() || !child.IsModuleContext();
+}
+
+#endif
+
+void Context::ResetErrorsThrown() {
+ DCHECK(IsNativeContext());
+ set_errors_thrown(Smi::FromInt(0));
+}
+
+void Context::IncrementErrorsThrown() {
+ DCHECK(IsNativeContext());
+
+ int previous_value = errors_thrown().value();
+ set_errors_thrown(Smi::FromInt(previous_value + 1));
+}
+
+int Context::GetErrorsThrown() { return errors_thrown().value(); }
+
+STATIC_ASSERT(Context::MIN_CONTEXT_SLOTS == 4);
+STATIC_ASSERT(NativeContext::kScopeInfoOffset ==
+ Context::OffsetOfElementAt(NativeContext::SCOPE_INFO_INDEX));
+STATIC_ASSERT(NativeContext::kPreviousOffset ==
+ Context::OffsetOfElementAt(NativeContext::PREVIOUS_INDEX));
+STATIC_ASSERT(NativeContext::kExtensionOffset ==
+ Context::OffsetOfElementAt(NativeContext::EXTENSION_INDEX));
+STATIC_ASSERT(NativeContext::kNativeContextOffset ==
+ Context::OffsetOfElementAt(NativeContext::NATIVE_CONTEXT_INDEX));
+
+STATIC_ASSERT(NativeContext::kStartOfStrongFieldsOffset ==
+ Context::OffsetOfElementAt(-1));
+STATIC_ASSERT(NativeContext::kStartOfWeakFieldsOffset ==
+ Context::OffsetOfElementAt(NativeContext::FIRST_WEAK_SLOT));
+STATIC_ASSERT(NativeContext::kMicrotaskQueueOffset ==
+ Context::SizeFor(NativeContext::NATIVE_CONTEXT_SLOTS));
+STATIC_ASSERT(NativeContext::kSize ==
+ (Context::SizeFor(NativeContext::NATIVE_CONTEXT_SLOTS) +
+ kSystemPointerSize));
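+// Reading the asserts together: a NativeContext is an ordinary Context of
+// NATIVE_CONTEXT_SLOTS tagged slots (weak fields starting at
+// FIRST_WEAK_SLOT), followed by a single raw kSystemPointerSize field, the
+// MicrotaskQueue* that contexts-inl.h accesses via ReadField<Address> and
+// WriteField<Address>. That trailing untagged pointer is why kSize is one
+// system pointer larger than SizeFor(NATIVE_CONTEXT_SLOTS).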
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/objects/contexts.h b/deps/v8/src/objects/contexts.h
new file mode 100644
index 0000000000..d83e351550
--- /dev/null
+++ b/deps/v8/src/objects/contexts.h
@@ -0,0 +1,720 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_CONTEXTS_H_
+#define V8_OBJECTS_CONTEXTS_H_
+
+#include "src/objects/fixed-array.h"
+#include "src/objects/function-kind.h"
+#include "torque-generated/field-offsets-tq.h"
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+class JSGlobalObject;
+class JSGlobalProxy;
+class MicrotaskQueue;
+class NativeContext;
+class RegExpMatchInfo;
+
+enum ContextLookupFlags {
+ FOLLOW_CONTEXT_CHAIN = 1 << 0,
+ FOLLOW_PROTOTYPE_CHAIN = 1 << 1,
+
+ DONT_FOLLOW_CHAINS = 0,
+ FOLLOW_CHAINS = FOLLOW_CONTEXT_CHAIN | FOLLOW_PROTOTYPE_CHAIN,
+};
+
+// Heap-allocated activation contexts.
+//
+// Contexts are implemented as FixedArray-like objects having a fixed
+// header with a set of common fields.
+//
+// Note: Context must have no virtual functions and Context objects
+// must always be allocated via Heap::AllocateContext() or
+// Factory::NewContext.
+
+#define NATIVE_CONTEXT_INTRINSIC_FUNCTIONS(V) \
+ V(ASYNC_FUNCTION_PROMISE_CREATE_INDEX, JSFunction, \
+ async_function_promise_create) \
+ V(IS_ARRAYLIKE, JSFunction, is_arraylike) \
+ V(GENERATOR_NEXT_INTERNAL, JSFunction, generator_next_internal) \
+ V(MAKE_ERROR_INDEX, JSFunction, make_error) \
+ V(MAKE_RANGE_ERROR_INDEX, JSFunction, make_range_error) \
+ V(MAKE_SYNTAX_ERROR_INDEX, JSFunction, make_syntax_error) \
+ V(MAKE_TYPE_ERROR_INDEX, JSFunction, make_type_error) \
+ V(MAKE_URI_ERROR_INDEX, JSFunction, make_uri_error) \
+ V(OBJECT_CREATE, JSFunction, object_create) \
+ V(OBJECT_DEFINE_PROPERTIES, JSFunction, object_define_properties) \
+ V(OBJECT_DEFINE_PROPERTY, JSFunction, object_define_property) \
+ V(OBJECT_GET_PROTOTYPE_OF, JSFunction, object_get_prototype_of) \
+ V(OBJECT_IS_EXTENSIBLE, JSFunction, object_is_extensible) \
+ V(OBJECT_IS_FROZEN, JSFunction, object_is_frozen) \
+ V(OBJECT_IS_SEALED, JSFunction, object_is_sealed) \
+ V(OBJECT_KEYS, JSFunction, object_keys) \
+ V(REFLECT_APPLY_INDEX, JSFunction, reflect_apply) \
+ V(REFLECT_CONSTRUCT_INDEX, JSFunction, reflect_construct) \
+ V(REFLECT_DEFINE_PROPERTY_INDEX, JSFunction, reflect_define_property) \
+ V(REFLECT_DELETE_PROPERTY_INDEX, JSFunction, reflect_delete_property) \
+ V(MATH_FLOOR_INDEX, JSFunction, math_floor) \
+ V(MATH_POW_INDEX, JSFunction, math_pow) \
+ V(NEW_PROMISE_CAPABILITY_INDEX, JSFunction, new_promise_capability) \
+ V(PROMISE_INTERNAL_CONSTRUCTOR_INDEX, JSFunction, \
+ promise_internal_constructor) \
+ V(IS_PROMISE_INDEX, JSFunction, is_promise) \
+ V(PROMISE_THEN_INDEX, JSFunction, promise_then)
+
+#define NATIVE_CONTEXT_FIELDS(V) \
+ V(GLOBAL_PROXY_INDEX, JSGlobalProxy, global_proxy_object) \
+ /* TODO(ishell): Actually we store exactly EmbedderDataArray here but */ \
+  /* it's already UBSan-friendly and doesn't require a star... So declare */ \
+ /* it as a HeapObject for now. */ \
+ V(EMBEDDER_DATA_INDEX, HeapObject, embedder_data) \
+ /* Below is alpha-sorted */ \
+ V(ACCESSOR_PROPERTY_DESCRIPTOR_MAP_INDEX, Map, \
+ accessor_property_descriptor_map) \
+ V(ALLOW_CODE_GEN_FROM_STRINGS_INDEX, Object, allow_code_gen_from_strings) \
+ V(ARRAY_BUFFER_FUN_INDEX, JSFunction, array_buffer_fun) \
+ V(ARRAY_BUFFER_MAP_INDEX, Map, array_buffer_map) \
+ V(ARRAY_BUFFER_NOINIT_FUN_INDEX, JSFunction, array_buffer_noinit_fun) \
+ V(ARRAY_FUNCTION_INDEX, JSFunction, array_function) \
+ V(ARRAY_JOIN_STACK_INDEX, HeapObject, array_join_stack) \
+ V(ASYNC_FROM_SYNC_ITERATOR_MAP_INDEX, Map, async_from_sync_iterator_map) \
+ V(ASYNC_FUNCTION_AWAIT_REJECT_SHARED_FUN, SharedFunctionInfo, \
+ async_function_await_reject_shared_fun) \
+ V(ASYNC_FUNCTION_AWAIT_RESOLVE_SHARED_FUN, SharedFunctionInfo, \
+ async_function_await_resolve_shared_fun) \
+ V(ASYNC_FUNCTION_FUNCTION_INDEX, JSFunction, async_function_constructor) \
+ V(ASYNC_FUNCTION_OBJECT_MAP_INDEX, Map, async_function_object_map) \
+ V(ASYNC_GENERATOR_FUNCTION_FUNCTION_INDEX, JSFunction, \
+ async_generator_function_function) \
+ V(ASYNC_ITERATOR_VALUE_UNWRAP_SHARED_FUN, SharedFunctionInfo, \
+ async_iterator_value_unwrap_shared_fun) \
+ V(ASYNC_GENERATOR_AWAIT_REJECT_SHARED_FUN, SharedFunctionInfo, \
+ async_generator_await_reject_shared_fun) \
+ V(ASYNC_GENERATOR_AWAIT_RESOLVE_SHARED_FUN, SharedFunctionInfo, \
+ async_generator_await_resolve_shared_fun) \
+ V(ASYNC_GENERATOR_YIELD_RESOLVE_SHARED_FUN, SharedFunctionInfo, \
+ async_generator_yield_resolve_shared_fun) \
+ V(ASYNC_GENERATOR_RETURN_RESOLVE_SHARED_FUN, SharedFunctionInfo, \
+ async_generator_return_resolve_shared_fun) \
+ V(ASYNC_GENERATOR_RETURN_CLOSED_RESOLVE_SHARED_FUN, SharedFunctionInfo, \
+ async_generator_return_closed_resolve_shared_fun) \
+ V(ASYNC_GENERATOR_RETURN_CLOSED_REJECT_SHARED_FUN, SharedFunctionInfo, \
+ async_generator_return_closed_reject_shared_fun) \
+ V(ATOMICS_OBJECT, JSObject, atomics_object) \
+ V(BIGINT_FUNCTION_INDEX, JSFunction, bigint_function) \
+ V(BIGINT64_ARRAY_FUN_INDEX, JSFunction, bigint64_array_fun) \
+ V(BIGUINT64_ARRAY_FUN_INDEX, JSFunction, biguint64_array_fun) \
+ V(BOOLEAN_FUNCTION_INDEX, JSFunction, boolean_function) \
+ V(BOUND_FUNCTION_WITH_CONSTRUCTOR_MAP_INDEX, Map, \
+ bound_function_with_constructor_map) \
+ V(BOUND_FUNCTION_WITHOUT_CONSTRUCTOR_MAP_INDEX, Map, \
+ bound_function_without_constructor_map) \
+ V(CALL_AS_CONSTRUCTOR_DELEGATE_INDEX, JSFunction, \
+ call_as_constructor_delegate) \
+ V(CALL_AS_FUNCTION_DELEGATE_INDEX, JSFunction, call_as_function_delegate) \
+ V(CALLSITE_FUNCTION_INDEX, JSFunction, callsite_function) \
+ V(CONTEXT_EXTENSION_FUNCTION_INDEX, JSFunction, context_extension_function) \
+ V(DATA_PROPERTY_DESCRIPTOR_MAP_INDEX, Map, data_property_descriptor_map) \
+ V(DATA_VIEW_FUN_INDEX, JSFunction, data_view_fun) \
+ V(DATE_FUNCTION_INDEX, JSFunction, date_function) \
+ V(DEBUG_CONTEXT_ID_INDEX, Object, debug_context_id) \
+ V(EMPTY_FUNCTION_INDEX, JSFunction, empty_function) \
+ V(ERROR_MESSAGE_FOR_CODE_GEN_FROM_STRINGS_INDEX, Object, \
+ error_message_for_code_gen_from_strings) \
+ V(ERRORS_THROWN_INDEX, Smi, errors_thrown) \
+ V(EXTRAS_EXPORTS_OBJECT_INDEX, JSObject, extras_binding_object) \
+ V(EXTRAS_UTILS_OBJECT_INDEX, Object, extras_utils_object) \
+ V(FAST_ALIASED_ARGUMENTS_MAP_INDEX, Map, fast_aliased_arguments_map) \
+ V(FAST_TEMPLATE_INSTANTIATIONS_CACHE_INDEX, FixedArray, \
+ fast_template_instantiations_cache) \
+ V(FLOAT32_ARRAY_FUN_INDEX, JSFunction, float32_array_fun) \
+ V(FLOAT64_ARRAY_FUN_INDEX, JSFunction, float64_array_fun) \
+ V(FUNCTION_FUNCTION_INDEX, JSFunction, function_function) \
+ V(GENERATOR_FUNCTION_FUNCTION_INDEX, JSFunction, \
+ generator_function_function) \
+ V(GENERATOR_OBJECT_PROTOTYPE_MAP_INDEX, Map, generator_object_prototype_map) \
+ V(ASYNC_GENERATOR_OBJECT_PROTOTYPE_MAP_INDEX, Map, \
+ async_generator_object_prototype_map) \
+ V(INITIAL_ARRAY_ITERATOR_MAP_INDEX, Map, initial_array_iterator_map) \
+ V(INITIAL_ARRAY_ITERATOR_PROTOTYPE_INDEX, JSObject, \
+ initial_array_iterator_prototype) \
+ V(INITIAL_ARRAY_PROTOTYPE_INDEX, JSObject, initial_array_prototype) \
+ V(INITIAL_ERROR_PROTOTYPE_INDEX, JSObject, initial_error_prototype) \
+ V(INITIAL_GENERATOR_PROTOTYPE_INDEX, JSObject, initial_generator_prototype) \
+ V(INITIAL_ASYNC_GENERATOR_PROTOTYPE_INDEX, JSObject, \
+ initial_async_generator_prototype) \
+ V(INITIAL_ITERATOR_PROTOTYPE_INDEX, JSObject, initial_iterator_prototype) \
+ V(INITIAL_MAP_ITERATOR_PROTOTYPE_INDEX, JSObject, \
+ initial_map_iterator_prototype) \
+ V(INITIAL_MAP_PROTOTYPE_MAP_INDEX, Map, initial_map_prototype_map) \
+ V(INITIAL_OBJECT_PROTOTYPE_INDEX, JSObject, initial_object_prototype) \
+ V(INITIAL_SET_ITERATOR_PROTOTYPE_INDEX, JSObject, \
+ initial_set_iterator_prototype) \
+ V(INITIAL_SET_PROTOTYPE_INDEX, JSObject, initial_set_prototype) \
+ V(INITIAL_SET_PROTOTYPE_MAP_INDEX, Map, initial_set_prototype_map) \
+ V(INITIAL_STRING_ITERATOR_MAP_INDEX, Map, initial_string_iterator_map) \
+ V(INITIAL_STRING_ITERATOR_PROTOTYPE_INDEX, JSObject, \
+ initial_string_iterator_prototype) \
+ V(INITIAL_STRING_PROTOTYPE_INDEX, JSObject, initial_string_prototype) \
+ V(INITIAL_WEAKMAP_PROTOTYPE_MAP_INDEX, Map, initial_weakmap_prototype_map) \
+ V(INITIAL_WEAKSET_PROTOTYPE_MAP_INDEX, Map, initial_weakset_prototype_map) \
+ V(INT16_ARRAY_FUN_INDEX, JSFunction, int16_array_fun) \
+ V(INT32_ARRAY_FUN_INDEX, JSFunction, int32_array_fun) \
+ V(INT8_ARRAY_FUN_INDEX, JSFunction, int8_array_fun) \
+ V(INTL_COLLATOR_FUNCTION_INDEX, JSFunction, intl_collator_function) \
+ V(INTL_DATE_TIME_FORMAT_FUNCTION_INDEX, JSFunction, \
+ intl_date_time_format_function) \
+ V(INTL_NUMBER_FORMAT_FUNCTION_INDEX, JSFunction, \
+ intl_number_format_function) \
+ V(INTL_LOCALE_FUNCTION_INDEX, JSFunction, intl_locale_function) \
+ V(INTL_SEGMENT_ITERATOR_MAP_INDEX, Map, intl_segment_iterator_map) \
+ V(ITERATOR_RESULT_MAP_INDEX, Map, iterator_result_map) \
+ V(JS_ARRAY_PACKED_SMI_ELEMENTS_MAP_INDEX, Map, \
+ js_array_packed_smi_elements_map) \
+ V(JS_ARRAY_HOLEY_SMI_ELEMENTS_MAP_INDEX, Map, \
+ js_array_holey_smi_elements_map) \
+ V(JS_ARRAY_PACKED_ELEMENTS_MAP_INDEX, Map, js_array_packed_elements_map) \
+ V(JS_ARRAY_HOLEY_ELEMENTS_MAP_INDEX, Map, js_array_holey_elements_map) \
+ V(JS_ARRAY_PACKED_DOUBLE_ELEMENTS_MAP_INDEX, Map, \
+ js_array_packed_double_elements_map) \
+ V(JS_ARRAY_HOLEY_DOUBLE_ELEMENTS_MAP_INDEX, Map, \
+ js_array_holey_double_elements_map) \
+ V(JS_MAP_FUN_INDEX, JSFunction, js_map_fun) \
+ V(JS_MAP_MAP_INDEX, Map, js_map_map) \
+ V(JS_MODULE_NAMESPACE_MAP, Map, js_module_namespace_map) \
+ V(JS_SET_FUN_INDEX, JSFunction, js_set_fun) \
+ V(JS_SET_MAP_INDEX, Map, js_set_map) \
+ V(WEAK_CELL_MAP_INDEX, Map, weak_cell_map) \
+ V(JS_FINALIZATION_GROUP_CLEANUP_ITERATOR_MAP_INDEX, Map, \
+ js_finalization_group_cleanup_iterator_map) \
+ V(JS_WEAK_MAP_FUN_INDEX, JSFunction, js_weak_map_fun) \
+ V(JS_WEAK_REF_MAP_INDEX, Map, js_weak_ref_map) \
+ V(JS_WEAK_SET_FUN_INDEX, JSFunction, js_weak_set_fun) \
+ V(MAP_CACHE_INDEX, Object, map_cache) \
+ V(MAP_KEY_ITERATOR_MAP_INDEX, Map, map_key_iterator_map) \
+ V(MAP_KEY_VALUE_ITERATOR_MAP_INDEX, Map, map_key_value_iterator_map) \
+ V(MAP_VALUE_ITERATOR_MAP_INDEX, Map, map_value_iterator_map) \
+ V(MATH_RANDOM_INDEX_INDEX, Smi, math_random_index) \
+ V(MATH_RANDOM_STATE_INDEX, ByteArray, math_random_state) \
+ V(MATH_RANDOM_CACHE_INDEX, FixedDoubleArray, math_random_cache) \
+ V(MESSAGE_LISTENERS_INDEX, TemplateList, message_listeners) \
+ V(NORMALIZED_MAP_CACHE_INDEX, Object, normalized_map_cache) \
+ V(NUMBER_FUNCTION_INDEX, JSFunction, number_function) \
+ V(OBJECT_FUNCTION_INDEX, JSFunction, object_function) \
+ V(OBJECT_FUNCTION_PROTOTYPE_MAP_INDEX, Map, object_function_prototype_map) \
+ V(OPAQUE_REFERENCE_FUNCTION_INDEX, JSFunction, opaque_reference_function) \
+ V(PROXY_CALLABLE_MAP_INDEX, Map, proxy_callable_map) \
+ V(PROXY_CONSTRUCTOR_MAP_INDEX, Map, proxy_constructor_map) \
+ V(PROXY_FUNCTION_INDEX, JSFunction, proxy_function) \
+ V(PROXY_MAP_INDEX, Map, proxy_map) \
+ V(PROXY_REVOCABLE_RESULT_MAP_INDEX, Map, proxy_revocable_result_map) \
+ V(PROXY_REVOKE_SHARED_FUN, SharedFunctionInfo, proxy_revoke_shared_fun) \
+ V(PROMISE_GET_CAPABILITIES_EXECUTOR_SHARED_FUN, SharedFunctionInfo, \
+ promise_get_capabilities_executor_shared_fun) \
+ V(PROMISE_CAPABILITY_DEFAULT_REJECT_SHARED_FUN_INDEX, SharedFunctionInfo, \
+ promise_capability_default_reject_shared_fun) \
+ V(PROMISE_CAPABILITY_DEFAULT_RESOLVE_SHARED_FUN_INDEX, SharedFunctionInfo, \
+ promise_capability_default_resolve_shared_fun) \
+ V(PROMISE_THEN_FINALLY_SHARED_FUN, SharedFunctionInfo, \
+ promise_then_finally_shared_fun) \
+ V(PROMISE_CATCH_FINALLY_SHARED_FUN, SharedFunctionInfo, \
+ promise_catch_finally_shared_fun) \
+ V(PROMISE_VALUE_THUNK_FINALLY_SHARED_FUN, SharedFunctionInfo, \
+ promise_value_thunk_finally_shared_fun) \
+ V(PROMISE_THROWER_FINALLY_SHARED_FUN, SharedFunctionInfo, \
+ promise_thrower_finally_shared_fun) \
+ V(PROMISE_ALL_RESOLVE_ELEMENT_SHARED_FUN, SharedFunctionInfo, \
+ promise_all_resolve_element_shared_fun) \
+ V(PROMISE_ALL_SETTLED_RESOLVE_ELEMENT_SHARED_FUN, SharedFunctionInfo, \
+ promise_all_settled_resolve_element_shared_fun) \
+ V(PROMISE_ALL_SETTLED_REJECT_ELEMENT_SHARED_FUN, SharedFunctionInfo, \
+ promise_all_settled_reject_element_shared_fun) \
+ V(PROMISE_PROTOTYPE_INDEX, JSObject, promise_prototype) \
+ V(REGEXP_EXEC_FUNCTION_INDEX, JSFunction, regexp_exec_function) \
+ V(REGEXP_FUNCTION_INDEX, JSFunction, regexp_function) \
+ V(REGEXP_LAST_MATCH_INFO_INDEX, RegExpMatchInfo, regexp_last_match_info) \
+ V(REGEXP_PROTOTYPE_MAP_INDEX, Map, regexp_prototype_map) \
+ V(INITIAL_REGEXP_STRING_ITERATOR_PROTOTYPE_MAP_INDEX, Map, \
+ initial_regexp_string_iterator_prototype_map) \
+ V(REGEXP_RESULT_MAP_INDEX, Map, regexp_result_map) \
+ V(REGEXP_PROTOTYPE_INDEX, JSObject, regexp_prototype) \
+ V(SCRIPT_CONTEXT_TABLE_INDEX, ScriptContextTable, script_context_table) \
+ V(SECURITY_TOKEN_INDEX, Object, security_token) \
+ V(SERIALIZED_OBJECTS, FixedArray, serialized_objects) \
+ V(SET_VALUE_ITERATOR_MAP_INDEX, Map, set_value_iterator_map) \
+ V(SET_KEY_VALUE_ITERATOR_MAP_INDEX, Map, set_key_value_iterator_map) \
+ V(SHARED_ARRAY_BUFFER_FUN_INDEX, JSFunction, shared_array_buffer_fun) \
+ V(SLOPPY_ARGUMENTS_MAP_INDEX, Map, sloppy_arguments_map) \
+ V(SLOW_ALIASED_ARGUMENTS_MAP_INDEX, Map, slow_aliased_arguments_map) \
+ V(STRICT_ARGUMENTS_MAP_INDEX, Map, strict_arguments_map) \
+ V(SLOW_OBJECT_WITH_NULL_PROTOTYPE_MAP, Map, \
+ slow_object_with_null_prototype_map) \
+ V(SLOW_OBJECT_WITH_OBJECT_PROTOTYPE_MAP, Map, \
+ slow_object_with_object_prototype_map) \
+ V(SLOW_TEMPLATE_INSTANTIATIONS_CACHE_INDEX, SimpleNumberDictionary, \
+ slow_template_instantiations_cache) \
+ /* All *_FUNCTION_MAP_INDEX definitions used by Context::FunctionMapIndex */ \
+ /* must remain together. */ \
+ V(SLOPPY_FUNCTION_MAP_INDEX, Map, sloppy_function_map) \
+ V(SLOPPY_FUNCTION_WITH_NAME_MAP_INDEX, Map, sloppy_function_with_name_map) \
+ V(SLOPPY_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX, Map, \
+ sloppy_function_without_prototype_map) \
+ V(SLOPPY_FUNCTION_WITH_READONLY_PROTOTYPE_MAP_INDEX, Map, \
+ sloppy_function_with_readonly_prototype_map) \
+ V(STRICT_FUNCTION_MAP_INDEX, Map, strict_function_map) \
+ V(STRICT_FUNCTION_WITH_NAME_MAP_INDEX, Map, strict_function_with_name_map) \
+ V(STRICT_FUNCTION_WITH_READONLY_PROTOTYPE_MAP_INDEX, Map, \
+ strict_function_with_readonly_prototype_map) \
+ V(STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX, Map, \
+ strict_function_without_prototype_map) \
+ V(METHOD_WITH_NAME_MAP_INDEX, Map, method_with_name_map) \
+ V(METHOD_WITH_HOME_OBJECT_MAP_INDEX, Map, method_with_home_object_map) \
+ V(METHOD_WITH_NAME_AND_HOME_OBJECT_MAP_INDEX, Map, \
+ method_with_name_and_home_object_map) \
+ V(ASYNC_FUNCTION_MAP_INDEX, Map, async_function_map) \
+ V(ASYNC_FUNCTION_WITH_NAME_MAP_INDEX, Map, async_function_with_name_map) \
+ V(ASYNC_FUNCTION_WITH_HOME_OBJECT_MAP_INDEX, Map, \
+ async_function_with_home_object_map) \
+ V(ASYNC_FUNCTION_WITH_NAME_AND_HOME_OBJECT_MAP_INDEX, Map, \
+ async_function_with_name_and_home_object_map) \
+ V(GENERATOR_FUNCTION_MAP_INDEX, Map, generator_function_map) \
+ V(GENERATOR_FUNCTION_WITH_NAME_MAP_INDEX, Map, \
+ generator_function_with_name_map) \
+ V(GENERATOR_FUNCTION_WITH_HOME_OBJECT_MAP_INDEX, Map, \
+ generator_function_with_home_object_map) \
+ V(GENERATOR_FUNCTION_WITH_NAME_AND_HOME_OBJECT_MAP_INDEX, Map, \
+ generator_function_with_name_and_home_object_map) \
+ V(ASYNC_GENERATOR_FUNCTION_MAP_INDEX, Map, async_generator_function_map) \
+ V(ASYNC_GENERATOR_FUNCTION_WITH_NAME_MAP_INDEX, Map, \
+ async_generator_function_with_name_map) \
+ V(ASYNC_GENERATOR_FUNCTION_WITH_HOME_OBJECT_MAP_INDEX, Map, \
+ async_generator_function_with_home_object_map) \
+ V(ASYNC_GENERATOR_FUNCTION_WITH_NAME_AND_HOME_OBJECT_MAP_INDEX, Map, \
+ async_generator_function_with_name_and_home_object_map) \
+ V(CLASS_FUNCTION_MAP_INDEX, Map, class_function_map) \
+ V(STRING_FUNCTION_INDEX, JSFunction, string_function) \
+ V(STRING_FUNCTION_PROTOTYPE_MAP_INDEX, Map, string_function_prototype_map) \
+ V(SYMBOL_FUNCTION_INDEX, JSFunction, symbol_function) \
+ V(WASM_EXPORTED_FUNCTION_MAP_INDEX, Map, wasm_exported_function_map) \
+ V(WASM_EXCEPTION_CONSTRUCTOR_INDEX, JSFunction, wasm_exception_constructor) \
+ V(WASM_FUNCTION_CONSTRUCTOR_INDEX, JSFunction, wasm_function_constructor) \
+ V(WASM_GLOBAL_CONSTRUCTOR_INDEX, JSFunction, wasm_global_constructor) \
+ V(WASM_INSTANCE_CONSTRUCTOR_INDEX, JSFunction, wasm_instance_constructor) \
+ V(WASM_MEMORY_CONSTRUCTOR_INDEX, JSFunction, wasm_memory_constructor) \
+ V(WASM_MODULE_CONSTRUCTOR_INDEX, JSFunction, wasm_module_constructor) \
+ V(WASM_TABLE_CONSTRUCTOR_INDEX, JSFunction, wasm_table_constructor) \
+ V(TEMPLATE_WEAKMAP_INDEX, HeapObject, template_weakmap) \
+ V(TYPED_ARRAY_FUN_INDEX, JSFunction, typed_array_function) \
+ V(TYPED_ARRAY_PROTOTYPE_INDEX, JSObject, typed_array_prototype) \
+ V(UINT16_ARRAY_FUN_INDEX, JSFunction, uint16_array_fun) \
+ V(UINT32_ARRAY_FUN_INDEX, JSFunction, uint32_array_fun) \
+ V(UINT8_ARRAY_FUN_INDEX, JSFunction, uint8_array_fun) \
+ V(UINT8_CLAMPED_ARRAY_FUN_INDEX, JSFunction, uint8_clamped_array_fun) \
+ V(ARRAY_ENTRIES_ITERATOR_INDEX, JSFunction, array_entries_iterator) \
+ V(ARRAY_FOR_EACH_ITERATOR_INDEX, JSFunction, array_for_each_iterator) \
+ V(ARRAY_KEYS_ITERATOR_INDEX, JSFunction, array_keys_iterator) \
+ V(ARRAY_VALUES_ITERATOR_INDEX, JSFunction, array_values_iterator) \
+ V(ERROR_FUNCTION_INDEX, JSFunction, error_function) \
+ V(ERROR_TO_STRING, JSFunction, error_to_string) \
+ V(EVAL_ERROR_FUNCTION_INDEX, JSFunction, eval_error_function) \
+ V(GLOBAL_EVAL_FUN_INDEX, JSFunction, global_eval_fun) \
+ V(GLOBAL_PROXY_FUNCTION_INDEX, JSFunction, global_proxy_function) \
+ V(MAP_DELETE_INDEX, JSFunction, map_delete) \
+ V(MAP_GET_INDEX, JSFunction, map_get) \
+ V(MAP_HAS_INDEX, JSFunction, map_has) \
+ V(MAP_SET_INDEX, JSFunction, map_set) \
+ V(FUNCTION_HAS_INSTANCE_INDEX, JSFunction, function_has_instance) \
+ V(OBJECT_TO_STRING, JSFunction, object_to_string) \
+ V(PROMISE_ALL_INDEX, JSFunction, promise_all) \
+ V(PROMISE_CATCH_INDEX, JSFunction, promise_catch) \
+ V(PROMISE_FUNCTION_INDEX, JSFunction, promise_function) \
+ V(RANGE_ERROR_FUNCTION_INDEX, JSFunction, range_error_function) \
+ V(REFERENCE_ERROR_FUNCTION_INDEX, JSFunction, reference_error_function) \
+ V(SET_ADD_INDEX, JSFunction, set_add) \
+ V(SET_DELETE_INDEX, JSFunction, set_delete) \
+ V(SET_HAS_INDEX, JSFunction, set_has) \
+ V(SYNTAX_ERROR_FUNCTION_INDEX, JSFunction, syntax_error_function) \
+ V(TYPE_ERROR_FUNCTION_INDEX, JSFunction, type_error_function) \
+ V(URI_ERROR_FUNCTION_INDEX, JSFunction, uri_error_function) \
+ V(WASM_COMPILE_ERROR_FUNCTION_INDEX, JSFunction, \
+ wasm_compile_error_function) \
+ V(WASM_LINK_ERROR_FUNCTION_INDEX, JSFunction, wasm_link_error_function) \
+ V(WASM_RUNTIME_ERROR_FUNCTION_INDEX, JSFunction, \
+ wasm_runtime_error_function) \
+ V(WEAKMAP_SET_INDEX, JSFunction, weakmap_set) \
+ V(WEAKMAP_GET_INDEX, JSFunction, weakmap_get) \
+ V(WEAKSET_ADD_INDEX, JSFunction, weakset_add) \
+ NATIVE_CONTEXT_INTRINSIC_FUNCTIONS(V)
+
+// A table of all script contexts. Every loaded top-level script with top-level
+// lexical declarations contributes its ScriptContext into this table.
+//
+// The table is a fixed array; its first slot is the current used count, and
+// the subsequent slots 1..used contain ScriptContexts.
+class ScriptContextTable : public FixedArray {
+ public:
+ DECL_CAST(ScriptContextTable)
+
+ struct LookupResult {
+ int context_index;
+ int slot_index;
+ VariableMode mode;
+ InitializationFlag init_flag;
+ MaybeAssignedFlag maybe_assigned_flag;
+ };
+
+ inline int used() const;
+ inline void set_used(int used);
+
+ static inline Handle<Context> GetContext(Isolate* isolate,
+ Handle<ScriptContextTable> table,
+ int i);
+ inline Context get_context(int i) const;
+
+  // Look up a variable `name` in a ScriptContextTable.
+ // If it returns true, the variable is found and `result` contains
+ // valid information about its location.
+ // If it returns false, `result` is untouched.
+ V8_WARN_UNUSED_RESULT
+ V8_EXPORT_PRIVATE static bool Lookup(Isolate* isolate,
+ ScriptContextTable table, String name,
+ LookupResult* result);
+
+ V8_WARN_UNUSED_RESULT
+ static Handle<ScriptContextTable> Extend(Handle<ScriptContextTable> table,
+ Handle<Context> script_context);
+
+ static const int kUsedSlotIndex = 0;
+ static const int kFirstContextSlotIndex = 1;
+ static const int kMinLength = kFirstContextSlotIndex;
+
+ OBJECT_CONSTRUCTORS(ScriptContextTable, FixedArray);
+};
+
+// JSFunctions are pairs (context, function code), sometimes also called
+// closures. A Context object is used to represent function contexts and
+// dynamically pushed 'with' contexts (or 'scopes' in ECMA-262 speak).
+//
+// At runtime, the contexts build a stack in parallel to the execution
+// stack, with the top-most context being the current context. All contexts
+// have the following slots:
+//
+// [ scope_info ] This is the scope info describing the current context. It
+// contains the names of statically allocated context slots,
+// and stack-allocated locals. The names are needed for
+// dynamic lookups in the presence of 'with' or 'eval', and
+// for the debugger.
+//
+// [ previous ] A pointer to the previous context.
+//
+// [ extension ] Additional data.
+//
+// For native contexts, it contains the global object.
+// For module contexts, it contains the module object.
+// For await contexts, it contains the generator object.
+// For block contexts, it may contain an "extension object".
+// For with contexts, it contains an "extension object".
+//
+// An "extension object" is used to dynamically extend a
+// context with additional variables, namely in the
+// implementation of the 'with' construct and the 'eval'
+// construct. For instance, Context::Lookup also searches
+// the extension object for properties. (Storing the
+// extension object is the original purpose of this context
+// slot, hence the name.)
+//
+// [ native_context ] A pointer to the native context.
+//
+// In addition, function contexts may have statically allocated context slots
+// to store local variables/functions that are accessed from inner functions
+// (via static context addresses) or through 'eval' (dynamic context lookups).
+// The native context contains additional slots for fast access to native
+// properties.
+//
+// Finally, with Harmony scoping, the JSFunction representing a top-level
+// script will have the ScriptContext rather than a FunctionContext.
+// Script contexts from all top-level scripts are gathered in
+// ScriptContextTable.
+
+class Context : public HeapObject {
+ public:
+ NEVER_READ_ONLY_SPACE
+
+ DECL_CAST(Context)
+
+ // [length]: length of the context.
+ V8_INLINE int length() const;
+ V8_INLINE void set_length(int value);
+
+ // Setter and getter for elements.
+ V8_INLINE Object get(int index) const;
+ V8_INLINE void set(int index, Object value);
+ // Setter with explicit barrier mode.
+ V8_INLINE void set(int index, Object value, WriteBarrierMode mode);
+
+ DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize,
+ TORQUE_GENERATED_CONTEXT_FIELDS)
+ // TODO(v8:8989): [torque] Support marker constants.
+ /* TODO(ishell): remove this fixedArray-like header size. */
+ static const int kHeaderSize = kScopeInfoOffset;
+ static const int kStartOfTaggedFieldsOffset = kScopeInfoOffset;
+  /* Header size. */
+  /* TODO(ishell): use this as header size once MIN_CONTEXT_SLOTS */
+  /* is removed in favour of offset-based access to common fields. */
+ static const int kTodoHeaderSize = kSize;
+
+ // Garbage collection support.
+ V8_INLINE static constexpr int SizeFor(int length) {
+ // TODO(ishell): switch to kTodoHeaderSize based approach once we no longer
+ // reference common Context fields via index
+ return kHeaderSize + length * kTaggedSize;
+ }
+
+ // Code Generation support.
+ // Offset of the element from the beginning of object.
+ V8_INLINE static constexpr int OffsetOfElementAt(int index) {
+ return SizeFor(index);
+ }
+ // Offset of the element from the heap object pointer.
+ V8_INLINE static constexpr int SlotOffset(int index) {
+ return SizeFor(index) - kHeapObjectTag;
+ }
+
+  // TODO(ishell): eventually migrate to offset-based access instead of
+  // index-based access.
+ // The default context slot layout; indices are FixedArray slot indices.
+ enum Field {
+    // TODO(ishell): use offset-based approach for accessing common values.
+ // These slots are in all contexts.
+ SCOPE_INFO_INDEX,
+ PREVIOUS_INDEX,
+ EXTENSION_INDEX,
+ NATIVE_CONTEXT_INDEX,
+
+// These slots are only in native contexts.
+#define NATIVE_CONTEXT_SLOT(index, type, name) index,
+ NATIVE_CONTEXT_FIELDS(NATIVE_CONTEXT_SLOT)
+#undef NATIVE_CONTEXT_SLOT
+
+ // Properties from here are treated as weak references by the full GC.
+ // Scavenge treats them as strong references.
+ OPTIMIZED_CODE_LIST, // Weak.
+ DEOPTIMIZED_CODE_LIST, // Weak.
+ NEXT_CONTEXT_LINK, // Weak.
+
+ // Total number of slots.
+ NATIVE_CONTEXT_SLOTS,
+ FIRST_WEAK_SLOT = OPTIMIZED_CODE_LIST,
+ FIRST_JS_ARRAY_MAP_SLOT = JS_ARRAY_PACKED_SMI_ELEMENTS_MAP_INDEX,
+
+    // TODO(ishell): Remove, once it becomes zero.
+ MIN_CONTEXT_SLOTS = GLOBAL_PROXY_INDEX,
+
+ // This slot holds the thrown value in catch contexts.
+ THROWN_OBJECT_INDEX = MIN_CONTEXT_SLOTS,
+
+ // These slots hold values in debug evaluate contexts.
+ WRAPPED_CONTEXT_INDEX = MIN_CONTEXT_SLOTS,
+ WHITE_LIST_INDEX = MIN_CONTEXT_SLOTS + 1
+ };
+
+ // A region of native context entries containing maps for functions created
+ // by Builtins::kFastNewClosure.
+ static const int FIRST_FUNCTION_MAP_INDEX = SLOPPY_FUNCTION_MAP_INDEX;
+ static const int LAST_FUNCTION_MAP_INDEX = CLASS_FUNCTION_MAP_INDEX;
+
+ static const int kNoContext = 0;
+ static const int kInvalidContext = 1;
+
+ void ResetErrorsThrown();
+ void IncrementErrorsThrown();
+ int GetErrorsThrown();
+
+ // Direct slot access.
+ inline void set_scope_info(ScopeInfo scope_info);
+
+ inline Object unchecked_previous();
+ inline Context previous();
+ inline void set_previous(Context context);
+
+ inline Object next_context_link();
+
+ inline bool has_extension();
+ inline HeapObject extension();
+ inline void set_extension(HeapObject object);
+ JSObject extension_object();
+ JSReceiver extension_receiver();
+ V8_EXPORT_PRIVATE ScopeInfo scope_info();
+
+ // Find the module context (assuming there is one) and return the associated
+ // module object.
+ Module module();
+
+ // Get the context where var declarations will be hoisted to, which
+ // may be the context itself.
+ Context declaration_context();
+ bool is_declaration_context();
+
+ // Get the next closure's context on the context chain.
+ Context closure_context();
+
+ // Returns a JSGlobalProxy object or null.
+ V8_EXPORT_PRIVATE JSGlobalProxy global_proxy();
+ void set_global_proxy(JSGlobalProxy global);
+
+ // Get the JSGlobalObject object.
+ V8_EXPORT_PRIVATE JSGlobalObject global_object();
+
+ // Get the script context by traversing the context chain.
+ Context script_context();
+
+ // Compute the native context.
+ inline NativeContext native_context() const;
+ inline void set_native_context(NativeContext context);
+
+ // Predicates for context types. IsNativeContext is already defined on
+ // Object.
+ inline bool IsFunctionContext() const;
+ inline bool IsCatchContext() const;
+ inline bool IsWithContext() const;
+ inline bool IsDebugEvaluateContext() const;
+ inline bool IsAwaitContext() const;
+ inline bool IsBlockContext() const;
+ inline bool IsModuleContext() const;
+ inline bool IsEvalContext() const;
+ inline bool IsScriptContext() const;
+
+ inline bool HasSameSecurityTokenAs(Context that) const;
+
+ // The native context also stores a list of all optimized code and a
+ // list of all deoptimized code, which are needed by the deoptimizer.
+ V8_EXPORT_PRIVATE void AddOptimizedCode(Code code);
+ void SetOptimizedCodeListHead(Object head);
+ Object OptimizedCodeListHead();
+ void SetDeoptimizedCodeListHead(Object head);
+ Object DeoptimizedCodeListHead();
+
+ Handle<Object> ErrorMessageForCodeGenerationFromStrings();
+
+ static int IntrinsicIndexForName(Handle<String> name);
+ static int IntrinsicIndexForName(const unsigned char* name, int length);
+
+#define NATIVE_CONTEXT_FIELD_ACCESSORS(index, type, name) \
+ inline void set_##name(type value); \
+ inline bool is_##name(type value) const; \
+ inline type name() const;
+ NATIVE_CONTEXT_FIELDS(NATIVE_CONTEXT_FIELD_ACCESSORS)
+#undef NATIVE_CONTEXT_FIELD_ACCESSORS
+
+  // Look up the slot called name, starting with the current context.
+  // There are four possibilities:
+ //
+ // 1) result->IsContext():
+ // The binding was found in a context. *index is always the
+ // non-negative slot index. *attributes is NONE for var and let
+ // declarations, READ_ONLY for const declarations (never ABSENT).
+ //
+ // 2) result->IsJSObject():
+ // The binding was found as a named property in a context extension
+ // object (i.e., was introduced via eval), as a property on the subject
+ // of with, or as a property of the global object. *index is -1 and
+ // *attributes is not ABSENT.
+ //
+ // 3) result->IsModule():
+ // The binding was found in module imports or exports.
+  //    *attributes is never ABSENT. Imports are READ_ONLY.
+ //
+ // 4) result.is_null():
+ // There was no binding found, *index is always -1 and *attributes is
+ // always ABSENT.
+ static Handle<Object> Lookup(Handle<Context> context, Handle<String> name,
+ ContextLookupFlags flags, int* index,
+ PropertyAttributes* attributes,
+ InitializationFlag* init_flag,
+ VariableMode* variable_mode,
+ bool* is_sloppy_function_name = nullptr);
+
+ static inline int FunctionMapIndex(LanguageMode language_mode,
+ FunctionKind kind, bool has_shared_name,
+ bool needs_home_object);
+
+ static int ArrayMapIndex(ElementsKind elements_kind) {
+ DCHECK(IsFastElementsKind(elements_kind));
+ return elements_kind + FIRST_JS_ARRAY_MAP_SLOT;
+ }
+
+ inline Map GetInitialJSArrayMap(ElementsKind kind) const;
+
+ static const int kNotFound = -1;
+
+ // Dispatched behavior.
+ DECL_PRINTER(Context)
+ DECL_VERIFIER(Context)
+
+ using BodyDescriptor = FlexibleBodyDescriptor<kStartOfTaggedFieldsOffset>;
+
+ private:
+#ifdef DEBUG
+ // Bootstrapping-aware type checks.
+ V8_EXPORT_PRIVATE static bool IsBootstrappingOrNativeContext(Isolate* isolate,
+ Object object);
+ static bool IsBootstrappingOrValidParentContext(Object object, Context kid);
+#endif
+
+ OBJECT_CONSTRUCTORS(Context, HeapObject);
+};
+
+class NativeContext : public Context {
+ public:
+ DECL_CAST(NativeContext)
+ // TODO(neis): Move some stuff from Context here.
+
+ // [microtask_queue]: pointer to the MicrotaskQueue object.
+ DECL_PRIMITIVE_ACCESSORS(microtask_queue, MicrotaskQueue*)
+
+ // Dispatched behavior.
+ DECL_PRINTER(NativeContext)
+ DECL_VERIFIER(NativeContext)
+
+ // Layout description.
+#define NATIVE_CONTEXT_FIELDS_DEF(V) \
+ /* TODO(ishell): move definition of common context offsets to Context. */ \
+ V(kStartOfNativeContextFieldsOffset, \
+ (FIRST_WEAK_SLOT - MIN_CONTEXT_SLOTS) * kTaggedSize) \
+ V(kEndOfStrongFieldsOffset, 0) \
+ V(kStartOfWeakFieldsOffset, \
+ (NATIVE_CONTEXT_SLOTS - FIRST_WEAK_SLOT) * kTaggedSize) \
+ V(kEndOfWeakFieldsOffset, 0) \
+ V(kEndOfNativeContextFieldsOffset, 0) \
+ V(kEndOfTaggedFieldsOffset, 0) \
+ /* Raw data. */ \
+ V(kMicrotaskQueueOffset, kSystemPointerSize) \
+ /* Total size. */ \
+ V(kSize, 0)
+
+ DEFINE_FIELD_OFFSET_CONSTANTS(Context::kTodoHeaderSize,
+ NATIVE_CONTEXT_FIELDS_DEF)
+#undef NATIVE_CONTEXT_FIELDS_DEF
+
+ class BodyDescriptor;
+
+ private:
+ STATIC_ASSERT(OffsetOfElementAt(EMBEDDER_DATA_INDEX) ==
+ Internals::kNativeContextEmbedderDataOffset);
+
+ OBJECT_CONSTRUCTORS(NativeContext, Context);
+};
+
+using ContextField = Context::Field;
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_CONTEXTS_H_
diff --git a/deps/v8/src/objects/data-handler-inl.h b/deps/v8/src/objects/data-handler-inl.h
index 1be71ce8fa..f9496cc342 100644
--- a/deps/v8/src/objects/data-handler-inl.h
+++ b/deps/v8/src/objects/data-handler-inl.h
@@ -5,8 +5,8 @@
#ifndef V8_OBJECTS_DATA_HANDLER_INL_H_
#define V8_OBJECTS_DATA_HANDLER_INL_H_
-#include "src/objects-inl.h" // Needed for write barriers
#include "src/objects/data-handler.h"
+#include "src/objects/objects-inl.h" // Needed for write barriers
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -22,15 +22,15 @@ ACCESSORS(DataHandler, smi_handler, Object, kSmiHandlerOffset)
ACCESSORS(DataHandler, validity_cell, Object, kValidityCellOffset)
int DataHandler::data_field_count() const {
- return (map()->instance_size() - kSizeWithData0) / kTaggedSize;
+ return (map().instance_size() - kSizeWithData0) / kTaggedSize;
}
WEAK_ACCESSORS_CHECKED(DataHandler, data1, kData1Offset,
- map()->instance_size() >= kSizeWithData1)
+ map().instance_size() >= kSizeWithData1)
WEAK_ACCESSORS_CHECKED(DataHandler, data2, kData2Offset,
- map()->instance_size() >= kSizeWithData2)
+ map().instance_size() >= kSizeWithData2)
WEAK_ACCESSORS_CHECKED(DataHandler, data3, kData3Offset,
- map()->instance_size() >= kSizeWithData3)
+ map().instance_size() >= kSizeWithData3)
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/objects/debug-objects-inl.h b/deps/v8/src/objects/debug-objects-inl.h
index d445174cbc..273f710c3b 100644
--- a/deps/v8/src/objects/debug-objects-inl.h
+++ b/deps/v8/src/objects/debug-objects-inl.h
@@ -8,8 +8,8 @@
#include "src/objects/debug-objects.h"
#include "src/heap/heap-write-barrier-inl.h"
-#include "src/objects-inl.h"
#include "src/objects/code-inl.h"
+#include "src/objects/objects-inl.h"
#include "src/objects/shared-function-info.h"
// Has to be the last include (doesn't have include guards):
@@ -37,7 +37,7 @@ ACCESSORS(DebugInfo, script, Object, kScriptOffset)
ACCESSORS(DebugInfo, original_bytecode_array, Object,
kOriginalBytecodeArrayOffset)
ACCESSORS(DebugInfo, debug_bytecode_array, Object, kDebugBytecodeArrayOffset)
-ACCESSORS(DebugInfo, break_points, FixedArray, kBreakPointsStateOffset)
+ACCESSORS(DebugInfo, break_points, FixedArray, kBreakPointsOffset)
ACCESSORS(DebugInfo, coverage_info, Object, kCoverageInfoOffset)
BIT_FIELD_ACCESSORS(DebugInfo, debugger_hints, side_effect_state,
@@ -56,9 +56,9 @@ SMI_ACCESSORS(BreakPoint, id, kIdOffset)
ACCESSORS(BreakPoint, condition, String, kConditionOffset)
bool DebugInfo::HasInstrumentedBytecodeArray() {
- DCHECK_EQ(debug_bytecode_array()->IsBytecodeArray(),
- original_bytecode_array()->IsBytecodeArray());
- return debug_bytecode_array()->IsBytecodeArray();
+ DCHECK_EQ(debug_bytecode_array().IsBytecodeArray(),
+ original_bytecode_array().IsBytecodeArray());
+ return debug_bytecode_array().IsBytecodeArray();
}
BytecodeArray DebugInfo::OriginalBytecodeArray() {
@@ -68,7 +68,7 @@ BytecodeArray DebugInfo::OriginalBytecodeArray() {
BytecodeArray DebugInfo::DebugBytecodeArray() {
DCHECK(HasInstrumentedBytecodeArray());
- DCHECK_EQ(shared()->GetDebugBytecodeArray(), debug_bytecode_array());
+ DCHECK_EQ(shared().GetDebugBytecodeArray(), debug_bytecode_array());
return BytecodeArray::cast(debug_bytecode_array());
}
diff --git a/deps/v8/src/objects/debug-objects.cc b/deps/v8/src/objects/debug-objects.cc
index 760edbfbcf..5087918e75 100644
--- a/deps/v8/src/objects/debug-objects.cc
+++ b/deps/v8/src/objects/debug-objects.cc
@@ -5,9 +5,9 @@
#include "src/objects/debug-objects.h"
#include "src/debug/debug-evaluate.h"
-#include "src/handles-inl.h"
+#include "src/handles/handles-inl.h"
#include "src/objects/debug-objects-inl.h"
-#include "src/ostreams.h"
+#include "src/utils/ostreams.h"
namespace v8 {
namespace internal {
@@ -31,7 +31,7 @@ void DebugInfo::ClearBreakInfo(Isolate* isolate) {
if (HasInstrumentedBytecodeArray()) {
// Reset function's bytecode array field to point to the original bytecode
// array.
- shared()->SetDebugBytecodeArray(OriginalBytecodeArray());
+ shared().SetDebugBytecodeArray(OriginalBytecodeArray());
// If the function is currently running on the stack, we need to update the
// bytecode pointers on the stack so they point to the original
@@ -80,19 +80,18 @@ bool DebugInfo::HasBreakPoint(Isolate* isolate, int source_position) {
// If there is no break point info object or no break points in the break
// point info object there is no break point at this code offset.
- if (break_point_info->IsUndefined(isolate)) return false;
- return BreakPointInfo::cast(break_point_info)->GetBreakPointCount(isolate) >
- 0;
+ if (break_point_info.IsUndefined(isolate)) return false;
+ return BreakPointInfo::cast(break_point_info).GetBreakPointCount(isolate) > 0;
}
// Get the break point info object for this source position.
Object DebugInfo::GetBreakPointInfo(Isolate* isolate, int source_position) {
DCHECK(HasBreakInfo());
- for (int i = 0; i < break_points()->length(); i++) {
- if (!break_points()->get(i)->IsUndefined(isolate)) {
+ for (int i = 0; i < break_points().length(); i++) {
+ if (!break_points().get(i).IsUndefined(isolate)) {
BreakPointInfo break_point_info =
- BreakPointInfo::cast(break_points()->get(i));
- if (break_point_info->source_position() == source_position) {
+ BreakPointInfo::cast(break_points().get(i));
+ if (break_point_info.source_position() == source_position) {
return break_point_info;
}
}
@@ -103,10 +102,10 @@ Object DebugInfo::GetBreakPointInfo(Isolate* isolate, int source_position) {
bool DebugInfo::ClearBreakPoint(Isolate* isolate, Handle<DebugInfo> debug_info,
Handle<BreakPoint> break_point) {
DCHECK(debug_info->HasBreakInfo());
- for (int i = 0; i < debug_info->break_points()->length(); i++) {
- if (debug_info->break_points()->get(i)->IsUndefined(isolate)) continue;
+ for (int i = 0; i < debug_info->break_points().length(); i++) {
+ if (debug_info->break_points().get(i).IsUndefined(isolate)) continue;
Handle<BreakPointInfo> break_point_info = Handle<BreakPointInfo>(
- BreakPointInfo::cast(debug_info->break_points()->get(i)), isolate);
+ BreakPointInfo::cast(debug_info->break_points().get(i)), isolate);
if (BreakPointInfo::HasBreakPoint(isolate, break_point_info, break_point)) {
BreakPointInfo::ClearBreakPoint(isolate, break_point_info, break_point);
return true;
@@ -131,8 +130,8 @@ void DebugInfo::SetBreakPoint(Isolate* isolate, Handle<DebugInfo> debug_info,
// break points before. Try to find a free slot.
static const int kNoBreakPointInfo = -1;
int index = kNoBreakPointInfo;
- for (int i = 0; i < debug_info->break_points()->length(); i++) {
- if (debug_info->break_points()->get(i)->IsUndefined(isolate)) {
+ for (int i = 0; i < debug_info->break_points().length(); i++) {
+ if (debug_info->break_points().get(i).IsUndefined(isolate)) {
index = i;
break;
}
@@ -157,7 +156,7 @@ void DebugInfo::SetBreakPoint(Isolate* isolate, Handle<DebugInfo> debug_info,
Handle<BreakPointInfo> new_break_point_info =
isolate->factory()->NewBreakPointInfo(source_position);
BreakPointInfo::SetBreakPoint(isolate, new_break_point_info, break_point);
- debug_info->break_points()->set(index, *new_break_point_info);
+ debug_info->break_points().set(index, *new_break_point_info);
}
// Get the break point objects for a source position.
@@ -165,10 +164,10 @@ Handle<Object> DebugInfo::GetBreakPoints(Isolate* isolate,
int source_position) {
DCHECK(HasBreakInfo());
Object break_point_info = GetBreakPointInfo(isolate, source_position);
- if (break_point_info->IsUndefined(isolate)) {
+ if (break_point_info.IsUndefined(isolate)) {
return isolate->factory()->undefined_value();
}
- return Handle<Object>(BreakPointInfo::cast(break_point_info)->break_points(),
+ return Handle<Object>(BreakPointInfo::cast(break_point_info).break_points(),
isolate);
}
@@ -176,11 +175,11 @@ Handle<Object> DebugInfo::GetBreakPoints(Isolate* isolate,
int DebugInfo::GetBreakPointCount(Isolate* isolate) {
DCHECK(HasBreakInfo());
int count = 0;
- for (int i = 0; i < break_points()->length(); i++) {
- if (!break_points()->get(i)->IsUndefined(isolate)) {
+ for (int i = 0; i < break_points().length(); i++) {
+ if (!break_points().get(i).IsUndefined(isolate)) {
BreakPointInfo break_point_info =
- BreakPointInfo::cast(break_points()->get(i));
- count += break_point_info->GetBreakPointCount(isolate);
+ BreakPointInfo::cast(break_points().get(i));
+ count += break_point_info.GetBreakPointCount(isolate);
}
}
return count;
@@ -190,10 +189,10 @@ Handle<Object> DebugInfo::FindBreakPointInfo(Isolate* isolate,
Handle<DebugInfo> debug_info,
Handle<BreakPoint> break_point) {
DCHECK(debug_info->HasBreakInfo());
- for (int i = 0; i < debug_info->break_points()->length(); i++) {
- if (!debug_info->break_points()->get(i)->IsUndefined(isolate)) {
+ for (int i = 0; i < debug_info->break_points().length(); i++) {
+ if (!debug_info->break_points().get(i).IsUndefined(isolate)) {
Handle<BreakPointInfo> break_point_info = Handle<BreakPointInfo>(
- BreakPointInfo::cast(debug_info->break_points()->get(i)), isolate);
+ BreakPointInfo::cast(debug_info->break_points().get(i)), isolate);
if (BreakPointInfo::HasBreakPoint(isolate, break_point_info,
break_point)) {
return break_point_info;
@@ -228,7 +227,7 @@ DebugInfo::SideEffectState DebugInfo::GetSideEffectState(Isolate* isolate) {
namespace {
bool IsEqual(BreakPoint break_point1, BreakPoint break_point2) {
- return break_point1->id() == break_point2->id();
+ return break_point1.id() == break_point2.id();
}
} // namespace
@@ -237,9 +236,9 @@ void BreakPointInfo::ClearBreakPoint(Isolate* isolate,
Handle<BreakPointInfo> break_point_info,
Handle<BreakPoint> break_point) {
// If there are no break points just ignore.
- if (break_point_info->break_points()->IsUndefined(isolate)) return;
+ if (break_point_info->break_points().IsUndefined(isolate)) return;
// If there is a single break point clear it if it is the same.
- if (!break_point_info->break_points()->IsFixedArray()) {
+ if (!break_point_info->break_points().IsFixedArray()) {
if (IsEqual(BreakPoint::cast(break_point_info->break_points()),
*break_point)) {
break_point_info->set_break_points(
@@ -248,7 +247,7 @@ void BreakPointInfo::ClearBreakPoint(Isolate* isolate,
return;
}
// If there are multiple break points shrink the array
- DCHECK(break_point_info->break_points()->IsFixedArray());
+ DCHECK(break_point_info->break_points().IsFixedArray());
Handle<FixedArray> old_array = Handle<FixedArray>(
FixedArray::cast(break_point_info->break_points()), isolate);
Handle<FixedArray> new_array =
@@ -271,14 +270,14 @@ void BreakPointInfo::SetBreakPoint(Isolate* isolate,
Handle<BreakPointInfo> break_point_info,
Handle<BreakPoint> break_point) {
// If there was no break point objects before just set it.
- if (break_point_info->break_points()->IsUndefined(isolate)) {
+ if (break_point_info->break_points().IsUndefined(isolate)) {
break_point_info->set_break_points(*break_point);
return;
}
// If the break point object is the same as before just ignore.
if (break_point_info->break_points() == *break_point) return;
// If there was one break point object before replace with array.
- if (!break_point_info->break_points()->IsFixedArray()) {
+ if (!break_point_info->break_points().IsFixedArray()) {
Handle<FixedArray> array = isolate->factory()->NewFixedArray(2);
array->set(0, break_point_info->break_points());
array->set(1, *break_point);
@@ -304,18 +303,18 @@ bool BreakPointInfo::HasBreakPoint(Isolate* isolate,
Handle<BreakPointInfo> break_point_info,
Handle<BreakPoint> break_point) {
// No break point.
- if (break_point_info->break_points()->IsUndefined(isolate)) {
+ if (break_point_info->break_points().IsUndefined(isolate)) {
return false;
}
// Single break point.
- if (!break_point_info->break_points()->IsFixedArray()) {
+ if (!break_point_info->break_points().IsFixedArray()) {
return IsEqual(BreakPoint::cast(break_point_info->break_points()),
*break_point);
}
// Multiple break points.
FixedArray array = FixedArray::cast(break_point_info->break_points());
- for (int i = 0; i < array->length(); i++) {
- if (IsEqual(BreakPoint::cast(array->get(i)), *break_point)) {
+ for (int i = 0; i < array.length(); i++) {
+ if (IsEqual(BreakPoint::cast(array.get(i)), *break_point)) {
return true;
}
}
@@ -325,11 +324,11 @@ bool BreakPointInfo::HasBreakPoint(Isolate* isolate,
// Get the number of break points.
int BreakPointInfo::GetBreakPointCount(Isolate* isolate) {
// No break point.
- if (break_points()->IsUndefined(isolate)) return 0;
+ if (break_points().IsUndefined(isolate)) return 0;
// Single break point.
- if (!break_points()->IsFixedArray()) return 1;
+ if (!break_points().IsFixedArray()) return 1;
// Multiple break points.
- return FixedArray::cast(break_points())->length();
+ return FixedArray::cast(break_points()).length();
}
int CoverageInfo::SlotCount() const {
diff --git a/deps/v8/src/objects/debug-objects.h b/deps/v8/src/objects/debug-objects.h
index 9839f405f6..243caaa526 100644
--- a/deps/v8/src/objects/debug-objects.h
+++ b/deps/v8/src/objects/debug-objects.h
@@ -5,8 +5,8 @@
#ifndef V8_OBJECTS_DEBUG_OBJECTS_H_
#define V8_OBJECTS_DEBUG_OBJECTS_H_
-#include "src/objects.h"
#include "src/objects/fixed-array.h"
+#include "src/objects/objects.h"
#include "src/objects/struct.h"
// Has to be the last include (doesn't have include guards):
@@ -168,21 +168,9 @@ class DebugInfo : public Struct {
DECL_PRINTER(DebugInfo)
DECL_VERIFIER(DebugInfo)
-// Layout description.
-#define DEBUG_INFO_FIELDS(V) \
- V(kSharedFunctionInfoOffset, kTaggedSize) \
- V(kDebuggerHintsOffset, kTaggedSize) \
- V(kScriptOffset, kTaggedSize) \
- V(kOriginalBytecodeArrayOffset, kTaggedSize) \
- V(kDebugBytecodeArrayOffset, kTaggedSize) \
- V(kBreakPointsStateOffset, kTaggedSize) \
- V(kFlagsOffset, kTaggedSize) \
- V(kCoverageInfoOffset, kTaggedSize) \
- /* Total size. */ \
- V(kSize, 0)
-
- DEFINE_FIELD_OFFSET_CONSTANTS(Struct::kHeaderSize, DEBUG_INFO_FIELDS)
-#undef DEBUG_INFO_FIELDS
+ // Layout description.
+ DEFINE_FIELD_OFFSET_CONSTANTS(Struct::kHeaderSize,
+ TORQUE_GENERATED_DEBUG_INFO_FIELDS)
static const int kEstimatedNofBreakPointsInFunction = 4;
@@ -247,11 +235,6 @@ class CoverageInfo : public FixedArray {
// Print debug info.
void Print(std::unique_ptr<char[]> function_name);
- private:
- static int FirstIndexForSlot(int slot_index) {
- return kFirstSlotIndex + slot_index * kSlotIndexCount;
- }
-
static const int kFirstSlotIndex = 0;
// Each slot is assigned a group of indices starting at kFirstSlotIndex.
@@ -259,7 +242,17 @@ class CoverageInfo : public FixedArray {
static const int kSlotStartSourcePositionIndex = 0;
static const int kSlotEndSourcePositionIndex = 1;
static const int kSlotBlockCountIndex = 2;
- static const int kSlotIndexCount = 3;
+ static const int kSlotPaddingIndex = 3; // Padding to make the index count 4.
+ static const int kSlotIndexCount = 4;
+
+ static const int kSlotIndexCountLog2 = 2;
+ static const int kSlotIndexCountMask = (kSlotIndexCount - 1);
+ STATIC_ASSERT(1 << kSlotIndexCountLog2 == kSlotIndexCount);
+
+ private:
+ static int FirstIndexForSlot(int slot_index) {
+ return kFirstSlotIndex + slot_index * kSlotIndexCount;
+ }
OBJECT_CONSTRUCTORS(CoverageInfo, FixedArray);
};
diff --git a/deps/v8/src/objects/descriptor-array-inl.h b/deps/v8/src/objects/descriptor-array-inl.h
index a59d4e5a75..1cd64c1bf1 100644
--- a/deps/v8/src/objects/descriptor-array-inl.h
+++ b/deps/v8/src/objects/descriptor-array-inl.h
@@ -7,16 +7,16 @@
#include "src/objects/descriptor-array.h"
-#include "src/field-type.h"
+#include "src/execution/isolate.h"
+#include "src/handles/maybe-handles-inl.h"
#include "src/heap/heap-write-barrier.h"
#include "src/heap/heap.h"
-#include "src/isolate.h"
-#include "src/lookup-cache-inl.h"
-#include "src/maybe-handles-inl.h"
+#include "src/objects/field-type.h"
#include "src/objects/heap-object-inl.h"
-#include "src/objects/maybe-object.h"
+#include "src/objects/lookup-cache-inl.h"
+#include "src/objects/maybe-object-inl.h"
+#include "src/objects/property.h"
#include "src/objects/struct-inl.h"
-#include "src/property.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -59,25 +59,25 @@ inline int16_t DescriptorArray::CompareAndSwapRawNumberOfMarkedDescriptors(
}
void DescriptorArray::CopyEnumCacheFrom(DescriptorArray array) {
- set_enum_cache(array->enum_cache());
+ set_enum_cache(array.enum_cache());
}
int DescriptorArray::Search(Name name, int valid_descriptors) {
- DCHECK(name->IsUniqueName());
+ DCHECK(name.IsUniqueName());
return internal::Search<VALID_ENTRIES>(this, name, valid_descriptors,
nullptr);
}
int DescriptorArray::Search(Name name, Map map) {
- DCHECK(name->IsUniqueName());
- int number_of_own_descriptors = map->NumberOfOwnDescriptors();
+ DCHECK(name.IsUniqueName());
+ int number_of_own_descriptors = map.NumberOfOwnDescriptors();
if (number_of_own_descriptors == 0) return kNotFound;
return Search(name, number_of_own_descriptors);
}
int DescriptorArray::SearchWithCache(Isolate* isolate, Name name, Map map) {
- DCHECK(name->IsUniqueName());
- int number_of_own_descriptors = map->NumberOfOwnDescriptors();
+ DCHECK(name.IsUniqueName());
+ int number_of_own_descriptors = map.NumberOfOwnDescriptors();
if (number_of_own_descriptors == 0) return kNotFound;
DescriptorLookupCache* cache = isolate->descriptor_lookup_cache();
@@ -92,7 +92,11 @@ int DescriptorArray::SearchWithCache(Isolate* isolate, Name name, Map map) {
}
ObjectSlot DescriptorArray::GetFirstPointerSlot() {
- return RawField(DescriptorArray::kPointersStartOffset);
+ static_assert(kEndOfStrongFieldsOffset == kStartOfWeakFieldsOffset,
+ "Weak and strong fields are continuous.");
+ static_assert(kEndOfWeakFieldsOffset == kHeaderSize,
+ "Weak fields extend up to the end of the header.");
+ return RawField(DescriptorArray::kStartOfStrongFieldsOffset);
}
ObjectSlot DescriptorArray::GetDescriptorSlot(int descriptor) {
@@ -105,7 +109,7 @@ ObjectSlot DescriptorArray::GetDescriptorSlot(int descriptor) {
ObjectSlot DescriptorArray::GetKeySlot(int descriptor) {
DCHECK_LE(descriptor, number_of_all_descriptors());
ObjectSlot slot = GetDescriptorSlot(descriptor) + kEntryKeyIndex;
- DCHECK((*slot)->IsObject());
+ DCHECK((*slot).IsObject());
return slot;
}
@@ -194,7 +198,7 @@ void DescriptorArray::Append(Descriptor* desc) {
for (insertion = descriptor_number; insertion > 0; --insertion) {
Name key = GetSortedKey(insertion - 1);
- if (key->Hash() <= hash) break;
+ if (key.Hash() <= hash) break;
SetSortedKey(insertion, GetSortedKeyIndex(insertion - 1));
}
diff --git a/deps/v8/src/objects/descriptor-array.h b/deps/v8/src/objects/descriptor-array.h
index 89350514b7..3c1fa98a37 100644
--- a/deps/v8/src/objects/descriptor-array.h
+++ b/deps/v8/src/objects/descriptor-array.h
@@ -5,10 +5,10 @@
#ifndef V8_OBJECTS_DESCRIPTOR_ARRAY_H_
#define V8_OBJECTS_DESCRIPTOR_ARRAY_H_
-#include "src/objects.h"
#include "src/objects/fixed-array.h"
+#include "src/objects/objects.h"
#include "src/objects/struct.h"
-#include "src/utils.h"
+#include "src/utils/utils.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -139,20 +139,9 @@ class DescriptorArray : public HeapObject {
static const int kNotFound = -1;
// Layout description.
-#define DESCRIPTOR_ARRAY_FIELDS(V) \
- V(kNumberOfAllDescriptorsOffset, kUInt16Size) \
- V(kNumberOfDescriptorsOffset, kUInt16Size) \
- V(kRawNumberOfMarkedDescriptorsOffset, kUInt16Size) \
- V(kFiller16BitsOffset, kUInt16Size) \
- V(kPointersStartOffset, 0) \
- V(kEnumCacheOffset, kTaggedSize) \
- V(kHeaderSize, 0)
-
DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize,
- DESCRIPTOR_ARRAY_FIELDS)
-#undef DESCRIPTOR_ARRAY_FIELDS
-
- STATIC_ASSERT(IsAligned(kPointersStartOffset, kTaggedSize));
+ TORQUE_GENERATED_DESCRIPTOR_ARRAY_FIELDS)
+ STATIC_ASSERT(IsAligned(kStartOfWeakFieldsOffset, kTaggedSize));
STATIC_ASSERT(IsAligned(kHeaderSize, kTaggedSize));
// Garbage collection support.
@@ -174,7 +163,13 @@ class DescriptorArray : public HeapObject {
inline ObjectSlot GetKeySlot(int descriptor);
inline MaybeObjectSlot GetValueSlot(int descriptor);
- using BodyDescriptor = FlexibleWeakBodyDescriptor<kPointersStartOffset>;
+ static_assert(kEndOfStrongFieldsOffset == kStartOfWeakFieldsOffset,
+ "Weak fields follow strong fields.");
+ static_assert(kEndOfWeakFieldsOffset == kHeaderSize,
+ "Weak fields extend up to the end of the header.");
+  // We use this visitor to also visit the enum_cache, which is the only
+  // tagged field in the header, and is placed at the end of the header.
+ using BodyDescriptor = FlexibleWeakBodyDescriptor<kStartOfStrongFieldsOffset>;
// Layout of descriptor.
// Naming is consistent with Dictionary classes for easy templating.
diff --git a/deps/v8/src/objects/dictionary-inl.h b/deps/v8/src/objects/dictionary-inl.h
index caacde21fa..a1692978f3 100644
--- a/deps/v8/src/objects/dictionary-inl.h
+++ b/deps/v8/src/objects/dictionary-inl.h
@@ -7,7 +7,7 @@
#include "src/objects/dictionary.h"
-#include "src/hash-seed-inl.h"
+#include "src/numbers/hash-seed-inl.h"
#include "src/objects/hash-table-inl.h"
#include "src/objects/oddball.h"
#include "src/objects/property-cell-inl.h"
@@ -53,14 +53,14 @@ SimpleNumberDictionary::SimpleNumberDictionary(Address ptr)
bool NumberDictionary::requires_slow_elements() {
Object max_index_object = get(kMaxNumberKeyIndex);
- if (!max_index_object->IsSmi()) return false;
+ if (!max_index_object.IsSmi()) return false;
return 0 != (Smi::ToInt(max_index_object) & kRequiresSlowElementsMask);
}
uint32_t NumberDictionary::max_number_key() {
DCHECK(!requires_slow_elements());
Object max_index_object = get(kMaxNumberKeyIndex);
- if (!max_index_object->IsSmi()) return 0;
+ if (!max_index_object.IsSmi()) return 0;
uint32_t value = static_cast<uint32_t>(Smi::ToInt(max_index_object));
return value >> kRequiresSlowElementsTagSize;
}
@@ -73,7 +73,7 @@ template <typename Derived, typename Shape>
void Dictionary<Derived, Shape>::ClearEntry(Isolate* isolate, int entry) {
Object the_hole = this->GetReadOnlyRoots().the_hole_value();
PropertyDetails details = PropertyDetails::Empty();
- Derived::cast(*this)->SetEntry(isolate, entry, the_hole, the_hole, details);
+ Derived::cast(*this).SetEntry(isolate, entry, the_hole, the_hole, details);
}
template <typename Derived, typename Shape>
@@ -81,7 +81,7 @@ void Dictionary<Derived, Shape>::SetEntry(Isolate* isolate, int entry,
Object key, Object value,
PropertyDetails details) {
DCHECK(Dictionary::kEntrySize == 2 || Dictionary::kEntrySize == 3);
- DCHECK(!key->IsName() || details.dictionary_index() > 0);
+ DCHECK(!key.IsName() || details.dictionary_index() > 0);
int index = DerivedHashTable::EntryToIndex(entry);
DisallowHeapAllocation no_gc;
WriteBarrierMode mode = this->GetWriteBarrierMode(no_gc);
@@ -91,7 +91,7 @@ void Dictionary<Derived, Shape>::SetEntry(Isolate* isolate, int entry,
}
Object GlobalDictionaryShape::Unwrap(Object object) {
- return PropertyCell::cast(object)->name();
+ return PropertyCell::cast(object).name();
}
RootIndex GlobalDictionaryShape::GetMapRootIndex() {
@@ -105,7 +105,7 @@ RootIndex NameDictionaryShape::GetMapRootIndex() {
}
PropertyCell GlobalDictionary::CellAt(int entry) {
- DCHECK(KeyAt(entry)->IsPropertyCell());
+ DCHECK(KeyAt(entry).IsPropertyCell());
return PropertyCell::cast(KeyAt(entry));
}
@@ -115,15 +115,15 @@ bool GlobalDictionaryShape::IsLive(ReadOnlyRoots roots, Object k) {
}
bool GlobalDictionaryShape::IsKey(ReadOnlyRoots roots, Object k) {
- return IsLive(roots, k) && !PropertyCell::cast(k)->value()->IsTheHole(roots);
+ return IsLive(roots, k) && !PropertyCell::cast(k).value().IsTheHole(roots);
}
-Name GlobalDictionary::NameAt(int entry) { return CellAt(entry)->name(); }
-Object GlobalDictionary::ValueAt(int entry) { return CellAt(entry)->value(); }
+Name GlobalDictionary::NameAt(int entry) { return CellAt(entry).name(); }
+Object GlobalDictionary::ValueAt(int entry) { return CellAt(entry).value(); }
void GlobalDictionary::SetEntry(Isolate* isolate, int entry, Object key,
Object value, PropertyDetails details) {
- DCHECK_EQ(key, PropertyCell::cast(value)->name());
+ DCHECK_EQ(key, PropertyCell::cast(value).name());
set(EntryToIndex(entry) + kEntryKeyIndex, value);
DetailsAtPut(isolate, entry, details);
}
@@ -133,8 +133,8 @@ void GlobalDictionary::ValueAtPut(int entry, Object value) {
}
bool NumberDictionaryBaseShape::IsMatch(uint32_t key, Object other) {
- DCHECK(other->IsNumber());
- return key == static_cast<uint32_t>(other->Number());
+ DCHECK(other.IsNumber());
+ return key == static_cast<uint32_t>(other.Number());
}
uint32_t NumberDictionaryBaseShape::Hash(Isolate* isolate, uint32_t key) {
@@ -143,8 +143,8 @@ uint32_t NumberDictionaryBaseShape::Hash(Isolate* isolate, uint32_t key) {
uint32_t NumberDictionaryBaseShape::HashForObject(ReadOnlyRoots roots,
Object other) {
- DCHECK(other->IsNumber());
- return ComputeSeededHash(static_cast<uint32_t>(other->Number()),
+ DCHECK(other.IsNumber());
+ return ComputeSeededHash(static_cast<uint32_t>(other.Number()),
HashSeed(roots));
}
@@ -162,7 +162,7 @@ RootIndex SimpleNumberDictionaryShape::GetMapRootIndex() {
}
bool NameDictionaryShape::IsMatch(Handle<Name> key, Object other) {
- DCHECK(other->IsTheHole() || Name::cast(other)->IsUniqueName());
+ DCHECK(other.IsTheHole() || Name::cast(other).IsUniqueName());
DCHECK(key->IsUniqueName());
return *key == other;
}
@@ -172,17 +172,17 @@ uint32_t NameDictionaryShape::Hash(Isolate* isolate, Handle<Name> key) {
}
uint32_t NameDictionaryShape::HashForObject(ReadOnlyRoots roots, Object other) {
- return Name::cast(other)->Hash();
+ return Name::cast(other).Hash();
}
bool GlobalDictionaryShape::IsMatch(Handle<Name> key, Object other) {
- DCHECK(PropertyCell::cast(other)->name()->IsUniqueName());
- return *key == PropertyCell::cast(other)->name();
+ DCHECK(PropertyCell::cast(other).name().IsUniqueName());
+ return *key == PropertyCell::cast(other).name();
}
uint32_t GlobalDictionaryShape::HashForObject(ReadOnlyRoots roots,
Object other) {
- return PropertyCell::cast(other)->name()->Hash();
+ return PropertyCell::cast(other).name().Hash();
}
Handle<Object> NameDictionaryShape::AsHandle(Isolate* isolate,
@@ -194,19 +194,19 @@ Handle<Object> NameDictionaryShape::AsHandle(Isolate* isolate,
template <typename Dictionary>
PropertyDetails GlobalDictionaryShape::DetailsAt(Dictionary dict, int entry) {
DCHECK_LE(0, entry); // Not found is -1, which is not caught by get().
- return dict->CellAt(entry)->property_details();
+ return dict.CellAt(entry).property_details();
}
template <typename Dictionary>
void GlobalDictionaryShape::DetailsAtPut(Isolate* isolate, Dictionary dict,
int entry, PropertyDetails value) {
DCHECK_LE(0, entry); // Not found is -1, which is not caught by get().
- PropertyCell cell = dict->CellAt(entry);
- if (cell->property_details().IsReadOnly() != value.IsReadOnly()) {
- cell->dependent_code()->DeoptimizeDependentCodeGroup(
+ PropertyCell cell = dict.CellAt(entry);
+ if (cell.property_details().IsReadOnly() != value.IsReadOnly()) {
+ cell.dependent_code().DeoptimizeDependentCodeGroup(
isolate, DependentCode::kPropertyCellChangedGroup);
}
- cell->set_property_details(value);
+ cell.set_property_details(value);
}
} // namespace internal
diff --git a/deps/v8/src/objects/dictionary.h b/deps/v8/src/objects/dictionary.h
index 0bce08393f..ca709f34d8 100644
--- a/deps/v8/src/objects/dictionary.h
+++ b/deps/v8/src/objects/dictionary.h
@@ -6,11 +6,11 @@
#define V8_OBJECTS_DICTIONARY_H_
#include "src/base/export-template.h"
-#include "src/globals.h"
+#include "src/common/globals.h"
#include "src/objects/hash-table.h"
#include "src/objects/property-array.h"
#include "src/objects/smi.h"
-#include "src/roots.h"
+#include "src/roots/roots.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -99,16 +99,16 @@ class BaseDictionaryShape : public BaseShape<Key> {
static inline PropertyDetails DetailsAt(Dictionary dict, int entry) {
STATIC_ASSERT(Dictionary::kEntrySize == 3);
DCHECK_GE(entry, 0); // Not found is -1, which is not caught by get().
- return PropertyDetails(Smi::cast(dict->get(
- Dictionary::EntryToIndex(entry) + Dictionary::kEntryDetailsIndex)));
+ return PropertyDetails(Smi::cast(dict.get(Dictionary::EntryToIndex(entry) +
+ Dictionary::kEntryDetailsIndex)));
}
template <typename Dictionary>
static inline void DetailsAtPut(Isolate* isolate, Dictionary dict, int entry,
PropertyDetails value) {
STATIC_ASSERT(Dictionary::kEntrySize == 3);
- dict->set(Dictionary::EntryToIndex(entry) + Dictionary::kEntryDetailsIndex,
- value.AsSmi());
+ dict.set(Dictionary::EntryToIndex(entry) + Dictionary::kEntryDetailsIndex,
+ value.AsSmi());
}
};
@@ -341,10 +341,6 @@ class NumberDictionary
static const int kMaxNumberKeyIndex = kPrefixStartIndex;
void UpdateMaxNumberKey(uint32_t key, Handle<JSObject> dictionary_holder);
- // Returns true if the dictionary contains any elements that are non-writable,
- // non-configurable, non-enumerable, or have getters/setters.
- bool HasComplexElements();
-
// Sorting support
void CopyValuesTo(FixedArray elements);
diff --git a/deps/v8/src/objects/elements-inl.h b/deps/v8/src/objects/elements-inl.h
new file mode 100644
index 0000000000..c4f2e2bf78
--- /dev/null
+++ b/deps/v8/src/objects/elements-inl.h
@@ -0,0 +1,38 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_ELEMENTS_INL_H_
+#define V8_OBJECTS_ELEMENTS_INL_H_
+
+#include "src/objects/elements.h"
+
+#include "src/handles/handles-inl.h"
+#include "src/objects/objects-inl.h"
+
+namespace v8 {
+namespace internal {
+
+inline void ElementsAccessor::CollectElementIndices(Handle<JSObject> object,
+ KeyAccumulator* keys) {
+ CollectElementIndices(object, handle(object->elements(), keys->isolate()),
+ keys);
+}
+
+inline MaybeHandle<FixedArray> ElementsAccessor::PrependElementIndices(
+ Handle<JSObject> object, Handle<FixedArray> keys, GetKeysConversion convert,
+ PropertyFilter filter) {
+ return PrependElementIndices(object,
+ handle(object->elements(), object->GetIsolate()),
+ keys, convert, filter);
+}
+
+inline bool ElementsAccessor::HasElement(JSObject holder, uint32_t index,
+ PropertyFilter filter) {
+ return HasElement(holder, index, holder.elements(), filter);
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_OBJECTS_ELEMENTS_INL_H_
diff --git a/deps/v8/src/objects/elements-kind.cc b/deps/v8/src/objects/elements-kind.cc
new file mode 100644
index 0000000000..a819caf459
--- /dev/null
+++ b/deps/v8/src/objects/elements-kind.cc
@@ -0,0 +1,266 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/objects/elements-kind.h"
+
+#include "src/base/lazy-instance.h"
+#include "src/objects/elements.h"
+#include "src/objects/objects-inl.h"
+#include "src/objects/objects.h"
+
+namespace v8 {
+namespace internal {
+
+int ElementsKindToShiftSize(ElementsKind elements_kind) {
+ switch (elements_kind) {
+ case UINT8_ELEMENTS:
+ case INT8_ELEMENTS:
+ case UINT8_CLAMPED_ELEMENTS:
+ return 0;
+ case UINT16_ELEMENTS:
+ case INT16_ELEMENTS:
+ return 1;
+ case UINT32_ELEMENTS:
+ case INT32_ELEMENTS:
+ case FLOAT32_ELEMENTS:
+ return 2;
+ case PACKED_DOUBLE_ELEMENTS:
+ case HOLEY_DOUBLE_ELEMENTS:
+ case FLOAT64_ELEMENTS:
+ case BIGINT64_ELEMENTS:
+ case BIGUINT64_ELEMENTS:
+ return 3;
+ case PACKED_SMI_ELEMENTS:
+ case PACKED_ELEMENTS:
+ case PACKED_FROZEN_ELEMENTS:
+ case PACKED_SEALED_ELEMENTS:
+ case HOLEY_SMI_ELEMENTS:
+ case HOLEY_ELEMENTS:
+ case HOLEY_FROZEN_ELEMENTS:
+ case HOLEY_SEALED_ELEMENTS:
+ case DICTIONARY_ELEMENTS:
+ case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
+ case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
+ case FAST_STRING_WRAPPER_ELEMENTS:
+ case SLOW_STRING_WRAPPER_ELEMENTS:
+ return kTaggedSizeLog2;
+ case NO_ELEMENTS:
+ UNREACHABLE();
+ }
+ UNREACHABLE();
+}
+
+int ElementsKindToByteSize(ElementsKind elements_kind) {
+ return 1 << ElementsKindToShiftSize(elements_kind);
+}
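+
+// Derived from the switch above: e.g. UINT8_ELEMENTS has shift size 0 and
+// thus byte size 1 << 0 == 1, while FLOAT64_ELEMENTS has shift size 3 and
+// byte size 1 << 3 == 8.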
+
+int GetDefaultHeaderSizeForElementsKind(ElementsKind elements_kind) {
+ STATIC_ASSERT(FixedArray::kHeaderSize == FixedDoubleArray::kHeaderSize);
+
+ if (IsTypedArrayElementsKind(elements_kind)) {
+ return 0;
+ } else {
+ return FixedArray::kHeaderSize - kHeapObjectTag;
+ }
+}
+
+const char* ElementsKindToString(ElementsKind kind) {
+ switch (kind) {
+ case PACKED_SMI_ELEMENTS:
+ return "PACKED_SMI_ELEMENTS";
+ case HOLEY_SMI_ELEMENTS:
+ return "HOLEY_SMI_ELEMENTS";
+ case PACKED_ELEMENTS:
+ return "PACKED_ELEMENTS";
+ case HOLEY_ELEMENTS:
+ return "HOLEY_ELEMENTS";
+ case PACKED_DOUBLE_ELEMENTS:
+ return "PACKED_DOUBLE_ELEMENTS";
+ case HOLEY_DOUBLE_ELEMENTS:
+ return "HOLEY_DOUBLE_ELEMENTS";
+ case PACKED_SEALED_ELEMENTS:
+ return "PACKED_SEALED_ELEMENTS";
+ case HOLEY_SEALED_ELEMENTS:
+ return "HOLEY_SEALED_ELEMENTS";
+ case PACKED_FROZEN_ELEMENTS:
+ return "PACKED_FROZEN_ELEMENTS";
+ case HOLEY_FROZEN_ELEMENTS:
+ return "HOLEY_FROZEN_ELEMENTS";
+ case DICTIONARY_ELEMENTS:
+ return "DICTIONARY_ELEMENTS";
+ case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
+ return "FAST_SLOPPY_ARGUMENTS_ELEMENTS";
+ case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
+ return "SLOW_SLOPPY_ARGUMENTS_ELEMENTS";
+ case FAST_STRING_WRAPPER_ELEMENTS:
+ return "FAST_STRING_WRAPPER_ELEMENTS";
+ case SLOW_STRING_WRAPPER_ELEMENTS:
+ return "SLOW_STRING_WRAPPER_ELEMENTS";
+
+#define PRINT_NAME(Type, type, TYPE, _) \
+ case TYPE##_ELEMENTS: \
+ return #TYPE "_ELEMENTS";
+
+ TYPED_ARRAYS(PRINT_NAME);
+#undef PRINT_NAME
+ case NO_ELEMENTS:
+ return "NO_ELEMENTS";
+ }
+}
+
+ElementsKind kFastElementsKindSequence[kFastElementsKindCount] = {
+ PACKED_SMI_ELEMENTS, // 0
+ HOLEY_SMI_ELEMENTS, // 1
+ PACKED_DOUBLE_ELEMENTS, // 2
+ HOLEY_DOUBLE_ELEMENTS, // 3
+ PACKED_ELEMENTS, // 4
+ HOLEY_ELEMENTS // 5
+};
+STATIC_ASSERT(PACKED_SMI_ELEMENTS == FIRST_FAST_ELEMENTS_KIND);
+// Verify that kFastElementsKindPackedToHoley is correct.
+STATIC_ASSERT(PACKED_SMI_ELEMENTS + kFastElementsKindPackedToHoley ==
+ HOLEY_SMI_ELEMENTS);
+STATIC_ASSERT(PACKED_DOUBLE_ELEMENTS + kFastElementsKindPackedToHoley ==
+ HOLEY_DOUBLE_ELEMENTS);
+STATIC_ASSERT(PACKED_ELEMENTS + kFastElementsKindPackedToHoley ==
+ HOLEY_ELEMENTS);
+
+ElementsKind GetFastElementsKindFromSequenceIndex(int sequence_number) {
+ DCHECK(sequence_number >= 0 && sequence_number < kFastElementsKindCount);
+ return kFastElementsKindSequence[sequence_number];
+}
+
+int GetSequenceIndexFromFastElementsKind(ElementsKind elements_kind) {
+ for (int i = 0; i < kFastElementsKindCount; ++i) {
+ if (kFastElementsKindSequence[i] == elements_kind) {
+ return i;
+ }
+ }
+ UNREACHABLE();
+}
+
+ElementsKind GetNextTransitionElementsKind(ElementsKind kind) {
+ int index = GetSequenceIndexFromFastElementsKind(kind);
+ return GetFastElementsKindFromSequenceIndex(index + 1);
+}
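+
+// Repeatedly applying GetNextTransitionElementsKind() walks the sequence
+// above: PACKED_SMI -> HOLEY_SMI -> PACKED_DOUBLE -> HOLEY_DOUBLE ->
+// PACKED -> HOLEY. Asking for the successor of HOLEY_ELEMENTS would step
+// past the end of kFastElementsKindSequence, which the DCHECK above rejects.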
+
+static inline bool IsFastTransitionTarget(ElementsKind elements_kind) {
+ return IsFastElementsKind(elements_kind) ||
+ elements_kind == DICTIONARY_ELEMENTS;
+}
+
+bool IsMoreGeneralElementsKindTransition(ElementsKind from_kind,
+ ElementsKind to_kind) {
+ if (!IsFastElementsKind(from_kind)) return false;
+ if (!IsFastTransitionTarget(to_kind)) return false;
+ DCHECK(!IsTypedArrayElementsKind(from_kind));
+ DCHECK(!IsTypedArrayElementsKind(to_kind));
+ switch (from_kind) {
+ case PACKED_SMI_ELEMENTS:
+ return to_kind != PACKED_SMI_ELEMENTS;
+ case HOLEY_SMI_ELEMENTS:
+ return to_kind != PACKED_SMI_ELEMENTS && to_kind != HOLEY_SMI_ELEMENTS;
+ case PACKED_DOUBLE_ELEMENTS:
+ return to_kind != PACKED_SMI_ELEMENTS && to_kind != HOLEY_SMI_ELEMENTS &&
+ to_kind != PACKED_DOUBLE_ELEMENTS;
+ case HOLEY_DOUBLE_ELEMENTS:
+ return to_kind == PACKED_ELEMENTS || to_kind == HOLEY_ELEMENTS;
+ case PACKED_ELEMENTS:
+ return to_kind == HOLEY_ELEMENTS;
+ case HOLEY_ELEMENTS:
+ return false;
+ default:
+ return false;
+ }
+}
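+
+// For example, IsMoreGeneralElementsKindTransition(PACKED_SMI_ELEMENTS,
+// HOLEY_ELEMENTS) is true, while every transition out of HOLEY_ELEMENTS
+// returns false: the switch only accepts targets that are strictly more
+// general than the source kind.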
+
+bool UnionElementsKindUptoSize(ElementsKind* a_out, ElementsKind b) {
+ // Assert that the union of two ElementsKinds can be computed via std::max.
+ static_assert(PACKED_SMI_ELEMENTS < HOLEY_SMI_ELEMENTS,
+ "ElementsKind union not computable via std::max.");
+ static_assert(HOLEY_SMI_ELEMENTS < PACKED_ELEMENTS,
+ "ElementsKind union not computable via std::max.");
+ static_assert(PACKED_ELEMENTS < HOLEY_ELEMENTS,
+ "ElementsKind union not computable via std::max.");
+ static_assert(PACKED_DOUBLE_ELEMENTS < HOLEY_DOUBLE_ELEMENTS,
+ "ElementsKind union not computable via std::max.");
+ ElementsKind a = *a_out;
+ switch (a) {
+ case PACKED_SMI_ELEMENTS:
+ switch (b) {
+ case PACKED_SMI_ELEMENTS:
+ case HOLEY_SMI_ELEMENTS:
+ case PACKED_ELEMENTS:
+ case HOLEY_ELEMENTS:
+ *a_out = b;
+ return true;
+ default:
+ return false;
+ }
+ case HOLEY_SMI_ELEMENTS:
+ switch (b) {
+ case PACKED_SMI_ELEMENTS:
+ case HOLEY_SMI_ELEMENTS:
+ *a_out = HOLEY_SMI_ELEMENTS;
+ return true;
+ case PACKED_ELEMENTS:
+ case HOLEY_ELEMENTS:
+ *a_out = HOLEY_ELEMENTS;
+ return true;
+ default:
+ return false;
+ }
+ case PACKED_ELEMENTS:
+ switch (b) {
+ case PACKED_SMI_ELEMENTS:
+ case PACKED_ELEMENTS:
+ *a_out = PACKED_ELEMENTS;
+ return true;
+ case HOLEY_SMI_ELEMENTS:
+ case HOLEY_ELEMENTS:
+ *a_out = HOLEY_ELEMENTS;
+ return true;
+ default:
+ return false;
+ }
+ case HOLEY_ELEMENTS:
+ switch (b) {
+ case PACKED_SMI_ELEMENTS:
+ case HOLEY_SMI_ELEMENTS:
+ case PACKED_ELEMENTS:
+ case HOLEY_ELEMENTS:
+ *a_out = HOLEY_ELEMENTS;
+ return true;
+ default:
+ return false;
+ }
+ break;
+ case PACKED_DOUBLE_ELEMENTS:
+ switch (b) {
+ case PACKED_DOUBLE_ELEMENTS:
+ case HOLEY_DOUBLE_ELEMENTS:
+ *a_out = b;
+ return true;
+ default:
+ return false;
+ }
+ case HOLEY_DOUBLE_ELEMENTS:
+ switch (b) {
+ case PACKED_DOUBLE_ELEMENTS:
+ case HOLEY_DOUBLE_ELEMENTS:
+ *a_out = HOLEY_DOUBLE_ELEMENTS;
+ return true;
+ default:
+ return false;
+ }
+
+ break;
+ default:
+ break;
+ }
+ return false;
+}
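+
+// Usage sketch: starting with ElementsKind kind = PACKED_SMI_ELEMENTS,
+// UnionElementsKindUptoSize(&kind, HOLEY_ELEMENTS) returns true and widens
+// kind to HOLEY_ELEMENTS, while UnionElementsKindUptoSize(&kind,
+// PACKED_DOUBLE_ELEMENTS) returns false and leaves kind unchanged, since the
+// Smi/object kinds and the double kinds use different element
+// representations.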
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/objects/elements-kind.h b/deps/v8/src/objects/elements-kind.h
new file mode 100644
index 0000000000..3ed6ea66ec
--- /dev/null
+++ b/deps/v8/src/objects/elements-kind.h
@@ -0,0 +1,317 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_ELEMENTS_KIND_H_
+#define V8_OBJECTS_ELEMENTS_KIND_H_
+
+#include "src/base/macros.h"
+#include "src/common/checks.h"
+#include "src/flags/flags.h"
+#include "src/utils/utils.h"
+
+namespace v8 {
+namespace internal {
+
+// V has parameters (Type, type, TYPE, C type)
+#define TYPED_ARRAYS(V) \
+ V(Uint8, uint8, UINT8, uint8_t) \
+ V(Int8, int8, INT8, int8_t) \
+ V(Uint16, uint16, UINT16, uint16_t) \
+ V(Int16, int16, INT16, int16_t) \
+ V(Uint32, uint32, UINT32, uint32_t) \
+ V(Int32, int32, INT32, int32_t) \
+ V(Float32, float32, FLOAT32, float) \
+ V(Float64, float64, FLOAT64, double) \
+ V(Uint8Clamped, uint8_clamped, UINT8_CLAMPED, uint8_t) \
+ V(BigUint64, biguint64, BIGUINT64, uint64_t) \
+ V(BigInt64, bigint64, BIGINT64, int64_t)
+
+enum ElementsKind : uint8_t {
+ // The "fast" kind for elements that only contain SMI values. Must be first
+ // to make it possible to efficiently check maps for this kind.
+ PACKED_SMI_ELEMENTS,
+ HOLEY_SMI_ELEMENTS,
+
+ // The "fast" kind for tagged values. Must be second to make it possible to
+ // efficiently check maps for this kind and PACKED_SMI_ELEMENTS at once.
+ PACKED_ELEMENTS,
+ HOLEY_ELEMENTS,
+
+ // The "fast" kind for unwrapped, non-tagged double values.
+ PACKED_DOUBLE_ELEMENTS,
+ HOLEY_DOUBLE_ELEMENTS,
+
+ // The sealed kind for elements.
+ PACKED_SEALED_ELEMENTS,
+ HOLEY_SEALED_ELEMENTS,
+
+ // The frozen kind for elements.
+ PACKED_FROZEN_ELEMENTS,
+ HOLEY_FROZEN_ELEMENTS,
+
+ // The "slow" kind.
+ DICTIONARY_ELEMENTS,
+
+ // Elements kind of the "arguments" object (only in sloppy mode).
+ FAST_SLOPPY_ARGUMENTS_ELEMENTS,
+ SLOW_SLOPPY_ARGUMENTS_ELEMENTS,
+
+ // For string wrapper objects ("new String('...')"), the string's characters
+ // are overlaid onto a regular elements backing store.
+ FAST_STRING_WRAPPER_ELEMENTS,
+ SLOW_STRING_WRAPPER_ELEMENTS,
+
+// Fixed typed arrays.
+#define TYPED_ARRAY_ELEMENTS_KIND(Type, type, TYPE, ctype) TYPE##_ELEMENTS,
+ TYPED_ARRAYS(TYPED_ARRAY_ELEMENTS_KIND)
+#undef TYPED_ARRAY_ELEMENTS_KIND
+
+ // Sentinel ElementsKind for objects with no elements.
+ NO_ELEMENTS,
+
+ // Derived constants from ElementsKind.
+ FIRST_ELEMENTS_KIND = PACKED_SMI_ELEMENTS,
+ LAST_ELEMENTS_KIND = BIGINT64_ELEMENTS,
+ FIRST_FAST_ELEMENTS_KIND = PACKED_SMI_ELEMENTS,
+ LAST_FAST_ELEMENTS_KIND = HOLEY_DOUBLE_ELEMENTS,
+ FIRST_FIXED_TYPED_ARRAY_ELEMENTS_KIND = UINT8_ELEMENTS,
+ LAST_FIXED_TYPED_ARRAY_ELEMENTS_KIND = BIGINT64_ELEMENTS,
+ TERMINAL_FAST_ELEMENTS_KIND = HOLEY_ELEMENTS,
+ LAST_FROZEN_ELEMENTS_KIND = HOLEY_FROZEN_ELEMENTS,
+
+// Alias for kSystemPointerSize-sized elements
+#ifdef V8_COMPRESS_POINTERS
+ SYSTEM_POINTER_ELEMENTS = PACKED_DOUBLE_ELEMENTS,
+#else
+ SYSTEM_POINTER_ELEMENTS = PACKED_ELEMENTS,
+#endif
+};
+
+constexpr int kElementsKindCount = LAST_ELEMENTS_KIND - FIRST_ELEMENTS_KIND + 1;
+constexpr int kFastElementsKindCount =
+ LAST_FAST_ELEMENTS_KIND - FIRST_FAST_ELEMENTS_KIND + 1;
+
+// The number to add to a packed elements kind to reach a holey elements kind
+constexpr int kFastElementsKindPackedToHoley =
+ HOLEY_SMI_ELEMENTS - PACKED_SMI_ELEMENTS;
+
+V8_EXPORT_PRIVATE int ElementsKindToShiftSize(ElementsKind elements_kind);
+V8_EXPORT_PRIVATE int ElementsKindToByteSize(ElementsKind elements_kind);
+int GetDefaultHeaderSizeForElementsKind(ElementsKind elements_kind);
+const char* ElementsKindToString(ElementsKind kind);
+
+inline ElementsKind GetInitialFastElementsKind() { return PACKED_SMI_ELEMENTS; }
+
+ElementsKind GetFastElementsKindFromSequenceIndex(int sequence_number);
+int GetSequenceIndexFromFastElementsKind(ElementsKind elements_kind);
+
+ElementsKind GetNextTransitionElementsKind(ElementsKind elements_kind);
+
+inline bool IsDictionaryElementsKind(ElementsKind kind) {
+ return kind == DICTIONARY_ELEMENTS;
+}
+
+inline bool IsSloppyArgumentsElementsKind(ElementsKind kind) {
+ return IsInRange(kind, FAST_SLOPPY_ARGUMENTS_ELEMENTS,
+ SLOW_SLOPPY_ARGUMENTS_ELEMENTS);
+}
+
+inline bool IsStringWrapperElementsKind(ElementsKind kind) {
+ return IsInRange(kind, FAST_STRING_WRAPPER_ELEMENTS,
+ SLOW_STRING_WRAPPER_ELEMENTS);
+}
+
+inline bool IsTypedArrayElementsKind(ElementsKind kind) {
+ return IsInRange(kind, FIRST_FIXED_TYPED_ARRAY_ELEMENTS_KIND,
+ LAST_FIXED_TYPED_ARRAY_ELEMENTS_KIND);
+}
+
+inline bool IsTerminalElementsKind(ElementsKind kind) {
+ return kind == TERMINAL_FAST_ELEMENTS_KIND || IsTypedArrayElementsKind(kind);
+}
+
+inline bool IsFastElementsKind(ElementsKind kind) {
+ STATIC_ASSERT(FIRST_FAST_ELEMENTS_KIND == 0);
+ return kind <= LAST_FAST_ELEMENTS_KIND;
+}
+
+inline bool IsTransitionElementsKind(ElementsKind kind) {
+ return IsFastElementsKind(kind) || IsTypedArrayElementsKind(kind) ||
+ kind == FAST_SLOPPY_ARGUMENTS_ELEMENTS ||
+ kind == FAST_STRING_WRAPPER_ELEMENTS;
+}
+
+inline bool IsDoubleElementsKind(ElementsKind kind) {
+ return IsInRange(kind, PACKED_DOUBLE_ELEMENTS, HOLEY_DOUBLE_ELEMENTS);
+}
+
+inline bool IsFixedFloatElementsKind(ElementsKind kind) {
+ return kind == FLOAT32_ELEMENTS || kind == FLOAT64_ELEMENTS;
+}
+
+inline bool IsDoubleOrFloatElementsKind(ElementsKind kind) {
+ return IsDoubleElementsKind(kind) || IsFixedFloatElementsKind(kind);
+}
+
+// This predicate is used to disable the respective functionality in builtins.
+inline bool IsFrozenOrSealedElementsKindUnchecked(ElementsKind kind) {
+ return IsInRange(kind, PACKED_SEALED_ELEMENTS, HOLEY_FROZEN_ELEMENTS);
+}
+
+inline bool IsFrozenOrSealedElementsKind(ElementsKind kind) {
+ DCHECK_IMPLIES(IsFrozenOrSealedElementsKindUnchecked(kind),
+ FLAG_enable_sealed_frozen_elements_kind);
+ return IsFrozenOrSealedElementsKindUnchecked(kind);
+}
+
+inline bool IsSealedElementsKind(ElementsKind kind) {
+ DCHECK_IMPLIES(IsInRange(kind, PACKED_SEALED_ELEMENTS, HOLEY_SEALED_ELEMENTS),
+ FLAG_enable_sealed_frozen_elements_kind);
+ return IsInRange(kind, PACKED_SEALED_ELEMENTS, HOLEY_SEALED_ELEMENTS);
+}
+
+inline bool IsFrozenElementsKind(ElementsKind kind) {
+ DCHECK_IMPLIES(IsInRange(kind, PACKED_FROZEN_ELEMENTS, HOLEY_FROZEN_ELEMENTS),
+ FLAG_enable_sealed_frozen_elements_kind);
+ return IsInRange(kind, PACKED_FROZEN_ELEMENTS, HOLEY_FROZEN_ELEMENTS);
+}
+
+inline bool IsSmiOrObjectElementsKind(ElementsKind kind) {
+ return IsInRange(kind, PACKED_SMI_ELEMENTS, HOLEY_ELEMENTS);
+}
+
+inline bool IsSmiElementsKind(ElementsKind kind) {
+ return IsInRange(kind, PACKED_SMI_ELEMENTS, HOLEY_SMI_ELEMENTS);
+}
+
+inline bool IsFastNumberElementsKind(ElementsKind kind) {
+ return IsSmiElementsKind(kind) || IsDoubleElementsKind(kind);
+}
+
+inline bool IsObjectElementsKind(ElementsKind kind) {
+ return IsInRange(kind, PACKED_ELEMENTS, HOLEY_ELEMENTS);
+}
+
+inline bool IsHoleyFrozenOrSealedElementsKind(ElementsKind kind) {
+ DCHECK_IMPLIES(kind == HOLEY_SEALED_ELEMENTS || kind == HOLEY_FROZEN_ELEMENTS,
+ FLAG_enable_sealed_frozen_elements_kind);
+ return kind == HOLEY_SEALED_ELEMENTS || kind == HOLEY_FROZEN_ELEMENTS;
+}
+
+inline bool IsHoleyElementsKind(ElementsKind kind) {
+ return kind % 2 == 1 && kind <= HOLEY_DOUBLE_ELEMENTS;
+}
+
+inline bool IsHoleyElementsKindForRead(ElementsKind kind) {
+ return kind % 2 == 1 && kind <= HOLEY_FROZEN_ELEMENTS;
+}
+
+inline bool IsHoleyOrDictionaryElementsKind(ElementsKind kind) {
+ return IsHoleyElementsKindForRead(kind) || kind == DICTIONARY_ELEMENTS;
+}
+
+inline bool IsFastPackedElementsKind(ElementsKind kind) {
+ return kind % 2 == 0 && kind <= PACKED_DOUBLE_ELEMENTS;
+}
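+
+// The three predicates above exploit the enum layout: each packed kind has an
+// even value and is immediately followed by its holey counterpart with an odd
+// value (e.g. PACKED_SMI_ELEMENTS == 0, HOLEY_SMI_ELEMENTS == 1), so a parity
+// check plus an upper bound suffices.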
+
+inline ElementsKind GetPackedElementsKind(ElementsKind holey_kind) {
+ if (holey_kind == HOLEY_SMI_ELEMENTS) {
+ return PACKED_SMI_ELEMENTS;
+ }
+ if (holey_kind == HOLEY_DOUBLE_ELEMENTS) {
+ return PACKED_DOUBLE_ELEMENTS;
+ }
+ if (holey_kind == HOLEY_ELEMENTS) {
+ return PACKED_ELEMENTS;
+ }
+ return holey_kind;
+}
+
+inline ElementsKind GetHoleyElementsKind(ElementsKind packed_kind) {
+ if (packed_kind == PACKED_SMI_ELEMENTS) {
+ return HOLEY_SMI_ELEMENTS;
+ }
+ if (packed_kind == PACKED_DOUBLE_ELEMENTS) {
+ return HOLEY_DOUBLE_ELEMENTS;
+ }
+ if (packed_kind == PACKED_ELEMENTS) {
+ return HOLEY_ELEMENTS;
+ }
+ return packed_kind;
+}
+
+inline bool UnionElementsKindUptoPackedness(ElementsKind* a_out,
+ ElementsKind b) {
+ // Assert that the union of two ElementsKinds can be computed via std::max.
+ static_assert(PACKED_SMI_ELEMENTS < HOLEY_SMI_ELEMENTS,
+ "ElementsKind union not computable via std::max.");
+ static_assert(PACKED_ELEMENTS < HOLEY_ELEMENTS,
+ "ElementsKind union not computable via std::max.");
+ static_assert(PACKED_DOUBLE_ELEMENTS < HOLEY_DOUBLE_ELEMENTS,
+ "ElementsKind union not computable via std::max.");
+ ElementsKind a = *a_out;
+ switch (a) {
+ case HOLEY_SMI_ELEMENTS:
+ case PACKED_SMI_ELEMENTS:
+ if (b == PACKED_SMI_ELEMENTS || b == HOLEY_SMI_ELEMENTS) {
+ *a_out = std::max(a, b);
+ return true;
+ }
+ break;
+ case PACKED_ELEMENTS:
+ case HOLEY_ELEMENTS:
+ if (b == PACKED_ELEMENTS || b == HOLEY_ELEMENTS) {
+ *a_out = std::max(a, b);
+ return true;
+ }
+ break;
+ case PACKED_DOUBLE_ELEMENTS:
+ case HOLEY_DOUBLE_ELEMENTS:
+ if (b == PACKED_DOUBLE_ELEMENTS || b == HOLEY_DOUBLE_ELEMENTS) {
+ *a_out = std::max(a, b);
+ return true;
+ }
+ break;
+ default:
+ break;
+ }
+ return false;
+}
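+
+// For example, with ElementsKind a = PACKED_ELEMENTS, passing b =
+// HOLEY_ELEMENTS returns true and updates a to HOLEY_ELEMENTS (the std::max
+// of the pair), while passing b = PACKED_DOUBLE_ELEMENTS returns false since
+// the union would need to change the representation, not just the packedness.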
+
+bool UnionElementsKindUptoSize(ElementsKind* a_out, ElementsKind b);
+
+inline ElementsKind FastSmiToObjectElementsKind(ElementsKind from_kind) {
+ DCHECK(IsSmiElementsKind(from_kind));
+ return (from_kind == PACKED_SMI_ELEMENTS) ? PACKED_ELEMENTS : HOLEY_ELEMENTS;
+}
+
+inline bool IsSimpleMapChangeTransition(ElementsKind from_kind,
+ ElementsKind to_kind) {
+ return (GetHoleyElementsKind(from_kind) == to_kind) ||
+ (IsSmiElementsKind(from_kind) && IsObjectElementsKind(to_kind));
+}
+
+bool IsMoreGeneralElementsKindTransition(ElementsKind from_kind,
+ ElementsKind to_kind);
+
+inline ElementsKind GetMoreGeneralElementsKind(ElementsKind from_kind,
+ ElementsKind to_kind) {
+ if (IsMoreGeneralElementsKindTransition(from_kind, to_kind)) {
+ return to_kind;
+ }
+ return from_kind;
+}
+
+inline bool IsTransitionableFastElementsKind(ElementsKind from_kind) {
+ return IsFastElementsKind(from_kind) &&
+ from_kind != TERMINAL_FAST_ELEMENTS_KIND;
+}
+
+inline bool ElementsKindEqual(ElementsKind a, ElementsKind b) { return a == b; }
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_OBJECTS_ELEMENTS_KIND_H_
diff --git a/deps/v8/src/objects/elements.cc b/deps/v8/src/objects/elements.cc
new file mode 100644
index 0000000000..e1232a0d5b
--- /dev/null
+++ b/deps/v8/src/objects/elements.cc
@@ -0,0 +1,4798 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/objects/elements.h"
+
+#include "src/execution/arguments.h"
+#include "src/execution/frames.h"
+#include "src/execution/isolate-inl.h"
+#include "src/execution/message-template.h"
+#include "src/heap/factory.h"
+#include "src/heap/heap-inl.h" // For MaxNumberToStringCacheSize.
+#include "src/heap/heap-write-barrier-inl.h"
+#include "src/numbers/conversions.h"
+#include "src/objects/arguments-inl.h"
+#include "src/objects/hash-table-inl.h"
+#include "src/objects/js-array-buffer-inl.h"
+#include "src/objects/js-array-inl.h"
+#include "src/objects/keys.h"
+#include "src/objects/objects-inl.h"
+#include "src/objects/slots-atomic-inl.h"
+#include "src/objects/slots.h"
+#include "src/utils/utils.h"
+
+// Each concrete ElementsAccessor can handle exactly one ElementsKind;
+// several abstract ElementsAccessor classes are used to allow sharing
+// common code.
+//
+// Inheritance hierarchy:
+// - ElementsAccessorBase (abstract)
+// - FastElementsAccessor (abstract)
+// - FastSmiOrObjectElementsAccessor
+// - FastPackedSmiElementsAccessor
+// - FastHoleySmiElementsAccessor
+// - FastPackedObjectElementsAccessor
+// - FastSealedObjectElementsAccessor: template
+// - FastPackedSealedObjectElementsAccessor
+// - FastHoleySealedObjectElementsAccessor
+// - FastFrozenObjectElementsAccessor: template
+// - FastPackedFrozenObjectElementsAccessor
+// - FastHoleyFrozenObjectElementsAccessor
+// - FastHoleyObjectElementsAccessor
+// - FastDoubleElementsAccessor
+// - FastPackedDoubleElementsAccessor
+// - FastHoleyDoubleElementsAccessor
+// - TypedElementsAccessor: template, with instantiations:
+// - Uint8ElementsAccessor
+// - Int8ElementsAccessor
+// - Uint16ElementsAccessor
+// - Int16ElementsAccessor
+// - Uint32ElementsAccessor
+// - Int32ElementsAccessor
+// - Float32ElementsAccessor
+// - Float64ElementsAccessor
+// - Uint8ClampedElementsAccessor
+// - BigUint64ElementsAccessor
+// - BigInt64ElementsAccessor
+// - DictionaryElementsAccessor
+// - SloppyArgumentsElementsAccessor
+// - FastSloppyArgumentsElementsAccessor
+// - SlowSloppyArgumentsElementsAccessor
+// - StringWrapperElementsAccessor
+// - FastStringWrapperElementsAccessor
+// - SlowStringWrapperElementsAccessor
+
+namespace v8 {
+namespace internal {
+
+namespace {
+
+static const int kPackedSizeNotKnown = -1;
+
+enum Where { AT_START, AT_END };
+
+// First argument in list is the accessor class, the second argument is the
+// accessor ElementsKind, and the third is the backing store class. Use the
+// fast element handler for smi-only arrays. The implementation is currently
+// identical. Note that the order must match that of the ElementsKind enum for
+// the |accessor_array[]| below to work.
+#define ELEMENTS_LIST(V) \
+ V(FastPackedSmiElementsAccessor, PACKED_SMI_ELEMENTS, FixedArray) \
+ V(FastHoleySmiElementsAccessor, HOLEY_SMI_ELEMENTS, FixedArray) \
+ V(FastPackedObjectElementsAccessor, PACKED_ELEMENTS, FixedArray) \
+ V(FastHoleyObjectElementsAccessor, HOLEY_ELEMENTS, FixedArray) \
+ V(FastPackedDoubleElementsAccessor, PACKED_DOUBLE_ELEMENTS, \
+ FixedDoubleArray) \
+ V(FastHoleyDoubleElementsAccessor, HOLEY_DOUBLE_ELEMENTS, FixedDoubleArray) \
+ V(FastPackedSealedObjectElementsAccessor, PACKED_SEALED_ELEMENTS, \
+ FixedArray) \
+ V(FastHoleySealedObjectElementsAccessor, HOLEY_SEALED_ELEMENTS, FixedArray) \
+ V(FastPackedFrozenObjectElementsAccessor, PACKED_FROZEN_ELEMENTS, \
+ FixedArray) \
+ V(FastHoleyFrozenObjectElementsAccessor, HOLEY_FROZEN_ELEMENTS, FixedArray) \
+ V(DictionaryElementsAccessor, DICTIONARY_ELEMENTS, NumberDictionary) \
+ V(FastSloppyArgumentsElementsAccessor, FAST_SLOPPY_ARGUMENTS_ELEMENTS, \
+ FixedArray) \
+ V(SlowSloppyArgumentsElementsAccessor, SLOW_SLOPPY_ARGUMENTS_ELEMENTS, \
+ FixedArray) \
+ V(FastStringWrapperElementsAccessor, FAST_STRING_WRAPPER_ELEMENTS, \
+ FixedArray) \
+ V(SlowStringWrapperElementsAccessor, SLOW_STRING_WRAPPER_ELEMENTS, \
+ FixedArray) \
+ V(Uint8ElementsAccessor, UINT8_ELEMENTS, ByteArray) \
+ V(Int8ElementsAccessor, INT8_ELEMENTS, ByteArray) \
+ V(Uint16ElementsAccessor, UINT16_ELEMENTS, ByteArray) \
+ V(Int16ElementsAccessor, INT16_ELEMENTS, ByteArray) \
+ V(Uint32ElementsAccessor, UINT32_ELEMENTS, ByteArray) \
+ V(Int32ElementsAccessor, INT32_ELEMENTS, ByteArray) \
+ V(Float32ElementsAccessor, FLOAT32_ELEMENTS, ByteArray) \
+ V(Float64ElementsAccessor, FLOAT64_ELEMENTS, ByteArray) \
+ V(Uint8ClampedElementsAccessor, UINT8_CLAMPED_ELEMENTS, ByteArray) \
+ V(BigUint64ElementsAccessor, BIGUINT64_ELEMENTS, ByteArray) \
+ V(BigInt64ElementsAccessor, BIGINT64_ELEMENTS, ByteArray)
+
+template <ElementsKind Kind>
+class ElementsKindTraits {
+ public:
+ using BackingStore = FixedArrayBase;
+};
+
+#define ELEMENTS_TRAITS(Class, KindParam, Store) \
+ template <> \
+ class ElementsKindTraits<KindParam> { \
+ public: /* NOLINT */ \
+ static constexpr ElementsKind Kind = KindParam; \
+ using BackingStore = Store; \
+ }; \
+ constexpr ElementsKind ElementsKindTraits<KindParam>::Kind;
+ELEMENTS_LIST(ELEMENTS_TRAITS)
+#undef ELEMENTS_TRAITS
+
+V8_WARN_UNUSED_RESULT
+MaybeHandle<Object> ThrowArrayLengthRangeError(Isolate* isolate) {
+ THROW_NEW_ERROR(isolate, NewRangeError(MessageTemplate::kInvalidArrayLength),
+ Object);
+}
+
+WriteBarrierMode GetWriteBarrierMode(ElementsKind kind) {
+ if (IsSmiElementsKind(kind)) return SKIP_WRITE_BARRIER;
+ if (IsDoubleElementsKind(kind)) return SKIP_WRITE_BARRIER;
+ return UPDATE_WRITE_BARRIER;
+}
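+
+// Smi-only and double backing stores never contain pointers to other heap
+// objects, so stores into them cannot create references the GC needs to
+// track and the write barrier can safely be skipped.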
+
+void CopyObjectToObjectElements(Isolate* isolate, FixedArrayBase from_base,
+ ElementsKind from_kind, uint32_t from_start,
+ FixedArrayBase to_base, ElementsKind to_kind,
+ uint32_t to_start, int raw_copy_size) {
+ ReadOnlyRoots roots(isolate);
+ DCHECK(to_base.map() != roots.fixed_cow_array_map());
+ DisallowHeapAllocation no_allocation;
+ int copy_size = raw_copy_size;
+ if (raw_copy_size < 0) {
+ DCHECK(raw_copy_size == ElementsAccessor::kCopyToEnd ||
+ raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole);
+ copy_size =
+ Min(from_base.length() - from_start, to_base.length() - to_start);
+ if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) {
+ int start = to_start + copy_size;
+ int length = to_base.length() - start;
+ if (length > 0) {
+ MemsetTagged(FixedArray::cast(to_base).RawFieldOfElementAt(start),
+ roots.the_hole_value(), length);
+ }
+ }
+ }
+ DCHECK((copy_size + static_cast<int>(to_start)) <= to_base.length() &&
+ (copy_size + static_cast<int>(from_start)) <= from_base.length());
+ if (copy_size == 0) return;
+ FixedArray from = FixedArray::cast(from_base);
+ FixedArray to = FixedArray::cast(to_base);
+ DCHECK(IsSmiOrObjectElementsKind(from_kind));
+ DCHECK(IsSmiOrObjectElementsKind(to_kind));
+
+ WriteBarrierMode write_barrier_mode =
+ (IsObjectElementsKind(from_kind) && IsObjectElementsKind(to_kind))
+ ? UPDATE_WRITE_BARRIER
+ : SKIP_WRITE_BARRIER;
+ to.CopyElements(isolate, to_start, from, from_start, copy_size,
+ write_barrier_mode);
+}
+
+static void CopyDictionaryToObjectElements(
+ Isolate* isolate, FixedArrayBase from_base, uint32_t from_start,
+ FixedArrayBase to_base, ElementsKind to_kind, uint32_t to_start,
+ int raw_copy_size) {
+ DisallowHeapAllocation no_allocation;
+ NumberDictionary from = NumberDictionary::cast(from_base);
+ int copy_size = raw_copy_size;
+ if (raw_copy_size < 0) {
+ DCHECK(raw_copy_size == ElementsAccessor::kCopyToEnd ||
+ raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole);
+ copy_size = from.max_number_key() + 1 - from_start;
+ if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) {
+ int start = to_start + copy_size;
+ int length = to_base.length() - start;
+ if (length > 0) {
+ MemsetTagged(FixedArray::cast(to_base).RawFieldOfElementAt(start),
+ ReadOnlyRoots(isolate).the_hole_value(), length);
+ }
+ }
+ }
+ DCHECK(to_base != from_base);
+ DCHECK(IsSmiOrObjectElementsKind(to_kind));
+ if (copy_size == 0) return;
+ FixedArray to = FixedArray::cast(to_base);
+ uint32_t to_length = to.length();
+ if (to_start + copy_size > to_length) {
+ copy_size = to_length - to_start;
+ }
+ WriteBarrierMode write_barrier_mode = GetWriteBarrierMode(to_kind);
+ for (int i = 0; i < copy_size; i++) {
+ int entry = from.FindEntry(isolate, i + from_start);
+ if (entry != NumberDictionary::kNotFound) {
+ Object value = from.ValueAt(entry);
+ DCHECK(!value.IsTheHole(isolate));
+ to.set(i + to_start, value, write_barrier_mode);
+ } else {
+ to.set_the_hole(isolate, i + to_start);
+ }
+ }
+}
+
+// NOTE: this method violates the handlified function signature convention:
+// raw pointer parameters in the function that allocates.
+// See ElementsAccessorBase::CopyElements() for details.
+static void CopyDoubleToObjectElements(Isolate* isolate,
+ FixedArrayBase from_base,
+ uint32_t from_start,
+ FixedArrayBase to_base,
+ uint32_t to_start, int raw_copy_size) {
+ int copy_size = raw_copy_size;
+ if (raw_copy_size < 0) {
+ DisallowHeapAllocation no_allocation;
+ DCHECK(raw_copy_size == ElementsAccessor::kCopyToEnd ||
+ raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole);
+ copy_size =
+ Min(from_base.length() - from_start, to_base.length() - to_start);
+ if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) {
+ // Also initialize the area that will be copied over since HeapNumber
+ // allocation below can cause an incremental marking step, requiring all
+ // existing heap objects to be properly initialized.
+ int start = to_start;
+ int length = to_base.length() - start;
+ if (length > 0) {
+ MemsetTagged(FixedArray::cast(to_base).RawFieldOfElementAt(start),
+ ReadOnlyRoots(isolate).the_hole_value(), length);
+ }
+ }
+ }
+
+ DCHECK((copy_size + static_cast<int>(to_start)) <= to_base.length() &&
+ (copy_size + static_cast<int>(from_start)) <= from_base.length());
+ if (copy_size == 0) return;
+
+ // From here on, the code below could actually allocate. Therefore the raw
+ // values are wrapped into handles.
+ Handle<FixedDoubleArray> from(FixedDoubleArray::cast(from_base), isolate);
+ Handle<FixedArray> to(FixedArray::cast(to_base), isolate);
+
+ // Use an outer loop to not waste too much time on creating HandleScopes.
+ // On the other hand we might overflow a single handle scope depending on
+ // the copy_size.
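+ // Each outer iteration handles a batch of at most 100 elements under its
+ // own HandleScope, bounding the number of live handles at any time.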
+ int offset = 0;
+ while (offset < copy_size) {
+ HandleScope scope(isolate);
+ offset += 100;
+ for (int i = offset - 100; i < offset && i < copy_size; ++i) {
+ Handle<Object> value =
+ FixedDoubleArray::get(*from, i + from_start, isolate);
+ to->set(i + to_start, *value, UPDATE_WRITE_BARRIER);
+ }
+ }
+}
+
+static void CopyDoubleToDoubleElements(FixedArrayBase from_base,
+ uint32_t from_start,
+ FixedArrayBase to_base,
+ uint32_t to_start, int raw_copy_size) {
+ DisallowHeapAllocation no_allocation;
+ int copy_size = raw_copy_size;
+ if (raw_copy_size < 0) {
+ DCHECK(raw_copy_size == ElementsAccessor::kCopyToEnd ||
+ raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole);
+ copy_size =
+ Min(from_base.length() - from_start, to_base.length() - to_start);
+ if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) {
+ for (int i = to_start + copy_size; i < to_base.length(); ++i) {
+ FixedDoubleArray::cast(to_base).set_the_hole(i);
+ }
+ }
+ }
+ DCHECK((copy_size + static_cast<int>(to_start)) <= to_base.length() &&
+ (copy_size + static_cast<int>(from_start)) <= from_base.length());
+ if (copy_size == 0) return;
+ FixedDoubleArray from = FixedDoubleArray::cast(from_base);
+ FixedDoubleArray to = FixedDoubleArray::cast(to_base);
+ Address to_address = to.address() + FixedDoubleArray::kHeaderSize;
+ Address from_address = from.address() + FixedDoubleArray::kHeaderSize;
+ to_address += kDoubleSize * to_start;
+ from_address += kDoubleSize * from_start;
+#ifdef V8_COMPRESS_POINTERS
+ // TODO(ishell, v8:8875): we use CopyTagged() in order to avoid unaligned
+ // access to double values in the arrays. This will no longer be necessary
+ // once the allocation alignment issue is fixed.
+ int words_per_double = (kDoubleSize / kTaggedSize);
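+ // For example, with 8-byte doubles and 4-byte tagged slots this copies two
+ // tagged words per double element.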
+ CopyTagged(to_address, from_address,
+ static_cast<size_t>(words_per_double * copy_size));
+#else
+ int words_per_double = (kDoubleSize / kSystemPointerSize);
+ CopyWords(to_address, from_address,
+ static_cast<size_t>(words_per_double * copy_size));
+#endif
+}
+
+static void CopySmiToDoubleElements(FixedArrayBase from_base,
+ uint32_t from_start, FixedArrayBase to_base,
+ uint32_t to_start, int raw_copy_size) {
+ DisallowHeapAllocation no_allocation;
+ int copy_size = raw_copy_size;
+ if (raw_copy_size < 0) {
+ DCHECK(raw_copy_size == ElementsAccessor::kCopyToEnd ||
+ raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole);
+ copy_size = from_base.length() - from_start;
+ if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) {
+ for (int i = to_start + copy_size; i < to_base.length(); ++i) {
+ FixedDoubleArray::cast(to_base).set_the_hole(i);
+ }
+ }
+ }
+ DCHECK((copy_size + static_cast<int>(to_start)) <= to_base.length() &&
+ (copy_size + static_cast<int>(from_start)) <= from_base.length());
+ if (copy_size == 0) return;
+ FixedArray from = FixedArray::cast(from_base);
+ FixedDoubleArray to = FixedDoubleArray::cast(to_base);
+ Object the_hole = from.GetReadOnlyRoots().the_hole_value();
+ for (uint32_t from_end = from_start + static_cast<uint32_t>(copy_size);
+ from_start < from_end; from_start++, to_start++) {
+ Object hole_or_smi = from.get(from_start);
+ if (hole_or_smi == the_hole) {
+ to.set_the_hole(to_start);
+ } else {
+ to.set(to_start, Smi::ToInt(hole_or_smi));
+ }
+ }
+}
+
+static void CopyPackedSmiToDoubleElements(FixedArrayBase from_base,
+ uint32_t from_start,
+ FixedArrayBase to_base,
+ uint32_t to_start, int packed_size,
+ int raw_copy_size) {
+ DisallowHeapAllocation no_allocation;
+ int copy_size = raw_copy_size;
+ uint32_t to_end;
+ if (raw_copy_size < 0) {
+ DCHECK(raw_copy_size == ElementsAccessor::kCopyToEnd ||
+ raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole);
+ copy_size = packed_size - from_start;
+ if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) {
+ to_end = to_base.length();
+ for (uint32_t i = to_start + copy_size; i < to_end; ++i) {
+ FixedDoubleArray::cast(to_base).set_the_hole(i);
+ }
+ } else {
+ to_end = to_start + static_cast<uint32_t>(copy_size);
+ }
+ } else {
+ to_end = to_start + static_cast<uint32_t>(copy_size);
+ }
+ DCHECK(static_cast<int>(to_end) <= to_base.length());
+ DCHECK(packed_size >= 0 && packed_size <= copy_size);
+ DCHECK((copy_size + static_cast<int>(to_start)) <= to_base.length() &&
+ (copy_size + static_cast<int>(from_start)) <= from_base.length());
+ if (copy_size == 0) return;
+ FixedArray from = FixedArray::cast(from_base);
+ FixedDoubleArray to = FixedDoubleArray::cast(to_base);
+ for (uint32_t from_end = from_start + static_cast<uint32_t>(packed_size);
+ from_start < from_end; from_start++, to_start++) {
+ Object smi = from.get(from_start);
+ DCHECK(!smi.IsTheHole());
+ to.set(to_start, Smi::ToInt(smi));
+ }
+}
+
+static void CopyObjectToDoubleElements(FixedArrayBase from_base,
+ uint32_t from_start,
+ FixedArrayBase to_base,
+ uint32_t to_start, int raw_copy_size) {
+ DisallowHeapAllocation no_allocation;
+ int copy_size = raw_copy_size;
+ if (raw_copy_size < 0) {
+ DCHECK(raw_copy_size == ElementsAccessor::kCopyToEnd ||
+ raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole);
+ copy_size = from_base.length() - from_start;
+ if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) {
+ for (int i = to_start + copy_size; i < to_base.length(); ++i) {
+ FixedDoubleArray::cast(to_base).set_the_hole(i);
+ }
+ }
+ }
+ DCHECK((copy_size + static_cast<int>(to_start)) <= to_base.length() &&
+ (copy_size + static_cast<int>(from_start)) <= from_base.length());
+ if (copy_size == 0) return;
+ FixedArray from = FixedArray::cast(from_base);
+ FixedDoubleArray to = FixedDoubleArray::cast(to_base);
+ Object the_hole = from.GetReadOnlyRoots().the_hole_value();
+ for (uint32_t from_end = from_start + copy_size; from_start < from_end;
+ from_start++, to_start++) {
+ Object hole_or_object = from.get(from_start);
+ if (hole_or_object == the_hole) {
+ to.set_the_hole(to_start);
+ } else {
+ to.set(to_start, hole_or_object.Number());
+ }
+ }
+}
+
+static void CopyDictionaryToDoubleElements(
+ Isolate* isolate, FixedArrayBase from_base, uint32_t from_start,
+ FixedArrayBase to_base, uint32_t to_start, int raw_copy_size) {
+ DisallowHeapAllocation no_allocation;
+ NumberDictionary from = NumberDictionary::cast(from_base);
+ int copy_size = raw_copy_size;
+ if (copy_size < 0) {
+ DCHECK(copy_size == ElementsAccessor::kCopyToEnd ||
+ copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole);
+ copy_size = from.max_number_key() + 1 - from_start;
+ if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) {
+ for (int i = to_start + copy_size; i < to_base.length(); ++i) {
+ FixedDoubleArray::cast(to_base).set_the_hole(i);
+ }
+ }
+ }
+ if (copy_size == 0) return;
+ FixedDoubleArray to = FixedDoubleArray::cast(to_base);
+ uint32_t to_length = to.length();
+ if (to_start + copy_size > to_length) {
+ copy_size = to_length - to_start;
+ }
+ for (int i = 0; i < copy_size; i++) {
+ int entry = from.FindEntry(isolate, i + from_start);
+ if (entry != NumberDictionary::kNotFound) {
+ to.set(i + to_start, from.ValueAt(entry).Number());
+ } else {
+ to.set_the_hole(i + to_start);
+ }
+ }
+}
+
+static void SortIndices(Isolate* isolate, Handle<FixedArray> indices,
+ uint32_t sort_size) {
+ // Use AtomicSlot wrapper to ensure that std::sort uses atomic load and
+ // store operations that are safe for concurrent marking.
+ AtomicSlot start(indices->GetFirstElementAddress());
+ AtomicSlot end(start + sort_size);
+ std::sort(start, end, [isolate](Tagged_t elementA, Tagged_t elementB) {
+#ifdef V8_COMPRESS_POINTERS
+ DEFINE_ROOT_VALUE(isolate);
+ Object a(DecompressTaggedAny(ROOT_VALUE, elementA));
+ Object b(DecompressTaggedAny(ROOT_VALUE, elementB));
+#else
+ Object a(elementA);
+ Object b(elementB);
+#endif
+ if (a.IsSmi() || !a.IsUndefined(isolate)) {
+ if (!b.IsSmi() && b.IsUndefined(isolate)) {
+ return true;
+ }
+ return a.Number() < b.Number();
+ }
+ return !b.IsSmi() && b.IsUndefined(isolate);
+ });
+ isolate->heap()->WriteBarrierForRange(*indices, ObjectSlot(start),
+ ObjectSlot(end));
+}
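+
+// The comparator above orders numeric values ascending and sorts undefined
+// entries after all numbers, so the sorted prefix of |indices| holds the
+// numeric keys.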
+
+static Maybe<bool> IncludesValueSlowPath(Isolate* isolate,
+ Handle<JSObject> receiver,
+ Handle<Object> value,
+ uint32_t start_from, uint32_t length) {
+ bool search_for_hole = value->IsUndefined(isolate);
+ for (uint32_t k = start_from; k < length; ++k) {
+ LookupIterator it(isolate, receiver, k);
+ if (!it.IsFound()) {
+ if (search_for_hole) return Just(true);
+ continue;
+ }
+ Handle<Object> element_k;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, element_k,
+ Object::GetProperty(&it), Nothing<bool>());
+
+ if (value->SameValueZero(*element_k)) return Just(true);
+ }
+
+ return Just(false);
+}
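+
+// Note: when |value| is undefined, encountering a missing element makes the
+// slow path return true immediately; this mirrors Array.prototype.includes,
+// which treats holes as undefined.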
+
+static Maybe<int64_t> IndexOfValueSlowPath(Isolate* isolate,
+ Handle<JSObject> receiver,
+ Handle<Object> value,
+ uint32_t start_from,
+ uint32_t length) {
+ for (uint32_t k = start_from; k < length; ++k) {
+ LookupIterator it(isolate, receiver, k);
+ if (!it.IsFound()) {
+ continue;
+ }
+ Handle<Object> element_k;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, element_k, Object::GetProperty(&it), Nothing<int64_t>());
+
+ if (value->StrictEquals(*element_k)) return Just<int64_t>(k);
+ }
+
+ return Just<int64_t>(-1);
+}
+
+// The InternalElementsAccessor is a helper class to expose otherwise protected
+// methods to its subclasses. Namely, we don't want to publicly expose methods
+// that take an entry (instead of an index) as an argument.
+class InternalElementsAccessor : public ElementsAccessor {
+ public:
+ uint32_t GetEntryForIndex(Isolate* isolate, JSObject holder,
+ FixedArrayBase backing_store,
+ uint32_t index) override = 0;
+
+ PropertyDetails GetDetails(JSObject holder, uint32_t entry) override = 0;
+};
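+
+// Here an "index" is the user-visible element index, while an "entry" is a
+// position inside the backing store; the two differ e.g. for dictionary
+// elements, which is why the entry-based accessors are kept protected.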
+
+// Base class for element handler implementations. Contains the
+// common logic for objects with different ElementsKinds.
+// Subclasses must specialize methods for which the element
+// implementation differs from the base class implementation.
+//
+// This class is intended to be used in the following way:
+//
+// class SomeElementsAccessor :
+// public ElementsAccessorBase<SomeElementsAccessor,
+// BackingStoreClass> {
+// ...
+// }
+//
+// This is an example of the Curiously Recurring Template Pattern (see
+// http://en.wikipedia.org/wiki/Curiously_recurring_template_pattern). We use
+// CRTP to guarantee aggressive compile time optimizations (i.e. inlining and
+// specialization of SomeElementsAccessor methods).
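+//
+// A minimal sketch of the pattern (hypothetical names, not part of V8):
+//
+//   template <typename Subclass>
+//   class Base {
+//    public:
+//     int Get(int entry) { return Subclass::GetImpl(entry); }
+//   };
+//   class Concrete : public Base<Concrete> {
+//    public:
+//     static int GetImpl(int entry) { return entry; }  // kind-specific code
+//   };
+//
+// Base::Get() binds Subclass::GetImpl() at compile time, so the call can be
+// inlined instead of going through a virtual dispatch.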
+template <typename Subclass, typename ElementsTraitsParam>
+class ElementsAccessorBase : public InternalElementsAccessor {
+ public:
+ ElementsAccessorBase() = default;
+
+ using ElementsTraits = ElementsTraitsParam;
+ using BackingStore = typename ElementsTraitsParam::BackingStore;
+
+ static ElementsKind kind() { return ElementsTraits::Kind; }
+
+ static void ValidateContents(JSObject holder, int length) {}
+
+ static void ValidateImpl(JSObject holder) {
+ FixedArrayBase fixed_array_base = holder.elements();
+ if (!fixed_array_base.IsHeapObject()) return;
+ // Arrays that have been shifted in place can't be verified.
+ if (fixed_array_base.IsFiller()) return;
+ int length = 0;
+ if (holder.IsJSArray()) {
+ Object length_obj = JSArray::cast(holder).length();
+ if (length_obj.IsSmi()) {
+ length = Smi::ToInt(length_obj);
+ }
+ } else if (holder.IsJSTypedArray()) {
+ // TODO(bmeurer, v8:4153): Change this to size_t later.
+ length = static_cast<int>(JSTypedArray::cast(holder).length());
+ } else {
+ length = fixed_array_base.length();
+ }
+ Subclass::ValidateContents(holder, length);
+ }
+
+ void Validate(JSObject holder) final {
+ DisallowHeapAllocation no_gc;
+ Subclass::ValidateImpl(holder);
+ }
+
+ static bool IsPackedImpl(JSObject holder, FixedArrayBase backing_store,
+ uint32_t start, uint32_t end) {
+ DisallowHeapAllocation no_gc;
+ if (IsFastPackedElementsKind(kind())) return true;
+ Isolate* isolate = holder.GetIsolate();
+ for (uint32_t i = start; i < end; i++) {
+ if (!Subclass::HasElementImpl(isolate, holder, i, backing_store,
+ ALL_PROPERTIES)) {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ static void TryTransitionResultArrayToPacked(Handle<JSArray> array) {
+ if (!IsHoleyElementsKind(kind())) return;
+ Handle<FixedArrayBase> backing_store(array->elements(),
+ array->GetIsolate());
+ int length = Smi::ToInt(array->length());
+ if (!Subclass::IsPackedImpl(*array, *backing_store, 0, length)) return;
+
+ ElementsKind packed_kind = GetPackedElementsKind(kind());
+ Handle<Map> new_map =
+ JSObject::GetElementsTransitionMap(array, packed_kind);
+ JSObject::MigrateToMap(array, new_map);
+ if (FLAG_trace_elements_transitions) {
+ JSObject::PrintElementsTransition(stdout, array, kind(), backing_store,
+ packed_kind, backing_store);
+ }
+ }
+
+ bool HasElement(JSObject holder, uint32_t index, FixedArrayBase backing_store,
+ PropertyFilter filter) final {
+ return Subclass::HasElementImpl(holder.GetIsolate(), holder, index,
+ backing_store, filter);
+ }
+
+ static bool HasElementImpl(Isolate* isolate, JSObject holder, uint32_t index,
+ FixedArrayBase backing_store,
+ PropertyFilter filter = ALL_PROPERTIES) {
+ return Subclass::GetEntryForIndexImpl(isolate, holder, backing_store, index,
+ filter) != kMaxUInt32;
+ }
+
+ bool HasEntry(JSObject holder, uint32_t entry) final {
+ return Subclass::HasEntryImpl(holder.GetIsolate(), holder.elements(),
+ entry);
+ }
+
+ static bool HasEntryImpl(Isolate* isolate, FixedArrayBase backing_store,
+ uint32_t entry) {
+ UNIMPLEMENTED();
+ }
+
+ bool HasAccessors(JSObject holder) final {
+ return Subclass::HasAccessorsImpl(holder, holder.elements());
+ }
+
+ static bool HasAccessorsImpl(JSObject holder, FixedArrayBase backing_store) {
+ return false;
+ }
+
+ Handle<Object> Get(Handle<JSObject> holder, uint32_t entry) final {
+ return Subclass::GetInternalImpl(holder, entry);
+ }
+
+ static Handle<Object> GetInternalImpl(Handle<JSObject> holder,
+ uint32_t entry) {
+ return Subclass::GetImpl(holder->GetIsolate(), holder->elements(), entry);
+ }
+
+ static Handle<Object> GetImpl(Isolate* isolate, FixedArrayBase backing_store,
+ uint32_t entry) {
+ uint32_t index = GetIndexForEntryImpl(backing_store, entry);
+ return handle(BackingStore::cast(backing_store).get(index), isolate);
+ }
+
+ void Set(Handle<JSObject> holder, uint32_t entry, Object value) final {
+ Subclass::SetImpl(holder, entry, value);
+ }
+
+ void Reconfigure(Handle<JSObject> object, Handle<FixedArrayBase> store,
+ uint32_t entry, Handle<Object> value,
+ PropertyAttributes attributes) final {
+ Subclass::ReconfigureImpl(object, store, entry, value, attributes);
+ }
+
+ static void ReconfigureImpl(Handle<JSObject> object,
+ Handle<FixedArrayBase> store, uint32_t entry,
+ Handle<Object> value,
+ PropertyAttributes attributes) {
+ UNREACHABLE();
+ }
+
+ void Add(Handle<JSObject> object, uint32_t index, Handle<Object> value,
+ PropertyAttributes attributes, uint32_t new_capacity) final {
+ Subclass::AddImpl(object, index, value, attributes, new_capacity);
+ }
+
+ static void AddImpl(Handle<JSObject> object, uint32_t index,
+ Handle<Object> value, PropertyAttributes attributes,
+ uint32_t new_capacity) {
+ UNREACHABLE();
+ }
+
+ uint32_t Push(Handle<JSArray> receiver, Arguments* args,
+ uint32_t push_size) final {
+ return Subclass::PushImpl(receiver, args, push_size);
+ }
+
+ static uint32_t PushImpl(Handle<JSArray> receiver, Arguments* args,
+ uint32_t push_size) {
+ UNREACHABLE();
+ }
+
+ uint32_t Unshift(Handle<JSArray> receiver, Arguments* args,
+ uint32_t unshift_size) final {
+ return Subclass::UnshiftImpl(receiver, args, unshift_size);
+ }
+
+ static uint32_t UnshiftImpl(Handle<JSArray> receiver, Arguments* args,
+ uint32_t unshift_size) {
+ UNREACHABLE();
+ }
+
+ Handle<Object> Pop(Handle<JSArray> receiver) final {
+ return Subclass::PopImpl(receiver);
+ }
+
+ static Handle<Object> PopImpl(Handle<JSArray> receiver) { UNREACHABLE(); }
+
+ Handle<Object> Shift(Handle<JSArray> receiver) final {
+ return Subclass::ShiftImpl(receiver);
+ }
+
+ static Handle<Object> ShiftImpl(Handle<JSArray> receiver) { UNREACHABLE(); }
+
+ void SetLength(Handle<JSArray> array, uint32_t length) final {
+ Subclass::SetLengthImpl(array->GetIsolate(), array, length,
+ handle(array->elements(), array->GetIsolate()));
+ }
+
+ static void SetLengthImpl(Isolate* isolate, Handle<JSArray> array,
+ uint32_t length,
+ Handle<FixedArrayBase> backing_store) {
+ DCHECK(!array->SetLengthWouldNormalize(length));
+ DCHECK(IsFastElementsKind(array->GetElementsKind()));
+ uint32_t old_length = 0;
+ CHECK(array->length().ToArrayIndex(&old_length));
+
+ if (old_length < length) {
+ ElementsKind kind = array->GetElementsKind();
+ if (!IsHoleyElementsKind(kind)) {
+ kind = GetHoleyElementsKind(kind);
+ JSObject::TransitionElementsKind(array, kind);
+ }
+ }
+
+ // Check whether the backing store should be shrunk.
+ uint32_t capacity = backing_store->length();
+ old_length = Min(old_length, capacity);
+ if (length == 0) {
+ array->initialize_elements();
+ } else if (length <= capacity) {
+ if (IsSmiOrObjectElementsKind(kind())) {
+ JSObject::EnsureWritableFastElements(array);
+ if (array->elements() != *backing_store) {
+ backing_store = handle(array->elements(), isolate);
+ }
+ }
+ if (2 * length + JSObject::kMinAddedElementsCapacity <= capacity) {
+ // If more than half the elements won't be used, trim the array.
+ // Do not trim from short arrays to prevent frequent trimming on
+ // repeated pop operations.
+ // Leave some space to allow for subsequent push operations.
+ int elements_to_trim = length + 1 == old_length
+ ? (capacity - length) / 2
+ : capacity - length;
+ isolate->heap()->RightTrimFixedArray(*backing_store, elements_to_trim);
+ // Fill the non-trimmed elements with holes.
+ BackingStore::cast(*backing_store)
+ .FillWithHoles(length,
+ std::min(old_length, capacity - elements_to_trim));
+ } else {
+ // Otherwise, fill the unused tail with holes.
+ BackingStore::cast(*backing_store).FillWithHoles(length, old_length);
+ }
+ } else {
+ // Check whether the backing store should be expanded.
+ capacity = Max(length, JSObject::NewElementsCapacity(capacity));
+ Subclass::GrowCapacityAndConvertImpl(array, capacity);
+ }
+
+ array->set_length(Smi::FromInt(length));
+ JSObject::ValidateElements(*array);
+ }
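+
+ // SetLengthImpl() shrinking example: with a backing store of capacity 100,
+ // lowering the length from 42 to 41 takes the repeated-pop branch and trims
+ // (100 - 41) / 2 = 29 elements, leaving headroom for later pushes, while
+ // lowering it straight to 10 trims the whole 90-element tail.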
+
+ uint32_t NumberOfElements(JSObject receiver) final {
+ return Subclass::NumberOfElementsImpl(receiver, receiver.elements());
+ }
+
+ static uint32_t NumberOfElementsImpl(JSObject receiver,
+ FixedArrayBase backing_store) {
+ UNREACHABLE();
+ }
+
+ static uint32_t GetMaxIndex(JSObject receiver, FixedArrayBase elements) {
+ if (receiver.IsJSArray()) {
+ DCHECK(JSArray::cast(receiver).length().IsSmi());
+ return static_cast<uint32_t>(
+ Smi::ToInt(JSArray::cast(receiver).length()));
+ }
+ return Subclass::GetCapacityImpl(receiver, elements);
+ }
+
+ static uint32_t GetMaxNumberOfEntries(JSObject receiver,
+ FixedArrayBase elements) {
+ return Subclass::GetMaxIndex(receiver, elements);
+ }
+
+ static Handle<FixedArrayBase> ConvertElementsWithCapacity(
+ Handle<JSObject> object, Handle<FixedArrayBase> old_elements,
+ ElementsKind from_kind, uint32_t capacity) {
+ return ConvertElementsWithCapacity(
+ object, old_elements, from_kind, capacity, 0, 0,
+ ElementsAccessor::kCopyToEndAndInitializeToHole);
+ }
+
+ static Handle<FixedArrayBase> ConvertElementsWithCapacity(
+ Handle<JSObject> object, Handle<FixedArrayBase> old_elements,
+ ElementsKind from_kind, uint32_t capacity, int copy_size) {
+ return ConvertElementsWithCapacity(object, old_elements, from_kind,
+ capacity, 0, 0, copy_size);
+ }
+
+ static Handle<FixedArrayBase> ConvertElementsWithCapacity(
+ Handle<JSObject> object, Handle<FixedArrayBase> old_elements,
+ ElementsKind from_kind, uint32_t capacity, uint32_t src_index,
+ uint32_t dst_index, int copy_size) {
+ Isolate* isolate = object->GetIsolate();
+ Handle<FixedArrayBase> new_elements;
+ if (IsDoubleElementsKind(kind())) {
+ new_elements = isolate->factory()->NewFixedDoubleArray(capacity);
+ } else {
+ new_elements = isolate->factory()->NewUninitializedFixedArray(capacity);
+ }
+
+ int packed_size = kPackedSizeNotKnown;
+ if (IsFastPackedElementsKind(from_kind) && object->IsJSArray()) {
+ packed_size = Smi::ToInt(JSArray::cast(*object).length());
+ }
+
+ Subclass::CopyElementsImpl(isolate, *old_elements, src_index, *new_elements,
+ from_kind, dst_index, packed_size, copy_size);
+
+ return new_elements;
+ }
+
+ static void TransitionElementsKindImpl(Handle<JSObject> object,
+ Handle<Map> to_map) {
+ Handle<Map> from_map = handle(object->map(), object->GetIsolate());
+ ElementsKind from_kind = from_map->elements_kind();
+ ElementsKind to_kind = to_map->elements_kind();
+ if (IsHoleyElementsKind(from_kind)) {
+ to_kind = GetHoleyElementsKind(to_kind);
+ }
+ if (from_kind != to_kind) {
+ // This method should never be called for any other case.
+ DCHECK(IsFastElementsKind(from_kind));
+ DCHECK(IsFastElementsKind(to_kind));
+ DCHECK_NE(TERMINAL_FAST_ELEMENTS_KIND, from_kind);
+
+ Handle<FixedArrayBase> from_elements(object->elements(),
+ object->GetIsolate());
+ if (object->elements() ==
+ object->GetReadOnlyRoots().empty_fixed_array() ||
+ IsDoubleElementsKind(from_kind) == IsDoubleElementsKind(to_kind)) {
+ // No change is needed to the elements() buffer, the transition
+ // only requires a map change.
+ JSObject::MigrateToMap(object, to_map);
+ } else {
+ DCHECK(
+ (IsSmiElementsKind(from_kind) && IsDoubleElementsKind(to_kind)) ||
+ (IsDoubleElementsKind(from_kind) && IsObjectElementsKind(to_kind)));
+ uint32_t capacity = static_cast<uint32_t>(object->elements().length());
+ Handle<FixedArrayBase> elements = ConvertElementsWithCapacity(
+ object, from_elements, from_kind, capacity);
+ JSObject::SetMapAndElements(object, to_map, elements);
+ }
+ if (FLAG_trace_elements_transitions) {
+ JSObject::PrintElementsTransition(
+ stdout, object, from_kind, from_elements, to_kind,
+ handle(object->elements(), object->GetIsolate()));
+ }
+ }
+ }
+
+ static void GrowCapacityAndConvertImpl(Handle<JSObject> object,
+ uint32_t capacity) {
+ ElementsKind from_kind = object->GetElementsKind();
+ if (IsSmiOrObjectElementsKind(from_kind)) {
+ // Array optimizations rely on the prototype lookups of Array objects
+ // always returning undefined. If there is a store to the initial
+ // prototype object, make sure all of these optimizations are invalidated.
+ object->GetIsolate()->UpdateNoElementsProtectorOnSetLength(object);
+ }
+ Handle<FixedArrayBase> old_elements(object->elements(),
+ object->GetIsolate());
+ // This method should only be called if there's a reason to update the
+ // elements.
+ DCHECK(IsDoubleElementsKind(from_kind) != IsDoubleElementsKind(kind()) ||
+ IsDictionaryElementsKind(from_kind) ||
+ static_cast<uint32_t>(old_elements->length()) < capacity);
+ Subclass::BasicGrowCapacityAndConvertImpl(object, old_elements, from_kind,
+ kind(), capacity);
+ }
+
+ static void BasicGrowCapacityAndConvertImpl(
+ Handle<JSObject> object, Handle<FixedArrayBase> old_elements,
+ ElementsKind from_kind, ElementsKind to_kind, uint32_t capacity) {
+ Handle<FixedArrayBase> elements =
+ ConvertElementsWithCapacity(object, old_elements, from_kind, capacity);
+
+ if (IsHoleyElementsKind(from_kind)) {
+ to_kind = GetHoleyElementsKind(to_kind);
+ }
+ Handle<Map> new_map = JSObject::GetElementsTransitionMap(object, to_kind);
+ JSObject::SetMapAndElements(object, new_map, elements);
+
+ // Transition through the allocation site as well if present.
+ JSObject::UpdateAllocationSite(object, to_kind);
+
+ if (FLAG_trace_elements_transitions) {
+ JSObject::PrintElementsTransition(stdout, object, from_kind, old_elements,
+ to_kind, elements);
+ }
+ }
+
+ void TransitionElementsKind(Handle<JSObject> object, Handle<Map> map) final {
+ Subclass::TransitionElementsKindImpl(object, map);
+ }
+
+ void GrowCapacityAndConvert(Handle<JSObject> object,
+ uint32_t capacity) final {
+ Subclass::GrowCapacityAndConvertImpl(object, capacity);
+ }
+
+ bool GrowCapacity(Handle<JSObject> object, uint32_t index) final {
+ // This function is intended to be called from optimized code. We don't
+ // want to trigger lazy deopts there, so refuse to handle cases that would.
+ if (object->map().is_prototype_map() ||
+ object->WouldConvertToSlowElements(index)) {
+ return false;
+ }
+ Handle<FixedArrayBase> old_elements(object->elements(),
+ object->GetIsolate());
+ uint32_t new_capacity = JSObject::NewElementsCapacity(index + 1);
+ DCHECK(static_cast<uint32_t>(old_elements->length()) < new_capacity);
+ Handle<FixedArrayBase> elements =
+ ConvertElementsWithCapacity(object, old_elements, kind(), new_capacity);
+
+ DCHECK_EQ(object->GetElementsKind(), kind());
+ // Transition through the allocation site as well if present.
+ if (JSObject::UpdateAllocationSite<AllocationSiteUpdateMode::kCheckOnly>(
+ object, kind())) {
+ return false;
+ }
+
+ object->set_elements(*elements);
+ return true;
+ }
+
+ void Delete(Handle<JSObject> obj, uint32_t entry) final {
+ Subclass::DeleteImpl(obj, entry);
+ }
+
+ static void CopyElementsImpl(Isolate* isolate, FixedArrayBase from,
+ uint32_t from_start, FixedArrayBase to,
+ ElementsKind from_kind, uint32_t to_start,
+ int packed_size, int copy_size) {
+ UNREACHABLE();
+ }
+
+ void CopyElements(JSObject from_holder, uint32_t from_start,
+ ElementsKind from_kind, Handle<FixedArrayBase> to,
+ uint32_t to_start, int copy_size) final {
+ int packed_size = kPackedSizeNotKnown;
+ bool is_packed =
+ IsFastPackedElementsKind(from_kind) && from_holder.IsJSArray();
+ if (is_packed) {
+ packed_size = Smi::ToInt(JSArray::cast(from_holder).length());
+ if (copy_size >= 0 && packed_size > copy_size) {
+ packed_size = copy_size;
+ }
+ }
+ FixedArrayBase from = from_holder.elements();
+ // NOTE: the Subclass::CopyElementsImpl() methods
+ // violate the handlified function signature convention:
+ // raw pointer parameters in the function that allocates. This is done
+ // intentionally to avoid ArrayConcat() builtin performance degradation.
+ //
+ // Details: The idea is that allocations actually happen only in case of
+ // copying from object with fast double elements to object with object
+ // elements. In all the other cases there are no allocations performed and
+ // handle creation causes noticeable performance degradation of the builtin.
+ Subclass::CopyElementsImpl(from_holder.GetIsolate(), from, from_start, *to,
+ from_kind, to_start, packed_size, copy_size);
+ }
+
+ void CopyElements(Isolate* isolate, Handle<FixedArrayBase> source,
+ ElementsKind source_kind,
+ Handle<FixedArrayBase> destination, int size) override {
+ Subclass::CopyElementsImpl(isolate, *source, 0, *destination, source_kind,
+ 0, kPackedSizeNotKnown, size);
+ }
+
+ void CopyTypedArrayElementsSlice(JSTypedArray source,
+ JSTypedArray destination, size_t start,
+ size_t end) override {
+ Subclass::CopyTypedArrayElementsSliceImpl(source, destination, start, end);
+ }
+
+ static void CopyTypedArrayElementsSliceImpl(JSTypedArray source,
+ JSTypedArray destination,
+ size_t start, size_t end) {
+ UNREACHABLE();
+ }
+
+ Object CopyElements(Handle<Object> source, Handle<JSObject> destination,
+ size_t length, uint32_t offset) final {
+ return Subclass::CopyElementsHandleImpl(source, destination, length,
+ offset);
+ }
+
+ static Object CopyElementsHandleImpl(Handle<Object> source,
+ Handle<JSObject> destination,
+ size_t length, uint32_t offset) {
+ UNREACHABLE();
+ }
+
+ Handle<NumberDictionary> Normalize(Handle<JSObject> object) final {
+ return Subclass::NormalizeImpl(
+ object, handle(object->elements(), object->GetIsolate()));
+ }
+
+ static Handle<NumberDictionary> NormalizeImpl(
+ Handle<JSObject> object, Handle<FixedArrayBase> elements) {
+ UNREACHABLE();
+ }
+
+ Maybe<bool> CollectValuesOrEntries(Isolate* isolate, Handle<JSObject> object,
+ Handle<FixedArray> values_or_entries,
+ bool get_entries, int* nof_items,
+ PropertyFilter filter) override {
+ return Subclass::CollectValuesOrEntriesImpl(
+ isolate, object, values_or_entries, get_entries, nof_items, filter);
+ }
+
+ static Maybe<bool> CollectValuesOrEntriesImpl(
+ Isolate* isolate, Handle<JSObject> object,
+ Handle<FixedArray> values_or_entries, bool get_entries, int* nof_items,
+ PropertyFilter filter) {
+ DCHECK_EQ(*nof_items, 0);
+ KeyAccumulator accumulator(isolate, KeyCollectionMode::kOwnOnly,
+ ALL_PROPERTIES);
+ Subclass::CollectElementIndicesImpl(
+ object, handle(object->elements(), isolate), &accumulator);
+ Handle<FixedArray> keys = accumulator.GetKeys();
+
+ int count = 0;
+ int i = 0;
+ ElementsKind original_elements_kind = object->GetElementsKind();
+
+ for (; i < keys->length(); ++i) {
+ Handle<Object> key(keys->get(i), isolate);
+ uint32_t index;
+ if (!key->ToUint32(&index)) continue;
+
+ DCHECK_EQ(object->GetElementsKind(), original_elements_kind);
+ uint32_t entry = Subclass::GetEntryForIndexImpl(
+ isolate, *object, object->elements(), index, filter);
+ if (entry == kMaxUInt32) continue;
+ PropertyDetails details = Subclass::GetDetailsImpl(*object, entry);
+
+ Handle<Object> value;
+ if (details.kind() == kData) {
+ value = Subclass::GetInternalImpl(object, entry);
+ } else {
+ // This might modify the elements and/or change the elements kind.
+ LookupIterator it(isolate, object, index, LookupIterator::OWN);
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, value, Object::GetProperty(&it), Nothing<bool>());
+ }
+ if (get_entries) value = MakeEntryPair(isolate, index, value);
+ values_or_entries->set(count++, *value);
+ if (object->GetElementsKind() != original_elements_kind) break;
+ }
+
+ // Slow path caused by changes in elements kind during iteration.
+ for (; i < keys->length(); i++) {
+ Handle<Object> key(keys->get(i), isolate);
+ uint32_t index;
+ if (!key->ToUint32(&index)) continue;
+
+ if (filter & ONLY_ENUMERABLE) {
+ InternalElementsAccessor* accessor =
+ reinterpret_cast<InternalElementsAccessor*>(
+ object->GetElementsAccessor());
+ uint32_t entry = accessor->GetEntryForIndex(isolate, *object,
+ object->elements(), index);
+ if (entry == kMaxUInt32) continue;
+ PropertyDetails details = accessor->GetDetails(*object, entry);
+ if (!details.IsEnumerable()) continue;
+ }
+
+ Handle<Object> value;
+ LookupIterator it(isolate, object, index, LookupIterator::OWN);
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, value, Object::GetProperty(&it),
+ Nothing<bool>());
+
+ if (get_entries) value = MakeEntryPair(isolate, index, value);
+ values_or_entries->set(count++, *value);
+ }
+
+ *nof_items = count;
+ return Just(true);
+ }
+
+ void CollectElementIndices(Handle<JSObject> object,
+ Handle<FixedArrayBase> backing_store,
+ KeyAccumulator* keys) final {
+ if (keys->filter() & ONLY_ALL_CAN_READ) return;
+ Subclass::CollectElementIndicesImpl(object, backing_store, keys);
+ }
+
+ static void CollectElementIndicesImpl(Handle<JSObject> object,
+ Handle<FixedArrayBase> backing_store,
+ KeyAccumulator* keys) {
+ DCHECK_NE(DICTIONARY_ELEMENTS, kind());
+ // Non-dictionary elements can't have all-can-read accessors.
+ uint32_t length = Subclass::GetMaxIndex(*object, *backing_store);
+ PropertyFilter filter = keys->filter();
+ Isolate* isolate = keys->isolate();
+ Factory* factory = isolate->factory();
+ for (uint32_t i = 0; i < length; i++) {
+ if (Subclass::HasElementImpl(isolate, *object, i, *backing_store,
+ filter)) {
+ keys->AddKey(factory->NewNumberFromUint(i));
+ }
+ }
+ }
+
+ static Handle<FixedArray> DirectCollectElementIndicesImpl(
+ Isolate* isolate, Handle<JSObject> object,
+ Handle<FixedArrayBase> backing_store, GetKeysConversion convert,
+ PropertyFilter filter, Handle<FixedArray> list, uint32_t* nof_indices,
+ uint32_t insertion_index = 0) {
+ uint32_t length = Subclass::GetMaxIndex(*object, *backing_store);
+ uint32_t const kMaxStringTableEntries =
+ isolate->heap()->MaxNumberToStringCacheSize();
+ for (uint32_t i = 0; i < length; i++) {
+ if (Subclass::HasElementImpl(isolate, *object, i, *backing_store,
+ filter)) {
+ if (convert == GetKeysConversion::kConvertToString) {
+ bool use_cache = i < kMaxStringTableEntries;
+ Handle<String> index_string =
+ isolate->factory()->Uint32ToString(i, use_cache);
+ list->set(insertion_index, *index_string);
+ } else {
+ list->set(insertion_index, Smi::FromInt(i));
+ }
+ insertion_index++;
+ }
+ }
+ *nof_indices = insertion_index;
+ return list;
+ }
+
+ MaybeHandle<FixedArray> PrependElementIndices(
+ Handle<JSObject> object, Handle<FixedArrayBase> backing_store,
+ Handle<FixedArray> keys, GetKeysConversion convert,
+ PropertyFilter filter) final {
+ return Subclass::PrependElementIndicesImpl(object, backing_store, keys,
+ convert, filter);
+ }
+
+ static MaybeHandle<FixedArray> PrependElementIndicesImpl(
+ Handle<JSObject> object, Handle<FixedArrayBase> backing_store,
+ Handle<FixedArray> keys, GetKeysConversion convert,
+ PropertyFilter filter) {
+ Isolate* isolate = object->GetIsolate();
+ uint32_t nof_property_keys = keys->length();
+ uint32_t initial_list_length =
+ Subclass::GetMaxNumberOfEntries(*object, *backing_store);
+
+ initial_list_length += nof_property_keys;
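+    // If the uint32 addition above wrapped around, the sum is smaller than
+    // nof_property_keys; the second check below catches that overflow.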
+ if (initial_list_length > FixedArray::kMaxLength ||
+ initial_list_length < nof_property_keys) {
+ return isolate->Throw<FixedArray>(isolate->factory()->NewRangeError(
+ MessageTemplate::kInvalidArrayLength));
+ }
+
+ // Collect the element indices into a new list.
+ MaybeHandle<FixedArray> raw_array =
+ isolate->factory()->TryNewFixedArray(initial_list_length);
+ Handle<FixedArray> combined_keys;
+
+    // If we have a holey backing store, try to precisely estimate the backing
+    // store size as a last resort when we cannot allocate the big array.
+ if (!raw_array.ToHandle(&combined_keys)) {
+ if (IsHoleyOrDictionaryElementsKind(kind())) {
+        // If we overestimate the result list size, we might end up in the
+        // large-object space, which doesn't free memory when the list
+        // shrinks. Hence we estimate the final size for holey backing stores
+        // more precisely here.
+ initial_list_length =
+ Subclass::NumberOfElementsImpl(*object, *backing_store);
+ initial_list_length += nof_property_keys;
+ }
+ combined_keys = isolate->factory()->NewFixedArray(initial_list_length);
+ }
+
+ uint32_t nof_indices = 0;
+ bool needs_sorting = IsDictionaryElementsKind(kind()) ||
+ IsSloppyArgumentsElementsKind(kind());
+ combined_keys = Subclass::DirectCollectElementIndicesImpl(
+ isolate, object, backing_store,
+ needs_sorting ? GetKeysConversion::kKeepNumbers : convert, filter,
+ combined_keys, &nof_indices);
+
+ if (needs_sorting) {
+ SortIndices(isolate, combined_keys, nof_indices);
+ // Indices from dictionary elements should only be converted after
+ // sorting.
+ if (convert == GetKeysConversion::kConvertToString) {
+ for (uint32_t i = 0; i < nof_indices; i++) {
+ Handle<Object> index_string = isolate->factory()->Uint32ToString(
+ combined_keys->get(i).Number());
+ combined_keys->set(i, *index_string);
+ }
+ }
+ }
+
+ // Copy over the passed-in property keys.
+ CopyObjectToObjectElements(isolate, *keys, PACKED_ELEMENTS, 0,
+ *combined_keys, PACKED_ELEMENTS, nof_indices,
+ nof_property_keys);
+
+ // For holey elements and arguments we might have to shrink the collected
+ // keys since the estimates might be off.
+ if (IsHoleyOrDictionaryElementsKind(kind()) ||
+ IsSloppyArgumentsElementsKind(kind())) {
+ // Shrink combined_keys to the final size.
+ int final_size = nof_indices + nof_property_keys;
+ DCHECK_LE(final_size, combined_keys->length());
+ return FixedArray::ShrinkOrEmpty(isolate, combined_keys, final_size);
+ }
+
+ return combined_keys;
+ }
+
+ void AddElementsToKeyAccumulator(Handle<JSObject> receiver,
+ KeyAccumulator* accumulator,
+ AddKeyConversion convert) final {
+ Subclass::AddElementsToKeyAccumulatorImpl(receiver, accumulator, convert);
+ }
+
+ static uint32_t GetCapacityImpl(JSObject holder,
+ FixedArrayBase backing_store) {
+ return backing_store.length();
+ }
+
+ uint32_t GetCapacity(JSObject holder, FixedArrayBase backing_store) final {
+ return Subclass::GetCapacityImpl(holder, backing_store);
+ }
+
+ static Object FillImpl(Handle<JSObject> receiver, Handle<Object> obj_value,
+ uint32_t start, uint32_t end) {
+ UNREACHABLE();
+ }
+
+ Object Fill(Handle<JSObject> receiver, Handle<Object> obj_value,
+ uint32_t start, uint32_t end) override {
+ return Subclass::FillImpl(receiver, obj_value, start, end);
+ }
+
+ static Maybe<bool> IncludesValueImpl(Isolate* isolate,
+ Handle<JSObject> receiver,
+ Handle<Object> value,
+ uint32_t start_from, uint32_t length) {
+ return IncludesValueSlowPath(isolate, receiver, value, start_from, length);
+ }
+
+ Maybe<bool> IncludesValue(Isolate* isolate, Handle<JSObject> receiver,
+ Handle<Object> value, uint32_t start_from,
+ uint32_t length) final {
+ return Subclass::IncludesValueImpl(isolate, receiver, value, start_from,
+ length);
+ }
+
+ static Maybe<int64_t> IndexOfValueImpl(Isolate* isolate,
+ Handle<JSObject> receiver,
+ Handle<Object> value,
+ uint32_t start_from, uint32_t length) {
+ return IndexOfValueSlowPath(isolate, receiver, value, start_from, length);
+ }
+
+ Maybe<int64_t> IndexOfValue(Isolate* isolate, Handle<JSObject> receiver,
+ Handle<Object> value, uint32_t start_from,
+ uint32_t length) final {
+ return Subclass::IndexOfValueImpl(isolate, receiver, value, start_from,
+ length);
+ }
+
+ static Maybe<int64_t> LastIndexOfValueImpl(Handle<JSObject> receiver,
+ Handle<Object> value,
+ uint32_t start_from) {
+ UNREACHABLE();
+ }
+
+ Maybe<int64_t> LastIndexOfValue(Handle<JSObject> receiver,
+ Handle<Object> value,
+ uint32_t start_from) final {
+ return Subclass::LastIndexOfValueImpl(receiver, value, start_from);
+ }
+
+ static void ReverseImpl(JSObject receiver) { UNREACHABLE(); }
+
+ void Reverse(JSObject receiver) final { Subclass::ReverseImpl(receiver); }
+
+ static uint32_t GetIndexForEntryImpl(FixedArrayBase backing_store,
+ uint32_t entry) {
+ return entry;
+ }
+
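+  // For fast kinds, an entry is the index itself (GetIndexForEntryImpl above
+  // is the identity), and kMaxUInt32 serves as the "not found" sentinel.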
+ static uint32_t GetEntryForIndexImpl(Isolate* isolate, JSObject holder,
+ FixedArrayBase backing_store,
+ uint32_t index, PropertyFilter filter) {
+ DCHECK(IsFastElementsKind(kind()) || IsFrozenOrSealedElementsKind(kind()));
+ uint32_t length = Subclass::GetMaxIndex(holder, backing_store);
+ if (IsHoleyElementsKindForRead(kind())) {
+ return index < length && !BackingStore::cast(backing_store)
+ .is_the_hole(isolate, index)
+ ? index
+ : kMaxUInt32;
+ } else {
+ return index < length ? index : kMaxUInt32;
+ }
+ }
+
+ uint32_t GetEntryForIndex(Isolate* isolate, JSObject holder,
+ FixedArrayBase backing_store,
+ uint32_t index) final {
+ return Subclass::GetEntryForIndexImpl(isolate, holder, backing_store, index,
+ ALL_PROPERTIES);
+ }
+
+ static PropertyDetails GetDetailsImpl(FixedArrayBase backing_store,
+ uint32_t entry) {
+ return PropertyDetails(kData, NONE, PropertyCellType::kNoCell);
+ }
+
+ static PropertyDetails GetDetailsImpl(JSObject holder, uint32_t entry) {
+ return PropertyDetails(kData, NONE, PropertyCellType::kNoCell);
+ }
+
+ PropertyDetails GetDetails(JSObject holder, uint32_t entry) final {
+ return Subclass::GetDetailsImpl(holder, entry);
+ }
+
+ Handle<FixedArray> CreateListFromArrayLike(Isolate* isolate,
+ Handle<JSObject> object,
+ uint32_t length) final {
+ return Subclass::CreateListFromArrayLikeImpl(isolate, object, length);
+ }
+
+ static Handle<FixedArray> CreateListFromArrayLikeImpl(Isolate* isolate,
+ Handle<JSObject> object,
+ uint32_t length) {
+ UNREACHABLE();
+ }
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(ElementsAccessorBase);
+};
+
+class DictionaryElementsAccessor
+ : public ElementsAccessorBase<DictionaryElementsAccessor,
+ ElementsKindTraits<DICTIONARY_ELEMENTS>> {
+ public:
+ static uint32_t GetMaxIndex(JSObject receiver, FixedArrayBase elements) {
+ // We cannot properly estimate this for dictionaries.
+ UNREACHABLE();
+ }
+
+ static uint32_t GetMaxNumberOfEntries(JSObject receiver,
+ FixedArrayBase backing_store) {
+ return NumberOfElementsImpl(receiver, backing_store);
+ }
+
+ static uint32_t NumberOfElementsImpl(JSObject receiver,
+ FixedArrayBase backing_store) {
+ NumberDictionary dict = NumberDictionary::cast(backing_store);
+ return dict.NumberOfElements();
+ }
+
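+  // Per the ArraySetLength spec, a non-configurable element limits how far
+  // the length can shrink: e.g. with a non-configurable entry at index 5,
+  // `array.length = 0` clamps the new length to 6 (see the first loop below).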
+ static void SetLengthImpl(Isolate* isolate, Handle<JSArray> array,
+ uint32_t length,
+ Handle<FixedArrayBase> backing_store) {
+ Handle<NumberDictionary> dict =
+ Handle<NumberDictionary>::cast(backing_store);
+ int capacity = dict->Capacity();
+ uint32_t old_length = 0;
+ CHECK(array->length().ToArrayLength(&old_length));
+ {
+ DisallowHeapAllocation no_gc;
+ ReadOnlyRoots roots(isolate);
+ if (length < old_length) {
+ if (dict->requires_slow_elements()) {
+ // Find last non-deletable element in range of elements to be
+ // deleted and adjust range accordingly.
+ for (int entry = 0; entry < capacity; entry++) {
+ Object index = dict->KeyAt(entry);
+ if (dict->IsKey(roots, index)) {
+ uint32_t number = static_cast<uint32_t>(index.Number());
+ if (length <= number && number < old_length) {
+ PropertyDetails details = dict->DetailsAt(entry);
+ if (!details.IsConfigurable()) length = number + 1;
+ }
+ }
+ }
+ }
+
+ if (length == 0) {
+ // Flush the backing store.
+ array->initialize_elements();
+ } else {
+ // Remove elements that should be deleted.
+ int removed_entries = 0;
+ for (int entry = 0; entry < capacity; entry++) {
+ Object index = dict->KeyAt(entry);
+ if (dict->IsKey(roots, index)) {
+ uint32_t number = static_cast<uint32_t>(index.Number());
+ if (length <= number && number < old_length) {
+ dict->ClearEntry(isolate, entry);
+ removed_entries++;
+ }
+ }
+ }
+
+ if (removed_entries > 0) {
+ // Update the number of elements.
+ dict->ElementsRemoved(removed_entries);
+ }
+ }
+ }
+ }
+
+ Handle<Object> length_obj = isolate->factory()->NewNumberFromUint(length);
+ array->set_length(*length_obj);
+ }
+
+ static void CopyElementsImpl(Isolate* isolate, FixedArrayBase from,
+ uint32_t from_start, FixedArrayBase to,
+ ElementsKind from_kind, uint32_t to_start,
+ int packed_size, int copy_size) {
+ UNREACHABLE();
+ }
+
+ static void DeleteImpl(Handle<JSObject> obj, uint32_t entry) {
+ Handle<NumberDictionary> dict(NumberDictionary::cast(obj->elements()),
+ obj->GetIsolate());
+ dict = NumberDictionary::DeleteEntry(obj->GetIsolate(), dict, entry);
+ obj->set_elements(*dict);
+ }
+
+ static bool HasAccessorsImpl(JSObject holder, FixedArrayBase backing_store) {
+ DisallowHeapAllocation no_gc;
+ NumberDictionary dict = NumberDictionary::cast(backing_store);
+ if (!dict.requires_slow_elements()) return false;
+ int capacity = dict.Capacity();
+ ReadOnlyRoots roots = holder.GetReadOnlyRoots();
+ for (int i = 0; i < capacity; i++) {
+ Object key = dict.KeyAt(i);
+ if (!dict.IsKey(roots, key)) continue;
+ PropertyDetails details = dict.DetailsAt(i);
+ if (details.kind() == kAccessor) return true;
+ }
+ return false;
+ }
+
+ static Object GetRaw(FixedArrayBase store, uint32_t entry) {
+ NumberDictionary backing_store = NumberDictionary::cast(store);
+ return backing_store.ValueAt(entry);
+ }
+
+ static Handle<Object> GetImpl(Isolate* isolate, FixedArrayBase backing_store,
+ uint32_t entry) {
+ return handle(GetRaw(backing_store, entry), isolate);
+ }
+
+ static inline void SetImpl(Handle<JSObject> holder, uint32_t entry,
+ Object value) {
+ SetImpl(holder->elements(), entry, value);
+ }
+
+ static inline void SetImpl(FixedArrayBase backing_store, uint32_t entry,
+ Object value) {
+ NumberDictionary::cast(backing_store).ValueAtPut(entry, value);
+ }
+
+ static void ReconfigureImpl(Handle<JSObject> object,
+ Handle<FixedArrayBase> store, uint32_t entry,
+ Handle<Object> value,
+ PropertyAttributes attributes) {
+ NumberDictionary dictionary = NumberDictionary::cast(*store);
+ if (attributes != NONE) object->RequireSlowElements(dictionary);
+ dictionary.ValueAtPut(entry, *value);
+ PropertyDetails details = dictionary.DetailsAt(entry);
+ details = PropertyDetails(kData, attributes, PropertyCellType::kNoCell,
+ details.dictionary_index());
+
+ dictionary.DetailsAtPut(object->GetIsolate(), entry, details);
+ }
+
+ static void AddImpl(Handle<JSObject> object, uint32_t index,
+ Handle<Object> value, PropertyAttributes attributes,
+ uint32_t new_capacity) {
+ PropertyDetails details(kData, attributes, PropertyCellType::kNoCell);
+ Handle<NumberDictionary> dictionary =
+ object->HasFastElements() || object->HasFastStringWrapperElements()
+ ? JSObject::NormalizeElements(object)
+ : handle(NumberDictionary::cast(object->elements()),
+ object->GetIsolate());
+ Handle<NumberDictionary> new_dictionary = NumberDictionary::Add(
+ object->GetIsolate(), dictionary, index, value, details);
+ new_dictionary->UpdateMaxNumberKey(index, object);
+ if (attributes != NONE) object->RequireSlowElements(*new_dictionary);
+ if (dictionary.is_identical_to(new_dictionary)) return;
+ object->set_elements(*new_dictionary);
+ }
+
+ static bool HasEntryImpl(Isolate* isolate, FixedArrayBase store,
+ uint32_t entry) {
+ DisallowHeapAllocation no_gc;
+ NumberDictionary dict = NumberDictionary::cast(store);
+ Object index = dict.KeyAt(entry);
+ return !index.IsTheHole(isolate);
+ }
+
+ static uint32_t GetIndexForEntryImpl(FixedArrayBase store, uint32_t entry) {
+ DisallowHeapAllocation no_gc;
+ NumberDictionary dict = NumberDictionary::cast(store);
+ uint32_t result = 0;
+ CHECK(dict.KeyAt(entry).ToArrayIndex(&result));
+ return result;
+ }
+
+ static uint32_t GetEntryForIndexImpl(Isolate* isolate, JSObject holder,
+ FixedArrayBase store, uint32_t index,
+ PropertyFilter filter) {
+ DisallowHeapAllocation no_gc;
+ NumberDictionary dictionary = NumberDictionary::cast(store);
+ int entry = dictionary.FindEntry(isolate, index);
+ if (entry == NumberDictionary::kNotFound) return kMaxUInt32;
+ if (filter != ALL_PROPERTIES) {
+ PropertyDetails details = dictionary.DetailsAt(entry);
+ PropertyAttributes attr = details.attributes();
+ if ((attr & filter) != 0) return kMaxUInt32;
+ }
+ return static_cast<uint32_t>(entry);
+ }
+
+ static PropertyDetails GetDetailsImpl(JSObject holder, uint32_t entry) {
+ return GetDetailsImpl(holder.elements(), entry);
+ }
+
+ static PropertyDetails GetDetailsImpl(FixedArrayBase backing_store,
+ uint32_t entry) {
+ return NumberDictionary::cast(backing_store).DetailsAt(entry);
+ }
+
+ static uint32_t FilterKey(Handle<NumberDictionary> dictionary, int entry,
+ Object raw_key, PropertyFilter filter) {
+ DCHECK(raw_key.IsNumber());
+ DCHECK_LE(raw_key.Number(), kMaxUInt32);
+ PropertyDetails details = dictionary->DetailsAt(entry);
+ PropertyAttributes attr = details.attributes();
+ if ((attr & filter) != 0) return kMaxUInt32;
+ return static_cast<uint32_t>(raw_key.Number());
+ }
+
+ static uint32_t GetKeyForEntryImpl(Isolate* isolate,
+ Handle<NumberDictionary> dictionary,
+ int entry, PropertyFilter filter) {
+ DisallowHeapAllocation no_gc;
+ Object raw_key = dictionary->KeyAt(entry);
+ if (!dictionary->IsKey(ReadOnlyRoots(isolate), raw_key)) return kMaxUInt32;
+ return FilterKey(dictionary, entry, raw_key, filter);
+ }
+
+ static void CollectElementIndicesImpl(Handle<JSObject> object,
+ Handle<FixedArrayBase> backing_store,
+ KeyAccumulator* keys) {
+ if (keys->filter() & SKIP_STRINGS) return;
+ Isolate* isolate = keys->isolate();
+ Handle<NumberDictionary> dictionary =
+ Handle<NumberDictionary>::cast(backing_store);
+ int capacity = dictionary->Capacity();
+ Handle<FixedArray> elements = isolate->factory()->NewFixedArray(
+ GetMaxNumberOfEntries(*object, *backing_store));
+ int insertion_index = 0;
+ PropertyFilter filter = keys->filter();
+ ReadOnlyRoots roots(isolate);
+ for (int i = 0; i < capacity; i++) {
+ Object raw_key = dictionary->KeyAt(i);
+ if (!dictionary->IsKey(roots, raw_key)) continue;
+ uint32_t key = FilterKey(dictionary, i, raw_key, filter);
+ if (key == kMaxUInt32) {
+ keys->AddShadowingKey(raw_key);
+ continue;
+ }
+ elements->set(insertion_index, raw_key);
+ insertion_index++;
+ }
+ SortIndices(isolate, elements, insertion_index);
+ for (int i = 0; i < insertion_index; i++) {
+ keys->AddKey(elements->get(i));
+ }
+ }
+
+ static Handle<FixedArray> DirectCollectElementIndicesImpl(
+ Isolate* isolate, Handle<JSObject> object,
+ Handle<FixedArrayBase> backing_store, GetKeysConversion convert,
+ PropertyFilter filter, Handle<FixedArray> list, uint32_t* nof_indices,
+ uint32_t insertion_index = 0) {
+ if (filter & SKIP_STRINGS) return list;
+ if (filter & ONLY_ALL_CAN_READ) return list;
+
+ Handle<NumberDictionary> dictionary =
+ Handle<NumberDictionary>::cast(backing_store);
+ uint32_t capacity = dictionary->Capacity();
+ for (uint32_t i = 0; i < capacity; i++) {
+ uint32_t key = GetKeyForEntryImpl(isolate, dictionary, i, filter);
+ if (key == kMaxUInt32) continue;
+ Handle<Object> index = isolate->factory()->NewNumberFromUint(key);
+ list->set(insertion_index, *index);
+ insertion_index++;
+ }
+ *nof_indices = insertion_index;
+ return list;
+ }
+
+ static void AddElementsToKeyAccumulatorImpl(Handle<JSObject> receiver,
+ KeyAccumulator* accumulator,
+ AddKeyConversion convert) {
+ Isolate* isolate = accumulator->isolate();
+ Handle<NumberDictionary> dictionary(
+ NumberDictionary::cast(receiver->elements()), isolate);
+ int capacity = dictionary->Capacity();
+ ReadOnlyRoots roots(isolate);
+ for (int i = 0; i < capacity; i++) {
+ Object k = dictionary->KeyAt(i);
+ if (!dictionary->IsKey(roots, k)) continue;
+ Object value = dictionary->ValueAt(i);
+ DCHECK(!value.IsTheHole(isolate));
+ DCHECK(!value.IsAccessorPair());
+ DCHECK(!value.IsAccessorInfo());
+ accumulator->AddKey(value, convert);
+ }
+ }
+
+ static bool IncludesValueFastPath(Isolate* isolate, Handle<JSObject> receiver,
+ Handle<Object> value, uint32_t start_from,
+ uint32_t length, Maybe<bool>* result) {
+ DisallowHeapAllocation no_gc;
+ NumberDictionary dictionary = NumberDictionary::cast(receiver->elements());
+ int capacity = dictionary.Capacity();
+ Object the_hole = ReadOnlyRoots(isolate).the_hole_value();
+ Object undefined = ReadOnlyRoots(isolate).undefined_value();
+
+ // Scan for accessor properties. If accessors are present, then elements
+ // must be accessed in order via the slow path.
+ bool found = false;
+ for (int i = 0; i < capacity; ++i) {
+ Object k = dictionary.KeyAt(i);
+ if (k == the_hole) continue;
+ if (k == undefined) continue;
+
+ uint32_t index;
+ if (!k.ToArrayIndex(&index) || index < start_from || index >= length) {
+ continue;
+ }
+
+ if (dictionary.DetailsAt(i).kind() == kAccessor) {
+        // Restart from the beginning in the slow path, otherwise we may
+        // observably access getters out of order.
+ return false;
+ } else if (!found) {
+ Object element_k = dictionary.ValueAt(i);
+ if (value->SameValueZero(element_k)) found = true;
+ }
+ }
+
+ *result = Just(found);
+ return true;
+ }
+
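+  // Per spec, missing indices read as undefined, so when searching for
+  // undefined (search_for_hole below) any absent entry in the range is an
+  // immediate match.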
+ static Maybe<bool> IncludesValueImpl(Isolate* isolate,
+ Handle<JSObject> receiver,
+ Handle<Object> value,
+ uint32_t start_from, uint32_t length) {
+ DCHECK(JSObject::PrototypeHasNoElements(isolate, *receiver));
+ bool search_for_hole = value->IsUndefined(isolate);
+
+ if (!search_for_hole) {
+ Maybe<bool> result = Nothing<bool>();
+ if (DictionaryElementsAccessor::IncludesValueFastPath(
+ isolate, receiver, value, start_from, length, &result)) {
+ return result;
+ }
+ }
+ ElementsKind original_elements_kind = receiver->GetElementsKind();
+ USE(original_elements_kind);
+ Handle<NumberDictionary> dictionary(
+ NumberDictionary::cast(receiver->elements()), isolate);
+    // Iterate through the entire range, as accessing elements out of order
+    // is observable.
+ for (uint32_t k = start_from; k < length; ++k) {
+ DCHECK_EQ(receiver->GetElementsKind(), original_elements_kind);
+ int entry = dictionary->FindEntry(isolate, k);
+ if (entry == NumberDictionary::kNotFound) {
+ if (search_for_hole) return Just(true);
+ continue;
+ }
+
+ PropertyDetails details = GetDetailsImpl(*dictionary, entry);
+ switch (details.kind()) {
+ case kData: {
+ Object element_k = dictionary->ValueAt(entry);
+ if (value->SameValueZero(element_k)) return Just(true);
+ break;
+ }
+ case kAccessor: {
+ LookupIterator it(isolate, receiver, k,
+ LookupIterator::OWN_SKIP_INTERCEPTOR);
+ DCHECK(it.IsFound());
+ DCHECK_EQ(it.state(), LookupIterator::ACCESSOR);
+ Handle<Object> element_k;
+
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, element_k,
+ Object::GetPropertyWithAccessor(&it),
+ Nothing<bool>());
+
+ if (value->SameValueZero(*element_k)) return Just(true);
+
+          // Bail out to the slow path if elements on the prototype changed.
+ if (!JSObject::PrototypeHasNoElements(isolate, *receiver)) {
+ return IncludesValueSlowPath(isolate, receiver, value, k + 1,
+ length);
+ }
+
+          // Continue if elements unchanged.
+ if (*dictionary == receiver->elements()) continue;
+
+          // Otherwise, bail out or update elements.
+
+ // If switched to initial elements, return true if searching for
+ // undefined, and false otherwise.
+ if (receiver->map().GetInitialElements() == receiver->elements()) {
+ return Just(search_for_hole);
+ }
+
+ // If switched to fast elements, continue with the correct accessor.
+ if (receiver->GetElementsKind() != DICTIONARY_ELEMENTS) {
+ ElementsAccessor* accessor = receiver->GetElementsAccessor();
+ return accessor->IncludesValue(isolate, receiver, value, k + 1,
+ length);
+ }
+ dictionary =
+ handle(NumberDictionary::cast(receiver->elements()), isolate);
+ break;
+ }
+ }
+ }
+ return Just(false);
+ }
+
+ static Maybe<int64_t> IndexOfValueImpl(Isolate* isolate,
+ Handle<JSObject> receiver,
+ Handle<Object> value,
+ uint32_t start_from, uint32_t length) {
+ DCHECK(JSObject::PrototypeHasNoElements(isolate, *receiver));
+
+ ElementsKind original_elements_kind = receiver->GetElementsKind();
+ USE(original_elements_kind);
+ Handle<NumberDictionary> dictionary(
+ NumberDictionary::cast(receiver->elements()), isolate);
+ // Iterate through entire range, as accessing elements out of order is
+ // observable.
+ for (uint32_t k = start_from; k < length; ++k) {
+ DCHECK_EQ(receiver->GetElementsKind(), original_elements_kind);
+ int entry = dictionary->FindEntry(isolate, k);
+ if (entry == NumberDictionary::kNotFound) continue;
+
+ PropertyDetails details = GetDetailsImpl(*dictionary, entry);
+ switch (details.kind()) {
+ case kData: {
+ Object element_k = dictionary->ValueAt(entry);
+ if (value->StrictEquals(element_k)) {
+ return Just<int64_t>(k);
+ }
+ break;
+ }
+ case kAccessor: {
+ LookupIterator it(isolate, receiver, k,
+ LookupIterator::OWN_SKIP_INTERCEPTOR);
+ DCHECK(it.IsFound());
+ DCHECK_EQ(it.state(), LookupIterator::ACCESSOR);
+ Handle<Object> element_k;
+
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, element_k,
+ Object::GetPropertyWithAccessor(&it),
+ Nothing<int64_t>());
+
+ if (value->StrictEquals(*element_k)) return Just<int64_t>(k);
+
+          // Bail out to the slow path if elements on the prototype changed.
+ if (!JSObject::PrototypeHasNoElements(isolate, *receiver)) {
+ return IndexOfValueSlowPath(isolate, receiver, value, k + 1,
+ length);
+ }
+
+ // Continue if elements unchanged.
+ if (*dictionary == receiver->elements()) continue;
+
+          // Otherwise, bail out or update elements.
+          if (receiver->GetElementsKind() != DICTIONARY_ELEMENTS) {
+            // The receiver is no longer in dictionary mode: switch to the
+            // slow path.
+ return IndexOfValueSlowPath(isolate, receiver, value, k + 1,
+ length);
+ }
+ dictionary =
+ handle(NumberDictionary::cast(receiver->elements()), isolate);
+ break;
+ }
+ }
+ }
+ return Just<int64_t>(-1);
+ }
+
+ static void ValidateContents(JSObject holder, int length) {
+ DisallowHeapAllocation no_gc;
+#if DEBUG
+ DCHECK_EQ(holder.map().elements_kind(), DICTIONARY_ELEMENTS);
+ if (!FLAG_enable_slow_asserts) return;
+ ReadOnlyRoots roots = holder.GetReadOnlyRoots();
+ NumberDictionary dictionary = NumberDictionary::cast(holder.elements());
+ // Validate the requires_slow_elements and max_number_key values.
+ int capacity = dictionary.Capacity();
+ bool requires_slow_elements = false;
+ int max_key = 0;
+ for (int i = 0; i < capacity; ++i) {
+ Object k;
+ if (!dictionary.ToKey(roots, i, &k)) continue;
+ DCHECK_LE(0.0, k.Number());
+ if (k.Number() > NumberDictionary::kRequiresSlowElementsLimit) {
+ requires_slow_elements = true;
+ } else {
+ max_key = Max(max_key, Smi::ToInt(k));
+ }
+ }
+ if (requires_slow_elements) {
+ DCHECK(dictionary.requires_slow_elements());
+ } else if (!dictionary.requires_slow_elements()) {
+ DCHECK_LE(max_key, dictionary.max_number_key());
+ }
+#endif
+ }
+};
+
+// Super class for all fast element arrays.
+template <typename Subclass, typename KindTraits>
+class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> {
+ public:
+ using BackingStore = typename KindTraits::BackingStore;
+
+ static Handle<NumberDictionary> NormalizeImpl(Handle<JSObject> object,
+ Handle<FixedArrayBase> store) {
+ Isolate* isolate = object->GetIsolate();
+ ElementsKind kind = Subclass::kind();
+
+ // Ensure that notifications fire if the array or object prototypes are
+ // normalizing.
+ if (IsSmiOrObjectElementsKind(kind) ||
+ kind == FAST_STRING_WRAPPER_ELEMENTS) {
+ isolate->UpdateNoElementsProtectorOnNormalizeElements(object);
+ }
+
+ int capacity = object->GetFastElementsUsage();
+ Handle<NumberDictionary> dictionary =
+ NumberDictionary::New(isolate, capacity);
+
+ PropertyDetails details = PropertyDetails::Empty();
+ int j = 0;
+ int max_number_key = -1;
+ for (int i = 0; j < capacity; i++) {
+ if (IsHoleyElementsKindForRead(kind)) {
+ if (BackingStore::cast(*store).is_the_hole(isolate, i)) continue;
+ }
+ max_number_key = i;
+ Handle<Object> value = Subclass::GetImpl(isolate, *store, i);
+ dictionary =
+ NumberDictionary::Add(isolate, dictionary, i, value, details);
+ j++;
+ }
+
+ if (max_number_key > 0) {
+ dictionary->UpdateMaxNumberKey(static_cast<uint32_t>(max_number_key),
+ object);
+ }
+ return dictionary;
+ }
+
+ static void DeleteAtEnd(Handle<JSObject> obj,
+ Handle<BackingStore> backing_store, uint32_t entry) {
+ uint32_t length = static_cast<uint32_t>(backing_store->length());
+ Isolate* isolate = obj->GetIsolate();
+ for (; entry > 0; entry--) {
+ if (!backing_store->is_the_hole(isolate, entry - 1)) break;
+ }
+ if (entry == 0) {
+ FixedArray empty = ReadOnlyRoots(isolate).empty_fixed_array();
+ // Dynamically ask for the elements kind here since we manually redirect
+ // the operations for argument backing stores.
+ if (obj->GetElementsKind() == FAST_SLOPPY_ARGUMENTS_ELEMENTS) {
+ SloppyArgumentsElements::cast(obj->elements()).set_arguments(empty);
+ } else {
+ obj->set_elements(empty);
+ }
+ return;
+ }
+
+ isolate->heap()->RightTrimFixedArray(*backing_store, length - entry);
+ }
+
+ static void DeleteCommon(Handle<JSObject> obj, uint32_t entry,
+ Handle<FixedArrayBase> store) {
+ DCHECK(obj->HasSmiOrObjectElements() || obj->HasDoubleElements() ||
+ obj->HasFastArgumentsElements() ||
+ obj->HasFastStringWrapperElements());
+ Handle<BackingStore> backing_store = Handle<BackingStore>::cast(store);
+ if (!obj->IsJSArray() &&
+ entry == static_cast<uint32_t>(store->length()) - 1) {
+ DeleteAtEnd(obj, backing_store, entry);
+ return;
+ }
+
+ Isolate* isolate = obj->GetIsolate();
+ backing_store->set_the_hole(isolate, entry);
+
+ // TODO(verwaest): Move this out of elements.cc.
+ // If an old space backing store is larger than a certain size and
+ // has too few used values, normalize it.
+ const int kMinLengthForSparsenessCheck = 64;
+ if (backing_store->length() < kMinLengthForSparsenessCheck) return;
+ // TODO(ulan): Check if it works with young large objects.
+ if (ObjectInYoungGeneration(*backing_store)) return;
+ uint32_t length = 0;
+ if (obj->IsJSArray()) {
+ JSArray::cast(*obj).length().ToArrayLength(&length);
+ } else {
+ length = static_cast<uint32_t>(store->length());
+ }
+
+ // To avoid doing the check on every delete, use a counter-based heuristic.
+ const int kLengthFraction = 16;
+ // The above constant must be large enough to ensure that we check for
+ // normalization frequently enough. At a minimum, it should be large
+ // enough to reliably hit the "window" of remaining elements count where
+ // normalization would be beneficial.
+ STATIC_ASSERT(kLengthFraction >=
+ NumberDictionary::kEntrySize *
+ NumberDictionary::kPreferFastElementsSizeFactor);
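+    // For example, with kLengthFraction == 16, a backing store of length 1024
+    // reaches the full sparseness scan below at most once per 64 deletions
+    // (approximately, since the deletion counter is per-isolate).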
+ size_t current_counter = isolate->elements_deletion_counter();
+ if (current_counter < length / kLengthFraction) {
+ isolate->set_elements_deletion_counter(current_counter + 1);
+ return;
+ }
+ // Reset the counter whenever the full check is performed.
+ isolate->set_elements_deletion_counter(0);
+
+ if (!obj->IsJSArray()) {
+ uint32_t i;
+ for (i = entry + 1; i < length; i++) {
+ if (!backing_store->is_the_hole(isolate, i)) break;
+ }
+ if (i == length) {
+ DeleteAtEnd(obj, backing_store, entry);
+ return;
+ }
+ }
+ int num_used = 0;
+ for (int i = 0; i < backing_store->length(); ++i) {
+ if (!backing_store->is_the_hole(isolate, i)) {
+ ++num_used;
+ // Bail out if a number dictionary wouldn't be able to save much space.
+ if (NumberDictionary::kPreferFastElementsSizeFactor *
+ NumberDictionary::ComputeCapacity(num_used) *
+ NumberDictionary::kEntrySize >
+ static_cast<uint32_t>(backing_store->length())) {
+ return;
+ }
+ }
+ }
+ JSObject::NormalizeElements(obj);
+ }
+
+ static void ReconfigureImpl(Handle<JSObject> object,
+ Handle<FixedArrayBase> store, uint32_t entry,
+ Handle<Object> value,
+ PropertyAttributes attributes) {
+ Handle<NumberDictionary> dictionary = JSObject::NormalizeElements(object);
+ entry = dictionary->FindEntry(object->GetIsolate(), entry);
+ DictionaryElementsAccessor::ReconfigureImpl(object, dictionary, entry,
+ value, attributes);
+ }
+
+ static void AddImpl(Handle<JSObject> object, uint32_t index,
+ Handle<Object> value, PropertyAttributes attributes,
+ uint32_t new_capacity) {
+ DCHECK_EQ(NONE, attributes);
+ ElementsKind from_kind = object->GetElementsKind();
+ ElementsKind to_kind = Subclass::kind();
+ if (IsDictionaryElementsKind(from_kind) ||
+ IsDoubleElementsKind(from_kind) != IsDoubleElementsKind(to_kind) ||
+ Subclass::GetCapacityImpl(*object, object->elements()) !=
+ new_capacity) {
+ Subclass::GrowCapacityAndConvertImpl(object, new_capacity);
+ } else {
+ if (IsFastElementsKind(from_kind) && from_kind != to_kind) {
+ JSObject::TransitionElementsKind(object, to_kind);
+ }
+ if (IsSmiOrObjectElementsKind(from_kind)) {
+ DCHECK(IsSmiOrObjectElementsKind(to_kind));
+ JSObject::EnsureWritableFastElements(object);
+ }
+ }
+ Subclass::SetImpl(object, index, *value);
+ }
+
+ static void DeleteImpl(Handle<JSObject> obj, uint32_t entry) {
+ ElementsKind kind = KindTraits::Kind;
+ if (IsFastPackedElementsKind(kind)) {
+ JSObject::TransitionElementsKind(obj, GetHoleyElementsKind(kind));
+ }
+ if (IsSmiOrObjectElementsKind(KindTraits::Kind)) {
+ JSObject::EnsureWritableFastElements(obj);
+ }
+ DeleteCommon(obj, entry, handle(obj->elements(), obj->GetIsolate()));
+ }
+
+ static bool HasEntryImpl(Isolate* isolate, FixedArrayBase backing_store,
+ uint32_t entry) {
+ return !BackingStore::cast(backing_store).is_the_hole(isolate, entry);
+ }
+
+ static uint32_t NumberOfElementsImpl(JSObject receiver,
+ FixedArrayBase backing_store) {
+ uint32_t max_index = Subclass::GetMaxIndex(receiver, backing_store);
+ if (IsFastPackedElementsKind(Subclass::kind())) return max_index;
+ Isolate* isolate = receiver.GetIsolate();
+ uint32_t count = 0;
+ for (uint32_t i = 0; i < max_index; i++) {
+ if (Subclass::HasEntryImpl(isolate, backing_store, i)) count++;
+ }
+ return count;
+ }
+
+ static void AddElementsToKeyAccumulatorImpl(Handle<JSObject> receiver,
+ KeyAccumulator* accumulator,
+ AddKeyConversion convert) {
+ Isolate* isolate = accumulator->isolate();
+ Handle<FixedArrayBase> elements(receiver->elements(), isolate);
+ uint32_t length = Subclass::GetMaxNumberOfEntries(*receiver, *elements);
+ for (uint32_t i = 0; i < length; i++) {
+ if (IsFastPackedElementsKind(KindTraits::Kind) ||
+ HasEntryImpl(isolate, *elements, i)) {
+ accumulator->AddKey(Subclass::GetImpl(isolate, *elements, i), convert);
+ }
+ }
+ }
+
+ static void ValidateContents(JSObject holder, int length) {
+#if DEBUG
+ Isolate* isolate = holder.GetIsolate();
+ Heap* heap = isolate->heap();
+ FixedArrayBase elements = holder.elements();
+ Map map = elements.map();
+ if (IsSmiOrObjectElementsKind(KindTraits::Kind)) {
+ DCHECK_NE(map, ReadOnlyRoots(heap).fixed_double_array_map());
+ } else if (IsDoubleElementsKind(KindTraits::Kind)) {
+ DCHECK_NE(map, ReadOnlyRoots(heap).fixed_cow_array_map());
+ if (map == ReadOnlyRoots(heap).fixed_array_map()) DCHECK_EQ(0, length);
+ } else {
+ UNREACHABLE();
+ }
+ if (length == 0) return; // nothing to do!
+#if ENABLE_SLOW_DCHECKS
+ DisallowHeapAllocation no_gc;
+ BackingStore backing_store = BackingStore::cast(elements);
+ if (IsSmiElementsKind(KindTraits::Kind)) {
+ HandleScope scope(isolate);
+ for (int i = 0; i < length; i++) {
+ DCHECK(BackingStore::get(backing_store, i, isolate)->IsSmi() ||
+ (IsHoleyElementsKind(KindTraits::Kind) &&
+ backing_store.is_the_hole(isolate, i)));
+ }
+ } else if (KindTraits::Kind == PACKED_ELEMENTS ||
+ KindTraits::Kind == PACKED_DOUBLE_ELEMENTS) {
+ for (int i = 0; i < length; i++) {
+ DCHECK(!backing_store.is_the_hole(isolate, i));
+ }
+ } else {
+ DCHECK(IsHoleyElementsKind(KindTraits::Kind));
+ }
+#endif
+#endif
+ }
+
+ static Handle<Object> PopImpl(Handle<JSArray> receiver) {
+ return Subclass::RemoveElement(receiver, AT_END);
+ }
+
+ static Handle<Object> ShiftImpl(Handle<JSArray> receiver) {
+ return Subclass::RemoveElement(receiver, AT_START);
+ }
+
+ static uint32_t PushImpl(Handle<JSArray> receiver, Arguments* args,
+ uint32_t push_size) {
+ Handle<FixedArrayBase> backing_store(receiver->elements(),
+ receiver->GetIsolate());
+ return Subclass::AddArguments(receiver, backing_store, args, push_size,
+ AT_END);
+ }
+
+ static uint32_t UnshiftImpl(Handle<JSArray> receiver, Arguments* args,
+ uint32_t unshift_size) {
+ Handle<FixedArrayBase> backing_store(receiver->elements(),
+ receiver->GetIsolate());
+ return Subclass::AddArguments(receiver, backing_store, args, unshift_size,
+ AT_START);
+ }
+
+ static void MoveElements(Isolate* isolate, Handle<JSArray> receiver,
+ Handle<FixedArrayBase> backing_store, int dst_index,
+ int src_index, int len, int hole_start,
+ int hole_end) {
+ Handle<BackingStore> dst_elms = Handle<BackingStore>::cast(backing_store);
+ if (len > JSArray::kMaxCopyElements && dst_index == 0 &&
+ isolate->heap()->CanMoveObjectStart(*dst_elms)) {
+ // Update all the copies of this backing_store handle.
+ *dst_elms.location() =
+ BackingStore::cast(
+ isolate->heap()->LeftTrimFixedArray(*dst_elms, src_index))
+ .ptr();
+ receiver->set_elements(*dst_elms);
+ // Adjust the hole offset as the array has been shrunk.
+ hole_end -= src_index;
+ DCHECK_LE(hole_start, backing_store->length());
+ DCHECK_LE(hole_end, backing_store->length());
+ } else if (len != 0) {
+ WriteBarrierMode mode = GetWriteBarrierMode(KindTraits::Kind);
+ dst_elms->MoveElements(isolate, dst_index, src_index, len, mode);
+ }
+ if (hole_start != hole_end) {
+ dst_elms->FillWithHoles(hole_start, hole_end);
+ }
+ }
+
+ static Object FillImpl(Handle<JSObject> receiver, Handle<Object> obj_value,
+ uint32_t start, uint32_t end) {
+    // Ensure the indices are within array bounds.
+ DCHECK_LE(0, start);
+ DCHECK_LE(start, end);
+
+ // Make sure COW arrays are copied.
+ if (IsSmiOrObjectElementsKind(Subclass::kind())) {
+ JSObject::EnsureWritableFastElements(receiver);
+ }
+
+ // Make sure we have enough space.
+ uint32_t capacity =
+ Subclass::GetCapacityImpl(*receiver, receiver->elements());
+ if (end > capacity) {
+ Subclass::GrowCapacityAndConvertImpl(receiver, end);
+ CHECK_EQ(Subclass::kind(), receiver->GetElementsKind());
+ }
+ DCHECK_LE(end, Subclass::GetCapacityImpl(*receiver, receiver->elements()));
+
+ for (uint32_t index = start; index < end; ++index) {
+ Subclass::SetImpl(receiver, index, *obj_value);
+ }
+ return *receiver;
+ }
+
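+  // Note: per Array.prototype.includes, the comparisons below use
+  // SameValueZero, so NaN can be found ([NaN].includes(NaN) === true),
+  // unlike indexOf's StrictEquals semantics.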
+ static Maybe<bool> IncludesValueImpl(Isolate* isolate,
+ Handle<JSObject> receiver,
+ Handle<Object> search_value,
+ uint32_t start_from, uint32_t length) {
+ DCHECK(JSObject::PrototypeHasNoElements(isolate, *receiver));
+ DisallowHeapAllocation no_gc;
+ FixedArrayBase elements_base = receiver->elements();
+ Object the_hole = ReadOnlyRoots(isolate).the_hole_value();
+ Object undefined = ReadOnlyRoots(isolate).undefined_value();
+ Object value = *search_value;
+
+ if (start_from >= length) return Just(false);
+
+    // Elements beyond the capacity of the backing store are treated as
+    // undefined.
+ uint32_t elements_length = static_cast<uint32_t>(elements_base.length());
+ if (value == undefined && elements_length < length) return Just(true);
+ if (elements_length == 0) {
+ DCHECK_NE(value, undefined);
+ return Just(false);
+ }
+
+ length = std::min(elements_length, length);
+
+ if (!value.IsNumber()) {
+ if (value == undefined) {
+ // Search for `undefined` or The Hole. Even in the case of
+ // PACKED_DOUBLE_ELEMENTS or PACKED_SMI_ELEMENTS, we might encounter The
+ // Hole here, since the {length} used here can be larger than
+ // JSArray::length.
+ if (IsSmiOrObjectElementsKind(Subclass::kind()) ||
+ IsFrozenOrSealedElementsKind(Subclass::kind())) {
+ auto elements = FixedArray::cast(receiver->elements());
+
+ for (uint32_t k = start_from; k < length; ++k) {
+ Object element_k = elements.get(k);
+
+ if (element_k == the_hole || element_k == undefined) {
+ return Just(true);
+ }
+ }
+ return Just(false);
+ } else {
+ // Search for The Hole in HOLEY_DOUBLE_ELEMENTS or
+ // PACKED_DOUBLE_ELEMENTS.
+ DCHECK(IsDoubleElementsKind(Subclass::kind()));
+ auto elements = FixedDoubleArray::cast(receiver->elements());
+
+ for (uint32_t k = start_from; k < length; ++k) {
+ if (elements.is_the_hole(k)) {
+ return Just(true);
+ }
+ }
+ return Just(false);
+ }
+ } else if (!IsObjectElementsKind(Subclass::kind()) &&
+ !IsFrozenOrSealedElementsKind(Subclass::kind())) {
+ // Search for non-number, non-Undefined value, with either
+ // PACKED_SMI_ELEMENTS, PACKED_DOUBLE_ELEMENTS, HOLEY_SMI_ELEMENTS or
+ // HOLEY_DOUBLE_ELEMENTS. Guaranteed to return false, since these
+ // elements kinds can only contain Number values or undefined.
+ return Just(false);
+ } else {
+ // Search for non-number, non-Undefined value with either
+ // PACKED_ELEMENTS or HOLEY_ELEMENTS.
+ DCHECK(IsObjectElementsKind(Subclass::kind()) ||
+ IsFrozenOrSealedElementsKind(Subclass::kind()));
+ auto elements = FixedArray::cast(receiver->elements());
+
+ for (uint32_t k = start_from; k < length; ++k) {
+ Object element_k = elements.get(k);
+ if (element_k == the_hole) {
+ continue;
+ }
+
+ if (value.SameValueZero(element_k)) return Just(true);
+ }
+ return Just(false);
+ }
+ } else {
+ if (!value.IsNaN()) {
+ double search_value = value.Number();
+ if (IsDoubleElementsKind(Subclass::kind())) {
+ // Search for non-NaN Number in PACKED_DOUBLE_ELEMENTS or
+ // HOLEY_DOUBLE_ELEMENTS --- Skip TheHole, and trust UCOMISD or
+ // similar operation for result.
+ auto elements = FixedDoubleArray::cast(receiver->elements());
+
+ for (uint32_t k = start_from; k < length; ++k) {
+ if (elements.is_the_hole(k)) {
+ continue;
+ }
+ if (elements.get_scalar(k) == search_value) return Just(true);
+ }
+ return Just(false);
+ } else {
+ // Search for non-NaN Number in PACKED_ELEMENTS, HOLEY_ELEMENTS,
+ // PACKED_SMI_ELEMENTS or HOLEY_SMI_ELEMENTS --- Skip non-Numbers,
+ // and trust UCOMISD or similar operation for result
+ auto elements = FixedArray::cast(receiver->elements());
+
+ for (uint32_t k = start_from; k < length; ++k) {
+ Object element_k = elements.get(k);
+ if (element_k.IsNumber() && element_k.Number() == search_value) {
+ return Just(true);
+ }
+ }
+ return Just(false);
+ }
+ } else {
+        // Search for NaN. NaN cannot be represented with Smi elements, so
+        // abort if the ElementsKind is PACKED_SMI_ELEMENTS or
+        // HOLEY_SMI_ELEMENTS.
+ if (IsSmiElementsKind(Subclass::kind())) return Just(false);
+
+ if (IsDoubleElementsKind(Subclass::kind())) {
+ // Search for NaN in PACKED_DOUBLE_ELEMENTS or
+ // HOLEY_DOUBLE_ELEMENTS --- Skip The Hole and trust
+ // std::isnan(elementK) for result
+ auto elements = FixedDoubleArray::cast(receiver->elements());
+
+ for (uint32_t k = start_from; k < length; ++k) {
+ if (elements.is_the_hole(k)) {
+ continue;
+ }
+ if (std::isnan(elements.get_scalar(k))) return Just(true);
+ }
+ return Just(false);
+ } else {
+ // Search for NaN in PACKED_ELEMENTS, HOLEY_ELEMENTS,
+ // PACKED_SMI_ELEMENTS or HOLEY_SMI_ELEMENTS. Return true if
+ // elementK->IsHeapNumber() && std::isnan(elementK->Number())
+ DCHECK(IsSmiOrObjectElementsKind(Subclass::kind()) ||
+ IsFrozenOrSealedElementsKind(Subclass::kind()));
+ auto elements = FixedArray::cast(receiver->elements());
+
+ for (uint32_t k = start_from; k < length; ++k) {
+ if (elements.get(k).IsNaN()) return Just(true);
+ }
+ return Just(false);
+ }
+ }
+ }
+ }
+
+ static Handle<FixedArray> CreateListFromArrayLikeImpl(Isolate* isolate,
+ Handle<JSObject> object,
+ uint32_t length) {
+ Handle<FixedArray> result = isolate->factory()->NewFixedArray(length);
+ Handle<FixedArrayBase> elements(object->elements(), isolate);
+ for (uint32_t i = 0; i < length; i++) {
+ if (!Subclass::HasElementImpl(isolate, *object, i, *elements)) continue;
+ Handle<Object> value;
+ value = Subclass::GetImpl(isolate, *elements, i);
+ if (value->IsName()) {
+ value = isolate->factory()->InternalizeName(Handle<Name>::cast(value));
+ }
+ result->set(i, *value);
+ }
+ return result;
+ }
+
+ static Handle<Object> RemoveElement(Handle<JSArray> receiver,
+ Where remove_position) {
+ Isolate* isolate = receiver->GetIsolate();
+ ElementsKind kind = KindTraits::Kind;
+ if (IsSmiOrObjectElementsKind(kind)) {
+ HandleScope scope(isolate);
+ JSObject::EnsureWritableFastElements(receiver);
+ }
+ Handle<FixedArrayBase> backing_store(receiver->elements(), isolate);
+ uint32_t length = static_cast<uint32_t>(Smi::ToInt(receiver->length()));
+ DCHECK_GT(length, 0);
+ int new_length = length - 1;
+ int remove_index = remove_position == AT_START ? 0 : new_length;
+ Handle<Object> result =
+ Subclass::GetImpl(isolate, *backing_store, remove_index);
+ if (remove_position == AT_START) {
+ Subclass::MoveElements(isolate, receiver, backing_store, 0, 1, new_length,
+ 0, 0);
+ }
+ Subclass::SetLengthImpl(isolate, receiver, new_length, backing_store);
+
+ if (IsHoleyElementsKind(kind) && result->IsTheHole(isolate)) {
+ return isolate->factory()->undefined_value();
+ }
+ return result;
+ }
+
+ static uint32_t AddArguments(Handle<JSArray> receiver,
+ Handle<FixedArrayBase> backing_store,
+ Arguments* args, uint32_t add_size,
+ Where add_position) {
+ uint32_t length = Smi::ToInt(receiver->length());
+ DCHECK_LT(0, add_size);
+ uint32_t elms_len = backing_store->length();
+    // Check that we do not overflow new_length.
+ DCHECK(add_size <= static_cast<uint32_t>(Smi::kMaxValue - length));
+ uint32_t new_length = length + add_size;
+
+ if (new_length > elms_len) {
+ // New backing storage is needed.
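+      // NewElementsCapacity adds growth headroom (roughly 1.5x the requested
+      // length plus a small constant) so repeated pushes amortize the copies.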
+ uint32_t capacity = JSObject::NewElementsCapacity(new_length);
+ // If we add arguments to the start we have to shift the existing objects.
+ int copy_dst_index = add_position == AT_START ? add_size : 0;
+ // Copy over all objects to a new backing_store.
+ backing_store = Subclass::ConvertElementsWithCapacity(
+ receiver, backing_store, KindTraits::Kind, capacity, 0,
+ copy_dst_index, ElementsAccessor::kCopyToEndAndInitializeToHole);
+ receiver->set_elements(*backing_store);
+ } else if (add_position == AT_START) {
+ // If the backing store has enough capacity and we add elements to the
+ // start we have to shift the existing objects.
+ Isolate* isolate = receiver->GetIsolate();
+ Subclass::MoveElements(isolate, receiver, backing_store, add_size, 0,
+ length, 0, 0);
+ }
+
+ int insertion_index = add_position == AT_START ? 0 : length;
+    // Copy the arguments to the insertion position.
+ Subclass::CopyArguments(args, backing_store, add_size, 1, insertion_index);
+ // Set the length.
+ receiver->set_length(Smi::FromInt(new_length));
+ return new_length;
+ }
+
+ static void CopyArguments(Arguments* args, Handle<FixedArrayBase> dst_store,
+ uint32_t copy_size, uint32_t src_index,
+ uint32_t dst_index) {
+ // Add the provided values.
+ DisallowHeapAllocation no_gc;
+ FixedArrayBase raw_backing_store = *dst_store;
+ WriteBarrierMode mode = raw_backing_store.GetWriteBarrierMode(no_gc);
+ for (uint32_t i = 0; i < copy_size; i++) {
+ Object argument = (*args)[src_index + i];
+ DCHECK(!argument.IsTheHole());
+ Subclass::SetImpl(raw_backing_store, dst_index + i, argument, mode);
+ }
+ }
+};
+
+template <typename Subclass, typename KindTraits>
+class FastSmiOrObjectElementsAccessor
+ : public FastElementsAccessor<Subclass, KindTraits> {
+ public:
+ static inline void SetImpl(Handle<JSObject> holder, uint32_t entry,
+ Object value) {
+ SetImpl(holder->elements(), entry, value);
+ }
+
+ static inline void SetImpl(FixedArrayBase backing_store, uint32_t entry,
+ Object value) {
+ FixedArray::cast(backing_store).set(entry, value);
+ }
+
+ static inline void SetImpl(FixedArrayBase backing_store, uint32_t entry,
+ Object value, WriteBarrierMode mode) {
+ FixedArray::cast(backing_store).set(entry, value, mode);
+ }
+
+ static Object GetRaw(FixedArray backing_store, uint32_t entry) {
+ uint32_t index = Subclass::GetIndexForEntryImpl(backing_store, entry);
+ return backing_store.get(index);
+ }
+
+ // NOTE: this method violates the handlified function signature convention:
+ // raw pointer parameters in the function that allocates.
+ // See ElementsAccessor::CopyElements() for details.
+ // This method could actually allocate if copying from double elements to
+ // object elements.
+ static void CopyElementsImpl(Isolate* isolate, FixedArrayBase from,
+ uint32_t from_start, FixedArrayBase to,
+ ElementsKind from_kind, uint32_t to_start,
+ int packed_size, int copy_size) {
+ DisallowHeapAllocation no_gc;
+ ElementsKind to_kind = KindTraits::Kind;
+ switch (from_kind) {
+ case PACKED_SMI_ELEMENTS:
+ case HOLEY_SMI_ELEMENTS:
+ case PACKED_ELEMENTS:
+ case PACKED_FROZEN_ELEMENTS:
+ case PACKED_SEALED_ELEMENTS:
+ case HOLEY_ELEMENTS:
+ case HOLEY_FROZEN_ELEMENTS:
+ case HOLEY_SEALED_ELEMENTS:
+ CopyObjectToObjectElements(isolate, from, from_kind, from_start, to,
+ to_kind, to_start, copy_size);
+ break;
+ case PACKED_DOUBLE_ELEMENTS:
+ case HOLEY_DOUBLE_ELEMENTS: {
+ AllowHeapAllocation allow_allocation;
+ DCHECK(IsObjectElementsKind(to_kind));
+ CopyDoubleToObjectElements(isolate, from, from_start, to, to_start,
+ copy_size);
+ break;
+ }
+ case DICTIONARY_ELEMENTS:
+ CopyDictionaryToObjectElements(isolate, from, from_start, to, to_kind,
+ to_start, copy_size);
+ break;
+ case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
+ case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
+ case FAST_STRING_WRAPPER_ELEMENTS:
+ case SLOW_STRING_WRAPPER_ELEMENTS:
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype) case TYPE##_ELEMENTS:
+ TYPED_ARRAYS(TYPED_ARRAY_CASE)
+#undef TYPED_ARRAY_CASE
+ // This function is currently only used for JSArrays with non-zero
+ // length.
+ UNREACHABLE();
+ case NO_ELEMENTS:
+ break; // Nothing to do.
+ }
+ }
+
+ static Maybe<bool> CollectValuesOrEntriesImpl(
+ Isolate* isolate, Handle<JSObject> object,
+ Handle<FixedArray> values_or_entries, bool get_entries, int* nof_items,
+ PropertyFilter filter) {
+ int count = 0;
+ if (get_entries) {
+ // Collecting entries needs to allocate, so this code must be handlified.
+ Handle<FixedArray> elements(FixedArray::cast(object->elements()),
+ isolate);
+ uint32_t length = elements->length();
+ for (uint32_t index = 0; index < length; ++index) {
+ if (!Subclass::HasEntryImpl(isolate, *elements, index)) continue;
+ Handle<Object> value = Subclass::GetImpl(isolate, *elements, index);
+ value = MakeEntryPair(isolate, index, value);
+ values_or_entries->set(count++, *value);
+ }
+ } else {
+ // No allocations here, so we can avoid handlification overhead.
+ DisallowHeapAllocation no_gc;
+ FixedArray elements = FixedArray::cast(object->elements());
+ uint32_t length = elements.length();
+ for (uint32_t index = 0; index < length; ++index) {
+ if (!Subclass::HasEntryImpl(isolate, elements, index)) continue;
+ Object value = GetRaw(elements, index);
+ values_or_entries->set(count++, value);
+ }
+ }
+ *nof_items = count;
+ return Just(true);
+ }
+
+ static Maybe<int64_t> IndexOfValueImpl(Isolate* isolate,
+ Handle<JSObject> receiver,
+ Handle<Object> search_value,
+ uint32_t start_from, uint32_t length) {
+ DCHECK(JSObject::PrototypeHasNoElements(isolate, *receiver));
+ DisallowHeapAllocation no_gc;
+ FixedArrayBase elements_base = receiver->elements();
+ Object value = *search_value;
+
+ if (start_from >= length) return Just<int64_t>(-1);
+
+ length = std::min(static_cast<uint32_t>(elements_base.length()), length);
+
+    // Only PACKED_ELEMENTS, HOLEY_ELEMENTS, and their frozen or sealed
+    // variants can store non-numbers.
+ if (!value.IsNumber() && !IsObjectElementsKind(Subclass::kind()) &&
+ !IsFrozenOrSealedElementsKind(Subclass::kind())) {
+ return Just<int64_t>(-1);
+ }
+ // NaN can never be found by strict equality.
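+    // (This matches Array.prototype.indexOf: [NaN].indexOf(NaN) === -1.)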
+ if (value.IsNaN()) return Just<int64_t>(-1);
+
+ // k can be greater than receiver->length() below, but it is bounded by
+ // elements_base->length() so we never read out of bounds. This means that
+ // elements->get(k) can return the hole, for which the StrictEquals will
+ // always fail.
+ FixedArray elements = FixedArray::cast(receiver->elements());
+ for (uint32_t k = start_from; k < length; ++k) {
+ if (value.StrictEquals(elements.get(k))) return Just<int64_t>(k);
+ }
+ return Just<int64_t>(-1);
+ }
+};
+
+class FastPackedSmiElementsAccessor
+ : public FastSmiOrObjectElementsAccessor<
+ FastPackedSmiElementsAccessor,
+ ElementsKindTraits<PACKED_SMI_ELEMENTS>> {};
+
+class FastHoleySmiElementsAccessor
+ : public FastSmiOrObjectElementsAccessor<
+ FastHoleySmiElementsAccessor,
+ ElementsKindTraits<HOLEY_SMI_ELEMENTS>> {};
+
+class FastPackedObjectElementsAccessor
+ : public FastSmiOrObjectElementsAccessor<
+ FastPackedObjectElementsAccessor,
+ ElementsKindTraits<PACKED_ELEMENTS>> {};
+
+template <typename Subclass, typename KindTraits>
+class FastSealedObjectElementsAccessor
+ : public FastSmiOrObjectElementsAccessor<Subclass, KindTraits> {
+ public:
+ using BackingStore = typename KindTraits::BackingStore;
+
+ static Handle<Object> RemoveElement(Handle<JSArray> receiver,
+ Where remove_position) {
+ UNREACHABLE();
+ }
+
+ static void DeleteImpl(Handle<JSObject> obj, uint32_t entry) {
+ UNREACHABLE();
+ }
+
+ static void DeleteAtEnd(Handle<JSObject> obj,
+ Handle<BackingStore> backing_store, uint32_t entry) {
+ UNREACHABLE();
+ }
+
+ static void DeleteCommon(Handle<JSObject> obj, uint32_t entry,
+ Handle<FixedArrayBase> store) {
+ UNREACHABLE();
+ }
+
+ static Handle<Object> PopImpl(Handle<JSArray> receiver) { UNREACHABLE(); }
+
+ static uint32_t PushImpl(Handle<JSArray> receiver, Arguments* args,
+ uint32_t push_size) {
+ UNREACHABLE();
+ }
+
+ static void AddImpl(Handle<JSObject> object, uint32_t index,
+ Handle<Object> value, PropertyAttributes attributes,
+ uint32_t new_capacity) {
+ UNREACHABLE();
+ }
+
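+  // Illustrative: `const a = Object.seal([1, 2, 3]); a.length = 0;` reaches
+  // this path. The sealed elements are non-configurable, so the delegated
+  // DictionaryElementsAccessor::SetLengthImpl clamps the length back to 3.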
+ static void SetLengthImpl(Isolate* isolate, Handle<JSArray> array,
+ uint32_t length,
+ Handle<FixedArrayBase> backing_store) {
+ uint32_t old_length = 0;
+ CHECK(array->length().ToArrayIndex(&old_length));
+ if (length == old_length) {
+ // Do nothing.
+ return;
+ }
+
+    // Transition to DICTIONARY_ELEMENTS by converting the elements to
+    // dictionary mode.
+ Handle<NumberDictionary> new_element_dictionary =
+ old_length == 0 ? isolate->factory()->empty_slow_element_dictionary()
+ : array->GetElementsAccessor()->Normalize(array);
+
+ // Migrate map.
+ Handle<Map> new_map = Map::Copy(isolate, handle(array->map(), isolate),
+ "SlowCopyForSetLengthImpl");
+ new_map->set_is_extensible(false);
+ new_map->set_elements_kind(DICTIONARY_ELEMENTS);
+ JSObject::MigrateToMap(array, new_map);
+
+ if (!new_element_dictionary.is_null()) {
+ array->set_elements(*new_element_dictionary);
+ }
+
+ if (array->elements() !=
+ ReadOnlyRoots(isolate).empty_slow_element_dictionary()) {
+ Handle<NumberDictionary> dictionary(array->element_dictionary(), isolate);
+      // Make sure we never go back to the fast case.
+ array->RequireSlowElements(*dictionary);
+ JSObject::ApplyAttributesToDictionary(isolate, ReadOnlyRoots(isolate),
+ dictionary,
+ PropertyAttributes::SEALED);
+ }
+
+    // Set the new length.
+ Handle<FixedArrayBase> new_backing_store(array->elements(), isolate);
+ DictionaryElementsAccessor::SetLengthImpl(isolate, array, length,
+ new_backing_store);
+ }
+};
+
+class FastPackedSealedObjectElementsAccessor
+ : public FastSealedObjectElementsAccessor<
+ FastPackedSealedObjectElementsAccessor,
+ ElementsKindTraits<PACKED_SEALED_ELEMENTS>> {
+};
+
+class FastHoleySealedObjectElementsAccessor
+ : public FastSealedObjectElementsAccessor<
+ FastHoleySealedObjectElementsAccessor,
+ ElementsKindTraits<HOLEY_SEALED_ELEMENTS>> {
+};
+
+template <typename Subclass, typename KindTraits>
+class FastFrozenObjectElementsAccessor
+ : public FastSmiOrObjectElementsAccessor<Subclass, KindTraits> {
+ public:
+ using BackingStore = typename KindTraits::BackingStore;
+
+ static inline void SetImpl(Handle<JSObject> holder, uint32_t entry,
+ Object value) {
+ UNREACHABLE();
+ }
+
+ static inline void SetImpl(FixedArrayBase backing_store, uint32_t entry,
+ Object value) {
+ UNREACHABLE();
+ }
+
+ static inline void SetImpl(FixedArrayBase backing_store, uint32_t entry,
+ Object value, WriteBarrierMode mode) {
+ UNREACHABLE();
+ }
+
+ static Handle<Object> RemoveElement(Handle<JSArray> receiver,
+ Where remove_position) {
+ UNREACHABLE();
+ }
+
+ static void DeleteImpl(Handle<JSObject> obj, uint32_t entry) {
+ UNREACHABLE();
+ }
+
+ static void DeleteAtEnd(Handle<JSObject> obj,
+ Handle<BackingStore> backing_store, uint32_t entry) {
+ UNREACHABLE();
+ }
+
+ static void DeleteCommon(Handle<JSObject> obj, uint32_t entry,
+ Handle<FixedArrayBase> store) {
+ UNREACHABLE();
+ }
+
+ static Handle<Object> PopImpl(Handle<JSArray> receiver) { UNREACHABLE(); }
+
+ static uint32_t PushImpl(Handle<JSArray> receiver, Arguments* args,
+ uint32_t push_size) {
+ UNREACHABLE();
+ }
+
+ static void AddImpl(Handle<JSObject> object, uint32_t index,
+ Handle<Object> value, PropertyAttributes attributes,
+ uint32_t new_capacity) {
+ UNREACHABLE();
+ }
+
+ static void SetLengthImpl(Isolate* isolate, Handle<JSArray> array,
+ uint32_t length,
+ Handle<FixedArrayBase> backing_store) {
+ UNREACHABLE();
+ }
+
+ static void ReconfigureImpl(Handle<JSObject> object,
+ Handle<FixedArrayBase> store, uint32_t entry,
+ Handle<Object> value,
+ PropertyAttributes attributes) {
+ UNREACHABLE();
+ }
+};
+
+class FastPackedFrozenObjectElementsAccessor
+ : public FastFrozenObjectElementsAccessor<
+ FastPackedFrozenObjectElementsAccessor,
+ ElementsKindTraits<PACKED_FROZEN_ELEMENTS>> {
+};
+
+class FastHoleyFrozenObjectElementsAccessor
+ : public FastFrozenObjectElementsAccessor<
+ FastHoleyFrozenObjectElementsAccessor,
+ ElementsKindTraits<HOLEY_FROZEN_ELEMENTS>> {
+};
+
+class FastHoleyObjectElementsAccessor
+ : public FastSmiOrObjectElementsAccessor<
+ FastHoleyObjectElementsAccessor, ElementsKindTraits<HOLEY_ELEMENTS>> {
+};
+
+template <typename Subclass, typename KindTraits>
+class FastDoubleElementsAccessor
+ : public FastElementsAccessor<Subclass, KindTraits> {
+ public:
+ static Handle<Object> GetImpl(Isolate* isolate, FixedArrayBase backing_store,
+ uint32_t entry) {
+ return FixedDoubleArray::get(FixedDoubleArray::cast(backing_store), entry,
+ isolate);
+ }
+
+ static inline void SetImpl(Handle<JSObject> holder, uint32_t entry,
+ Object value) {
+ SetImpl(holder->elements(), entry, value);
+ }
+
+ static inline void SetImpl(FixedArrayBase backing_store, uint32_t entry,
+ Object value) {
+ FixedDoubleArray::cast(backing_store).set(entry, value.Number());
+ }
+
+ static inline void SetImpl(FixedArrayBase backing_store, uint32_t entry,
+ Object value, WriteBarrierMode mode) {
+ FixedDoubleArray::cast(backing_store).set(entry, value.Number());
+ }
+
+ static void CopyElementsImpl(Isolate* isolate, FixedArrayBase from,
+ uint32_t from_start, FixedArrayBase to,
+ ElementsKind from_kind, uint32_t to_start,
+ int packed_size, int copy_size) {
+ DisallowHeapAllocation no_allocation;
+ switch (from_kind) {
+ case PACKED_SMI_ELEMENTS:
+ CopyPackedSmiToDoubleElements(from, from_start, to, to_start,
+ packed_size, copy_size);
+ break;
+ case HOLEY_SMI_ELEMENTS:
+ CopySmiToDoubleElements(from, from_start, to, to_start, copy_size);
+ break;
+ case PACKED_DOUBLE_ELEMENTS:
+ case HOLEY_DOUBLE_ELEMENTS:
+ CopyDoubleToDoubleElements(from, from_start, to, to_start, copy_size);
+ break;
+ case PACKED_ELEMENTS:
+ case PACKED_FROZEN_ELEMENTS:
+ case PACKED_SEALED_ELEMENTS:
+ case HOLEY_ELEMENTS:
+ case HOLEY_FROZEN_ELEMENTS:
+ case HOLEY_SEALED_ELEMENTS:
+ CopyObjectToDoubleElements(from, from_start, to, to_start, copy_size);
+ break;
+ case DICTIONARY_ELEMENTS:
+ CopyDictionaryToDoubleElements(isolate, from, from_start, to, to_start,
+ copy_size);
+ break;
+ case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
+ case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
+ case FAST_STRING_WRAPPER_ELEMENTS:
+ case SLOW_STRING_WRAPPER_ELEMENTS:
+ case NO_ELEMENTS:
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype) case TYPE##_ELEMENTS:
+ TYPED_ARRAYS(TYPED_ARRAY_CASE)
+#undef TYPED_ARRAY_CASE
+ // This function is currently only used for JSArrays with non-zero
+ // length.
+ UNREACHABLE();
+ }
+ }
+
+ static Maybe<bool> CollectValuesOrEntriesImpl(
+ Isolate* isolate, Handle<JSObject> object,
+ Handle<FixedArray> values_or_entries, bool get_entries, int* nof_items,
+ PropertyFilter filter) {
+ Handle<FixedDoubleArray> elements(
+ FixedDoubleArray::cast(object->elements()), isolate);
+ int count = 0;
+ uint32_t length = elements->length();
+ for (uint32_t index = 0; index < length; ++index) {
+ if (!Subclass::HasEntryImpl(isolate, *elements, index)) continue;
+ Handle<Object> value = Subclass::GetImpl(isolate, *elements, index);
+ if (get_entries) {
+ value = MakeEntryPair(isolate, index, value);
+ }
+ values_or_entries->set(count++, *value);
+ }
+ *nof_items = count;
+ return Just(true);
+ }
+
+ static Maybe<int64_t> IndexOfValueImpl(Isolate* isolate,
+ Handle<JSObject> receiver,
+ Handle<Object> search_value,
+ uint32_t start_from, uint32_t length) {
+ DCHECK(JSObject::PrototypeHasNoElements(isolate, *receiver));
+ DisallowHeapAllocation no_gc;
+ FixedArrayBase elements_base = receiver->elements();
+ Object value = *search_value;
+
+ length = std::min(static_cast<uint32_t>(elements_base.length()), length);
+
+ if (start_from >= length) return Just<int64_t>(-1);
+
+ if (!value.IsNumber()) {
+ return Just<int64_t>(-1);
+ }
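+    // Strict equality never matches NaN, so indexOf cannot find it; at the
+    // JS level, [NaN].indexOf(NaN) === -1.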
+ if (value.IsNaN()) {
+ return Just<int64_t>(-1);
+ }
+ double numeric_search_value = value.Number();
+ FixedDoubleArray elements = FixedDoubleArray::cast(receiver->elements());
+
+ for (uint32_t k = start_from; k < length; ++k) {
+ if (elements.is_the_hole(k)) {
+ continue;
+ }
+ if (elements.get_scalar(k) == numeric_search_value) {
+ return Just<int64_t>(k);
+ }
+ }
+ return Just<int64_t>(-1);
+ }
+};
+
+class FastPackedDoubleElementsAccessor
+ : public FastDoubleElementsAccessor<
+ FastPackedDoubleElementsAccessor,
+ ElementsKindTraits<PACKED_DOUBLE_ELEMENTS>> {};
+
+class FastHoleyDoubleElementsAccessor
+ : public FastDoubleElementsAccessor<
+ FastHoleyDoubleElementsAccessor,
+ ElementsKindTraits<HOLEY_DOUBLE_ELEMENTS>> {};
+
+// Superclass for all external element arrays.
+template <ElementsKind Kind, typename ElementType>
+class TypedElementsAccessor
+ : public ElementsAccessorBase<TypedElementsAccessor<Kind, ElementType>,
+ ElementsKindTraits<Kind>> {
+ public:
+ using BackingStore = typename ElementsKindTraits<Kind>::BackingStore;
+ using AccessorClass = TypedElementsAccessor<Kind, ElementType>;
+
+ // Conversions from (other) scalar values.
+ static ElementType FromScalar(int value) {
+ return static_cast<ElementType>(value);
+ }
+ static ElementType FromScalar(uint32_t value) {
+ return static_cast<ElementType>(value);
+ }
+ static ElementType FromScalar(double value) {
+ return FromScalar(DoubleToInt32(value));
+ }
+ static ElementType FromScalar(int64_t value) { UNREACHABLE(); }
+ static ElementType FromScalar(uint64_t value) { UNREACHABLE(); }
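+  // The int64_t/uint64_t overloads are only meaningful for the BigInt element
+  // kinds; the TypedElementsAccessor<BIG(U)INT64_ELEMENTS, ...> instantiations
+  // specialize them further down in this file.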
+
+ // Conversions from objects / handles.
+ static ElementType FromObject(Object value, bool* lossless = nullptr) {
+ if (value.IsSmi()) {
+ return FromScalar(Smi::ToInt(value));
+ } else if (value.IsHeapNumber()) {
+ return FromScalar(HeapNumber::cast(value).value());
+ } else {
+ // Clamp undefined here as well. All other types have been
+ // converted to a number type further up in the call chain.
+ DCHECK(value.IsUndefined());
+ return FromScalar(Oddball::cast(value).to_number_raw());
+ }
+ }
+ static ElementType FromHandle(Handle<Object> value,
+ bool* lossless = nullptr) {
+ return FromObject(*value, lossless);
+ }
+
+ // Conversion of scalar value to handlified object.
+ static Handle<Object> ToHandle(Isolate* isolate, ElementType value);
+
+ static void SetImpl(Handle<JSObject> holder, uint32_t entry, Object value) {
+ Handle<JSTypedArray> typed_array = Handle<JSTypedArray>::cast(holder);
+ DCHECK_LE(entry, typed_array->length());
+ SetImpl(static_cast<ElementType*>(typed_array->DataPtr()), entry,
+ FromObject(value));
+ }
+
+ static void SetImpl(ElementType* data_ptr, size_t entry, ElementType value) {
+ // The JavaScript memory model allows for racy reads and writes to a
+ // SharedArrayBuffer's backing store. ThreadSanitizer will catch these
+ // racy accesses and warn about them, so we disable TSAN for these reads
+ // and writes using annotations.
+ //
+ // We don't use relaxed atomics here, as it is not a requirement of the
+ // JavaScript memory model to have tear-free reads of overlapping accesses,
+ // and using relaxed atomics may introduce overhead.
+ TSAN_ANNOTATE_IGNORE_WRITES_BEGIN;
+ if (COMPRESS_POINTERS_BOOL && alignof(ElementType) > kTaggedSize) {
+      // TODO(ishell, v8:8875): When pointer compression is enabled, 8-byte
+      // fields (external pointers, doubles and BigInt data) are only
+      // kTaggedSize-aligned, so we have to access them in an unaligned-pointer
+      // friendly way to avoid undefined behavior in C++ code.
+ WriteUnalignedValue<ElementType>(
+ reinterpret_cast<Address>(data_ptr + entry), value);
+ } else {
+ data_ptr[entry] = value;
+ }
+ TSAN_ANNOTATE_IGNORE_WRITES_END;
+ }
+
+ static Handle<Object> GetInternalImpl(Handle<JSObject> holder,
+ uint32_t entry) {
+ Handle<JSTypedArray> typed_array = Handle<JSTypedArray>::cast(holder);
+ Isolate* isolate = typed_array->GetIsolate();
+ DCHECK_LE(entry, typed_array->length());
+ DCHECK(!typed_array->WasDetached());
+ ElementType elem =
+ GetImpl(static_cast<ElementType*>(typed_array->DataPtr()), entry);
+ return ToHandle(isolate, elem);
+ }
+
+ static Handle<Object> GetImpl(Isolate* isolate, FixedArrayBase backing_store,
+ uint32_t entry) {
+ UNREACHABLE();
+ }
+
+ static ElementType GetImpl(ElementType* data_ptr, size_t entry) {
+ // The JavaScript memory model allows for racy reads and writes to a
+ // SharedArrayBuffer's backing store. ThreadSanitizer will catch these
+ // racy accesses and warn about them, so we disable TSAN for these reads
+ // and writes using annotations.
+ //
+ // We don't use relaxed atomics here, as it is not a requirement of the
+ // JavaScript memory model to have tear-free reads of overlapping accesses,
+ // and using relaxed atomics may introduce overhead.
+ TSAN_ANNOTATE_IGNORE_READS_BEGIN;
+ ElementType result;
+ if (COMPRESS_POINTERS_BOOL && alignof(ElementType) > kTaggedSize) {
+      // TODO(ishell, v8:8875): When pointer compression is enabled, 8-byte
+      // fields (external pointers, doubles and BigInt data) are only
+      // kTaggedSize-aligned, so we have to access them in an unaligned-pointer
+      // friendly way to avoid undefined behavior in C++ code.
+ result = ReadUnalignedValue<ElementType>(
+ reinterpret_cast<Address>(data_ptr + entry));
+ } else {
+ result = data_ptr[entry];
+ }
+ TSAN_ANNOTATE_IGNORE_READS_END;
+ return result;
+ }
+
+ static PropertyDetails GetDetailsImpl(JSObject holder, uint32_t entry) {
+ return PropertyDetails(kData, DONT_DELETE, PropertyCellType::kNoCell);
+ }
+
+ static PropertyDetails GetDetailsImpl(FixedArrayBase backing_store,
+ uint32_t entry) {
+ return PropertyDetails(kData, DONT_DELETE, PropertyCellType::kNoCell);
+ }
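+  // Every integer-indexed property of a typed array has the same shape: a
+  // writable, enumerable, non-configurable data property. There is no
+  // per-entry state, which is why both overloads above ignore `entry`.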
+
+ static bool HasElementImpl(Isolate* isolate, JSObject holder, uint32_t index,
+ FixedArrayBase backing_store,
+ PropertyFilter filter) {
+ return index < AccessorClass::GetCapacityImpl(holder, backing_store);
+ }
+
+ static bool HasAccessorsImpl(JSObject holder, FixedArrayBase backing_store) {
+ return false;
+ }
+
+ static void SetLengthImpl(Isolate* isolate, Handle<JSArray> array,
+ uint32_t length,
+ Handle<FixedArrayBase> backing_store) {
+ // External arrays do not support changing their length.
+ UNREACHABLE();
+ }
+
+ static void DeleteImpl(Handle<JSObject> obj, uint32_t entry) {
+ UNREACHABLE();
+ }
+
+ static uint32_t GetIndexForEntryImpl(FixedArrayBase backing_store,
+ uint32_t entry) {
+ return entry;
+ }
+
+ static uint32_t GetEntryForIndexImpl(Isolate* isolate, JSObject holder,
+ FixedArrayBase backing_store,
+ uint32_t index, PropertyFilter filter) {
+ return index < AccessorClass::GetCapacityImpl(holder, backing_store)
+ ? index
+ : kMaxUInt32;
+ }
+
+ static uint32_t GetCapacityImpl(JSObject holder,
+ FixedArrayBase backing_store) {
+ JSTypedArray typed_array = JSTypedArray::cast(holder);
+ if (typed_array.WasDetached()) return 0;
+ // TODO(bmeurer, v8:4153): We need to support arbitrary size_t here.
+ return static_cast<uint32_t>(typed_array.length());
+ }
+
+ static uint32_t NumberOfElementsImpl(JSObject receiver,
+ FixedArrayBase backing_store) {
+ return AccessorClass::GetCapacityImpl(receiver, backing_store);
+ }
+
+ static void AddElementsToKeyAccumulatorImpl(Handle<JSObject> receiver,
+ KeyAccumulator* accumulator,
+ AddKeyConversion convert) {
+ Isolate* isolate = receiver->GetIsolate();
+ Handle<FixedArrayBase> elements(receiver->elements(), isolate);
+ uint32_t length = AccessorClass::GetCapacityImpl(*receiver, *elements);
+ for (uint32_t i = 0; i < length; i++) {
+ Handle<Object> value = AccessorClass::GetInternalImpl(receiver, i);
+ accumulator->AddKey(value, convert);
+ }
+ }
+
+ static Maybe<bool> CollectValuesOrEntriesImpl(
+ Isolate* isolate, Handle<JSObject> object,
+ Handle<FixedArray> values_or_entries, bool get_entries, int* nof_items,
+ PropertyFilter filter) {
+ int count = 0;
+ if ((filter & ONLY_CONFIGURABLE) == 0) {
+ Handle<FixedArrayBase> elements(object->elements(), isolate);
+ uint32_t length = AccessorClass::GetCapacityImpl(*object, *elements);
+ for (uint32_t index = 0; index < length; ++index) {
+ Handle<Object> value = AccessorClass::GetInternalImpl(object, index);
+ if (get_entries) {
+ value = MakeEntryPair(isolate, index, value);
+ }
+ values_or_entries->set(count++, *value);
+ }
+ }
+ *nof_items = count;
+ return Just(true);
+ }
+
+ static Object FillImpl(Handle<JSObject> receiver, Handle<Object> value,
+ uint32_t start, uint32_t end) {
+ Handle<JSTypedArray> typed_array = Handle<JSTypedArray>::cast(receiver);
+ DCHECK(!typed_array->WasDetached());
+ DCHECK_LE(0, start);
+ DCHECK_LE(start, end);
+ DCHECK_LE(end, typed_array->length());
+ DisallowHeapAllocation no_gc;
+ ElementType scalar = FromHandle(value);
+ ElementType* data = static_cast<ElementType*>(typed_array->DataPtr());
+ if (COMPRESS_POINTERS_BOOL && alignof(ElementType) > kTaggedSize) {
+ // TODO(ishell, v8:8875): See UnalignedSlot<T> for details.
+ std::fill(UnalignedSlot<ElementType>(data + start),
+ UnalignedSlot<ElementType>(data + end), scalar);
+ } else {
+ std::fill(data + start, data + end, scalar);
+ }
+ return *typed_array;
+ }
+
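+  // Note on the detached-buffer handling below: until strict throwing on
+  // detached views is implemented (see the TODO), a detached typed array
+  // behaves for includes() as if it were filled with undefined.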
+ static Maybe<bool> IncludesValueImpl(Isolate* isolate,
+ Handle<JSObject> receiver,
+ Handle<Object> value,
+ uint32_t start_from, uint32_t length) {
+ DisallowHeapAllocation no_gc;
+ JSTypedArray typed_array = JSTypedArray::cast(*receiver);
+
+ // TODO(caitp): return Just(false) here when implementing strict throwing on
+ // detached views.
+ if (typed_array.WasDetached()) {
+ return Just(value->IsUndefined(isolate) && length > start_from);
+ }
+
+ if (value->IsUndefined(isolate) && length > typed_array.length()) {
+ return Just(true);
+ }
+
+    // The prototype has no elements, and we are not searching for the hole;
+    // limit the search to the backing store length.
+ if (typed_array.length() < length) {
+ // TODO(bmeurer, v8:4153): Don't cast to uint32_t here.
+ length = static_cast<uint32_t>(typed_array.length());
+ }
+
+ ElementType typed_search_value;
+ ElementType* data_ptr =
+ reinterpret_cast<ElementType*>(typed_array.DataPtr());
+ if (Kind == BIGINT64_ELEMENTS || Kind == BIGUINT64_ELEMENTS) {
+ if (!value->IsBigInt()) return Just(false);
+ bool lossless;
+ typed_search_value = FromHandle(value, &lossless);
+ if (!lossless) return Just(false);
+ } else {
+ if (!value->IsNumber()) return Just(false);
+ double search_value = value->Number();
+ if (!std::isfinite(search_value)) {
+ // Integral types cannot represent +Inf or NaN.
+ if (Kind < FLOAT32_ELEMENTS || Kind > FLOAT64_ELEMENTS) {
+ return Just(false);
+ }
+ if (std::isnan(search_value)) {
+ for (uint32_t k = start_from; k < length; ++k) {
+ double elem_k =
+ static_cast<double>(AccessorClass::GetImpl(data_ptr, k));
+ if (std::isnan(elem_k)) return Just(true);
+ }
+ return Just(false);
+ }
+ } else if (search_value < std::numeric_limits<ElementType>::lowest() ||
+ search_value > std::numeric_limits<ElementType>::max()) {
+ // Return false if value can't be represented in this space.
+ return Just(false);
+ }
+ typed_search_value = static_cast<ElementType>(search_value);
+ if (static_cast<double>(typed_search_value) != search_value) {
+ return Just(false); // Loss of precision.
+ }
+ }
+
+ for (uint32_t k = start_from; k < length; ++k) {
+ ElementType elem_k = AccessorClass::GetImpl(data_ptr, k);
+ if (elem_k == typed_search_value) return Just(true);
+ }
+ return Just(false);
+ }
+
+ static Maybe<int64_t> IndexOfValueImpl(Isolate* isolate,
+ Handle<JSObject> receiver,
+ Handle<Object> value,
+ uint32_t start_from, uint32_t length) {
+ DisallowHeapAllocation no_gc;
+ JSTypedArray typed_array = JSTypedArray::cast(*receiver);
+
+ if (typed_array.WasDetached()) return Just<int64_t>(-1);
+
+ ElementType typed_search_value;
+
+ ElementType* data_ptr =
+ reinterpret_cast<ElementType*>(typed_array.DataPtr());
+ if (Kind == BIGINT64_ELEMENTS || Kind == BIGUINT64_ELEMENTS) {
+ if (!value->IsBigInt()) return Just<int64_t>(-1);
+ bool lossless;
+ typed_search_value = FromHandle(value, &lossless);
+ if (!lossless) return Just<int64_t>(-1);
+ } else {
+ if (!value->IsNumber()) return Just<int64_t>(-1);
+ double search_value = value->Number();
+ if (!std::isfinite(search_value)) {
+ // Integral types cannot represent +Inf or NaN.
+ if (Kind < FLOAT32_ELEMENTS || Kind > FLOAT64_ELEMENTS) {
+ return Just<int64_t>(-1);
+ }
+ if (std::isnan(search_value)) {
+ return Just<int64_t>(-1);
+ }
+ } else if (search_value < std::numeric_limits<ElementType>::lowest() ||
+ search_value > std::numeric_limits<ElementType>::max()) {
+        // Return -1 if value can't be represented in this ElementsKind.
+ return Just<int64_t>(-1);
+ }
+ typed_search_value = static_cast<ElementType>(search_value);
+ if (static_cast<double>(typed_search_value) != search_value) {
+ return Just<int64_t>(-1); // Loss of precision.
+ }
+ }
+
+    // The prototype has no elements, and we are not searching for the hole;
+    // limit the search to the backing store length.
+ if (typed_array.length() < length) {
+ // TODO(bmeurer, v8:4153): Don't cast to uint32_t here.
+ length = static_cast<uint32_t>(typed_array.length());
+ }
+
+ for (uint32_t k = start_from; k < length; ++k) {
+ ElementType elem_k = AccessorClass::GetImpl(data_ptr, k);
+ if (elem_k == typed_search_value) return Just<int64_t>(k);
+ }
+ return Just<int64_t>(-1);
+ }
+
+ static Maybe<int64_t> LastIndexOfValueImpl(Handle<JSObject> receiver,
+ Handle<Object> value,
+ uint32_t start_from) {
+ DisallowHeapAllocation no_gc;
+ JSTypedArray typed_array = JSTypedArray::cast(*receiver);
+
+ DCHECK(!typed_array.WasDetached());
+
+ ElementType typed_search_value;
+
+ ElementType* data_ptr =
+ reinterpret_cast<ElementType*>(typed_array.DataPtr());
+ if (Kind == BIGINT64_ELEMENTS || Kind == BIGUINT64_ELEMENTS) {
+ if (!value->IsBigInt()) return Just<int64_t>(-1);
+ bool lossless;
+ typed_search_value = FromHandle(value, &lossless);
+ if (!lossless) return Just<int64_t>(-1);
+ } else {
+ if (!value->IsNumber()) return Just<int64_t>(-1);
+ double search_value = value->Number();
+ if (!std::isfinite(search_value)) {
+ if (std::is_integral<ElementType>::value) {
+ // Integral types cannot represent +Inf or NaN.
+ return Just<int64_t>(-1);
+ } else if (std::isnan(search_value)) {
+ // Strict Equality Comparison of NaN is always false.
+ return Just<int64_t>(-1);
+ }
+ } else if (search_value < std::numeric_limits<ElementType>::lowest() ||
+ search_value > std::numeric_limits<ElementType>::max()) {
+ // Return -1 if value can't be represented in this ElementsKind.
+ return Just<int64_t>(-1);
+ }
+ typed_search_value = static_cast<ElementType>(search_value);
+ if (static_cast<double>(typed_search_value) != search_value) {
+ return Just<int64_t>(-1); // Loss of precision.
+ }
+ }
+
+ DCHECK_LT(start_from, typed_array.length());
+ uint32_t k = start_from;
+ do {
+ ElementType elem_k = AccessorClass::GetImpl(data_ptr, k);
+ if (elem_k == typed_search_value) return Just<int64_t>(k);
+ } while (k-- != 0);
+ return Just<int64_t>(-1);
+ }
+
+ static void ReverseImpl(JSObject receiver) {
+ DisallowHeapAllocation no_gc;
+ JSTypedArray typed_array = JSTypedArray::cast(receiver);
+
+ DCHECK(!typed_array.WasDetached());
+
+ size_t len = typed_array.length();
+ if (len == 0) return;
+
+ ElementType* data = static_cast<ElementType*>(typed_array.DataPtr());
+ if (COMPRESS_POINTERS_BOOL && alignof(ElementType) > kTaggedSize) {
+ // TODO(ishell, v8:8875): See UnalignedSlot<T> for details.
+ std::reverse(UnalignedSlot<ElementType>(data),
+ UnalignedSlot<ElementType>(data + len));
+ } else {
+ std::reverse(data, data + len);
+ }
+ }
+
+ static Handle<FixedArray> CreateListFromArrayLikeImpl(Isolate* isolate,
+ Handle<JSObject> object,
+ uint32_t length) {
+ Handle<JSTypedArray> typed_array = Handle<JSTypedArray>::cast(object);
+ Handle<FixedArray> result = isolate->factory()->NewFixedArray(length);
+ for (uint32_t i = 0; i < length; i++) {
+ Handle<Object> value = AccessorClass::GetInternalImpl(typed_array, i);
+ result->set(i, *value);
+ }
+ return result;
+ }
+
+ static void CopyTypedArrayElementsSliceImpl(JSTypedArray source,
+ JSTypedArray destination,
+ size_t start, size_t end) {
+ DisallowHeapAllocation no_gc;
+ DCHECK_EQ(destination.GetElementsKind(), AccessorClass::kind());
+ CHECK(!source.WasDetached());
+ CHECK(!destination.WasDetached());
+ DCHECK_LE(start, end);
+ DCHECK_LE(end, source.length());
+ size_t count = end - start;
+ DCHECK_LE(count, destination.length());
+ ElementType* dest_data = static_cast<ElementType*>(destination.DataPtr());
+ switch (source.GetElementsKind()) {
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype) \
+ case TYPE##_ELEMENTS: { \
+ ctype* source_data = reinterpret_cast<ctype*>(source.DataPtr()) + start; \
+ CopyBetweenBackingStores<TYPE##_ELEMENTS, ctype>(source_data, dest_data, \
+ count); \
+ break; \
+ }
+ TYPED_ARRAYS(TYPED_ARRAY_CASE)
+#undef TYPED_ARRAY_CASE
+ default:
+ UNREACHABLE();
+ break;
+ }
+ }
+
+ static bool HasSimpleRepresentation(ExternalArrayType type) {
+ return !(type == kExternalFloat32Array || type == kExternalFloat64Array ||
+ type == kExternalUint8ClampedArray);
+ }
+
+ template <ElementsKind SourceKind, typename SourceElementType>
+ static void CopyBetweenBackingStores(SourceElementType* source_data_ptr,
+ ElementType* dest_data_ptr,
+ size_t length) {
+ DisallowHeapAllocation no_gc;
+ for (size_t i = 0; i < length; i++) {
+ // We use scalar accessors to avoid boxing/unboxing, so there are no
+ // allocations.
+ SourceElementType source_elem =
+ TypedElementsAccessor<SourceKind, SourceElementType>::GetImpl(
+ source_data_ptr, i);
+ ElementType dest_elem = FromScalar(source_elem);
+ SetImpl(dest_data_ptr, i, dest_elem);
+ }
+ }
+
+ static void CopyElementsFromTypedArray(JSTypedArray source,
+ JSTypedArray destination,
+ size_t length, uint32_t offset) {
+    // The source is a typed array, so there are no ToNumber side effects to
+    // worry about: the source elements are always numbers.
+ DisallowHeapAllocation no_gc;
+
+ CHECK(!source.WasDetached());
+ CHECK(!destination.WasDetached());
+
+ DCHECK_LE(offset, destination.length());
+ DCHECK_LE(length, destination.length() - offset);
+ DCHECK_LE(length, source.length());
+
+ ExternalArrayType source_type = source.type();
+ ExternalArrayType destination_type = destination.type();
+
+ bool same_type = source_type == destination_type;
+ bool same_size = source.element_size() == destination.element_size();
+ bool both_are_simple = HasSimpleRepresentation(source_type) &&
+ HasSimpleRepresentation(destination_type);
+
+ uint8_t* source_data = static_cast<uint8_t*>(source.DataPtr());
+ uint8_t* dest_data = static_cast<uint8_t*>(destination.DataPtr());
+ size_t source_byte_length = source.byte_length();
+ size_t dest_byte_length = destination.byte_length();
+
+ // We can simply copy the backing store if the types are the same, or if
+ // we are converting e.g. Uint8 <-> Int8, as the binary representation
+ // will be the same. This is not the case for floats or clamped Uint8,
+ // which have special conversion operations.
+ if (same_type || (same_size && both_are_simple)) {
+ size_t element_size = source.element_size();
+ std::memmove(dest_data + offset * element_size, source_data,
+ length * element_size);
+ } else {
+ std::unique_ptr<uint8_t[]> cloned_source_elements;
+
+      // If the typed arrays overlap, clone the source.
+ if (dest_data + dest_byte_length > source_data &&
+ source_data + source_byte_length > dest_data) {
+ cloned_source_elements.reset(new uint8_t[source_byte_length]);
+ std::memcpy(cloned_source_elements.get(), source_data,
+ source_byte_length);
+ source_data = cloned_source_elements.get();
+ }
+
+ switch (source.GetElementsKind()) {
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype) \
+ case TYPE##_ELEMENTS: \
+ CopyBetweenBackingStores<TYPE##_ELEMENTS, ctype>( \
+ reinterpret_cast<ctype*>(source_data), \
+ reinterpret_cast<ElementType*>(dest_data) + offset, length); \
+ break;
+ TYPED_ARRAYS(TYPED_ARRAY_CASE)
+ default:
+ UNREACHABLE();
+ break;
+ }
+#undef TYPED_ARRAY_CASE
+ }
+ }
+
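+  // Returns true when reading a hole out of `source` could observably hit
+  // the prototype chain (a JSProxy prototype, a non-initial Array prototype,
+  // or a broken no-elements protector), in which case the fast copy paths
+  // must bail out and take the slow, spec-observable path.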
+ static bool HoleyPrototypeLookupRequired(Isolate* isolate, Context context,
+ JSArray source) {
+ DisallowHeapAllocation no_gc;
+ DisallowJavascriptExecution no_js(isolate);
+
+#ifdef V8_ENABLE_FORCE_SLOW_PATH
+ if (isolate->force_slow_path()) return true;
+#endif
+
+ Object source_proto = source.map().prototype();
+
+    // Null prototypes are OK: we don't need to do prototype chain lookups on
+    // them.
+ if (source_proto.IsNull(isolate)) return false;
+ if (source_proto.IsJSProxy()) return true;
+ if (!context.native_context().is_initial_array_prototype(
+ JSObject::cast(source_proto))) {
+ return true;
+ }
+
+ return !isolate->IsNoElementsProtectorIntact(context);
+ }
+
+ static bool TryCopyElementsFastNumber(Context context, JSArray source,
+ JSTypedArray destination, size_t length,
+ uint32_t offset) {
+ if (Kind == BIGINT64_ELEMENTS || Kind == BIGUINT64_ELEMENTS) return false;
+ Isolate* isolate = source.GetIsolate();
+ DisallowHeapAllocation no_gc;
+ DisallowJavascriptExecution no_js(isolate);
+
+ CHECK(!destination.WasDetached());
+
+ size_t current_length;
+ DCHECK(source.length().IsNumber() &&
+ TryNumberToSize(source.length(), &current_length) &&
+ length <= current_length);
+ USE(current_length);
+
+ size_t dest_length = destination.length();
+ DCHECK(length + offset <= dest_length);
+ USE(dest_length);
+
+ ElementsKind kind = source.GetElementsKind();
+
+ // When we find the hole, we normally have to look up the element on the
+ // prototype chain, which is not handled here and we return false instead.
+ // When the array has the original array prototype, and that prototype has
+ // not been changed in a way that would affect lookups, we can just convert
+ // the hole into undefined.
+ if (HoleyPrototypeLookupRequired(isolate, context, source)) return false;
+
+ Oddball undefined = ReadOnlyRoots(isolate).undefined_value();
+ ElementType* dest_data =
+ reinterpret_cast<ElementType*>(destination.DataPtr()) + offset;
+
+ // Fast-path for packed Smi kind.
+ if (kind == PACKED_SMI_ELEMENTS) {
+ FixedArray source_store = FixedArray::cast(source.elements());
+
+ for (uint32_t i = 0; i < length; i++) {
+ Object elem = source_store.get(i);
+ SetImpl(dest_data, i, FromScalar(Smi::ToInt(elem)));
+ }
+ return true;
+ } else if (kind == HOLEY_SMI_ELEMENTS) {
+ FixedArray source_store = FixedArray::cast(source.elements());
+ for (uint32_t i = 0; i < length; i++) {
+ if (source_store.is_the_hole(isolate, i)) {
+ SetImpl(dest_data, i, FromObject(undefined));
+ } else {
+ Object elem = source_store.get(i);
+ SetImpl(dest_data, i, FromScalar(Smi::ToInt(elem)));
+ }
+ }
+ return true;
+ } else if (kind == PACKED_DOUBLE_ELEMENTS) {
+ // Fast-path for packed double kind. We avoid boxing and then immediately
+ // unboxing the double here by using get_scalar.
+ FixedDoubleArray source_store = FixedDoubleArray::cast(source.elements());
+
+ for (uint32_t i = 0; i < length; i++) {
+ // Use the from_double conversion for this specific TypedArray type,
+ // rather than relying on C++ to convert elem.
+ double elem = source_store.get_scalar(i);
+ SetImpl(dest_data, i, FromScalar(elem));
+ }
+ return true;
+ } else if (kind == HOLEY_DOUBLE_ELEMENTS) {
+ FixedDoubleArray source_store = FixedDoubleArray::cast(source.elements());
+ for (uint32_t i = 0; i < length; i++) {
+ if (source_store.is_the_hole(i)) {
+ SetImpl(dest_data, i, FromObject(undefined));
+ } else {
+ double elem = source_store.get_scalar(i);
+ SetImpl(dest_data, i, FromScalar(elem));
+ }
+ }
+ return true;
+ }
+ return false;
+ }
+
+ static Object CopyElementsHandleSlow(Handle<Object> source,
+ Handle<JSTypedArray> destination,
+ size_t length, uint32_t offset) {
+ Isolate* isolate = destination->GetIsolate();
+ for (size_t i = 0; i < length; i++) {
+ Handle<Object> elem;
+ if (i <= kMaxUInt32) {
+ LookupIterator it(isolate, source, static_cast<uint32_t>(i));
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, elem,
+ Object::GetProperty(&it));
+ } else {
+ char buffer[kDoubleToCStringMinBufferSize];
+ Vector<char> string(buffer, arraysize(buffer));
+ DoubleToCString(static_cast<double>(i), string);
+ Handle<Name> name = isolate->factory()->InternalizeUtf8String(string);
+ LookupIterator it(isolate, source, name);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, elem,
+ Object::GetProperty(&it));
+ }
+ if (Kind == BIGINT64_ELEMENTS || Kind == BIGUINT64_ELEMENTS) {
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, elem,
+ BigInt::FromObject(isolate, elem));
+ } else {
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, elem,
+ Object::ToNumber(isolate, elem));
+ }
+
+ if (V8_UNLIKELY(destination->WasDetached())) {
+ const char* op = "set";
+ const MessageTemplate message = MessageTemplate::kDetachedOperation;
+ Handle<String> operation =
+ isolate->factory()->NewStringFromAsciiChecked(op);
+ THROW_NEW_ERROR_RETURN_FAILURE(isolate,
+ NewTypeError(message, operation));
+ }
+ // The spec says we store the length, then get each element, so we don't
+ // need to check changes to length.
+ // TODO(bmeurer, v8:4153): Remove this static_cast.
+ SetImpl(destination, static_cast<uint32_t>(offset + i), *elem);
+ }
+ return *isolate->factory()->undefined_value();
+ }
+
+ // This doesn't guarantee that the destination array will be completely
+ // filled. The caller must do this by passing a source with equal length, if
+ // that is required.
+ static Object CopyElementsHandleImpl(Handle<Object> source,
+ Handle<JSObject> destination,
+ size_t length, uint32_t offset) {
+ Isolate* isolate = destination->GetIsolate();
+ Handle<JSTypedArray> destination_ta =
+ Handle<JSTypedArray>::cast(destination);
+ DCHECK_LE(offset + length, destination_ta->length());
+ CHECK(!destination_ta->WasDetached());
+
+ if (length == 0) return *isolate->factory()->undefined_value();
+
+ // All conversions from TypedArrays can be done without allocation.
+ if (source->IsJSTypedArray()) {
+ Handle<JSTypedArray> source_ta = Handle<JSTypedArray>::cast(source);
+ ElementsKind source_kind = source_ta->GetElementsKind();
+ bool source_is_bigint =
+ source_kind == BIGINT64_ELEMENTS || source_kind == BIGUINT64_ELEMENTS;
+ bool target_is_bigint =
+ Kind == BIGINT64_ELEMENTS || Kind == BIGUINT64_ELEMENTS;
+ // If we have to copy more elements than we have in the source, we need to
+ // do special handling and conversion; that happens in the slow case.
+ if (source_is_bigint == target_is_bigint && !source_ta->WasDetached() &&
+ length + offset <= source_ta->length()) {
+ CopyElementsFromTypedArray(*source_ta, *destination_ta, length, offset);
+ return *isolate->factory()->undefined_value();
+ }
+ } else if (source->IsJSArray()) {
+ // Fast cases for packed numbers kinds where we don't need to allocate.
+ Handle<JSArray> source_js_array = Handle<JSArray>::cast(source);
+ size_t current_length;
+ if (source_js_array->length().IsNumber() &&
+ TryNumberToSize(source_js_array->length(), &current_length)) {
+ if (length <= current_length) {
+ Handle<JSArray> source_array = Handle<JSArray>::cast(source);
+ if (TryCopyElementsFastNumber(isolate->context(), *source_array,
+ *destination_ta, length, offset)) {
+ return *isolate->factory()->undefined_value();
+ }
+ }
+ }
+ }
+ // Final generic case that handles prototype chain lookups, getters, proxies
+ // and observable side effects via valueOf, etc.
+ return CopyElementsHandleSlow(source, destination_ta, length, offset);
+ }
+};
+
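+// Template specializations of the scalar conversions declared above. Each
+// element kind boxes values differently: the small integer kinds always fit
+// in a Smi, the 32-bit and floating kinds may need a heap number, and the
+// BigInt kinds round-trip through BigInt::From{Int64,Uint64}.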
+// static
+template <>
+Handle<Object> TypedElementsAccessor<INT8_ELEMENTS, int8_t>::ToHandle(
+ Isolate* isolate, int8_t value) {
+ return handle(Smi::FromInt(value), isolate);
+}
+
+// static
+template <>
+Handle<Object> TypedElementsAccessor<UINT8_ELEMENTS, uint8_t>::ToHandle(
+ Isolate* isolate, uint8_t value) {
+ return handle(Smi::FromInt(value), isolate);
+}
+
+// static
+template <>
+Handle<Object> TypedElementsAccessor<INT16_ELEMENTS, int16_t>::ToHandle(
+ Isolate* isolate, int16_t value) {
+ return handle(Smi::FromInt(value), isolate);
+}
+
+// static
+template <>
+Handle<Object> TypedElementsAccessor<UINT16_ELEMENTS, uint16_t>::ToHandle(
+ Isolate* isolate, uint16_t value) {
+ return handle(Smi::FromInt(value), isolate);
+}
+
+// static
+template <>
+Handle<Object> TypedElementsAccessor<INT32_ELEMENTS, int32_t>::ToHandle(
+ Isolate* isolate, int32_t value) {
+ return isolate->factory()->NewNumberFromInt(value);
+}
+
+// static
+template <>
+Handle<Object> TypedElementsAccessor<UINT32_ELEMENTS, uint32_t>::ToHandle(
+ Isolate* isolate, uint32_t value) {
+ return isolate->factory()->NewNumberFromUint(value);
+}
+
+// static
+template <>
+float TypedElementsAccessor<FLOAT32_ELEMENTS, float>::FromScalar(double value) {
+ using limits = std::numeric_limits<float>;
+ if (value > limits::max()) return limits::infinity();
+ if (value < limits::lowest()) return -limits::infinity();
+ return static_cast<float>(value);
+}
+
+// static
+template <>
+Handle<Object> TypedElementsAccessor<FLOAT32_ELEMENTS, float>::ToHandle(
+ Isolate* isolate, float value) {
+ return isolate->factory()->NewNumber(value);
+}
+
+// static
+template <>
+double TypedElementsAccessor<FLOAT64_ELEMENTS, double>::FromScalar(
+ double value) {
+ return value;
+}
+
+// static
+template <>
+Handle<Object> TypedElementsAccessor<FLOAT64_ELEMENTS, double>::ToHandle(
+ Isolate* isolate, double value) {
+ return isolate->factory()->NewNumber(value);
+}
+
+// static
+template <>
+uint8_t TypedElementsAccessor<UINT8_CLAMPED_ELEMENTS, uint8_t>::FromScalar(
+ int value) {
+ if (value < 0x00) return 0x00;
+ if (value > 0xFF) return 0xFF;
+ return static_cast<uint8_t>(value);
+}
+
+// static
+template <>
+uint8_t TypedElementsAccessor<UINT8_CLAMPED_ELEMENTS, uint8_t>::FromScalar(
+ uint32_t value) {
+  // We need this special case for Uint32 -> Uint8Clamped, because the highest
+  // Uint32 values would be negative when cast to int, clamping to 0 rather
+  // than 255.
+ if (value > 0xFF) return 0xFF;
+ return static_cast<uint8_t>(value);
+}
+
+// static
+template <>
+uint8_t TypedElementsAccessor<UINT8_CLAMPED_ELEMENTS, uint8_t>::FromScalar(
+ double value) {
+ // Handle NaNs and less than zero values which clamp to zero.
+ if (!(value > 0)) return 0;
+ if (value > 0xFF) return 0xFF;
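+  // lrint rounds ties to even under the default FE_TONEAREST mode, matching
+  // the spec'd uint8-clamped conversion: 0.5 -> 0, 1.5 -> 2, 2.5 -> 2.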
+ return static_cast<uint8_t>(lrint(value));
+}
+
+// static
+template <>
+Handle<Object> TypedElementsAccessor<UINT8_CLAMPED_ELEMENTS, uint8_t>::ToHandle(
+ Isolate* isolate, uint8_t value) {
+ return handle(Smi::FromInt(value), isolate);
+}
+
+// static
+template <>
+int64_t TypedElementsAccessor<BIGINT64_ELEMENTS, int64_t>::FromScalar(
+ int value) {
+ UNREACHABLE();
+}
+
+// static
+template <>
+int64_t TypedElementsAccessor<BIGINT64_ELEMENTS, int64_t>::FromScalar(
+ uint32_t value) {
+ UNREACHABLE();
+}
+
+// static
+template <>
+int64_t TypedElementsAccessor<BIGINT64_ELEMENTS, int64_t>::FromScalar(
+ double value) {
+ UNREACHABLE();
+}
+
+// static
+template <>
+int64_t TypedElementsAccessor<BIGINT64_ELEMENTS, int64_t>::FromScalar(
+ int64_t value) {
+ return value;
+}
+
+// static
+template <>
+int64_t TypedElementsAccessor<BIGINT64_ELEMENTS, int64_t>::FromScalar(
+ uint64_t value) {
+ return static_cast<int64_t>(value);
+}
+
+// static
+template <>
+int64_t TypedElementsAccessor<BIGINT64_ELEMENTS, int64_t>::FromObject(
+ Object value, bool* lossless) {
+ return BigInt::cast(value).AsInt64(lossless);
+}
+
+// static
+template <>
+Handle<Object> TypedElementsAccessor<BIGINT64_ELEMENTS, int64_t>::ToHandle(
+ Isolate* isolate, int64_t value) {
+ return BigInt::FromInt64(isolate, value);
+}
+
+// static
+template <>
+uint64_t TypedElementsAccessor<BIGUINT64_ELEMENTS, uint64_t>::FromScalar(
+ int value) {
+ UNREACHABLE();
+}
+
+// static
+template <>
+uint64_t TypedElementsAccessor<BIGUINT64_ELEMENTS, uint64_t>::FromScalar(
+ uint32_t value) {
+ UNREACHABLE();
+}
+
+// static
+template <>
+uint64_t TypedElementsAccessor<BIGUINT64_ELEMENTS, uint64_t>::FromScalar(
+ double value) {
+ UNREACHABLE();
+}
+
+// static
+template <>
+uint64_t TypedElementsAccessor<BIGUINT64_ELEMENTS, uint64_t>::FromScalar(
+ int64_t value) {
+ return static_cast<uint64_t>(value);
+}
+
+// static
+template <>
+uint64_t TypedElementsAccessor<BIGUINT64_ELEMENTS, uint64_t>::FromScalar(
+ uint64_t value) {
+ return value;
+}
+
+// static
+template <>
+uint64_t TypedElementsAccessor<BIGUINT64_ELEMENTS, uint64_t>::FromObject(
+ Object value, bool* lossless) {
+ return BigInt::cast(value).AsUint64(lossless);
+}
+
+// static
+template <>
+Handle<Object> TypedElementsAccessor<BIGUINT64_ELEMENTS, uint64_t>::ToHandle(
+ Isolate* isolate, uint64_t value) {
+ return BigInt::FromUint64(isolate, value);
+}
+
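+// Instantiate one accessor alias per typed-array kind; for example, the
+// Uint8 row of TYPED_ARRAYS expands to
+//   using Uint8ElementsAccessor = TypedElementsAccessor<UINT8_ELEMENTS, uint8_t>;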
+#define FIXED_ELEMENTS_ACCESSOR(Type, type, TYPE, ctype) \
+ using Type##ElementsAccessor = TypedElementsAccessor<TYPE##_ELEMENTS, ctype>;
+TYPED_ARRAYS(FIXED_ELEMENTS_ACCESSOR)
+#undef FIXED_ELEMENTS_ACCESSOR
+
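+// Accessor for sloppy-mode mapped arguments. Entries below
+// parameter_map_length may alias the function's context slots, which is what
+// makes the classic aliasing observable at the JS level, e.g. (sketch):
+//   function f(x) { arguments[0] = 7; return x; }
+//   f(1);  // 7 in sloppy mode
+// Reads and writes of mapped entries are forwarded to the context; all other
+// entries defer to ArgumentsAccessor over the arguments backing store.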
+template <typename Subclass, typename ArgumentsAccessor, typename KindTraits>
+class SloppyArgumentsElementsAccessor
+ : public ElementsAccessorBase<Subclass, KindTraits> {
+ public:
+  static Handle<Object> ConvertArgumentsStoreResult(
+      Isolate* isolate, Handle<SloppyArgumentsElements> elements,
+      Handle<Object> result) {
+    UNREACHABLE();
+  }
+
+ static Handle<Object> GetImpl(Isolate* isolate, FixedArrayBase parameters,
+ uint32_t entry) {
+ Handle<SloppyArgumentsElements> elements(
+ SloppyArgumentsElements::cast(parameters), isolate);
+ uint32_t length = elements->parameter_map_length();
+ if (entry < length) {
+ // Read context mapped entry.
+ DisallowHeapAllocation no_gc;
+ Object probe = elements->get_mapped_entry(entry);
+ DCHECK(!probe.IsTheHole(isolate));
+ Context context = elements->context();
+ int context_entry = Smi::ToInt(probe);
+ DCHECK(!context.get(context_entry).IsTheHole(isolate));
+ return handle(context.get(context_entry), isolate);
+ } else {
+      // Entry is not context mapped; defer to the arguments.
+ Handle<Object> result = ArgumentsAccessor::GetImpl(
+ isolate, elements->arguments(), entry - length);
+ return Subclass::ConvertArgumentsStoreResult(isolate, elements, result);
+ }
+ }
+
+ static void TransitionElementsKindImpl(Handle<JSObject> object,
+ Handle<Map> map) {
+ UNREACHABLE();
+ }
+
+ static void GrowCapacityAndConvertImpl(Handle<JSObject> object,
+ uint32_t capacity) {
+ UNREACHABLE();
+ }
+
+ static inline void SetImpl(Handle<JSObject> holder, uint32_t entry,
+ Object value) {
+ SetImpl(holder->elements(), entry, value);
+ }
+
+ static inline void SetImpl(FixedArrayBase store, uint32_t entry,
+ Object value) {
+ SloppyArgumentsElements elements = SloppyArgumentsElements::cast(store);
+ uint32_t length = elements.parameter_map_length();
+ if (entry < length) {
+ // Store context mapped entry.
+ DisallowHeapAllocation no_gc;
+ Object probe = elements.get_mapped_entry(entry);
+ DCHECK(!probe.IsTheHole());
+ Context context = elements.context();
+ int context_entry = Smi::ToInt(probe);
+ DCHECK(!context.get(context_entry).IsTheHole());
+ context.set(context_entry, value);
+ } else {
+      // Entry is not context mapped; defer to the arguments backing store.
+ FixedArray arguments = elements.arguments();
+ Object current = ArgumentsAccessor::GetRaw(arguments, entry - length);
+ if (current.IsAliasedArgumentsEntry()) {
+ AliasedArgumentsEntry alias = AliasedArgumentsEntry::cast(current);
+ Context context = elements.context();
+ int context_entry = alias.aliased_context_slot();
+ DCHECK(!context.get(context_entry).IsTheHole());
+ context.set(context_entry, value);
+ } else {
+ ArgumentsAccessor::SetImpl(arguments, entry - length, value);
+ }
+ }
+ }
+
+ static void SetLengthImpl(Isolate* isolate, Handle<JSArray> array,
+ uint32_t length,
+ Handle<FixedArrayBase> parameter_map) {
+ // Sloppy arguments objects are not arrays.
+ UNREACHABLE();
+ }
+
+ static uint32_t GetCapacityImpl(JSObject holder, FixedArrayBase store) {
+ SloppyArgumentsElements elements = SloppyArgumentsElements::cast(store);
+ FixedArray arguments = elements.arguments();
+ return elements.parameter_map_length() +
+ ArgumentsAccessor::GetCapacityImpl(holder, arguments);
+ }
+
+ static uint32_t GetMaxNumberOfEntries(JSObject holder,
+ FixedArrayBase backing_store) {
+ SloppyArgumentsElements elements =
+ SloppyArgumentsElements::cast(backing_store);
+ FixedArrayBase arguments = elements.arguments();
+ return elements.parameter_map_length() +
+ ArgumentsAccessor::GetMaxNumberOfEntries(holder, arguments);
+ }
+
+ static uint32_t NumberOfElementsImpl(JSObject receiver,
+ FixedArrayBase backing_store) {
+ Isolate* isolate = receiver.GetIsolate();
+ SloppyArgumentsElements elements =
+ SloppyArgumentsElements::cast(backing_store);
+ FixedArrayBase arguments = elements.arguments();
+ uint32_t nof_elements = 0;
+ uint32_t length = elements.parameter_map_length();
+ for (uint32_t entry = 0; entry < length; entry++) {
+ if (HasParameterMapArg(isolate, elements, entry)) nof_elements++;
+ }
+ return nof_elements +
+ ArgumentsAccessor::NumberOfElementsImpl(receiver, arguments);
+ }
+
+ static void AddElementsToKeyAccumulatorImpl(Handle<JSObject> receiver,
+ KeyAccumulator* accumulator,
+ AddKeyConversion convert) {
+ Isolate* isolate = accumulator->isolate();
+ Handle<FixedArrayBase> elements(receiver->elements(), isolate);
+ uint32_t length = GetCapacityImpl(*receiver, *elements);
+ for (uint32_t entry = 0; entry < length; entry++) {
+ if (!HasEntryImpl(isolate, *elements, entry)) continue;
+ Handle<Object> value = GetImpl(isolate, *elements, entry);
+ accumulator->AddKey(value, convert);
+ }
+ }
+
+ static bool HasEntryImpl(Isolate* isolate, FixedArrayBase parameters,
+ uint32_t entry) {
+ SloppyArgumentsElements elements =
+ SloppyArgumentsElements::cast(parameters);
+ uint32_t length = elements.parameter_map_length();
+ if (entry < length) {
+ return HasParameterMapArg(isolate, elements, entry);
+ }
+ FixedArrayBase arguments = elements.arguments();
+ return ArgumentsAccessor::HasEntryImpl(isolate, arguments, entry - length);
+ }
+
+ static bool HasAccessorsImpl(JSObject holder, FixedArrayBase backing_store) {
+ SloppyArgumentsElements elements =
+ SloppyArgumentsElements::cast(backing_store);
+ FixedArray arguments = elements.arguments();
+ return ArgumentsAccessor::HasAccessorsImpl(holder, arguments);
+ }
+
+ static uint32_t GetIndexForEntryImpl(FixedArrayBase parameters,
+ uint32_t entry) {
+ SloppyArgumentsElements elements =
+ SloppyArgumentsElements::cast(parameters);
+ uint32_t length = elements.parameter_map_length();
+ if (entry < length) return entry;
+ FixedArray arguments = elements.arguments();
+ return ArgumentsAccessor::GetIndexForEntryImpl(arguments, entry - length);
+ }
+
+ static uint32_t GetEntryForIndexImpl(Isolate* isolate, JSObject holder,
+ FixedArrayBase parameters,
+ uint32_t index, PropertyFilter filter) {
+ SloppyArgumentsElements elements =
+ SloppyArgumentsElements::cast(parameters);
+ if (HasParameterMapArg(isolate, elements, index)) return index;
+ FixedArray arguments = elements.arguments();
+ uint32_t entry = ArgumentsAccessor::GetEntryForIndexImpl(
+ isolate, holder, arguments, index, filter);
+ if (entry == kMaxUInt32) return kMaxUInt32;
+ // Arguments entries could overlap with the dictionary entries, hence offset
+ // them by the number of context mapped entries.
+ return elements.parameter_map_length() + entry;
+ }
+
+ static PropertyDetails GetDetailsImpl(JSObject holder, uint32_t entry) {
+ SloppyArgumentsElements elements =
+ SloppyArgumentsElements::cast(holder.elements());
+ uint32_t length = elements.parameter_map_length();
+ if (entry < length) {
+ return PropertyDetails(kData, NONE, PropertyCellType::kNoCell);
+ }
+ FixedArray arguments = elements.arguments();
+ return ArgumentsAccessor::GetDetailsImpl(arguments, entry - length);
+ }
+
+ static bool HasParameterMapArg(Isolate* isolate,
+ SloppyArgumentsElements elements,
+ uint32_t index) {
+ uint32_t length = elements.parameter_map_length();
+ if (index >= length) return false;
+ return !elements.get_mapped_entry(index).IsTheHole(isolate);
+ }
+
+ static void DeleteImpl(Handle<JSObject> obj, uint32_t entry) {
+ Handle<SloppyArgumentsElements> elements(
+ SloppyArgumentsElements::cast(obj->elements()), obj->GetIsolate());
+ uint32_t length = elements->parameter_map_length();
+ uint32_t delete_or_entry = entry;
+ if (entry < length) {
+ delete_or_entry = kMaxUInt32;
+ }
+ Subclass::SloppyDeleteImpl(obj, elements, delete_or_entry);
+    // SloppyDeleteImpl allocates a new dictionary elements store. To keep
+    // heap verification happy we postpone clearing out the mapped entry.
+ if (entry < length) {
+ elements->set_mapped_entry(entry,
+ obj->GetReadOnlyRoots().the_hole_value());
+ }
+ }
+
+ static void SloppyDeleteImpl(Handle<JSObject> obj,
+ Handle<SloppyArgumentsElements> elements,
+ uint32_t entry) {
+ // Implemented in subclasses.
+ UNREACHABLE();
+ }
+
+ static void CollectElementIndicesImpl(Handle<JSObject> object,
+ Handle<FixedArrayBase> backing_store,
+ KeyAccumulator* keys) {
+ Isolate* isolate = keys->isolate();
+ uint32_t nof_indices = 0;
+ Handle<FixedArray> indices = isolate->factory()->NewFixedArray(
+ GetCapacityImpl(*object, *backing_store));
+ DirectCollectElementIndicesImpl(isolate, object, backing_store,
+ GetKeysConversion::kKeepNumbers,
+ ENUMERABLE_STRINGS, indices, &nof_indices);
+ SortIndices(isolate, indices, nof_indices);
+ for (uint32_t i = 0; i < nof_indices; i++) {
+ keys->AddKey(indices->get(i));
+ }
+ }
+
+ static Handle<FixedArray> DirectCollectElementIndicesImpl(
+ Isolate* isolate, Handle<JSObject> object,
+ Handle<FixedArrayBase> backing_store, GetKeysConversion convert,
+ PropertyFilter filter, Handle<FixedArray> list, uint32_t* nof_indices,
+ uint32_t insertion_index = 0) {
+ Handle<SloppyArgumentsElements> elements =
+ Handle<SloppyArgumentsElements>::cast(backing_store);
+ uint32_t length = elements->parameter_map_length();
+
+ for (uint32_t i = 0; i < length; ++i) {
+ if (elements->get_mapped_entry(i).IsTheHole(isolate)) continue;
+ if (convert == GetKeysConversion::kConvertToString) {
+ Handle<String> index_string = isolate->factory()->Uint32ToString(i);
+ list->set(insertion_index, *index_string);
+ } else {
+ list->set(insertion_index, Smi::FromInt(i));
+ }
+ insertion_index++;
+ }
+
+ Handle<FixedArray> store(elements->arguments(), isolate);
+ return ArgumentsAccessor::DirectCollectElementIndicesImpl(
+ isolate, object, store, convert, filter, list, nof_indices,
+ insertion_index);
+ }
+
+ static Maybe<bool> IncludesValueImpl(Isolate* isolate,
+ Handle<JSObject> object,
+ Handle<Object> value,
+ uint32_t start_from, uint32_t length) {
+ DCHECK(JSObject::PrototypeHasNoElements(isolate, *object));
+ Handle<Map> original_map(object->map(), isolate);
+ Handle<SloppyArgumentsElements> elements(
+ SloppyArgumentsElements::cast(object->elements()), isolate);
+ bool search_for_hole = value->IsUndefined(isolate);
+
+ for (uint32_t k = start_from; k < length; ++k) {
+ DCHECK_EQ(object->map(), *original_map);
+ uint32_t entry =
+ GetEntryForIndexImpl(isolate, *object, *elements, k, ALL_PROPERTIES);
+ if (entry == kMaxUInt32) {
+ if (search_for_hole) return Just(true);
+ continue;
+ }
+
+ Handle<Object> element_k = Subclass::GetImpl(isolate, *elements, entry);
+
+ if (element_k->IsAccessorPair()) {
+ LookupIterator it(isolate, object, k, LookupIterator::OWN);
+ DCHECK(it.IsFound());
+ DCHECK_EQ(it.state(), LookupIterator::ACCESSOR);
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, element_k,
+ Object::GetPropertyWithAccessor(&it),
+ Nothing<bool>());
+
+ if (value->SameValueZero(*element_k)) return Just(true);
+
+ if (object->map() != *original_map) {
+          // Some mutation occurred in accessor. Abort "fast" path.
+ return IncludesValueSlowPath(isolate, object, value, k + 1, length);
+ }
+ } else if (value->SameValueZero(*element_k)) {
+ return Just(true);
+ }
+ }
+ return Just(false);
+ }
+
+ static Maybe<int64_t> IndexOfValueImpl(Isolate* isolate,
+ Handle<JSObject> object,
+ Handle<Object> value,
+ uint32_t start_from, uint32_t length) {
+ DCHECK(JSObject::PrototypeHasNoElements(isolate, *object));
+ Handle<Map> original_map(object->map(), isolate);
+ Handle<SloppyArgumentsElements> elements(
+ SloppyArgumentsElements::cast(object->elements()), isolate);
+
+ for (uint32_t k = start_from; k < length; ++k) {
+ DCHECK_EQ(object->map(), *original_map);
+ uint32_t entry =
+ GetEntryForIndexImpl(isolate, *object, *elements, k, ALL_PROPERTIES);
+ if (entry == kMaxUInt32) {
+ continue;
+ }
+
+ Handle<Object> element_k = Subclass::GetImpl(isolate, *elements, entry);
+
+ if (element_k->IsAccessorPair()) {
+ LookupIterator it(isolate, object, k, LookupIterator::OWN);
+ DCHECK(it.IsFound());
+ DCHECK_EQ(it.state(), LookupIterator::ACCESSOR);
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, element_k,
+ Object::GetPropertyWithAccessor(&it),
+ Nothing<int64_t>());
+
+ if (value->StrictEquals(*element_k)) {
+ return Just<int64_t>(k);
+ }
+
+ if (object->map() != *original_map) {
+ // Some mutation occurred in accessor. Abort "fast" path.
+ return IndexOfValueSlowPath(isolate, object, value, k + 1, length);
+ }
+ } else if (value->StrictEquals(*element_k)) {
+ return Just<int64_t>(k);
+ }
+ }
+ return Just<int64_t>(-1);
+ }
+};
+
+class SlowSloppyArgumentsElementsAccessor
+ : public SloppyArgumentsElementsAccessor<
+ SlowSloppyArgumentsElementsAccessor, DictionaryElementsAccessor,
+ ElementsKindTraits<SLOW_SLOPPY_ARGUMENTS_ELEMENTS>> {
+ public:
+ static Handle<Object> ConvertArgumentsStoreResult(
+ Isolate* isolate, Handle<SloppyArgumentsElements> elements,
+ Handle<Object> result) {
+ // Elements of the arguments object in slow mode might be slow aliases.
+ if (result->IsAliasedArgumentsEntry()) {
+ DisallowHeapAllocation no_gc;
+ AliasedArgumentsEntry alias = AliasedArgumentsEntry::cast(*result);
+ Context context = elements->context();
+ int context_entry = alias.aliased_context_slot();
+ DCHECK(!context.get(context_entry).IsTheHole(isolate));
+ return handle(context.get(context_entry), isolate);
+ }
+ return result;
+ }
+ static void SloppyDeleteImpl(Handle<JSObject> obj,
+ Handle<SloppyArgumentsElements> elements,
+ uint32_t entry) {
+ // No need to delete a context mapped entry from the arguments elements.
+ if (entry == kMaxUInt32) return;
+ Isolate* isolate = obj->GetIsolate();
+ Handle<NumberDictionary> dict(NumberDictionary::cast(elements->arguments()),
+ isolate);
+ int length = elements->parameter_map_length();
+ dict = NumberDictionary::DeleteEntry(isolate, dict, entry - length);
+ elements->set_arguments(*dict);
+ }
+ static void AddImpl(Handle<JSObject> object, uint32_t index,
+ Handle<Object> value, PropertyAttributes attributes,
+ uint32_t new_capacity) {
+ Isolate* isolate = object->GetIsolate();
+ Handle<SloppyArgumentsElements> elements(
+ SloppyArgumentsElements::cast(object->elements()), isolate);
+ Handle<FixedArrayBase> old_arguments(
+ FixedArrayBase::cast(elements->arguments()), isolate);
+ Handle<NumberDictionary> dictionary =
+ old_arguments->IsNumberDictionary()
+ ? Handle<NumberDictionary>::cast(old_arguments)
+ : JSObject::NormalizeElements(object);
+ PropertyDetails details(kData, attributes, PropertyCellType::kNoCell);
+ Handle<NumberDictionary> new_dictionary =
+ NumberDictionary::Add(isolate, dictionary, index, value, details);
+ if (attributes != NONE) object->RequireSlowElements(*new_dictionary);
+ if (*dictionary != *new_dictionary) {
+ elements->set_arguments(*new_dictionary);
+ }
+ }
+
+ static void ReconfigureImpl(Handle<JSObject> object,
+ Handle<FixedArrayBase> store, uint32_t entry,
+ Handle<Object> value,
+ PropertyAttributes attributes) {
+ Isolate* isolate = object->GetIsolate();
+ Handle<SloppyArgumentsElements> elements =
+ Handle<SloppyArgumentsElements>::cast(store);
+ uint32_t length = elements->parameter_map_length();
+ if (entry < length) {
+ Object probe = elements->get_mapped_entry(entry);
+ DCHECK(!probe.IsTheHole(isolate));
+ Context context = elements->context();
+ int context_entry = Smi::ToInt(probe);
+ DCHECK(!context.get(context_entry).IsTheHole(isolate));
+ context.set(context_entry, *value);
+
+ // Redefining attributes of an aliased element destroys fast aliasing.
+ elements->set_mapped_entry(entry,
+ ReadOnlyRoots(isolate).the_hole_value());
+ // For elements that are still writable we re-establish slow aliasing.
+ if ((attributes & READ_ONLY) == 0) {
+ value = isolate->factory()->NewAliasedArgumentsEntry(context_entry);
+ }
+
+ PropertyDetails details(kData, attributes, PropertyCellType::kNoCell);
+ Handle<NumberDictionary> arguments(
+ NumberDictionary::cast(elements->arguments()), isolate);
+ arguments =
+ NumberDictionary::Add(isolate, arguments, entry, value, details);
+ // If the attributes were NONE, we would have called set rather than
+ // reconfigure.
+ DCHECK_NE(NONE, attributes);
+ object->RequireSlowElements(*arguments);
+ elements->set_arguments(*arguments);
+ } else {
+ Handle<FixedArrayBase> arguments(elements->arguments(), isolate);
+ DictionaryElementsAccessor::ReconfigureImpl(
+ object, arguments, entry - length, value, attributes);
+ }
+ }
+};
+
+class FastSloppyArgumentsElementsAccessor
+ : public SloppyArgumentsElementsAccessor<
+ FastSloppyArgumentsElementsAccessor, FastHoleyObjectElementsAccessor,
+ ElementsKindTraits<FAST_SLOPPY_ARGUMENTS_ELEMENTS>> {
+ public:
+ static Handle<Object> ConvertArgumentsStoreResult(
+      Isolate* isolate, Handle<SloppyArgumentsElements> elements,
+ Handle<Object> result) {
+ DCHECK(!result->IsAliasedArgumentsEntry());
+ return result;
+ }
+
+ static Handle<FixedArray> GetArguments(Isolate* isolate,
+ FixedArrayBase store) {
+ SloppyArgumentsElements elements = SloppyArgumentsElements::cast(store);
+ return Handle<FixedArray>(elements.arguments(), isolate);
+ }
+
+ static Handle<NumberDictionary> NormalizeImpl(
+ Handle<JSObject> object, Handle<FixedArrayBase> elements) {
+ Handle<FixedArray> arguments =
+ GetArguments(object->GetIsolate(), *elements);
+ return FastHoleyObjectElementsAccessor::NormalizeImpl(object, arguments);
+ }
+
+ static Handle<NumberDictionary> NormalizeArgumentsElements(
+ Handle<JSObject> object, Handle<SloppyArgumentsElements> elements,
+ uint32_t* entry) {
+ Handle<NumberDictionary> dictionary = JSObject::NormalizeElements(object);
+ elements->set_arguments(*dictionary);
+    // kMaxUInt32 indicates that a context mapped element got deleted. In this
+    // case we only normalize the elements (i.e. migrate to SLOW_SLOPPY).
+ if (*entry == kMaxUInt32) return dictionary;
+ uint32_t length = elements->parameter_map_length();
+ if (*entry >= length) {
+ *entry =
+ dictionary->FindEntry(object->GetIsolate(), *entry - length) + length;
+ }
+ return dictionary;
+ }
+
+ static void SloppyDeleteImpl(Handle<JSObject> obj,
+ Handle<SloppyArgumentsElements> elements,
+ uint32_t entry) {
+    // Always normalize the elements when deleting an entry.
+ NormalizeArgumentsElements(obj, elements, &entry);
+ SlowSloppyArgumentsElementsAccessor::SloppyDeleteImpl(obj, elements, entry);
+ }
+
+ static void AddImpl(Handle<JSObject> object, uint32_t index,
+ Handle<Object> value, PropertyAttributes attributes,
+ uint32_t new_capacity) {
+ DCHECK_EQ(NONE, attributes);
+ Isolate* isolate = object->GetIsolate();
+ Handle<SloppyArgumentsElements> elements(
+ SloppyArgumentsElements::cast(object->elements()), isolate);
+ Handle<FixedArray> old_arguments(elements->arguments(), isolate);
+ if (old_arguments->IsNumberDictionary() ||
+ static_cast<uint32_t>(old_arguments->length()) < new_capacity) {
+ GrowCapacityAndConvertImpl(object, new_capacity);
+ }
+ FixedArray arguments = elements->arguments();
+ // For fast holey objects, the entry equals the index. The code above made
+ // sure that there's enough space to store the value. We cannot convert
+ // index to entry explicitly since the slot still contains the hole, so the
+ // current EntryForIndex would indicate that it is "absent" by returning
+ // kMaxUInt32.
+ FastHoleyObjectElementsAccessor::SetImpl(arguments, index, *value);
+ }
+
+ static void ReconfigureImpl(Handle<JSObject> object,
+ Handle<FixedArrayBase> store, uint32_t entry,
+ Handle<Object> value,
+ PropertyAttributes attributes) {
+ DCHECK_EQ(object->elements(), *store);
+ Handle<SloppyArgumentsElements> elements(
+ SloppyArgumentsElements::cast(*store), object->GetIsolate());
+ NormalizeArgumentsElements(object, elements, &entry);
+ SlowSloppyArgumentsElementsAccessor::ReconfigureImpl(object, store, entry,
+ value, attributes);
+ }
+
+ static void CopyElementsImpl(Isolate* isolate, FixedArrayBase from,
+ uint32_t from_start, FixedArrayBase to,
+ ElementsKind from_kind, uint32_t to_start,
+ int packed_size, int copy_size) {
+ DCHECK(!to.IsNumberDictionary());
+ if (from_kind == SLOW_SLOPPY_ARGUMENTS_ELEMENTS) {
+ CopyDictionaryToObjectElements(isolate, from, from_start, to,
+ HOLEY_ELEMENTS, to_start, copy_size);
+ } else {
+ DCHECK_EQ(FAST_SLOPPY_ARGUMENTS_ELEMENTS, from_kind);
+ CopyObjectToObjectElements(isolate, from, HOLEY_ELEMENTS, from_start, to,
+ HOLEY_ELEMENTS, to_start, copy_size);
+ }
+ }
+
+ static void GrowCapacityAndConvertImpl(Handle<JSObject> object,
+ uint32_t capacity) {
+ Isolate* isolate = object->GetIsolate();
+ Handle<SloppyArgumentsElements> elements(
+ SloppyArgumentsElements::cast(object->elements()), isolate);
+ Handle<FixedArray> old_arguments(FixedArray::cast(elements->arguments()),
+ isolate);
+ ElementsKind from_kind = object->GetElementsKind();
+ // This method should only be called if there's a reason to update the
+ // elements.
+ DCHECK(from_kind == SLOW_SLOPPY_ARGUMENTS_ELEMENTS ||
+ static_cast<uint32_t>(old_arguments->length()) < capacity);
+ Handle<FixedArrayBase> arguments =
+ ConvertElementsWithCapacity(object, old_arguments, from_kind, capacity);
+ Handle<Map> new_map = JSObject::GetElementsTransitionMap(
+ object, FAST_SLOPPY_ARGUMENTS_ELEMENTS);
+ JSObject::MigrateToMap(object, new_map);
+ elements->set_arguments(FixedArray::cast(*arguments));
+ JSObject::ValidateElements(*object);
+ }
+};
+
+template <typename Subclass, typename BackingStoreAccessor, typename KindTraits>
+class StringWrapperElementsAccessor
+ : public ElementsAccessorBase<Subclass, KindTraits> {
+ public:
+ static Handle<Object> GetInternalImpl(Handle<JSObject> holder,
+ uint32_t entry) {
+ return GetImpl(holder, entry);
+ }
+
+ static Handle<Object> GetImpl(Handle<JSObject> holder, uint32_t entry) {
+ Isolate* isolate = holder->GetIsolate();
+ Handle<String> string(GetString(*holder), isolate);
+ uint32_t length = static_cast<uint32_t>(string->length());
+ if (entry < length) {
+ return isolate->factory()->LookupSingleCharacterStringFromCode(
+ String::Flatten(isolate, string)->Get(entry));
+ }
+ return BackingStoreAccessor::GetImpl(isolate, holder->elements(),
+ entry - length);
+ }
+
+ static Handle<Object> GetImpl(Isolate* isolate, FixedArrayBase elements,
+ uint32_t entry) {
+ UNREACHABLE();
+ }
+
+ static PropertyDetails GetDetailsImpl(JSObject holder, uint32_t entry) {
+ uint32_t length = static_cast<uint32_t>(GetString(holder).length());
+ if (entry < length) {
+ PropertyAttributes attributes =
+ static_cast<PropertyAttributes>(READ_ONLY | DONT_DELETE);
+ return PropertyDetails(kData, attributes, PropertyCellType::kNoCell);
+ }
+ return BackingStoreAccessor::GetDetailsImpl(holder, entry - length);
+ }
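+
+  // For illustration: given s = new String("ab"),
+  // Object.getOwnPropertyDescriptor(s, 0) reports { writable: false,
+  // enumerable: true, configurable: false }, matching the READ_ONLY |
+  // DONT_DELETE attributes above.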
+
+ static uint32_t GetEntryForIndexImpl(Isolate* isolate, JSObject holder,
+ FixedArrayBase backing_store,
+ uint32_t index, PropertyFilter filter) {
+ uint32_t length = static_cast<uint32_t>(GetString(holder).length());
+ if (index < length) return index;
+ uint32_t backing_store_entry = BackingStoreAccessor::GetEntryForIndexImpl(
+ isolate, holder, backing_store, index, filter);
+ if (backing_store_entry == kMaxUInt32) return kMaxUInt32;
+ DCHECK(backing_store_entry < kMaxUInt32 - length);
+ return backing_store_entry + length;
+ }
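+
+  // For illustration (hypothetical setup): given s = new String("ab") with
+  // an extra element s[5] in the backing store, indices 0 and 1 map to
+  // entries 0 and 1 (the string characters), while index 5 maps to the
+  // backing store's entry for index 5 shifted up by the string length (2).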
+
+ static void DeleteImpl(Handle<JSObject> holder, uint32_t entry) {
+ uint32_t length = static_cast<uint32_t>(GetString(*holder).length());
+ if (entry < length) {
+ return; // String contents can't be deleted.
+ }
+ BackingStoreAccessor::DeleteImpl(holder, entry - length);
+ }
+
+ static void SetImpl(Handle<JSObject> holder, uint32_t entry, Object value) {
+ uint32_t length = static_cast<uint32_t>(GetString(*holder).length());
+ if (entry < length) {
+ return; // String contents are read-only.
+ }
+ BackingStoreAccessor::SetImpl(holder->elements(), entry - length, value);
+ }
+
+ static void AddImpl(Handle<JSObject> object, uint32_t index,
+ Handle<Object> value, PropertyAttributes attributes,
+ uint32_t new_capacity) {
+ DCHECK(index >= static_cast<uint32_t>(GetString(*object).length()));
+ // Explicitly grow fast backing stores if needed. Dictionaries know how to
+ // extend their capacity themselves.
+ if (KindTraits::Kind == FAST_STRING_WRAPPER_ELEMENTS &&
+ (object->GetElementsKind() == SLOW_STRING_WRAPPER_ELEMENTS ||
+ BackingStoreAccessor::GetCapacityImpl(*object, object->elements()) !=
+ new_capacity)) {
+ GrowCapacityAndConvertImpl(object, new_capacity);
+ }
+ BackingStoreAccessor::AddImpl(object, index, value, attributes,
+ new_capacity);
+ }
+
+ static void ReconfigureImpl(Handle<JSObject> object,
+ Handle<FixedArrayBase> store, uint32_t entry,
+ Handle<Object> value,
+ PropertyAttributes attributes) {
+ uint32_t length = static_cast<uint32_t>(GetString(*object).length());
+ if (entry < length) {
+ return; // String contents can't be reconfigured.
+ }
+ BackingStoreAccessor::ReconfigureImpl(object, store, entry - length, value,
+ attributes);
+ }
+
+ static void AddElementsToKeyAccumulatorImpl(Handle<JSObject> receiver,
+ KeyAccumulator* accumulator,
+ AddKeyConversion convert) {
+ Isolate* isolate = receiver->GetIsolate();
+ Handle<String> string(GetString(*receiver), isolate);
+ string = String::Flatten(isolate, string);
+ uint32_t length = static_cast<uint32_t>(string->length());
+ for (uint32_t i = 0; i < length; i++) {
+ accumulator->AddKey(
+ isolate->factory()->LookupSingleCharacterStringFromCode(
+ string->Get(i)),
+ convert);
+ }
+ BackingStoreAccessor::AddElementsToKeyAccumulatorImpl(receiver, accumulator,
+ convert);
+ }
+
+ static void CollectElementIndicesImpl(Handle<JSObject> object,
+ Handle<FixedArrayBase> backing_store,
+ KeyAccumulator* keys) {
+ uint32_t length = GetString(*object).length();
+ Factory* factory = keys->isolate()->factory();
+ for (uint32_t i = 0; i < length; i++) {
+ keys->AddKey(factory->NewNumberFromUint(i));
+ }
+ BackingStoreAccessor::CollectElementIndicesImpl(object, backing_store,
+ keys);
+ }
+
+ static void GrowCapacityAndConvertImpl(Handle<JSObject> object,
+ uint32_t capacity) {
+ Handle<FixedArrayBase> old_elements(object->elements(),
+ object->GetIsolate());
+ ElementsKind from_kind = object->GetElementsKind();
+ if (from_kind == FAST_STRING_WRAPPER_ELEMENTS) {
+ // The optimizing compiler relies on the prototype lookups of String
+ // objects always returning undefined. If there's a store to the
+ // initial String.prototype object, make sure all the optimizations
+ // are invalidated.
+ object->GetIsolate()->UpdateNoElementsProtectorOnSetLength(object);
+ }
+ // This method should only be called if there's a reason to update the
+ // elements.
+ DCHECK(from_kind == SLOW_STRING_WRAPPER_ELEMENTS ||
+ static_cast<uint32_t>(old_elements->length()) < capacity);
+ Subclass::BasicGrowCapacityAndConvertImpl(object, old_elements, from_kind,
+ FAST_STRING_WRAPPER_ELEMENTS,
+ capacity);
+ }
+
+ static void CopyElementsImpl(Isolate* isolate, FixedArrayBase from,
+ uint32_t from_start, FixedArrayBase to,
+ ElementsKind from_kind, uint32_t to_start,
+ int packed_size, int copy_size) {
+ DCHECK(!to.IsNumberDictionary());
+ if (from_kind == SLOW_STRING_WRAPPER_ELEMENTS) {
+ CopyDictionaryToObjectElements(isolate, from, from_start, to,
+ HOLEY_ELEMENTS, to_start, copy_size);
+ } else {
+ DCHECK_EQ(FAST_STRING_WRAPPER_ELEMENTS, from_kind);
+ CopyObjectToObjectElements(isolate, from, HOLEY_ELEMENTS, from_start, to,
+ HOLEY_ELEMENTS, to_start, copy_size);
+ }
+ }
+
+ static uint32_t NumberOfElementsImpl(JSObject object,
+ FixedArrayBase backing_store) {
+ uint32_t length = GetString(object).length();
+ return length +
+ BackingStoreAccessor::NumberOfElementsImpl(object, backing_store);
+ }
+
+ private:
+ static String GetString(JSObject holder) {
+ DCHECK(holder.IsJSValue());
+ JSValue js_value = JSValue::cast(holder);
+ DCHECK(js_value.value().IsString());
+ return String::cast(js_value.value());
+ }
+};
+
+class FastStringWrapperElementsAccessor
+ : public StringWrapperElementsAccessor<
+ FastStringWrapperElementsAccessor, FastHoleyObjectElementsAccessor,
+ ElementsKindTraits<FAST_STRING_WRAPPER_ELEMENTS>> {
+ public:
+ static Handle<NumberDictionary> NormalizeImpl(
+ Handle<JSObject> object, Handle<FixedArrayBase> elements) {
+ return FastHoleyObjectElementsAccessor::NormalizeImpl(object, elements);
+ }
+};
+
+class SlowStringWrapperElementsAccessor
+ : public StringWrapperElementsAccessor<
+ SlowStringWrapperElementsAccessor, DictionaryElementsAccessor,
+ ElementsKindTraits<SLOW_STRING_WRAPPER_ELEMENTS>> {
+ public:
+ static bool HasAccessorsImpl(JSObject holder, FixedArrayBase backing_store) {
+ return DictionaryElementsAccessor::HasAccessorsImpl(holder, backing_store);
+ }
+};
+
+} // namespace
+
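+// For illustration, the JS-level behaviour implemented below:
+//   new Array()          - empty array with a small preallocated backing
+//                          store
+//   new Array(5)         - length 5, holey elements, no values stored
+//   new Array(2 ** 32)   - throws RangeError (invalid array length)
+//   new Array(1, 2, 3)   - elements [1, 2, 3], length 3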
+MaybeHandle<Object> ArrayConstructInitializeElements(Handle<JSArray> array,
+ Arguments* args) {
+ if (args->length() == 0) {
+ // Optimize the case where there are no parameters passed.
+ JSArray::Initialize(array, JSArray::kPreallocatedArrayElements);
+ return array;
+
+ } else if (args->length() == 1 && args->at(0)->IsNumber()) {
+ uint32_t length;
+ if (!args->at(0)->ToArrayLength(&length)) {
+ return ThrowArrayLengthRangeError(array->GetIsolate());
+ }
+
+ // Optimize the case where there is one argument and the argument is a small
+ // smi.
+ if (length > 0 && length < JSArray::kInitialMaxFastElementArray) {
+ ElementsKind elements_kind = array->GetElementsKind();
+ JSArray::Initialize(array, length, length);
+
+ if (!IsHoleyElementsKind(elements_kind)) {
+ elements_kind = GetHoleyElementsKind(elements_kind);
+ JSObject::TransitionElementsKind(array, elements_kind);
+ }
+ } else if (length == 0) {
+ JSArray::Initialize(array, JSArray::kPreallocatedArrayElements);
+ } else {
+ // Take the argument as the length.
+ JSArray::Initialize(array, 0);
+ JSArray::SetLength(array, length);
+ }
+ return array;
+ }
+
+ Factory* factory = array->GetIsolate()->factory();
+
+ // Set length and elements on the array.
+ int number_of_elements = args->length();
+ JSObject::EnsureCanContainElements(array, args, 0, number_of_elements,
+ ALLOW_CONVERTED_DOUBLE_ELEMENTS);
+
+ // Allocate an appropriately typed elements array.
+ ElementsKind elements_kind = array->GetElementsKind();
+ Handle<FixedArrayBase> elms;
+ if (IsDoubleElementsKind(elements_kind)) {
+ elms = Handle<FixedArrayBase>::cast(
+ factory->NewFixedDoubleArray(number_of_elements));
+ } else {
+ elms = Handle<FixedArrayBase>::cast(
+ factory->NewFixedArrayWithHoles(number_of_elements));
+ }
+
+ // Fill in the content
+ switch (elements_kind) {
+ case HOLEY_SMI_ELEMENTS:
+ case PACKED_SMI_ELEMENTS: {
+ Handle<FixedArray> smi_elms = Handle<FixedArray>::cast(elms);
+ for (int entry = 0; entry < number_of_elements; entry++) {
+ smi_elms->set(entry, (*args)[entry], SKIP_WRITE_BARRIER);
+ }
+ break;
+ }
+ case HOLEY_ELEMENTS:
+ case PACKED_ELEMENTS: {
+ DisallowHeapAllocation no_gc;
+ WriteBarrierMode mode = elms->GetWriteBarrierMode(no_gc);
+ Handle<FixedArray> object_elms = Handle<FixedArray>::cast(elms);
+ for (int entry = 0; entry < number_of_elements; entry++) {
+ object_elms->set(entry, (*args)[entry], mode);
+ }
+ break;
+ }
+ case HOLEY_DOUBLE_ELEMENTS:
+ case PACKED_DOUBLE_ELEMENTS: {
+ Handle<FixedDoubleArray> double_elms =
+ Handle<FixedDoubleArray>::cast(elms);
+ for (int entry = 0; entry < number_of_elements; entry++) {
+ double_elms->set(entry, (*args)[entry].Number());
+ }
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+
+ array->set_elements(*elms);
+ array->set_length(Smi::FromInt(number_of_elements));
+ return array;
+}
+
+void CopyFastNumberJSArrayElementsToTypedArray(Address raw_context,
+ Address raw_source,
+ Address raw_destination,
+ uintptr_t length,
+ uintptr_t offset) {
+ Context context = Context::cast(Object(raw_context));
+ JSArray source = JSArray::cast(Object(raw_source));
+ JSTypedArray destination = JSTypedArray::cast(Object(raw_destination));
+
+ switch (destination.GetElementsKind()) {
+#define TYPED_ARRAYS_CASE(Type, type, TYPE, ctype) \
+ case TYPE##_ELEMENTS: \
+ CHECK(Type##ElementsAccessor::TryCopyElementsFastNumber( \
+ context, source, destination, length, static_cast<uint32_t>(offset))); \
+ break;
+ TYPED_ARRAYS(TYPED_ARRAYS_CASE)
+#undef TYPED_ARRAYS_CASE
+ default:
+ UNREACHABLE();
+ }
+}
+
+void CopyTypedArrayElementsToTypedArray(Address raw_source,
+ Address raw_destination,
+ uintptr_t length, uintptr_t offset) {
+ JSTypedArray source = JSTypedArray::cast(Object(raw_source));
+ JSTypedArray destination = JSTypedArray::cast(Object(raw_destination));
+
+ switch (destination.GetElementsKind()) {
+#define TYPED_ARRAYS_CASE(Type, type, TYPE, ctype) \
+ case TYPE##_ELEMENTS: \
+ Type##ElementsAccessor::CopyElementsFromTypedArray( \
+ source, destination, length, static_cast<uint32_t>(offset)); \
+ break;
+ TYPED_ARRAYS(TYPED_ARRAYS_CASE)
+#undef TYPED_ARRAYS_CASE
+ default:
+ UNREACHABLE();
+ }
+}
+
+void CopyTypedArrayElementsSlice(Address raw_source, Address raw_destination,
+ uintptr_t start, uintptr_t end) {
+ JSTypedArray source = JSTypedArray::cast(Object(raw_source));
+ JSTypedArray destination = JSTypedArray::cast(Object(raw_destination));
+
+ destination.GetElementsAccessor()->CopyTypedArrayElementsSlice(
+ source, destination, start, end);
+}
+
+void ElementsAccessor::InitializeOncePerProcess() {
+ static ElementsAccessor* accessor_array[] = {
+#define ACCESSOR_ARRAY(Class, Kind, Store) new Class(),
+ ELEMENTS_LIST(ACCESSOR_ARRAY)
+#undef ACCESSOR_ARRAY
+ };
+
+ STATIC_ASSERT((sizeof(accessor_array) / sizeof(*accessor_array)) ==
+ kElementsKindCount);
+
+ elements_accessors_ = accessor_array;
+}
+
+void ElementsAccessor::TearDown() {
+ if (elements_accessors_ == nullptr) return;
+#define ACCESSOR_DELETE(Class, Kind, Store) delete elements_accessors_[Kind];
+ ELEMENTS_LIST(ACCESSOR_DELETE)
+#undef ACCESSOR_DELETE
+ elements_accessors_ = nullptr;
+}
+
+Handle<JSArray> ElementsAccessor::Concat(Isolate* isolate, Arguments* args,
+ uint32_t concat_size,
+ uint32_t result_len) {
+ ElementsKind result_elements_kind = GetInitialFastElementsKind();
+ bool has_raw_doubles = false;
+ {
+ DisallowHeapAllocation no_gc;
+ bool is_holey = false;
+ for (uint32_t i = 0; i < concat_size; i++) {
+ Object arg = (*args)[i];
+ ElementsKind arg_kind = JSArray::cast(arg).GetElementsKind();
+ has_raw_doubles = has_raw_doubles || IsDoubleElementsKind(arg_kind);
+ is_holey = is_holey || IsHoleyElementsKind(arg_kind);
+ result_elements_kind =
+ GetMoreGeneralElementsKind(result_elements_kind, arg_kind);
+ }
+ if (is_holey) {
+ result_elements_kind = GetHoleyElementsKind(result_elements_kind);
+ }
+ }
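+  // For example: concatenating PACKED_SMI_ELEMENTS with
+  // PACKED_DOUBLE_ELEMENTS generalizes to PACKED_DOUBLE_ELEMENTS, and any
+  // holey input makes the result HOLEY_DOUBLE_ELEMENTS.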
+
+  // If a double array is concatenated into a fast elements array, the fast
+  // elements array needs to be initialized to contain proper holes, since
+  // boxing the doubles may allocate and thus trigger incremental marking.
+ bool requires_double_boxing =
+ has_raw_doubles && !IsDoubleElementsKind(result_elements_kind);
+ ArrayStorageAllocationMode mode = requires_double_boxing
+ ? INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE
+ : DONT_INITIALIZE_ARRAY_ELEMENTS;
+ Handle<JSArray> result_array = isolate->factory()->NewJSArray(
+ result_elements_kind, result_len, result_len, mode);
+ if (result_len == 0) return result_array;
+
+ uint32_t insertion_index = 0;
+ Handle<FixedArrayBase> storage(result_array->elements(), isolate);
+ ElementsAccessor* accessor = ElementsAccessor::ForKind(result_elements_kind);
+ for (uint32_t i = 0; i < concat_size; i++) {
+ // It is crucial to keep |array| in a raw pointer form to avoid
+ // performance degradation.
+ JSArray array = JSArray::cast((*args)[i]);
+ uint32_t len = 0;
+ array.length().ToArrayLength(&len);
+ if (len == 0) continue;
+ ElementsKind from_kind = array.GetElementsKind();
+ accessor->CopyElements(array, 0, from_kind, storage, insertion_index, len);
+ insertion_index += len;
+ }
+
+ DCHECK_EQ(insertion_index, result_len);
+ return result_array;
+}
+
+ElementsAccessor** ElementsAccessor::elements_accessors_ = nullptr;
+
+#undef ELEMENTS_LIST
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/objects/elements.h b/deps/v8/src/objects/elements.h
new file mode 100644
index 0000000000..844cd2ed94
--- /dev/null
+++ b/deps/v8/src/objects/elements.h
@@ -0,0 +1,241 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_ELEMENTS_H_
+#define V8_OBJECTS_ELEMENTS_H_
+
+#include "src/objects/elements-kind.h"
+#include "src/objects/keys.h"
+#include "src/objects/objects.h"
+
+namespace v8 {
+namespace internal {
+
+class JSTypedArray;
+
+// Abstract base class for accessors that can operate on objects with
+// differing ElementsKinds.
+class ElementsAccessor {
+ public:
+ ElementsAccessor() = default;
+ virtual ~ElementsAccessor() = default;
+
+ // Returns a shared ElementsAccessor for the specified ElementsKind.
+ static ElementsAccessor* ForKind(ElementsKind elements_kind) {
+ DCHECK_LT(static_cast<int>(elements_kind), kElementsKindCount);
+ return elements_accessors_[elements_kind];
+ }
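+  // Minimal usage sketch (assuming |object| is a Handle<JSObject> and
+  // |entry| is a valid entry in its backing store):
+  //   ElementsAccessor* accessor =
+  //       ElementsAccessor::ForKind(object->GetElementsKind());
+  //   Handle<Object> value = accessor->Get(object, entry);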
+
+ // Checks the elements of an object for consistency, asserting when a problem
+ // is found.
+ virtual void Validate(JSObject obj) = 0;
+
+ // Returns true if a holder contains an element with the specified index
+ // without iterating up the prototype chain. The caller can optionally pass
+ // in the backing store to use for the check, which must be compatible with
+  // the ElementsKind of the ElementsAccessor. If backing_store is nullptr,
+  // holder->elements() is used as the backing store. If a |filter| is
+  // specified, the PropertyAttributes of the element at the given index
+  // are compared to the given |filter|; if they match/overlap, the given
+  // index is ignored. Note that only Dictionary elements have custom
+ // PropertyAttributes associated, hence the |filter| argument is ignored for
+ // all but DICTIONARY_ELEMENTS and SLOW_SLOPPY_ARGUMENTS_ELEMENTS.
+ virtual bool HasElement(JSObject holder, uint32_t index,
+ FixedArrayBase backing_store,
+ PropertyFilter filter = ALL_PROPERTIES) = 0;
+
+ inline bool HasElement(JSObject holder, uint32_t index,
+ PropertyFilter filter = ALL_PROPERTIES);
+
+ // Note: this is currently not implemented for string wrapper and
+ // typed array elements.
+ virtual bool HasEntry(JSObject holder, uint32_t entry) = 0;
+
+ // TODO(cbruni): HasEntry and Get should not be exposed publicly with the
+ // entry parameter.
+ virtual Handle<Object> Get(Handle<JSObject> holder, uint32_t entry) = 0;
+
+ virtual bool HasAccessors(JSObject holder) = 0;
+ virtual uint32_t NumberOfElements(JSObject holder) = 0;
+
+ // Modifies the length data property as specified for JSArrays and resizes the
+ // underlying backing store accordingly. The method honors the semantics of
+  // changing array sizes as defined in ECMAScript 5.1 15.4.5.2, i.e. arrays
+  // that have non-deletable elements can only be shrunk to the size of the
+  // highest non-deletable element.
+ virtual void SetLength(Handle<JSArray> holder, uint32_t new_length) = 0;
+
+  // If kCopyToEnd is specified as the copy_size to CopyElements, it copies
+  // all of the elements from |source| after |source_start| to the
+  // destination array.
+ static const int kCopyToEnd = -1;
+ // If kCopyToEndAndInitializeToHole is specified as the copy_size to
+  // CopyElements, it copies all of the elements from |source| after
+  // |source_start| to the destination array, padding any remaining
+  // uninitialized elements in the destination array with the hole.
+ static const int kCopyToEndAndInitializeToHole = -2;
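+  // For illustration (hypothetical 8-element source, source_start == 3):
+  // kCopyToEnd copies source entries 3..7 and leaves trailing destination
+  // entries untouched, while kCopyToEndAndInitializeToHole additionally
+  // writes the hole into every remaining destination entry.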
+
+ // Copy all indices that have elements from |object| into the given
+ // KeyAccumulator. For Dictionary-based element-kinds we filter out elements
+  // whose PropertyAttributes match |filter|.
+ virtual void CollectElementIndices(Handle<JSObject> object,
+ Handle<FixedArrayBase> backing_store,
+ KeyAccumulator* keys) = 0;
+
+ inline void CollectElementIndices(Handle<JSObject> object,
+ KeyAccumulator* keys);
+
+ virtual Maybe<bool> CollectValuesOrEntries(
+ Isolate* isolate, Handle<JSObject> object,
+ Handle<FixedArray> values_or_entries, bool get_entries, int* nof_items,
+ PropertyFilter filter = ALL_PROPERTIES) = 0;
+
+ virtual MaybeHandle<FixedArray> PrependElementIndices(
+ Handle<JSObject> object, Handle<FixedArrayBase> backing_store,
+ Handle<FixedArray> keys, GetKeysConversion convert,
+ PropertyFilter filter = ALL_PROPERTIES) = 0;
+
+ inline MaybeHandle<FixedArray> PrependElementIndices(
+ Handle<JSObject> object, Handle<FixedArray> keys,
+ GetKeysConversion convert, PropertyFilter filter = ALL_PROPERTIES);
+
+ virtual void AddElementsToKeyAccumulator(Handle<JSObject> receiver,
+ KeyAccumulator* accumulator,
+ AddKeyConversion convert) = 0;
+
+ virtual void TransitionElementsKind(Handle<JSObject> object,
+ Handle<Map> map) = 0;
+ virtual void GrowCapacityAndConvert(Handle<JSObject> object,
+ uint32_t capacity) = 0;
+  // Unlike GrowCapacityAndConvert, this does not attempt to convert the
+  // backing store and simply returns false in that case.
+ virtual bool GrowCapacity(Handle<JSObject> object, uint32_t index) = 0;
+
+ static void InitializeOncePerProcess();
+ static void TearDown();
+
+ virtual void Set(Handle<JSObject> holder, uint32_t entry, Object value) = 0;
+
+ virtual void Add(Handle<JSObject> object, uint32_t index,
+ Handle<Object> value, PropertyAttributes attributes,
+ uint32_t new_capacity) = 0;
+
+ static Handle<JSArray> Concat(Isolate* isolate, Arguments* args,
+ uint32_t concat_size, uint32_t result_length);
+
+ virtual uint32_t Push(Handle<JSArray> receiver, Arguments* args,
+ uint32_t push_size) = 0;
+
+ virtual uint32_t Unshift(Handle<JSArray> receiver, Arguments* args,
+ uint32_t unshift_size) = 0;
+
+ virtual Handle<Object> Pop(Handle<JSArray> receiver) = 0;
+
+ virtual Handle<Object> Shift(Handle<JSArray> receiver) = 0;
+
+ virtual Handle<NumberDictionary> Normalize(Handle<JSObject> object) = 0;
+
+ virtual uint32_t GetCapacity(JSObject holder,
+ FixedArrayBase backing_store) = 0;
+
+ virtual Object Fill(Handle<JSObject> receiver, Handle<Object> obj_value,
+ uint32_t start, uint32_t end) = 0;
+
+ // Check an Object's own elements for an element (using SameValueZero
+ // semantics)
+ virtual Maybe<bool> IncludesValue(Isolate* isolate, Handle<JSObject> receiver,
+ Handle<Object> value, uint32_t start,
+ uint32_t length) = 0;
+
+ // Check an Object's own elements for the index of an element (using SameValue
+ // semantics)
+ virtual Maybe<int64_t> IndexOfValue(Isolate* isolate,
+ Handle<JSObject> receiver,
+ Handle<Object> value, uint32_t start,
+ uint32_t length) = 0;
+
+ virtual Maybe<int64_t> LastIndexOfValue(Handle<JSObject> receiver,
+ Handle<Object> value,
+ uint32_t start) = 0;
+
+ virtual void Reverse(JSObject receiver) = 0;
+
+ virtual void CopyElements(Isolate* isolate, Handle<FixedArrayBase> source,
+ ElementsKind source_kind,
+ Handle<FixedArrayBase> destination, int size) = 0;
+
+ virtual Object CopyElements(Handle<Object> source,
+ Handle<JSObject> destination, size_t length,
+ uint32_t offset = 0) = 0;
+
+ virtual Handle<FixedArray> CreateListFromArrayLike(Isolate* isolate,
+ Handle<JSObject> object,
+ uint32_t length) = 0;
+
+ virtual void CopyTypedArrayElementsSlice(JSTypedArray source,
+ JSTypedArray destination,
+ size_t start, size_t end) = 0;
+
+ protected:
+ friend class LookupIterator;
+
+ // Element handlers distinguish between entries and indices when they
+ // manipulate elements. Entries refer to elements in terms of their location
+ // in the underlying storage's backing store representation, and are between 0
+ // and GetCapacity. Indices refer to elements in terms of the value that would
+ // be specified in JavaScript to access the element. In most implementations,
+ // indices are equivalent to entries. In the NumberDictionary
+ // ElementsAccessor, entries are mapped to an index using the KeyAt method on
+ // the NumberDictionary.
+ virtual uint32_t GetEntryForIndex(Isolate* isolate, JSObject holder,
+ FixedArrayBase backing_store,
+ uint32_t index) = 0;
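+  // For illustration (assumed dictionary-backed holder): if only index 1000
+  // is present in a NumberDictionary backing store, GetEntryForIndex(1000)
+  // returns that element's dictionary entry (e.g. 0), whereas for the fast
+  // kinds the entry is simply the index itself.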
+
+ virtual PropertyDetails GetDetails(JSObject holder, uint32_t entry) = 0;
+ virtual void Reconfigure(Handle<JSObject> object,
+ Handle<FixedArrayBase> backing_store, uint32_t entry,
+ Handle<Object> value,
+ PropertyAttributes attributes) = 0;
+
+ // Deletes an element in an object.
+ virtual void Delete(Handle<JSObject> holder, uint32_t entry) = 0;
+
+ // NOTE: this method violates the handlified function signature convention:
+ // raw pointer parameter |source_holder| in the function that allocates.
+ // This is done intentionally to avoid ArrayConcat() builtin performance
+ // degradation.
+ virtual void CopyElements(JSObject source_holder, uint32_t source_start,
+ ElementsKind source_kind,
+ Handle<FixedArrayBase> destination,
+ uint32_t destination_start, int copy_size) = 0;
+
+ private:
+ static ElementsAccessor** elements_accessors_;
+
+ DISALLOW_COPY_AND_ASSIGN(ElementsAccessor);
+};
+
+V8_WARN_UNUSED_RESULT MaybeHandle<Object> ArrayConstructInitializeElements(
+ Handle<JSArray> array, Arguments* args);
+
+// Called directly from CSA.
+// {raw_context}: Context pointer.
+// {raw_source}: JSArray pointer.
+// {raw_destination}: JSTypedArray pointer.
+void CopyFastNumberJSArrayElementsToTypedArray(Address raw_context,
+ Address raw_source,
+ Address raw_destination,
+ uintptr_t length,
+ uintptr_t offset);
+// {raw_source}, {raw_destination}: JSTypedArray pointers.
+void CopyTypedArrayElementsToTypedArray(Address raw_source,
+ Address raw_destination,
+ uintptr_t length, uintptr_t offset);
+// {raw_source}, {raw_destination}: JSTypedArray pointers.
+void CopyTypedArrayElementsSlice(Address raw_source, Address raw_destination,
+ uintptr_t start, uintptr_t end);
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_OBJECTS_ELEMENTS_H_
diff --git a/deps/v8/src/objects/embedder-data-array.cc b/deps/v8/src/objects/embedder-data-array.cc
index c85e0b9f31..ba3e92c33c 100644
--- a/deps/v8/src/objects/embedder-data-array.cc
+++ b/deps/v8/src/objects/embedder-data-array.cc
@@ -4,7 +4,7 @@
#include "src/objects/embedder-data-array.h"
-#include "src/isolate.h"
+#include "src/execution/isolate.h"
#include "src/objects/embedder-data-array-inl.h"
namespace v8 {
diff --git a/deps/v8/src/objects/embedder-data-array.h b/deps/v8/src/objects/embedder-data-array.h
index f5ab2fa7ee..ba4fe25465 100644
--- a/deps/v8/src/objects/embedder-data-array.h
+++ b/deps/v8/src/objects/embedder-data-array.h
@@ -5,10 +5,10 @@
#ifndef V8_OBJECTS_EMBEDDER_DATA_ARRAY_H_
#define V8_OBJECTS_EMBEDDER_DATA_ARRAY_H_
-#include "src/globals.h"
-#include "src/maybe-handles.h"
+#include "src/common/globals.h"
+#include "src/handles/maybe-handles.h"
#include "src/objects/heap-object.h"
-#include "torque-generated/class-definitions-from-dsl.h"
+#include "torque-generated/field-offsets-tq.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
diff --git a/deps/v8/src/objects/embedder-data-slot-inl.h b/deps/v8/src/objects/embedder-data-slot-inl.h
index b87f31ac7d..6830a4d22e 100644
--- a/deps/v8/src/objects/embedder-data-slot-inl.h
+++ b/deps/v8/src/objects/embedder-data-slot-inl.h
@@ -7,11 +7,11 @@
#include "src/objects/embedder-data-slot.h"
+#include "src/common/v8memory.h"
#include "src/heap/heap-write-barrier-inl.h"
-#include "src/objects-inl.h"
#include "src/objects/embedder-data-array.h"
#include "src/objects/js-objects-inl.h"
-#include "src/v8memory.h"
+#include "src/objects/objects-inl.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -25,7 +25,7 @@ EmbedderDataSlot::EmbedderDataSlot(EmbedderDataArray array, int entry_index)
EmbedderDataSlot::EmbedderDataSlot(JSObject object, int embedder_field_index)
: SlotBase(FIELD_ADDR(
- object, object->GetEmbedderFieldOffset(embedder_field_index))) {}
+ object, object.GetEmbedderFieldOffset(embedder_field_index))) {}
Object EmbedderDataSlot::load_tagged() const {
return ObjectSlot(address() + kTaggedPayloadOffset).Relaxed_Load();
@@ -56,7 +56,7 @@ void EmbedderDataSlot::store_tagged(EmbedderDataArray array, int entry_index,
// static
void EmbedderDataSlot::store_tagged(JSObject object, int embedder_field_index,
Object value) {
- int slot_offset = object->GetEmbedderFieldOffset(embedder_field_index);
+ int slot_offset = object.GetEmbedderFieldOffset(embedder_field_index);
ObjectSlot(FIELD_ADDR(object, slot_offset + kTaggedPayloadOffset))
.Relaxed_Store(value);
WRITE_BARRIER(object, slot_offset + kTaggedPayloadOffset, value);
diff --git a/deps/v8/src/objects/embedder-data-slot.h b/deps/v8/src/objects/embedder-data-slot.h
index 6cebf28f2d..dee8c3ec56 100644
--- a/deps/v8/src/objects/embedder-data-slot.h
+++ b/deps/v8/src/objects/embedder-data-slot.h
@@ -7,8 +7,8 @@
#include <utility>
-#include "src/assert-scope.h"
-#include "src/globals.h"
+#include "src/common/assert-scope.h"
+#include "src/common/globals.h"
#include "src/objects/slots.h"
// Has to be the last include (doesn't have include guards):
diff --git a/deps/v8/src/objects/feedback-cell-inl.h b/deps/v8/src/objects/feedback-cell-inl.h
index c3902ca9aa..e06cfce7de 100644
--- a/deps/v8/src/objects/feedback-cell-inl.h
+++ b/deps/v8/src/objects/feedback-cell-inl.h
@@ -8,7 +8,7 @@
#include "src/objects/feedback-cell.h"
#include "src/heap/heap-write-barrier-inl.h"
-#include "src/objects-inl.h"
+#include "src/objects/objects-inl.h"
#include "src/objects/struct-inl.h"
// Has to be the last include (doesn't have include guards):
@@ -25,10 +25,15 @@ ACCESSORS(FeedbackCell, value, HeapObject, kValueOffset)
INT32_ACCESSORS(FeedbackCell, interrupt_budget, kInterruptBudgetOffset)
void FeedbackCell::clear_padding() {
- if (FeedbackCell::kSize == FeedbackCell::kUnalignedSize) return;
- DCHECK_GE(FeedbackCell::kSize, FeedbackCell::kUnalignedSize);
+ if (FeedbackCell::kAlignedSize == FeedbackCell::kUnalignedSize) return;
+ DCHECK_GE(FeedbackCell::kAlignedSize, FeedbackCell::kUnalignedSize);
memset(reinterpret_cast<byte*>(address() + FeedbackCell::kUnalignedSize), 0,
- FeedbackCell::kSize - FeedbackCell::kUnalignedSize);
+ FeedbackCell::kAlignedSize - FeedbackCell::kUnalignedSize);
+}
+
+void FeedbackCell::reset() {
+ set_value(GetReadOnlyRoots().undefined_value());
+ set_interrupt_budget(FeedbackCell::GetInitialInterruptBudget());
}
} // namespace internal
diff --git a/deps/v8/src/objects/feedback-cell.h b/deps/v8/src/objects/feedback-cell.h
index a708f4cb92..3c085f72d9 100644
--- a/deps/v8/src/objects/feedback-cell.h
+++ b/deps/v8/src/objects/feedback-cell.h
@@ -37,23 +37,18 @@ class FeedbackCell : public Struct {
DECL_PRINTER(FeedbackCell)
DECL_VERIFIER(FeedbackCell)
-// Layout description.
-#define FEEDBACK_CELL_FIELDS(V) \
- V(kValueOffset, kTaggedSize) \
- /* Non-pointer fields */ \
- V(kInterruptBudgetOffset, kInt32Size) \
- /* Total size. */ \
- V(kUnalignedSize, 0)
+ // Layout description.
+ DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize,
+ TORQUE_GENERATED_FEEDBACK_CELL_FIELDS)
- DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize, FEEDBACK_CELL_FIELDS)
-#undef FEEDBACK_CELL_FIELDS
-
- static const int kSize = RoundUp<kObjectAlignment>(int{kUnalignedSize});
+ static const int kUnalignedSize = kSize;
+ static const int kAlignedSize = RoundUp<kObjectAlignment>(int{kSize});
inline void clear_padding();
+ inline void reset();
using BodyDescriptor =
- FixedBodyDescriptor<kValueOffset, kInterruptBudgetOffset, kSize>;
+ FixedBodyDescriptor<kValueOffset, kInterruptBudgetOffset, kAlignedSize>;
OBJECT_CONSTRUCTORS(FeedbackCell, Struct);
};
diff --git a/deps/v8/src/objects/feedback-vector-inl.h b/deps/v8/src/objects/feedback-vector-inl.h
new file mode 100644
index 0000000000..6b1fdcc1e5
--- /dev/null
+++ b/deps/v8/src/objects/feedback-vector-inl.h
@@ -0,0 +1,354 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_FEEDBACK_VECTOR_INL_H_
+#define V8_OBJECTS_FEEDBACK_VECTOR_INL_H_
+
+#include "src/objects/feedback-vector.h"
+
+#include "src/common/globals.h"
+#include "src/heap/factory-inl.h"
+#include "src/heap/heap-write-barrier-inl.h"
+#include "src/objects/code-inl.h"
+#include "src/objects/maybe-object-inl.h"
+#include "src/objects/shared-function-info.h"
+#include "src/objects/smi.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+OBJECT_CONSTRUCTORS_IMPL(FeedbackVector, HeapObject)
+OBJECT_CONSTRUCTORS_IMPL(FeedbackMetadata, HeapObject)
+OBJECT_CONSTRUCTORS_IMPL(ClosureFeedbackCellArray, FixedArray)
+
+NEVER_READ_ONLY_SPACE_IMPL(FeedbackVector)
+NEVER_READ_ONLY_SPACE_IMPL(ClosureFeedbackCellArray)
+
+CAST_ACCESSOR(FeedbackVector)
+CAST_ACCESSOR(FeedbackMetadata)
+CAST_ACCESSOR(ClosureFeedbackCellArray)
+
+INT32_ACCESSORS(FeedbackMetadata, slot_count, kSlotCountOffset)
+
+INT32_ACCESSORS(FeedbackMetadata, closure_feedback_cell_count,
+ kFeedbackCellCountOffset)
+
+int32_t FeedbackMetadata::synchronized_slot_count() const {
+ return base::Acquire_Load(reinterpret_cast<const base::Atomic32*>(
+ FIELD_ADDR(*this, kSlotCountOffset)));
+}
+
+int32_t FeedbackMetadata::get(int index) const {
+ DCHECK(index >= 0 && index < length());
+ int offset = kHeaderSize + index * kInt32Size;
+ return ReadField<int32_t>(offset);
+}
+
+void FeedbackMetadata::set(int index, int32_t value) {
+ DCHECK(index >= 0 && index < length());
+ int offset = kHeaderSize + index * kInt32Size;
+ WriteField<int32_t>(offset, value);
+}
+
+bool FeedbackMetadata::is_empty() const { return slot_count() == 0; }
+
+int FeedbackMetadata::length() const {
+ return FeedbackMetadata::length(slot_count());
+}
+
+int FeedbackMetadata::GetSlotSize(FeedbackSlotKind kind) {
+ switch (kind) {
+ case FeedbackSlotKind::kForIn:
+ case FeedbackSlotKind::kInstanceOf:
+ case FeedbackSlotKind::kCompareOp:
+ case FeedbackSlotKind::kBinaryOp:
+ case FeedbackSlotKind::kLiteral:
+ case FeedbackSlotKind::kTypeProfile:
+ return 1;
+
+ case FeedbackSlotKind::kCall:
+ case FeedbackSlotKind::kCloneObject:
+ case FeedbackSlotKind::kLoadProperty:
+ case FeedbackSlotKind::kLoadGlobalInsideTypeof:
+ case FeedbackSlotKind::kLoadGlobalNotInsideTypeof:
+ case FeedbackSlotKind::kLoadKeyed:
+ case FeedbackSlotKind::kHasKeyed:
+ case FeedbackSlotKind::kStoreNamedSloppy:
+ case FeedbackSlotKind::kStoreNamedStrict:
+ case FeedbackSlotKind::kStoreOwnNamed:
+ case FeedbackSlotKind::kStoreGlobalSloppy:
+ case FeedbackSlotKind::kStoreGlobalStrict:
+ case FeedbackSlotKind::kStoreKeyedSloppy:
+ case FeedbackSlotKind::kStoreKeyedStrict:
+ case FeedbackSlotKind::kStoreInArrayLiteral:
+ case FeedbackSlotKind::kStoreDataPropertyInLiteral:
+ return 2;
+
+ case FeedbackSlotKind::kInvalid:
+ case FeedbackSlotKind::kKindsNumber:
+ UNREACHABLE();
+ }
+ return 1;
+}
+
+Handle<FeedbackCell> ClosureFeedbackCellArray::GetFeedbackCell(int index) {
+ return handle(FeedbackCell::cast(get(index)), GetIsolate());
+}
+
+ACCESSORS(FeedbackVector, shared_function_info, SharedFunctionInfo,
+ kSharedFunctionInfoOffset)
+WEAK_ACCESSORS(FeedbackVector, optimized_code_weak_or_smi,
+ kOptimizedCodeWeakOrSmiOffset)
+ACCESSORS(FeedbackVector, closure_feedback_cell_array, ClosureFeedbackCellArray,
+ kClosureFeedbackCellArrayOffset)
+INT32_ACCESSORS(FeedbackVector, length, kLengthOffset)
+INT32_ACCESSORS(FeedbackVector, invocation_count, kInvocationCountOffset)
+INT32_ACCESSORS(FeedbackVector, profiler_ticks, kProfilerTicksOffset)
+
+void FeedbackVector::clear_padding() {
+ if (FIELD_SIZE(kPaddingOffset) == 0) return;
+ DCHECK_EQ(4, FIELD_SIZE(kPaddingOffset));
+ memset(reinterpret_cast<void*>(address() + kPaddingOffset), 0,
+ FIELD_SIZE(kPaddingOffset));
+}
+
+bool FeedbackVector::is_empty() const { return length() == 0; }
+
+FeedbackMetadata FeedbackVector::metadata() const {
+ return shared_function_info().feedback_metadata();
+}
+
+void FeedbackVector::clear_invocation_count() { set_invocation_count(0); }
+
+Code FeedbackVector::optimized_code() const {
+ MaybeObject slot = optimized_code_weak_or_smi();
+ DCHECK(slot->IsSmi() || slot->IsWeakOrCleared());
+ HeapObject heap_object;
+ return slot->GetHeapObject(&heap_object) ? Code::cast(heap_object) : Code();
+}
+
+OptimizationMarker FeedbackVector::optimization_marker() const {
+ MaybeObject slot = optimized_code_weak_or_smi();
+ Smi value;
+ if (!slot->ToSmi(&value)) return OptimizationMarker::kNone;
+ return static_cast<OptimizationMarker>(value.value());
+}
+
+bool FeedbackVector::has_optimized_code() const {
+ return !optimized_code().is_null();
+}
+
+bool FeedbackVector::has_optimization_marker() const {
+ return optimization_marker() != OptimizationMarker::kLogFirstExecution &&
+ optimization_marker() != OptimizationMarker::kNone;
+}
+
+// Conversion from an integer index to either a slot or an ic slot.
+// static
+FeedbackSlot FeedbackVector::ToSlot(int index) {
+ DCHECK_GE(index, 0);
+ return FeedbackSlot(index);
+}
+
+MaybeObject FeedbackVector::Get(FeedbackSlot slot) const {
+ return get(GetIndex(slot));
+}
+
+MaybeObject FeedbackVector::get(int index) const {
+ DCHECK_GE(index, 0);
+ DCHECK_LT(index, this->length());
+ int offset = kFeedbackSlotsOffset + index * kTaggedSize;
+ return RELAXED_READ_WEAK_FIELD(*this, offset);
+}
+
+Handle<FeedbackCell> FeedbackVector::GetClosureFeedbackCell(int index) const {
+ DCHECK_GE(index, 0);
+ ClosureFeedbackCellArray cell_array =
+ ClosureFeedbackCellArray::cast(closure_feedback_cell_array());
+ return cell_array.GetFeedbackCell(index);
+}
+
+void FeedbackVector::Set(FeedbackSlot slot, MaybeObject value,
+ WriteBarrierMode mode) {
+ set(GetIndex(slot), value, mode);
+}
+
+void FeedbackVector::set(int index, MaybeObject value, WriteBarrierMode mode) {
+ DCHECK_GE(index, 0);
+ DCHECK_LT(index, this->length());
+ int offset = kFeedbackSlotsOffset + index * kTaggedSize;
+ RELAXED_WRITE_WEAK_FIELD(*this, offset, value);
+ CONDITIONAL_WEAK_WRITE_BARRIER(*this, offset, value, mode);
+}
+
+void FeedbackVector::Set(FeedbackSlot slot, Object value,
+ WriteBarrierMode mode) {
+ set(GetIndex(slot), MaybeObject::FromObject(value), mode);
+}
+
+void FeedbackVector::set(int index, Object value, WriteBarrierMode mode) {
+ set(index, MaybeObject::FromObject(value), mode);
+}
+
+inline MaybeObjectSlot FeedbackVector::slots_start() {
+ return RawMaybeWeakField(kFeedbackSlotsOffset);
+}
+
+// Helper function to transform the feedback to BinaryOperationHint.
+BinaryOperationHint BinaryOperationHintFromFeedback(int type_feedback) {
+ switch (type_feedback) {
+ case BinaryOperationFeedback::kNone:
+ return BinaryOperationHint::kNone;
+ case BinaryOperationFeedback::kSignedSmall:
+ return BinaryOperationHint::kSignedSmall;
+ case BinaryOperationFeedback::kSignedSmallInputs:
+ return BinaryOperationHint::kSignedSmallInputs;
+ case BinaryOperationFeedback::kNumber:
+ return BinaryOperationHint::kNumber;
+ case BinaryOperationFeedback::kNumberOrOddball:
+ return BinaryOperationHint::kNumberOrOddball;
+ case BinaryOperationFeedback::kString:
+ return BinaryOperationHint::kString;
+ case BinaryOperationFeedback::kBigInt:
+ return BinaryOperationHint::kBigInt;
+ default:
+ return BinaryOperationHint::kAny;
+ }
+ UNREACHABLE();
+}
+
+// Helper function to transform the feedback to CompareOperationHint.
+CompareOperationHint CompareOperationHintFromFeedback(int type_feedback) {
+ switch (type_feedback) {
+ case CompareOperationFeedback::kNone:
+ return CompareOperationHint::kNone;
+ case CompareOperationFeedback::kSignedSmall:
+ return CompareOperationHint::kSignedSmall;
+ case CompareOperationFeedback::kNumber:
+ return CompareOperationHint::kNumber;
+ case CompareOperationFeedback::kNumberOrOddball:
+ return CompareOperationHint::kNumberOrOddball;
+ case CompareOperationFeedback::kInternalizedString:
+ return CompareOperationHint::kInternalizedString;
+ case CompareOperationFeedback::kString:
+ return CompareOperationHint::kString;
+ case CompareOperationFeedback::kSymbol:
+ return CompareOperationHint::kSymbol;
+ case CompareOperationFeedback::kBigInt:
+ return CompareOperationHint::kBigInt;
+ case CompareOperationFeedback::kReceiver:
+ return CompareOperationHint::kReceiver;
+ case CompareOperationFeedback::kReceiverOrNullOrUndefined:
+ return CompareOperationHint::kReceiverOrNullOrUndefined;
+ default:
+ return CompareOperationHint::kAny;
+ }
+ UNREACHABLE();
+}
+
+// Helper function to transform the feedback to ForInHint.
+ForInHint ForInHintFromFeedback(int type_feedback) {
+ switch (type_feedback) {
+ case ForInFeedback::kNone:
+ return ForInHint::kNone;
+ case ForInFeedback::kEnumCacheKeys:
+ return ForInHint::kEnumCacheKeys;
+ case ForInFeedback::kEnumCacheKeysAndIndices:
+ return ForInHint::kEnumCacheKeysAndIndices;
+ default:
+ return ForInHint::kAny;
+ }
+ UNREACHABLE();
+}
+
+Handle<Symbol> FeedbackVector::UninitializedSentinel(Isolate* isolate) {
+ return isolate->factory()->uninitialized_symbol();
+}
+
+Handle<Symbol> FeedbackVector::GenericSentinel(Isolate* isolate) {
+ return isolate->factory()->generic_symbol();
+}
+
+Handle<Symbol> FeedbackVector::MegamorphicSentinel(Isolate* isolate) {
+ return isolate->factory()->megamorphic_symbol();
+}
+
+Handle<Symbol> FeedbackVector::PremonomorphicSentinel(Isolate* isolate) {
+ return isolate->factory()->premonomorphic_symbol();
+}
+
+Symbol FeedbackVector::RawUninitializedSentinel(Isolate* isolate) {
+ return ReadOnlyRoots(isolate).uninitialized_symbol();
+}
+
+bool FeedbackMetadataIterator::HasNext() const {
+ return next_slot_.ToInt() < metadata().slot_count();
+}
+
+FeedbackSlot FeedbackMetadataIterator::Next() {
+ DCHECK(HasNext());
+ cur_slot_ = next_slot_;
+ slot_kind_ = metadata().GetKind(cur_slot_);
+ next_slot_ = FeedbackSlot(next_slot_.ToInt() + entry_size());
+ return cur_slot_;
+}
+
+int FeedbackMetadataIterator::entry_size() const {
+ return FeedbackMetadata::GetSlotSize(kind());
+}
+
+MaybeObject FeedbackNexus::GetFeedback() const {
+ MaybeObject feedback = vector().Get(slot());
+ FeedbackVector::AssertNoLegacyTypes(feedback);
+ return feedback;
+}
+
+MaybeObject FeedbackNexus::GetFeedbackExtra() const {
+#ifdef DEBUG
+ FeedbackSlotKind kind = vector().GetKind(slot());
+ DCHECK_LT(1, FeedbackMetadata::GetSlotSize(kind));
+#endif
+ int extra_index = vector().GetIndex(slot()) + 1;
+ return vector().get(extra_index);
+}
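+
+// Illustrative layout note: for a two-entry slot kind such as kCall, the
+// main feedback lives at GetIndex(slot) and the extra value (initialized to
+// a Smi by FeedbackVector::New) lives at GetIndex(slot) + 1.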
+
+void FeedbackNexus::SetFeedback(Object feedback, WriteBarrierMode mode) {
+ SetFeedback(MaybeObject::FromObject(feedback));
+}
+
+void FeedbackNexus::SetFeedback(MaybeObject feedback, WriteBarrierMode mode) {
+ FeedbackVector::AssertNoLegacyTypes(feedback);
+ vector().Set(slot(), feedback, mode);
+}
+
+void FeedbackNexus::SetFeedbackExtra(Object feedback_extra,
+ WriteBarrierMode mode) {
+#ifdef DEBUG
+ FeedbackSlotKind kind = vector().GetKind(slot());
+ DCHECK_LT(1, FeedbackMetadata::GetSlotSize(kind));
+ FeedbackVector::AssertNoLegacyTypes(MaybeObject::FromObject(feedback_extra));
+#endif
+ int index = vector().GetIndex(slot()) + 1;
+ vector().set(index, MaybeObject::FromObject(feedback_extra), mode);
+}
+
+void FeedbackNexus::SetFeedbackExtra(MaybeObject feedback_extra,
+ WriteBarrierMode mode) {
+#ifdef DEBUG
+ FeedbackVector::AssertNoLegacyTypes(feedback_extra);
+#endif
+ int index = vector().GetIndex(slot()) + 1;
+ vector().set(index, feedback_extra, mode);
+}
+
+Isolate* FeedbackNexus::GetIsolate() const { return vector().GetIsolate(); }
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_FEEDBACK_VECTOR_INL_H_
diff --git a/deps/v8/src/objects/feedback-vector.cc b/deps/v8/src/objects/feedback-vector.cc
new file mode 100644
index 0000000000..0393a55f69
--- /dev/null
+++ b/deps/v8/src/objects/feedback-vector.cc
@@ -0,0 +1,1420 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/objects/feedback-vector.h"
+#include "src/ic/handler-configuration-inl.h"
+#include "src/ic/ic-inl.h"
+#include "src/objects/data-handler-inl.h"
+#include "src/objects/feedback-vector-inl.h"
+#include "src/objects/hash-table-inl.h"
+#include "src/objects/map-inl.h"
+#include "src/objects/object-macros.h"
+#include "src/objects/objects.h"
+
+namespace v8 {
+namespace internal {
+
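+// For illustration: since FeedbackMetadata::GetSlotSize reports 2 for
+// kLoadProperty, AddSlot(FeedbackSlotKind::kLoadProperty) below appends
+// {kLoadProperty, kInvalid} and returns the slot of the first entry.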
+FeedbackSlot FeedbackVectorSpec::AddSlot(FeedbackSlotKind kind) {
+ int slot = slots();
+ int entries_per_slot = FeedbackMetadata::GetSlotSize(kind);
+ append(kind);
+ for (int i = 1; i < entries_per_slot; i++) {
+ append(FeedbackSlotKind::kInvalid);
+ }
+ return FeedbackSlot(slot);
+}
+
+FeedbackSlot FeedbackVectorSpec::AddTypeProfileSlot() {
+ FeedbackSlot slot = AddSlot(FeedbackSlotKind::kTypeProfile);
+ CHECK_EQ(FeedbackVectorSpec::kTypeProfileSlotIndex,
+ FeedbackVector::GetIndex(slot));
+ return slot;
+}
+
+bool FeedbackVectorSpec::HasTypeProfileSlot() const {
+ FeedbackSlot slot =
+ FeedbackVector::ToSlot(FeedbackVectorSpec::kTypeProfileSlotIndex);
+ if (slots() <= slot.ToInt()) {
+ return false;
+ }
+ return GetKind(slot) == FeedbackSlotKind::kTypeProfile;
+}
+
+static bool IsPropertyNameFeedback(MaybeObject feedback) {
+ HeapObject heap_object;
+ if (!feedback->GetHeapObjectIfStrong(&heap_object)) return false;
+ if (heap_object.IsString()) {
+ DCHECK(heap_object.IsInternalizedString());
+ return true;
+ }
+ if (!heap_object.IsSymbol()) return false;
+ Symbol symbol = Symbol::cast(heap_object);
+ ReadOnlyRoots roots = symbol.GetReadOnlyRoots();
+ return symbol != roots.uninitialized_symbol() &&
+ symbol != roots.premonomorphic_symbol() &&
+ symbol != roots.megamorphic_symbol();
+}
+
+std::ostream& operator<<(std::ostream& os, FeedbackSlotKind kind) {
+ return os << FeedbackMetadata::Kind2String(kind);
+}
+
+FeedbackSlotKind FeedbackMetadata::GetKind(FeedbackSlot slot) const {
+ int index = VectorICComputer::index(0, slot.ToInt());
+ int data = get(index);
+ return VectorICComputer::decode(data, slot.ToInt());
+}
+
+void FeedbackMetadata::SetKind(FeedbackSlot slot, FeedbackSlotKind kind) {
+ int index = VectorICComputer::index(0, slot.ToInt());
+ int data = get(index);
+ int new_data = VectorICComputer::encode(data, slot.ToInt(), kind);
+ set(index, new_data);
+}
+
+// static
+Handle<FeedbackMetadata> FeedbackMetadata::New(Isolate* isolate,
+ const FeedbackVectorSpec* spec) {
+ Factory* factory = isolate->factory();
+
+ const int slot_count = spec == nullptr ? 0 : spec->slots();
+ const int closure_feedback_cell_count =
+ spec == nullptr ? 0 : spec->closure_feedback_cells();
+ if (slot_count == 0 && closure_feedback_cell_count == 0) {
+ return factory->empty_feedback_metadata();
+ }
+#ifdef DEBUG
+ for (int i = 0; i < slot_count;) {
+ DCHECK(spec);
+ FeedbackSlotKind kind = spec->GetKind(FeedbackSlot(i));
+ int entry_size = FeedbackMetadata::GetSlotSize(kind);
+ for (int j = 1; j < entry_size; j++) {
+ FeedbackSlotKind kind = spec->GetKind(FeedbackSlot(i + j));
+ DCHECK_EQ(FeedbackSlotKind::kInvalid, kind);
+ }
+ i += entry_size;
+ }
+#endif
+
+ Handle<FeedbackMetadata> metadata =
+ factory->NewFeedbackMetadata(slot_count, closure_feedback_cell_count);
+
+ // Initialize the slots. The raw data section has already been pre-zeroed in
+ // NewFeedbackMetadata.
+ for (int i = 0; i < slot_count; i++) {
+ DCHECK(spec);
+ FeedbackSlot slot(i);
+ FeedbackSlotKind kind = spec->GetKind(slot);
+ metadata->SetKind(slot, kind);
+ }
+
+ return metadata;
+}
+
+bool FeedbackMetadata::SpecDiffersFrom(
+ const FeedbackVectorSpec* other_spec) const {
+ if (other_spec->slots() != slot_count()) {
+ return true;
+ }
+
+ int slots = slot_count();
+ for (int i = 0; i < slots;) {
+ FeedbackSlot slot(i);
+ FeedbackSlotKind kind = GetKind(slot);
+ int entry_size = FeedbackMetadata::GetSlotSize(kind);
+
+ if (kind != other_spec->GetKind(slot)) {
+ return true;
+ }
+ i += entry_size;
+ }
+ return false;
+}
+
+const char* FeedbackMetadata::Kind2String(FeedbackSlotKind kind) {
+ switch (kind) {
+ case FeedbackSlotKind::kInvalid:
+ return "Invalid";
+ case FeedbackSlotKind::kCall:
+ return "Call";
+ case FeedbackSlotKind::kLoadProperty:
+ return "LoadProperty";
+ case FeedbackSlotKind::kLoadGlobalInsideTypeof:
+ return "LoadGlobalInsideTypeof";
+ case FeedbackSlotKind::kLoadGlobalNotInsideTypeof:
+ return "LoadGlobalNotInsideTypeof";
+ case FeedbackSlotKind::kLoadKeyed:
+ return "LoadKeyed";
+ case FeedbackSlotKind::kHasKeyed:
+ return "HasKeyed";
+ case FeedbackSlotKind::kStoreNamedSloppy:
+ return "StoreNamedSloppy";
+ case FeedbackSlotKind::kStoreNamedStrict:
+ return "StoreNamedStrict";
+ case FeedbackSlotKind::kStoreOwnNamed:
+ return "StoreOwnNamed";
+ case FeedbackSlotKind::kStoreGlobalSloppy:
+ return "StoreGlobalSloppy";
+ case FeedbackSlotKind::kStoreGlobalStrict:
+ return "StoreGlobalStrict";
+ case FeedbackSlotKind::kStoreKeyedSloppy:
+ return "StoreKeyedSloppy";
+ case FeedbackSlotKind::kStoreKeyedStrict:
+ return "StoreKeyedStrict";
+ case FeedbackSlotKind::kStoreInArrayLiteral:
+ return "StoreInArrayLiteral";
+ case FeedbackSlotKind::kBinaryOp:
+ return "BinaryOp";
+ case FeedbackSlotKind::kCompareOp:
+ return "CompareOp";
+ case FeedbackSlotKind::kStoreDataPropertyInLiteral:
+ return "StoreDataPropertyInLiteral";
+ case FeedbackSlotKind::kLiteral:
+ return "Literal";
+ case FeedbackSlotKind::kTypeProfile:
+ return "TypeProfile";
+ case FeedbackSlotKind::kForIn:
+ return "ForIn";
+ case FeedbackSlotKind::kInstanceOf:
+ return "InstanceOf";
+ case FeedbackSlotKind::kCloneObject:
+ return "CloneObject";
+ case FeedbackSlotKind::kKindsNumber:
+ break;
+ }
+ UNREACHABLE();
+}
+
+bool FeedbackMetadata::HasTypeProfileSlot() const {
+ FeedbackSlot slot =
+ FeedbackVector::ToSlot(FeedbackVectorSpec::kTypeProfileSlotIndex);
+ return slot.ToInt() < slot_count() &&
+ GetKind(slot) == FeedbackSlotKind::kTypeProfile;
+}
+
+FeedbackSlotKind FeedbackVector::GetKind(FeedbackSlot slot) const {
+ DCHECK(!is_empty());
+ return metadata().GetKind(slot);
+}
+
+FeedbackSlot FeedbackVector::GetTypeProfileSlot() const {
+ DCHECK(metadata().HasTypeProfileSlot());
+ FeedbackSlot slot =
+ FeedbackVector::ToSlot(FeedbackVectorSpec::kTypeProfileSlotIndex);
+ DCHECK_EQ(FeedbackSlotKind::kTypeProfile, GetKind(slot));
+ return slot;
+}
+
+// static
+Handle<ClosureFeedbackCellArray> ClosureFeedbackCellArray::New(
+ Isolate* isolate, Handle<SharedFunctionInfo> shared) {
+ Factory* factory = isolate->factory();
+
+ int num_feedback_cells =
+ shared->feedback_metadata().closure_feedback_cell_count();
+
+ Handle<ClosureFeedbackCellArray> feedback_cell_array =
+ factory->NewClosureFeedbackCellArray(num_feedback_cells);
+
+ for (int i = 0; i < num_feedback_cells; i++) {
+ Handle<FeedbackCell> cell =
+ factory->NewNoClosuresCell(factory->undefined_value());
+ feedback_cell_array->set(i, *cell);
+ }
+ return feedback_cell_array;
+}
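+
+// For illustration: a SharedFunctionInfo whose feedback metadata reports two
+// closure feedback cells receives a two-cell array from the above, with each
+// cell initially holding undefined.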
+
+// static
+Handle<FeedbackVector> FeedbackVector::New(
+ Isolate* isolate, Handle<SharedFunctionInfo> shared,
+ Handle<ClosureFeedbackCellArray> closure_feedback_cell_array) {
+ Factory* factory = isolate->factory();
+
+ const int slot_count = shared->feedback_metadata().slot_count();
+
+ Handle<FeedbackVector> vector = factory->NewFeedbackVector(
+ shared, closure_feedback_cell_array, AllocationType::kOld);
+
+ DCHECK_EQ(vector->length(), slot_count);
+
+ DCHECK_EQ(vector->shared_function_info(), *shared);
+ DCHECK_EQ(
+ vector->optimized_code_weak_or_smi(),
+ MaybeObject::FromSmi(Smi::FromEnum(
+ FLAG_log_function_events ? OptimizationMarker::kLogFirstExecution
+ : OptimizationMarker::kNone)));
+ DCHECK_EQ(vector->invocation_count(), 0);
+ DCHECK_EQ(vector->profiler_ticks(), 0);
+
+  // Ensure we can skip the write barrier.
+ Handle<Object> uninitialized_sentinel = UninitializedSentinel(isolate);
+ DCHECK_EQ(ReadOnlyRoots(isolate).uninitialized_symbol(),
+ *uninitialized_sentinel);
+ for (int i = 0; i < slot_count;) {
+ FeedbackSlot slot(i);
+ FeedbackSlotKind kind = shared->feedback_metadata().GetKind(slot);
+ int index = FeedbackVector::GetIndex(slot);
+ int entry_size = FeedbackMetadata::GetSlotSize(kind);
+
+ Object extra_value = *uninitialized_sentinel;
+ switch (kind) {
+ case FeedbackSlotKind::kLoadGlobalInsideTypeof:
+ case FeedbackSlotKind::kLoadGlobalNotInsideTypeof:
+ case FeedbackSlotKind::kStoreGlobalSloppy:
+ case FeedbackSlotKind::kStoreGlobalStrict:
+ vector->set(index, HeapObjectReference::ClearedValue(isolate),
+ SKIP_WRITE_BARRIER);
+ break;
+ case FeedbackSlotKind::kForIn:
+ case FeedbackSlotKind::kCompareOp:
+ case FeedbackSlotKind::kBinaryOp:
+ vector->set(index, Smi::kZero, SKIP_WRITE_BARRIER);
+ break;
+ case FeedbackSlotKind::kLiteral:
+ vector->set(index, Smi::kZero, SKIP_WRITE_BARRIER);
+ break;
+ case FeedbackSlotKind::kCall:
+ vector->set(index, *uninitialized_sentinel, SKIP_WRITE_BARRIER);
+ extra_value = Smi::kZero;
+ break;
+ case FeedbackSlotKind::kCloneObject:
+ case FeedbackSlotKind::kLoadProperty:
+ case FeedbackSlotKind::kLoadKeyed:
+ case FeedbackSlotKind::kHasKeyed:
+ case FeedbackSlotKind::kStoreNamedSloppy:
+ case FeedbackSlotKind::kStoreNamedStrict:
+ case FeedbackSlotKind::kStoreOwnNamed:
+ case FeedbackSlotKind::kStoreKeyedSloppy:
+ case FeedbackSlotKind::kStoreKeyedStrict:
+ case FeedbackSlotKind::kStoreInArrayLiteral:
+ case FeedbackSlotKind::kStoreDataPropertyInLiteral:
+ case FeedbackSlotKind::kTypeProfile:
+ case FeedbackSlotKind::kInstanceOf:
+ vector->set(index, *uninitialized_sentinel, SKIP_WRITE_BARRIER);
+ break;
+
+ case FeedbackSlotKind::kInvalid:
+ case FeedbackSlotKind::kKindsNumber:
+ UNREACHABLE();
+ break;
+ }
+ for (int j = 1; j < entry_size; j++) {
+ vector->set(index + j, extra_value, SKIP_WRITE_BARRIER);
+ }
+ i += entry_size;
+ }
+
+ Handle<FeedbackVector> result = Handle<FeedbackVector>::cast(vector);
+ if (!isolate->is_best_effort_code_coverage() ||
+ isolate->is_collecting_type_profile()) {
+ AddToVectorsForProfilingTools(isolate, result);
+ }
+ return result;
+}
+
+// static
+void FeedbackVector::AddToVectorsForProfilingTools(
+ Isolate* isolate, Handle<FeedbackVector> vector) {
+ DCHECK(!isolate->is_best_effort_code_coverage() ||
+ isolate->is_collecting_type_profile());
+ if (!vector->shared_function_info().IsSubjectToDebugging()) return;
+ Handle<ArrayList> list = Handle<ArrayList>::cast(
+ isolate->factory()->feedback_vectors_for_profiling_tools());
+ list = ArrayList::Add(isolate, list, vector);
+ isolate->SetFeedbackVectorsForProfilingTools(*list);
+}
+
+// static
+void FeedbackVector::SetOptimizedCode(Handle<FeedbackVector> vector,
+ Handle<Code> code) {
+ DCHECK_EQ(code->kind(), Code::OPTIMIZED_FUNCTION);
+ vector->set_optimized_code_weak_or_smi(HeapObjectReference::Weak(*code));
+}
+
+void FeedbackVector::ClearOptimizedCode() {
+ DCHECK(has_optimized_code());
+ SetOptimizationMarker(OptimizationMarker::kNone);
+}
+
+void FeedbackVector::ClearOptimizationMarker() {
+ DCHECK(!has_optimized_code());
+ SetOptimizationMarker(OptimizationMarker::kNone);
+}
+
+void FeedbackVector::SetOptimizationMarker(OptimizationMarker marker) {
+ set_optimized_code_weak_or_smi(MaybeObject::FromSmi(Smi::FromEnum(marker)));
+}
+
+void FeedbackVector::EvictOptimizedCodeMarkedForDeoptimization(
+ SharedFunctionInfo shared, const char* reason) {
+ MaybeObject slot = optimized_code_weak_or_smi();
+ if (slot->IsSmi()) {
+ return;
+ }
+
+ if (slot->IsCleared()) {
+ ClearOptimizationMarker();
+ return;
+ }
+
+ Code code = Code::cast(slot->GetHeapObject());
+ if (code.marked_for_deoptimization()) {
+ if (FLAG_trace_deopt) {
+ PrintF("[evicting optimizing code marked for deoptimization (%s) for ",
+ reason);
+ shared.ShortPrint();
+ PrintF("]\n");
+ }
+ if (!code.deopt_already_counted()) {
+ code.set_deopt_already_counted(true);
+ }
+ ClearOptimizedCode();
+ }
+}
+
+bool FeedbackVector::ClearSlots(Isolate* isolate) {
+ MaybeObject uninitialized_sentinel = MaybeObject::FromObject(
+ FeedbackVector::RawUninitializedSentinel(isolate));
+
+ bool feedback_updated = false;
+ FeedbackMetadataIterator iter(metadata());
+ while (iter.HasNext()) {
+ FeedbackSlot slot = iter.Next();
+
+ MaybeObject obj = Get(slot);
+ if (obj != uninitialized_sentinel) {
+ FeedbackNexus nexus(*this, slot);
+ feedback_updated |= nexus.Clear();
+ }
+ }
+ return feedback_updated;
+}
+
+void FeedbackVector::AssertNoLegacyTypes(MaybeObject object) {
+#ifdef DEBUG
+ HeapObject heap_object;
+ if (object->GetHeapObject(&heap_object)) {
+ // The Feedback and the Extra slots should contain WeakFixedArrays rather
+ // than FixedArrays; the only allowed FixedArray subtype is HashTable.
+ DCHECK_IMPLIES(heap_object.IsFixedArray(), heap_object.IsHashTable());
+ }
+#endif
+}
+
+Handle<WeakFixedArray> FeedbackNexus::EnsureArrayOfSize(int length) {
+ Isolate* isolate = GetIsolate();
+ HeapObject heap_object;
+ if (GetFeedback()->GetHeapObjectIfStrong(&heap_object) &&
+ heap_object.IsWeakFixedArray() &&
+ WeakFixedArray::cast(heap_object).length() == length) {
+ return handle(WeakFixedArray::cast(heap_object), isolate);
+ }
+ Handle<WeakFixedArray> array = isolate->factory()->NewWeakFixedArray(length);
+ SetFeedback(*array);
+ return array;
+}
+
+Handle<WeakFixedArray> FeedbackNexus::EnsureExtraArrayOfSize(int length) {
+ Isolate* isolate = GetIsolate();
+ HeapObject heap_object;
+ if (GetFeedbackExtra()->GetHeapObjectIfStrong(&heap_object) &&
+ heap_object.IsWeakFixedArray() &&
+ WeakFixedArray::cast(heap_object).length() == length) {
+ return handle(WeakFixedArray::cast(heap_object), isolate);
+ }
+ Handle<WeakFixedArray> array = isolate->factory()->NewWeakFixedArray(length);
+ SetFeedbackExtra(*array);
+ return array;
+}
+
+void FeedbackNexus::ConfigureUninitialized() {
+ Isolate* isolate = GetIsolate();
+ switch (kind()) {
+ case FeedbackSlotKind::kStoreGlobalSloppy:
+ case FeedbackSlotKind::kStoreGlobalStrict:
+ case FeedbackSlotKind::kLoadGlobalNotInsideTypeof:
+ case FeedbackSlotKind::kLoadGlobalInsideTypeof: {
+ SetFeedback(HeapObjectReference::ClearedValue(isolate),
+ SKIP_WRITE_BARRIER);
+ SetFeedbackExtra(*FeedbackVector::UninitializedSentinel(isolate),
+ SKIP_WRITE_BARRIER);
+ break;
+ }
+ case FeedbackSlotKind::kCloneObject:
+ case FeedbackSlotKind::kCall: {
+ SetFeedback(*FeedbackVector::UninitializedSentinel(isolate),
+ SKIP_WRITE_BARRIER);
+ SetFeedbackExtra(Smi::kZero, SKIP_WRITE_BARRIER);
+ break;
+ }
+ case FeedbackSlotKind::kInstanceOf: {
+ SetFeedback(*FeedbackVector::UninitializedSentinel(isolate),
+ SKIP_WRITE_BARRIER);
+ break;
+ }
+ case FeedbackSlotKind::kStoreNamedSloppy:
+ case FeedbackSlotKind::kStoreNamedStrict:
+ case FeedbackSlotKind::kStoreKeyedSloppy:
+ case FeedbackSlotKind::kStoreKeyedStrict:
+ case FeedbackSlotKind::kStoreInArrayLiteral:
+ case FeedbackSlotKind::kStoreOwnNamed:
+ case FeedbackSlotKind::kLoadProperty:
+ case FeedbackSlotKind::kLoadKeyed:
+ case FeedbackSlotKind::kHasKeyed:
+ case FeedbackSlotKind::kStoreDataPropertyInLiteral: {
+ SetFeedback(*FeedbackVector::UninitializedSentinel(isolate),
+ SKIP_WRITE_BARRIER);
+ SetFeedbackExtra(*FeedbackVector::UninitializedSentinel(isolate),
+ SKIP_WRITE_BARRIER);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+}
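+
+ // Initial encodings installed above (illustrative summary of the switch):
+ //
+ //   slot kind group                 feedback slot            extra slot
+ //   ------------------------------  -----------------------  --------------
+ //   global loads/stores             cleared weak reference   uninitialized
+ //   kCloneObject, kCall             uninitialized sentinel   Smi::kZero
+ //   kInstanceOf                     uninitialized sentinel   (untouched)
+ //   named/keyed loads and stores    uninitialized sentinel   uninitialized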
+
+bool FeedbackNexus::Clear() {
+ bool feedback_updated = false;
+
+ switch (kind()) {
+ case FeedbackSlotKind::kTypeProfile:
+ // We don't clear these kinds ever.
+ break;
+
+ case FeedbackSlotKind::kCompareOp:
+ case FeedbackSlotKind::kForIn:
+ case FeedbackSlotKind::kBinaryOp:
+ // We don't clear these, either.
+ break;
+
+ case FeedbackSlotKind::kLiteral:
+ SetFeedback(Smi::kZero, SKIP_WRITE_BARRIER);
+ feedback_updated = true;
+ break;
+
+ case FeedbackSlotKind::kStoreNamedSloppy:
+ case FeedbackSlotKind::kStoreNamedStrict:
+ case FeedbackSlotKind::kStoreKeyedSloppy:
+ case FeedbackSlotKind::kStoreKeyedStrict:
+ case FeedbackSlotKind::kStoreInArrayLiteral:
+ case FeedbackSlotKind::kStoreOwnNamed:
+ case FeedbackSlotKind::kLoadProperty:
+ case FeedbackSlotKind::kLoadKeyed:
+ case FeedbackSlotKind::kHasKeyed:
+ case FeedbackSlotKind::kStoreGlobalSloppy:
+ case FeedbackSlotKind::kStoreGlobalStrict:
+ case FeedbackSlotKind::kLoadGlobalNotInsideTypeof:
+ case FeedbackSlotKind::kLoadGlobalInsideTypeof:
+ case FeedbackSlotKind::kCall:
+ case FeedbackSlotKind::kInstanceOf:
+ case FeedbackSlotKind::kStoreDataPropertyInLiteral:
+ case FeedbackSlotKind::kCloneObject:
+ if (!IsCleared()) {
+ ConfigureUninitialized();
+ feedback_updated = true;
+ }
+ break;
+
+ case FeedbackSlotKind::kInvalid:
+ case FeedbackSlotKind::kKindsNumber:
+ UNREACHABLE();
+ }
+ return feedback_updated;
+}
+
+void FeedbackNexus::ConfigurePremonomorphic(Handle<Map> receiver_map) {
+ SetFeedback(*FeedbackVector::PremonomorphicSentinel(GetIsolate()),
+ SKIP_WRITE_BARRIER);
+ SetFeedbackExtra(HeapObjectReference::Weak(*receiver_map));
+}
+
+bool FeedbackNexus::ConfigureMegamorphic() {
+ DisallowHeapAllocation no_gc;
+ Isolate* isolate = GetIsolate();
+ MaybeObject sentinel =
+ MaybeObject::FromObject(*FeedbackVector::MegamorphicSentinel(isolate));
+ if (GetFeedback() != sentinel) {
+ SetFeedback(sentinel, SKIP_WRITE_BARRIER);
+ SetFeedbackExtra(HeapObjectReference::ClearedValue(isolate));
+ return true;
+ }
+
+ return false;
+}
+
+bool FeedbackNexus::ConfigureMegamorphic(IcCheckType property_type) {
+ DisallowHeapAllocation no_gc;
+ Isolate* isolate = GetIsolate();
+ bool changed = false;
+ MaybeObject sentinel =
+ MaybeObject::FromObject(*FeedbackVector::MegamorphicSentinel(isolate));
+ if (GetFeedback() != sentinel) {
+ SetFeedback(sentinel, SKIP_WRITE_BARRIER);
+ changed = true;
+ }
+
+ Smi extra = Smi::FromInt(static_cast<int>(property_type));
+ if (changed || GetFeedbackExtra() != MaybeObject::FromSmi(extra)) {
+ SetFeedbackExtra(extra, SKIP_WRITE_BARRIER);
+ changed = true;
+ }
+ return changed;
+}
+
+Map FeedbackNexus::GetFirstMap() const {
+ MapHandles maps;
+ ExtractMaps(&maps);
+ if (!maps.empty()) return *maps.at(0);
+ return Map();
+}
+
+InlineCacheState FeedbackNexus::ic_state() const {
+ Isolate* isolate = GetIsolate();
+ MaybeObject feedback = GetFeedback();
+
+ switch (kind()) {
+ case FeedbackSlotKind::kLiteral:
+ if (feedback->IsSmi()) return UNINITIALIZED;
+ return MONOMORPHIC;
+
+ case FeedbackSlotKind::kStoreGlobalSloppy:
+ case FeedbackSlotKind::kStoreGlobalStrict:
+ case FeedbackSlotKind::kLoadGlobalNotInsideTypeof:
+ case FeedbackSlotKind::kLoadGlobalInsideTypeof: {
+ if (feedback->IsSmi()) return MONOMORPHIC;
+
+ if (feedback == MaybeObject::FromObject(
+ *FeedbackVector::PremonomorphicSentinel(isolate))) {
+ DCHECK(kind() == FeedbackSlotKind::kStoreGlobalSloppy ||
+ kind() == FeedbackSlotKind::kStoreGlobalStrict);
+ return PREMONOMORPHIC;
+ }
+
+ DCHECK(feedback->IsWeakOrCleared());
+ MaybeObject extra = GetFeedbackExtra();
+ if (!feedback->IsCleared() ||
+ extra != MaybeObject::FromObject(
+ *FeedbackVector::UninitializedSentinel(isolate))) {
+ return MONOMORPHIC;
+ }
+ return UNINITIALIZED;
+ }
+
+ case FeedbackSlotKind::kStoreNamedSloppy:
+ case FeedbackSlotKind::kStoreNamedStrict:
+ case FeedbackSlotKind::kStoreKeyedSloppy:
+ case FeedbackSlotKind::kStoreKeyedStrict:
+ case FeedbackSlotKind::kStoreInArrayLiteral:
+ case FeedbackSlotKind::kStoreOwnNamed:
+ case FeedbackSlotKind::kLoadProperty:
+ case FeedbackSlotKind::kLoadKeyed:
+ case FeedbackSlotKind::kHasKeyed: {
+ if (feedback == MaybeObject::FromObject(
+ *FeedbackVector::UninitializedSentinel(isolate))) {
+ return UNINITIALIZED;
+ }
+ if (feedback == MaybeObject::FromObject(
+ *FeedbackVector::MegamorphicSentinel(isolate))) {
+ return MEGAMORPHIC;
+ }
+ if (feedback == MaybeObject::FromObject(
+ *FeedbackVector::PremonomorphicSentinel(isolate))) {
+ return PREMONOMORPHIC;
+ }
+ if (feedback->IsWeakOrCleared()) {
+ // Don't check if the map is cleared.
+ return MONOMORPHIC;
+ }
+ HeapObject heap_object;
+ if (feedback->GetHeapObjectIfStrong(&heap_object)) {
+ if (heap_object.IsWeakFixedArray()) {
+ // Determine the state purely from our structure; don't check whether
+ // the maps are cleared.
+ return POLYMORPHIC;
+ }
+ if (heap_object.IsName()) {
+ DCHECK(IsKeyedLoadICKind(kind()) || IsKeyedStoreICKind(kind()) ||
+ IsKeyedHasICKind(kind()));
+ Object extra = GetFeedbackExtra()->GetHeapObjectAssumeStrong();
+ WeakFixedArray extra_array = WeakFixedArray::cast(extra);
+ return extra_array.length() > 2 ? POLYMORPHIC : MONOMORPHIC;
+ }
+ }
+ UNREACHABLE();
+ }
+ case FeedbackSlotKind::kCall: {
+ HeapObject heap_object;
+ if (feedback == MaybeObject::FromObject(
+ *FeedbackVector::MegamorphicSentinel(isolate))) {
+ return GENERIC;
+ } else if (feedback->IsWeakOrCleared() ||
+ (feedback->GetHeapObjectIfStrong(&heap_object) &&
+ heap_object.IsAllocationSite())) {
+ return MONOMORPHIC;
+ }
+
+ CHECK_EQ(feedback, MaybeObject::FromObject(
+ *FeedbackVector::UninitializedSentinel(isolate)));
+ return UNINITIALIZED;
+ }
+ case FeedbackSlotKind::kBinaryOp: {
+ BinaryOperationHint hint = GetBinaryOperationFeedback();
+ if (hint == BinaryOperationHint::kNone) {
+ return UNINITIALIZED;
+ } else if (hint == BinaryOperationHint::kAny) {
+ return GENERIC;
+ }
+
+ return MONOMORPHIC;
+ }
+ case FeedbackSlotKind::kCompareOp: {
+ CompareOperationHint hint = GetCompareOperationFeedback();
+ if (hint == CompareOperationHint::kNone) {
+ return UNINITIALIZED;
+ } else if (hint == CompareOperationHint::kAny) {
+ return GENERIC;
+ }
+
+ return MONOMORPHIC;
+ }
+ case FeedbackSlotKind::kForIn: {
+ ForInHint hint = GetForInFeedback();
+ if (hint == ForInHint::kNone) {
+ return UNINITIALIZED;
+ } else if (hint == ForInHint::kAny) {
+ return GENERIC;
+ }
+ return MONOMORPHIC;
+ }
+ case FeedbackSlotKind::kInstanceOf: {
+ if (feedback == MaybeObject::FromObject(
+ *FeedbackVector::UninitializedSentinel(isolate))) {
+ return UNINITIALIZED;
+ } else if (feedback ==
+ MaybeObject::FromObject(
+ *FeedbackVector::MegamorphicSentinel(isolate))) {
+ return MEGAMORPHIC;
+ }
+ return MONOMORPHIC;
+ }
+ case FeedbackSlotKind::kStoreDataPropertyInLiteral: {
+ if (feedback == MaybeObject::FromObject(
+ *FeedbackVector::UninitializedSentinel(isolate))) {
+ return UNINITIALIZED;
+ } else if (feedback->IsWeakOrCleared()) {
+ // Don't check if the map is cleared.
+ return MONOMORPHIC;
+ }
+
+ return MEGAMORPHIC;
+ }
+ case FeedbackSlotKind::kTypeProfile: {
+ if (feedback == MaybeObject::FromObject(
+ *FeedbackVector::UninitializedSentinel(isolate))) {
+ return UNINITIALIZED;
+ }
+ return MONOMORPHIC;
+ }
+
+ case FeedbackSlotKind::kCloneObject: {
+ if (feedback == MaybeObject::FromObject(
+ *FeedbackVector::UninitializedSentinel(isolate))) {
+ return UNINITIALIZED;
+ }
+ if (feedback == MaybeObject::FromObject(
+ *FeedbackVector::MegamorphicSentinel(isolate))) {
+ return MEGAMORPHIC;
+ }
+ if (feedback->IsWeakOrCleared()) {
+ return MONOMORPHIC;
+ }
+
+ DCHECK(feedback->GetHeapObjectAssumeStrong().IsWeakFixedArray());
+ return POLYMORPHIC;
+ }
+
+ case FeedbackSlotKind::kInvalid:
+ case FeedbackSlotKind::kKindsNumber:
+ UNREACHABLE();
+ }
+ return UNINITIALIZED;
+}
+
+void FeedbackNexus::ConfigurePropertyCellMode(Handle<PropertyCell> cell) {
+ DCHECK(IsGlobalICKind(kind()));
+ Isolate* isolate = GetIsolate();
+ SetFeedback(HeapObjectReference::Weak(*cell));
+ SetFeedbackExtra(*FeedbackVector::UninitializedSentinel(isolate),
+ SKIP_WRITE_BARRIER);
+}
+
+bool FeedbackNexus::ConfigureLexicalVarMode(int script_context_index,
+ int context_slot_index,
+ bool immutable) {
+ DCHECK(IsGlobalICKind(kind()));
+ DCHECK_LE(0, script_context_index);
+ DCHECK_LE(0, context_slot_index);
+ if (!ContextIndexBits::is_valid(script_context_index) ||
+ !SlotIndexBits::is_valid(context_slot_index) ||
+ !ImmutabilityBit::is_valid(immutable)) {
+ return false;
+ }
+ int config = ContextIndexBits::encode(script_context_index) |
+ SlotIndexBits::encode(context_slot_index) |
+ ImmutabilityBit::encode(immutable);
+
+ SetFeedback(Smi::From31BitPattern(config));
+ Isolate* isolate = GetIsolate();
+ SetFeedbackExtra(*FeedbackVector::UninitializedSentinel(isolate),
+ SKIP_WRITE_BARRIER);
+ return true;
+}
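+
+ // Example (illustrative): a lexical variable in script context 2, slot 7,
+ // declared immutable, is encoded with the bit fields defined in
+ // feedback-vector.h and can be decoded the same way:
+ //
+ //   int config = ContextIndexBits::encode(2) | SlotIndexBits::encode(7) |
+ //                ImmutabilityBit::encode(true);
+ //   DCHECK_EQ(2, ContextIndexBits::decode(config));
+ //   DCHECK_EQ(7, SlotIndexBits::decode(config));
+ //   DCHECK(ImmutabilityBit::decode(config));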
+
+void FeedbackNexus::ConfigureHandlerMode(const MaybeObjectHandle& handler) {
+ DCHECK(IsGlobalICKind(kind()));
+ DCHECK(IC::IsHandler(*handler));
+ SetFeedback(HeapObjectReference::ClearedValue(GetIsolate()));
+ SetFeedbackExtra(*handler);
+}
+
+void FeedbackNexus::ConfigureCloneObject(Handle<Map> source_map,
+ Handle<Map> result_map) {
+ Isolate* isolate = GetIsolate();
+ MaybeObject maybe_feedback = GetFeedback();
+ Handle<HeapObject> feedback(maybe_feedback->IsStrongOrWeak()
+ ? maybe_feedback->GetHeapObject()
+ : HeapObject(),
+ isolate);
+ switch (ic_state()) {
+ case UNINITIALIZED:
+ // Cache the first map seen which meets the fast case requirements.
+ SetFeedback(HeapObjectReference::Weak(*source_map));
+ SetFeedbackExtra(*result_map);
+ break;
+ case MONOMORPHIC:
+ if (maybe_feedback->IsCleared() || feedback.is_identical_to(source_map) ||
+ Map::cast(*feedback).is_deprecated()) {
+ // Remain in MONOMORPHIC state if the previous feedback was cleared by
+ // GC, matches the new source map, or is deprecated.
+ SetFeedback(HeapObjectReference::Weak(*source_map));
+ SetFeedbackExtra(*result_map);
+ } else {
+ // Transition to POLYMORPHIC.
+ Handle<WeakFixedArray> array =
+ EnsureArrayOfSize(2 * kCloneObjectPolymorphicEntrySize);
+ array->Set(0, maybe_feedback);
+ array->Set(1, GetFeedbackExtra());
+ array->Set(2, HeapObjectReference::Weak(*source_map));
+ array->Set(3, MaybeObject::FromObject(*result_map));
+ SetFeedbackExtra(HeapObjectReference::ClearedValue(isolate));
+ }
+ break;
+ case POLYMORPHIC: {
+ const int kMaxElements =
+ FLAG_max_polymorphic_map_count * kCloneObjectPolymorphicEntrySize;
+ Handle<WeakFixedArray> array = Handle<WeakFixedArray>::cast(feedback);
+ int i = 0;
+ for (; i < array->length(); i += kCloneObjectPolymorphicEntrySize) {
+ MaybeObject feedback = array->Get(i);
+ if (feedback->IsCleared()) break;
+ Handle<Map> cached_map(Map::cast(feedback->GetHeapObject()), isolate);
+ if (cached_map.is_identical_to(source_map) ||
+ cached_map->is_deprecated())
+ break;
+ }
+
+ if (i >= array->length()) {
+ if (i == kMaxElements) {
+ // Transition to MEGAMORPHIC.
+ MaybeObject sentinel = MaybeObject::FromObject(
+ *FeedbackVector::MegamorphicSentinel(isolate));
+ SetFeedback(sentinel, SKIP_WRITE_BARRIER);
+ SetFeedbackExtra(HeapObjectReference::ClearedValue(isolate));
+ break;
+ }
+
+ // Grow polymorphic feedback array.
+ Handle<WeakFixedArray> new_array = EnsureArrayOfSize(
+ array->length() + kCloneObjectPolymorphicEntrySize);
+ for (int j = 0; j < array->length(); ++j) {
+ new_array->Set(j, array->Get(j));
+ }
+ array = new_array;
+ }
+
+ array->Set(i, HeapObjectReference::Weak(*source_map));
+ array->Set(i + 1, MaybeObject::FromObject(*result_map));
+ break;
+ }
+
+ default:
+ UNREACHABLE();
+ }
+}
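+
+ // State transitions implemented above (sketch):
+ //
+ //   UNINITIALIZED --first (source, result) pair--> MONOMORPHIC
+ //   MONOMORPHIC --second distinct source map--> POLYMORPHIC
+ //   POLYMORPHIC --more than FLAG_max_polymorphic_map_count entries-->
+ //       MEGAMORPHIC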
+
+int FeedbackNexus::GetCallCount() {
+ DCHECK(IsCallICKind(kind()));
+
+ Object call_count = GetFeedbackExtra()->cast<Object>();
+ CHECK(call_count.IsSmi());
+ uint32_t value = static_cast<uint32_t>(Smi::ToInt(call_count));
+ return CallCountField::decode(value);
+}
+
+void FeedbackNexus::SetSpeculationMode(SpeculationMode mode) {
+ DCHECK(IsCallICKind(kind()));
+
+ Object call_count = GetFeedbackExtra()->cast<Object>();
+ CHECK(call_count.IsSmi());
+ uint32_t count = static_cast<uint32_t>(Smi::ToInt(call_count));
+ uint32_t value = CallCountField::encode(CallCountField::decode(count));
+ int result = static_cast<int>(value | SpeculationModeField::encode(mode));
+ SetFeedbackExtra(Smi::FromInt(result), SKIP_WRITE_BARRIER);
+}
+
+SpeculationMode FeedbackNexus::GetSpeculationMode() {
+ DCHECK(IsCallICKind(kind()));
+
+ Object call_count = GetFeedbackExtra()->cast<Object>();
+ CHECK(call_count.IsSmi());
+ uint32_t value = static_cast<uint32_t>(Smi::ToInt(call_count));
+ return SpeculationModeField::decode(value);
+}
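+
+ // Bit layout shared by the three call-IC accessors above (illustrative;
+ // the fields are declared in feedback-vector.h):
+ //
+ //   bit  0      SpeculationModeField
+ //   bits 1..31  CallCountField
+ //
+ // For example, a raw extra value of 0b101 decodes to a call count of 2
+ // with speculation disallowed, assuming
+ // SpeculationMode::kDisallowSpeculation == 1.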
+
+float FeedbackNexus::ComputeCallFrequency() {
+ DCHECK(IsCallICKind(kind()));
+
+ double const invocation_count = vector().invocation_count();
+ double const call_count = GetCallCount();
+ if (invocation_count == 0) {
+ // Prevent division by 0.
+ return 0.0f;
+ }
+ return static_cast<float>(call_count / invocation_count);
+}
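+
+ // Worked example (illustrative): a call site executed 30 times inside a
+ // function that was itself invoked 10 times yields a call frequency of
+ // 30 / 10 = 3.0f.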
+
+void FeedbackNexus::ConfigureMonomorphic(Handle<Name> name,
+ Handle<Map> receiver_map,
+ const MaybeObjectHandle& handler) {
+ DCHECK(handler.is_null() || IC::IsHandler(*handler));
+ if (kind() == FeedbackSlotKind::kStoreDataPropertyInLiteral) {
+ SetFeedback(HeapObjectReference::Weak(*receiver_map));
+ SetFeedbackExtra(*name);
+ } else {
+ if (name.is_null()) {
+ SetFeedback(HeapObjectReference::Weak(*receiver_map));
+ SetFeedbackExtra(*handler);
+ } else {
+ Handle<WeakFixedArray> array = EnsureExtraArrayOfSize(2);
+ SetFeedback(*name);
+ array->Set(0, HeapObjectReference::Weak(*receiver_map));
+ array->Set(1, *handler);
+ }
+ }
+}
+
+void FeedbackNexus::ConfigurePolymorphic(Handle<Name> name,
+ MapHandles const& maps,
+ MaybeObjectHandles* handlers) {
+ DCHECK_EQ(handlers->size(), maps.size());
+ int receiver_count = static_cast<int>(maps.size());
+ DCHECK_GT(receiver_count, 1);
+ Handle<WeakFixedArray> array;
+ if (name.is_null()) {
+ array = EnsureArrayOfSize(receiver_count * 2);
+ SetFeedbackExtra(*FeedbackVector::UninitializedSentinel(GetIsolate()),
+ SKIP_WRITE_BARRIER);
+ } else {
+ array = EnsureExtraArrayOfSize(receiver_count * 2);
+ SetFeedback(*name);
+ }
+
+ for (int current = 0; current < receiver_count; ++current) {
+ Handle<Map> map = maps[current];
+ array->Set(current * 2, HeapObjectReference::Weak(*map));
+ DCHECK(IC::IsHandler(*handlers->at(current)));
+ array->Set(current * 2 + 1, *handlers->at(current));
+ }
+}
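+
+ // Array layout written above (sketch), for N receiver maps:
+ //
+ //   [0] weak(map_0)   [1] handler_0
+ //   [2] weak(map_1)   [3] handler_1
+ //   ...
+ //
+ // For named keyed feedback the pairs live in the extra array and the main
+ // slot holds the name; ExtractMaps() and FindHandlerForMap() below walk
+ // this structure.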
+
+int FeedbackNexus::ExtractMaps(MapHandles* maps) const {
+ DCHECK(IsLoadICKind(kind()) || IsStoreICKind(kind()) ||
+ IsKeyedLoadICKind(kind()) || IsKeyedStoreICKind(kind()) ||
+ IsStoreOwnICKind(kind()) || IsStoreDataPropertyInLiteralKind(kind()) ||
+ IsStoreInArrayLiteralICKind(kind()) || IsKeyedHasICKind(kind()));
+
+ Isolate* isolate = GetIsolate();
+ MaybeObject feedback = GetFeedback();
+ bool is_named_feedback = IsPropertyNameFeedback(feedback);
+ HeapObject heap_object;
+ if ((feedback->GetHeapObjectIfStrong(&heap_object) &&
+ heap_object.IsWeakFixedArray()) ||
+ is_named_feedback) {
+ int found = 0;
+ WeakFixedArray array;
+ if (is_named_feedback) {
+ array =
+ WeakFixedArray::cast(GetFeedbackExtra()->GetHeapObjectAssumeStrong());
+ } else {
+ array = WeakFixedArray::cast(heap_object);
+ }
+ const int increment = 2;
+ HeapObject heap_object;
+ for (int i = 0; i < array.length(); i += increment) {
+ DCHECK(array.Get(i)->IsWeakOrCleared());
+ if (array.Get(i)->GetHeapObjectIfWeak(&heap_object)) {
+ Map map = Map::cast(heap_object);
+ maps->push_back(handle(map, isolate));
+ found++;
+ }
+ }
+ return found;
+ } else if (feedback->GetHeapObjectIfWeak(&heap_object)) {
+ Map map = Map::cast(heap_object);
+ maps->push_back(handle(map, isolate));
+ return 1;
+ } else if (feedback->GetHeapObjectIfStrong(&heap_object) &&
+ heap_object ==
+ heap_object.GetReadOnlyRoots().premonomorphic_symbol()) {
+ if (GetFeedbackExtra()->GetHeapObjectIfWeak(&heap_object)) {
+ Map map = Map::cast(heap_object);
+ maps->push_back(handle(map, isolate));
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+MaybeObjectHandle FeedbackNexus::FindHandlerForMap(Handle<Map> map) const {
+ DCHECK(IsLoadICKind(kind()) || IsStoreICKind(kind()) ||
+ IsKeyedLoadICKind(kind()) || IsKeyedStoreICKind(kind()) ||
+ IsStoreOwnICKind(kind()) || IsStoreDataPropertyInLiteralKind(kind()) ||
+ IsKeyedHasICKind(kind()));
+
+ MaybeObject feedback = GetFeedback();
+ Isolate* isolate = GetIsolate();
+ bool is_named_feedback = IsPropertyNameFeedback(feedback);
+ HeapObject heap_object;
+ if ((feedback->GetHeapObjectIfStrong(&heap_object) &&
+ heap_object.IsWeakFixedArray()) ||
+ is_named_feedback) {
+ WeakFixedArray array;
+ if (is_named_feedback) {
+ array =
+ WeakFixedArray::cast(GetFeedbackExtra()->GetHeapObjectAssumeStrong());
+ } else {
+ array = WeakFixedArray::cast(heap_object);
+ }
+ const int increment = 2;
+ HeapObject heap_object;
+ for (int i = 0; i < array.length(); i += increment) {
+ DCHECK(array.Get(i)->IsWeakOrCleared());
+ if (array.Get(i)->GetHeapObjectIfWeak(&heap_object)) {
+ Map array_map = Map::cast(heap_object);
+ if (array_map == *map && !array.Get(i + increment - 1)->IsCleared()) {
+ MaybeObject handler = array.Get(i + increment - 1);
+ DCHECK(IC::IsHandler(handler));
+ return handle(handler, isolate);
+ }
+ }
+ }
+ } else if (feedback->GetHeapObjectIfWeak(&heap_object)) {
+ Map cell_map = Map::cast(heap_object);
+ if (cell_map == *map && !GetFeedbackExtra()->IsCleared()) {
+ MaybeObject handler = GetFeedbackExtra();
+ DCHECK(IC::IsHandler(handler));
+ return handle(handler, isolate);
+ }
+ }
+
+ return MaybeObjectHandle();
+}
+
+bool FeedbackNexus::FindHandlers(MaybeObjectHandles* code_list,
+ int length) const {
+ DCHECK(IsLoadICKind(kind()) || IsStoreICKind(kind()) ||
+ IsKeyedLoadICKind(kind()) || IsKeyedStoreICKind(kind()) ||
+ IsStoreOwnICKind(kind()) || IsStoreDataPropertyInLiteralKind(kind()) ||
+ IsStoreInArrayLiteralICKind(kind()) || IsKeyedHasICKind(kind()));
+
+ MaybeObject feedback = GetFeedback();
+ Isolate* isolate = GetIsolate();
+ int count = 0;
+ bool is_named_feedback = IsPropertyNameFeedback(feedback);
+ HeapObject heap_object;
+ if ((feedback->GetHeapObjectIfStrong(&heap_object) &&
+ heap_object.IsWeakFixedArray()) ||
+ is_named_feedback) {
+ WeakFixedArray array;
+ if (is_named_feedback) {
+ array =
+ WeakFixedArray::cast(GetFeedbackExtra()->GetHeapObjectAssumeStrong());
+ } else {
+ array = WeakFixedArray::cast(heap_object);
+ }
+ const int increment = 2;
+ HeapObject heap_object;
+ for (int i = 0; i < array.length(); i += increment) {
+ // Be sure to skip handlers whose maps have been cleared.
+ DCHECK(array.Get(i)->IsWeakOrCleared());
+ if (array.Get(i)->GetHeapObjectIfWeak(&heap_object) &&
+ !array.Get(i + increment - 1)->IsCleared()) {
+ MaybeObject handler = array.Get(i + increment - 1);
+ DCHECK(IC::IsHandler(handler));
+ code_list->push_back(handle(handler, isolate));
+ count++;
+ }
+ }
+ } else if (feedback->GetHeapObjectIfWeak(&heap_object)) {
+ MaybeObject extra = GetFeedbackExtra();
+ if (!extra->IsCleared()) {
+ DCHECK(IC::IsHandler(extra));
+ code_list->push_back(handle(extra, isolate));
+ count++;
+ }
+ }
+ return count == length;
+}
+
+Name FeedbackNexus::GetName() const {
+ if (IsKeyedStoreICKind(kind()) || IsKeyedLoadICKind(kind()) ||
+ IsKeyedHasICKind(kind())) {
+ MaybeObject feedback = GetFeedback();
+ if (IsPropertyNameFeedback(feedback)) {
+ return Name::cast(feedback->GetHeapObjectAssumeStrong());
+ }
+ }
+ return Name();
+}
+
+KeyedAccessLoadMode FeedbackNexus::GetKeyedAccessLoadMode() const {
+ DCHECK(IsKeyedLoadICKind(kind()) || IsKeyedHasICKind(kind()));
+ MapHandles maps;
+ MaybeObjectHandles handlers;
+
+ if (GetKeyType() == PROPERTY) return STANDARD_LOAD;
+
+ ExtractMaps(&maps);
+ FindHandlers(&handlers, static_cast<int>(maps.size()));
+ for (MaybeObjectHandle const& handler : handlers) {
+ KeyedAccessLoadMode mode = LoadHandler::GetKeyedAccessLoadMode(*handler);
+ if (mode != STANDARD_LOAD) return mode;
+ }
+
+ return STANDARD_LOAD;
+}
+
+namespace {
+
+bool BuiltinHasKeyedAccessStoreMode(int builtin_index) {
+ DCHECK(Builtins::IsBuiltinId(builtin_index));
+ switch (builtin_index) {
+ case Builtins::kKeyedStoreIC_SloppyArguments_Standard:
+ case Builtins::kKeyedStoreIC_SloppyArguments_GrowNoTransitionHandleCOW:
+ case Builtins::kKeyedStoreIC_SloppyArguments_NoTransitionIgnoreOOB:
+ case Builtins::kKeyedStoreIC_SloppyArguments_NoTransitionHandleCOW:
+ case Builtins::kStoreFastElementIC_Standard:
+ case Builtins::kStoreFastElementIC_GrowNoTransitionHandleCOW:
+ case Builtins::kStoreFastElementIC_NoTransitionIgnoreOOB:
+ case Builtins::kStoreFastElementIC_NoTransitionHandleCOW:
+ case Builtins::kStoreInArrayLiteralIC_Slow_Standard:
+ case Builtins::kStoreInArrayLiteralIC_Slow_GrowNoTransitionHandleCOW:
+ case Builtins::kStoreInArrayLiteralIC_Slow_NoTransitionIgnoreOOB:
+ case Builtins::kStoreInArrayLiteralIC_Slow_NoTransitionHandleCOW:
+ case Builtins::kKeyedStoreIC_Slow_Standard:
+ case Builtins::kKeyedStoreIC_Slow_GrowNoTransitionHandleCOW:
+ case Builtins::kKeyedStoreIC_Slow_NoTransitionIgnoreOOB:
+ case Builtins::kKeyedStoreIC_Slow_NoTransitionHandleCOW:
+ case Builtins::kElementsTransitionAndStore_Standard:
+ case Builtins::kElementsTransitionAndStore_GrowNoTransitionHandleCOW:
+ case Builtins::kElementsTransitionAndStore_NoTransitionIgnoreOOB:
+ case Builtins::kElementsTransitionAndStore_NoTransitionHandleCOW:
+ return true;
+ default:
+ return false;
+ }
+ UNREACHABLE();
+}
+
+KeyedAccessStoreMode KeyedAccessStoreModeForBuiltin(int builtin_index) {
+ DCHECK(BuiltinHasKeyedAccessStoreMode(builtin_index));
+ switch (builtin_index) {
+ case Builtins::kKeyedStoreIC_SloppyArguments_Standard:
+ case Builtins::kStoreInArrayLiteralIC_Slow_Standard:
+ case Builtins::kKeyedStoreIC_Slow_Standard:
+ case Builtins::kStoreFastElementIC_Standard:
+ case Builtins::kElementsTransitionAndStore_Standard:
+ return STANDARD_STORE;
+ case Builtins::kKeyedStoreIC_SloppyArguments_GrowNoTransitionHandleCOW:
+ case Builtins::kStoreInArrayLiteralIC_Slow_GrowNoTransitionHandleCOW:
+ case Builtins::kKeyedStoreIC_Slow_GrowNoTransitionHandleCOW:
+ case Builtins::kStoreFastElementIC_GrowNoTransitionHandleCOW:
+ case Builtins::kElementsTransitionAndStore_GrowNoTransitionHandleCOW:
+ return STORE_AND_GROW_HANDLE_COW;
+ case Builtins::kKeyedStoreIC_SloppyArguments_NoTransitionIgnoreOOB:
+ case Builtins::kStoreInArrayLiteralIC_Slow_NoTransitionIgnoreOOB:
+ case Builtins::kKeyedStoreIC_Slow_NoTransitionIgnoreOOB:
+ case Builtins::kStoreFastElementIC_NoTransitionIgnoreOOB:
+ case Builtins::kElementsTransitionAndStore_NoTransitionIgnoreOOB:
+ return STORE_IGNORE_OUT_OF_BOUNDS;
+ case Builtins::kKeyedStoreIC_SloppyArguments_NoTransitionHandleCOW:
+ case Builtins::kStoreInArrayLiteralIC_Slow_NoTransitionHandleCOW:
+ case Builtins::kKeyedStoreIC_Slow_NoTransitionHandleCOW:
+ case Builtins::kStoreFastElementIC_NoTransitionHandleCOW:
+ case Builtins::kElementsTransitionAndStore_NoTransitionHandleCOW:
+ return STORE_HANDLE_COW;
+ default:
+ UNREACHABLE();
+ }
+}
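+
+ // In other words (illustrative summary), the store mode is recoverable
+ // from the builtin's name suffix:
+ //
+ //   *_Standard                   -> STANDARD_STORE
+ //   *_GrowNoTransitionHandleCOW  -> STORE_AND_GROW_HANDLE_COW
+ //   *_NoTransitionIgnoreOOB     -> STORE_IGNORE_OUT_OF_BOUNDS
+ //   *_NoTransitionHandleCOW     -> STORE_HANDLE_COW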
+
+} // namespace
+
+KeyedAccessStoreMode FeedbackNexus::GetKeyedAccessStoreMode() const {
+ DCHECK(IsKeyedStoreICKind(kind()) || IsStoreInArrayLiteralICKind(kind()));
+ KeyedAccessStoreMode mode = STANDARD_STORE;
+ MapHandles maps;
+ MaybeObjectHandles handlers;
+
+ if (GetKeyType() == PROPERTY) return mode;
+
+ ExtractMaps(&maps);
+ FindHandlers(&handlers, static_cast<int>(maps.size()));
+ for (const MaybeObjectHandle& maybe_code_handler : handlers) {
+ // The first handler that isn't the slow handler will have the bits we need.
+ Handle<Code> handler;
+ if (maybe_code_handler.object()->IsStoreHandler()) {
+ Handle<StoreHandler> data_handler =
+ Handle<StoreHandler>::cast(maybe_code_handler.object());
+ handler = handle(Code::cast(data_handler->smi_handler()),
+ vector().GetIsolate());
+ } else if (maybe_code_handler.object()->IsSmi()) {
+ // Skip proxy handlers.
+ DCHECK_EQ(*(maybe_code_handler.object()),
+ *StoreHandler::StoreProxy(GetIsolate()));
+ continue;
+ } else {
+ // Element store without prototype chain check.
+ handler = Handle<Code>::cast(maybe_code_handler.object());
+ }
+
+ if (handler->is_builtin()) {
+ const int builtin_index = handler->builtin_index();
+ if (!BuiltinHasKeyedAccessStoreMode(builtin_index)) continue;
+
+ mode = KeyedAccessStoreModeForBuiltin(builtin_index);
+ break;
+ }
+ }
+
+ return mode;
+}
+
+IcCheckType FeedbackNexus::GetKeyType() const {
+ DCHECK(IsKeyedStoreICKind(kind()) || IsKeyedLoadICKind(kind()) ||
+ IsStoreInArrayLiteralICKind(kind()) || IsKeyedHasICKind(kind()));
+ MaybeObject feedback = GetFeedback();
+ if (feedback == MaybeObject::FromObject(
+ *FeedbackVector::MegamorphicSentinel(GetIsolate()))) {
+ return static_cast<IcCheckType>(
+ Smi::ToInt(GetFeedbackExtra()->cast<Object>()));
+ }
+ return IsPropertyNameFeedback(feedback) ? PROPERTY : ELEMENT;
+}
+
+BinaryOperationHint FeedbackNexus::GetBinaryOperationFeedback() const {
+ DCHECK_EQ(kind(), FeedbackSlotKind::kBinaryOp);
+ int feedback = GetFeedback().ToSmi().value();
+ return BinaryOperationHintFromFeedback(feedback);
+}
+
+CompareOperationHint FeedbackNexus::GetCompareOperationFeedback() const {
+ DCHECK_EQ(kind(), FeedbackSlotKind::kCompareOp);
+ int feedback = GetFeedback().ToSmi().value();
+ return CompareOperationHintFromFeedback(feedback);
+}
+
+ForInHint FeedbackNexus::GetForInFeedback() const {
+ DCHECK_EQ(kind(), FeedbackSlotKind::kForIn);
+ int feedback = GetFeedback().ToSmi().value();
+ return ForInHintFromFeedback(feedback);
+}
+
+MaybeHandle<JSObject> FeedbackNexus::GetConstructorFeedback() const {
+ DCHECK_EQ(kind(), FeedbackSlotKind::kInstanceOf);
+ Isolate* isolate = GetIsolate();
+ MaybeObject feedback = GetFeedback();
+ HeapObject heap_object;
+ if (feedback->GetHeapObjectIfWeak(&heap_object)) {
+ return handle(JSObject::cast(heap_object), isolate);
+ }
+ return MaybeHandle<JSObject>();
+}
+
+namespace {
+
+bool InList(Handle<ArrayList> types, Handle<String> type) {
+ for (int i = 0; i < types->Length(); i++) {
+ Object obj = types->Get(i);
+ if (String::cast(obj).Equals(*type)) {
+ return true;
+ }
+ }
+ return false;
+}
+} // anonymous namespace
+
+void FeedbackNexus::Collect(Handle<String> type, int position) {
+ DCHECK(IsTypeProfileKind(kind()));
+ DCHECK_GE(position, 0);
+ Isolate* isolate = GetIsolate();
+
+ MaybeObject const feedback = GetFeedback();
+
+ // Map the source position to a collection of types.
+ Handle<SimpleNumberDictionary> types;
+
+ if (feedback == MaybeObject::FromObject(
+ *FeedbackVector::UninitializedSentinel(isolate))) {
+ types = SimpleNumberDictionary::New(isolate, 1);
+ } else {
+ types = handle(
+ SimpleNumberDictionary::cast(feedback->GetHeapObjectAssumeStrong()),
+ isolate);
+ }
+
+ Handle<ArrayList> position_specific_types;
+
+ int entry = types->FindEntry(isolate, position);
+ if (entry == SimpleNumberDictionary::kNotFound) {
+ position_specific_types = ArrayList::New(isolate, 1);
+ types = SimpleNumberDictionary::Set(
+ isolate, types, position,
+ ArrayList::Add(isolate, position_specific_types, type));
+ } else {
+ DCHECK(types->ValueAt(entry).IsArrayList());
+ position_specific_types =
+ handle(ArrayList::cast(types->ValueAt(entry)), isolate);
+ if (!InList(position_specific_types, type)) { // Add type
+ types = SimpleNumberDictionary::Set(
+ isolate, types, position,
+ ArrayList::Add(isolate, position_specific_types, type));
+ }
+ }
+ SetFeedback(*types);
+}
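+
+ // Resulting feedback shape (sketch): a SimpleNumberDictionary keyed by
+ // source position, each value an ArrayList of type strings. After
+ // collecting "number" and then "string" at position 42, the feedback is,
+ // conceptually:
+ //
+ //   { 42 -> ["number", "string"] }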
+
+std::vector<int> FeedbackNexus::GetSourcePositions() const {
+ DCHECK(IsTypeProfileKind(kind()));
+ std::vector<int> source_positions;
+ Isolate* isolate = GetIsolate();
+
+ MaybeObject const feedback = GetFeedback();
+
+ if (feedback == MaybeObject::FromObject(
+ *FeedbackVector::UninitializedSentinel(isolate))) {
+ return source_positions;
+ }
+
+ Handle<SimpleNumberDictionary> types(
+ SimpleNumberDictionary::cast(feedback->GetHeapObjectAssumeStrong()),
+ isolate);
+
+ for (int index = SimpleNumberDictionary::kElementsStartIndex;
+ index < types->length(); index += SimpleNumberDictionary::kEntrySize) {
+ int key_index = index + SimpleNumberDictionary::kEntryKeyIndex;
+ Object key = types->get(key_index);
+ if (key.IsSmi()) {
+ int position = Smi::cast(key).value();
+ source_positions.push_back(position);
+ }
+ }
+ return source_positions;
+}
+
+std::vector<Handle<String>> FeedbackNexus::GetTypesForSourcePositions(
+ uint32_t position) const {
+ DCHECK(IsTypeProfileKind(kind()));
+ Isolate* isolate = GetIsolate();
+
+ MaybeObject const feedback = GetFeedback();
+ std::vector<Handle<String>> types_for_position;
+ if (feedback == MaybeObject::FromObject(
+ *FeedbackVector::UninitializedSentinel(isolate))) {
+ return types_for_position;
+ }
+
+ Handle<SimpleNumberDictionary> types(
+ SimpleNumberDictionary::cast(feedback->GetHeapObjectAssumeStrong()),
+ isolate);
+
+ int entry = types->FindEntry(isolate, position);
+ if (entry == SimpleNumberDictionary::kNotFound) {
+ return types_for_position;
+ }
+ DCHECK(types->ValueAt(entry).IsArrayList());
+ Handle<ArrayList> position_specific_types =
+ Handle<ArrayList>(ArrayList::cast(types->ValueAt(entry)), isolate);
+ for (int i = 0; i < position_specific_types->Length(); i++) {
+ Object t = position_specific_types->Get(i);
+ types_for_position.push_back(Handle<String>(String::cast(t), isolate));
+ }
+
+ return types_for_position;
+}
+
+namespace {
+
+Handle<JSObject> ConvertToJSObject(Isolate* isolate,
+ Handle<SimpleNumberDictionary> feedback) {
+ Handle<JSObject> type_profile =
+ isolate->factory()->NewJSObject(isolate->object_function());
+
+ for (int index = SimpleNumberDictionary::kElementsStartIndex;
+ index < feedback->length();
+ index += SimpleNumberDictionary::kEntrySize) {
+ int key_index = index + SimpleNumberDictionary::kEntryKeyIndex;
+ Object key = feedback->get(key_index);
+ if (key.IsSmi()) {
+ int value_index = index + SimpleNumberDictionary::kEntryValueIndex;
+
+ Handle<ArrayList> position_specific_types(
+ ArrayList::cast(feedback->get(value_index)), isolate);
+
+ int position = Smi::ToInt(key);
+ JSObject::AddDataElement(
+ type_profile, position,
+ isolate->factory()->NewJSArrayWithElements(
+ ArrayList::Elements(isolate, position_specific_types)),
+ PropertyAttributes::NONE);
+ }
+ }
+ return type_profile;
+}
+} // namespace
+
+JSObject FeedbackNexus::GetTypeProfile() const {
+ DCHECK(IsTypeProfileKind(kind()));
+ Isolate* isolate = GetIsolate();
+
+ MaybeObject const feedback = GetFeedback();
+
+ if (feedback == MaybeObject::FromObject(
+ *FeedbackVector::UninitializedSentinel(isolate))) {
+ return *isolate->factory()->NewJSObject(isolate->object_function());
+ }
+
+ return *ConvertToJSObject(isolate,
+ handle(SimpleNumberDictionary::cast(
+ feedback->GetHeapObjectAssumeStrong()),
+ isolate));
+}
+
+void FeedbackNexus::ResetTypeProfile() {
+ DCHECK(IsTypeProfileKind(kind()));
+ SetFeedback(*FeedbackVector::UninitializedSentinel(GetIsolate()));
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/objects/feedback-vector.h b/deps/v8/src/objects/feedback-vector.h
new file mode 100644
index 0000000000..89e0b9e6aa
--- /dev/null
+++ b/deps/v8/src/objects/feedback-vector.h
@@ -0,0 +1,772 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_FEEDBACK_VECTOR_H_
+#define V8_OBJECTS_FEEDBACK_VECTOR_H_
+
+#include <vector>
+
+#include "src/base/logging.h"
+#include "src/base/macros.h"
+#include "src/common/globals.h"
+#include "src/objects/elements-kind.h"
+#include "src/objects/map.h"
+#include "src/objects/name.h"
+#include "src/objects/type-hints.h"
+#include "src/zone/zone-containers.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+enum class FeedbackSlotKind {
+ // This kind means that the slot points into the middle of another slot
+ // that occupies more than one feedback vector element.
+ // There must be no such slots in the system.
+ kInvalid,
+
+ // Sloppy kinds come first, for easy language mode testing.
+ kStoreGlobalSloppy,
+ kStoreNamedSloppy,
+ kStoreKeyedSloppy,
+ kLastSloppyKind = kStoreKeyedSloppy,
+
+ // Strict and language mode unaware kinds.
+ kCall,
+ kLoadProperty,
+ kLoadGlobalNotInsideTypeof,
+ kLoadGlobalInsideTypeof,
+ kLoadKeyed,
+ kHasKeyed,
+ kStoreGlobalStrict,
+ kStoreNamedStrict,
+ kStoreOwnNamed,
+ kStoreKeyedStrict,
+ kStoreInArrayLiteral,
+ kBinaryOp,
+ kCompareOp,
+ kStoreDataPropertyInLiteral,
+ kTypeProfile,
+ kLiteral,
+ kForIn,
+ kInstanceOf,
+ kCloneObject,
+
+ kKindsNumber // Last value indicating number of kinds.
+};
+
+inline bool IsCallICKind(FeedbackSlotKind kind) {
+ return kind == FeedbackSlotKind::kCall;
+}
+
+inline bool IsLoadICKind(FeedbackSlotKind kind) {
+ return kind == FeedbackSlotKind::kLoadProperty;
+}
+
+inline bool IsLoadGlobalICKind(FeedbackSlotKind kind) {
+ return kind == FeedbackSlotKind::kLoadGlobalNotInsideTypeof ||
+ kind == FeedbackSlotKind::kLoadGlobalInsideTypeof;
+}
+
+inline bool IsKeyedLoadICKind(FeedbackSlotKind kind) {
+ return kind == FeedbackSlotKind::kLoadKeyed;
+}
+
+inline bool IsKeyedHasICKind(FeedbackSlotKind kind) {
+ return kind == FeedbackSlotKind::kHasKeyed;
+}
+
+inline bool IsStoreGlobalICKind(FeedbackSlotKind kind) {
+ return kind == FeedbackSlotKind::kStoreGlobalSloppy ||
+ kind == FeedbackSlotKind::kStoreGlobalStrict;
+}
+
+inline bool IsStoreICKind(FeedbackSlotKind kind) {
+ return kind == FeedbackSlotKind::kStoreNamedSloppy ||
+ kind == FeedbackSlotKind::kStoreNamedStrict;
+}
+
+inline bool IsStoreOwnICKind(FeedbackSlotKind kind) {
+ return kind == FeedbackSlotKind::kStoreOwnNamed;
+}
+
+inline bool IsStoreDataPropertyInLiteralKind(FeedbackSlotKind kind) {
+ return kind == FeedbackSlotKind::kStoreDataPropertyInLiteral;
+}
+
+inline bool IsKeyedStoreICKind(FeedbackSlotKind kind) {
+ return kind == FeedbackSlotKind::kStoreKeyedSloppy ||
+ kind == FeedbackSlotKind::kStoreKeyedStrict;
+}
+
+inline bool IsStoreInArrayLiteralICKind(FeedbackSlotKind kind) {
+ return kind == FeedbackSlotKind::kStoreInArrayLiteral;
+}
+
+inline bool IsGlobalICKind(FeedbackSlotKind kind) {
+ return IsLoadGlobalICKind(kind) || IsStoreGlobalICKind(kind);
+}
+
+inline bool IsTypeProfileKind(FeedbackSlotKind kind) {
+ return kind == FeedbackSlotKind::kTypeProfile;
+}
+
+inline bool IsCloneObjectKind(FeedbackSlotKind kind) {
+ return kind == FeedbackSlotKind::kCloneObject;
+}
+
+inline TypeofMode GetTypeofModeFromSlotKind(FeedbackSlotKind kind) {
+ DCHECK(IsLoadGlobalICKind(kind));
+ return (kind == FeedbackSlotKind::kLoadGlobalInsideTypeof)
+ ? INSIDE_TYPEOF
+ : NOT_INSIDE_TYPEOF;
+}
+
+inline LanguageMode GetLanguageModeFromSlotKind(FeedbackSlotKind kind) {
+ DCHECK(IsStoreICKind(kind) || IsStoreOwnICKind(kind) ||
+ IsStoreGlobalICKind(kind) || IsKeyedStoreICKind(kind));
+ STATIC_ASSERT(FeedbackSlotKind::kStoreGlobalSloppy <=
+ FeedbackSlotKind::kLastSloppyKind);
+ STATIC_ASSERT(FeedbackSlotKind::kStoreKeyedSloppy <=
+ FeedbackSlotKind::kLastSloppyKind);
+ STATIC_ASSERT(FeedbackSlotKind::kStoreNamedSloppy <=
+ FeedbackSlotKind::kLastSloppyKind);
+ return (kind <= FeedbackSlotKind::kLastSloppyKind) ? LanguageMode::kSloppy
+ : LanguageMode::kStrict;
+}
+
+V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
+ FeedbackSlotKind kind);
+
+using MaybeObjectHandles = std::vector<MaybeObjectHandle>;
+
+class FeedbackMetadata;
+
+// ClosureFeedbackCellArray is a FixedArray that contains feedback cells used
+// when creating closures from a function. Along with the feedback
+// cells, the first slot (slot 0) is used to hold a budget to measure the
+// hotness of the function. This is created once the function is compiled and is
+// either held by the feedback vector (if allocated) or by the FeedbackCell of
+// the closure.
+class ClosureFeedbackCellArray : public FixedArray {
+ public:
+ NEVER_READ_ONLY_SPACE
+
+ DECL_CAST(ClosureFeedbackCellArray)
+
+ V8_EXPORT_PRIVATE static Handle<ClosureFeedbackCellArray> New(
+ Isolate* isolate, Handle<SharedFunctionInfo> shared);
+ inline Handle<FeedbackCell> GetFeedbackCell(int index);
+
+ DECL_VERIFIER(ClosureFeedbackCellArray)
+ DECL_PRINTER(ClosureFeedbackCellArray)
+
+ private:
+ OBJECT_CONSTRUCTORS(ClosureFeedbackCellArray, FixedArray);
+};
+
+// A FeedbackVector has a fixed header with:
+// - shared function info (which includes feedback metadata)
+// - invocation count
+// - runtime profiler ticks
+// - optimized code cell (weak cell or Smi marker)
+// followed by an array of feedback slots, of length determined by the feedback
+// metadata.
+class FeedbackVector : public HeapObject {
+ public:
+ NEVER_READ_ONLY_SPACE
+
+ DECL_CAST(FeedbackVector)
+
+ inline bool is_empty() const;
+
+ inline FeedbackMetadata metadata() const;
+
+ // [shared_function_info]: The shared function info for the function with this
+ // feedback vector.
+ DECL_ACCESSORS(shared_function_info, SharedFunctionInfo)
+
+ // [optimized_code_weak_or_smi]: weak reference to optimized code or a Smi
+ // marker defining optimization behaviour.
+ DECL_ACCESSORS(optimized_code_weak_or_smi, MaybeObject)
+
+ // [closure_feedback_cell_array]: The ClosureFeedbackCellArray that holds
+ // the feedback cells for any closures created by this function.
+ DECL_ACCESSORS(closure_feedback_cell_array, ClosureFeedbackCellArray)
+
+ // [length]: The length of the feedback vector (not including the header, i.e.
+ // the number of feedback slots).
+ DECL_INT32_ACCESSORS(length)
+
+ // [invocation_count]: The number of times this function has been invoked.
+ DECL_INT32_ACCESSORS(invocation_count)
+
+ // [profiler_ticks]: The number of times this function has been seen by the
+ // runtime profiler.
+ DECL_INT32_ACCESSORS(profiler_ticks)
+
+ // Initialize the padding if necessary.
+ inline void clear_padding();
+
+ inline void clear_invocation_count();
+
+ inline Code optimized_code() const;
+ inline OptimizationMarker optimization_marker() const;
+ inline bool has_optimized_code() const;
+ inline bool has_optimization_marker() const;
+ void ClearOptimizedCode();
+ void EvictOptimizedCodeMarkedForDeoptimization(SharedFunctionInfo shared,
+ const char* reason);
+ static void SetOptimizedCode(Handle<FeedbackVector> vector,
+ Handle<Code> code);
+ void SetOptimizationMarker(OptimizationMarker marker);
+
+ // Clears the optimization marker in the feedback vector.
+ void ClearOptimizationMarker();
+
+ // Converts a slot to an integer index into the underlying array.
+ static int GetIndex(FeedbackSlot slot) { return slot.ToInt(); }
+
+ // Converts an integer index into the underlying array back to a slot.
+ static inline FeedbackSlot ToSlot(int index);
+ inline MaybeObject Get(FeedbackSlot slot) const;
+ inline MaybeObject get(int index) const;
+ inline void Set(FeedbackSlot slot, MaybeObject value,
+ WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
+ inline void set(int index, MaybeObject value,
+ WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
+ inline void Set(FeedbackSlot slot, Object value,
+ WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
+ inline void set(int index, Object value,
+ WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
+
+ // Returns the feedback cell at |index| that is used to create the
+ // closure.
+ inline Handle<FeedbackCell> GetClosureFeedbackCell(int index) const;
+
+ // Gives access to raw memory which stores the array's data.
+ inline MaybeObjectSlot slots_start();
+
+ // Returns slot kind for given slot.
+ V8_EXPORT_PRIVATE FeedbackSlotKind GetKind(FeedbackSlot slot) const;
+
+ FeedbackSlot GetTypeProfileSlot() const;
+
+ V8_EXPORT_PRIVATE static Handle<FeedbackVector> New(
+ Isolate* isolate, Handle<SharedFunctionInfo> shared,
+ Handle<ClosureFeedbackCellArray> closure_feedback_cell_array);
+
+#define DEFINE_SLOT_KIND_PREDICATE(Name) \
+ bool Name(FeedbackSlot slot) const { return Name##Kind(GetKind(slot)); }
+
+ DEFINE_SLOT_KIND_PREDICATE(IsCallIC)
+ DEFINE_SLOT_KIND_PREDICATE(IsGlobalIC)
+ DEFINE_SLOT_KIND_PREDICATE(IsLoadIC)
+ DEFINE_SLOT_KIND_PREDICATE(IsLoadGlobalIC)
+ DEFINE_SLOT_KIND_PREDICATE(IsKeyedLoadIC)
+ DEFINE_SLOT_KIND_PREDICATE(IsStoreIC)
+ DEFINE_SLOT_KIND_PREDICATE(IsStoreOwnIC)
+ DEFINE_SLOT_KIND_PREDICATE(IsStoreGlobalIC)
+ DEFINE_SLOT_KIND_PREDICATE(IsKeyedStoreIC)
+ DEFINE_SLOT_KIND_PREDICATE(IsTypeProfile)
+#undef DEFINE_SLOT_KIND_PREDICATE
+
+ // Returns typeof mode encoded into kind of given slot.
+ inline TypeofMode GetTypeofMode(FeedbackSlot slot) const {
+ return GetTypeofModeFromSlotKind(GetKind(slot));
+ }
+
+ // Returns language mode encoded into kind of given slot.
+ inline LanguageMode GetLanguageMode(FeedbackSlot slot) const {
+ return GetLanguageModeFromSlotKind(GetKind(slot));
+ }
+
+ V8_EXPORT_PRIVATE static void AssertNoLegacyTypes(MaybeObject object);
+
+ DECL_PRINTER(FeedbackVector)
+ DECL_VERIFIER(FeedbackVector)
+
+ void FeedbackSlotPrint(std::ostream& os, FeedbackSlot slot); // NOLINT
+
+ // Clears the vector slots. Return true if feedback has changed.
+ bool ClearSlots(Isolate* isolate);
+
+ // The object that indicates an uninitialized cache.
+ static inline Handle<Symbol> UninitializedSentinel(Isolate* isolate);
+
+ // The object that indicates a generic state.
+ static inline Handle<Symbol> GenericSentinel(Isolate* isolate);
+
+ // The object that indicates a megamorphic state.
+ static inline Handle<Symbol> MegamorphicSentinel(Isolate* isolate);
+
+ // The object that indicates a premonomorphic state.
+ static inline Handle<Symbol> PremonomorphicSentinel(Isolate* isolate);
+
+ // A raw version of the uninitialized sentinel that's safe to read during
+ // garbage collection (e.g., for patching the cache).
+ static inline Symbol RawUninitializedSentinel(Isolate* isolate);
+
+ // Layout description.
+ DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize,
+ TORQUE_GENERATED_FEEDBACK_VECTOR_FIELDS)
+
+ static const int kHeaderSize = kSize;
+
+ static_assert(kSize % kObjectAlignment == 0,
+ "Header must be padded for alignment");
+ static const int kFeedbackSlotsOffset = kHeaderSize;
+
+ class BodyDescriptor;
+
+ // Garbage collection support.
+ static constexpr int SizeFor(int length) {
+ return kFeedbackSlotsOffset + length * kTaggedSize;
+ }
+
+ private:
+ static void AddToVectorsForProfilingTools(Isolate* isolate,
+ Handle<FeedbackVector> vector);
+
+ OBJECT_CONSTRUCTORS(FeedbackVector, HeapObject);
+};
+
+class V8_EXPORT_PRIVATE FeedbackVectorSpec {
+ public:
+ explicit FeedbackVectorSpec(Zone* zone)
+ : slot_kinds_(zone), num_closure_feedback_cells_(0) {
+ slot_kinds_.reserve(16);
+ }
+
+ int slots() const { return static_cast<int>(slot_kinds_.size()); }
+ int closure_feedback_cells() const { return num_closure_feedback_cells_; }
+
+ int AddFeedbackCellForCreateClosure() {
+ return num_closure_feedback_cells_++;
+ }
+
+ FeedbackSlotKind GetKind(FeedbackSlot slot) const {
+ return static_cast<FeedbackSlotKind>(slot_kinds_.at(slot.ToInt()));
+ }
+
+ bool HasTypeProfileSlot() const;
+
+ // If used, the TypeProfileSlot is always added as the first slot, so its
+ // index is the constant below. If other slots were added before the
+ // TypeProfileSlot, this constant would no longer hold.
+ static const int kTypeProfileSlotIndex = 0;
+
+ FeedbackSlot AddCallICSlot() { return AddSlot(FeedbackSlotKind::kCall); }
+
+ FeedbackSlot AddLoadICSlot() {
+ return AddSlot(FeedbackSlotKind::kLoadProperty);
+ }
+
+ FeedbackSlot AddLoadGlobalICSlot(TypeofMode typeof_mode) {
+ return AddSlot(typeof_mode == INSIDE_TYPEOF
+ ? FeedbackSlotKind::kLoadGlobalInsideTypeof
+ : FeedbackSlotKind::kLoadGlobalNotInsideTypeof);
+ }
+
+ FeedbackSlot AddKeyedLoadICSlot() {
+ return AddSlot(FeedbackSlotKind::kLoadKeyed);
+ }
+
+ FeedbackSlot AddKeyedHasICSlot() {
+ return AddSlot(FeedbackSlotKind::kHasKeyed);
+ }
+
+ FeedbackSlotKind GetStoreICSlot(LanguageMode language_mode) {
+ STATIC_ASSERT(LanguageModeSize == 2);
+ return is_strict(language_mode) ? FeedbackSlotKind::kStoreNamedStrict
+ : FeedbackSlotKind::kStoreNamedSloppy;
+ }
+
+ FeedbackSlot AddStoreICSlot(LanguageMode language_mode) {
+ return AddSlot(GetStoreICSlot(language_mode));
+ }
+
+ FeedbackSlot AddStoreOwnICSlot() {
+ return AddSlot(FeedbackSlotKind::kStoreOwnNamed);
+ }
+
+ FeedbackSlot AddStoreGlobalICSlot(LanguageMode language_mode) {
+ STATIC_ASSERT(LanguageModeSize == 2);
+ return AddSlot(is_strict(language_mode)
+ ? FeedbackSlotKind::kStoreGlobalStrict
+ : FeedbackSlotKind::kStoreGlobalSloppy);
+ }
+
+ FeedbackSlotKind GetKeyedStoreICSlotKind(LanguageMode language_mode) {
+ STATIC_ASSERT(LanguageModeSize == 2);
+ return is_strict(language_mode) ? FeedbackSlotKind::kStoreKeyedStrict
+ : FeedbackSlotKind::kStoreKeyedSloppy;
+ }
+
+ FeedbackSlot AddKeyedStoreICSlot(LanguageMode language_mode) {
+ return AddSlot(GetKeyedStoreICSlotKind(language_mode));
+ }
+
+ FeedbackSlot AddStoreInArrayLiteralICSlot() {
+ return AddSlot(FeedbackSlotKind::kStoreInArrayLiteral);
+ }
+
+ FeedbackSlot AddBinaryOpICSlot() {
+ return AddSlot(FeedbackSlotKind::kBinaryOp);
+ }
+
+ FeedbackSlot AddCompareICSlot() {
+ return AddSlot(FeedbackSlotKind::kCompareOp);
+ }
+
+ FeedbackSlot AddForInSlot() { return AddSlot(FeedbackSlotKind::kForIn); }
+
+ FeedbackSlot AddInstanceOfSlot() {
+ return AddSlot(FeedbackSlotKind::kInstanceOf);
+ }
+
+ FeedbackSlot AddLiteralSlot() { return AddSlot(FeedbackSlotKind::kLiteral); }
+
+ FeedbackSlot AddStoreDataPropertyInLiteralICSlot() {
+ return AddSlot(FeedbackSlotKind::kStoreDataPropertyInLiteral);
+ }
+
+ FeedbackSlot AddTypeProfileSlot();
+
+ FeedbackSlot AddCloneObjectSlot() {
+ return AddSlot(FeedbackSlotKind::kCloneObject);
+ }
+
+#ifdef OBJECT_PRINT
+ // For gdb debugging.
+ void Print();
+#endif // OBJECT_PRINT
+
+ DECL_PRINTER(FeedbackVectorSpec)
+
+ private:
+ FeedbackSlot AddSlot(FeedbackSlotKind kind);
+
+ void append(FeedbackSlotKind kind) {
+ slot_kinds_.push_back(static_cast<unsigned char>(kind));
+ }
+
+ ZoneVector<unsigned char> slot_kinds_;
+ unsigned int num_closure_feedback_cells_;
+
+ friend class SharedFeedbackSlot;
+};
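+
+ // Usage sketch (illustrative, not part of the original change): a
+ // front-end accumulates slots in a spec and later materializes feedback
+ // metadata from it:
+ //
+ //   FeedbackVectorSpec spec(zone);
+ //   FeedbackSlot load_slot = spec.AddLoadICSlot();
+ //   FeedbackSlot call_slot = spec.AddCallICSlot();
+ //   Handle<FeedbackMetadata> md = FeedbackMetadata::New(isolate, &spec);
+ //   DCHECK_EQ(FeedbackSlotKind::kLoadProperty, md->GetKind(load_slot));
+ //   DCHECK_EQ(FeedbackSlotKind::kCall, md->GetKind(call_slot));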
+
+// Helper class that creates a feedback slot on-demand.
+class SharedFeedbackSlot {
+ public:
+ // FeedbackSlot's default constructor yields an invalid slot, so slot_
+ // starts out invalid until Get() allocates it.
+ SharedFeedbackSlot(FeedbackVectorSpec* spec, FeedbackSlotKind kind)
+ : kind_(kind), spec_(spec) {}
+
+ FeedbackSlot Get() {
+ if (slot_.IsInvalid()) slot_ = spec_->AddSlot(kind_);
+ return slot_;
+ }
+
+ private:
+ FeedbackSlotKind kind_;
+ FeedbackSlot slot_;
+ FeedbackVectorSpec* spec_;
+};
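+
+ // Usage sketch (illustrative): several sites can share one lazily
+ // allocated slot:
+ //
+ //   SharedFeedbackSlot shared(&spec, FeedbackSlotKind::kLiteral);
+ //   FeedbackSlot slot = shared.Get();  // Adds the slot on first use.
+ //   DCHECK_EQ(slot, shared.Get());     // Later calls return the same slot.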
+
+// FeedbackMetadata is an array-like object with a slot count (indicating how
+// many slots are stored). We save space by packing several slots into an array
+// of int32 data. The length is never stored - it is always calculated from
+// slot_count. All instances are created through the static New function, and
+// the number of slots is static once an instance is created.
+class FeedbackMetadata : public HeapObject {
+ public:
+ DECL_CAST(FeedbackMetadata)
+
+ // The number of slots that this metadata contains. Stored as an int32.
+ DECL_INT32_ACCESSORS(slot_count)
+
+ // The number of feedback cells required for create closures. Stored as an
+ // int32.
+ // TODO(mythria): Consider using 16 bits for this and slot_count so that we
+ // can save 4 bytes.
+ DECL_INT32_ACCESSORS(closure_feedback_cell_count)
+
+ // Get slot_count using an acquire load.
+ inline int32_t synchronized_slot_count() const;
+
+ // Returns number of feedback vector elements used by given slot kind.
+ static inline int GetSlotSize(FeedbackSlotKind kind);
+
+ bool SpecDiffersFrom(const FeedbackVectorSpec* other_spec) const;
+
+ inline bool is_empty() const;
+
+ // Returns slot kind for given slot.
+ V8_EXPORT_PRIVATE FeedbackSlotKind GetKind(FeedbackSlot slot) const;
+
+ // If {spec} is null, then it is considered empty.
+ V8_EXPORT_PRIVATE static Handle<FeedbackMetadata> New(
+ Isolate* isolate, const FeedbackVectorSpec* spec = nullptr);
+
+ DECL_PRINTER(FeedbackMetadata)
+ DECL_VERIFIER(FeedbackMetadata)
+
+ static const char* Kind2String(FeedbackSlotKind kind);
+ bool HasTypeProfileSlot() const;
+
+ // Garbage collection support.
+ // This includes any necessary padding at the end of the object for pointer
+ // size alignment.
+ static int SizeFor(int slot_count) {
+ return OBJECT_POINTER_ALIGN(kHeaderSize + length(slot_count) * kInt32Size);
+ }
+
+ static const int kSlotCountOffset = HeapObject::kHeaderSize;
+ static const int kFeedbackCellCountOffset = kSlotCountOffset + kInt32Size;
+ static const int kHeaderSize = kFeedbackCellCountOffset + kInt32Size;
+
+ class BodyDescriptor;
+
+ private:
+ friend class AccessorAssembler;
+
+ // Raw accessors to the encoded slot data.
+ inline int32_t get(int index) const;
+ inline void set(int index, int32_t value);
+
+ // The number of int32 data fields needed to store {slot_count} slots.
+ // Does not include any extra padding for pointer size alignment.
+ static int length(int slot_count) {
+ return VectorICComputer::word_count(slot_count);
+ }
+ inline int length() const;
+
+ static const int kFeedbackSlotKindBits = 5;
+ STATIC_ASSERT(static_cast<int>(FeedbackSlotKind::kKindsNumber) <
+ (1 << kFeedbackSlotKindBits));
+
+ void SetKind(FeedbackSlot slot, FeedbackSlotKind kind);
+
+ using VectorICComputer =
+ BitSetComputer<FeedbackSlotKind, kFeedbackSlotKindBits,
+ kInt32Size * kBitsPerByte, uint32_t>;
+
+ OBJECT_CONSTRUCTORS(FeedbackMetadata, HeapObject);
+};
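+
+ // Packing example (illustrative): with kFeedbackSlotKindBits == 5, each
+ // 32-bit data word holds the kinds of 6 slots (30 of 32 bits used), so a
+ // function with 13 feedback slots needs length(13) == 3 int32 words of
+ // kind data.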
+
+// Verify that an empty hash field looks like a tagged object, but can't
+// possibly be confused with a pointer.
+STATIC_ASSERT((Name::kEmptyHashField & kHeapObjectTag) == kHeapObjectTag);
+STATIC_ASSERT(Name::kEmptyHashField == 0x3);
+// Verify that a set hash field will not look like a tagged object.
+STATIC_ASSERT(Name::kHashNotComputedMask == kHeapObjectTag);
+
+class FeedbackMetadataIterator {
+ public:
+ explicit FeedbackMetadataIterator(Handle<FeedbackMetadata> metadata)
+ : metadata_handle_(metadata),
+ next_slot_(FeedbackSlot(0)),
+ slot_kind_(FeedbackSlotKind::kInvalid) {}
+
+ explicit FeedbackMetadataIterator(FeedbackMetadata metadata)
+ : metadata_(metadata),
+ next_slot_(FeedbackSlot(0)),
+ slot_kind_(FeedbackSlotKind::kInvalid) {}
+
+ inline bool HasNext() const;
+
+ inline FeedbackSlot Next();
+
+ // Returns slot kind of the last slot returned by Next().
+ FeedbackSlotKind kind() const {
+ DCHECK_NE(FeedbackSlotKind::kInvalid, slot_kind_);
+ DCHECK_NE(FeedbackSlotKind::kKindsNumber, slot_kind_);
+ return slot_kind_;
+ }
+
+ // Returns entry size of the last slot returned by Next().
+ inline int entry_size() const;
+
+ private:
+ FeedbackMetadata metadata() const {
+ return !metadata_handle_.is_null() ? *metadata_handle_ : metadata_;
+ }
+
+ // The reason for having both a handle and a raw pointer to the metadata
+ // is to have a single iterator implementation for both "handlified" and
+ // raw pointer use cases.
+ Handle<FeedbackMetadata> metadata_handle_;
+ FeedbackMetadata metadata_;
+ FeedbackSlot cur_slot_;
+ FeedbackSlot next_slot_;
+ FeedbackSlotKind slot_kind_;
+};
+
+// A FeedbackNexus is the combination of a FeedbackVector and a slot.
+class V8_EXPORT_PRIVATE FeedbackNexus final {
+ public:
+ FeedbackNexus(Handle<FeedbackVector> vector, FeedbackSlot slot)
+ : vector_handle_(vector), slot_(slot) {
+ kind_ =
+ (vector.is_null()) ? FeedbackSlotKind::kInvalid : vector->GetKind(slot);
+ }
+ FeedbackNexus(FeedbackVector vector, FeedbackSlot slot)
+ : vector_(vector), slot_(slot) {
+ kind_ =
+ (vector.is_null()) ? FeedbackSlotKind::kInvalid : vector.GetKind(slot);
+ }
+
+ Handle<FeedbackVector> vector_handle() const {
+ DCHECK(vector_.is_null());
+ return vector_handle_;
+ }
+ FeedbackVector vector() const {
+ return vector_handle_.is_null() ? vector_ : *vector_handle_;
+ }
+ FeedbackSlot slot() const { return slot_; }
+ FeedbackSlotKind kind() const { return kind_; }
+
+ inline LanguageMode GetLanguageMode() const {
+ return vector().GetLanguageMode(slot());
+ }
+
+ InlineCacheState ic_state() const;
+ bool IsUninitialized() const { return ic_state() == UNINITIALIZED; }
+ bool IsMegamorphic() const { return ic_state() == MEGAMORPHIC; }
+ bool IsGeneric() const { return ic_state() == GENERIC; }
+
+ void Print(std::ostream& os); // NOLINT
+
+ // For map-based ICs (load, keyed-load, store, keyed-store).
+ Map GetFirstMap() const;
+
+ int ExtractMaps(MapHandles* maps) const;
+ MaybeObjectHandle FindHandlerForMap(Handle<Map> map) const;
+ bool FindHandlers(MaybeObjectHandles* code_list, int length = -1) const;
+
+ bool IsCleared() const {
+ InlineCacheState state = ic_state();
+ return !FLAG_use_ic || state == UNINITIALIZED || state == PREMONOMORPHIC;
+ }
+
+ // Clear() returns true if the state of the underlying vector was changed.
+ bool Clear();
+ void ConfigureUninitialized();
+ void ConfigurePremonomorphic(Handle<Map> receiver_map);
+ // ConfigureMegamorphic() returns true if the state of the underlying vector
+ // was changed. The zero-argument overload also clears the extra feedback.
+ bool ConfigureMegamorphic();
+ bool ConfigureMegamorphic(IcCheckType property_type);
+
+ inline MaybeObject GetFeedback() const;
+ inline MaybeObject GetFeedbackExtra() const;
+
+ inline Isolate* GetIsolate() const;
+
+ void ConfigureMonomorphic(Handle<Name> name, Handle<Map> receiver_map,
+ const MaybeObjectHandle& handler);
+
+ void ConfigurePolymorphic(Handle<Name> name, MapHandles const& maps,
+ MaybeObjectHandles* handlers);
+
+ BinaryOperationHint GetBinaryOperationFeedback() const;
+ CompareOperationHint GetCompareOperationFeedback() const;
+ ForInHint GetForInFeedback() const;
+
+ // For KeyedLoad ICs.
+ KeyedAccessLoadMode GetKeyedAccessLoadMode() const;
+
+ // For KeyedStore ICs.
+ KeyedAccessStoreMode GetKeyedAccessStoreMode() const;
+
+ // For KeyedLoad and KeyedStore ICs.
+ IcCheckType GetKeyType() const;
+ Name GetName() const;
+
+ // For Call ICs.
+ int GetCallCount();
+ void SetSpeculationMode(SpeculationMode mode);
+ SpeculationMode GetSpeculationMode();
+
+ // Compute the call frequency based on the call count and the invocation
+ // count (taken from the type feedback vector).
+ float ComputeCallFrequency();
+
+ using SpeculationModeField = BitField<SpeculationMode, 0, 1>;
+ using CallCountField = BitField<uint32_t, 1, 31>;
+
+ // For InstanceOf ICs.
+ MaybeHandle<JSObject> GetConstructorFeedback() const;
+
+ // For Global Load and Store ICs.
+ void ConfigurePropertyCellMode(Handle<PropertyCell> cell);
+  // Returns false if the given combination of indices is not allowed.
+ bool ConfigureLexicalVarMode(int script_context_index, int context_slot_index,
+ bool immutable);
+ void ConfigureHandlerMode(const MaybeObjectHandle& handler);
+
+ // For CloneObject ICs
+ static constexpr int kCloneObjectPolymorphicEntrySize = 2;
+ void ConfigureCloneObject(Handle<Map> source_map, Handle<Map> result_map);
+
+// Bit positions in a smi that encodes lexical environment variable access.
+#define LEXICAL_MODE_BIT_FIELDS(V, _) \
+ V(ContextIndexBits, unsigned, 12, _) \
+ V(SlotIndexBits, unsigned, 18, _) \
+ V(ImmutabilityBit, bool, 1, _)
+
+ DEFINE_BIT_FIELDS(LEXICAL_MODE_BIT_FIELDS)
+#undef LEXICAL_MODE_BIT_FIELDS
+
+ // Make sure we don't overflow the smi.
+ STATIC_ASSERT(LEXICAL_MODE_BIT_FIELDS_Ranges::kBitsCount <= kSmiValueSize);
+
+ // For TypeProfile feedback vector slots.
+ // ResetTypeProfile will always reset type profile information.
+ void ResetTypeProfile();
+
+ // Add a type to the list of types for source position <position>.
+ void Collect(Handle<String> type, int position);
+ JSObject GetTypeProfile() const;
+
+ std::vector<int> GetSourcePositions() const;
+ std::vector<Handle<String>> GetTypesForSourcePositions(uint32_t pos) const;
+
+ inline void SetFeedback(Object feedback,
+ WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
+ inline void SetFeedback(MaybeObject feedback,
+ WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
+ inline void SetFeedbackExtra(Object feedback_extra,
+ WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
+ inline void SetFeedbackExtra(MaybeObject feedback_extra,
+ WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
+
+ Handle<WeakFixedArray> EnsureArrayOfSize(int length);
+ Handle<WeakFixedArray> EnsureExtraArrayOfSize(int length);
+
+ private:
+  // The reason for having both a vector handle and a raw pointer is that we
+  // can and should use handles during IC miss, but not during GC when we
+  // clear ICs. A handle to the vector is preferable because it allows more
+  // operations, such as allocation.
+ Handle<FeedbackVector> vector_handle_;
+ FeedbackVector vector_;
+ FeedbackSlot slot_;
+ FeedbackSlotKind kind_;
+};
+
+inline BinaryOperationHint BinaryOperationHintFromFeedback(int type_feedback);
+inline CompareOperationHint CompareOperationHintFromFeedback(int type_feedback);
+inline ForInHint ForInHintFromFeedback(int type_feedback);
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_FEEDBACK_VECTOR_H_
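[Reviewer note, not part of the patch] The LEXICAL_MODE_BIT_FIELDS above pack
12 bits of script-context index, 18 bits of slot index and one immutability
bit into 31 bits, which the STATIC_ASSERT keeps within the Smi payload. A
minimal standalone sketch of that packing, with hypothetical helper names and
only the field widths taken from the header:

#include <cassert>
#include <cstdint>

constexpr int kContextIndexBits = 12;
constexpr int kSlotIndexBits = 18;

uint32_t EncodeLexicalAccess(int context_index, int slot_index,
                             bool immutable) {
  // ConfigureLexicalVarMode() returning false corresponds to an index that
  // does not fit its bit width, the case these asserts would catch.
  assert(context_index >= 0 && context_index < (1 << kContextIndexBits));
  assert(slot_index >= 0 && slot_index < (1 << kSlotIndexBits));
  return static_cast<uint32_t>(context_index) |
         (static_cast<uint32_t>(slot_index) << kContextIndexBits) |
         (static_cast<uint32_t>(immutable)
          << (kContextIndexBits + kSlotIndexBits));
}

int DecodeSlotIndex(uint32_t payload) {
  return (payload >> kContextIndexBits) & ((1u << kSlotIndexBits) - 1);
}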
diff --git a/deps/v8/src/objects/field-index-inl.h b/deps/v8/src/objects/field-index-inl.h
new file mode 100644
index 0000000000..be60fb54a2
--- /dev/null
+++ b/deps/v8/src/objects/field-index-inl.h
@@ -0,0 +1,73 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_FIELD_INDEX_INL_H_
+#define V8_OBJECTS_FIELD_INDEX_INL_H_
+
+#include "src/objects/descriptor-array-inl.h"
+#include "src/objects/field-index.h"
+#include "src/objects/objects-inl.h"
+
+namespace v8 {
+namespace internal {
+
+FieldIndex FieldIndex::ForInObjectOffset(int offset, Encoding encoding) {
+ DCHECK_IMPLIES(encoding == kWord32, IsAligned(offset, kInt32Size));
+ DCHECK_IMPLIES(encoding == kTagged, IsAligned(offset, kTaggedSize));
+ DCHECK_IMPLIES(encoding == kDouble, IsAligned(offset, kDoubleSize));
+ return FieldIndex(true, offset, encoding, 0, 0);
+}
+
+FieldIndex FieldIndex::ForPropertyIndex(const Map map, int property_index,
+ Representation representation) {
+ DCHECK(map.instance_type() >= FIRST_NONSTRING_TYPE);
+ int inobject_properties = map.GetInObjectProperties();
+ bool is_inobject = property_index < inobject_properties;
+ int first_inobject_offset;
+ int offset;
+ if (is_inobject) {
+ first_inobject_offset = map.GetInObjectPropertyOffset(0);
+ offset = map.GetInObjectPropertyOffset(property_index);
+ } else {
+ first_inobject_offset = FixedArray::kHeaderSize;
+ property_index -= inobject_properties;
+ offset = PropertyArray::OffsetOfElementAt(property_index);
+ }
+ Encoding encoding = FieldEncoding(representation);
+ return FieldIndex(is_inobject, offset, encoding, inobject_properties,
+ first_inobject_offset);
+}
+
+// Returns the index format accepted by the HLoadFieldByIndex instruction.
+// (In-object: zero-based from (object start + JSObject::kHeaderSize),
+// out-of-object: zero-based from FixedArray::kHeaderSize.)
+int FieldIndex::GetLoadByFieldIndex() const {
+ // For efficiency, the LoadByFieldIndex instruction takes an index that is
+ // optimized for quick access. If the property is inline, the index is
+ // positive. If it's out-of-line, the encoded index is -raw_index - 1 to
+ // disambiguate the zero out-of-line index from the zero inobject case.
+  // The index itself is shifted up by one bit, with the lowest bit
+  // signifying whether the field is a mutable double box (1) or not (0).
+ int result = index();
+ if (is_inobject()) {
+ result -= JSObject::kHeaderSize / kTaggedSize;
+ } else {
+ result -= FixedArray::kHeaderSize / kTaggedSize;
+ result = -result - 1;
+ }
+ result = static_cast<uint32_t>(result) << 1;
+ return is_double() ? (result | 1) : result;
+}
+
+FieldIndex FieldIndex::ForDescriptor(const Map map, int descriptor_index) {
+ PropertyDetails details =
+ map.instance_descriptors().GetDetails(descriptor_index);
+ int field_index = details.field_index();
+ return ForPropertyIndex(map, field_index, details.representation());
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_OBJECTS_FIELD_INDEX_INL_H_
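[Reviewer note, not part of the patch] The sign-flip-and-tag scheme that
GetLoadByFieldIndex() documents is easy to get wrong, so here is a standalone
round-trip sketch of just the packing step. Names are hypothetical, and the
real method additionally rebases the index against JSObject::kHeaderSize or
FixedArray::kHeaderSize, which is omitted here:

#include <cassert>
#include <cstdint>

int EncodeLoadByFieldIndex(int index, bool is_inobject, bool is_double) {
  // Out-of-object index 0 becomes -1, so it cannot collide with in-object 0.
  int result = is_inobject ? index : -index - 1;
  result = static_cast<int>(static_cast<uint32_t>(result) << 1);
  return is_double ? (result | 1) : result;  // low bit tags double fields
}

void DecodeLoadByFieldIndex(int encoded, int* index, bool* is_inobject,
                            bool* is_double) {
  *is_double = (encoded & 1) != 0;
  int raw = encoded >> 1;  // arithmetic shift restores the sign
  *is_inobject = raw >= 0;
  *index = *is_inobject ? raw : -raw - 1;
}

int main() {
  int index;
  bool is_inobject, is_double;
  DecodeLoadByFieldIndex(EncodeLoadByFieldIndex(0, false, true), &index,
                         &is_inobject, &is_double);
  assert(index == 0 && !is_inobject && is_double);
}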
diff --git a/deps/v8/src/objects/field-index.h b/deps/v8/src/objects/field-index.h
new file mode 100644
index 0000000000..f352ef6800
--- /dev/null
+++ b/deps/v8/src/objects/field-index.h
@@ -0,0 +1,127 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_FIELD_INDEX_H_
+#define V8_OBJECTS_FIELD_INDEX_H_
+
+#include "src/objects/property-details.h"
+#include "src/utils/utils.h"
+
+namespace v8 {
+namespace internal {
+
+class Map;
+
+// Wrapper class to hold a field index, usually but not necessarily generated
+// from a property index. When available, the wrapper class captures additional
+// information to allow the field index to be translated back into the property
+// index it was originally generated from.
+class FieldIndex final {
+ public:
+ enum Encoding { kTagged, kDouble, kWord32 };
+
+ FieldIndex() : bit_field_(0) {}
+
+ static inline FieldIndex ForPropertyIndex(
+ const Map map, int index,
+ Representation representation = Representation::Tagged());
+ static inline FieldIndex ForInObjectOffset(int offset, Encoding encoding);
+ static inline FieldIndex ForDescriptor(const Map map, int descriptor_index);
+
+ inline int GetLoadByFieldIndex() const;
+
+ bool is_inobject() const { return IsInObjectBits::decode(bit_field_); }
+
+ bool is_double() const { return EncodingBits::decode(bit_field_) == kDouble; }
+
+ int offset() const { return OffsetBits::decode(bit_field_); }
+
+ // Zero-indexed from beginning of the object.
+ int index() const {
+ DCHECK(IsAligned(offset(), kTaggedSize));
+ return offset() / kTaggedSize;
+ }
+
+ int outobject_array_index() const {
+ DCHECK(!is_inobject());
+ return index() - first_inobject_property_offset() / kTaggedSize;
+ }
+
+  // Zero-based from the first inobject property. Indices continue into the
+  // out-of-object properties.
+ int property_index() const {
+ int result = index() - first_inobject_property_offset() / kTaggedSize;
+ if (!is_inobject()) {
+ result += InObjectPropertyBits::decode(bit_field_);
+ }
+ return result;
+ }
+
+ int GetFieldAccessStubKey() const {
+ return bit_field_ &
+ (IsInObjectBits::kMask | EncodingBits::kMask | OffsetBits::kMask);
+ }
+
+ bool operator==(FieldIndex const& other) const {
+ return bit_field_ == other.bit_field_;
+ }
+ bool operator!=(FieldIndex const& other) const { return !(*this == other); }
+
+ private:
+ FieldIndex(bool is_inobject, int offset, Encoding encoding,
+ int inobject_properties, int first_inobject_property_offset) {
+ DCHECK(IsAligned(first_inobject_property_offset, kTaggedSize));
+ bit_field_ = IsInObjectBits::encode(is_inobject) |
+ EncodingBits::encode(encoding) |
+ FirstInobjectPropertyOffsetBits::encode(
+ first_inobject_property_offset) |
+ OffsetBits::encode(offset) |
+ InObjectPropertyBits::encode(inobject_properties);
+ }
+
+ static Encoding FieldEncoding(Representation representation) {
+ switch (representation.kind()) {
+ case Representation::kNone:
+ case Representation::kSmi:
+ case Representation::kHeapObject:
+ case Representation::kTagged:
+ return kTagged;
+ case Representation::kDouble:
+ return kDouble;
+ default:
+ break;
+ }
+ PrintF("%s\n", representation.Mnemonic());
+ UNREACHABLE();
+ return kTagged;
+ }
+
+ int first_inobject_property_offset() const {
+ return FirstInobjectPropertyOffsetBits::decode(bit_field_);
+ }
+
+ static const int kOffsetBitsSize =
+ (kDescriptorIndexBitCount + 1 + kTaggedSizeLog2);
+
+ // Index from beginning of object.
+ class OffsetBits : public BitField64<int, 0, kOffsetBitsSize> {};
+ class IsInObjectBits : public BitField64<bool, OffsetBits::kNext, 1> {};
+ class EncodingBits : public BitField64<Encoding, IsInObjectBits::kNext, 2> {};
+ // Number of inobject properties.
+ class InObjectPropertyBits
+ : public BitField64<int, EncodingBits::kNext, kDescriptorIndexBitCount> {
+ };
+ // Offset of first inobject property from beginning of object.
+ class FirstInobjectPropertyOffsetBits
+ : public BitField64<int, InObjectPropertyBits::kNext,
+ kFirstInobjectPropertyOffsetBitCount> {};
+ STATIC_ASSERT(FirstInobjectPropertyOffsetBits::kNext <= 64);
+
+ uint64_t bit_field_;
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_OBJECTS_FIELD_INDEX_H_
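[Reviewer note, not part of the patch] FieldIndex packs everything into one
uint64_t via chained BitField64 classes, so the bit layout is maintained in a
single place. A minimal sketch of the pattern with illustrative (not the
real) field widths, showing how kNext chaining avoids manual shift
bookkeeping:

#include <cstdint>

template <typename T, int shift, int size>
struct BitField64 {
  static constexpr uint64_t kMask = ((uint64_t{1} << size) - 1) << shift;
  static constexpr int kNext = shift + size;  // where the next field starts
  static constexpr uint64_t encode(T value) {
    return static_cast<uint64_t>(value) << shift;
  }
  static constexpr T decode(uint64_t packed) {
    return static_cast<T>((packed & kMask) >> shift);
  }
};

// Widths are illustrative; the real OffsetBits width is derived from
// kDescriptorIndexBitCount and kTaggedSizeLog2.
using OffsetBits = BitField64<int, 0, 27>;
using IsInObjectBits = BitField64<bool, OffsetBits::kNext, 1>;

static_assert(OffsetBits::decode(OffsetBits::encode(16) |
                                 IsInObjectBits::encode(true)) == 16,
              "fields must not clobber each other");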
diff --git a/deps/v8/src/objects/field-type.cc b/deps/v8/src/objects/field-type.cc
new file mode 100644
index 0000000000..5c771c4ffa
--- /dev/null
+++ b/deps/v8/src/objects/field-type.cc
@@ -0,0 +1,87 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/objects/field-type.h"
+
+#include "src/handles/handles-inl.h"
+#include "src/objects/objects-inl.h"
+#include "src/objects/smi.h"
+#include "src/utils/ostreams.h"
+
+namespace v8 {
+namespace internal {
+
+// static
+FieldType FieldType::None() { return FieldType(Smi::FromInt(2).ptr()); }
+
+// static
+FieldType FieldType::Any() { return FieldType(Smi::FromInt(1).ptr()); }
+
+// static
+Handle<FieldType> FieldType::None(Isolate* isolate) {
+ return handle(None(), isolate);
+}
+
+// static
+Handle<FieldType> FieldType::Any(Isolate* isolate) {
+ return handle(Any(), isolate);
+}
+
+// static
+FieldType FieldType::Class(Map map) { return FieldType::cast(map); }
+
+// static
+Handle<FieldType> FieldType::Class(Handle<Map> map, Isolate* isolate) {
+ return handle(Class(*map), isolate);
+}
+
+// static
+FieldType FieldType::cast(Object object) {
+ DCHECK(object == None() || object == Any() || object.IsMap());
+ return FieldType(object.ptr());
+}
+
+bool FieldType::IsClass() const { return this->IsMap(); }
+
+Map FieldType::AsClass() const {
+ DCHECK(IsClass());
+ return Map::cast(*this);
+}
+
+bool FieldType::NowStable() const {
+ return !this->IsClass() || AsClass().is_stable();
+}
+
+bool FieldType::NowIs(FieldType other) const {
+ if (other.IsAny()) return true;
+ if (IsNone()) return true;
+ if (other.IsNone()) return false;
+ if (IsAny()) return false;
+ DCHECK(IsClass());
+ DCHECK(other.IsClass());
+ return *this == other;
+}
+
+bool FieldType::NowIs(Handle<FieldType> other) const { return NowIs(*other); }
+
+void FieldType::PrintTo(std::ostream& os) const {
+ if (IsAny()) {
+ os << "Any";
+ } else if (IsNone()) {
+ os << "None";
+ } else {
+ DCHECK(IsClass());
+ os << "Class(" << reinterpret_cast<void*>(AsClass().ptr()) << ")";
+ }
+}
+
+bool FieldType::NowContains(Object value) const {
+ if (*this == Any()) return true;
+ if (*this == None()) return false;
+ if (!value.IsHeapObject()) return false;
+ return HeapObject::cast(value).map() == Map::cast(*this);
+}
+
+} // namespace internal
+} // namespace v8
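[Reviewer note, not part of the patch] NowIs() above implements a three-level
lattice: None is below every type, Any is above every type, and two Class
types relate only when they wrap the same map. A standalone model of that
ordering, where an int stands in for the Map pointer:

#include <cassert>

struct FieldTypeSketch {
  enum Kind { kNone, kClass, kAny } kind;
  int map_id = 0;  // stands in for the Map identity

  bool NowIs(const FieldTypeSketch& other) const {
    if (other.kind == kAny) return true;   // everything is below Any
    if (kind == kNone) return true;        // None is below everything
    if (other.kind == kNone) return false;
    if (kind == kAny) return false;
    return map_id == other.map_id;         // Class vs. Class: same map only
  }
};

int main() {
  FieldTypeSketch none{FieldTypeSketch::kNone};
  FieldTypeSketch any{FieldTypeSketch::kAny};
  FieldTypeSketch a{FieldTypeSketch::kClass, 1};
  FieldTypeSketch b{FieldTypeSketch::kClass, 2};
  assert(none.NowIs(a) && a.NowIs(any) && !a.NowIs(b) && !any.NowIs(a));
}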
diff --git a/deps/v8/src/objects/field-type.h b/deps/v8/src/objects/field-type.h
new file mode 100644
index 0000000000..3c22692307
--- /dev/null
+++ b/deps/v8/src/objects/field-type.h
@@ -0,0 +1,56 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_FIELD_TYPE_H_
+#define V8_OBJECTS_FIELD_TYPE_H_
+
+#include "src/objects/heap-object.h"
+#include "src/objects/map.h"
+#include "src/objects/objects.h"
+
+namespace v8 {
+namespace internal {
+
+template <typename T>
+class Handle;
+
+class FieldType : public Object {
+ public:
+ static FieldType None();
+ static FieldType Any();
+ V8_EXPORT_PRIVATE static Handle<FieldType> None(Isolate* isolate);
+ V8_EXPORT_PRIVATE static Handle<FieldType> Any(Isolate* isolate);
+ V8_EXPORT_PRIVATE static FieldType Class(Map map);
+ V8_EXPORT_PRIVATE static Handle<FieldType> Class(Handle<Map> map,
+ Isolate* isolate);
+ V8_EXPORT_PRIVATE static FieldType cast(Object object);
+ static FieldType unchecked_cast(Object object) {
+ return FieldType(object.ptr());
+ }
+
+ bool NowContains(Object value) const;
+
+ bool NowContains(Handle<Object> value) const { return NowContains(*value); }
+
+ bool IsClass() const;
+ Map AsClass() const;
+ bool IsNone() const { return *this == None(); }
+ bool IsAny() const { return *this == Any(); }
+ bool NowStable() const;
+ bool NowIs(FieldType other) const;
+ bool NowIs(Handle<FieldType> other) const;
+
+ V8_EXPORT_PRIVATE void PrintTo(std::ostream& os) const;
+
+ FieldType* operator->() { return this; }
+ const FieldType* operator->() const { return this; }
+
+ private:
+ explicit constexpr FieldType(Address ptr) : Object(ptr) {}
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_OBJECTS_FIELD_TYPE_H_
diff --git a/deps/v8/src/objects/fixed-array-inl.h b/deps/v8/src/objects/fixed-array-inl.h
index d494f8d15b..6d2b42edbf 100644
--- a/deps/v8/src/objects/fixed-array-inl.h
+++ b/deps/v8/src/objects/fixed-array-inl.h
@@ -7,19 +7,19 @@
#include "src/objects/fixed-array.h"
-#include "src/base/tsan.h"
-#include "src/conversions.h"
-#include "src/handles-inl.h"
+#include "src/handles/handles-inl.h"
#include "src/heap/heap-write-barrier-inl.h"
-#include "src/objects-inl.h"
+#include "src/numbers/conversions.h"
#include "src/objects/bigint.h"
#include "src/objects/compressed-slots.h"
#include "src/objects/heap-number-inl.h"
#include "src/objects/map.h"
#include "src/objects/maybe-object-inl.h"
+#include "src/objects/objects-inl.h"
#include "src/objects/oddball.h"
#include "src/objects/slots.h"
-#include "src/roots-inl.h"
+#include "src/roots/roots-inl.h"
+#include "src/sanitizer/tsan.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -30,7 +30,6 @@ namespace internal {
OBJECT_CONSTRUCTORS_IMPL(FixedArrayBase, HeapObject)
OBJECT_CONSTRUCTORS_IMPL(FixedArray, FixedArrayBase)
OBJECT_CONSTRUCTORS_IMPL(FixedDoubleArray, FixedArrayBase)
-OBJECT_CONSTRUCTORS_IMPL(FixedTypedArrayBase, FixedArrayBase)
OBJECT_CONSTRUCTORS_IMPL(ArrayList, FixedArray)
OBJECT_CONSTRUCTORS_IMPL(ByteArray, FixedArrayBase)
OBJECT_CONSTRUCTORS_IMPL(TemplateList, FixedArray)
@@ -58,13 +57,13 @@ CAST_ACCESSOR(ByteArray)
CAST_ACCESSOR(FixedArray)
CAST_ACCESSOR(FixedArrayBase)
CAST_ACCESSOR(FixedDoubleArray)
-CAST_ACCESSOR(FixedTypedArrayBase)
CAST_ACCESSOR(TemplateList)
CAST_ACCESSOR(WeakFixedArray)
CAST_ACCESSOR(WeakArrayList)
SMI_ACCESSORS(FixedArrayBase, length, kLengthOffset)
SYNCHRONIZED_SMI_ACCESSORS(FixedArrayBase, length, kLengthOffset)
+
SMI_ACCESSORS(WeakFixedArray, length, kLengthOffset)
SYNCHRONIZED_SMI_ACCESSORS(WeakFixedArray, length, kLengthOffset)
@@ -76,8 +75,6 @@ Object FixedArrayBase::unchecked_synchronized_length() const {
return ACQUIRE_READ_FIELD(*this, kLengthOffset);
}
-ACCESSORS(FixedTypedArrayBase, base_pointer, Object, kBasePointerOffset)
-
ObjectSlot FixedArray::GetFirstElementAddress() {
return RawField(OffsetOfElementAt(0));
}
@@ -87,7 +84,7 @@ bool FixedArray::ContainsOnlySmisOrHoles() {
ObjectSlot current = GetFirstElementAddress();
for (int i = 0; i < length(); ++i, ++current) {
Object candidate = *current;
- if (!candidate->IsSmi() && candidate != the_hole) return false;
+ if (!candidate.IsSmi() && candidate != the_hole) return false;
}
return true;
}
@@ -98,25 +95,11 @@ Object FixedArray::get(int index) const {
}
Handle<Object> FixedArray::get(FixedArray array, int index, Isolate* isolate) {
- return handle(array->get(index), isolate);
-}
-
-template <class T>
-MaybeHandle<T> FixedArray::GetValue(Isolate* isolate, int index) const {
- Object obj = get(index);
- if (obj->IsUndefined(isolate)) return MaybeHandle<T>();
- return Handle<T>(T::cast(obj), isolate);
-}
-
-template <class T>
-Handle<T> FixedArray::GetValueChecked(Isolate* isolate, int index) const {
- Object obj = get(index);
- CHECK(!obj->IsUndefined(isolate));
- return Handle<T>(T::cast(obj), isolate);
+ return handle(array.get(index), isolate);
}
bool FixedArray::is_the_hole(Isolate* isolate, int index) {
- return get(index)->IsTheHole(isolate);
+ return get(index).IsTheHole(isolate);
}
void FixedArray::set(int index, Smi value) {
@@ -147,9 +130,9 @@ void FixedArray::set(int index, Object value, WriteBarrierMode mode) {
}
void FixedArray::NoWriteBarrierSet(FixedArray array, int index, Object value) {
- DCHECK_NE(array->map(), array->GetReadOnlyRoots().fixed_cow_array_map());
+ DCHECK_NE(array.map(), array.GetReadOnlyRoots().fixed_cow_array_map());
DCHECK_GE(index, 0);
- DCHECK_LT(index, array->length());
+ DCHECK_LT(index, array.length());
DCHECK(!ObjectInYoungGeneration(value));
RELAXED_WRITE_FIELD(array, kHeaderSize + index * kTaggedSize, value);
}
@@ -202,16 +185,27 @@ ObjectSlot FixedArray::RawFieldOfElementAt(int index) {
return RawField(OffsetOfElementAt(index));
}
-void FixedArray::MoveElements(Heap* heap, int dst_index, int src_index, int len,
- WriteBarrierMode mode) {
+void FixedArray::MoveElements(Isolate* isolate, int dst_index, int src_index,
+ int len, WriteBarrierMode mode) {
+ if (len == 0) return;
+ DCHECK_LE(dst_index + len, length());
+ DCHECK_LE(src_index + len, length());
DisallowHeapAllocation no_gc;
- heap->MoveElements(*this, dst_index, src_index, len, mode);
+ ObjectSlot dst_slot(RawFieldOfElementAt(dst_index));
+ ObjectSlot src_slot(RawFieldOfElementAt(src_index));
+ isolate->heap()->MoveRange(*this, dst_slot, src_slot, len, mode);
}
-void FixedArray::CopyElements(Heap* heap, int dst_index, FixedArray src,
+void FixedArray::CopyElements(Isolate* isolate, int dst_index, FixedArray src,
int src_index, int len, WriteBarrierMode mode) {
+ if (len == 0) return;
+ DCHECK_LE(dst_index + len, length());
+ DCHECK_LE(src_index + len, src.length());
DisallowHeapAllocation no_gc;
- heap->CopyElements(*this, src, dst_index, src_index, len, mode);
+
+ ObjectSlot dst_slot(RawFieldOfElementAt(dst_index));
+ ObjectSlot src_slot(src.RawFieldOfElementAt(src_index));
+ isolate->heap()->CopyRange(*this, dst_slot, src_slot, len, mode);
}
// Perform a binary search in a fixed array.
@@ -221,7 +215,7 @@ int BinarySearch(T* array, Name name, int valid_entries,
DCHECK(search_mode == ALL_ENTRIES || out_insertion_index == nullptr);
int low = 0;
int high = array->number_of_entries() - 1;
- uint32_t hash = name->hash_field();
+ uint32_t hash = name.hash_field();
int limit = high;
DCHECK(low <= high);
@@ -229,7 +223,7 @@ int BinarySearch(T* array, Name name, int valid_entries,
while (low != high) {
int mid = low + (high - low) / 2;
Name mid_name = array->GetSortedKey(mid);
- uint32_t mid_hash = mid_name->hash_field();
+ uint32_t mid_hash = mid_name.hash_field();
if (mid_hash >= hash) {
high = mid;
@@ -241,7 +235,7 @@ int BinarySearch(T* array, Name name, int valid_entries,
for (; low <= limit; ++low) {
int sort_index = array->GetSortedKeyIndex(low);
Name entry = array->GetKey(sort_index);
- uint32_t current_hash = entry->hash_field();
+ uint32_t current_hash = entry.hash_field();
if (current_hash != hash) {
if (search_mode == ALL_ENTRIES && out_insertion_index != nullptr) {
*out_insertion_index = sort_index + (current_hash > hash ? 0 : 1);
@@ -268,12 +262,12 @@ template <SearchMode search_mode, typename T>
int LinearSearch(T* array, Name name, int valid_entries,
int* out_insertion_index) {
if (search_mode == ALL_ENTRIES && out_insertion_index != nullptr) {
- uint32_t hash = name->hash_field();
+ uint32_t hash = name.hash_field();
int len = array->number_of_entries();
for (int number = 0; number < len; number++) {
int sorted_index = array->GetSortedKeyIndex(number);
Name entry = array->GetKey(sorted_index);
- uint32_t current_hash = entry->hash_field();
+ uint32_t current_hash = entry.hash_field();
if (current_hash > hash) {
*out_insertion_index = sorted_index;
return T::kNotFound;
@@ -320,7 +314,7 @@ double FixedDoubleArray::get_scalar(int index) {
map() != GetReadOnlyRoots().fixed_array_map());
DCHECK(index >= 0 && index < this->length());
DCHECK(!is_the_hole(index));
- return READ_DOUBLE_FIELD(*this, kHeaderSize + index * kDoubleSize);
+ return ReadField<double>(kHeaderSize + index * kDoubleSize);
}
uint64_t FixedDoubleArray::get_representation(int index) {
@@ -328,15 +322,16 @@ uint64_t FixedDoubleArray::get_representation(int index) {
map() != GetReadOnlyRoots().fixed_array_map());
DCHECK(index >= 0 && index < this->length());
int offset = kHeaderSize + index * kDoubleSize;
- return READ_UINT64_FIELD(*this, offset);
+ // Bug(v8:8875): Doubles may be unaligned.
+ return ReadUnalignedValue<uint64_t>(field_address(offset));
}
Handle<Object> FixedDoubleArray::get(FixedDoubleArray array, int index,
Isolate* isolate) {
- if (array->is_the_hole(index)) {
+ if (array.is_the_hole(index)) {
return ReadOnlyRoots(isolate).the_hole_value_handle();
} else {
- return isolate->factory()->NewNumber(array->get_scalar(index));
+ return isolate->factory()->NewNumber(array.get_scalar(index));
}
}
@@ -345,9 +340,9 @@ void FixedDoubleArray::set(int index, double value) {
map() != GetReadOnlyRoots().fixed_array_map());
int offset = kHeaderSize + index * kDoubleSize;
if (std::isnan(value)) {
- WRITE_DOUBLE_FIELD(*this, offset, std::numeric_limits<double>::quiet_NaN());
+ WriteField<double>(offset, std::numeric_limits<double>::quiet_NaN());
} else {
- WRITE_DOUBLE_FIELD(*this, offset, value);
+ WriteField<double>(offset, value);
}
DCHECK(!is_the_hole(index));
}
@@ -360,7 +355,7 @@ void FixedDoubleArray::set_the_hole(int index) {
DCHECK(map() != GetReadOnlyRoots().fixed_cow_array_map() &&
map() != GetReadOnlyRoots().fixed_array_map());
int offset = kHeaderSize + index * kDoubleSize;
- WRITE_UINT64_FIELD(*this, offset, kHoleNanInt64);
+ WriteUnalignedValue<uint64_t>(field_address(offset), kHoleNanInt64);
}
bool FixedDoubleArray::is_the_hole(Isolate* isolate, int index) {
@@ -371,8 +366,9 @@ bool FixedDoubleArray::is_the_hole(int index) {
return get_representation(index) == kHoleNanInt64;
}
-void FixedDoubleArray::MoveElements(Heap* heap, int dst_index, int src_index,
- int len, WriteBarrierMode mode) {
+void FixedDoubleArray::MoveElements(Isolate* isolate, int dst_index,
+ int src_index, int len,
+ WriteBarrierMode mode) {
DCHECK_EQ(SKIP_WRITE_BARRIER, mode);
double* data_start =
reinterpret_cast<double*>(FIELD_ADDR(*this, kHeaderSize));
@@ -414,6 +410,19 @@ MaybeObjectSlot WeakFixedArray::RawFieldOfElementAt(int index) {
return RawMaybeWeakField(OffsetOfElementAt(index));
}
+void WeakFixedArray::CopyElements(Isolate* isolate, int dst_index,
+ WeakFixedArray src, int src_index, int len,
+ WriteBarrierMode mode) {
+ if (len == 0) return;
+ DCHECK_LE(dst_index + len, length());
+ DCHECK_LE(src_index + len, src.length());
+ DisallowHeapAllocation no_gc;
+
+ MaybeObjectSlot dst_slot(data_start() + dst_index);
+ MaybeObjectSlot src_slot(src.data_start() + src_index);
+ isolate->heap()->CopyRange(*this, dst_slot, src_slot, len, mode);
+}
+
MaybeObject WeakArrayList::Get(int index) const {
DCHECK(index >= 0 && index < this->capacity());
return RELAXED_READ_WEAK_FIELD(*this, OffsetOfElementAt(index));
@@ -431,10 +440,23 @@ MaybeObjectSlot WeakArrayList::data_start() {
return RawMaybeWeakField(kHeaderSize);
}
+void WeakArrayList::CopyElements(Isolate* isolate, int dst_index,
+ WeakArrayList src, int src_index, int len,
+ WriteBarrierMode mode) {
+ if (len == 0) return;
+ DCHECK_LE(dst_index + len, capacity());
+ DCHECK_LE(src_index + len, src.capacity());
+ DisallowHeapAllocation no_gc;
+
+ MaybeObjectSlot dst_slot(data_start() + dst_index);
+ MaybeObjectSlot src_slot(src.data_start() + src_index);
+ isolate->heap()->CopyRange(*this, dst_slot, src_slot, len, mode);
+}
+
HeapObject WeakArrayList::Iterator::Next() {
if (!array_.is_null()) {
- while (index_ < array_->length()) {
- MaybeObject item = array_->Get(index_++);
+ while (index_ < array_.length()) {
+ MaybeObject item = array_.Get(index_++);
DCHECK(item->IsWeakOrCleared());
if (!item->IsCleared()) return item->GetHeapObjectAssumeWeak();
}
@@ -444,16 +466,16 @@ HeapObject WeakArrayList::Iterator::Next() {
}
int ArrayList::Length() const {
- if (FixedArray::cast(*this)->length() == 0) return 0;
- return Smi::ToInt(FixedArray::cast(*this)->get(kLengthIndex));
+ if (FixedArray::cast(*this).length() == 0) return 0;
+ return Smi::ToInt(FixedArray::cast(*this).get(kLengthIndex));
}
void ArrayList::SetLength(int length) {
- return FixedArray::cast(*this)->set(kLengthIndex, Smi::FromInt(length));
+ return FixedArray::cast(*this).set(kLengthIndex, Smi::FromInt(length));
}
Object ArrayList::Get(int index) const {
- return FixedArray::cast(*this)->get(kFirstIndex + index);
+ return FixedArray::cast(*this).get(kFirstIndex + index);
}
ObjectSlot ArrayList::Slot(int index) {
@@ -461,25 +483,25 @@ ObjectSlot ArrayList::Slot(int index) {
}
void ArrayList::Set(int index, Object obj, WriteBarrierMode mode) {
- FixedArray::cast(*this)->set(kFirstIndex + index, obj, mode);
+ FixedArray::cast(*this).set(kFirstIndex + index, obj, mode);
}
void ArrayList::Clear(int index, Object undefined) {
- DCHECK(undefined->IsUndefined());
- FixedArray::cast(*this)->set(kFirstIndex + index, undefined,
- SKIP_WRITE_BARRIER);
+ DCHECK(undefined.IsUndefined());
+ FixedArray::cast(*this).set(kFirstIndex + index, undefined,
+ SKIP_WRITE_BARRIER);
}
int ByteArray::Size() { return RoundUp(length() + kHeaderSize, kTaggedSize); }
byte ByteArray::get(int index) const {
DCHECK(index >= 0 && index < this->length());
- return READ_BYTE_FIELD(*this, kHeaderSize + index * kCharSize);
+ return ReadField<byte>(kHeaderSize + index * kCharSize);
}
void ByteArray::set(int index, byte value) {
DCHECK(index >= 0 && index < this->length());
- WRITE_BYTE_FIELD(*this, kHeaderSize + index * kCharSize, value);
+ WriteField<byte>(kHeaderSize + index * kCharSize, value);
}
void ByteArray::copy_in(int index, const byte* buffer, int length) {
@@ -498,22 +520,22 @@ void ByteArray::copy_out(int index, byte* buffer, int length) {
int ByteArray::get_int(int index) const {
DCHECK(index >= 0 && index < this->length() / kIntSize);
- return READ_INT_FIELD(*this, kHeaderSize + index * kIntSize);
+ return ReadField<int>(kHeaderSize + index * kIntSize);
}
void ByteArray::set_int(int index, int value) {
DCHECK(index >= 0 && index < this->length() / kIntSize);
- WRITE_INT_FIELD(*this, kHeaderSize + index * kIntSize, value);
+ WriteField<int>(kHeaderSize + index * kIntSize, value);
}
uint32_t ByteArray::get_uint32(int index) const {
DCHECK(index >= 0 && index < this->length() / kUInt32Size);
- return READ_UINT32_FIELD(*this, kHeaderSize + index * kUInt32Size);
+ return ReadField<uint32_t>(kHeaderSize + index * kUInt32Size);
}
void ByteArray::set_uint32(int index, uint32_t value) {
DCHECK(index >= 0 && index < this->length() / kUInt32Size);
- WRITE_UINT32_FIELD(*this, kHeaderSize + index * kUInt32Size, value);
+ WriteField<uint32_t>(kHeaderSize + index * kUInt32Size, value);
}
void ByteArray::clear_padding() {
@@ -559,385 +581,16 @@ int PodArray<T>::length() const {
return ByteArray::length() / sizeof(T);
}
-void* FixedTypedArrayBase::external_pointer() const {
- intptr_t ptr = READ_INTPTR_FIELD(*this, kExternalPointerOffset);
- return reinterpret_cast<void*>(ptr);
-}
-
-void FixedTypedArrayBase::set_external_pointer(void* value) {
- intptr_t ptr = reinterpret_cast<intptr_t>(value);
- WRITE_INTPTR_FIELD(*this, kExternalPointerOffset, ptr);
-}
-
-void* FixedTypedArrayBase::DataPtr() {
- return reinterpret_cast<void*>(
- base_pointer()->ptr() + reinterpret_cast<intptr_t>(external_pointer()));
-}
-
-int FixedTypedArrayBase::ElementSize(InstanceType type) {
- int element_size;
- switch (type) {
-#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype) \
- case FIXED_##TYPE##_ARRAY_TYPE: \
- element_size = sizeof(ctype); \
- break;
-
- TYPED_ARRAYS(TYPED_ARRAY_CASE)
-#undef TYPED_ARRAY_CASE
- default:
- UNREACHABLE();
- }
- return element_size;
-}
-
-int FixedTypedArrayBase::DataSize(InstanceType type) const {
- if (base_pointer() == Smi::kZero) return 0;
- return length() * ElementSize(type);
-}
-
-int FixedTypedArrayBase::DataSize() const {
- return DataSize(map()->instance_type());
-}
-
-size_t FixedTypedArrayBase::ByteLength() const {
- return static_cast<size_t>(length()) *
- static_cast<size_t>(ElementSize(map()->instance_type()));
-}
-
-int FixedTypedArrayBase::size() const {
- return OBJECT_POINTER_ALIGN(kDataOffset + DataSize());
-}
-
-int FixedTypedArrayBase::TypedArraySize(InstanceType type) const {
- return OBJECT_POINTER_ALIGN(kDataOffset + DataSize(type));
-}
-
-// static
-int FixedTypedArrayBase::TypedArraySize(InstanceType type, int length) {
- return OBJECT_POINTER_ALIGN(kDataOffset + length * ElementSize(type));
-}
-
-uint8_t Uint8ArrayTraits::defaultValue() { return 0; }
-
-uint8_t Uint8ClampedArrayTraits::defaultValue() { return 0; }
-
-int8_t Int8ArrayTraits::defaultValue() { return 0; }
-
-uint16_t Uint16ArrayTraits::defaultValue() { return 0; }
-
-int16_t Int16ArrayTraits::defaultValue() { return 0; }
-
-uint32_t Uint32ArrayTraits::defaultValue() { return 0; }
-
-int32_t Int32ArrayTraits::defaultValue() { return 0; }
-
-float Float32ArrayTraits::defaultValue() {
- return std::numeric_limits<float>::quiet_NaN();
-}
-
-double Float64ArrayTraits::defaultValue() {
- return std::numeric_limits<double>::quiet_NaN();
-}
-
-template <class Traits>
-typename Traits::ElementType FixedTypedArray<Traits>::get_scalar(int index) {
- DCHECK((index >= 0) && (index < this->length()));
- return FixedTypedArray<Traits>::get_scalar_from_data_ptr(DataPtr(), index);
-}
-
-// static
-template <class Traits>
-typename Traits::ElementType FixedTypedArray<Traits>::get_scalar_from_data_ptr(
- void* data_ptr, int index) {
- typename Traits::ElementType* ptr = reinterpret_cast<ElementType*>(data_ptr);
- // The JavaScript memory model allows for racy reads and writes to a
- // SharedArrayBuffer's backing store, which will always be a FixedTypedArray.
- // ThreadSanitizer will catch these racy accesses and warn about them, so we
- // disable TSAN for these reads and writes using annotations.
- //
- // We don't use relaxed atomics here, as it is not a requirement of the
- // JavaScript memory model to have tear-free reads of overlapping accesses,
- // and using relaxed atomics may introduce overhead.
- TSAN_ANNOTATE_IGNORE_READS_BEGIN;
- ElementType result;
- if (COMPRESS_POINTERS_BOOL && alignof(ElementType) > kTaggedSize) {
- // TODO(ishell, v8:8875): When pointer compression is enabled 8-byte size
- // fields (external pointers, doubles and BigInt data) are only kTaggedSize
- // aligned so we have to use unaligned pointer friendly way of accessing
- // them in order to avoid undefined behavior in C++ code.
- result = ReadUnalignedValue<ElementType>(reinterpret_cast<Address>(ptr) +
- index * sizeof(ElementType));
- } else {
- result = ptr[index];
- }
- TSAN_ANNOTATE_IGNORE_READS_END;
- return result;
-}
-
-template <class Traits>
-void FixedTypedArray<Traits>::set(int index, ElementType value) {
- CHECK((index >= 0) && (index < this->length()));
- // See the comment in FixedTypedArray<Traits>::get_scalar.
- auto* ptr = reinterpret_cast<ElementType*>(DataPtr());
- TSAN_ANNOTATE_IGNORE_WRITES_BEGIN;
- if (COMPRESS_POINTERS_BOOL && alignof(ElementType) > kTaggedSize) {
- // TODO(ishell, v8:8875): When pointer compression is enabled 8-byte size
- // fields (external pointers, doubles and BigInt data) are only kTaggedSize
- // aligned so we have to use unaligned pointer friendly way of accessing
- // them in order to avoid undefined behavior in C++ code.
- WriteUnalignedValue<ElementType>(
- reinterpret_cast<Address>(ptr) + index * sizeof(ElementType), value);
- } else {
- ptr[index] = value;
- }
- TSAN_ANNOTATE_IGNORE_WRITES_END;
-}
-
-template <class Traits>
-typename Traits::ElementType FixedTypedArray<Traits>::from(int value) {
- return static_cast<ElementType>(value);
-}
-
-template <>
-inline uint8_t FixedTypedArray<Uint8ClampedArrayTraits>::from(int value) {
- if (value < 0) return 0;
- if (value > 0xFF) return 0xFF;
- return static_cast<uint8_t>(value);
-}
-
-template <>
-inline int64_t FixedTypedArray<BigInt64ArrayTraits>::from(int value) {
- UNREACHABLE();
-}
-
-template <>
-inline uint64_t FixedTypedArray<BigUint64ArrayTraits>::from(int value) {
- UNREACHABLE();
-}
-
-template <class Traits>
-typename Traits::ElementType FixedTypedArray<Traits>::from(uint32_t value) {
- return static_cast<ElementType>(value);
-}
-
-template <>
-inline uint8_t FixedTypedArray<Uint8ClampedArrayTraits>::from(uint32_t value) {
- // We need this special case for Uint32 -> Uint8Clamped, because the highest
- // Uint32 values will be negative as an int, clamping to 0, rather than 255.
- if (value > 0xFF) return 0xFF;
- return static_cast<uint8_t>(value);
-}
-
-template <>
-inline int64_t FixedTypedArray<BigInt64ArrayTraits>::from(uint32_t value) {
- UNREACHABLE();
-}
-
-template <>
-inline uint64_t FixedTypedArray<BigUint64ArrayTraits>::from(uint32_t value) {
- UNREACHABLE();
-}
-
-template <class Traits>
-typename Traits::ElementType FixedTypedArray<Traits>::from(double value) {
- return static_cast<ElementType>(DoubleToInt32(value));
-}
-
-template <>
-inline uint8_t FixedTypedArray<Uint8ClampedArrayTraits>::from(double value) {
- // Handle NaNs and less than zero values which clamp to zero.
- if (!(value > 0)) return 0;
- if (value > 0xFF) return 0xFF;
- return static_cast<uint8_t>(lrint(value));
-}
-
-template <>
-inline int64_t FixedTypedArray<BigInt64ArrayTraits>::from(double value) {
- UNREACHABLE();
-}
-
-template <>
-inline uint64_t FixedTypedArray<BigUint64ArrayTraits>::from(double value) {
- UNREACHABLE();
-}
-
-template <>
-inline float FixedTypedArray<Float32ArrayTraits>::from(double value) {
- using limits = std::numeric_limits<float>;
- if (value > limits::max()) return limits::infinity();
- if (value < limits::lowest()) return -limits::infinity();
- return static_cast<float>(value);
-}
-
-template <>
-inline double FixedTypedArray<Float64ArrayTraits>::from(double value) {
- return value;
-}
-
-template <class Traits>
-typename Traits::ElementType FixedTypedArray<Traits>::from(int64_t value) {
- UNREACHABLE();
-}
-
-template <class Traits>
-typename Traits::ElementType FixedTypedArray<Traits>::from(uint64_t value) {
- UNREACHABLE();
-}
-
-template <>
-inline int64_t FixedTypedArray<BigInt64ArrayTraits>::from(int64_t value) {
- return value;
-}
-
-template <>
-inline uint64_t FixedTypedArray<BigUint64ArrayTraits>::from(uint64_t value) {
- return value;
-}
-
-template <>
-inline uint64_t FixedTypedArray<BigUint64ArrayTraits>::from(int64_t value) {
- return static_cast<uint64_t>(value);
-}
-
-template <>
-inline int64_t FixedTypedArray<BigInt64ArrayTraits>::from(uint64_t value) {
- return static_cast<int64_t>(value);
-}
-
-template <class Traits>
-typename Traits::ElementType FixedTypedArray<Traits>::FromHandle(
- Handle<Object> value, bool* lossless) {
- if (value->IsSmi()) {
- return from(Smi::ToInt(*value));
- }
- DCHECK(value->IsHeapNumber());
- return from(HeapNumber::cast(*value)->value());
-}
-
-template <>
-inline int64_t FixedTypedArray<BigInt64ArrayTraits>::FromHandle(
- Handle<Object> value, bool* lossless) {
- DCHECK(value->IsBigInt());
- return BigInt::cast(*value)->AsInt64(lossless);
-}
-
-template <>
-inline uint64_t FixedTypedArray<BigUint64ArrayTraits>::FromHandle(
- Handle<Object> value, bool* lossless) {
- DCHECK(value->IsBigInt());
- return BigInt::cast(*value)->AsUint64(lossless);
-}
-
-template <class Traits>
-Handle<Object> FixedTypedArray<Traits>::get(Isolate* isolate,
- FixedTypedArray<Traits> array,
- int index) {
- return Traits::ToHandle(isolate, array->get_scalar(index));
-}
-
-template <class Traits>
-void FixedTypedArray<Traits>::SetValue(uint32_t index, Object value) {
- ElementType cast_value = Traits::defaultValue();
- if (value->IsSmi()) {
- int int_value = Smi::ToInt(value);
- cast_value = from(int_value);
- } else if (value->IsHeapNumber()) {
- double double_value = HeapNumber::cast(value)->value();
- cast_value = from(double_value);
- } else {
- // Clamp undefined to the default value. All other types have been
- // converted to a number type further up in the call chain.
- DCHECK(value->IsUndefined());
- }
- set(index, cast_value);
-}
-
-template <>
-inline void FixedTypedArray<BigInt64ArrayTraits>::SetValue(uint32_t index,
- Object value) {
- DCHECK(value->IsBigInt());
- set(index, BigInt::cast(value)->AsInt64());
-}
-
-template <>
-inline void FixedTypedArray<BigUint64ArrayTraits>::SetValue(uint32_t index,
- Object value) {
- DCHECK(value->IsBigInt());
- set(index, BigInt::cast(value)->AsUint64());
-}
-
-Handle<Object> Uint8ArrayTraits::ToHandle(Isolate* isolate, uint8_t scalar) {
- return handle(Smi::FromInt(scalar), isolate);
-}
-
-Handle<Object> Uint8ClampedArrayTraits::ToHandle(Isolate* isolate,
- uint8_t scalar) {
- return handle(Smi::FromInt(scalar), isolate);
-}
-
-Handle<Object> Int8ArrayTraits::ToHandle(Isolate* isolate, int8_t scalar) {
- return handle(Smi::FromInt(scalar), isolate);
-}
-
-Handle<Object> Uint16ArrayTraits::ToHandle(Isolate* isolate, uint16_t scalar) {
- return handle(Smi::FromInt(scalar), isolate);
-}
-
-Handle<Object> Int16ArrayTraits::ToHandle(Isolate* isolate, int16_t scalar) {
- return handle(Smi::FromInt(scalar), isolate);
-}
-
-Handle<Object> Uint32ArrayTraits::ToHandle(Isolate* isolate, uint32_t scalar) {
- return isolate->factory()->NewNumberFromUint(scalar);
-}
-
-Handle<Object> Int32ArrayTraits::ToHandle(Isolate* isolate, int32_t scalar) {
- return isolate->factory()->NewNumberFromInt(scalar);
-}
-
-Handle<Object> Float32ArrayTraits::ToHandle(Isolate* isolate, float scalar) {
- return isolate->factory()->NewNumber(scalar);
-}
-
-Handle<Object> Float64ArrayTraits::ToHandle(Isolate* isolate, double scalar) {
- return isolate->factory()->NewNumber(scalar);
-}
-
-Handle<Object> BigInt64ArrayTraits::ToHandle(Isolate* isolate, int64_t scalar) {
- return BigInt::FromInt64(isolate, scalar);
-}
-
-Handle<Object> BigUint64ArrayTraits::ToHandle(Isolate* isolate,
- uint64_t scalar) {
- return BigInt::FromUint64(isolate, scalar);
-}
-
-// static
-template <class Traits>
-STATIC_CONST_MEMBER_DEFINITION const InstanceType
- FixedTypedArray<Traits>::kInstanceType;
-
-template <class Traits>
-FixedTypedArray<Traits>::FixedTypedArray(Address ptr)
- : FixedTypedArrayBase(ptr) {
- DCHECK(IsHeapObject() && map()->instance_type() == Traits::kInstanceType);
-}
-
-template <class Traits>
-FixedTypedArray<Traits> FixedTypedArray<Traits>::cast(Object object) {
- return FixedTypedArray<Traits>(object.ptr());
-}
-
int TemplateList::length() const {
- return Smi::ToInt(FixedArray::cast(*this)->get(kLengthIndex));
+ return Smi::ToInt(FixedArray::cast(*this).get(kLengthIndex));
}
Object TemplateList::get(int index) const {
- return FixedArray::cast(*this)->get(kFirstElementIndex + index);
+ return FixedArray::cast(*this).get(kFirstElementIndex + index);
}
void TemplateList::set(int index, Object value) {
- FixedArray::cast(*this)->set(kFirstElementIndex + index, value);
+ FixedArray::cast(*this).set(kFirstElementIndex + index, value);
}
} // namespace internal
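[Reviewer note, not part of the patch] The switch to ReadUnalignedValue and
WriteUnalignedValue in this file (see the v8:8875 references) exists because
under pointer compression an 8-byte double payload may be only 4-byte
aligned, and dereferencing a misaligned double* is undefined behavior in C++.
It is also why get_representation() compares raw uint64_t bits against
kHoleNanInt64 instead of comparing doubles. A sketch of the memcpy-based
technique such helpers typically use (not V8's actual implementation):

#include <cstdint>
#include <cstring>

template <typename T>
T ReadUnalignedValue(const char* address) {
  T value;
  std::memcpy(&value, address, sizeof(T));  // well-defined at any alignment
  return value;
}

template <typename T>
void WriteUnalignedValue(char* address, T value) {
  std::memcpy(address, &value, sizeof(T));
}

int main() {
  alignas(8) char buffer[16] = {};
  // A 4-byte-aligned but not 8-byte-aligned slot, as can happen for double
  // fields when pointers are compressed to 4 bytes.
  WriteUnalignedValue<uint64_t>(buffer + 4, 0xFFF7FFFFFFFFFFFFull);
  return ReadUnalignedValue<uint64_t>(buffer + 4) == 0xFFF7FFFFFFFFFFFFull
             ? 0
             : 1;
}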
diff --git a/deps/v8/src/objects/fixed-array.h b/deps/v8/src/objects/fixed-array.h
index e3ab45ba0e..02f26502b2 100644
--- a/deps/v8/src/objects/fixed-array.h
+++ b/deps/v8/src/objects/fixed-array.h
@@ -5,10 +5,10 @@
#ifndef V8_OBJECTS_FIXED_ARRAY_H_
#define V8_OBJECTS_FIXED_ARRAY_H_
-#include "src/maybe-handles.h"
+#include "src/handles/maybe-handles.h"
#include "src/objects/instance-type.h"
#include "src/objects/smi.h"
-#include "torque-generated/class-definitions-from-dsl.h"
+#include "torque-generated/field-offsets-tq.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -100,8 +100,6 @@ class FixedArrayBase : public HeapObject {
DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize,
TORQUE_GENERATED_FIXED_ARRAY_BASE_FIELDS)
- static const int kHeaderSize = kSize;
-
protected:
// Special-purpose constructor for subclasses that have fast paths where
// their ptr() is a Smi.
@@ -117,11 +115,6 @@ class FixedArray : public FixedArrayBase {
inline Object get(int index) const;
static inline Handle<Object> get(FixedArray array, int index,
Isolate* isolate);
- template <class T>
- MaybeHandle<T> GetValue(Isolate* isolate, int index) const;
-
- template <class T>
- Handle<T> GetValueChecked(Isolate* isolate, int index) const;
// Return a grown copy if the index is bigger than the array's length.
V8_EXPORT_PRIVATE static Handle<FixedArray> SetAndGrow(
@@ -147,16 +140,14 @@ class FixedArray : public FixedArrayBase {
inline ObjectSlot GetFirstElementAddress();
inline bool ContainsOnlySmisOrHoles();
- // Returns true iff the elements are Numbers and sorted ascending.
- bool ContainsSortedNumbers();
// Gives access to raw memory which stores the array's data.
inline ObjectSlot data_start();
- inline void MoveElements(Heap* heap, int dst_index, int src_index, int len,
- WriteBarrierMode mode);
+ inline void MoveElements(Isolate* isolate, int dst_index, int src_index,
+ int len, WriteBarrierMode mode);
- inline void CopyElements(Heap* heap, int dst_index, FixedArray src,
+ inline void CopyElements(Isolate* isolate, int dst_index, FixedArray src,
int src_index, int len, WriteBarrierMode mode);
inline void FillWithHoles(int from, int to);
@@ -201,6 +192,8 @@ class FixedArray : public FixedArrayBase {
using BodyDescriptor = FlexibleBodyDescriptor<kHeaderSize>;
+ static constexpr int kObjectsOffset = kHeaderSize;
+
protected:
// Set operation on FixedArray without using write barriers. Can
// only be used for storing old space objects or smis.
@@ -243,8 +236,8 @@ class FixedDoubleArray : public FixedArrayBase {
return kHeaderSize + length * kDoubleSize;
}
- inline void MoveElements(Heap* heap, int dst_index, int src_index, int len,
- WriteBarrierMode mode);
+ inline void MoveElements(Isolate* isolate, int dst_index, int src_index,
+ int len, WriteBarrierMode mode);
inline void FillWithHoles(int from, int to);
@@ -296,6 +289,9 @@ class WeakFixedArray : public HeapObject {
inline MaybeObjectSlot RawFieldOfElementAt(int index);
+ inline void CopyElements(Isolate* isolate, int dst_index, WeakFixedArray src,
+ int src_index, int len, WriteBarrierMode mode);
+
DECL_PRINTER(WeakFixedArray)
DECL_VERIFIER(WeakFixedArray)
@@ -354,6 +350,9 @@ class WeakArrayList : public HeapObject {
// Gives access to raw memory which stores the array's data.
inline MaybeObjectSlot data_start();
+ inline void CopyElements(Isolate* isolate, int dst_index, WeakArrayList src,
+ int src_index, int len, WriteBarrierMode mode);
+
V8_EXPORT_PRIVATE bool IsFull();
DECL_INT_ACCESSORS(capacity)
@@ -577,128 +576,6 @@ class PodArray : public ByteArray {
OBJECT_CONSTRUCTORS(PodArray<T>, ByteArray);
};
-class FixedTypedArrayBase : public FixedArrayBase {
- public:
- // [base_pointer]: Either points to the FixedTypedArrayBase itself or nullptr.
- DECL_ACCESSORS(base_pointer, Object)
-
- // [external_pointer]: Contains the offset between base_pointer and the start
- // of the data. If the base_pointer is a nullptr, the external_pointer
- // therefore points to the actual backing store.
- DECL_PRIMITIVE_ACCESSORS(external_pointer, void*)
-
- // Dispatched behavior.
- DECL_CAST(FixedTypedArrayBase)
-
- DEFINE_FIELD_OFFSET_CONSTANTS(FixedArrayBase::kHeaderSize,
- TORQUE_GENERATED_FIXED_TYPED_ARRAY_BASE_FIELDS)
- static const int kHeaderSize = kSize;
-
-#ifdef V8_COMPRESS_POINTERS
- // TODO(ishell, v8:8875): When pointer compression is enabled the kHeaderSize
- // is only kTaggedSize aligned but we can keep using unaligned access since
- // both x64 and arm64 architectures (where pointer compression supported)
- // allow unaligned access to doubles.
- STATIC_ASSERT(IsAligned(kHeaderSize, kTaggedSize));
-#else
- STATIC_ASSERT(IsAligned(kHeaderSize, kDoubleAlignment));
-#endif
-
- static const int kDataOffset = kHeaderSize;
-
- static const int kMaxElementSize = 8;
-
-#ifdef V8_HOST_ARCH_32_BIT
- static const size_t kMaxByteLength = std::numeric_limits<size_t>::max();
-#else
- static const size_t kMaxByteLength =
- static_cast<size_t>(Smi::kMaxValue) * kMaxElementSize;
-#endif // V8_HOST_ARCH_32_BIT
-
- static const size_t kMaxLength = Smi::kMaxValue;
-
- class BodyDescriptor;
-
- inline int size() const;
-
- static inline int TypedArraySize(InstanceType type, int length);
- inline int TypedArraySize(InstanceType type) const;
-
- // Use with care: returns raw pointer into heap.
- inline void* DataPtr();
-
- inline int DataSize() const;
-
- inline size_t ByteLength() const;
-
- static inline intptr_t ExternalPointerValueForOnHeapArray() {
- return FixedTypedArrayBase::kDataOffset - kHeapObjectTag;
- }
-
- static inline void* ExternalPointerPtrForOnHeapArray() {
- return reinterpret_cast<void*>(ExternalPointerValueForOnHeapArray());
- }
-
- private:
- static inline int ElementSize(InstanceType type);
-
- inline int DataSize(InstanceType type) const;
-
- OBJECT_CONSTRUCTORS(FixedTypedArrayBase, FixedArrayBase);
-};
-
-template <class Traits>
-class FixedTypedArray : public FixedTypedArrayBase {
- public:
- using ElementType = typename Traits::ElementType;
- static const InstanceType kInstanceType = Traits::kInstanceType;
-
- DECL_CAST(FixedTypedArray<Traits>)
-
- static inline ElementType get_scalar_from_data_ptr(void* data_ptr, int index);
- inline ElementType get_scalar(int index);
- static inline Handle<Object> get(Isolate* isolate, FixedTypedArray array,
- int index);
- inline void set(int index, ElementType value);
-
- static inline ElementType from(int value);
- static inline ElementType from(uint32_t value);
- static inline ElementType from(double value);
- static inline ElementType from(int64_t value);
- static inline ElementType from(uint64_t value);
-
- static inline ElementType FromHandle(Handle<Object> value,
- bool* lossless = nullptr);
-
- // This accessor applies the correct conversion from Smi, HeapNumber
- // and undefined.
- inline void SetValue(uint32_t index, Object value);
-
- DECL_PRINTER(FixedTypedArray)
- DECL_VERIFIER(FixedTypedArray)
-
- private:
- OBJECT_CONSTRUCTORS(FixedTypedArray, FixedTypedArrayBase);
-};
-
-#define FIXED_TYPED_ARRAY_TRAITS(Type, type, TYPE, elementType) \
- STATIC_ASSERT(sizeof(elementType) <= FixedTypedArrayBase::kMaxElementSize); \
- class Type##ArrayTraits { \
- public: /* NOLINT */ \
- using ElementType = elementType; \
- static const InstanceType kInstanceType = FIXED_##TYPE##_ARRAY_TYPE; \
- static const char* ArrayTypeName() { return "Fixed" #Type "Array"; } \
- static inline Handle<Object> ToHandle(Isolate* isolate, \
- elementType scalar); \
- static inline elementType defaultValue(); \
- }; \
- \
- using Fixed##Type##Array = FixedTypedArray<Type##ArrayTraits>;
-
-TYPED_ARRAYS(FIXED_TYPED_ARRAY_TRAITS)
-
-#undef FIXED_TYPED_ARRAY_TRAITS
-
class TemplateList : public FixedArray {
public:
static Handle<TemplateList> New(Isolate* isolate, int size);
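[Reviewer note, not part of the patch] The new MoveElements/CopyElements
overloads in this header take an Isolate* instead of a Heap* and carry their
own bounds DCHECKs. Per the -inl.h hunks, Move delegates to Heap::MoveRange
and so tolerates overlapping ranges, while Copy assumes disjoint ranges. A
standalone sketch of that contract on a plain int array, with write-barrier
modes omitted:

#include <cassert>
#include <cstring>

void MoveElements(int* data, int length, int dst, int src, int len) {
  assert(dst + len <= length && src + len <= length);  // mirrors the DCHECKs
  if (len == 0) return;
  std::memmove(data + dst, data + src, len * sizeof(int));  // overlap-safe
}

int main() {
  int a[4] = {1, 2, 3, 4};
  MoveElements(a, 4, /*dst=*/0, /*src=*/1, /*len=*/3);  // overlapping shift
  assert(a[0] == 2 && a[1] == 3 && a[2] == 4);
}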
diff --git a/deps/v8/src/objects/foreign-inl.h b/deps/v8/src/objects/foreign-inl.h
index 0ac9f652bb..fc93b66a0a 100644
--- a/deps/v8/src/objects/foreign-inl.h
+++ b/deps/v8/src/objects/foreign-inl.h
@@ -8,7 +8,7 @@
#include "src/objects/foreign.h"
#include "src/heap/heap-write-barrier-inl.h"
-#include "src/objects-inl.h"
+#include "src/objects/objects-inl.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -23,15 +23,15 @@ CAST_ACCESSOR(Foreign)
// static
bool Foreign::IsNormalized(Object value) {
if (value == Smi::kZero) return true;
- return Foreign::cast(value)->foreign_address() != kNullAddress;
+ return Foreign::cast(value).foreign_address() != kNullAddress;
}
Address Foreign::foreign_address() {
- return READ_UINTPTR_FIELD(*this, kForeignAddressOffset);
+ return ReadField<Address>(kForeignAddressOffset);
}
void Foreign::set_foreign_address(Address value) {
- WRITE_UINTPTR_FIELD(*this, kForeignAddressOffset, value);
+ WriteField<Address>(kForeignAddressOffset, value);
}
} // namespace internal
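[Reviewer note, not part of the patch] This hunk is part of the broader
retirement of the READ_*_FIELD/WRITE_*_FIELD macros: the field's type moves
out of the macro name and into a template parameter, while the offset stays
as data. A rough standalone sketch of the shape of such typed accessors,
assuming memcpy-based access rather than V8's actual implementation:

#include <cstdint>
#include <cstring>

struct ObjectSketch {
  char* payload;  // stands in for the object's field area

  template <typename T>
  T ReadField(int offset) const {
    T value;
    std::memcpy(&value, payload + offset, sizeof(T));
    return value;
  }

  template <typename T>
  void WriteField(int offset, T value) {
    std::memcpy(payload + offset, &value, sizeof(T));
  }
};

int main() {
  char storage[16] = {};
  ObjectSketch o{storage};
  o.WriteField<uintptr_t>(8, 0x1234);  // cf. set_foreign_address above
  return o.ReadField<uintptr_t>(8) == 0x1234 ? 0 : 1;
}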
diff --git a/deps/v8/src/objects/foreign.h b/deps/v8/src/objects/foreign.h
index 629d549b6d..617ca0e34f 100644
--- a/deps/v8/src/objects/foreign.h
+++ b/deps/v8/src/objects/foreign.h
@@ -6,7 +6,7 @@
#define V8_OBJECTS_FOREIGN_H_
#include "src/objects/heap-object.h"
-#include "torque-generated/class-definitions-from-dsl.h"
+#include "torque-generated/field-offsets-tq.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
diff --git a/deps/v8/src/objects/frame-array-inl.h b/deps/v8/src/objects/frame-array-inl.h
index 78d08da00f..5b342c64c0 100644
--- a/deps/v8/src/objects/frame-array-inl.h
+++ b/deps/v8/src/objects/frame-array-inl.h
@@ -33,17 +33,17 @@ FRAME_ARRAY_FIELD_LIST(DEFINE_FRAME_ARRAY_ACCESSORS)
#undef DEFINE_FRAME_ARRAY_ACCESSORS
bool FrameArray::IsWasmFrame(int frame_ix) const {
- const int flags = Flags(frame_ix)->value();
+ const int flags = Flags(frame_ix).value();
return (flags & kIsWasmFrame) != 0;
}
bool FrameArray::IsWasmInterpretedFrame(int frame_ix) const {
- const int flags = Flags(frame_ix)->value();
+ const int flags = Flags(frame_ix).value();
return (flags & kIsWasmInterpretedFrame) != 0;
}
bool FrameArray::IsAsmJsWasmFrame(int frame_ix) const {
- const int flags = Flags(frame_ix)->value();
+ const int flags = Flags(frame_ix).value();
return (flags & kIsAsmJsWasmFrame) != 0;
}
diff --git a/deps/v8/src/objects/frame-array.h b/deps/v8/src/objects/frame-array.h
index 438718e25f..42750cf69c 100644
--- a/deps/v8/src/objects/frame-array.h
+++ b/deps/v8/src/objects/frame-array.h
@@ -5,7 +5,7 @@
#ifndef V8_OBJECTS_FRAME_ARRAY_H_
#define V8_OBJECTS_FRAME_ARRAY_H_
-#include "src/objects.h"
+#include "src/objects/objects.h"
#include "src/wasm/wasm-objects.h"
// Has to be the last include (doesn't have include guards):
diff --git a/deps/v8/src/objects/free-space-inl.h b/deps/v8/src/objects/free-space-inl.h
index b36c4e154f..bea8257515 100644
--- a/deps/v8/src/objects/free-space-inl.h
+++ b/deps/v8/src/objects/free-space-inl.h
@@ -7,10 +7,10 @@
#include "src/objects/free-space.h"
+#include "src/execution/isolate.h"
#include "src/heap/heap-write-barrier-inl.h"
#include "src/heap/heap.h"
-#include "src/isolate.h"
-#include "src/objects-inl.h"
+#include "src/objects/objects-inl.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -30,7 +30,7 @@ FreeSpace FreeSpace::next() {
Heap* heap = GetHeapFromWritableObject(*this);
Object free_space_map =
Isolate::FromHeap(heap)->root(RootIndex::kFreeSpaceMap);
- DCHECK_IMPLIES(!map_slot().contains_value(free_space_map->ptr()),
+ DCHECK_IMPLIES(!map_slot().contains_value(free_space_map.ptr()),
!heap->deserialization_complete() &&
map_slot().contains_value(kNullAddress));
#endif
@@ -43,7 +43,7 @@ void FreeSpace::set_next(FreeSpace next) {
Heap* heap = GetHeapFromWritableObject(*this);
Object free_space_map =
Isolate::FromHeap(heap)->root(RootIndex::kFreeSpaceMap);
- DCHECK_IMPLIES(!map_slot().contains_value(free_space_map->ptr()),
+ DCHECK_IMPLIES(!map_slot().contains_value(free_space_map.ptr()),
!heap->deserialization_complete() &&
map_slot().contains_value(kNullAddress));
#endif
@@ -53,7 +53,7 @@ void FreeSpace::set_next(FreeSpace next) {
FreeSpace FreeSpace::cast(HeapObject o) {
SLOW_DCHECK(!GetHeapFromWritableObject(o)->deserialization_complete() ||
- o->IsFreeSpace());
+ o.IsFreeSpace());
return bit_cast<FreeSpace>(o);
}
diff --git a/deps/v8/src/objects/free-space.h b/deps/v8/src/objects/free-space.h
index f1f7bb56c5..38f5794646 100644
--- a/deps/v8/src/objects/free-space.h
+++ b/deps/v8/src/objects/free-space.h
@@ -6,7 +6,7 @@
#define V8_OBJECTS_FREE_SPACE_H_
#include "src/objects/heap-object.h"
-#include "torque-generated/class-definitions-from-dsl.h"
+#include "torque-generated/field-offsets-tq.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
diff --git a/deps/v8/src/objects/function-kind.h b/deps/v8/src/objects/function-kind.h
new file mode 100644
index 0000000000..4a1819813c
--- /dev/null
+++ b/deps/v8/src/objects/function-kind.h
@@ -0,0 +1,194 @@
+
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_FUNCTION_KIND_H_
+#define V8_OBJECTS_FUNCTION_KIND_H_
+
+#include "src/utils/utils.h"
+
+namespace v8 {
+namespace internal {
+
+enum FunctionKind : uint8_t {
+ // BEGIN constructable functions
+ kNormalFunction,
+ kModule,
+ // BEGIN class constructors
+ // BEGIN base constructors
+ kBaseConstructor,
+ // BEGIN default constructors
+ kDefaultBaseConstructor,
+ // END base constructors
+  // BEGIN derived constructors
+  kDefaultDerivedConstructor,
+  // END default constructors
+  kDerivedConstructor,
+  // END derived constructors
+  // END class constructors
+ // END constructable functions.
+ // BEGIN accessors
+ kGetterFunction,
+ kSetterFunction,
+ // END accessors
+ // BEGIN arrow functions
+ kArrowFunction,
+ // BEGIN async functions
+ kAsyncArrowFunction,
+ // END arrow functions
+ kAsyncFunction,
+ // BEGIN concise methods 1
+ kAsyncConciseMethod,
+ // BEGIN generators
+ kAsyncConciseGeneratorMethod,
+ // END concise methods 1
+ kAsyncGeneratorFunction,
+ // END async functions
+ kGeneratorFunction,
+ // BEGIN concise methods 2
+ kConciseGeneratorMethod,
+ // END generators
+ kConciseMethod,
+ kClassMembersInitializerFunction,
+ // END concise methods 2
+
+ kLastFunctionKind = kClassMembersInitializerFunction,
+};
+
+inline bool IsArrowFunction(FunctionKind kind) {
+ return IsInRange(kind, FunctionKind::kArrowFunction,
+ FunctionKind::kAsyncArrowFunction);
+}
+
+inline bool IsModule(FunctionKind kind) {
+ return kind == FunctionKind::kModule;
+}
+
+inline bool IsAsyncGeneratorFunction(FunctionKind kind) {
+ return IsInRange(kind, FunctionKind::kAsyncConciseGeneratorMethod,
+ FunctionKind::kAsyncGeneratorFunction);
+}
+
+inline bool IsGeneratorFunction(FunctionKind kind) {
+ return IsInRange(kind, FunctionKind::kAsyncConciseGeneratorMethod,
+ FunctionKind::kConciseGeneratorMethod);
+}
+
+inline bool IsAsyncFunction(FunctionKind kind) {
+ return IsInRange(kind, FunctionKind::kAsyncArrowFunction,
+ FunctionKind::kAsyncGeneratorFunction);
+}
+
+inline bool IsResumableFunction(FunctionKind kind) {
+ return IsGeneratorFunction(kind) || IsAsyncFunction(kind) || IsModule(kind);
+}
+
+inline bool IsConciseMethod(FunctionKind kind) {
+ return IsInRange(kind, FunctionKind::kAsyncConciseMethod,
+ FunctionKind::kAsyncConciseGeneratorMethod) ||
+ IsInRange(kind, FunctionKind::kConciseGeneratorMethod,
+ FunctionKind::kClassMembersInitializerFunction);
+}
+
+inline bool IsStrictFunctionWithoutPrototype(FunctionKind kind) {
+ return IsInRange(kind, FunctionKind::kGetterFunction,
+ FunctionKind::kAsyncArrowFunction) ||
+ IsInRange(kind, FunctionKind::kAsyncConciseMethod,
+ FunctionKind::kAsyncConciseGeneratorMethod) ||
+ IsInRange(kind, FunctionKind::kConciseGeneratorMethod,
+ FunctionKind::kClassMembersInitializerFunction);
+}
+
+inline bool IsGetterFunction(FunctionKind kind) {
+ return kind == FunctionKind::kGetterFunction;
+}
+
+inline bool IsSetterFunction(FunctionKind kind) {
+ return kind == FunctionKind::kSetterFunction;
+}
+
+inline bool IsAccessorFunction(FunctionKind kind) {
+ return IsInRange(kind, FunctionKind::kGetterFunction,
+ FunctionKind::kSetterFunction);
+}
+
+inline bool IsDefaultConstructor(FunctionKind kind) {
+ return IsInRange(kind, FunctionKind::kDefaultBaseConstructor,
+ FunctionKind::kDefaultDerivedConstructor);
+}
+
+inline bool IsBaseConstructor(FunctionKind kind) {
+ return IsInRange(kind, FunctionKind::kBaseConstructor,
+ FunctionKind::kDefaultBaseConstructor);
+}
+
+inline bool IsDerivedConstructor(FunctionKind kind) {
+ return IsInRange(kind, FunctionKind::kDefaultDerivedConstructor,
+ FunctionKind::kDerivedConstructor);
+}
+
+inline bool IsClassConstructor(FunctionKind kind) {
+ return IsInRange(kind, FunctionKind::kBaseConstructor,
+ FunctionKind::kDerivedConstructor);
+}
+
+inline bool IsClassMembersInitializerFunction(FunctionKind kind) {
+ return kind == FunctionKind::kClassMembersInitializerFunction;
+}
+
+inline bool IsConstructable(FunctionKind kind) {
+ return IsInRange(kind, FunctionKind::kNormalFunction,
+ FunctionKind::kDerivedConstructor);
+}
+
+inline const char* FunctionKind2String(FunctionKind kind) {
+ switch (kind) {
+ case FunctionKind::kNormalFunction:
+ return "NormalFunction";
+ case FunctionKind::kArrowFunction:
+ return "ArrowFunction";
+ case FunctionKind::kGeneratorFunction:
+ return "GeneratorFunction";
+ case FunctionKind::kConciseMethod:
+ return "ConciseMethod";
+ case FunctionKind::kDerivedConstructor:
+ return "DerivedConstructor";
+ case FunctionKind::kBaseConstructor:
+ return "BaseConstructor";
+ case FunctionKind::kGetterFunction:
+ return "GetterFunction";
+ case FunctionKind::kSetterFunction:
+ return "SetterFunction";
+ case FunctionKind::kAsyncFunction:
+ return "AsyncFunction";
+ case FunctionKind::kModule:
+ return "Module";
+ case FunctionKind::kClassMembersInitializerFunction:
+ return "ClassMembersInitializerFunction";
+ case FunctionKind::kDefaultBaseConstructor:
+ return "DefaultBaseConstructor";
+ case FunctionKind::kDefaultDerivedConstructor:
+ return "DefaultDerivedConstructor";
+ case FunctionKind::kAsyncArrowFunction:
+ return "AsyncArrowFunction";
+ case FunctionKind::kAsyncConciseMethod:
+ return "AsyncConciseMethod";
+ case FunctionKind::kConciseGeneratorMethod:
+ return "ConciseGeneratorMethod";
+ case FunctionKind::kAsyncConciseGeneratorMethod:
+ return "AsyncConciseGeneratorMethod";
+ case FunctionKind::kAsyncGeneratorFunction:
+ return "AsyncGeneratorFunction";
+ }
+ UNREACHABLE();
+}
+
+inline std::ostream& operator<<(std::ostream& os, FunctionKind kind) {
+ return os << FunctionKind2String(kind);
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_OBJECTS_FUNCTION_KIND_H_
diff --git a/deps/v8/src/objects/hash-table-inl.h b/deps/v8/src/objects/hash-table-inl.h
index d65d9de083..77453721ae 100644
--- a/deps/v8/src/objects/hash-table-inl.h
+++ b/deps/v8/src/objects/hash-table-inl.h
@@ -8,10 +8,10 @@
#include "src/objects/hash-table.h"
#include "src/heap/heap.h"
-#include "src/objects-inl.h"
#include "src/objects/fixed-array-inl.h"
#include "src/objects/heap-object-inl.h"
-#include "src/roots-inl.h"
+#include "src/objects/objects-inl.h"
+#include "src/roots/roots-inl.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -193,7 +193,7 @@ bool ObjectHashSet::Has(Isolate* isolate, Handle<Object> key, int32_t hash) {
bool ObjectHashSet::Has(Isolate* isolate, Handle<Object> key) {
Object hash = key->GetHash();
- if (!hash->IsSmi()) return false;
+ if (!hash.IsSmi()) return false;
return FindEntry(ReadOnlyRoots(isolate), key, Smi::ToInt(hash)) != kNotFound;
}
@@ -207,7 +207,7 @@ uint32_t ObjectHashTableShape::Hash(Isolate* isolate, Handle<Object> key) {
uint32_t ObjectHashTableShape::HashForObject(ReadOnlyRoots roots,
Object other) {
- return Smi::ToInt(other->GetHash());
+ return Smi::ToInt(other.GetHash());
}
} // namespace internal
diff --git a/deps/v8/src/objects/hash-table.h b/deps/v8/src/objects/hash-table.h
index 0c83d01b42..610dc9d28e 100644
--- a/deps/v8/src/objects/hash-table.h
+++ b/deps/v8/src/objects/hash-table.h
@@ -8,10 +8,10 @@
#include "src/base/compiler-specific.h"
#include "src/base/export-template.h"
#include "src/base/macros.h"
-#include "src/globals.h"
+#include "src/common/globals.h"
#include "src/objects/fixed-array.h"
#include "src/objects/smi.h"
-#include "src/roots.h"
+#include "src/roots/roots.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -150,7 +150,7 @@ class EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE) HashTable
// Find entry for key otherwise return kNotFound.
inline int FindEntry(ReadOnlyRoots roots, Key key, int32_t hash);
- int FindEntry(Isolate* isolate, Key key);
+ inline int FindEntry(Isolate* isolate, Key key);
// Rehashes the table in-place.
void Rehash(ReadOnlyRoots roots);
diff --git a/deps/v8/src/objects/heap-number-inl.h b/deps/v8/src/objects/heap-number-inl.h
index ad82296bce..3986e9146c 100644
--- a/deps/v8/src/objects/heap-number-inl.h
+++ b/deps/v8/src/objects/heap-number-inl.h
@@ -7,8 +7,8 @@
#include "src/objects/heap-number.h"
-#include "src/objects-inl.h"
#include "src/objects/heap-object-inl.h"
+#include "src/objects/objects-inl.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -23,30 +23,28 @@ OBJECT_CONSTRUCTORS_IMPL(MutableHeapNumber, HeapNumberBase)
CAST_ACCESSOR(HeapNumber)
CAST_ACCESSOR(MutableHeapNumber)
-double HeapNumberBase::value() const {
- return READ_DOUBLE_FIELD(*this, kValueOffset);
-}
+double HeapNumberBase::value() const { return ReadField<double>(kValueOffset); }
void HeapNumberBase::set_value(double value) {
- WRITE_DOUBLE_FIELD(*this, kValueOffset, value);
+ WriteField<double>(kValueOffset, value);
}
uint64_t HeapNumberBase::value_as_bits() const {
- return READ_UINT64_FIELD(*this, kValueOffset);
+ // Bug(v8:8875): HeapNumber's double may be unaligned.
+ return ReadUnalignedValue<uint64_t>(field_address(kValueOffset));
}
void HeapNumberBase::set_value_as_bits(uint64_t bits) {
- WRITE_UINT64_FIELD(*this, kValueOffset, bits);
+ WriteUnalignedValue<uint64_t>(field_address(kValueOffset), bits);
}
int HeapNumberBase::get_exponent() {
- return ((READ_INT_FIELD(*this, kExponentOffset) & kExponentMask) >>
- kExponentShift) -
+ return ((ReadField<int>(kExponentOffset) & kExponentMask) >> kExponentShift) -
kExponentBias;
}
int HeapNumberBase::get_sign() {
- return READ_INT_FIELD(*this, kExponentOffset) & kSignMask;
+ return ReadField<int>(kExponentOffset) & kSignMask;
}
} // namespace internal
diff --git a/deps/v8/src/objects/heap-object-inl.h b/deps/v8/src/objects/heap-object-inl.h
index be97f8bb79..3d5deeff63 100644
--- a/deps/v8/src/objects/heap-object-inl.h
+++ b/deps/v8/src/objects/heap-object-inl.h
@@ -9,7 +9,7 @@
#include "src/heap/heap-write-barrier-inl.h"
// TODO(jkummerow): Get rid of this by moving NROSO::GetIsolate elsewhere.
-#include "src/isolate.h"
+#include "src/execution/isolate.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -17,9 +17,6 @@
namespace v8 {
namespace internal {
-OBJECT_CONSTRUCTORS_IMPL(HeapObject, Object)
-CAST_ACCESSOR(HeapObject)
-
HeapObject::HeapObject(Address ptr, AllowInlineSmiStorage allow_smi)
: Object(ptr) {
SLOW_DCHECK(
@@ -28,12 +25,6 @@ HeapObject::HeapObject(Address ptr, AllowInlineSmiStorage allow_smi)
}
// static
-HeapObject HeapObject::FromAddress(Address address) {
- DCHECK_TAG_ALIGNED(address);
- return HeapObject(address + kHeapObjectTag);
-}
-
-// static
Heap* NeverReadOnlySpaceObject::GetHeap(const HeapObject object) {
return GetHeapFromWritableObject(object);
}
diff --git a/deps/v8/src/objects/heap-object.h b/deps/v8/src/objects/heap-object.h
index f42dc05b81..9ca51bdda1 100644
--- a/deps/v8/src/objects/heap-object.h
+++ b/deps/v8/src/objects/heap-object.h
@@ -5,10 +5,10 @@
#ifndef V8_OBJECTS_HEAP_OBJECT_H_
#define V8_OBJECTS_HEAP_OBJECT_H_
-#include "src/globals.h"
-#include "src/roots.h"
+#include "src/common/globals.h"
+#include "src/roots/roots.h"
-#include "src/objects.h"
+#include "src/objects/objects.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -85,7 +85,10 @@ class HeapObject : public Object {
#undef DECL_STRUCT_PREDICATE
// Converts an address to a HeapObject pointer.
- static inline HeapObject FromAddress(Address address);
+ static inline HeapObject FromAddress(Address address) {
+ DCHECK_TAG_ALIGNED(address);
+ return HeapObject(address + kHeapObjectTag);
+ }
// Returns the address of this HeapObject.
inline Address address() const { return ptr() - kHeapObjectTag; }
@@ -197,6 +200,9 @@ class HeapObject : public Object {
OBJECT_CONSTRUCTORS(HeapObject, Object);
};
+OBJECT_CONSTRUCTORS_IMPL(HeapObject, Object)
+CAST_ACCESSOR(HeapObject)
+
// Helper class for objects that can never be in RO space.
class NeverReadOnlySpaceObject {
public:
diff --git a/deps/v8/src/objects/instance-type-inl.h b/deps/v8/src/objects/instance-type-inl.h
index 5925c6aa92..2f867411f2 100644
--- a/deps/v8/src/objects/instance-type-inl.h
+++ b/deps/v8/src/objects/instance-type-inl.h
@@ -6,7 +6,7 @@
#define V8_OBJECTS_INSTANCE_TYPE_INL_H_
#include "src/objects/map-inl.h"
-#include "src/utils.h"
+#include "src/utils/utils.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -19,11 +19,6 @@ namespace InstanceTypeChecker {
// Define type checkers for classes with single instance type.
INSTANCE_TYPE_CHECKERS_SINGLE(INSTANCE_TYPE_CHECKER)
-#define TYPED_ARRAY_INSTANCE_TYPE_CHECKER(Type, type, TYPE, ctype) \
- INSTANCE_TYPE_CHECKER(Fixed##Type##Array, FIXED_##TYPE##_ARRAY_TYPE)
-TYPED_ARRAYS(TYPED_ARRAY_INSTANCE_TYPE_CHECKER)
-#undef TYPED_ARRAY_INSTANCE_TYPE_CHECKER
-
#define STRUCT_INSTANCE_TYPE_CHECKER(TYPE, Name, name) \
INSTANCE_TYPE_CHECKER(Name, TYPE)
STRUCT_LIST(STRUCT_INSTANCE_TYPE_CHECKER)
@@ -40,8 +35,7 @@ INSTANCE_TYPE_CHECKERS_RANGE(INSTANCE_TYPE_CHECKER_RANGE)
V8_INLINE bool IsFixedArrayBase(InstanceType instance_type) {
return IsFixedArray(instance_type) || IsFixedDoubleArray(instance_type) ||
- IsFixedTypedArrayBase(instance_type) || IsByteArray(instance_type) ||
- IsBytecodeArray(instance_type);
+ IsByteArray(instance_type) || IsBytecodeArray(instance_type);
}
V8_INLINE bool IsHeapObject(InstanceType instance_type) { return true; }
@@ -69,11 +63,6 @@ V8_INLINE bool IsJSReceiver(InstanceType instance_type) {
// pointer rather than looking up the instance type.
INSTANCE_TYPE_CHECKERS(TYPE_CHECKER)
-#define TYPED_ARRAY_TYPE_CHECKER(Type, type, TYPE, ctype) \
- TYPE_CHECKER(Fixed##Type##Array)
-TYPED_ARRAYS(TYPED_ARRAY_TYPE_CHECKER)
-#undef TYPED_ARRAY_TYPE_CHECKER
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/objects/instance-type.h b/deps/v8/src/objects/instance-type.h
index edbc428a5d..559ed34784 100644
--- a/deps/v8/src/objects/instance-type.h
+++ b/deps/v8/src/objects/instance-type.h
@@ -5,8 +5,8 @@
#ifndef V8_OBJECTS_INSTANCE_TYPE_H_
#define V8_OBJECTS_INSTANCE_TYPE_H_
-#include "src/elements-kind.h"
-#include "src/objects-definitions.h"
+#include "src/objects/elements-kind.h"
+#include "src/objects/objects-definitions.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -131,17 +131,6 @@ enum InstanceType : uint16_t {
BYTE_ARRAY_TYPE,
BYTECODE_ARRAY_TYPE,
FREE_SPACE_TYPE,
- FIXED_INT8_ARRAY_TYPE, // FIRST_FIXED_TYPED_ARRAY_TYPE
- FIXED_UINT8_ARRAY_TYPE,
- FIXED_INT16_ARRAY_TYPE,
- FIXED_UINT16_ARRAY_TYPE,
- FIXED_INT32_ARRAY_TYPE,
- FIXED_UINT32_ARRAY_TYPE,
- FIXED_FLOAT32_ARRAY_TYPE,
- FIXED_FLOAT64_ARRAY_TYPE,
- FIXED_UINT8_CLAMPED_ARRAY_TYPE,
- FIXED_BIGINT64_ARRAY_TYPE,
- FIXED_BIGUINT64_ARRAY_TYPE, // LAST_FIXED_TYPED_ARRAY_TYPE
FIXED_DOUBLE_ARRAY_TYPE,
FEEDBACK_METADATA_TYPE,
FILLER_TYPE, // LAST_DATA_TYPE
@@ -168,14 +157,18 @@ enum InstanceType : uint16_t {
PROMISE_REACTION_TYPE,
PROTOTYPE_INFO_TYPE,
SCRIPT_TYPE,
+ SOURCE_POSITION_TABLE_WITH_FRAME_CACHE_TYPE,
STACK_FRAME_INFO_TYPE,
STACK_TRACE_FRAME_TYPE,
+ TEMPLATE_OBJECT_DESCRIPTION_TYPE,
TUPLE2_TYPE,
TUPLE3_TYPE,
ARRAY_BOILERPLATE_DESCRIPTION_TYPE,
+ WASM_CAPI_FUNCTION_DATA_TYPE,
WASM_DEBUG_INFO_TYPE,
WASM_EXCEPTION_TAG_TYPE,
WASM_EXPORTED_FUNCTION_DATA_TYPE,
+ WASM_JS_FUNCTION_DATA_TYPE,
CALLABLE_TASK_TYPE, // FIRST_MICROTASK_TYPE
CALLBACK_TASK_TYPE,
@@ -190,14 +183,14 @@ enum InstanceType : uint16_t {
FIXED_ARRAY_TYPE, // FIRST_FIXED_ARRAY_TYPE
OBJECT_BOILERPLATE_DESCRIPTION_TYPE,
CLOSURE_FEEDBACK_CELL_ARRAY_TYPE,
- HASH_TABLE_TYPE, // FIRST_HASH_TABLE_TYPE
- ORDERED_HASH_MAP_TYPE, // FIRST_DICTIONARY_TYPE
+ HASH_TABLE_TYPE, // FIRST_HASH_TABLE_TYPE
+ ORDERED_HASH_MAP_TYPE,
ORDERED_HASH_SET_TYPE,
ORDERED_NAME_DICTIONARY_TYPE,
NAME_DICTIONARY_TYPE,
GLOBAL_DICTIONARY_TYPE,
NUMBER_DICTIONARY_TYPE,
- SIMPLE_NUMBER_DICTIONARY_TYPE, // LAST_DICTIONARY_TYPE
+ SIMPLE_NUMBER_DICTIONARY_TYPE,
STRING_TABLE_TYPE,
EPHEMERON_HASH_TABLE_TYPE, // LAST_HASH_TABLE_TYPE
SCOPE_INFO_TYPE,
@@ -330,9 +323,6 @@ enum InstanceType : uint16_t {
// Boundaries for testing if given HeapObject is a subclass of HashTable
FIRST_HASH_TABLE_TYPE = HASH_TABLE_TYPE,
LAST_HASH_TABLE_TYPE = EPHEMERON_HASH_TABLE_TYPE,
- // Boundaries for testing if given HeapObject is a subclass of Dictionary
- FIRST_DICTIONARY_TYPE = ORDERED_HASH_MAP_TYPE,
- LAST_DICTIONARY_TYPE = SIMPLE_NUMBER_DICTIONARY_TYPE,
// Boundaries for testing if given HeapObject is a subclass of WeakFixedArray.
FIRST_WEAK_FIXED_ARRAY_TYPE = WEAK_FIXED_ARRAY_TYPE,
LAST_WEAK_FIXED_ARRAY_TYPE = TRANSITION_ARRAY_TYPE,
@@ -342,9 +332,6 @@ enum InstanceType : uint16_t {
// Boundaries for testing if given HeapObject is a subclass of Microtask.
FIRST_MICROTASK_TYPE = CALLABLE_TASK_TYPE,
LAST_MICROTASK_TYPE = FINALIZATION_GROUP_CLEANUP_JOB_TASK_TYPE,
- // Boundaries for testing for a fixed typed array.
- FIRST_FIXED_TYPED_ARRAY_TYPE = FIXED_INT8_ARRAY_TYPE,
- LAST_FIXED_TYPED_ARRAY_TYPE = FIXED_BIGUINT64_ARRAY_TYPE,
// Boundary for promotion to old space.
LAST_DATA_TYPE = FILLER_TYPE,
// Boundary for objects represented as JSReceiver (i.e. JSObject or JSProxy).
@@ -385,6 +372,10 @@ STATIC_ASSERT(FIRST_NONSTRING_TYPE == Internals::kFirstNonstringType);
STATIC_ASSERT(ODDBALL_TYPE == Internals::kOddballType);
STATIC_ASSERT(FOREIGN_TYPE == Internals::kForeignType);
+// Make sure it doesn't matter whether we sign-extend or zero-extend these
+// values, because Torque treats InstanceType as signed.
+STATIC_ASSERT(LAST_TYPE < 1 << 15);
+
V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
InstanceType instance_type);
@@ -471,11 +462,9 @@ V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
V(SmallOrderedHashMap, SMALL_ORDERED_HASH_MAP_TYPE) \
V(SmallOrderedHashSet, SMALL_ORDERED_HASH_SET_TYPE) \
V(SmallOrderedNameDictionary, SMALL_ORDERED_NAME_DICTIONARY_TYPE) \
- V(SourcePositionTableWithFrameCache, TUPLE2_TYPE) \
V(StoreHandler, STORE_HANDLER_TYPE) \
V(StringTable, STRING_TABLE_TYPE) \
V(Symbol, SYMBOL_TYPE) \
- V(TemplateObjectDescription, TUPLE2_TYPE) \
V(TransitionArray, TRANSITION_ARRAY_TYPE) \
V(UncompiledDataWithoutPreparseData, \
UNCOMPILED_DATA_WITHOUT_PREPARSE_DATA_TYPE) \
@@ -511,10 +500,7 @@ V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
#define INSTANCE_TYPE_CHECKERS_RANGE(V) \
V(Context, FIRST_CONTEXT_TYPE, LAST_CONTEXT_TYPE) \
- V(Dictionary, FIRST_DICTIONARY_TYPE, LAST_DICTIONARY_TYPE) \
V(FixedArray, FIRST_FIXED_ARRAY_TYPE, LAST_FIXED_ARRAY_TYPE) \
- V(FixedTypedArrayBase, FIRST_FIXED_TYPED_ARRAY_TYPE, \
- LAST_FIXED_TYPED_ARRAY_TYPE) \
V(HashTable, FIRST_HASH_TABLE_TYPE, LAST_HASH_TABLE_TYPE) \
V(JSMapIterator, FIRST_MAP_ITERATOR_TYPE, LAST_MAP_ITERATOR_TYPE) \
V(JSSetIterator, FIRST_SET_ITERATOR_TYPE, LAST_SET_ITERATOR_TYPE) \
diff --git a/deps/v8/src/objects/intl-objects.cc b/deps/v8/src/objects/intl-objects.cc
index 8a43f36245..f2bc87ebac 100644
--- a/deps/v8/src/objects/intl-objects.cc
+++ b/deps/v8/src/objects/intl-objects.cc
@@ -13,24 +13,25 @@
#include <string>
#include <vector>
-#include "src/api-inl.h"
-#include "src/global-handles.h"
+#include "src/api/api-inl.h"
+#include "src/execution/isolate.h"
+#include "src/handles/global-handles.h"
#include "src/heap/factory.h"
-#include "src/isolate.h"
-#include "src/objects-inl.h"
#include "src/objects/js-collator-inl.h"
#include "src/objects/js-date-time-format-inl.h"
#include "src/objects/js-locale-inl.h"
#include "src/objects/js-number-format-inl.h"
+#include "src/objects/objects-inl.h"
+#include "src/objects/property-descriptor.h"
#include "src/objects/string.h"
-#include "src/property-descriptor.h"
-#include "src/string-case.h"
+#include "src/strings/string-case.h"
#include "unicode/basictz.h"
#include "unicode/brkiter.h"
#include "unicode/calendar.h"
#include "unicode/coll.h"
#include "unicode/datefmt.h"
#include "unicode/decimfmt.h"
+#include "unicode/formattedvalue.h"
#include "unicode/locid.h"
#include "unicode/normalizer2.h"
#include "unicode/numfmt.h"
@@ -153,7 +154,7 @@ void ToUpperWithSharpS(const Vector<const Char>& src,
inline int FindFirstUpperOrNonAscii(String s, int length) {
for (int index = 0; index < length; ++index) {
- uint16_t ch = s->Get(index);
+ uint16_t ch = s.Get(index);
if (V8_UNLIKELY(IsASCIIUpper(ch) || ch & ~0x7F)) {
return index;
}
@@ -168,11 +169,11 @@ const UChar* GetUCharBufferFromFlat(const String::FlatContent& flat,
if (flat.IsOneByte()) {
if (!*dest) {
dest->reset(NewArray<uc16>(length));
- CopyChars(dest->get(), flat.ToOneByteVector().start(), length);
+ CopyChars(dest->get(), flat.ToOneByteVector().begin(), length);
}
return reinterpret_cast<const UChar*>(dest->get());
} else {
- return reinterpret_cast<const UChar*>(flat.ToUC16Vector().start());
+ return reinterpret_cast<const UChar*>(flat.ToUC16Vector().begin());
}
}
@@ -192,15 +193,23 @@ const uint8_t* Intl::ToLatin1LowerTable() { return &kToLower[0]; }
icu::UnicodeString Intl::ToICUUnicodeString(Isolate* isolate,
Handle<String> string) {
- string = String::Flatten(isolate, string);
- {
- DisallowHeapAllocation no_gc;
- std::unique_ptr<uc16[]> sap;
- return icu::UnicodeString(
- GetUCharBufferFromFlat(string->GetFlatContent(no_gc), &sap,
- string->length()),
- string->length());
+ DCHECK(string->IsFlat());
+ DisallowHeapAllocation no_gc;
+ std::unique_ptr<uc16[]> sap;
+ // Short one-byte strings can be expanded on the stack to avoid allocating a
+ // temporary buffer.
+ constexpr int kShortStringSize = 80;
+ UChar short_string_buffer[kShortStringSize];
+ const UChar* uchar_buffer = nullptr;
+ const String::FlatContent& flat = string->GetFlatContent(no_gc);
+ int32_t length = string->length();
+ if (flat.IsOneByte() && length <= kShortStringSize) {
+ CopyChars(short_string_buffer, flat.ToOneByteVector().begin(), length);
+ uchar_buffer = short_string_buffer;
+ } else {
+ uchar_buffer = GetUCharBufferFromFlat(flat, &sap, length);
}
+ return icu::UnicodeString(uchar_buffer, length);
}
namespace {
@@ -254,19 +263,19 @@ MaybeHandle<String> LocaleConvertCase(Isolate* isolate, Handle<String> s,
// one-byte sliced string with a two-byte parent string.
// Called from TF builtins.
String Intl::ConvertOneByteToLower(String src, String dst) {
- DCHECK_EQ(src->length(), dst->length());
- DCHECK(src->IsOneByteRepresentation());
- DCHECK(src->IsFlat());
- DCHECK(dst->IsSeqOneByteString());
+ DCHECK_EQ(src.length(), dst.length());
+ DCHECK(src.IsOneByteRepresentation());
+ DCHECK(src.IsFlat());
+ DCHECK(dst.IsSeqOneByteString());
DisallowHeapAllocation no_gc;
- const int length = src->length();
- String::FlatContent src_flat = src->GetFlatContent(no_gc);
- uint8_t* dst_data = SeqOneByteString::cast(dst)->GetChars(no_gc);
+ const int length = src.length();
+ String::FlatContent src_flat = src.GetFlatContent(no_gc);
+ uint8_t* dst_data = SeqOneByteString::cast(dst).GetChars(no_gc);
if (src_flat.IsOneByte()) {
- const uint8_t* src_data = src_flat.ToOneByteVector().start();
+ const uint8_t* src_data = src_flat.ToOneByteVector().begin();
bool has_changed_character = false;
int index_to_first_unprocessed =
@@ -288,7 +297,7 @@ String Intl::ConvertOneByteToLower(String src, String dst) {
int index_to_first_unprocessed = FindFirstUpperOrNonAscii(src, length);
if (index_to_first_unprocessed == length) return src;
- const uint16_t* src_data = src_flat.ToUC16Vector().start();
+ const uint16_t* src_data = src_flat.ToUC16Vector().begin();
CopyChars(dst_data, src_data, index_to_first_unprocessed);
for (int index = index_to_first_unprocessed; index < length; ++index) {
dst_data[index] = ToLatin1Lower(static_cast<uint16_t>(src_data[index]));
@@ -347,7 +356,7 @@ MaybeHandle<String> Intl::ConvertToUpper(Isolate* isolate, Handle<String> s) {
bool has_changed_character = false;
int index_to_first_unprocessed = FastAsciiConvert<false>(
reinterpret_cast<char*>(result->GetChars(no_gc)),
- reinterpret_cast<const char*>(src.start()), length,
+ reinterpret_cast<const char*>(src.begin()), length,
&has_changed_character);
if (index_to_first_unprocessed == length) {
return has_changed_character ? result : s;
@@ -968,7 +977,7 @@ MaybeHandle<Object> Intl::StringLocaleCompare(Isolate* isolate,
Handle<JSFunction> constructor = Handle<JSFunction>(
JSFunction::cast(
- isolate->context()->native_context()->intl_collator_function()),
+ isolate->context().native_context().intl_collator_function()),
isolate);
Handle<JSCollator> collator;
@@ -978,10 +987,9 @@ MaybeHandle<Object> Intl::StringLocaleCompare(Isolate* isolate,
if (can_cache) {
isolate->set_icu_object_in_cache(
Isolate::ICUObjectCacheType::kDefaultCollator,
- std::static_pointer_cast<icu::UObject>(
- collator->icu_collator()->get()));
+ std::static_pointer_cast<icu::UMemory>(collator->icu_collator().get()));
}
- icu::Collator* icu_collator = collator->icu_collator()->raw();
+ icu::Collator* icu_collator = collator->icu_collator().raw();
return Intl::CompareStrings(isolate, *icu_collator, string1, string2);
}
@@ -992,6 +1000,21 @@ Handle<Object> Intl::CompareStrings(Isolate* isolate,
Handle<String> string2) {
Factory* factory = isolate->factory();
+ // Early return for identical strings.
+ if (string1.is_identical_to(string2)) {
+ return factory->NewNumberFromInt(UCollationResult::UCOL_EQUAL);
+ }
+
+ // Early return for empty strings.
+ if (string1->length() == 0) {
+ return factory->NewNumberFromInt(string2->length() == 0
+ ? UCollationResult::UCOL_EQUAL
+ : UCollationResult::UCOL_LESS);
+ }
+ if (string2->length() == 0) {
+ return factory->NewNumberFromInt(UCollationResult::UCOL_GREATER);
+ }
+
string1 = String::Flatten(isolate, string1);
string2 = String::Flatten(isolate, string2);
@@ -1025,9 +1048,10 @@ MaybeHandle<String> Intl::NumberToLocaleString(Isolate* isolate,
bool can_cache =
locales->IsUndefined(isolate) && options->IsUndefined(isolate);
if (can_cache) {
- icu::NumberFormat* cached_number_format =
- static_cast<icu::NumberFormat*>(isolate->get_cached_icu_object(
- Isolate::ICUObjectCacheType::kDefaultNumberFormat));
+ icu::number::LocalizedNumberFormatter* cached_number_format =
+ static_cast<icu::number::LocalizedNumberFormatter*>(
+ isolate->get_cached_icu_object(
+ Isolate::ICUObjectCacheType::kDefaultNumberFormat));
// We may use the cached icu::number::LocalizedNumberFormatter for a fast path.
if (cached_number_format != nullptr) {
return JSNumberFormat::FormatNumeric(isolate, *cached_number_format,
@@ -1037,7 +1061,7 @@ MaybeHandle<String> Intl::NumberToLocaleString(Isolate* isolate,
Handle<JSFunction> constructor = Handle<JSFunction>(
JSFunction::cast(
- isolate->context()->native_context()->intl_number_format_function()),
+ isolate->context().native_context().intl_number_format_function()),
isolate);
Handle<JSNumberFormat> number_format;
// 2. Let numberFormat be ? Construct(%NumberFormat%, « locales, options »).
@@ -1048,13 +1072,13 @@ MaybeHandle<String> Intl::NumberToLocaleString(Isolate* isolate,
if (can_cache) {
isolate->set_icu_object_in_cache(
Isolate::ICUObjectCacheType::kDefaultNumberFormat,
- std::static_pointer_cast<icu::UObject>(
- number_format->icu_number_format()->get()));
+ std::static_pointer_cast<icu::UMemory>(
+ number_format->icu_number_formatter().get()));
}
// Return FormatNumber(numberFormat, x).
- icu::NumberFormat* icu_number_format =
- number_format->icu_number_format()->raw();
+ icu::number::LocalizedNumberFormatter* icu_number_format =
+ number_format->icu_number_formatter().raw();
return JSNumberFormat::FormatNumeric(isolate, *icu_number_format,
numeric_obj);
}
@@ -1116,19 +1140,17 @@ Maybe<int> GetNumberOption(Isolate* isolate, Handle<JSReceiver> options,
} // namespace
-Maybe<bool> Intl::SetNumberFormatDigitOptions(Isolate* isolate,
- icu::DecimalFormat* number_format,
- Handle<JSReceiver> options,
- int mnfd_default,
- int mxfd_default) {
- CHECK_NOT_NULL(number_format);
+Maybe<Intl::NumberFormatDigitOptions> Intl::SetNumberFormatDigitOptions(
+ Isolate* isolate, Handle<JSReceiver> options, int mnfd_default,
+ int mxfd_default) {
+ Intl::NumberFormatDigitOptions digit_options;
// 5. Let mnid be ? GetNumberOption(options, "minimumIntegerDigits", 1, 21,
// 1).
int mnid;
if (!GetNumberOption(isolate, options, "minimumIntegerDigits", 1, 21, 1)
.To(&mnid)) {
- return Nothing<bool>();
+ return Nothing<NumberFormatDigitOptions>();
}
// 6. Let mnfd be ? GetNumberOption(options, "minimumFractionDigits", 0, 20,
@@ -1137,7 +1159,7 @@ Maybe<bool> Intl::SetNumberFormatDigitOptions(Isolate* isolate,
if (!GetNumberOption(isolate, options, "minimumFractionDigits", 0, 20,
mnfd_default)
.To(&mnfd)) {
- return Nothing<bool>();
+ return Nothing<NumberFormatDigitOptions>();
}
// 7. Let mxfdActualDefault be max( mnfd, mxfdDefault ).
@@ -1149,7 +1171,7 @@ Maybe<bool> Intl::SetNumberFormatDigitOptions(Isolate* isolate,
if (!GetNumberOption(isolate, options, "maximumFractionDigits", mnfd, 20,
mxfd_actual_default)
.To(&mxfd)) {
- return Nothing<bool>();
+ return Nothing<NumberFormatDigitOptions>();
}
// 9. Let mnsd be ? Get(options, "minimumSignificantDigits").
@@ -1158,7 +1180,7 @@ Maybe<bool> Intl::SetNumberFormatDigitOptions(Isolate* isolate,
isolate->factory()->minimumSignificantDigits_string();
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
isolate, mnsd_obj, JSReceiver::GetProperty(isolate, options, mnsd_str),
- Nothing<bool>());
+ Nothing<NumberFormatDigitOptions>());
// 10. Let mxsd be ? Get(options, "maximumSignificantDigits").
Handle<Object> mxsd_obj;
@@ -1166,45 +1188,43 @@ Maybe<bool> Intl::SetNumberFormatDigitOptions(Isolate* isolate,
isolate->factory()->maximumSignificantDigits_string();
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
isolate, mxsd_obj, JSReceiver::GetProperty(isolate, options, mxsd_str),
- Nothing<bool>());
+ Nothing<NumberFormatDigitOptions>());
// 11. Set intlObj.[[MinimumIntegerDigits]] to mnid.
- number_format->setMinimumIntegerDigits(mnid);
+ digit_options.minimum_integer_digits = mnid;
// 12. Set intlObj.[[MinimumFractionDigits]] to mnfd.
- number_format->setMinimumFractionDigits(mnfd);
+ digit_options.minimum_fraction_digits = mnfd;
// 13. Set intlObj.[[MaximumFractionDigits]] to mxfd.
- number_format->setMaximumFractionDigits(mxfd);
+ digit_options.maximum_fraction_digits = mxfd;
- bool significant_digits_used = false;
// 14. If mnsd is not undefined or mxsd is not undefined, then
if (!mnsd_obj->IsUndefined(isolate) || !mxsd_obj->IsUndefined(isolate)) {
// 14. a. Let mnsd be ? DefaultNumberOption(mnsd, 1, 21, 1).
int mnsd;
if (!DefaultNumberOption(isolate, mnsd_obj, 1, 21, 1, mnsd_str).To(&mnsd)) {
- return Nothing<bool>();
+ return Nothing<NumberFormatDigitOptions>();
}
// 14. b. Let mxsd be ? DefaultNumberOption(mxsd, mnsd, 21, 21).
int mxsd;
if (!DefaultNumberOption(isolate, mxsd_obj, mnsd, 21, 21, mxsd_str)
.To(&mxsd)) {
- return Nothing<bool>();
+ return Nothing<NumberFormatDigitOptions>();
}
- significant_digits_used = true;
-
// 14. c. Set intlObj.[[MinimumSignificantDigits]] to mnsd.
- number_format->setMinimumSignificantDigits(mnsd);
+ digit_options.minimum_significant_digits = mnsd;
// 14. d. Set intlObj.[[MaximumSignificantDigits]] to mxsd.
- number_format->setMaximumSignificantDigits(mxsd);
+ digit_options.maximum_significant_digits = mxsd;
+ } else {
+ digit_options.minimum_significant_digits = 0;
+ digit_options.maximum_significant_digits = 0;
}
- number_format->setSignificantDigitsUsed(significant_digits_used);
- number_format->setRoundingMode(icu::DecimalFormat::kRoundHalfUp);
- return Just(true);
+ return Just(digit_options);
}
namespace {
@@ -1447,36 +1467,47 @@ MaybeHandle<JSObject> Intl::SupportedLocalesOf(
}
namespace {
+
template <typename T>
bool IsValidExtension(const icu::Locale& locale, const char* key,
const std::string& value) {
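+  // uloc_toLegacyType() maps a BCP47 extension value to ICU's legacy keyword
+  // value and returns nullptr when |value| is not valid for |key|, so invalid
+  // values are rejected before consulting the keyword enumeration.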
+ const char* legacy_type = uloc_toLegacyType(key, value.c_str());
+ if (legacy_type == nullptr) {
+ return false;
+ }
UErrorCode status = U_ZERO_ERROR;
std::unique_ptr<icu::StringEnumeration> enumeration(
T::getKeywordValuesForLocale(key, icu::Locale(locale.getBaseName()),
false, status));
- if (U_SUCCESS(status)) {
- int32_t length;
- std::string legacy_type(uloc_toLegacyType(key, value.c_str()));
- for (const char* item = enumeration->next(&length, status); item != nullptr;
- item = enumeration->next(&length, status)) {
- if (U_SUCCESS(status) && legacy_type == item) {
- return true;
- }
+ if (U_FAILURE(status)) {
+ return false;
+ }
+ int32_t length;
+ for (const char* item = enumeration->next(&length, status);
+ U_SUCCESS(status) && item != nullptr;
+ item = enumeration->next(&length, status)) {
+ if (strcmp(legacy_type, item) == 0) {
+ return true;
}
}
return false;
}
-bool IsValidCalendar(const icu::Locale& locale, const std::string& value) {
- return IsValidExtension<icu::Calendar>(locale, "calendar", value);
-}
-
bool IsValidCollation(const icu::Locale& locale, const std::string& value) {
std::set<std::string> invalid_values = {"standard", "search"};
if (invalid_values.find(value) != invalid_values.end()) return false;
return IsValidExtension<icu::Collator>(locale, "collation", value);
}
+} // namespace
+
+bool Intl::IsValidCalendar(const icu::Locale& locale,
+ const std::string& value) {
+ return IsValidExtension<icu::Calendar>(locale, "calendar", value);
+}
+
+namespace {
+
bool IsValidNumberingSystem(const std::string& value) {
std::set<std::string> invalid_values = {"native", "traditio", "finance"};
if (invalid_values.find(value) != invalid_values.end()) return false;
@@ -1527,7 +1558,7 @@ std::map<std::string, std::string> LookupAndValidateUnicodeExtensions(
bool is_valid_value = false;
// 8.h.ii.1.a If keyLocaleData contains requestedValue, then
if (strcmp("ca", bcp47_key) == 0) {
- is_valid_value = IsValidCalendar(*icu_locale, bcp47_value);
+ is_valid_value = Intl::IsValidCalendar(*icu_locale, bcp47_value);
} else if (strcmp("co", bcp47_key) == 0) {
is_valid_value = IsValidCollation(*icu_locale, bcp47_value);
} else if (strcmp("hc", bcp47_key) == 0) {
@@ -1649,6 +1680,7 @@ Intl::ResolvedLocale Intl::ResolveLocale(
Managed<icu::UnicodeString> Intl::SetTextToBreakIterator(
Isolate* isolate, Handle<String> text, icu::BreakIterator* break_iterator) {
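+  // Flatten the string first: Intl::ToICUUnicodeString now expects (and
+  // DCHECKs) a flat string.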
+ text = String::Flatten(isolate, text);
icu::UnicodeString* u_text =
(icu::UnicodeString*)(Intl::ToICUUnicodeString(isolate, text).clone());
@@ -1858,6 +1890,29 @@ Maybe<Intl::MatcherOption> Intl::GetLocaleMatcher(Isolate* isolate,
Intl::MatcherOption::kLookup);
}
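+// Reads the "numberingSystem" option. Returns Just(true) and stores the name
+// in |result| when a valid name is present, Just(false) when the option is
+// absent, and Nothing<bool>() on exception (e.g. a RangeError thrown for an
+// invalid name).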
+Maybe<bool> Intl::GetNumberingSystem(Isolate* isolate,
+ Handle<JSReceiver> options,
+ const char* method,
+ std::unique_ptr<char[]>* result) {
+ const std::vector<const char*> empty_values = {};
+ Maybe<bool> maybe = Intl::GetStringOption(isolate, options, "numberingSystem",
+ empty_values, method, result);
+ MAYBE_RETURN(maybe, Nothing<bool>());
+ if (maybe.FromJust() && *result != nullptr) {
+ if (!IsValidNumberingSystem(result->get())) {
+ THROW_NEW_ERROR_RETURN_VALUE(
+ isolate,
+ NewRangeError(
+ MessageTemplate::kInvalid,
+ isolate->factory()->numberingSystem_string(),
+ isolate->factory()->NewStringFromAsciiChecked(result->get())),
+ Nothing<bool>());
+ }
+ return Just(true);
+ }
+ return Just(false);
+}
+
Intl::HourCycle Intl::ToHourCycle(const std::string& hc) {
if (hc == "h11") return Intl::HourCycle::kH11;
if (hc == "h12") return Intl::HourCycle::kH12;
@@ -1928,11 +1983,27 @@ Handle<String> Intl::NumberFieldToType(Isolate* isolate,
UNREACHABLE();
return Handle<String>();
+ case UNUM_COMPACT_FIELD:
+ return isolate->factory()->compact_string();
+ case UNUM_MEASURE_UNIT_FIELD:
+ return isolate->factory()->unit_string();
+
default:
UNREACHABLE();
return Handle<String>();
}
}
+// A helper function to convert an icu::FormattedValue to a String, shared by
+// several Intl objects.
+MaybeHandle<String> Intl::FormattedToString(
+ Isolate* isolate, const icu::FormattedValue& formatted) {
+ UErrorCode status = U_ZERO_ERROR;
+ icu::UnicodeString result = formatted.toString(status);
+ if (U_FAILURE(status)) {
+ THROW_NEW_ERROR(isolate, NewTypeError(MessageTemplate::kIcuError), String);
+ }
+ return Intl::ToString(isolate, result);
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/objects/intl-objects.h b/deps/v8/src/objects/intl-objects.h
index 5adb6fa2c8..1274fa0549 100644
--- a/deps/v8/src/objects/intl-objects.h
+++ b/deps/v8/src/objects/intl-objects.h
@@ -14,9 +14,9 @@
#include <string>
#include "src/base/timezone-cache.h"
-#include "src/contexts.h"
-#include "src/objects.h"
+#include "src/objects/contexts.h"
#include "src/objects/managed.h"
+#include "src/objects/objects.h"
#include "unicode/locid.h"
#include "unicode/uversion.h"
@@ -25,10 +25,10 @@
namespace U_ICU_NAMESPACE {
class BreakIterator;
class Collator;
-class DecimalFormat;
+class FormattedValue;
class SimpleDateFormat;
class UnicodeString;
-}
+} // namespace U_ICU_NAMESPACE
namespace v8 {
namespace internal {
@@ -171,9 +171,16 @@ class Intl {
Handle<Object> options);
// ecma402/#sec-setnfdigitoptions
- V8_WARN_UNUSED_RESULT static Maybe<bool> SetNumberFormatDigitOptions(
- Isolate* isolate, icu::DecimalFormat* number_format,
- Handle<JSReceiver> options, int mnfd_default, int mxfd_default);
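+  // Note: minimum/maximum_significant_digits are both 0 when significant
+  // digits were not requested in the options.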
+ struct NumberFormatDigitOptions {
+ int minimum_integer_digits;
+ int minimum_fraction_digits;
+ int maximum_fraction_digits;
+ int minimum_significant_digits;
+ int maximum_significant_digits;
+ };
+ V8_WARN_UNUSED_RESULT static Maybe<NumberFormatDigitOptions>
+ SetNumberFormatDigitOptions(Isolate* isolate, Handle<JSReceiver> options,
+ int mnfd_default, int mxfd_default);
static icu::Locale CreateICULocale(const std::string& bcp47_locale);
@@ -186,6 +193,10 @@ class Intl {
Isolate* isolate, const icu::UnicodeString& string, int32_t begin,
int32_t end);
+  // Helper function to convert a FormattedValue to a String.
+ V8_WARN_UNUSED_RESULT static MaybeHandle<String> FormattedToString(
+ Isolate* isolate, const icu::FormattedValue& formatted);
+
// Helper function to convert number field id to type string.
static Handle<String> NumberFieldToType(Isolate* isolate,
Handle<Object> numeric_obj,
@@ -244,6 +255,15 @@ class Intl {
V8_WARN_UNUSED_RESULT static Maybe<MatcherOption> GetLocaleMatcher(
Isolate* isolate, Handle<JSReceiver> options, const char* method);
+ // Shared function to read the "numberingSystem" option.
+ V8_WARN_UNUSED_RESULT static Maybe<bool> GetNumberingSystem(
+ Isolate* isolate, Handle<JSReceiver> options, const char* method,
+ std::unique_ptr<char[]>* result);
+
+  // Checks whether the calendar is valid for the given locale.
+ static bool IsValidCalendar(const icu::Locale& locale,
+ const std::string& value);
+
struct ResolvedLocale {
std::string locale;
icu::Locale icu_locale;
diff --git a/deps/v8/src/objects/intl-objects.tq b/deps/v8/src/objects/intl-objects.tq
new file mode 100644
index 0000000000..67d8537feb
--- /dev/null
+++ b/deps/v8/src/objects/intl-objects.tq
@@ -0,0 +1,64 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include 'src/objects/js-number-format.h'
+#include 'src/objects/js-objects.h'
+#include 'src/objects/js-plural-rules.h'
+#include 'src/objects/js-relative-time-format.h'
+#include 'src/objects/js-date-time-format.h'
+#include 'src/objects/js-list-format.h'
+#include 'src/objects/js-locale.h'
+#include 'src/objects/js-segment-iterator.h'
+#include 'src/objects/js-segmenter.h'
+
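+// These class definitions mirror the C++ layouts declared in the headers
+// included above; Torque uses them to generate the field-offset constants in
+// torque-generated/field-offsets-tq.h.
+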
+extern class JSDateTimeFormat extends JSObject {
+ icu_locale: Foreign; // Managed<icu::Locale>
+ icu_simple_date_format: Foreign; // Managed<icu::SimpleDateFormat>
+ icu_date_interval_format: Foreign; // Managed<icu::DateIntervalFormat>
+ bound_format: JSFunction | Undefined;
+ flags: Smi;
+}
+
+extern class JSListFormat extends JSObject {
+ locale: String;
+ icu_formatter: Foreign; // Managed<icu::ListFormatter>
+ flags: Smi;
+}
+
+extern class JSNumberFormat extends JSObject {
+ locale: String;
+ icu_number_formatter:
+ Foreign; // Managed<icu::number::LocalizedNumberFormatter>
+ bound_format: JSFunction | Undefined;
+ flags: Smi;
+}
+
+extern class JSPluralRules extends JSObject {
+ locale: String;
+ flags: Smi;
+ icu_plural_rules: Foreign; // Managed<icu::PluralRules>
+ icu_decimal_format: Foreign; // Managed<icu::DecimalFormat>
+}
+
+extern class JSRelativeTimeFormat extends JSObject {
+ locale: String;
+ icu_formatter: Foreign; // Managed<icu::RelativeDateTimeFormatter>
+ flags: Smi;
+}
+
+extern class JSLocale extends JSObject {
+ icu_locale: Foreign; // Managed<icu::Locale>
+}
+
+extern class JSSegmenter extends JSObject {
+ locale: String;
+ icu_break_iterator: Foreign; // Managed<icu::BreakIterator>
+ flags: Smi;
+}
+
+extern class JSSegmentIterator extends JSObject {
+ icu_break_iterator: Foreign; // Managed<icu::BreakIterator>
+ unicode_string: Foreign; // Managed<icu::UnicodeString>
+ flags: Smi;
+}
diff --git a/deps/v8/src/objects/js-array-buffer-inl.h b/deps/v8/src/objects/js-array-buffer-inl.h
index 39677093c2..061fec10f7 100644
--- a/deps/v8/src/objects/js-array-buffer-inl.h
+++ b/deps/v8/src/objects/js-array-buffer-inl.h
@@ -8,8 +8,8 @@
#include "src/objects/js-array-buffer.h"
#include "src/heap/heap-write-barrier-inl.h"
-#include "src/objects-inl.h"
#include "src/objects/js-objects-inl.h"
+#include "src/objects/objects-inl.h"
#include "src/wasm/wasm-engine.h"
// Has to be the last include (doesn't have include guards):
@@ -29,21 +29,19 @@ CAST_ACCESSOR(JSTypedArray)
CAST_ACCESSOR(JSDataView)
size_t JSArrayBuffer::byte_length() const {
- return READ_UINTPTR_FIELD(*this, kByteLengthOffset);
+ return ReadField<size_t>(kByteLengthOffset);
}
void JSArrayBuffer::set_byte_length(size_t value) {
- WRITE_UINTPTR_FIELD(*this, kByteLengthOffset, value);
+ WriteField<size_t>(kByteLengthOffset, value);
}
void* JSArrayBuffer::backing_store() const {
- intptr_t ptr = READ_INTPTR_FIELD(*this, kBackingStoreOffset);
- return reinterpret_cast<void*>(ptr);
+ return reinterpret_cast<void*>(ReadField<Address>(kBackingStoreOffset));
}
void JSArrayBuffer::set_backing_store(void* value, WriteBarrierMode mode) {
- intptr_t ptr = reinterpret_cast<intptr_t>(value);
- WRITE_INTPTR_FIELD(*this, kBackingStoreOffset, ptr);
+ WriteField<Address>(kBackingStoreOffset, reinterpret_cast<Address>(value));
}
size_t JSArrayBuffer::allocation_length() const {
@@ -93,11 +91,11 @@ void JSArrayBuffer::clear_padding() {
}
void JSArrayBuffer::set_bit_field(uint32_t bits) {
- WRITE_UINT32_FIELD(*this, kBitFieldOffset, bits);
+ WriteField<uint32_t>(kBitFieldOffset, bits);
}
uint32_t JSArrayBuffer::bit_field() const {
- return READ_UINT32_FIELD(*this, kBitFieldOffset);
+ return ReadField<uint32_t>(kBitFieldOffset);
}
// |bit_field| fields.
@@ -111,49 +109,58 @@ BIT_FIELD_ACCESSORS(JSArrayBuffer, bit_field, is_shared,
JSArrayBuffer::IsSharedBit)
size_t JSArrayBufferView::byte_offset() const {
- return READ_UINTPTR_FIELD(*this, kByteOffsetOffset);
+ return ReadField<size_t>(kByteOffsetOffset);
}
void JSArrayBufferView::set_byte_offset(size_t value) {
- WRITE_UINTPTR_FIELD(*this, kByteOffsetOffset, value);
+ WriteField<size_t>(kByteOffsetOffset, value);
}
size_t JSArrayBufferView::byte_length() const {
- return READ_UINTPTR_FIELD(*this, kByteLengthOffset);
+ return ReadField<size_t>(kByteLengthOffset);
}
void JSArrayBufferView::set_byte_length(size_t value) {
- WRITE_UINTPTR_FIELD(*this, kByteLengthOffset, value);
+ WriteField<size_t>(kByteLengthOffset, value);
}
ACCESSORS(JSArrayBufferView, buffer, Object, kBufferOffset)
bool JSArrayBufferView::WasDetached() const {
- return JSArrayBuffer::cast(buffer())->was_detached();
+ return JSArrayBuffer::cast(buffer()).was_detached();
}
-Object JSTypedArray::length() const { return READ_FIELD(*this, kLengthOffset); }
+size_t JSTypedArray::length() const { return ReadField<size_t>(kLengthOffset); }
-size_t JSTypedArray::length_value() const {
- double val = length()->Number();
- DCHECK_LE(val, kMaxSafeInteger); // 2^53-1
- DCHECK_GE(val, -kMaxSafeInteger); // -2^53+1
- DCHECK_LE(val, std::numeric_limits<size_t>::max());
- DCHECK_GE(val, std::numeric_limits<size_t>::min());
- return static_cast<size_t>(val);
+void JSTypedArray::set_length(size_t value) {
+ WriteField<size_t>(kLengthOffset, value);
}
-void JSTypedArray::set_length(Object value, WriteBarrierMode mode) {
- WRITE_FIELD(*this, kLengthOffset, value);
- CONDITIONAL_WRITE_BARRIER(*this, kLengthOffset, value, mode);
+void* JSTypedArray::external_pointer() const {
+ return reinterpret_cast<void*>(ReadField<Address>(kExternalPointerOffset));
+}
+
+void JSTypedArray::set_external_pointer(void* value) {
+ WriteField<Address>(kExternalPointerOffset, reinterpret_cast<Address>(value));
+}
+
+ACCESSORS(JSTypedArray, base_pointer, Object, kBasePointerOffset)
+
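+// For off-heap typed arrays, base_pointer is Smi::kZero and external_pointer
+// holds the backing-store address; for on-heap ones, base_pointer is the
+// elements object and external_pointer is the tag-adjusted offset of its data
+// (see ExternalPointerForOnHeapArray), so the sum below yields the data
+// address in both cases.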
+void* JSTypedArray::DataPtr() {
+ return reinterpret_cast<void*>(
+ base_pointer().ptr() + reinterpret_cast<intptr_t>(external_pointer()));
}
bool JSTypedArray::is_on_heap() const {
DisallowHeapAllocation no_gc;
// Checking that buffer()->backing_store() is not nullptr is not sufficient;
// it will be nullptr when byte_length is 0 as well.
- FixedTypedArrayBase fta = FixedTypedArrayBase::cast(elements());
- return fta->base_pointer()->ptr() == fta.ptr();
+ return base_pointer().ptr() == elements().ptr();
+}
+
+// static
+void* JSTypedArray::ExternalPointerForOnHeapArray() {
+ return reinterpret_cast<void*>(ByteArray::kHeaderSize - kHeapObjectTag);
}
// static
@@ -178,9 +185,13 @@ MaybeHandle<JSTypedArray> JSTypedArray::Validate(Isolate* isolate,
return array;
}
-#ifdef VERIFY_HEAP
-ACCESSORS(JSTypedArray, raw_length, Object, kLengthOffset)
-#endif
+void* JSDataView::data_pointer() const {
+ return reinterpret_cast<void*>(ReadField<Address>(kDataPointerOffset));
+}
+
+void JSDataView::set_data_pointer(void* value) {
+ WriteField<Address>(kDataPointerOffset, reinterpret_cast<Address>(value));
+}
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/objects/js-array-buffer.cc b/deps/v8/src/objects/js-array-buffer.cc
index f96ae7e752..a506920f95 100644
--- a/deps/v8/src/objects/js-array-buffer.cc
+++ b/deps/v8/src/objects/js-array-buffer.cc
@@ -5,8 +5,8 @@
#include "src/objects/js-array-buffer.h"
#include "src/objects/js-array-buffer-inl.h"
-#include "src/counters.h"
-#include "src/property-descriptor.h"
+#include "src/logging/counters.h"
+#include "src/objects/property-descriptor.h"
namespace v8 {
namespace internal {
@@ -69,7 +69,7 @@ void JSArrayBuffer::FreeBackingStore(Isolate* isolate, Allocation allocation) {
if (allocation.is_wasm_memory) {
wasm::WasmMemoryTracker* memory_tracker =
isolate->wasm_engine()->memory_tracker();
- memory_tracker->FreeMemoryIfIsWasmMemory(isolate, allocation.backing_store);
+ memory_tracker->FreeWasmMemory(isolate, allocation.backing_store);
} else {
isolate->array_buffer_allocator()->Free(allocation.allocation_base,
allocation.length);
@@ -150,10 +150,7 @@ Handle<JSArrayBuffer> JSTypedArray::MaterializeArrayBuffer(
Isolate* isolate = typed_array->GetIsolate();
- DCHECK(IsFixedTypedArrayElementsKind(typed_array->GetElementsKind()));
-
- Handle<FixedTypedArrayBase> fixed_typed_array(
- FixedTypedArrayBase::cast(typed_array->elements()), isolate);
+ DCHECK(IsTypedArrayElementsKind(typed_array->GetElementsKind()));
Handle<JSArrayBuffer> buffer(JSArrayBuffer::cast(typed_array->buffer()),
isolate);
@@ -162,14 +159,13 @@ Handle<JSArrayBuffer> JSTypedArray::MaterializeArrayBuffer(
void* backing_store =
isolate->array_buffer_allocator()->AllocateUninitialized(
- fixed_typed_array->DataSize());
+ typed_array->byte_length());
if (backing_store == nullptr) {
isolate->heap()->FatalProcessOutOfMemory(
"JSTypedArray::MaterializeArrayBuffer");
}
buffer->set_is_external(false);
- DCHECK_EQ(buffer->byte_length(),
- static_cast<uintptr_t>(fixed_typed_array->DataSize()));
+ DCHECK_EQ(buffer->byte_length(), typed_array->byte_length());
// Initialize backing store at last to avoid handling of |JSArrayBuffers| that
// are currently being constructed in the |ArrayBufferTracker|. The
// registration method below handles the case of registering a buffer that has
@@ -177,14 +173,12 @@ Handle<JSArrayBuffer> JSTypedArray::MaterializeArrayBuffer(
buffer->set_backing_store(backing_store);
// RegisterNewArrayBuffer expects a valid length for adjusting counters.
isolate->heap()->RegisterNewArrayBuffer(*buffer);
- memcpy(buffer->backing_store(), fixed_typed_array->DataPtr(),
- fixed_typed_array->DataSize());
- Handle<FixedTypedArrayBase> new_elements =
- isolate->factory()->NewFixedTypedArrayWithExternalPointer(
- fixed_typed_array->length(), typed_array->type(),
- static_cast<uint8_t*>(buffer->backing_store()));
-
- typed_array->set_elements(*new_elements);
+ memcpy(buffer->backing_store(), typed_array->DataPtr(),
+ typed_array->byte_length());
+
+ typed_array->set_elements(ReadOnlyRoots(isolate).empty_byte_array());
+ typed_array->set_external_pointer(backing_store);
+ typed_array->set_base_pointer(Smi::kZero);
DCHECK(!typed_array->is_on_heap());
return buffer;
@@ -226,7 +220,7 @@ Maybe<bool> JSTypedArray::DefineOwnProperty(Isolate* isolate,
NewTypeError(MessageTemplate::kInvalidTypedArrayIndex));
}
// 3b iv. Let length be O.[[ArrayLength]].
- size_t length = o->length_value();
+ size_t length = o->length();
// 3b v. If numericIndex ≥ length, return false.
if (o->WasDetached() || index >= length) {
RETURN_FAILURE(isolate, GetShouldThrow(isolate, should_throw),
@@ -271,13 +265,13 @@ Maybe<bool> JSTypedArray::DefineOwnProperty(Isolate* isolate,
}
ExternalArrayType JSTypedArray::type() {
- switch (elements()->map()->instance_type()) {
-#define INSTANCE_TYPE_TO_ARRAY_TYPE(Type, type, TYPE, ctype) \
- case FIXED_##TYPE##_ARRAY_TYPE: \
+ switch (map().elements_kind()) {
+#define ELEMENTS_KIND_TO_ARRAY_TYPE(Type, type, TYPE, ctype) \
+ case TYPE##_ELEMENTS: \
return kExternal##Type##Array;
- TYPED_ARRAYS(INSTANCE_TYPE_TO_ARRAY_TYPE)
-#undef INSTANCE_TYPE_TO_ARRAY_TYPE
+ TYPED_ARRAYS(ELEMENTS_KIND_TO_ARRAY_TYPE)
+#undef ELEMENTS_KIND_TO_ARRAY_TYPE
default:
UNREACHABLE();
@@ -285,13 +279,13 @@ ExternalArrayType JSTypedArray::type() {
}
size_t JSTypedArray::element_size() {
- switch (elements()->map()->instance_type()) {
-#define INSTANCE_TYPE_TO_ELEMENT_SIZE(Type, type, TYPE, ctype) \
- case FIXED_##TYPE##_ARRAY_TYPE: \
+ switch (map().elements_kind()) {
+#define ELEMENTS_KIND_TO_ELEMENT_SIZE(Type, type, TYPE, ctype) \
+ case TYPE##_ELEMENTS: \
return sizeof(ctype);
- TYPED_ARRAYS(INSTANCE_TYPE_TO_ELEMENT_SIZE)
-#undef INSTANCE_TYPE_TO_ELEMENT_SIZE
+ TYPED_ARRAYS(ELEMENTS_KIND_TO_ELEMENT_SIZE)
+#undef ELEMENTS_KIND_TO_ELEMENT_SIZE
default:
UNREACHABLE();
diff --git a/deps/v8/src/objects/js-array-buffer.h b/deps/v8/src/objects/js-array-buffer.h
index b77d1c9877..b22001f04a 100644
--- a/deps/v8/src/objects/js-array-buffer.h
+++ b/deps/v8/src/objects/js-array-buffer.h
@@ -172,16 +172,26 @@ class JSArrayBufferView : public JSObject {
JS_ARRAY_BUFFER_VIEW_FIELDS)
#undef JS_ARRAY_BUFFER_VIEW_FIELDS
- class BodyDescriptor;
+ STATIC_ASSERT(IsAligned(kByteOffsetOffset, kUIntptrSize));
+ STATIC_ASSERT(IsAligned(kByteLengthOffset, kUIntptrSize));
OBJECT_CONSTRUCTORS(JSArrayBufferView, JSObject);
};
class JSTypedArray : public JSArrayBufferView {
public:
+ // TODO(v8:4153): This should be equal to JSArrayBuffer::kMaxByteLength
+ // eventually.
+ static constexpr size_t kMaxLength = v8::TypedArray::kMaxLength;
+
// [length]: length of typed array in elements.
- DECL_ACCESSORS(length, Object)
- inline size_t length_value() const;
+ DECL_PRIMITIVE_ACCESSORS(length, size_t)
+
+ // [external_pointer]: TODO(v8:4153)
+ DECL_PRIMITIVE_ACCESSORS(external_pointer, void*)
+
+ // [base_pointer]: TODO(v8:4153)
+ DECL_ACCESSORS(base_pointer, Object)
// ES6 9.4.5.3
V8_WARN_UNUSED_RESULT static Maybe<bool> DefineOwnProperty(
@@ -195,9 +205,14 @@ class JSTypedArray : public JSArrayBufferView {
V8_EXPORT_PRIVATE Handle<JSArrayBuffer> GetBuffer();
+ // Use with care: returns raw pointer into heap.
+ inline void* DataPtr();
+
// Whether the buffer's backing store is on-heap or off-heap.
inline bool is_on_heap() const;
+ static inline void* ExternalPointerForOnHeapArray();
+
static inline MaybeHandle<JSTypedArray> Validate(Isolate* isolate,
Handle<Object> receiver,
const char* method_name);
@@ -207,32 +222,39 @@ class JSTypedArray : public JSArrayBufferView {
DECL_VERIFIER(JSTypedArray)
// Layout description.
-#define JS_TYPED_ARRAY_FIELDS(V) \
- /* Raw data fields. */ \
- V(kLengthOffset, kTaggedSize) \
- /* Header size. */ \
+#define JS_TYPED_ARRAY_FIELDS(V) \
+ /* Raw data fields. */ \
+ V(kLengthOffset, kUIntptrSize) \
+ V(kExternalPointerOffset, kSystemPointerSize) \
+ V(kBasePointerOffset, kTaggedSize) \
+ /* Header size. */ \
V(kHeaderSize, 0)
DEFINE_FIELD_OFFSET_CONSTANTS(JSArrayBufferView::kHeaderSize,
JS_TYPED_ARRAY_FIELDS)
#undef JS_TYPED_ARRAY_FIELDS
+ STATIC_ASSERT(IsAligned(kLengthOffset, kUIntptrSize));
+ STATIC_ASSERT(IsAligned(kExternalPointerOffset, kSystemPointerSize));
+
static const int kSizeWithEmbedderFields =
kHeaderSize +
v8::ArrayBufferView::kEmbedderFieldCount * kEmbedderDataSlotSize;
+ class BodyDescriptor;
+
private:
static Handle<JSArrayBuffer> MaterializeArrayBuffer(
Handle<JSTypedArray> typed_array);
-#ifdef VERIFY_HEAP
- DECL_ACCESSORS(raw_length, Object)
-#endif
OBJECT_CONSTRUCTORS(JSTypedArray, JSArrayBufferView);
};
class JSDataView : public JSArrayBufferView {
public:
+ // [data_pointer]: pointer to the actual data.
+ DECL_PRIMITIVE_ACCESSORS(data_pointer, void*)
+
DECL_CAST(JSDataView)
// Dispatched behavior.
@@ -240,10 +262,24 @@ class JSDataView : public JSArrayBufferView {
DECL_VERIFIER(JSDataView)
// Layout description.
+#define JS_DATA_VIEW_FIELDS(V) \
+ /* Raw data fields. */ \
+ V(kDataPointerOffset, kIntptrSize) \
+ /* Header size. */ \
+ V(kHeaderSize, 0)
+
+ DEFINE_FIELD_OFFSET_CONSTANTS(JSArrayBufferView::kHeaderSize,
+ JS_DATA_VIEW_FIELDS)
+#undef JS_DATA_VIEW_FIELDS
+
+ STATIC_ASSERT(IsAligned(kDataPointerOffset, kUIntptrSize));
+
static const int kSizeWithEmbedderFields =
kHeaderSize +
v8::ArrayBufferView::kEmbedderFieldCount * kEmbedderDataSlotSize;
+ class BodyDescriptor;
+
OBJECT_CONSTRUCTORS(JSDataView, JSArrayBufferView);
};
diff --git a/deps/v8/src/objects/js-array-inl.h b/deps/v8/src/objects/js-array-inl.h
index 31c8735f62..335fabba86 100644
--- a/deps/v8/src/objects/js-array-inl.h
+++ b/deps/v8/src/objects/js-array-inl.h
@@ -7,7 +7,7 @@
#include "src/objects/js-array.h"
-#include "src/objects-inl.h" // Needed for write barriers
+#include "src/objects/objects-inl.h" // Needed for write barriers
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -33,8 +33,8 @@ bool JSArray::SetLengthWouldNormalize(Heap* heap, uint32_t new_length) {
}
bool JSArray::AllowsSetLength() {
- bool result = elements()->IsFixedArray() || elements()->IsFixedDoubleArray();
- DCHECK(result == !HasFixedTypedArrayElements());
+ bool result = elements().IsFixedArray() || elements().IsFixedDoubleArray();
+ DCHECK(result == !HasTypedArrayElements());
return result;
}
@@ -55,7 +55,7 @@ void JSArray::SetContent(Handle<JSArray> array,
}
bool JSArray::HasArrayPrototype(Isolate* isolate) {
- return map()->prototype() == *isolate->initial_array_prototype();
+ return map().prototype() == *isolate->initial_array_prototype();
}
ACCESSORS(JSArrayIterator, iterated_object, Object, kIteratedObjectOffset)
@@ -63,7 +63,7 @@ ACCESSORS(JSArrayIterator, next_index, Object, kNextIndexOffset)
IterationKind JSArrayIterator::kind() const {
return static_cast<IterationKind>(
- Smi::cast(READ_FIELD(*this, kKindOffset))->value());
+ Smi::cast(READ_FIELD(*this, kKindOffset)).value());
}
void JSArrayIterator::set_kind(IterationKind kind) {
diff --git a/deps/v8/src/objects/js-array.h b/deps/v8/src/objects/js-array.h
index 23d62c810e..4bc296e31e 100644
--- a/deps/v8/src/objects/js-array.h
+++ b/deps/v8/src/objects/js-array.h
@@ -8,7 +8,7 @@
#include "src/objects/allocation-site.h"
#include "src/objects/fixed-array.h"
#include "src/objects/js-objects.h"
-#include "torque-generated/class-definitions-from-dsl.h"
+#include "torque-generated/field-offsets-tq.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
diff --git a/deps/v8/src/objects/js-break-iterator-inl.h b/deps/v8/src/objects/js-break-iterator-inl.h
index 7c22be25f6..177d9d352b 100644
--- a/deps/v8/src/objects/js-break-iterator-inl.h
+++ b/deps/v8/src/objects/js-break-iterator-inl.h
@@ -9,8 +9,8 @@
#ifndef V8_OBJECTS_JS_BREAK_ITERATOR_INL_H_
#define V8_OBJECTS_JS_BREAK_ITERATOR_INL_H_
-#include "src/objects-inl.h"
#include "src/objects/js-break-iterator.h"
+#include "src/objects/objects-inl.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
diff --git a/deps/v8/src/objects/js-break-iterator.cc b/deps/v8/src/objects/js-break-iterator.cc
index 4516b34aac..4879fb41a4 100644
--- a/deps/v8/src/objects/js-break-iterator.cc
+++ b/deps/v8/src/objects/js-break-iterator.cc
@@ -124,7 +124,7 @@ void JSV8BreakIterator::AdoptText(
Isolate* isolate, Handle<JSV8BreakIterator> break_iterator_holder,
Handle<String> text) {
icu::BreakIterator* break_iterator =
- break_iterator_holder->break_iterator()->raw();
+ break_iterator_holder->break_iterator().raw();
CHECK_NOT_NULL(break_iterator);
Managed<icu::UnicodeString> unicode_string =
Intl::SetTextToBreakIterator(isolate, text, break_iterator);
@@ -149,24 +149,24 @@ Handle<String> JSV8BreakIterator::TypeAsString() const {
Handle<Object> JSV8BreakIterator::Current(
Isolate* isolate, Handle<JSV8BreakIterator> break_iterator) {
return isolate->factory()->NewNumberFromInt(
- break_iterator->break_iterator()->raw()->current());
+ break_iterator->break_iterator().raw()->current());
}
Handle<Object> JSV8BreakIterator::First(
Isolate* isolate, Handle<JSV8BreakIterator> break_iterator) {
return isolate->factory()->NewNumberFromInt(
- break_iterator->break_iterator()->raw()->first());
+ break_iterator->break_iterator().raw()->first());
}
Handle<Object> JSV8BreakIterator::Next(
Isolate* isolate, Handle<JSV8BreakIterator> break_iterator) {
return isolate->factory()->NewNumberFromInt(
- break_iterator->break_iterator()->raw()->next());
+ break_iterator->break_iterator().raw()->next());
}
String JSV8BreakIterator::BreakType(Isolate* isolate,
Handle<JSV8BreakIterator> break_iterator) {
- int32_t status = break_iterator->break_iterator()->raw()->getRuleStatus();
+ int32_t status = break_iterator->break_iterator().raw()->getRuleStatus();
// Keep return values in sync with JavaScript BreakType enum.
if (status >= UBRK_WORD_NONE && status < UBRK_WORD_NONE_LIMIT) {
return ReadOnlyRoots(isolate).none_string();
diff --git a/deps/v8/src/objects/js-break-iterator.h b/deps/v8/src/objects/js-break-iterator.h
index 3eff347485..fe94c177c4 100644
--- a/deps/v8/src/objects/js-break-iterator.h
+++ b/deps/v8/src/objects/js-break-iterator.h
@@ -12,9 +12,9 @@
#include <set>
#include <string>
-#include "src/objects.h"
#include "src/objects/intl-objects.h"
#include "src/objects/managed.h"
+#include "src/objects/objects.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
diff --git a/deps/v8/src/objects/js-collator-inl.h b/deps/v8/src/objects/js-collator-inl.h
index b6fa239c31..e82351993d 100644
--- a/deps/v8/src/objects/js-collator-inl.h
+++ b/deps/v8/src/objects/js-collator-inl.h
@@ -9,8 +9,8 @@
#ifndef V8_OBJECTS_JS_COLLATOR_INL_H_
#define V8_OBJECTS_JS_COLLATOR_INL_H_
-#include "src/objects-inl.h"
#include "src/objects/js-collator.h"
+#include "src/objects/objects-inl.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
diff --git a/deps/v8/src/objects/js-collator.cc b/deps/v8/src/objects/js-collator.cc
index b75468c6f3..4a1e857403 100644
--- a/deps/v8/src/objects/js-collator.cc
+++ b/deps/v8/src/objects/js-collator.cc
@@ -8,9 +8,9 @@
#include "src/objects/js-collator.h"
-#include "src/isolate.h"
-#include "src/objects-inl.h"
+#include "src/execution/isolate.h"
#include "src/objects/js-collator-inl.h"
+#include "src/objects/objects-inl.h"
#include "unicode/coll.h"
#include "unicode/locid.h"
#include "unicode/strenum.h"
@@ -68,7 +68,7 @@ Handle<JSObject> JSCollator::ResolvedOptions(Isolate* isolate,
Handle<JSObject> options =
isolate->factory()->NewJSObject(isolate->object_function());
- icu::Collator* icu_collator = collator->icu_collator()->raw();
+ icu::Collator* icu_collator = collator->icu_collator().raw();
CHECK_NOT_NULL(icu_collator);
UErrorCode status = U_ZERO_ERROR;
diff --git a/deps/v8/src/objects/js-collator.h b/deps/v8/src/objects/js-collator.h
index e5d223aa24..2bedbf811a 100644
--- a/deps/v8/src/objects/js-collator.h
+++ b/deps/v8/src/objects/js-collator.h
@@ -12,12 +12,12 @@
#include <set>
#include <string>
+#include "src/execution/isolate.h"
#include "src/heap/factory.h"
-#include "src/isolate.h"
-#include "src/objects.h"
#include "src/objects/intl-objects.h"
#include "src/objects/js-objects.h"
#include "src/objects/managed.h"
+#include "src/objects/objects.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
diff --git a/deps/v8/src/objects/js-collection-inl.h b/deps/v8/src/objects/js-collection-inl.h
index 78b6cc5db3..fb0cf1652e 100644
--- a/deps/v8/src/objects/js-collection-inl.h
+++ b/deps/v8/src/objects/js-collection-inl.h
@@ -8,10 +8,10 @@
#include "src/objects/js-collection.h"
#include "src/heap/heap-write-barrier-inl.h"
-#include "src/objects-inl.h"
#include "src/objects/heap-object-inl.h"
+#include "src/objects/objects-inl.h"
#include "src/objects/ordered-hash-table-inl.h"
-#include "src/roots-inl.h"
+#include "src/roots/roots-inl.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -63,8 +63,8 @@ CAST_ACCESSOR(JSWeakSet)
Object JSMapIterator::CurrentValue() {
OrderedHashMap table = OrderedHashMap::cast(this->table());
int index = Smi::ToInt(this->index());
- Object value = table->ValueAt(index);
- DCHECK(!value->IsTheHole());
+ Object value = table.ValueAt(index);
+ DCHECK(!value.IsTheHole());
return value;
}
diff --git a/deps/v8/src/objects/js-collection-iterator.h b/deps/v8/src/objects/js-collection-iterator.h
index f25753738b..4952f04a72 100644
--- a/deps/v8/src/objects/js-collection-iterator.h
+++ b/deps/v8/src/objects/js-collection-iterator.h
@@ -5,9 +5,9 @@
#ifndef V8_OBJECTS_JS_COLLECTION_ITERATOR_H_
#define V8_OBJECTS_JS_COLLECTION_ITERATOR_H_
-#include "src/globals.h"
-#include "src/objects.h"
+#include "src/common/globals.h"
#include "src/objects/js-objects.h"
+#include "src/objects/objects.h"
#include "src/objects/smi.h"
// Has to be the last include (doesn't have include guards):
diff --git a/deps/v8/src/objects/js-collection.h b/deps/v8/src/objects/js-collection.h
index 0450de8fb1..6dfde352ca 100644
--- a/deps/v8/src/objects/js-collection.h
+++ b/deps/v8/src/objects/js-collection.h
@@ -5,8 +5,8 @@
#ifndef V8_OBJECTS_JS_COLLECTION_H_
#define V8_OBJECTS_JS_COLLECTION_H_
-#include "src/objects.h"
#include "src/objects/js-collection-iterator.h"
+#include "src/objects/objects.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -44,6 +44,8 @@ class JSSet : public JSCollection {
// Dispatched behavior.
DECL_PRINTER(JSSet)
DECL_VERIFIER(JSSet)
+ DEFINE_FIELD_OFFSET_CONSTANTS(JSCollection::kHeaderSize,
+ TORQUE_GENERATED_JSWEAK_SET_FIELDS)
OBJECT_CONSTRUCTORS(JSSet, JSCollection);
};
@@ -72,6 +74,8 @@ class JSMap : public JSCollection {
// Dispatched behavior.
DECL_PRINTER(JSMap)
DECL_VERIFIER(JSMap)
+ DEFINE_FIELD_OFFSET_CONSTANTS(JSCollection::kHeaderSize,
+ TORQUE_GENERATED_JSWEAK_MAP_FIELDS)
OBJECT_CONSTRUCTORS(JSMap, JSCollection);
};
@@ -121,6 +125,8 @@ class JSWeakCollection : public JSObject {
// Visit the whole object.
using BodyDescriptor = BodyDescriptorImpl;
+ static const int kSizeOfAllWeakCollections = kHeaderSize;
+
OBJECT_CONSTRUCTORS(JSWeakCollection, JSObject);
};
@@ -133,6 +139,9 @@ class JSWeakMap : public JSWeakCollection {
DECL_PRINTER(JSWeakMap)
DECL_VERIFIER(JSWeakMap)
+ DEFINE_FIELD_OFFSET_CONSTANTS(JSWeakCollection::kHeaderSize,
+ TORQUE_GENERATED_JSWEAK_MAP_FIELDS)
+ STATIC_ASSERT(kSize == kSizeOfAllWeakCollections);
OBJECT_CONSTRUCTORS(JSWeakMap, JSWeakCollection);
};
@@ -144,6 +153,9 @@ class JSWeakSet : public JSWeakCollection {
// Dispatched behavior.
DECL_PRINTER(JSWeakSet)
DECL_VERIFIER(JSWeakSet)
+ DEFINE_FIELD_OFFSET_CONSTANTS(JSWeakCollection::kHeaderSize,
+ TORQUE_GENERATED_JSWEAK_SET_FIELDS)
+ STATIC_ASSERT(kSize == kSizeOfAllWeakCollections);
OBJECT_CONSTRUCTORS(JSWeakSet, JSWeakCollection);
};
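The kSizeOfAllWeakCollections constant and the STATIC_ASSERTs added above pin JSWeakMap and JSWeakSet to one shared instance size, so generic weak-collection code can assume a single layout. A tiny sketch of the same compile-time guard, with hypothetical sizes in place of the Torque-generated constants:

// Hypothetical layout constants standing in for the generated ones.
constexpr int kHeaderSize = 24;
constexpr int kSizeOfAllWeakCollections = kHeaderSize;
constexpr int kWeakMapSize = kHeaderSize;
constexpr int kWeakSetSize = kHeaderSize;

// Compilation fails if either subclass drifts from the shared layout.
static_assert(kWeakMapSize == kSizeOfAllWeakCollections, "layout drift");
static_assert(kWeakSetSize == kSizeOfAllWeakCollections, "layout drift");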
diff --git a/deps/v8/src/objects/js-date-time-format-inl.h b/deps/v8/src/objects/js-date-time-format-inl.h
index 1657241b07..8947ea7b4c 100644
--- a/deps/v8/src/objects/js-date-time-format-inl.h
+++ b/deps/v8/src/objects/js-date-time-format-inl.h
@@ -9,8 +9,8 @@
#ifndef V8_OBJECTS_JS_DATE_TIME_FORMAT_INL_H_
#define V8_OBJECTS_JS_DATE_TIME_FORMAT_INL_H_
-#include "src/objects-inl.h"
#include "src/objects/js-date-time-format.h"
+#include "src/objects/objects-inl.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -20,11 +20,11 @@ namespace internal {
OBJECT_CONSTRUCTORS_IMPL(JSDateTimeFormat, JSObject)
-ACCESSORS(JSDateTimeFormat, icu_locale, Managed<icu::Locale>, kICULocaleOffset)
+ACCESSORS(JSDateTimeFormat, icu_locale, Managed<icu::Locale>, kIcuLocaleOffset)
ACCESSORS(JSDateTimeFormat, icu_simple_date_format,
- Managed<icu::SimpleDateFormat>, kICUSimpleDateFormatOffset)
+ Managed<icu::SimpleDateFormat>, kIcuSimpleDateFormatOffset)
ACCESSORS(JSDateTimeFormat, icu_date_interval_format,
- Managed<icu::DateIntervalFormat>, kICUDateIntervalFormatOffset)
+ Managed<icu::DateIntervalFormat>, kIcuDateIntervalFormatOffset)
ACCESSORS(JSDateTimeFormat, bound_format, Object, kBoundFormatOffset)
SMI_ACCESSORS(JSDateTimeFormat, flags, kFlagsOffset)
diff --git a/deps/v8/src/objects/js-date-time-format.cc b/deps/v8/src/objects/js-date-time-format.cc
index eda95f8773..8730e0a39b 100644
--- a/deps/v8/src/objects/js-date-time-format.cc
+++ b/deps/v8/src/objects/js-date-time-format.cc
@@ -12,9 +12,9 @@
#include <string>
#include <vector>
-#include "src/date.h"
+#include "src/date/date.h"
+#include "src/execution/isolate.h"
#include "src/heap/factory.h"
-#include "src/isolate.h"
#include "src/objects/intl-objects.h"
#include "src/objects/js-date-time-format-inl.h"
@@ -360,8 +360,8 @@ MaybeHandle<JSObject> JSDateTimeFormat::ResolvedOptions(
Handle<Object> resolved_obj;
CHECK(!date_time_format->icu_locale().is_null());
- CHECK_NOT_NULL(date_time_format->icu_locale()->raw());
- icu::Locale* icu_locale = date_time_format->icu_locale()->raw();
+ CHECK_NOT_NULL(date_time_format->icu_locale().raw());
+ icu::Locale* icu_locale = date_time_format->icu_locale().raw();
Maybe<std::string> maybe_locale_str = Intl::ToLanguageTag(*icu_locale);
MAYBE_RETURN(maybe_locale_str, MaybeHandle<JSObject>());
std::string locale_str = maybe_locale_str.FromJust();
@@ -369,7 +369,7 @@ MaybeHandle<JSObject> JSDateTimeFormat::ResolvedOptions(
factory->NewStringFromAsciiChecked(locale_str.c_str());
icu::SimpleDateFormat* icu_simple_date_format =
- date_time_format->icu_simple_date_format()->raw();
+ date_time_format->icu_simple_date_format().raw();
// calendar
const icu::Calendar* calendar = icu_simple_date_format->getCalendar();
// getType() returns legacy calendar type name instead of LDML/BCP47 calendar
@@ -580,7 +580,7 @@ MaybeHandle<String> JSDateTimeFormat::DateTimeFormat(
}
// 5. Return FormatDateTime(dtf, x).
icu::SimpleDateFormat* format =
- date_time_format->icu_simple_date_format()->raw();
+ date_time_format->icu_simple_date_format().raw();
return FormatDateTime(isolate, *format, x);
}
@@ -612,7 +612,7 @@ MaybeHandle<String> JSDateTimeFormat::ToLocaleDateTime(
String);
}
- double const x = Handle<JSDate>::cast(date)->value()->Number();
+ double const x = Handle<JSDate>::cast(date)->value().Number();
// 2. If x is NaN, return "Invalid Date"
if (std::isnan(x)) {
return factory->Invalid_Date_string();
@@ -640,9 +640,8 @@ MaybeHandle<String> JSDateTimeFormat::ToLocaleDateTime(
// 4. Let dateFormat be ? Construct(%DateTimeFormat%, « locales, options »).
Handle<JSFunction> constructor = Handle<JSFunction>(
- JSFunction::cast(isolate->context()
- ->native_context()
- ->intl_date_time_format_function()),
+ JSFunction::cast(
+ isolate->context().native_context().intl_date_time_format_function()),
isolate);
Handle<JSObject> obj;
ASSIGN_RETURN_ON_EXCEPTION(
@@ -658,12 +657,12 @@ MaybeHandle<String> JSDateTimeFormat::ToLocaleDateTime(
if (can_cache) {
isolate->set_icu_object_in_cache(
- cache_type, std::static_pointer_cast<icu::UObject>(
- date_time_format->icu_simple_date_format()->get()));
+ cache_type, std::static_pointer_cast<icu::UMemory>(
+ date_time_format->icu_simple_date_format().get()));
}
// 5. Return FormatDateTime(dateFormat, x).
icu::SimpleDateFormat* format =
- date_time_format->icu_simple_date_format()->raw();
+ date_time_format->icu_simple_date_format().raw();
return FormatDateTime(isolate, *format, x);
}
@@ -779,7 +778,7 @@ MaybeHandle<JSObject> JSDateTimeFormat::ToDateTimeOptions(
MaybeHandle<JSDateTimeFormat> JSDateTimeFormat::UnwrapDateTimeFormat(
Isolate* isolate, Handle<JSReceiver> format_holder) {
Handle<Context> native_context =
- Handle<Context>(isolate->context()->native_context(), isolate);
+ Handle<Context>(isolate->context().native_context(), isolate);
Handle<JSFunction> constructor = Handle<JSFunction>(
JSFunction::cast(native_context->intl_date_time_format_function()),
isolate);
@@ -959,14 +958,41 @@ std::unique_ptr<icu::SimpleDateFormat> CreateICUDateFormatFromCache(
cache.Pointer()->Create(icu_locale, skeleton, generator));
}
-std::unique_ptr<icu::DateIntervalFormat> CreateICUDateIntervalFormat(
- const icu::Locale& icu_locale, const icu::UnicodeString& skeleton) {
+icu::UnicodeString SkeletonFromDateFormat(
+ const icu::SimpleDateFormat& icu_date_format) {
+ icu::UnicodeString pattern;
+ pattern = icu_date_format.toPattern(pattern);
+
+ UErrorCode status = U_ZERO_ERROR;
+ icu::UnicodeString skeleton =
+ icu::DateTimePatternGenerator::staticGetSkeleton(pattern, status);
+ CHECK(U_SUCCESS(status));
+ return skeleton;
+}
+
+icu::DateIntervalFormat* LazyCreateDateIntervalFormat(
+ Isolate* isolate, Handle<JSDateTimeFormat> date_time_format) {
+ Managed<icu::DateIntervalFormat> managed_format =
+ date_time_format->icu_date_interval_format();
+ if (managed_format.get()) {
+ return managed_format.raw();
+ }
+ icu::SimpleDateFormat* icu_simple_date_format =
+ date_time_format->icu_simple_date_format().raw();
UErrorCode status = U_ZERO_ERROR;
std::unique_ptr<icu::DateIntervalFormat> date_interval_format(
- icu::DateIntervalFormat::createInstance(skeleton, icu_locale, status));
- if (U_FAILURE(status)) return std::unique_ptr<icu::DateIntervalFormat>();
- CHECK_NOT_NULL(date_interval_format.get());
- return date_interval_format;
+ icu::DateIntervalFormat::createInstance(
+ SkeletonFromDateFormat(*icu_simple_date_format),
+ *(date_time_format->icu_locale().raw()), status));
+ if (U_FAILURE(status)) {
+ return nullptr;
+ }
+ date_interval_format->setTimeZone(icu_simple_date_format->getTimeZone());
+ Handle<Managed<icu::DateIntervalFormat>> managed_interval_format =
+ Managed<icu::DateIntervalFormat>::FromUniquePtr(
+ isolate, 0, std::move(date_interval_format));
+ date_time_format->set_icu_date_interval_format(*managed_interval_format);
+ return (*managed_interval_format).raw();
}
Intl::HourCycle HourCycleFromPattern(const icu::UnicodeString pattern) {
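LazyCreateDateIntervalFormat above defers the costly icu::DateIntervalFormat construction until formatRange is first called, then caches the result on the JSDateTimeFormat (Initialize now stores a null Managed slot instead of eagerly building one). A reduced sketch of the same lazy-create-and-cache pattern, with hypothetical types in place of the ICU and V8 ones:

#include <memory>

// Hypothetical expensive resource standing in for icu::DateIntervalFormat.
struct IntervalFormatter {
  static std::unique_ptr<IntervalFormatter> Create(bool* ok) {
    *ok = true;  // real ICU creation can fail and must be checked
    return std::make_unique<IntervalFormatter>();
  }
};

class Holder {
 public:
  // Returns the cached formatter, creating it on first use; returns
  // nullptr on failure, mirroring LazyCreateDateIntervalFormat.
  IntervalFormatter* GetOrCreate() {
    if (cached_) return cached_.get();
    bool ok = false;
    std::unique_ptr<IntervalFormatter> fresh = IntervalFormatter::Create(&ok);
    if (!ok) return nullptr;
    cached_ = std::move(fresh);
    return cached_.get();
  }

 private:
  std::unique_ptr<IntervalFormatter> cached_;
};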
@@ -1103,18 +1129,6 @@ std::unique_ptr<icu::SimpleDateFormat> DateTimeStylePattern(
generator);
}
-icu::UnicodeString SkeletonFromDateFormat(
- const icu::SimpleDateFormat& icu_date_format) {
- icu::UnicodeString pattern;
- pattern = icu_date_format.toPattern(pattern);
-
- UErrorCode status = U_ZERO_ERROR;
- icu::UnicodeString skeleton =
- icu::DateTimePatternGenerator::staticGetSkeleton(pattern, status);
- CHECK(U_SUCCESS(status));
- return skeleton;
-}
-
class DateTimePatternGeneratorCache {
public:
// Return a cloned copy that the caller has to free.
@@ -1146,6 +1160,7 @@ MaybeHandle<JSDateTimeFormat> JSDateTimeFormat::Initialize(
Isolate* isolate, Handle<JSDateTimeFormat> date_time_format,
Handle<Object> locales, Handle<Object> input_options) {
date_time_format->set_flags(0);
+ Factory* factory = isolate->factory();
// 1. Let requestedLocales be ? CanonicalizeLocaleList(locales).
Maybe<std::vector<std::string>> maybe_requested_locales =
Intl::CanonicalizeLocaleList(isolate, locales);
@@ -1163,6 +1178,36 @@ MaybeHandle<JSDateTimeFormat> JSDateTimeFormat::Initialize(
// 4. Let matcher be ? GetOption(options, "localeMatcher", "string",
// « "lookup", "best fit" », "best fit").
// 5. Set opt.[[localeMatcher]] to matcher.
+
+ std::unique_ptr<char[]> calendar_str = nullptr;
+ std::unique_ptr<char[]> numbering_system_str = nullptr;
+ if (FLAG_harmony_intl_add_calendar_numbering_system) {
+ const std::vector<const char*> empty_values = {};
+ // 6. Let calendar be ? GetOption(options, "calendar",
+ // "string", undefined, undefined).
+ Maybe<bool> maybe_calendar =
+ Intl::GetStringOption(isolate, options, "calendar", empty_values,
+ "Intl.NumberFormat", &calendar_str);
+ MAYBE_RETURN(maybe_calendar, MaybeHandle<JSDateTimeFormat>());
+ if (maybe_calendar.FromJust() && calendar_str != nullptr) {
+ icu::Locale default_locale;
+ if (!Intl::IsValidCalendar(default_locale, calendar_str.get())) {
+ THROW_NEW_ERROR(
+ isolate,
+ NewRangeError(
+ MessageTemplate::kInvalid, factory->calendar_string(),
+ factory->NewStringFromAsciiChecked(calendar_str.get())),
+ JSDateTimeFormat);
+ }
+ }
+
+ // 8. Let numberingSystem be ? GetOption(options, "numberingSystem",
+ // "string", undefined, undefined).
+ Maybe<bool> maybe_numberingSystem = Intl::GetNumberingSystem(
+ isolate, options, "Intl.NumberFormat", &numbering_system_str);
+ MAYBE_RETURN(maybe_numberingSystem, MaybeHandle<JSDateTimeFormat>());
+ }
+
Maybe<Intl::MatcherOption> maybe_locale_matcher =
Intl::GetLocaleMatcher(isolate, options, "Intl.DateTimeFormat");
MAYBE_RETURN(maybe_locale_matcher, MaybeHandle<JSDateTimeFormat>());
@@ -1206,6 +1251,17 @@ MaybeHandle<JSDateTimeFormat> JSDateTimeFormat::Initialize(
icu::Locale icu_locale = r.icu_locale;
DCHECK(!icu_locale.isBogus());
+ UErrorCode status = U_ZERO_ERROR;
+ if (calendar_str != nullptr) {
+ icu_locale.setUnicodeKeywordValue("ca", calendar_str.get(), status);
+ CHECK(U_SUCCESS(status));
+ }
+
+ if (numbering_system_str != nullptr) {
+ icu_locale.setUnicodeKeywordValue("nu", numbering_system_str.get(), status);
+ CHECK(U_SUCCESS(status));
+ }
+
// 17. Let timeZone be ? Get(options, "timeZone").
const std::vector<const char*> empty_values;
std::unique_ptr<char[]> timezone = nullptr;
@@ -1216,11 +1272,11 @@ MaybeHandle<JSDateTimeFormat> JSDateTimeFormat::Initialize(
std::unique_ptr<icu::TimeZone> tz = CreateTimeZone(isolate, timezone.get());
if (tz.get() == nullptr) {
- THROW_NEW_ERROR(isolate,
- NewRangeError(MessageTemplate::kInvalidTimeZone,
- isolate->factory()->NewStringFromAsciiChecked(
- timezone.get())),
- JSDateTimeFormat);
+ THROW_NEW_ERROR(
+ isolate,
+ NewRangeError(MessageTemplate::kInvalidTimeZone,
+ factory->NewStringFromAsciiChecked(timezone.get())),
+ JSDateTimeFormat);
}
std::unique_ptr<icu::Calendar> calendar(
@@ -1229,11 +1285,11 @@ MaybeHandle<JSDateTimeFormat> JSDateTimeFormat::Initialize(
// 18.b If the result of IsValidTimeZoneName(timeZone) is false, then
// i. Throw a RangeError exception.
if (calendar.get() == nullptr) {
- THROW_NEW_ERROR(isolate,
- NewRangeError(MessageTemplate::kInvalidTimeZone,
- isolate->factory()->NewStringFromAsciiChecked(
- timezone.get())),
- JSDateTimeFormat);
+ THROW_NEW_ERROR(
+ isolate,
+ NewRangeError(MessageTemplate::kInvalidTimeZone,
+ factory->NewStringFromAsciiChecked(timezone.get())),
+ JSDateTimeFormat);
}
static base::LazyInstance<DateTimePatternGeneratorCache>::type
@@ -1243,7 +1299,6 @@ MaybeHandle<JSDateTimeFormat> JSDateTimeFormat::Initialize(
generator_cache.Pointer()->CreateGenerator(icu_locale));
// 15.Let hcDefault be dataLocaleData.[[hourCycle]].
- UErrorCode status = U_ZERO_ERROR;
icu::UnicodeString hour_pattern = generator->getBestPattern("jjmm", status);
CHECK(U_SUCCESS(status));
Intl::HourCycle hc_default = HourCycleFromPattern(hour_pattern);
@@ -1297,7 +1352,6 @@ MaybeHandle<JSDateTimeFormat> JSDateTimeFormat::Initialize(
DateTimeStyle date_style = DateTimeStyle::kUndefined;
DateTimeStyle time_style = DateTimeStyle::kUndefined;
std::unique_ptr<icu::SimpleDateFormat> icu_date_format;
- std::unique_ptr<icu::DateIntervalFormat> icu_date_interval_format;
if (FLAG_harmony_intl_datetime_style) {
// 28. Let dateStyle be ? GetOption(options, "dateStyle", "string", «
@@ -1340,10 +1394,6 @@ MaybeHandle<JSDateTimeFormat> JSDateTimeFormat::Initialize(
time_style != DateTimeStyle::kUndefined) {
icu_date_format = DateTimeStylePattern(date_style, time_style, icu_locale,
hc, *generator);
- if (FLAG_harmony_intl_date_format_range) {
- icu_date_interval_format = CreateICUDateIntervalFormat(
- icu_locale, SkeletonFromDateFormat(*icu_date_format));
- }
}
}
// 33. Else,
@@ -1397,10 +1447,6 @@ MaybeHandle<JSDateTimeFormat> JSDateTimeFormat::Initialize(
FATAL("Failed to create ICU date format, are ICU data files missing?");
}
}
- if (FLAG_harmony_intl_date_format_range) {
- icu_date_interval_format =
- CreateICUDateIntervalFormat(icu_locale, skeleton_ustr);
- }
// g. If dateTimeFormat.[[Hour]] is not undefined, then
if (!has_hour_option) {
@@ -1449,12 +1495,10 @@ MaybeHandle<JSDateTimeFormat> JSDateTimeFormat::Initialize(
Managed<icu::SimpleDateFormat>::FromUniquePtr(isolate, 0,
std::move(icu_date_format));
date_time_format->set_icu_simple_date_format(*managed_format);
- if (FLAG_harmony_intl_date_format_range) {
- Handle<Managed<icu::DateIntervalFormat>> managed_interval_format =
- Managed<icu::DateIntervalFormat>::FromUniquePtr(
- isolate, 0, std::move(icu_date_interval_format));
- date_time_format->set_icu_date_interval_format(*managed_interval_format);
- }
+
+ Handle<Managed<icu::DateIntervalFormat>> managed_interval_format =
+ Managed<icu::DateIntervalFormat>::FromRawPtr(isolate, 0, nullptr);
+ date_time_format->set_icu_date_interval_format(*managed_interval_format);
return date_time_format;
}
@@ -1518,7 +1562,7 @@ MaybeHandle<JSArray> JSDateTimeFormat::FormatToParts(
double date_value) {
Factory* factory = isolate->factory();
icu::SimpleDateFormat* format =
- date_time_format->icu_simple_date_format()->raw();
+ date_time_format->icu_simple_date_format().raw();
CHECK_NOT_NULL(format);
icu::UnicodeString formatted;
@@ -1591,75 +1635,176 @@ Handle<String> JSDateTimeFormat::HourCycleAsString() const {
}
}
-MaybeHandle<String> JSDateTimeFormat::FormatRange(
- Isolate* isolate, Handle<JSDateTimeFormat> date_time_format, double x,
- double y) {
- // TODO(ftang): Merge the following with FormatRangeToParts after
- // the landing of ICU64 to make it cleaner.
+enum Source { kShared, kStartRange, kEndRange };
- // #sec-partitiondatetimerangepattern
- // 1. Let x be TimeClip(x).
- x = DateCache::TimeClip(x);
- // 2. If x is NaN, throw a RangeError exception.
- if (std::isnan(x)) {
- THROW_NEW_ERROR(isolate, NewRangeError(MessageTemplate::kInvalidTimeValue),
- String);
+namespace {
+
+class SourceTracker {
+ public:
+ SourceTracker() { start_[0] = start_[1] = limit_[0] = limit_[1] = 0; }
+ void Add(int32_t field, int32_t start, int32_t limit) {
+ CHECK_LT(field, 2);
+ start_[field] = start;
+ limit_[field] = limit;
}
- // 3. Let y be TimeClip(y).
- y = DateCache::TimeClip(y);
- // 4. If y is NaN, throw a RangeError exception.
- if (std::isnan(y)) {
- THROW_NEW_ERROR(isolate, NewRangeError(MessageTemplate::kInvalidTimeValue),
- String);
+
+ Source GetSource(int32_t start, int32_t limit) const {
+ Source source = Source::kShared;
+ if (FieldContains(0, start, limit)) {
+ source = Source::kStartRange;
+ } else if (FieldContains(1, start, limit)) {
+ source = Source::kEndRange;
+ }
+ return source;
}
- icu::DateIntervalFormat* date_interval_format =
- date_time_format->icu_date_interval_format()->raw();
- CHECK_NOT_NULL(date_interval_format);
- icu::DateInterval interval(x, y);
+ private:
+ int32_t start_[2];
+ int32_t limit_[2];
- icu::UnicodeString result;
- icu::FieldPosition fpos;
+ bool FieldContains(int32_t field, int32_t start, int32_t limit) const {
+ CHECK_LT(field, 2);
+ return (start_[field] <= start) && (start <= limit_[field]) &&
+ (start_[field] <= limit) && (limit <= limit_[field]);
+ }
+};
+
+Handle<String> SourceString(Isolate* isolate, Source source) {
+ switch (source) {
+ case Source::kShared:
+ return ReadOnlyRoots(isolate).shared_string_handle();
+ case Source::kStartRange:
+ return ReadOnlyRoots(isolate).startRange_string_handle();
+ case Source::kEndRange:
+ return ReadOnlyRoots(isolate).endRange_string_handle();
+ UNREACHABLE();
+ }
+}
+
+Maybe<bool> AddPartForFormatRange(Isolate* isolate, Handle<JSArray> array,
+ const icu::UnicodeString& string,
+ int32_t index, int32_t field, int32_t start,
+ int32_t end, const SourceTracker& tracker) {
+ Handle<String> substring;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, substring,
+ Intl::ToString(isolate, string, start, end),
+ Nothing<bool>());
+ Intl::AddElement(isolate, array, index,
+ IcuDateFieldIdToDateType(field, isolate), substring,
+ isolate->factory()->source_string(),
+ SourceString(isolate, tracker.GetSource(start, end)));
+ return Just(true);
+}
+
+// A helper function to convert the FormattedDateInterval to a
+// MaybeHandle<JSArray> for the implementation of formatRangeToParts.
+MaybeHandle<JSArray> FormattedDateIntervalToJSArray(
+ Isolate* isolate, const icu::FormattedValue& formatted) {
UErrorCode status = U_ZERO_ERROR;
- date_interval_format->format(&interval, result, fpos, status);
- CHECK(U_SUCCESS(status));
+ icu::UnicodeString result = formatted.toString(status);
- return Intl::ToString(isolate, result);
+ Factory* factory = isolate->factory();
+ Handle<JSArray> array = factory->NewJSArray(0);
+ icu::ConstrainedFieldPosition cfpos;
+ int index = 0;
+ int32_t previous_end_pos = 0;
+ SourceTracker tracker;
+ while (formatted.nextPosition(cfpos, status)) {
+ int32_t category = cfpos.getCategory();
+ int32_t field = cfpos.getField();
+ int32_t start = cfpos.getStart();
+ int32_t limit = cfpos.getLimit();
+
+ if (category == UFIELD_CATEGORY_DATE_INTERVAL_SPAN) {
+ CHECK_LE(field, 2);
+ tracker.Add(field, start, limit);
+ } else {
+ CHECK(category == UFIELD_CATEGORY_DATE);
+ if (start > previous_end_pos) {
+ // Add "literal" from the previous end position to the start if
+ // necessary.
+ Maybe<bool> maybe_added =
+ AddPartForFormatRange(isolate, array, result, index, -1,
+ previous_end_pos, start, tracker);
+ MAYBE_RETURN(maybe_added, Handle<JSArray>());
+ previous_end_pos = start;
+ index++;
+ }
+ Maybe<bool> maybe_added = AddPartForFormatRange(
+ isolate, array, result, index, field, start, limit, tracker);
+ MAYBE_RETURN(maybe_added, Handle<JSArray>());
+ previous_end_pos = limit;
+ ++index;
+ }
+ }
+ int32_t end = result.length();
+ // Add "literal" in the end if necessary.
+ if (end > previous_end_pos) {
+ Maybe<bool> maybe_added = AddPartForFormatRange(
+ isolate, array, result, index, -1, previous_end_pos, end, tracker);
+ MAYBE_RETURN(maybe_added, Handle<JSArray>());
+ }
+
+ if (U_FAILURE(status)) {
+ THROW_NEW_ERROR(isolate, NewTypeError(MessageTemplate::kIcuError), JSArray);
+ }
+
+ JSObject::ValidateElements(*array);
+ return array;
}
-MaybeHandle<JSArray> JSDateTimeFormat::FormatRangeToParts(
+// The shared code between formatRange and formatRangeToParts
+template <typename T>
+MaybeHandle<T> FormatRangeCommon(
Isolate* isolate, Handle<JSDateTimeFormat> date_time_format, double x,
- double y) {
- // TODO(ftang): Merge the following with FormatRangeToParts after
- // the landing of ICU64 to make it cleaner.
-
+ double y,
+ MaybeHandle<T> (*formatToResult)(Isolate*, const icu::FormattedValue&)) {
// #sec-partitiondatetimerangepattern
// 1. Let x be TimeClip(x).
x = DateCache::TimeClip(x);
// 2. If x is NaN, throw a RangeError exception.
if (std::isnan(x)) {
THROW_NEW_ERROR(isolate, NewRangeError(MessageTemplate::kInvalidTimeValue),
- JSArray);
+ T);
}
// 3. Let y be TimeClip(y).
y = DateCache::TimeClip(y);
// 4. If y is NaN, throw a RangeError exception.
if (std::isnan(y)) {
THROW_NEW_ERROR(isolate, NewRangeError(MessageTemplate::kInvalidTimeValue),
- JSArray);
+ T);
}
+ icu::DateInterval interval(x, y);
- icu::DateIntervalFormat* date_interval_format =
- date_time_format->icu_date_interval_format()->raw();
- CHECK_NOT_NULL(date_interval_format);
- Factory* factory = isolate->factory();
- Handle<JSArray> result = factory->NewJSArray(0);
+ icu::DateIntervalFormat* format =
+ LazyCreateDateIntervalFormat(isolate, date_time_format);
+ if (format == nullptr) {
+ THROW_NEW_ERROR(isolate, NewTypeError(MessageTemplate::kIcuError), T);
+ }
- // TODO(ftang) To be implemented after ICU64 landed that support
- // DateIntervalFormat::formatToValue() and FormattedDateInterval.
+ UErrorCode status = U_ZERO_ERROR;
+ icu::FormattedDateInterval formatted =
+ format->formatToValue(interval, status);
+ if (U_FAILURE(status)) {
+ THROW_NEW_ERROR(isolate, NewTypeError(MessageTemplate::kIcuError), T);
+ }
+ return formatToResult(isolate, formatted);
+}
- JSObject::ValidateElements(*result);
- return result;
+} // namespace
+
+MaybeHandle<String> JSDateTimeFormat::FormatRange(
+ Isolate* isolate, Handle<JSDateTimeFormat> date_time_format, double x,
+ double y) {
+ return FormatRangeCommon<String>(isolate, date_time_format, x, y,
+ Intl::FormattedToString);
+}
+
+MaybeHandle<JSArray> JSDateTimeFormat::FormatRangeToParts(
+ Isolate* isolate, Handle<JSDateTimeFormat> date_time_format, double x,
+ double y) {
+ return FormatRangeCommon<JSArray>(isolate, date_time_format, x, y,
+ FormattedDateIntervalToJSArray);
}
} // namespace internal
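FormatRangeCommon above factors the TimeClip/NaN validation and the ICU formatToValue call out of formatRange and formatRangeToParts; only the final conversion differs, so it is passed in as a function pointer parameterized on the result type (Intl::FormattedToString for strings, FormattedDateIntervalToJSArray for parts, with SourceTracker classifying each span as shared, startRange, or endRange). A reduced sketch of the pattern, with hypothetical stand-ins for MaybeHandle<T> and icu::FormattedValue:

#include <optional>
#include <string>
#include <vector>

struct Formatted { std::string text; };  // stand-in for icu::FormattedValue
template <typename T>
using Maybe = std::optional<T>;  // stand-in for MaybeHandle<T>

Maybe<std::string> ToStringResult(const Formatted& f) { return f.text; }
Maybe<std::vector<std::string>> ToPartsResult(const Formatted& f) {
  return std::vector<std::string>{f.text};
}

// Shared validation and formatting; only the conversion step differs.
template <typename T>
Maybe<T> FormatRangeCommonSketch(double x, double y,
                                 Maybe<T> (*to_result)(const Formatted&)) {
  if (x != x || y != y) return std::nullopt;  // NaN check, like TimeClip
  Formatted formatted{"start - end"};
  return to_result(formatted);
}

Maybe<std::string> FormatRange(double x, double y) {
  return FormatRangeCommonSketch<std::string>(x, y, ToStringResult);
}
Maybe<std::vector<std::string>> FormatRangeToParts(double x, double y) {
  return FormatRangeCommonSketch<std::vector<std::string>>(x, y,
                                                           ToPartsResult);
}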
diff --git a/deps/v8/src/objects/js-date-time-format.h b/deps/v8/src/objects/js-date-time-format.h
index cf73af2aa8..664ccdcdf7 100644
--- a/deps/v8/src/objects/js-date-time-format.h
+++ b/deps/v8/src/objects/js-date-time-format.h
@@ -12,9 +12,10 @@
#include <set>
#include <string>
-#include "src/isolate.h"
+#include "src/execution/isolate.h"
#include "src/objects/intl-objects.h"
#include "src/objects/managed.h"
+#include "torque-generated/field-offsets-tq.h"
#include "unicode/uversion.h"
// Has to be the last include (doesn't have include guards):
@@ -92,18 +93,8 @@ class JSDateTimeFormat : public JSObject {
enum class DateTimeStyle { kUndefined, kFull, kLong, kMedium, kShort };
// Layout description.
-#define JS_DATE_TIME_FORMAT_FIELDS(V) \
- V(kICULocaleOffset, kTaggedSize) \
- V(kICUSimpleDateFormatOffset, kTaggedSize) \
- V(kICUDateIntervalFormatOffset, kTaggedSize) \
- V(kBoundFormatOffset, kTaggedSize) \
- V(kFlagsOffset, kTaggedSize) \
- /* Total size. */ \
- V(kSize, 0)
-
DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
- JS_DATE_TIME_FORMAT_FIELDS)
-#undef JS_DATE_TIME_FORMAT_FIELDS
+ TORQUE_GENERATED_JSDATE_TIME_FORMAT_FIELDS)
inline void set_hour_cycle(Intl::HourCycle hour_cycle);
inline Intl::HourCycle hour_cycle() const;
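The hand-written V(name, size) lists being deleted above are replaced by Torque-generated field lists, but both feed the same DEFINE_FIELD_OFFSET_CONSTANTS idiom: each entry contributes an offset enumerator and advances a running cursor by its size. A simplified sketch of how such a macro can expand (not V8's exact implementation):

#include <cstdio>

#define DEMO_FIELDS(V) \
  V(kLocaleOffset, 8)  \
  V(kFlagsOffset, 8)   \
  /* Total size. */    \
  V(kSize, 0)

// Each entry emits the field's offset plus an End marker one past it,
// so the next enumerator resumes at offset + size.
#define DEFINE_ONE_OFFSET(Name, Size) Name, Name##End = Name + (Size)-1,

enum {
  kHeaderSize = 16,
  kDemoFieldsStart = kHeaderSize - 1,
  DEMO_FIELDS(DEFINE_ONE_OFFSET)
};

int main() {
  std::printf("%d %d %d\n", kLocaleOffset, kFlagsOffset, kSize);  // 16 24 32
}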
diff --git a/deps/v8/src/objects/js-generator-inl.h b/deps/v8/src/objects/js-generator-inl.h
index c2895e29f9..d0fe2cd90e 100644
--- a/deps/v8/src/objects/js-generator-inl.h
+++ b/deps/v8/src/objects/js-generator-inl.h
@@ -8,7 +8,7 @@
#include "src/objects/js-generator.h"
#include "src/objects/js-promise-inl.h"
-#include "src/objects-inl.h" // Needed for write barriers
+#include "src/objects/objects-inl.h" // Needed for write barriers
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
diff --git a/deps/v8/src/objects/js-list-format-inl.h b/deps/v8/src/objects/js-list-format-inl.h
index dd3f4dceb9..96e61c2205 100644
--- a/deps/v8/src/objects/js-list-format-inl.h
+++ b/deps/v8/src/objects/js-list-format-inl.h
@@ -9,8 +9,8 @@
#ifndef V8_OBJECTS_JS_LIST_FORMAT_INL_H_
#define V8_OBJECTS_JS_LIST_FORMAT_INL_H_
-#include "src/objects-inl.h"
#include "src/objects/js-list-format.h"
+#include "src/objects/objects-inl.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -23,7 +23,7 @@ OBJECT_CONSTRUCTORS_IMPL(JSListFormat, JSObject)
// Base list format accessors.
ACCESSORS(JSListFormat, locale, String, kLocaleOffset)
ACCESSORS(JSListFormat, icu_formatter, Managed<icu::ListFormatter>,
- kICUFormatterOffset)
+ kIcuFormatterOffset)
SMI_ACCESSORS(JSListFormat, flags, kFlagsOffset)
inline void JSListFormat::set_style(Style style) {
diff --git a/deps/v8/src/objects/js-list-format.cc b/deps/v8/src/objects/js-list-format.cc
index c4329401a4..84691194ec 100644
--- a/deps/v8/src/objects/js-list-format.cc
+++ b/deps/v8/src/objects/js-list-format.cc
@@ -11,15 +11,15 @@
#include <memory>
#include <vector>
-#include "src/elements-inl.h"
-#include "src/elements.h"
+#include "src/execution/isolate.h"
#include "src/heap/factory.h"
-#include "src/isolate.h"
-#include "src/objects-inl.h"
+#include "src/objects/elements-inl.h"
+#include "src/objects/elements.h"
#include "src/objects/intl-objects.h"
#include "src/objects/js-array-inl.h"
#include "src/objects/js-list-format-inl.h"
#include "src/objects/managed.h"
+#include "src/objects/objects-inl.h"
#include "unicode/fieldpos.h"
#include "unicode/fpositer.h"
#include "unicode/listformatter.h"
@@ -286,8 +286,9 @@ Maybe<std::vector<icu::UnicodeString>> ToUnicodeStringArray(
factory->NewNumber(i), factory->String_string()),
Nothing<std::vector<icu::UnicodeString>>());
}
- result.push_back(
- Intl::ToICUUnicodeString(isolate, Handle<String>::cast(item)));
+ Handle<String> item_str = Handle<String>::cast(item);
+ if (!item_str->IsFlat()) item_str = String::Flatten(isolate, item_str);
+ result.push_back(Intl::ToICUUnicodeString(isolate, item_str));
}
DCHECK(!array->HasDictionaryElements());
return Just(result);
@@ -296,7 +297,7 @@ Maybe<std::vector<icu::UnicodeString>> ToUnicodeStringArray(
template <typename T>
MaybeHandle<T> FormatListCommon(
Isolate* isolate, Handle<JSListFormat> format, Handle<JSArray> list,
- MaybeHandle<T> (*formatToResult)(Isolate*, const icu::FormattedList&)) {
+ MaybeHandle<T> (*formatToResult)(Isolate*, const icu::FormattedValue&)) {
DCHECK(!list->IsUndefined());
// ecma402 #sec-createpartsfromlist
// 2. If list contains any element value such that Type(value) is not String,
@@ -306,7 +307,7 @@ MaybeHandle<T> FormatListCommon(
MAYBE_RETURN(maybe_array, Handle<T>());
std::vector<icu::UnicodeString> array = maybe_array.FromJust();
- icu::ListFormatter* formatter = format->icu_formatter()->raw();
+ icu::ListFormatter* formatter = format->icu_formatter().raw();
CHECK_NOT_NULL(formatter);
UErrorCode status = U_ZERO_ERROR;
@@ -318,18 +319,6 @@ MaybeHandle<T> FormatListCommon(
return formatToResult(isolate, formatted);
}
-// A helper function to convert the FormattedList to a
-// MaybeHandle<String> for the implementation of format.
-MaybeHandle<String> FormattedToString(Isolate* isolate,
- const icu::FormattedList& formatted) {
- UErrorCode status = U_ZERO_ERROR;
- icu::UnicodeString result = formatted.toString(status);
- if (U_FAILURE(status)) {
- THROW_NEW_ERROR(isolate, NewTypeError(MessageTemplate::kIcuError), String);
- }
- return Intl::ToString(isolate, result);
-}
-
Handle<String> IcuFieldIdToType(Isolate* isolate, int32_t field_id) {
switch (field_id) {
case ULISTFMT_LITERAL_FIELD:
@@ -345,8 +334,8 @@ Handle<String> IcuFieldIdToType(Isolate* isolate, int32_t field_id) {
// A helper function to convert the FormattedList to a
// MaybeHandle<JSArray> for the implementation of formatToParts.
-MaybeHandle<JSArray> FormattedToJSArray(Isolate* isolate,
- const icu::FormattedList& formatted) {
+MaybeHandle<JSArray> FormattedListToJSArray(
+ Isolate* isolate, const icu::FormattedValue& formatted) {
Handle<JSArray> array = isolate->factory()->NewJSArray(0);
icu::ConstrainedFieldPosition cfpos;
cfpos.constrainCategory(UFIELD_CATEGORY_LIST);
@@ -375,13 +364,15 @@ MaybeHandle<JSArray> FormattedToJSArray(Isolate* isolate,
MaybeHandle<String> JSListFormat::FormatList(Isolate* isolate,
Handle<JSListFormat> format,
Handle<JSArray> list) {
- return FormatListCommon<String>(isolate, format, list, FormattedToString);
+ return FormatListCommon<String>(isolate, format, list,
+ Intl::FormattedToString);
}
// ecma402 #sec-formatlisttoparts
MaybeHandle<JSArray> JSListFormat::FormatListToParts(
Isolate* isolate, Handle<JSListFormat> format, Handle<JSArray> list) {
- return FormatListCommon<JSArray>(isolate, format, list, FormattedToJSArray);
+ return FormatListCommon<JSArray>(isolate, format, list,
+ FormattedListToJSArray);
}
const std::set<std::string>& JSListFormat::GetAvailableLocales() {
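The signature change from const icu::FormattedList& to const icu::FormattedValue& is what allows one shared Intl::FormattedToString helper: in ICU 64 every Formatted* result type (FormattedList, FormattedDateInterval, and so on) derives from the icu::FormattedValue interface. A reduced sketch of that design, with hypothetical stand-in types:

#include <string>

// Stand-ins mirroring icu::FormattedValue and two of its subclasses.
struct FormattedValueLike {
  virtual ~FormattedValueLike() = default;
  virtual std::string toString() const = 0;
};
struct FormattedListLike : FormattedValueLike {
  std::string toString() const override { return "a, b, and c"; }
};
struct FormattedIntervalLike : FormattedValueLike {
  std::string toString() const override { return "Jan 1 - Feb 2"; }
};

// One conversion helper serves every formatted type, just as
// Intl::FormattedToString now serves both list and range formatting.
std::string FormattedToString(const FormattedValueLike& value) {
  return value.toString();
}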
diff --git a/deps/v8/src/objects/js-list-format.h b/deps/v8/src/objects/js-list-format.h
index ee576b3ff2..0284d05d42 100644
--- a/deps/v8/src/objects/js-list-format.h
+++ b/deps/v8/src/objects/js-list-format.h
@@ -12,10 +12,10 @@
#include <set>
#include <string>
+#include "src/execution/isolate.h"
#include "src/heap/factory.h"
-#include "src/isolate.h"
-#include "src/objects.h"
#include "src/objects/managed.h"
+#include "src/objects/objects.h"
#include "unicode/uversion.h"
// Has to be the last include (doesn't have include guards):
@@ -105,15 +105,8 @@ class JSListFormat : public JSObject {
DECL_VERIFIER(JSListFormat)
// Layout description.
-#define JS_LIST_FORMAT_FIELDS(V) \
- V(kLocaleOffset, kTaggedSize) \
- V(kICUFormatterOffset, kTaggedSize) \
- V(kFlagsOffset, kTaggedSize) \
- /* Header size. */ \
- V(kSize, 0)
-
- DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize, JS_LIST_FORMAT_FIELDS)
-#undef JS_LIST_FORMAT_FIELDS
+ DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
+ TORQUE_GENERATED_JSLIST_FORMAT_FIELDS)
OBJECT_CONSTRUCTORS(JSListFormat, JSObject);
};
diff --git a/deps/v8/src/objects/js-locale-inl.h b/deps/v8/src/objects/js-locale-inl.h
index 44e223ef06..17859ea6ab 100644
--- a/deps/v8/src/objects/js-locale-inl.h
+++ b/deps/v8/src/objects/js-locale-inl.h
@@ -9,9 +9,9 @@
#ifndef V8_OBJECTS_JS_LOCALE_INL_H_
#define V8_OBJECTS_JS_LOCALE_INL_H_
-#include "src/api-inl.h"
-#include "src/objects-inl.h"
+#include "src/api/api-inl.h"
#include "src/objects/js-locale.h"
+#include "src/objects/objects-inl.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -21,7 +21,7 @@ namespace internal {
OBJECT_CONSTRUCTORS_IMPL(JSLocale, JSObject)
-ACCESSORS(JSLocale, icu_locale, Managed<icu::Locale>, kICULocaleOffset)
+ACCESSORS(JSLocale, icu_locale, Managed<icu::Locale>, kIcuLocaleOffset)
CAST_ACCESSOR(JSLocale)
diff --git a/deps/v8/src/objects/js-locale.cc b/deps/v8/src/objects/js-locale.cc
index 4e35c16b0f..509f9a3069 100644
--- a/deps/v8/src/objects/js-locale.cc
+++ b/deps/v8/src/objects/js-locale.cc
@@ -13,14 +13,15 @@
#include <string>
#include <vector>
-#include "src/api.h"
-#include "src/global-handles.h"
+#include "src/api/api.h"
+#include "src/execution/isolate.h"
+#include "src/handles/global-handles.h"
#include "src/heap/factory.h"
-#include "src/isolate.h"
-#include "src/objects-inl.h"
#include "src/objects/intl-objects.h"
#include "src/objects/js-locale-inl.h"
+#include "src/objects/objects-inl.h"
#include "unicode/char16ptr.h"
+#include "unicode/localebuilder.h"
#include "unicode/locid.h"
#include "unicode/uloc.h"
#include "unicode/unistr.h"
@@ -30,21 +31,6 @@ namespace internal {
namespace {
-// Helper function to check a locale is valid. It will return false if
-// the length of the extension fields are incorrect. For example, en-u-a or
-// en-u-co-b will return false.
-bool IsValidLocale(const icu::Locale& locale) {
- // icu::Locale::toLanguageTag won't return U_STRING_NOT_TERMINATED_WARNING for
- // incorrect locale yet. So we still need the following uloc_toLanguageTag
- // TODO(ftang): Change to use icu::Locale::toLanguageTag once it indicate
- // error.
- char result[ULOC_FULLNAME_CAPACITY];
- UErrorCode status = U_ZERO_ERROR;
- uloc_toLanguageTag(locale.getName(), result, ULOC_FULLNAME_CAPACITY, true,
- &status);
- return U_SUCCESS(status) && status != U_STRING_NOT_TERMINATED_WARNING;
-}
-
struct OptionData {
const char* name;
const char* key;
@@ -55,9 +41,8 @@ struct OptionData {
// Inserts tags from options into locale string.
Maybe<bool> InsertOptionsIntoLocale(Isolate* isolate,
Handle<JSReceiver> options,
- icu::Locale* icu_locale) {
+ icu::LocaleBuilder* builder) {
CHECK(isolate);
- CHECK(!icu_locale->isBogus());
const std::vector<const char*> hour_cycle_values = {"h11", "h12", "h23",
"h24"};
@@ -75,7 +60,6 @@ Maybe<bool> InsertOptionsIntoLocale(Isolate* isolate,
// TODO(cira): Pass in values as per the spec to make this
// spec compliant.
- UErrorCode status = U_ZERO_ERROR;
for (const auto& option_to_bcp47 : kOptionToUnicodeTagMap) {
std::unique_ptr<char[]> value_str = nullptr;
bool value_bool = false;
@@ -99,32 +83,18 @@ Maybe<bool> InsertOptionsIntoLocale(Isolate* isolate,
DCHECK_NOT_NULL(value_str.get());
// Overwrite existing, or insert new key-value to the locale string.
- if (uloc_toLegacyType(uloc_toLegacyKey(option_to_bcp47.key),
- value_str.get())) {
- // Only call setUnicodeKeywordValue if that value is a valid one.
- icu_locale->setUnicodeKeywordValue(option_to_bcp47.key, value_str.get(),
- status);
- if (U_FAILURE(status)) {
- return Just(false);
- }
- } else {
+ if (!uloc_toLegacyType(uloc_toLegacyKey(option_to_bcp47.key),
+ value_str.get())) {
return Just(false);
}
+ builder->setUnicodeLocaleKeyword(option_to_bcp47.key, value_str.get());
}
-
- // Check all the unicode extension fields are in the right length.
- if (!IsValidLocale(*icu_locale)) {
- THROW_NEW_ERROR_RETURN_VALUE(
- isolate, NewRangeError(MessageTemplate::kLocaleBadParameters),
- Nothing<bool>());
- }
-
return Just(true);
}
Handle<Object> UnicodeKeywordValue(Isolate* isolate, Handle<JSLocale> locale,
const char* key) {
- icu::Locale* icu_locale = locale->icu_locale()->raw();
+ icu::Locale* icu_locale = locale->icu_locale().raw();
UErrorCode status = U_ZERO_ERROR;
std::string value =
icu_locale->getUnicodeKeywordValue<std::string>(key, status);
@@ -237,32 +207,29 @@ bool StartsWithUnicodeLanguageId(const std::string& value) {
return true;
}
-Maybe<std::string> ApplyOptionsToTag(Isolate* isolate, Handle<String> tag,
- Handle<JSReceiver> options) {
+Maybe<bool> ApplyOptionsToTag(Isolate* isolate, Handle<String> tag,
+ Handle<JSReceiver> options,
+ icu::LocaleBuilder* builder) {
v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate);
if (tag->length() == 0) {
THROW_NEW_ERROR_RETURN_VALUE(
isolate, NewRangeError(MessageTemplate::kLocaleNotEmpty),
- Nothing<std::string>());
+ Nothing<bool>());
}
v8::String::Utf8Value bcp47_tag(v8_isolate, v8::Utils::ToLocal(tag));
+ builder->setLanguageTag({*bcp47_tag, bcp47_tag.length()});
CHECK_LT(0, bcp47_tag.length());
CHECK_NOT_NULL(*bcp47_tag);
// 2. If IsStructurallyValidLanguageTag(tag) is false, throw a RangeError
// exception.
if (!StartsWithUnicodeLanguageId(*bcp47_tag)) {
- THROW_NEW_ERROR_RETURN_VALUE(
- isolate, NewRangeError(MessageTemplate::kLocaleBadParameters),
- Nothing<std::string>());
+ return Just(false);
}
UErrorCode status = U_ZERO_ERROR;
- icu::Locale icu_locale =
- icu::Locale::forLanguageTag({*bcp47_tag, bcp47_tag.length()}, status);
+ builder->build(status);
if (U_FAILURE(status)) {
- THROW_NEW_ERROR_RETURN_VALUE(
- isolate, NewRangeError(MessageTemplate::kLocaleBadParameters),
- Nothing<std::string>());
+ return Just(false);
}
// 3. Let language be ? GetOption(options, "language", "string", undefined,
@@ -272,15 +239,16 @@ Maybe<std::string> ApplyOptionsToTag(Isolate* isolate, Handle<String> tag,
Maybe<bool> maybe_language =
Intl::GetStringOption(isolate, options, "language", empty_values,
"ApplyOptionsToTag", &language_str);
- MAYBE_RETURN(maybe_language, Nothing<std::string>());
+ MAYBE_RETURN(maybe_language, Nothing<bool>());
// 4. If language is not undefined, then
if (maybe_language.FromJust()) {
+ builder->setLanguage(language_str.get());
+ builder->build(status);
// a. If language does not match the unicode_language_subtag production,
// throw a RangeError exception.
- if (!IsUnicodeLanguageSubtag(language_str.get())) {
- THROW_NEW_ERROR_RETURN_VALUE(
- isolate, NewRangeError(MessageTemplate::kLocaleBadParameters),
- Nothing<std::string>());
+ if (U_FAILURE(status) || language_str[0] == '\0' ||
+ IsAlpha(language_str.get(), 4, 4)) {
+ return Just(false);
}
}
// 5. Let script be ? GetOption(options, "script", "string", undefined,
@@ -289,15 +257,15 @@ Maybe<std::string> ApplyOptionsToTag(Isolate* isolate, Handle<String> tag,
Maybe<bool> maybe_script =
Intl::GetStringOption(isolate, options, "script", empty_values,
"ApplyOptionsToTag", &script_str);
- MAYBE_RETURN(maybe_script, Nothing<std::string>());
+ MAYBE_RETURN(maybe_script, Nothing<bool>());
// 6. If script is not undefined, then
if (maybe_script.FromJust()) {
+ builder->setScript(script_str.get());
+ builder->build(status);
// a. If script does not match the unicode_script_subtag production, throw
// a RangeError exception.
- if (!IsUnicodeScriptSubtag(script_str.get())) {
- THROW_NEW_ERROR_RETURN_VALUE(
- isolate, NewRangeError(MessageTemplate::kLocaleBadParameters),
- Nothing<std::string>());
+ if (U_FAILURE(status) || script_str[0] == '\0') {
+ return Just(false);
}
}
// 7. Let region be ? GetOption(options, "region", "string", undefined,
@@ -306,85 +274,41 @@ Maybe<std::string> ApplyOptionsToTag(Isolate* isolate, Handle<String> tag,
Maybe<bool> maybe_region =
Intl::GetStringOption(isolate, options, "region", empty_values,
"ApplyOptionsToTag", &region_str);
- MAYBE_RETURN(maybe_region, Nothing<std::string>());
+ MAYBE_RETURN(maybe_region, Nothing<bool>());
// 8. If region is not undefined, then
if (maybe_region.FromJust()) {
// a. If region does not match the region production, throw a RangeError
// exception.
- if (!IsUnicodeRegionSubtag(region_str.get())) {
- THROW_NEW_ERROR_RETURN_VALUE(
- isolate, NewRangeError(MessageTemplate::kLocaleBadParameters),
- Nothing<std::string>());
+ builder->setRegion(region_str.get());
+ builder->build(status);
+ if (U_FAILURE(status) || region_str[0] == '\0') {
+ return Just(false);
}
}
- // 9. Set tag to CanonicalizeLanguageTag(tag).
- Maybe<std::string> maybe_canonicalized =
- Intl::CanonicalizeLanguageTag(isolate, tag);
- MAYBE_RETURN(maybe_canonicalized, Nothing<std::string>());
-
- std::vector<std::string> tokens;
- std::string token;
- std::istringstream token_stream(maybe_canonicalized.FromJust());
- while (std::getline(token_stream, token, '-')) {
- tokens.push_back(token);
- }
+ // 9. Set tag to CanonicalizeLanguageTag(tag).
// 10. If language is not undefined,
- std::string locale_str;
- if (maybe_language.FromJust()) {
- // a. Assert: tag matches the unicode_locale_id production.
- // b. Set tag to tag with the substring corresponding to the
- // unicode_language_subtag production replaced by the string language.
- tokens[0] = language_str.get();
- }
-
+ // a. Assert: tag matches the unicode_locale_id production.
+ // b. Set tag to tag with the substring corresponding to the
+ // unicode_language_subtag production replaced by the string language.
// 11. If script is not undefined, then
- if (maybe_script.FromJust()) {
- // a. If tag does not contain a unicode_script_subtag production, then
- if (tokens.size() < 2 || !IsUnicodeScriptSubtag(tokens[1])) {
- // i. Set tag to the concatenation of the unicode_language_subtag
- // production of tag, "-", script, and the rest of tag.
- tokens.insert(tokens.begin() + 1, script_str.get());
- // b. Else,
- } else {
- // i. Set tag to tag with the substring corresponding to the
- // unicode_script_subtag production replaced by the string script.
- tokens[1] = script_str.get();
- }
- }
+ // a. If tag does not contain a unicode_script_subtag production, then
+ // i. Set tag to the concatenation of the unicode_language_subtag
+ // production of tag, "-", script, and the rest of tag.
+ // b. Else,
+ // i. Set tag to tag with the substring corresponding to the
+ // unicode_script_subtag production replaced by the string script.
// 12. If region is not undefined, then
- if (maybe_region.FromJust()) {
- // a. If tag does not contain a unicode_region_subtag production, then
- // i. Set tag to the concatenation of the unicode_language_subtag
- // production of tag, the substring corresponding to the "-"
- // unicode_script_subtag production if present, "-", region, and
- // the rest of tag.
- // b. Else,
- // i. Set tag to tag with the substring corresponding to the
- // unicode_region_subtag production replaced by the string region.
- if (tokens.size() > 1 && IsUnicodeRegionSubtag(tokens[1])) {
- tokens[1] = region_str.get();
- } else if (tokens.size() > 1 && IsUnicodeScriptSubtag(tokens[1])) {
- if (tokens.size() > 2 && IsUnicodeRegionSubtag(tokens[2])) {
- tokens[2] = region_str.get();
- } else {
- tokens.insert(tokens.begin() + 2, region_str.get());
- }
- } else {
- tokens.insert(tokens.begin() + 1, region_str.get());
- }
- }
-
- std::string replaced;
- for (auto it = tokens.begin(); it != tokens.end(); it++) {
- replaced += *it;
- if (it + 1 != tokens.end()) {
- replaced += '-';
- }
- }
-
+ // a. If tag does not contain a unicode_region_subtag production, then
+ // i. Set tag to the concatenation of the unicode_language_subtag
+ // production of tag, the substring corresponding to the "-"
+ // unicode_script_subtag production if present, "-", region, and
+ // the rest of tag.
+ // b. Else,
+ // i. Set tag to tag with the substring corresponding to the
+ // unicode_region_subtag production replaced by the string region.
// 13. Return CanonicalizeLanguageTag(tag).
- return Intl::CanonicalizeLanguageTag(isolate, replaced);
+ return Just(true);
}
} // namespace
@@ -393,21 +317,22 @@ MaybeHandle<JSLocale> JSLocale::Initialize(Isolate* isolate,
Handle<JSLocale> locale,
Handle<String> locale_str,
Handle<JSReceiver> options) {
- Maybe<std::string> maybe_locale =
- ApplyOptionsToTag(isolate, locale_str, options);
- MAYBE_RETURN(maybe_locale, MaybeHandle<JSLocale>());
- UErrorCode status = U_ZERO_ERROR;
- icu::Locale icu_locale =
- icu::Locale::forLanguageTag(maybe_locale.FromJust().c_str(), status);
- if (U_FAILURE(status)) {
+ icu::LocaleBuilder builder;
+ Maybe<bool> maybe_apply =
+ ApplyOptionsToTag(isolate, locale_str, options, &builder);
+ MAYBE_RETURN(maybe_apply, MaybeHandle<JSLocale>());
+ if (!maybe_apply.FromJust()) {
THROW_NEW_ERROR(isolate,
NewRangeError(MessageTemplate::kLocaleBadParameters),
JSLocale);
}
- Maybe<bool> error = InsertOptionsIntoLocale(isolate, options, &icu_locale);
- MAYBE_RETURN(error, MaybeHandle<JSLocale>());
- if (!error.FromJust()) {
+ Maybe<bool> maybe_insert =
+ InsertOptionsIntoLocale(isolate, options, &builder);
+ MAYBE_RETURN(maybe_insert, MaybeHandle<JSLocale>());
+ UErrorCode status = U_ZERO_ERROR;
+ icu::Locale icu_locale = builder.build(status);
+ if (!maybe_insert.FromJust() || U_FAILURE(status)) {
THROW_NEW_ERROR(isolate,
NewRangeError(MessageTemplate::kLocaleBadParameters),
JSLocale);
@@ -458,28 +383,28 @@ Handle<String> JSLocale::Minimize(Isolate* isolate, String locale) {
Handle<Object> JSLocale::Language(Isolate* isolate, Handle<JSLocale> locale) {
Factory* factory = isolate->factory();
- const char* language = locale->icu_locale()->raw()->getLanguage();
+ const char* language = locale->icu_locale().raw()->getLanguage();
if (strlen(language) == 0) return factory->undefined_value();
return factory->NewStringFromAsciiChecked(language);
}
Handle<Object> JSLocale::Script(Isolate* isolate, Handle<JSLocale> locale) {
Factory* factory = isolate->factory();
- const char* script = locale->icu_locale()->raw()->getScript();
+ const char* script = locale->icu_locale().raw()->getScript();
if (strlen(script) == 0) return factory->undefined_value();
return factory->NewStringFromAsciiChecked(script);
}
Handle<Object> JSLocale::Region(Isolate* isolate, Handle<JSLocale> locale) {
Factory* factory = isolate->factory();
- const char* region = locale->icu_locale()->raw()->getCountry();
+ const char* region = locale->icu_locale().raw()->getCountry();
if (strlen(region) == 0) return factory->undefined_value();
return factory->NewStringFromAsciiChecked(region);
}
Handle<String> JSLocale::BaseName(Isolate* isolate, Handle<JSLocale> locale) {
icu::Locale icu_locale =
- icu::Locale::createFromName(locale->icu_locale()->raw()->getBaseName());
+ icu::Locale::createFromName(locale->icu_locale().raw()->getBaseName());
std::string base_name = Intl::ToLanguageTag(icu_locale).FromJust();
return isolate->factory()->NewStringFromAsciiChecked(base_name.c_str());
}
@@ -502,7 +427,7 @@ Handle<Object> JSLocale::HourCycle(Isolate* isolate, Handle<JSLocale> locale) {
Handle<Object> JSLocale::Numeric(Isolate* isolate, Handle<JSLocale> locale) {
Factory* factory = isolate->factory();
- icu::Locale* icu_locale = locale->icu_locale()->raw();
+ icu::Locale* icu_locale = locale->icu_locale().raw();
UErrorCode status = U_ZERO_ERROR;
std::string numeric =
icu_locale->getUnicodeKeywordValue<std::string>("kn", status);
@@ -515,7 +440,7 @@ Handle<Object> JSLocale::NumberingSystem(Isolate* isolate,
}
std::string JSLocale::ToString(Handle<JSLocale> locale) {
- icu::Locale* icu_locale = locale->icu_locale()->raw();
+ icu::Locale* icu_locale = locale->icu_locale().raw();
return Intl::ToLanguageTag(*icu_locale).FromJust();
}
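The rewrite above drops the hand-rolled tag tokenization in favor of icu::LocaleBuilder (new in ICU 64), which validates each subtag as it is set and reports any failure through the UErrorCode passed to build(). A minimal standalone usage sketch, assuming ICU 64+ headers and linkage:

#include <iostream>

#include "unicode/localebuilder.h"
#include "unicode/locid.h"

int main() {
  UErrorCode status = U_ZERO_ERROR;
  icu::LocaleBuilder builder;
  builder.setLanguageTag("sr-Latn")
      .setRegion("RS")
      .setUnicodeLocaleKeyword("ca", "gregory");
  icu::Locale locale = builder.build(status);
  if (U_FAILURE(status)) {
    std::cerr << "invalid subtag\n";  // bad pieces surface here, not earlier
    return 1;
  }
  std::cout << locale.getName() << "\n";  // e.g. sr_Latn_RS@calendar=gregorian
}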
diff --git a/deps/v8/src/objects/js-locale.h b/deps/v8/src/objects/js-locale.h
index 120ddeb965..1a833e0e18 100644
--- a/deps/v8/src/objects/js-locale.h
+++ b/deps/v8/src/objects/js-locale.h
@@ -9,11 +9,11 @@
#ifndef V8_OBJECTS_JS_LOCALE_H_
#define V8_OBJECTS_JS_LOCALE_H_
-#include "src/global-handles.h"
+#include "src/execution/isolate.h"
+#include "src/handles/global-handles.h"
#include "src/heap/factory.h"
-#include "src/isolate.h"
-#include "src/objects.h"
#include "src/objects/managed.h"
+#include "src/objects/objects.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -58,12 +58,8 @@ class JSLocale : public JSObject {
DECL_VERIFIER(JSLocale)
// Layout description.
-#define JS_LOCALE_FIELDS(V) \
- V(kICULocaleOffset, kTaggedSize) \
- V(kSize, 0)
-
- DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize, JS_LOCALE_FIELDS)
-#undef JS_LOCALE_FIELDS
+ DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
+ TORQUE_GENERATED_JSLOCALE_FIELDS)
OBJECT_CONSTRUCTORS(JSLocale, JSObject);
};
diff --git a/deps/v8/src/objects/js-number-format-inl.h b/deps/v8/src/objects/js-number-format-inl.h
index 3edf6f1ea3..bd76dfe556 100644
--- a/deps/v8/src/objects/js-number-format-inl.h
+++ b/deps/v8/src/objects/js-number-format-inl.h
@@ -9,8 +9,8 @@
#ifndef V8_OBJECTS_JS_NUMBER_FORMAT_INL_H_
#define V8_OBJECTS_JS_NUMBER_FORMAT_INL_H_
-#include "src/objects-inl.h"
#include "src/objects/js-number-format.h"
+#include "src/objects/objects-inl.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -21,35 +21,51 @@ namespace internal {
OBJECT_CONSTRUCTORS_IMPL(JSNumberFormat, JSObject)
ACCESSORS(JSNumberFormat, locale, String, kLocaleOffset)
-ACCESSORS(JSNumberFormat, icu_number_format, Managed<icu::NumberFormat>,
- kICUNumberFormatOffset)
+ACCESSORS(JSNumberFormat, icu_number_formatter,
+ Managed<icu::number::LocalizedNumberFormatter>,
+ kIcuNumberFormatterOffset)
ACCESSORS(JSNumberFormat, bound_format, Object, kBoundFormatOffset)
+
+// The current ECMA 402 spec mandates recording (Min|Max)imumFractionDigits
+// unconditionally, while the unified number proposal will eventually record
+// either (Min|Max)imumFractionDigits or (Min|Max)imumSignificantDigits.
+// Since LocalizedNumberFormatter can only remember one set, and at the
+// 2019-01-17 ECMA 402 meeting the committee decided not to take a PR to
+// address that prior to the unified number proposal, we have to add these
+// two 5-bit ints into flags to remember the (Min|Max)imumFractionDigits
+// while (Min|Max)imumSignificantDigits is present.
+// TODO(ftang) remove the following once we ship int-number-format-unified
+// * SMI_ACCESSORS of flags
+// * Four inline functions: (set_)?(min|max)imum_fraction_digits
+
SMI_ACCESSORS(JSNumberFormat, flags, kFlagsOffset)
-inline void JSNumberFormat::set_style(Style style) {
- DCHECK_LT(style, Style::COUNT);
+inline int JSNumberFormat::minimum_fraction_digits() const {
+ return MinimumFractionDigitsBits::decode(flags());
+}
+
+inline void JSNumberFormat::set_minimum_fraction_digits(int digits) {
+ DCHECK_GE(MinimumFractionDigitsBits::kMax, digits);
+ DCHECK_LE(0, digits);
+ DCHECK_GE(20, digits);
int hints = flags();
- hints = StyleBits::update(hints, style);
+ hints = MinimumFractionDigitsBits::update(hints, digits);
set_flags(hints);
}
-inline JSNumberFormat::Style JSNumberFormat::style() const {
- return StyleBits::decode(flags());
+inline int JSNumberFormat::maximum_fraction_digits() const {
+ return MaximumFractionDigitsBits::decode(flags());
}
-inline void JSNumberFormat::set_currency_display(
- CurrencyDisplay currency_display) {
- DCHECK_LT(currency_display, CurrencyDisplay::COUNT);
+inline void JSNumberFormat::set_maximum_fraction_digits(int digits) {
+ DCHECK_GE(MaximumFractionDigitsBits::kMax, digits);
+ DCHECK_LE(0, digits);
+ DCHECK_GE(20, digits);
int hints = flags();
- hints = CurrencyDisplayBits::update(hints, currency_display);
+ hints = MaximumFractionDigitsBits::update(hints, digits);
set_flags(hints);
}
-inline JSNumberFormat::CurrencyDisplay JSNumberFormat::currency_display()
- const {
- return CurrencyDisplayBits::decode(flags());
-}
-
CAST_ACCESSOR(JSNumberFormat)
} // namespace internal
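MinimumFractionDigitsBits and MaximumFractionDigitsBits pack two 5-bit integers (the spec range is 0 to 20) into the Smi-stored flags word. A simplified sketch of the encode/decode arithmetic such a bit-field helper performs (not V8's actual BitField template):

#include <cassert>

// Simplified bit field: Size bits starting at bit Shift within an int.
// Callers must pass values no larger than kMax.
template <int Shift, int Size>
struct BitFieldLike {
  static constexpr int kMax = (1 << Size) - 1;
  static constexpr int kMask = kMax << Shift;
  static int decode(int word) { return (word >> Shift) & kMax; }
  static int update(int word, int value) {
    return (word & ~kMask) | (value << Shift);
  }
};

using MinFracBits = BitFieldLike<0, 5>;  // can hold 0..31; spec uses 0..20
using MaxFracBits = BitFieldLike<5, 5>;

int main() {
  int flags = 0;
  flags = MinFracBits::update(flags, 2);
  flags = MaxFracBits::update(flags, 20);
  assert(MinFracBits::decode(flags) == 2);
  assert(MaxFracBits::decode(flags) == 20);
}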
diff --git a/deps/v8/src/objects/js-number-format.cc b/deps/v8/src/objects/js-number-format.cc
index c490eeef57..67d545e0be 100644
--- a/deps/v8/src/objects/js-number-format.cc
+++ b/deps/v8/src/objects/js-number-format.cc
@@ -11,34 +11,269 @@
#include <set>
#include <string>
-#include "src/isolate.h"
-#include "src/objects-inl.h"
+#include "src/execution/isolate.h"
#include "src/objects/intl-objects.h"
#include "src/objects/js-number-format-inl.h"
+#include "src/objects/objects-inl.h"
+#include "unicode/currunit.h"
#include "unicode/decimfmt.h"
#include "unicode/locid.h"
+#include "unicode/nounit.h"
+#include "unicode/numberformatter.h"
#include "unicode/numfmt.h"
+#include "unicode/ucurr.h"
#include "unicode/uloc.h"
+#include "unicode/unumberformatter.h"
+#include "unicode/uvernum.h" // for U_ICU_VERSION_MAJOR_NUM
namespace v8 {
namespace internal {
namespace {
-UNumberFormatStyle ToNumberFormatStyle(
- JSNumberFormat::CurrencyDisplay currency_display) {
+// [[Style]] is one of the values "decimal", "percent", "currency",
+// or "unit" identifying the style of the number format.
+// Note: "unit" is added in proposal-unified-intl-numberformat
+enum class Style {
+ DECIMAL,
+ PERCENT,
+ CURRENCY,
+ UNIT,
+};
+
+// [[CurrencyDisplay]] is one of the values "code", "symbol", "name",
+// or "narrow-symbol" identifying the display of the currency number format.
+// Note: "narrow-symbol" is added in proposal-unified-intl-numberformat
+enum class CurrencyDisplay {
+ CODE,
+ SYMBOL,
+ NAME,
+ NARROW_SYMBOL,
+};
+
+// [[CurrencySign]] is one of the String values "standard" or "accounting",
+// specifying whether to render negative numbers in accounting format, often
+// signified by parenthesis. It is only used when [[Style]] has the value
+// "currency" and when [[SignDisplay]] is not "never".
+enum class CurrencySign {
+ STANDARD,
+ ACCOUNTING,
+};
+
+// [[UnitDisplay]] is one of the String values "short", "narrow", or "long",
+// specifying whether to display the unit as a symbol, narrow symbol, or
+// localized long name if formatting with the "unit" or "percent" style. It is
+// only used when [[Style]] has the value "unit" or "percent".
+enum class UnitDisplay {
+ SHORT,
+ NARROW,
+ LONG,
+};
+
+// [[Notation]] is one of the String values "standard", "scientific",
+// "engineering", or "compact", specifying whether the number should be
+// displayed without scaling, scaled to the units place with the power of ten
+// in scientific notation, scaled to the nearest thousand with the power of
+// ten in scientific notation, or scaled to the nearest locale-dependent
+// compact decimal notation power of ten with the corresponding compact
+// decimal notation affix.
+
+enum class Notation {
+ STANDARD,
+ SCIENTIFIC,
+ ENGINEERING,
+ COMPACT,
+};
+
+// [[CompactDisplay]] is one of the String values "short" or "long",
+// specifying whether to display compact notation affixes in short form ("5K")
+// or long form ("5 thousand") if formatting with the "compact" notation. It
+// is only used when [[Notation]] has the value "compact".
+enum class CompactDisplay {
+ SHORT,
+ LONG,
+};
+
+// [[SignDisplay]] is one of the String values "auto", "always", "never", or
+// "except-zero", specifying whether to show the sign on negative numbers
+// only, positive and negative numbers including zero, neither positive nor
+// negative numbers, or positive and negative numbers but not zero.
+enum class SignDisplay {
+ AUTO,
+ ALWAYS,
+ NEVER,
+ EXCEPT_ZERO,
+};
+
+UNumberUnitWidth ToUNumberUnitWidth(CurrencyDisplay currency_display) {
switch (currency_display) {
- case JSNumberFormat::CurrencyDisplay::SYMBOL:
- return UNUM_CURRENCY;
- case JSNumberFormat::CurrencyDisplay::CODE:
- return UNUM_CURRENCY_ISO;
- case JSNumberFormat::CurrencyDisplay::NAME:
- return UNUM_CURRENCY_PLURAL;
- case JSNumberFormat::CurrencyDisplay::COUNT:
- UNREACHABLE();
+ case CurrencyDisplay::SYMBOL:
+ return UNumberUnitWidth::UNUM_UNIT_WIDTH_SHORT;
+ case CurrencyDisplay::CODE:
+ return UNumberUnitWidth::UNUM_UNIT_WIDTH_ISO_CODE;
+ case CurrencyDisplay::NAME:
+ return UNumberUnitWidth::UNUM_UNIT_WIDTH_FULL_NAME;
+ case CurrencyDisplay::NARROW_SYMBOL:
+ return UNumberUnitWidth::UNUM_UNIT_WIDTH_NARROW;
+ }
+}
+
+UNumberUnitWidth ToUNumberUnitWidth(UnitDisplay unit_display) {
+ switch (unit_display) {
+ case UnitDisplay::SHORT:
+ return UNumberUnitWidth::UNUM_UNIT_WIDTH_SHORT;
+ case UnitDisplay::LONG:
+ return UNumberUnitWidth::UNUM_UNIT_WIDTH_FULL_NAME;
+ case UnitDisplay::NARROW:
+ return UNumberUnitWidth::UNUM_UNIT_WIDTH_NARROW;
+ }
+}
+
+UNumberSignDisplay ToUNumberSignDisplay(SignDisplay sign_display,
+ CurrencySign currency_sign) {
+ switch (sign_display) {
+ case SignDisplay::AUTO:
+ if (currency_sign == CurrencySign::ACCOUNTING) {
+ return UNumberSignDisplay::UNUM_SIGN_ACCOUNTING;
+ }
+ DCHECK(currency_sign == CurrencySign::STANDARD);
+ return UNumberSignDisplay::UNUM_SIGN_AUTO;
+ case SignDisplay::NEVER:
+ return UNumberSignDisplay::UNUM_SIGN_NEVER;
+ case SignDisplay::ALWAYS:
+ if (currency_sign == CurrencySign::ACCOUNTING) {
+ return UNumberSignDisplay::UNUM_SIGN_ACCOUNTING_ALWAYS;
+ }
+ DCHECK(currency_sign == CurrencySign::STANDARD);
+ return UNumberSignDisplay::UNUM_SIGN_ALWAYS;
+ case SignDisplay::EXCEPT_ZERO:
+ if (currency_sign == CurrencySign::ACCOUNTING) {
+ return UNumberSignDisplay::UNUM_SIGN_ACCOUNTING_EXCEPT_ZERO;
+ }
+ DCHECK(currency_sign == CurrencySign::STANDARD);
+ return UNumberSignDisplay::UNUM_SIGN_EXCEPT_ZERO;
+ }
+}
+
+icu::number::Notation ToICUNotation(Notation notation,
+ CompactDisplay compact_display) {
+ switch (notation) {
+ case Notation::STANDARD:
+ return icu::number::Notation::simple();
+ case Notation::SCIENTIFIC:
+ return icu::number::Notation::scientific();
+ case Notation::ENGINEERING:
+ return icu::number::Notation::engineering();
+ case Notation::COMPACT:
+ if (compact_display == CompactDisplay::SHORT) {
+ return icu::number::Notation::compactShort();
+ }
+ DCHECK(compact_display == CompactDisplay::LONG);
+ return icu::number::Notation::compactLong();
}
}
+std::map<const std::string, icu::MeasureUnit> CreateUnitMap() {
+ UErrorCode status = U_ZERO_ERROR;
+ int32_t total = icu::MeasureUnit::getAvailable(nullptr, 0, status);
+ CHECK(U_FAILURE(status));
+ status = U_ZERO_ERROR;
+ // See the list in ecma402 #sec-issanctionedsimpleunitidentifier
+ std::set<std::string> sanctioned(
+ {"acre", "bit", "byte", "celsius",
+ "centimeter", "day", "degree", "fahrenheit",
+ "foot", "gigabit", "gigabyte", "gram",
+ "hectare", "hour", "inch", "kilobit",
+ "kilobyte", "kilogram", "kilometer", "megabit",
+ "megabyte", "meter", "mile", "mile-scandinavian",
+ "millimeter", "millisecond", "minute", "month",
+ "ounce", "percent", "petabyte", "pound",
+ "second", "stone", "terabit", "terabyte",
+ "week", "yard", "year"});
+ std::vector<icu::MeasureUnit> units(total);
+ total = icu::MeasureUnit::getAvailable(units.data(), total, status);
+ CHECK(U_SUCCESS(status));
+ std::map<const std::string, icu::MeasureUnit> map;
+ for (auto it = units.begin(); it != units.end(); ++it) {
+ if (sanctioned.count(it->getSubtype()) > 0) {
+ map[it->getSubtype()] = *it;
+ }
+ }
+ return map;
+}
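
CreateUnitMap() above uses ICU's two-call "preflight" pattern, which is why its first status check is CHECK(U_FAILURE(status)): the sizing call is expected to fail with a buffer-overflow-style code. A minimal standalone sketch of the idiom (the AllMeasureUnits name is illustrative, not part of the patch):

    #include <vector>
    #include "unicode/measunit.h"

    std::vector<icu::MeasureUnit> AllMeasureUnits() {
      UErrorCode status = U_ZERO_ERROR;
      // Sizing call: a null buffer reports the required capacity and sets a
      // failure code by design.
      int32_t total = icu::MeasureUnit::getAvailable(nullptr, 0, status);
      status = U_ZERO_ERROR;
      std::vector<icu::MeasureUnit> units(total);
      // The second call actually fills the buffer.
      icu::MeasureUnit::getAvailable(units.data(), total, status);
      if (U_FAILURE(status)) units.clear();
      return units;
    }
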
+
+class UnitFactory {
+ public:
+ UnitFactory() : map_(CreateUnitMap()) {}
+ virtual ~UnitFactory() {}
+
+ // ecma402 #sec-issanctionedsimpleunitidentifier
+ icu::MeasureUnit create(const std::string& unitIdentifier) {
+ // 1. If unitIdentifier is in the following list, return true.
+ auto found = map_.find(unitIdentifier);
+ if (found != map_.end()) {
+ return found->second;
+ }
+ // 2. Return false.
+ return icu::NoUnit::base();
+ }
+
+ private:
+ std::map<const std::string, icu::MeasureUnit> map_;
+};
+
+// ecma402 #sec-issanctionedsimpleunitidentifier
+icu::MeasureUnit IsSanctionedUnitIdentifier(const std::string& unit) {
+ static base::LazyInstance<UnitFactory>::type factory =
+ LAZY_INSTANCE_INITIALIZER;
+ return factory.Pointer()->create(unit);
+}
+
+// ecma402 #sec-iswellformedunitidentifier
+Maybe<std::pair<icu::MeasureUnit, icu::MeasureUnit>> IsWellFormedUnitIdentifier(
+ Isolate* isolate, const std::string& unit) {
+ icu::MeasureUnit result = IsSanctionedUnitIdentifier(unit);
+ icu::MeasureUnit none = icu::NoUnit::base();
+ // 1. If the result of IsSanctionedUnitIdentifier(unitIdentifier) is true,
+ // then
+ if (result != none) {
+ // a. Return true.
+ std::pair<icu::MeasureUnit, icu::MeasureUnit> pair(result, none);
+ return Just(pair);
+ }
+ // 2. If the substring "-per-" does not occur exactly once in unitIdentifier,
+ // then
+ size_t first_per = unit.find("-per-");
+ if (first_per == std::string::npos ||
+ unit.find("-per-", first_per + 5) != std::string::npos) {
+ // a. Return false.
+ return Nothing<std::pair<icu::MeasureUnit, icu::MeasureUnit>>();
+ }
+ // 3. Let numerator be the substring of unitIdentifier from the beginning to
+ // just before "-per-".
+ std::string numerator = unit.substr(0, first_per);
+
+ // 4. If the result of IsSanctionedUnitIdentifier(numerator) is false, then
+ result = IsSanctionedUnitIdentifier(numerator);
+ if (result == none) {
+ // a. Return false.
+ return Nothing<std::pair<icu::MeasureUnit, icu::MeasureUnit>>();
+ }
+ // 5. Let denominator be the substring of unitIdentifier from just after
+ // "-per-" to the end.
+ std::string denominator = unit.substr(first_per + 5);
+
+ // 6. If the result of IsSanctionedUnitIdentifier(denominator) is false, then
+ icu::MeasureUnit den_result = IsSanctionedUnitIdentifier(denominator);
+ if (den_result == none) {
+ // a. Return false.
+ return Nothing<std::pair<icu::MeasureUnit, icu::MeasureUnit>>();
+ }
+ // 7. Return true.
+ std::pair<icu::MeasureUnit, icu::MeasureUnit> pair(result, den_result);
+ return Just(pair);
+}
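
IsWellFormedUnitIdentifier() above implements the spec's exactly-one "-per-" rule. A standalone sketch of just that split, using plain std::string (the SplitPerUnit name is illustrative):

    #include <string>
    #include <utility>

    // "kilometer-per-hour" -> {"kilometer", "hour"}; returns a pair of empty
    // strings when "-per-" does not occur exactly once.
    std::pair<std::string, std::string> SplitPerUnit(const std::string& unit) {
      const std::string kPer = "-per-";
      size_t first = unit.find(kPer);
      if (first == std::string::npos ||
          unit.find(kPer, first + kPer.size()) != std::string::npos) {
        return {};
      }
      return {unit.substr(0, first), unit.substr(first + kPer.size())};
    }
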
+
// ecma-402/#sec-currencydigits
// The currency is expected to be an all-uppercase string value.
int CurrencyDigits(const icu::UnicodeString& currency) {
@@ -69,23 +304,351 @@ bool IsWellFormedCurrencyCode(const std::string& currency) {
return (IsAToZ(currency[0]) && IsAToZ(currency[1]) && IsAToZ(currency[2]));
}
+// Parse the 'style' from the skeleton.
+Style StyleFromSkeleton(const icu::UnicodeString& skeleton) {
+ // Ex: skeleton as
+ // "percent precision-integer rounding-mode-half-up scale/100"
+ if (skeleton.indexOf("percent") >= 0 && skeleton.indexOf("scale/100") >= 0) {
+ return Style::PERCENT;
+ }
+ // Ex: skeleton as "currency/TWD .00 rounding-mode-half-up"
+ if (skeleton.indexOf("currency") >= 0) {
+ return Style::CURRENCY;
+ }
+ // Ex: skeleton as
+ // "measure-unit/length-meter .### rounding-mode-half-up unit-width-narrow"
+ // or special case for "percent .### rounding-mode-half-up"
+ if (skeleton.indexOf("measure-unit") >= 0 ||
+ skeleton.indexOf("percent") >= 0) {
+ return Style::UNIT;
+ }
+ // Ex: skeleton as ".### rounding-mode-half-up"
+ return Style::DECIMAL;
+}
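
Note the ordering in StyleFromSkeleton(): a "percent" token only means the percent style when "scale/100" is also present; otherwise it is treated as the unit style. A usage sketch, assuming the helper above:

    Style a = StyleFromSkeleton(icu::UnicodeString(
        "percent precision-integer rounding-mode-half-up scale/100"));
    // a == Style::PERCENT: "percent" together with "scale/100".
    Style b = StyleFromSkeleton(
        icu::UnicodeString("percent .### rounding-mode-half-up"));
    // b == Style::UNIT: "percent" without "scale/100" is the unit style.
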
+
+// Return the style as a String.
+Handle<String> StyleAsString(Isolate* isolate, Style style) {
+ switch (style) {
+ case Style::PERCENT:
+ return ReadOnlyRoots(isolate).percent_string_handle();
+ case Style::CURRENCY:
+ return ReadOnlyRoots(isolate).currency_string_handle();
+ case Style::UNIT:
+ return ReadOnlyRoots(isolate).unit_string_handle();
+ case Style::DECIMAL:
+ return ReadOnlyRoots(isolate).decimal_string_handle();
+ }
+ UNREACHABLE();
+}
+
+// Parse the 'currencyDisplay' from the skeleton.
+Handle<String> CurrencyDisplayString(Isolate* isolate,
+ const icu::UnicodeString& skeleton) {
+ // Ex: skeleton as
+ // "currency/TWD .00 rounding-mode-half-up unit-width-iso-code"
+ if (skeleton.indexOf("unit-width-iso-code") >= 0) {
+ return ReadOnlyRoots(isolate).code_string_handle();
+ }
+ // Ex: skeleton as
+ // "currency/TWD .00 rounding-mode-half-up unit-width-full-name;"
+ if (skeleton.indexOf("unit-width-full-name") >= 0) {
+ return ReadOnlyRoots(isolate).name_string_handle();
+ }
+ // Ex: skeleton as
+ // "currency/TWD .00 rounding-mode-half-up unit-width-narrow;
+ if (skeleton.indexOf("unit-width-narrow") >= 0) {
+ return ReadOnlyRoots(isolate).narrow_symbol_string_handle();
+ }
+ // Ex: skeleton as "currency/TWD .00 rounding-mode-half-up"
+ return ReadOnlyRoots(isolate).symbol_string_handle();
+}
+
+// Return true if "group-off" does not occur in the skeleton.
+bool UseGroupingFromSkeleton(const icu::UnicodeString& skeleton) {
+ return skeleton.indexOf("group-off") == -1;
+}
+
+// Parse currency code from skeleton. For example, skeleton as
+// "currency/TWD .00 rounding-mode-half-up unit-width-full-name;"
+std::string CurrencyFromSkeleton(const icu::UnicodeString& skeleton) {
+ std::string str;
+ str = skeleton.toUTF8String<std::string>(str);
+ std::string search("currency/");
+ size_t index = str.find(search);
+ if (index == str.npos) return "";
+ return str.substr(index + search.size(), 3);
+}
+
+// Return CurrencySign as string based on skeleton.
+Handle<String> CurrencySignString(Isolate* isolate,
+ const icu::UnicodeString& skeleton) {
+ // Ex: skeleton as
+ // "currency/TWD .00 rounding-mode-half-up sign-accounting-always" OR
+ // "currency/TWD .00 rounding-mode-half-up sign-accounting-except-zero"
+ if (skeleton.indexOf("sign-accounting") >= 0) {
+ return ReadOnlyRoots(isolate).accounting_string_handle();
+ }
+ return ReadOnlyRoots(isolate).standard_string_handle();
+}
+
+// Return UnitDisplay as string based on skeleton.
+Handle<String> UnitDisplayString(Isolate* isolate,
+ const icu::UnicodeString& skeleton) {
+ // Ex: skeleton as
+ // "measure-unit/length-meter .### rounding-mode-half-up unit-width-full-name"
+ if (skeleton.indexOf("unit-width-full-name") >= 0) {
+ return ReadOnlyRoots(isolate).long_string_handle();
+ }
+ // Ex: skeleton as
+ // "measure-unit/length-meter .### rounding-mode-half-up unit-width-narrow".
+ if (skeleton.indexOf("unit-width-narrow") >= 0) {
+ return ReadOnlyRoots(isolate).narrow_string_handle();
+ }
+ // Ex: skeleton as
+ // "measure-unit/length-foot .### rounding-mode-half-up"
+ return ReadOnlyRoots(isolate).short_string_handle();
+}
+
+// Parse Notation from skeleton.
+Notation NotationFromSkeleton(const icu::UnicodeString& skeleton) {
+ // Ex: skeleton as
+ // "scientific .### rounding-mode-half-up"
+ if (skeleton.indexOf("scientific") >= 0) {
+ return Notation::SCIENTIFIC;
+ }
+ // Ex: skeleton as
+ // "engineering .### rounding-mode-half-up"
+ if (skeleton.indexOf("engineering") >= 0) {
+ return Notation::ENGINEERING;
+ }
+ // Ex: skeleton as
+ // "compact-short .### rounding-mode-half-up" or
+ // "compact-long .### rounding-mode-half-up
+ if (skeleton.indexOf("compact-") >= 0) {
+ return Notation::COMPACT;
+ }
+ // Ex: skeleton as
+ // "measure-unit/length-foot .### rounding-mode-half-up"
+ return Notation::STANDARD;
+}
+
+Handle<String> NotationAsString(Isolate* isolate, Notation notation) {
+ switch (notation) {
+ case Notation::SCIENTIFIC:
+ return ReadOnlyRoots(isolate).scientific_string_handle();
+ case Notation::ENGINEERING:
+ return ReadOnlyRoots(isolate).engineering_string_handle();
+ case Notation::COMPACT:
+ return ReadOnlyRoots(isolate).compact_string_handle();
+ case Notation::STANDARD:
+ return ReadOnlyRoots(isolate).standard_string_handle();
+ }
+ UNREACHABLE();
+}
+
+// Return CompactString as string based on skeleton.
+Handle<String> CompactDisplayString(Isolate* isolate,
+ const icu::UnicodeString& skeleton) {
+ // Ex: skeleton as
+ // "compact-long .### rounding-mode-half-up"
+ if (skeleton.indexOf("compact-long") >= 0) {
+ return ReadOnlyRoots(isolate).long_string_handle();
+ }
+ // Ex: skeleton as
+ // "compact-short .### rounding-mode-half-up"
+ DCHECK_GE(skeleton.indexOf("compact-short"), 0);
+ return ReadOnlyRoots(isolate).short_string_handle();
+}
+
+// Return SignDisplay as string based on skeleton.
+Handle<String> SignDisplayString(Isolate* isolate,
+ const icu::UnicodeString& skeleton) {
+ // Ex: skeleton as
+ // "currency/TWD .00 rounding-mode-half-up sign-never"
+ if (skeleton.indexOf("sign-never") >= 0) {
+ return ReadOnlyRoots(isolate).never_string_handle();
+ }
+ // Ex: skeleton as
+ // ".### rounding-mode-half-up sign-always" or
+ // "currency/TWD .00 rounding-mode-half-up sign-accounting-always"
+ if (skeleton.indexOf("sign-always") >= 0 ||
+ skeleton.indexOf("sign-accounting-always") >= 0) {
+ return ReadOnlyRoots(isolate).always_string_handle();
+ }
+ // Ex: skeleton as
+ // "currency/TWD .00 rounding-mode-half-up sign-accounting-except-zero" or
+ // "currency/TWD .00 rounding-mode-half-up sign-except-zero"
+ if (skeleton.indexOf("sign-accounting-except-zero") >= 0 ||
+ skeleton.indexOf("sign-except-zero") >= 0) {
+ return ReadOnlyRoots(isolate).except_zero_string_handle();
+ }
+ return ReadOnlyRoots(isolate).auto_string_handle();
+}
+
+// Return the minimum integer digits by counting the number of '0' after
+// "integer-width/+" in the skeleton.
+// Ex: Return 15 for skeleton as
+// “currency/TWD .00 rounding-mode-half-up integer-width/+000000000000000”
+// 1
+// 123456789012345
+// Return the default value of 1 if there is no "integer-width/+".
+int32_t MinimumIntegerDigitsFromSkeleton(const icu::UnicodeString& skeleton) {
+ // Count the number of '0's after "integer-width/+".
+ icu::UnicodeString search("integer-width/+");
+ int32_t index = skeleton.indexOf(search);
+ if (index < 0) return 1; // Return 1 if it cannot be found.
+ index += search.length();
+ int32_t matched = 0;
+ while (index < skeleton.length() && skeleton[index] == '0') {
+ matched++;
+ index++;
+ }
+ CHECK_GT(matched, 0);
+ return matched;
+}
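
A usage sketch for the helper above (skeleton value illustrative):

    int32_t min_int_digits = MinimumIntegerDigitsFromSkeleton(
        icu::UnicodeString(".00 rounding-mode-half-up integer-width/+000"));
    // min_int_digits == 3: three '0's follow "integer-width/+".
    // Without an "integer-width/+" token the helper falls back to 1.
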
+
+// Return true if there are fraction digits, false if not.
+// The minimum fraction digit count is the number of '0's after the '.' in
+// the skeleton. The maximum fraction digit count is the number of '#'s after
+// those '0's plus the minimum count.
+// For example, for the skeleton “.000#### rounding-mode-half-up”
+// 123
+// 4567
+// the minimum is 3 and the maximum is 7.
+bool FractionDigitsFromSkeleton(const icu::UnicodeString& skeleton,
+ int32_t* minimum, int32_t* maximum) {
+ icu::UnicodeString search(".");
+ int32_t index = skeleton.indexOf(search);
+ if (index < 0) return false;
+ *minimum = 0;
+ index++; // skip the '.'
+ while (index < skeleton.length() && skeleton[index] == '0') {
+ (*minimum)++;
+ index++;
+ }
+ *maximum = *minimum;
+ while (index < skeleton.length() && skeleton[index] == '#') {
+ (*maximum)++;
+ index++;
+ }
+ return true;
+}
+
+// Return true if there are significant digits, false if not.
+// The minimum significant digit count is the number of '@'s in the skeleton.
+// The maximum significant digit count is the number of '#'s after those '@'s
+// plus the minimum count.
+// Ex: Skeleton as "@@@@@####### rounding-mode-half-up"
+// 12345
+// 6789012
+// Here the minimum is 5 and the maximum is 12.
+bool SignificantDigitsFromSkeleton(const icu::UnicodeString& skeleton,
+ int32_t* minimum, int32_t* maximum) {
+ icu::UnicodeString search("@");
+ int32_t index = skeleton.indexOf(search);
+ if (index < 0) return false;
+ *minimum = 1;
+ index++; // skip the first '@'
+ while (index < skeleton.length() && skeleton[index] == '@') {
+ (*minimum)++;
+ index++;
+ }
+ *maximum = *minimum;
+ while (index < skeleton.length() && skeleton[index] == '#') {
+ (*maximum)++;
+ index++;
+ }
+ return true;
+}
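
Both digit scanners follow the same count-then-extend walk: count the mandatory characters ('0' or '@') for the minimum, then the optional '#'s for the maximum. A usage sketch, assuming the two helpers above:

    int32_t min_f = 0, max_f = 0, min_s = 0, max_s = 0;
    FractionDigitsFromSkeleton(
        icu::UnicodeString(".000#### rounding-mode-half-up"), &min_f, &max_f);
    // min_f == 3 (three '0's), max_f == 7 (3 + four '#'s).
    SignificantDigitsFromSkeleton(
        icu::UnicodeString("@@@@@####### rounding-mode-half-up"), &min_s,
        &max_s);
    // min_s == 5 (five '@'s), max_s == 12 (5 + seven '#'s).
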
+
+// Ex: percent .### rounding-mode-half-up
+// Special case for "percent"
+// Ex: "measure-unit/length-kilometer per-measure-unit/duration-hour .###
+// rounding-mode-half-up" should return "kilometer-per-unit".
+// Ex: "measure-unit/duration-year .### rounding-mode-half-up" should return
+// "year".
+std::string UnitFromSkeleton(const icu::UnicodeString& skeleton) {
+ std::string str;
+ str = skeleton.toUTF8String<std::string>(str);
+ // Special case for "percent" first.
+ if (str.find("percent") != str.npos) {
+ return "percent";
+ }
+ std::string search("measure-unit/");
+ size_t begin = str.find(search);
+ if (begin == str.npos) {
+ return "";
+ }
+ // Skip the type (ex: "length").
+ // "measure-unit/length-kilometer per-measure-unit/duration-hour"
+ // b
+ begin = str.find("-", begin + search.size());
+ if (begin == str.npos) {
+ return "";
+ }
+ begin++; // Skip the '-'.
+ // Find the end of the subtype.
+ size_t end = str.find(" ", begin);
+ // "measure-unit/length-kilometer per-measure-unit/duration-hour"
+ // b e
+ if (end == str.npos) {
+ end = str.size();
+ return str.substr(begin, end - begin);
+ }
+ // "measure-unit/length-kilometer per-measure-unit/duration-hour"
+ // b e
+ // [result ]
+ std::string result = str.substr(begin, end - begin);
+ begin = end + 1;
+ // "measure-unit/length-kilometer per-measure-unit/duration-hour"
+ // [result ]eb
+ std::string search_per("per-measure-unit/");
+ begin = str.find(search_per, begin);
+ // "measure-unit/length-kilometer per-measure-unit/duration-hour"
+ // [result ]e b
+ if (begin == str.npos) {
+ return result;
+ }
+ // Skip the type (ex: "duration").
+ begin = str.find("-", begin + search_per.size());
+ // "measure-unit/length-kilometer per-measure-unit/duration-hour"
+ // [result ]e b
+ if (begin == str.npos) {
+ return result;
+ }
+ begin++; // Skip the '-'.
+ // "measure-unit/length-kilometer per-measure-unit/duration-hour"
+ // [result ]e b
+ end = str.find(" ", begin);
+ if (end == str.npos) {
+ end = str.size();
+ }
+ // "measure-unit/length-kilometer per-measure-unit/duration-hour"
+ // [result ] b e
+ return result + "-per-" + str.substr(begin, end - begin);
+}
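
A usage sketch for UnitFromSkeleton() above:

    std::string unit = UnitFromSkeleton(icu::UnicodeString(
        "measure-unit/length-kilometer per-measure-unit/duration-hour "
        ".### rounding-mode-half-up"));
    // unit == "kilometer-per-hour": the type prefixes ("length-", "duration-")
    // are stripped and the two subtypes are rejoined with "-per-".
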
+
} // anonymous namespace
// static
// ecma402 #sec-intl.numberformat.prototype.resolvedoptions
Handle<JSObject> JSNumberFormat::ResolvedOptions(
- Isolate* isolate, Handle<JSNumberFormat> number_format_holder) {
+ Isolate* isolate, Handle<JSNumberFormat> number_format) {
Factory* factory = isolate->factory();
+ UErrorCode status = U_ZERO_ERROR;
+ icu::number::LocalizedNumberFormatter* icu_number_formatter =
+ number_format->icu_number_formatter().raw();
+ icu::UnicodeString skeleton = icu_number_formatter->toSkeleton(status);
+ CHECK(U_SUCCESS(status));
+
+ std::string s_str;
+ s_str = skeleton.toUTF8String<std::string>(s_str);
+
// 4. Let options be ! ObjectCreate(%ObjectPrototype%).
Handle<JSObject> options = factory->NewJSObject(isolate->object_function());
- icu::NumberFormat* number_format =
- number_format_holder->icu_number_format()->raw();
- CHECK_NOT_NULL(number_format);
-
- Handle<String> locale =
- Handle<String>(number_format_holder->locale(), isolate);
+ Handle<String> locale = Handle<String>(number_format->locale(), isolate);
std::unique_ptr<char[]> locale_str = locale->ToCString();
icu::Locale icu_locale = Intl::CreateICULocale(locale_str.get());
@@ -117,69 +680,120 @@ Handle<JSObject> JSNumberFormat::ResolvedOptions(
Just(kDontThrow))
.FromJust());
}
+ Style style = StyleFromSkeleton(skeleton);
CHECK(JSReceiver::CreateDataProperty(
isolate, options, factory->style_string(),
- number_format_holder->StyleAsString(), Just(kDontThrow))
+ StyleAsString(isolate, style), Just(kDontThrow))
.FromJust());
- if (number_format_holder->style() == Style::CURRENCY) {
- icu::UnicodeString currency(number_format->getCurrency());
- DCHECK(!currency.isEmpty());
+ std::string currency = CurrencyFromSkeleton(skeleton);
+ if (!currency.empty()) {
CHECK(JSReceiver::CreateDataProperty(
isolate, options, factory->currency_string(),
- factory
- ->NewStringFromTwoByte(Vector<const uint16_t>(
- reinterpret_cast<const uint16_t*>(currency.getBuffer()),
- currency.length()))
- .ToHandleChecked(),
+ factory->NewStringFromAsciiChecked(currency.c_str()),
Just(kDontThrow))
.FromJust());
CHECK(JSReceiver::CreateDataProperty(
isolate, options, factory->currencyDisplay_string(),
- number_format_holder->CurrencyDisplayAsString(), Just(kDontThrow))
+ CurrencyDisplayString(isolate, skeleton), Just(kDontThrow))
.FromJust());
+ if (FLAG_harmony_intl_numberformat_unified) {
+ CHECK(JSReceiver::CreateDataProperty(
+ isolate, options, factory->currencySign_string(),
+ CurrencySignString(isolate, skeleton), Just(kDontThrow))
+ .FromJust());
+ }
}
- CHECK(JSReceiver::CreateDataProperty(
- isolate, options, factory->minimumIntegerDigits_string(),
- factory->NewNumberFromInt(number_format->getMinimumIntegerDigits()),
- Just(kDontThrow))
- .FromJust());
- CHECK(
- JSReceiver::CreateDataProperty(
- isolate, options, factory->minimumFractionDigits_string(),
- factory->NewNumberFromInt(number_format->getMinimumFractionDigits()),
- Just(kDontThrow))
- .FromJust());
+
+ if (FLAG_harmony_intl_numberformat_unified) {
+ std::string unit = UnitFromSkeleton(skeleton);
+ if (!unit.empty()) {
+ CHECK(JSReceiver::CreateDataProperty(
+ isolate, options, factory->unit_string(),
+ isolate->factory()->NewStringFromAsciiChecked(unit.c_str()),
+ Just(kDontThrow))
+ .FromJust());
+ }
+ if (style == Style::UNIT || style == Style::PERCENT) {
+ CHECK(JSReceiver::CreateDataProperty(
+ isolate, options, factory->unitDisplay_string(),
+ UnitDisplayString(isolate, skeleton), Just(kDontThrow))
+ .FromJust());
+ }
+ }
+
CHECK(
JSReceiver::CreateDataProperty(
- isolate, options, factory->maximumFractionDigits_string(),
- factory->NewNumberFromInt(number_format->getMaximumFractionDigits()),
+ isolate, options, factory->minimumIntegerDigits_string(),
+ factory->NewNumberFromInt(MinimumIntegerDigitsFromSkeleton(skeleton)),
Just(kDontThrow))
.FromJust());
- CHECK(number_format->getDynamicClassID() ==
- icu::DecimalFormat::getStaticClassID());
- icu::DecimalFormat* decimal_format =
- static_cast<icu::DecimalFormat*>(number_format);
- CHECK_NOT_NULL(decimal_format);
- if (decimal_format->areSignificantDigitsUsed()) {
+ int32_t minimum = 0, maximum = 0;
+ bool output_fraction =
+ FractionDigitsFromSkeleton(skeleton, &minimum, &maximum);
+
+ if (!FLAG_harmony_intl_numberformat_unified && !output_fraction) {
+ // The current ECMA 402 spec mandates recording
+ // (Min|Max)imumFractionDigits unconditionally, while the unified number
+ // proposal will eventually record either (Min|Max)imumFractionDigits or
+ // (Min|Max)imumSignificantDigits. Since LocalizedNumberFormatter can only
+ // remember one set, and at the 2019-01-17 ECMA402 meeting the committee
+ // decided not to take a PR addressing that prior to the unified number
+ // proposal, we have to add these two 5-bit ints into flags to remember
+ // the (Min|Max)imumFractionDigits while (Min|Max)imumSignificantDigits is
+ // present.
+ // TODO(ftang) remove the following two lines once we ship
+ // intl-number-format-unified
+ output_fraction = true;
+ minimum = number_format->minimum_fraction_digits();
+ maximum = number_format->maximum_fraction_digits();
+ }
+ if (output_fraction) {
+ CHECK(JSReceiver::CreateDataProperty(
+ isolate, options, factory->minimumFractionDigits_string(),
+ factory->NewNumberFromInt(minimum), Just(kDontThrow))
+ .FromJust());
+ CHECK(JSReceiver::CreateDataProperty(
+ isolate, options, factory->maximumFractionDigits_string(),
+ factory->NewNumberFromInt(maximum), Just(kDontThrow))
+ .FromJust());
+ }
+ minimum = 0;
+ maximum = 0;
+ if (SignificantDigitsFromSkeleton(skeleton, &minimum, &maximum)) {
CHECK(JSReceiver::CreateDataProperty(
isolate, options, factory->minimumSignificantDigits_string(),
- factory->NewNumberFromInt(
- decimal_format->getMinimumSignificantDigits()),
- Just(kDontThrow))
+ factory->NewNumberFromInt(minimum), Just(kDontThrow))
.FromJust());
CHECK(JSReceiver::CreateDataProperty(
isolate, options, factory->maximumSignificantDigits_string(),
- factory->NewNumberFromInt(
- decimal_format->getMaximumSignificantDigits()),
- Just(kDontThrow))
+ factory->NewNumberFromInt(maximum), Just(kDontThrow))
.FromJust());
}
+
CHECK(JSReceiver::CreateDataProperty(
isolate, options, factory->useGrouping_string(),
- factory->ToBoolean((number_format->isGroupingUsed() == TRUE)),
+ factory->ToBoolean(UseGroupingFromSkeleton(skeleton)),
Just(kDontThrow))
.FromJust());
+ if (FLAG_harmony_intl_numberformat_unified) {
+ Notation notation = NotationFromSkeleton(skeleton);
+ CHECK(JSReceiver::CreateDataProperty(
+ isolate, options, factory->notation_string(),
+ NotationAsString(isolate, notation), Just(kDontThrow))
+ .FromJust());
+ // Only output compactDisplay when notation is compact.
+ if (notation == Notation::COMPACT) {
+ CHECK(JSReceiver::CreateDataProperty(
+ isolate, options, factory->compactDisplay_string(),
+ CompactDisplayString(isolate, skeleton), Just(kDontThrow))
+ .FromJust());
+ }
+ CHECK(JSReceiver::CreateDataProperty(
+ isolate, options, factory->signDisplay_string(),
+ SignDisplayString(isolate, skeleton), Just(kDontThrow))
+ .FromJust());
+ }
return options;
}
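
ResolvedOptions() now recovers every option by parsing the formatter's skeleton string rather than reading cached icu::NumberFormat getters. A minimal sketch of producing such a skeleton (assumes the ICU >= 62 number-formatter API used throughout this patch):

    UErrorCode status = U_ZERO_ERROR;
    icu::number::LocalizedNumberFormatter formatter =
        icu::number::NumberFormatter::withLocale(icu::Locale::getUS())
            .roundingMode(UNUM_ROUND_HALFUP);
    icu::UnicodeString skeleton = formatter.toSkeleton(status);
    // The skeleton now contains tokens such as "rounding-mode-half-up",
    // which the helpers above map back to resolved options.
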
@@ -189,7 +803,7 @@ MaybeHandle<JSNumberFormat> JSNumberFormat::UnwrapNumberFormat(
// Old code copied from NumberFormat::Unwrap; it had no spec comment and
// compiled but failed unit tests.
Handle<Context> native_context =
- Handle<Context>(isolate->context()->native_context(), isolate);
+ Handle<Context>(isolate->context().native_context(), isolate);
Handle<JSFunction> constructor = Handle<JSFunction>(
JSFunction::cast(native_context->intl_number_format_function()), isolate);
Handle<Object> object;
@@ -216,7 +830,6 @@ MaybeHandle<JSNumberFormat> JSNumberFormat::UnwrapNumberFormat(
MaybeHandle<JSNumberFormat> JSNumberFormat::Initialize(
Isolate* isolate, Handle<JSNumberFormat> number_format,
Handle<Object> locales, Handle<Object> options_obj) {
- // set the flags to 0 ASAP.
number_format->set_flags(0);
Factory* factory = isolate->factory();
@@ -252,6 +865,19 @@ MaybeHandle<JSNumberFormat> JSNumberFormat::Initialize(
MAYBE_RETURN(maybe_locale_matcher, MaybeHandle<JSNumberFormat>());
Intl::MatcherOption matcher = maybe_locale_matcher.FromJust();
+ std::unique_ptr<char[]> numbering_system_str = nullptr;
+ if (FLAG_harmony_intl_add_calendar_numbering_system) {
+ // 7. Let _numberingSystem_ be ? GetOption(_options_, `"numberingSystem"`,
+ // `"string"`, *undefined*, *undefined*).
+ Maybe<bool> maybe_numberingSystem = Intl::GetNumberingSystem(
+ isolate, options, "Intl.RelativeTimeFormat", &numbering_system_str);
+ // 8. If _numberingSystem_ is not *undefined*, then
+ // a. If _numberingSystem_ does not match the
+ // `(3*8alphanum) *("-" (3*8alphanum))` sequence, throw a *RangeError*
+ // exception.
+ MAYBE_RETURN(maybe_numberingSystem, MaybeHandle<JSNumberFormat>());
+ }
+
// 7. Let localeData be %NumberFormat%.[[LocaleData]].
// 8. Let r be ResolveLocale(%NumberFormat%.[[AvailableLocales]],
// requestedLocales, opt, %NumberFormat%.[[RelevantExtensionKeys]],
@@ -261,24 +887,43 @@ MaybeHandle<JSNumberFormat> JSNumberFormat::Initialize(
Intl::ResolveLocale(isolate, JSNumberFormat::GetAvailableLocales(),
requested_locales, matcher, relevant_extension_keys);
+ UErrorCode status = U_ZERO_ERROR;
+ if (numbering_system_str != nullptr) {
+ r.icu_locale.setUnicodeKeywordValue("nu", numbering_system_str.get(),
+ status);
+ CHECK(U_SUCCESS(status));
+ r.locale = Intl::ToLanguageTag(r.icu_locale).FromJust();
+ }
+
// 9. Set numberFormat.[[Locale]] to r.[[locale]].
Handle<String> locale_str =
isolate->factory()->NewStringFromAsciiChecked(r.locale.c_str());
number_format->set_locale(*locale_str);
// 11. Let dataLocale be r.[[dataLocale]].
- //
+
+ icu::number::LocalizedNumberFormatter icu_number_formatter =
+ icu::number::NumberFormatter::withLocale(r.icu_locale)
+ .roundingMode(UNUM_ROUND_HALFUP);
+
// 12. Let style be ? GetOption(options, "style", "string", « "decimal",
// "percent", "currency" », "decimal").
const char* service = "Intl.NumberFormat";
+
+ std::vector<const char*> style_str_values({"decimal", "percent", "currency"});
+ std::vector<Style> style_enum_values(
+ {Style::DECIMAL, Style::PERCENT, Style::CURRENCY});
+ if (FLAG_harmony_intl_numberformat_unified) {
+ style_str_values.push_back("unit");
+ style_enum_values.push_back(Style::UNIT);
+ }
Maybe<Style> maybe_style = Intl::GetStringOption<Style>(
- isolate, options, "style", service, {"decimal", "percent", "currency"},
- {Style::DECIMAL, Style::PERCENT, Style::CURRENCY}, Style::DECIMAL);
+ isolate, options, "style", service, style_str_values, style_enum_values,
+ Style::DECIMAL);
MAYBE_RETURN(maybe_style, MaybeHandle<JSNumberFormat>());
Style style = maybe_style.FromJust();
// 13. Set numberFormat.[[Style]] to style.
- number_format->set_style(style);
// 14. Let currency be ? GetOption(options, "currency", "string", undefined,
// undefined).
@@ -298,7 +943,8 @@ MaybeHandle<JSNumberFormat> JSNumberFormat::Initialize(
if (!IsWellFormedCurrencyCode(currency)) {
THROW_NEW_ERROR(
isolate,
- NewRangeError(MessageTemplate::kInvalidCurrencyCode,
+ NewRangeError(MessageTemplate::kInvalid,
+ factory->NewStringFromStaticChars("currency code"),
factory->NewStringFromAsciiChecked(currency.c_str())),
JSNumberFormat);
}
@@ -324,85 +970,131 @@ MaybeHandle<JSNumberFormat> JSNumberFormat::Initialize(
// 18. Let currencyDisplay be ? GetOption(options, "currencyDisplay",
// "string", « "code", "symbol", "name" », "symbol").
- Maybe<CurrencyDisplay> maybe_currencyDisplay =
+ std::vector<const char*> currency_display_str_values(
+ {"code", "symbol", "name"});
+ std::vector<CurrencyDisplay> currency_display_enum_values(
+ {CurrencyDisplay::CODE, CurrencyDisplay::SYMBOL, CurrencyDisplay::NAME});
+ if (FLAG_harmony_intl_numberformat_unified) {
+ currency_display_str_values.push_back("narrow-symbol");
+ currency_display_enum_values.push_back(CurrencyDisplay::NARROW_SYMBOL);
+ }
+ Maybe<CurrencyDisplay> maybe_currency_display =
Intl::GetStringOption<CurrencyDisplay>(
isolate, options, "currencyDisplay", service,
- {"code", "symbol", "name"},
- {CurrencyDisplay::CODE, CurrencyDisplay::SYMBOL,
- CurrencyDisplay::NAME},
+ currency_display_str_values, currency_display_enum_values,
CurrencyDisplay::SYMBOL);
- MAYBE_RETURN(maybe_currencyDisplay, MaybeHandle<JSNumberFormat>());
- CurrencyDisplay currency_display = maybe_currencyDisplay.FromJust();
- UNumberFormatStyle format_style = ToNumberFormatStyle(currency_display);
-
- UErrorCode status = U_ZERO_ERROR;
- std::unique_ptr<icu::NumberFormat> icu_number_format;
- icu::Locale no_extension_locale(r.icu_locale.getBaseName());
- if (style == Style::DECIMAL) {
- icu_number_format.reset(
- icu::NumberFormat::createInstance(r.icu_locale, status));
- // If the subclass is not DecimalFormat, fallback to no extension
- // because other subclass has not support the format() with
- // FieldPositionIterator yet.
- if (U_FAILURE(status) || icu_number_format.get() == nullptr ||
- icu_number_format->getDynamicClassID() !=
- icu::DecimalFormat::getStaticClassID()) {
- status = U_ZERO_ERROR;
- icu_number_format.reset(
- icu::NumberFormat::createInstance(no_extension_locale, status));
- }
- } else if (style == Style::PERCENT) {
- icu_number_format.reset(
- icu::NumberFormat::createPercentInstance(r.icu_locale, status));
- // If the subclass is not DecimalFormat, fallback to no extension
- // because other subclass has not support the format() with
- // FieldPositionIterator yet.
- if (U_FAILURE(status) || icu_number_format.get() == nullptr ||
- icu_number_format->getDynamicClassID() !=
- icu::DecimalFormat::getStaticClassID()) {
- status = U_ZERO_ERROR;
- icu_number_format.reset(icu::NumberFormat::createPercentInstance(
- no_extension_locale, status));
+ MAYBE_RETURN(maybe_currency_display, MaybeHandle<JSNumberFormat>());
+ CurrencyDisplay currency_display = maybe_currency_display.FromJust();
+
+ CurrencySign currency_sign = CurrencySign::STANDARD;
+ if (FLAG_harmony_intl_numberformat_unified) {
+ // Let currencySign be ? GetOption(options, "currencySign", "string", «
+ // "standard", "accounting" », "standard").
+ Maybe<CurrencySign> maybe_currency_sign =
+ Intl::GetStringOption<CurrencySign>(
+ isolate, options, "currencySign", service,
+ {"standard", "accounting"},
+ {CurrencySign::STANDARD, CurrencySign::ACCOUNTING},
+ CurrencySign::STANDARD);
+ MAYBE_RETURN(maybe_currency_sign, MaybeHandle<JSNumberFormat>());
+ currency_sign = maybe_currency_sign.FromJust();
+
+ // Let unit be ? GetOption(options, "unit", "string", undefined, undefined).
+ std::unique_ptr<char[]> unit_cstr;
+ Maybe<bool> found_unit = Intl::GetStringOption(
+ isolate, options, "unit", empty_values, service, &unit_cstr);
+ MAYBE_RETURN(found_unit, MaybeHandle<JSNumberFormat>());
+
+ std::string unit;
+ if (found_unit.FromJust()) {
+ DCHECK_NOT_NULL(unit_cstr.get());
+ unit = unit_cstr.get();
}
- } else {
- DCHECK_EQ(style, Style::CURRENCY);
- icu_number_format.reset(
- icu::NumberFormat::createInstance(r.icu_locale, format_style, status));
- // If the subclass is not DecimalFormat, fallback to no extension
- // because other subclass has not support the format() with
- // FieldPositionIterator yet.
- if (U_FAILURE(status) || icu_number_format.get() == nullptr ||
- icu_number_format->getDynamicClassID() !=
- icu::DecimalFormat::getStaticClassID()) {
- status = U_ZERO_ERROR;
- icu_number_format.reset(icu::NumberFormat::createInstance(
- no_extension_locale, format_style, status));
+
+ // Let unitDisplay be ? GetOption(options, "unitDisplay", "string", «
+ // "short", "narrow", "long" », "short").
+ Maybe<UnitDisplay> maybe_unit_display = Intl::GetStringOption<UnitDisplay>(
+ isolate, options, "unitDisplay", service, {"short", "narrow", "long"},
+ {UnitDisplay::SHORT, UnitDisplay::NARROW, UnitDisplay::LONG},
+ UnitDisplay::SHORT);
+ MAYBE_RETURN(maybe_unit_display, MaybeHandle<JSNumberFormat>());
+ UnitDisplay unit_display = maybe_unit_display.FromJust();
+
+ // If style is "percent", then
+ if (style == Style::PERCENT) {
+ // Let unit be "concentr-percent".
+ unit = "percent";
}
- }
+ // If style is "unit" or "percent", then
+ if (style == Style::PERCENT || style == Style::UNIT) {
+ // If unit is undefined, throw a TypeError exception.
+ if (unit == "") {
+ THROW_NEW_ERROR(
+ isolate,
+ NewTypeError(MessageTemplate::kInvalidUnit,
+ factory->NewStringFromStaticChars("Intl.NumberFormat"),
+ factory->NewStringFromStaticChars("")),
+ JSNumberFormat);
+ }
+
+ // If the result of IsWellFormedUnitIdentifier(unit) is false, throw a
+ // RangeError exception.
+ Maybe<std::pair<icu::MeasureUnit, icu::MeasureUnit>> maybe_wellformed =
+ IsWellFormedUnitIdentifier(isolate, unit);
+ if (maybe_wellformed.IsNothing()) {
+ THROW_NEW_ERROR(
+ isolate,
+ NewRangeError(
+ MessageTemplate::kInvalidUnit,
+ factory->NewStringFromStaticChars("Intl.NumberFormat"),
+ factory->NewStringFromAsciiChecked(unit.c_str())),
+ JSNumberFormat);
+ }
+ std::pair<icu::MeasureUnit, icu::MeasureUnit> unit_pair =
+ maybe_wellformed.FromJust();
- if (U_FAILURE(status) || icu_number_format.get() == nullptr) {
- status = U_ZERO_ERROR;
- // Remove extensions and try again.
- icu_number_format.reset(
- icu::NumberFormat::createInstance(no_extension_locale, status));
+ // Set intlObj.[[Unit]] to unit.
+ if (unit_pair.first != icu::NoUnit::base()) {
+ icu_number_formatter = icu_number_formatter.unit(unit_pair.first);
+ }
+ if (unit_pair.second != icu::NoUnit::base()) {
+ icu_number_formatter = icu_number_formatter.perUnit(unit_pair.second);
+ }
- if (U_FAILURE(status) || icu_number_format.get() == nullptr) {
- FATAL("Failed to create ICU number_format, are ICU data files missing?");
+ // The default unitWidth in ICU is SHORT, which UnitDisplay::SHORT maps
+ // to, so we can skip setting it as an optimization.
+ if (unit_display != UnitDisplay::SHORT) {
+ icu_number_formatter =
+ icu_number_formatter.unitWidth(ToUNumberUnitWidth(unit_display));
+ }
}
}
- DCHECK(U_SUCCESS(status));
- CHECK_NOT_NULL(icu_number_format.get());
- CHECK(icu_number_format->getDynamicClassID() ==
- icu::DecimalFormat::getStaticClassID());
+
+ if (style == Style::PERCENT) {
+ icu_number_formatter = icu_number_formatter.unit(icu::NoUnit::percent())
+ .scale(icu::number::Scale::powerOfTen(2));
+ }
+
if (style == Style::CURRENCY) {
// 19. If style is "currency", set numberFormat.[[CurrencyDisplay]] to
// currencyDisplay.
- number_format->set_currency_display(currency_display);
// 17.b. Set numberFormat.[[Currency]] to currency.
if (!currency_ustr.isEmpty()) {
- status = U_ZERO_ERROR;
- icu_number_format->setCurrency(currency_ustr.getBuffer(), status);
+ Handle<String> currency_string;
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, currency_string,
+ Intl::ToString(isolate, currency_ustr),
+ JSNumberFormat);
+
+ icu_number_formatter = icu_number_formatter.unit(
+ icu::CurrencyUnit(currency_ustr.getBuffer(), status));
+ CHECK(U_SUCCESS(status));
+ // The default unitWidth in ICU is SHORT, which CurrencyDisplay::SYMBOL
+ // maps to, so we can skip setting it as an optimization.
+ if (currency_display != CurrencyDisplay::SYMBOL) {
+ icu_number_formatter = icu_number_formatter.unitWidth(
+ ToUNumberUnitWidth(currency_display));
+ }
CHECK(U_SUCCESS(status));
}
}
@@ -430,15 +1122,75 @@ MaybeHandle<JSNumberFormat> JSNumberFormat::Initialize(
}
// 22. Perform ? SetNumberFormatDigitOptions(numberFormat, options,
// mnfdDefault, mxfdDefault).
- CHECK(icu_number_format->getDynamicClassID() ==
- icu::DecimalFormat::getStaticClassID());
- icu::DecimalFormat* icu_decimal_format =
- static_cast<icu::DecimalFormat*>(icu_number_format.get());
- Maybe<bool> maybe_set_number_for_digit_options =
- Intl::SetNumberFormatDigitOptions(isolate, icu_decimal_format, options,
- mnfd_default, mxfd_default);
- MAYBE_RETURN(maybe_set_number_for_digit_options, Handle<JSNumberFormat>());
+ Maybe<Intl::NumberFormatDigitOptions> maybe_digit_options =
+ Intl::SetNumberFormatDigitOptions(isolate, options, mnfd_default,
+ mxfd_default);
+ MAYBE_RETURN(maybe_digit_options, Handle<JSNumberFormat>());
+ Intl::NumberFormatDigitOptions digit_options = maybe_digit_options.FromJust();
+
+ icu::number::Precision precision =
+ (digit_options.minimum_significant_digits > 0)
+ ? icu::number::Precision::minMaxSignificantDigits(
+ digit_options.minimum_significant_digits,
+ digit_options.maximum_significant_digits)
+ : icu::number::Precision::minMaxFraction(
+ digit_options.minimum_fraction_digits,
+ digit_options.maximum_fraction_digits);
+
+ if (digit_options.minimum_significant_digits > 0) {
+ // The current ECMA 402 spec mandates recording
+ // (Min|Max)imumFractionDigits unconditionally, while the unified number
+ // proposal will eventually record either (Min|Max)imumFractionDigits or
+ // (Min|Max)imumSignificantDigits. Since LocalizedNumberFormatter can only
+ // remember one set, and at the 2019-01-17 ECMA402 meeting the committee
+ // decided not to take a PR addressing that prior to the unified number
+ // proposal, we have to add these two 5-bit ints into flags to remember
+ // the (Min|Max)imumFractionDigits while (Min|Max)imumSignificantDigits is
+ // present.
+ // TODO(ftang) remove the following two lines once we ship
+ // intl-number-format-unified
+ number_format->set_minimum_fraction_digits(
+ digit_options.minimum_fraction_digits);
+ number_format->set_maximum_fraction_digits(
+ digit_options.maximum_fraction_digits);
+ }
+
+ icu_number_formatter = icu_number_formatter.precision(precision);
+ if (digit_options.minimum_integer_digits > 1) {
+ icu_number_formatter =
+ icu_number_formatter.integerWidth(icu::number::IntegerWidth::zeroFillTo(
+ digit_options.minimum_integer_digits));
+ }
+ if (FLAG_harmony_intl_numberformat_unified) {
+ // Let notation be ? GetOption(options, "notation", "string", « "standard",
+ // "scientific", "engineering", "compact" », "standard").
+ Maybe<Notation> maybe_notation = Intl::GetStringOption<Notation>(
+ isolate, options, "notation", service,
+ {"standard", "scientific", "engineering", "compact"},
+ {Notation::STANDARD, Notation::SCIENTIFIC, Notation::ENGINEERING,
+ Notation::COMPACT},
+ Notation::STANDARD);
+ MAYBE_RETURN(maybe_notation, MaybeHandle<JSNumberFormat>());
+ Notation notation = maybe_notation.FromJust();
+
+ // Let compactDisplay be ? GetOption(options, "compactDisplay", "string", «
+ // "short", "long" », "short").
+ Maybe<CompactDisplay> maybe_compact_display =
+ Intl::GetStringOption<CompactDisplay>(
+ isolate, options, "compactDisplay", service, {"short", "long"},
+ {CompactDisplay::SHORT, CompactDisplay::LONG},
+ CompactDisplay::SHORT);
+ MAYBE_RETURN(maybe_compact_display, MaybeHandle<JSNumberFormat>());
+ CompactDisplay compact_display = maybe_compact_display.FromJust();
+
+ // The default notation in ICU is simple, which Notation::STANDARD maps
+ // to, so we can skip setting it.
+ if (notation != Notation::STANDARD) {
+ icu_number_formatter = icu_number_formatter.notation(
+ ToICUNotation(notation, compact_display));
+ }
+ }
// 23. Let useGrouping be ? GetOption(options, "useGrouping", "boolean",
// undefined, true).
bool use_grouping = true;
@@ -446,7 +1198,32 @@ MaybeHandle<JSNumberFormat> JSNumberFormat::Initialize(
isolate, options, "useGrouping", service, &use_grouping);
MAYBE_RETURN(found_use_grouping, MaybeHandle<JSNumberFormat>());
// 24. Set numberFormat.[[UseGrouping]] to useGrouping.
- icu_number_format->setGroupingUsed(use_grouping ? TRUE : FALSE);
+ if (!use_grouping) {
+ icu_number_formatter = icu_number_formatter.grouping(
+ UNumberGroupingStrategy::UNUM_GROUPING_OFF);
+ }
+
+ if (FLAG_harmony_intl_numberformat_unified) {
+ // Let signDisplay be ? GetOption(options, "signDisplay", "string", «
+ // "auto", "never", "always", "except-zero" », "auto").
+ Maybe<SignDisplay> maybe_sign_display = Intl::GetStringOption<SignDisplay>(
+ isolate, options, "signDisplay", service,
+ {"auto", "never", "always", "except-zero"},
+ {SignDisplay::AUTO, SignDisplay::NEVER, SignDisplay::ALWAYS,
+ SignDisplay::EXCEPT_ZERO},
+ SignDisplay::AUTO);
+ MAYBE_RETURN(maybe_sign_display, MaybeHandle<JSNumberFormat>());
+ SignDisplay sign_display = maybe_sign_display.FromJust();
+
+ // The default sign in ICU is UNUM_SIGN_AUTO, which SignDisplay::AUTO
+ // combined with CurrencySign::STANDARD maps to, so we can skip setting
+ // it for those values as an optimization.
+ if (sign_display != SignDisplay::AUTO ||
+ currency_sign != CurrencySign::STANDARD) {
+ icu_number_formatter = icu_number_formatter.sign(
+ ToUNumberSignDisplay(sign_display, currency_sign));
+ }
+ }
// 25. Let dataLocaleData be localeData.[[<dataLocale>]].
//
@@ -461,64 +1238,51 @@ MaybeHandle<JSNumberFormat> JSNumberFormat::Initialize(
//
// 30. Set numberFormat.[[NegativePattern]] to
// stylePatterns.[[negativePattern]].
-
- Handle<Managed<icu::NumberFormat>> managed_number_format =
- Managed<icu::NumberFormat>::FromUniquePtr(isolate, 0,
- std::move(icu_number_format));
- number_format->set_icu_number_format(*managed_number_format);
+ //
+ Handle<Managed<icu::number::LocalizedNumberFormatter>>
+ managed_number_formatter =
+ Managed<icu::number::LocalizedNumberFormatter>::FromRawPtr(
+ isolate, 0,
+ new icu::number::LocalizedNumberFormatter(icu_number_formatter));
+ number_format->set_icu_number_formatter(*managed_number_formatter);
number_format->set_bound_format(*factory->undefined_value());
// 31. Return numberFormat.
return number_format;
}
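
Initialize() now composes the formatter through ICU's fluent API instead of icu::NumberFormat::createInstance() plus DecimalFormat casts. A hedged sketch of the kind of chain it builds for a currency format (locale and values illustrative, ICU >= 62 assumed):

    #include "unicode/currunit.h"
    #include "unicode/numberformatter.h"

    UErrorCode status = U_ZERO_ERROR;
    icu::number::LocalizedNumberFormatter formatter =
        icu::number::NumberFormatter::withLocale(icu::Locale("en_US"))
            .roundingMode(UNUM_ROUND_HALFUP)
            .unit(icu::CurrencyUnit(u"TWD", status))
            .unitWidth(UNumberUnitWidth::UNUM_UNIT_WIDTH_FULL_NAME)
            .precision(icu::number::Precision::minMaxFraction(2, 2));
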
-Handle<String> JSNumberFormat::StyleAsString() const {
- switch (style()) {
- case Style::DECIMAL:
- return GetReadOnlyRoots().decimal_string_handle();
- case Style::PERCENT:
- return GetReadOnlyRoots().percent_string_handle();
- case Style::CURRENCY:
- return GetReadOnlyRoots().currency_string_handle();
- case Style::COUNT:
- UNREACHABLE();
- }
-}
-
-Handle<String> JSNumberFormat::CurrencyDisplayAsString() const {
- switch (currency_display()) {
- case CurrencyDisplay::CODE:
- return GetReadOnlyRoots().code_string_handle();
- case CurrencyDisplay::SYMBOL:
- return GetReadOnlyRoots().symbol_string_handle();
- case CurrencyDisplay::NAME:
- return GetReadOnlyRoots().name_string_handle();
- case CurrencyDisplay::COUNT:
- UNREACHABLE();
- }
-}
-
namespace {
Maybe<icu::UnicodeString> IcuFormatNumber(
- Isolate* isolate, const icu::NumberFormat& number_format,
+ Isolate* isolate,
+ const icu::number::LocalizedNumberFormatter& number_format,
Handle<Object> numeric_obj, icu::FieldPositionIterator* fp_iter) {
- icu::UnicodeString result;
// If it is BigInt, handle it differently.
UErrorCode status = U_ZERO_ERROR;
+ icu::number::FormattedNumber formatted;
if (numeric_obj->IsBigInt()) {
Handle<BigInt> big_int = Handle<BigInt>::cast(numeric_obj);
Handle<String> big_int_string;
ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, big_int_string,
BigInt::ToString(isolate, big_int),
Nothing<icu::UnicodeString>());
- number_format.format(
- {big_int_string->ToCString().get(), big_int_string->length()}, result,
- fp_iter, status);
+ formatted = number_format.formatDecimal(
+ {big_int_string->ToCString().get(), big_int_string->length()}, status);
} else {
double number = numeric_obj->Number();
- number_format.format(number, result, fp_iter, status);
+ formatted = number_format.formatDouble(number, status);
}
if (U_FAILURE(status)) {
+ // This can happen when ICU data trimming has trimmed out "unit" data.
+ // See https://bugs.chromium.org/p/v8/issues/detail?id=8641
+ THROW_NEW_ERROR_RETURN_VALUE(isolate,
+ NewTypeError(MessageTemplate::kIcuError),
+ Nothing<icu::UnicodeString>());
+ }
+ if (fp_iter) {
+ formatted.getAllFieldPositions(*fp_iter, status);
+ }
+ icu::UnicodeString result = formatted.toString(status);
+ if (U_FAILURE(status)) {
THROW_NEW_ERROR_RETURN_VALUE(isolate,
NewTypeError(MessageTemplate::kIcuError),
Nothing<icu::UnicodeString>());
@@ -529,17 +1293,15 @@ Maybe<icu::UnicodeString> IcuFormatNumber(
} // namespace
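
IcuFormatNumber() above formats through icu::number::FormattedNumber, extracting field positions only when the caller needs parts. A condensed sketch of that flow (ICU >= 62 assumed):

    UErrorCode status = U_ZERO_ERROR;
    icu::number::LocalizedNumberFormatter formatter =
        icu::number::NumberFormatter::withLocale(icu::Locale::getUS())
            .roundingMode(UNUM_ROUND_HALFUP);
    icu::number::FormattedNumber formatted =
        formatter.formatDouble(12345.6789, status);
    icu::FieldPositionIterator fp_iter;
    formatted.getAllFieldPositions(fp_iter, status);  // field spans for parts
    icu::UnicodeString text = formatted.toString(status);
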
MaybeHandle<String> JSNumberFormat::FormatNumeric(
- Isolate* isolate, const icu::NumberFormat& number_format,
+ Isolate* isolate,
+ const icu::number::LocalizedNumberFormatter& number_format,
Handle<Object> numeric_obj) {
DCHECK(numeric_obj->IsNumeric());
Maybe<icu::UnicodeString> maybe_format =
IcuFormatNumber(isolate, number_format, numeric_obj, nullptr);
MAYBE_RETURN(maybe_format, Handle<String>());
- icu::UnicodeString result = maybe_format.FromJust();
-
- return isolate->factory()->NewStringFromTwoByte(Vector<const uint16_t>(
- reinterpret_cast<const uint16_t*>(result.getBuffer()), result.length()));
+ return Intl::ToString(isolate, maybe_format.FromJust());
}
namespace {
@@ -651,19 +1413,12 @@ std::vector<NumberFormatSpan> FlattenRegionsToParts(
return out_parts;
}
-Maybe<int> JSNumberFormat::FormatToParts(Isolate* isolate,
- Handle<JSArray> result,
- int start_index,
- const icu::NumberFormat& number_format,
- Handle<Object> numeric_obj,
- Handle<String> unit) {
+namespace {
+Maybe<int> ConstructParts(Isolate* isolate, const icu::UnicodeString& formatted,
+ icu::FieldPositionIterator* fp_iter,
+ Handle<JSArray> result, int start_index,
+ Handle<Object> numeric_obj, Handle<String> unit) {
DCHECK(numeric_obj->IsNumeric());
- icu::FieldPositionIterator fp_iter;
- Maybe<icu::UnicodeString> maybe_format =
- IcuFormatNumber(isolate, number_format, numeric_obj, &fp_iter);
- MAYBE_RETURN(maybe_format, Nothing<int>());
- icu::UnicodeString formatted = maybe_format.FromJust();
-
int32_t length = formatted.length();
int index = start_index;
if (length == 0) return Just(index);
@@ -677,7 +1432,7 @@ Maybe<int> JSNumberFormat::FormatToParts(Isolate* isolate,
{
icu::FieldPosition fp;
- while (fp_iter.next(fp)) {
+ while (fp_iter->next(fp)) {
regions.push_back(NumberFormatSpan(fp.getField(), fp.getBeginIndex(),
fp.getEndIndex()));
}
@@ -708,18 +1463,26 @@ Maybe<int> JSNumberFormat::FormatToParts(Isolate* isolate,
return Just(index);
}
+} // namespace
+
MaybeHandle<JSArray> JSNumberFormat::FormatToParts(
Isolate* isolate, Handle<JSNumberFormat> number_format,
Handle<Object> numeric_obj) {
CHECK(numeric_obj->IsNumeric());
Factory* factory = isolate->factory();
- icu::NumberFormat* fmt = number_format->icu_number_format()->raw();
+ icu::number::LocalizedNumberFormatter* fmt =
+ number_format->icu_number_formatter().raw();
CHECK_NOT_NULL(fmt);
- Handle<JSArray> result = factory->NewJSArray(0);
+ icu::FieldPositionIterator fp_iter;
+ Maybe<icu::UnicodeString> maybe_format =
+ IcuFormatNumber(isolate, *fmt, numeric_obj, &fp_iter);
+ MAYBE_RETURN(maybe_format, Handle<JSArray>());
- Maybe<int> maybe_format_to_parts = JSNumberFormat::FormatToParts(
- isolate, result, 0, *fmt, numeric_obj, Handle<String>());
+ Handle<JSArray> result = factory->NewJSArray(0);
+ Maybe<int> maybe_format_to_parts =
+ ConstructParts(isolate, maybe_format.FromJust(), &fp_iter, result, 0,
+ numeric_obj, Handle<String>());
MAYBE_RETURN(maybe_format_to_parts, Handle<JSArray>());
return result;
diff --git a/deps/v8/src/objects/js-number-format.h b/deps/v8/src/objects/js-number-format.h
index 6857989c22..6c59e76f7a 100644
--- a/deps/v8/src/objects/js-number-format.h
+++ b/deps/v8/src/objects/js-number-format.h
@@ -12,17 +12,19 @@
#include <set>
#include <string>
+#include "src/execution/isolate.h"
#include "src/heap/factory.h"
-#include "src/isolate.h"
-#include "src/objects.h"
#include "src/objects/intl-objects.h"
#include "src/objects/managed.h"
+#include "src/objects/objects.h"
+#include "unicode/numberformatter.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
namespace U_ICU_NAMESPACE {
class NumberFormat;
+class UnicodeString;
} // namespace U_ICU_NAMESPACE
namespace v8 {
@@ -47,86 +49,55 @@ class JSNumberFormat : public JSObject {
Isolate* isolate, Handle<JSNumberFormat> number_format,
Handle<Object> numeric_obj);
- // A utility function used by the above JSNumberFormat::FormatToParts()
- // and JSRelativeTimeFormat::FormatToParts().
- // Format the number by using the icu::NumberFormat to get the field
- // information. It add an object into the result array, starting from the
- // start_index and return the total number of elements in the result array.
- // For each object added as element, it set the substring of the field as
- // "value", the field type as "type". If the unit is not null, it also set
- // unit as "unit" to each added object.
- V8_WARN_UNUSED_RESULT static Maybe<int> FormatToParts(
- Isolate* isolate, Handle<JSArray> result, int start_index,
- const icu::NumberFormat& fmt, Handle<Object> numeric_obj,
- Handle<String> unit);
-
V8_WARN_UNUSED_RESULT static MaybeHandle<String> FormatNumeric(
- Isolate* isolate, const icu::NumberFormat& number_format,
+ Isolate* isolate,
+ const icu::number::LocalizedNumberFormatter& number_format,
Handle<Object> numeric_obj);
V8_EXPORT_PRIVATE static const std::set<std::string>& GetAvailableLocales();
- Handle<String> StyleAsString() const;
- Handle<String> CurrencyDisplayAsString() const;
-
DECL_CAST(JSNumberFormat)
DECL_PRINTER(JSNumberFormat)
DECL_VERIFIER(JSNumberFormat)
- // [[Style]] is one of the values "decimal", "percent" or "currency",
- // identifying the style of the number format.
- enum class Style {
- DECIMAL,
- PERCENT,
- CURRENCY,
-
- COUNT
- };
- inline void set_style(Style style);
- inline Style style() const;
-
- // [[CurrencyDisplay]] is one of the values "code", "symbol" or "name",
- // identifying the display of the currency number format.
- enum class CurrencyDisplay {
- CODE,
- SYMBOL,
- NAME,
-
- COUNT
- };
- inline void set_currency_display(CurrencyDisplay currency_display);
- inline CurrencyDisplay currency_display() const;
-
-// Layout description.
-#define JS_NUMBER_FORMAT_FIELDS(V) \
- V(kLocaleOffset, kTaggedSize) \
- V(kICUNumberFormatOffset, kTaggedSize) \
- V(kBoundFormatOffset, kTaggedSize) \
- V(kFlagsOffset, kTaggedSize) \
- /* Total size. */ \
- V(kSize, 0)
-
- DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize, JS_NUMBER_FORMAT_FIELDS)
-#undef JS_NUMBER_FORMAT_FIELDS
+ // The current ECMA 402 spec mandates recording (Min|Max)imumFractionDigits
+ // unconditionally, while the unified number proposal will eventually record
+ // either (Min|Max)imumFractionDigits or (Min|Max)imumSignificantDigits.
+ // Since LocalizedNumberFormatter can only remember one set, and at the
+ // 2019-01-17 ECMA402 meeting the committee decided not to take a PR
+ // addressing that prior to the unified number proposal, we have to add these
+ // two 5-bit ints into flags to remember the (Min|Max)imumFractionDigits
+ // while (Min|Max)imumSignificantDigits is present.
+ // TODO(ftang) remove the following once we ship intl-number-format-unified
+ // * Four inline functions: (set_)?(min|max)imum_fraction_digits
+ // * kFlagsOffset
+ // * #define FLAGS_BIT_FIELDS
+ // * DECL_INT_ACCESSORS(flags)
+
+ inline int minimum_fraction_digits() const;
+ inline void set_minimum_fraction_digits(int digits);
+
+ inline int maximum_fraction_digits() const;
+ inline void set_maximum_fraction_digits(int digits);
+
+ // Layout description.
+ DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
+ TORQUE_GENERATED_JSNUMBER_FORMAT_FIELDS)
// Bit positions in |flags|.
-#define FLAGS_BIT_FIELDS(V, _) \
- V(StyleBits, Style, 2, _) \
- V(CurrencyDisplayBits, CurrencyDisplay, 2, _)
+#define FLAGS_BIT_FIELDS(V, _) \
+ V(MinimumFractionDigitsBits, int, 5, _) \
+ V(MaximumFractionDigitsBits, int, 5, _)
DEFINE_BIT_FIELDS(FLAGS_BIT_FIELDS)
#undef FLAGS_BIT_FIELDS
- STATIC_ASSERT(Style::DECIMAL <= StyleBits::kMax);
- STATIC_ASSERT(Style::PERCENT <= StyleBits::kMax);
- STATIC_ASSERT(Style::CURRENCY <= StyleBits::kMax);
-
- STATIC_ASSERT(CurrencyDisplay::CODE <= CurrencyDisplayBits::kMax);
- STATIC_ASSERT(CurrencyDisplay::SYMBOL <= CurrencyDisplayBits::kMax);
- STATIC_ASSERT(CurrencyDisplay::NAME <= CurrencyDisplayBits::kMax);
+ STATIC_ASSERT(20 <= MinimumFractionDigitsBits::kMax);
+ STATIC_ASSERT(20 <= MaximumFractionDigitsBits::kMax);
DECL_ACCESSORS(locale, String)
- DECL_ACCESSORS(icu_number_format, Managed<icu::NumberFormat>)
+ DECL_ACCESSORS(icu_number_formatter,
+ Managed<icu::number::LocalizedNumberFormatter>)
DECL_ACCESSORS(bound_format, Object)
DECL_INT_ACCESSORS(flags)
diff --git a/deps/v8/src/objects/js-objects-inl.h b/deps/v8/src/objects/js-objects-inl.h
index bf7076b517..6b7a7d72f0 100644
--- a/deps/v8/src/objects/js-objects-inl.h
+++ b/deps/v8/src/objects/js-objects-inl.h
@@ -7,20 +7,20 @@
#include "src/objects/js-objects.h"
-#include "src/feedback-vector.h"
-#include "src/field-index-inl.h"
#include "src/heap/heap-write-barrier.h"
-#include "src/keys.h"
-#include "src/lookup-inl.h"
#include "src/objects/embedder-data-slot-inl.h"
#include "src/objects/feedback-cell-inl.h"
+#include "src/objects/feedback-vector.h"
+#include "src/objects/field-index-inl.h"
#include "src/objects/hash-table-inl.h"
#include "src/objects/heap-number-inl.h"
+#include "src/objects/keys.h"
+#include "src/objects/lookup-inl.h"
#include "src/objects/property-array-inl.h"
+#include "src/objects/prototype-inl.h"
#include "src/objects/shared-function-info.h"
#include "src/objects/slots.h"
#include "src/objects/smi-inl.h"
-#include "src/prototype-inl.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -109,21 +109,21 @@ V8_WARN_UNUSED_RESULT MaybeHandle<FixedArray> JSReceiver::OwnPropertyKeys(
bool JSObject::PrototypeHasNoElements(Isolate* isolate, JSObject object) {
DisallowHeapAllocation no_gc;
- HeapObject prototype = HeapObject::cast(object->map()->prototype());
+ HeapObject prototype = HeapObject::cast(object.map().prototype());
ReadOnlyRoots roots(isolate);
HeapObject null = roots.null_value();
FixedArrayBase empty_fixed_array = roots.empty_fixed_array();
FixedArrayBase empty_slow_element_dictionary =
roots.empty_slow_element_dictionary();
while (prototype != null) {
- Map map = prototype->map();
- if (map->IsCustomElementsReceiverMap()) return false;
- FixedArrayBase elements = JSObject::cast(prototype)->elements();
+ Map map = prototype.map();
+ if (map.IsCustomElementsReceiverMap()) return false;
+ FixedArrayBase elements = JSObject::cast(prototype).elements();
if (elements != empty_fixed_array &&
elements != empty_slow_element_dictionary) {
return false;
}
- prototype = HeapObject::cast(map->prototype());
+ prototype = HeapObject::cast(map.prototype());
}
return true;
}
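
This hunk shows the pattern repeated through the rest of the diff: tagged types such as Object, Map and HeapObject now behave as value types, so their members are reached with '.' rather than '->', while Handle<T> keeps its '->'. A condensed sketch of the two access styles (illustrative, outside the patch):

  // Tagged values are word-sized wrappers passed by value; handles add the
  // GC-safe indirection on top and keep operator->.
  void AccessPatterns(JSObject raw, Handle<JSObject> handle) {
    Map from_value = raw.map();       // direct member access on the value
    Map from_handle = handle->map();  // handle dereference, then the call
    (void)from_value;
    (void)from_handle;
  }
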
@@ -137,7 +137,7 @@ FixedArrayBase JSObject::elements() const {
void JSObject::EnsureCanContainHeapObjectElements(Handle<JSObject> object) {
JSObject::ValidateElements(*object);
- ElementsKind elements_kind = object->map()->elements_kind();
+ ElementsKind elements_kind = object->map().elements_kind();
if (!IsObjectElementsKind(elements_kind)) {
if (IsHoleyElementsKind(elements_kind)) {
TransitionElementsKind(object, HOLEY_ELEMENTS);
@@ -167,8 +167,8 @@ void JSObject::EnsureCanContainElements(Handle<JSObject> object, TSlot objects,
if (current == the_hole) {
is_holey = true;
target_kind = GetHoleyElementsKind(target_kind);
- } else if (!current->IsSmi()) {
- if (mode == ALLOW_CONVERTED_DOUBLE_ELEMENTS && current->IsNumber()) {
+ } else if (!current.IsSmi()) {
+ if (mode == ALLOW_CONVERTED_DOUBLE_ELEMENTS && current.IsNumber()) {
if (IsSmiElementsKind(target_kind)) {
if (is_holey) {
target_kind = HOLEY_DOUBLE_ELEMENTS;
@@ -226,13 +226,13 @@ void JSObject::EnsureCanContainElements(Handle<JSObject> object,
void JSObject::SetMapAndElements(Handle<JSObject> object, Handle<Map> new_map,
Handle<FixedArrayBase> value) {
JSObject::MigrateToMap(object, new_map);
- DCHECK((object->map()->has_fast_smi_or_object_elements() ||
+ DCHECK((object->map().has_fast_smi_or_object_elements() ||
(*value == object->GetReadOnlyRoots().empty_fixed_array()) ||
- object->map()->has_fast_string_wrapper_elements()) ==
+ object->map().has_fast_string_wrapper_elements()) ==
(value->map() == object->GetReadOnlyRoots().fixed_array_map() ||
value->map() == object->GetReadOnlyRoots().fixed_cow_array_map()));
DCHECK((*value == object->GetReadOnlyRoots().empty_fixed_array()) ||
- (object->map()->has_fast_double_elements() ==
+ (object->map().has_fast_double_elements() ==
value->IsFixedDoubleArray()));
object->set_elements(*value);
}
@@ -243,16 +243,16 @@ void JSObject::set_elements(FixedArrayBase value, WriteBarrierMode mode) {
}
void JSObject::initialize_elements() {
- FixedArrayBase elements = map()->GetInitialElements();
+ FixedArrayBase elements = map().GetInitialElements();
WRITE_FIELD(*this, kElementsOffset, elements);
}
InterceptorInfo JSObject::GetIndexedInterceptor() {
- return map()->GetIndexedInterceptor();
+ return map().GetIndexedInterceptor();
}
InterceptorInfo JSObject::GetNamedInterceptor() {
- return map()->GetNamedInterceptor();
+ return map().GetNamedInterceptor();
}
int JSObject::GetHeaderSize() const { return GetHeaderSize(map()); }
@@ -261,10 +261,10 @@ int JSObject::GetHeaderSize(const Map map) {
// Check for the most common kind of JavaScript object before
// falling into the generic switch. This speeds up the internal
// field operations considerably on average.
- InstanceType instance_type = map->instance_type();
+ InstanceType instance_type = map.instance_type();
return instance_type == JS_OBJECT_TYPE
? JSObject::kHeaderSize
- : GetHeaderSize(instance_type, map->has_prototype_slot());
+ : GetHeaderSize(instance_type, map.has_prototype_slot());
}
// static
@@ -279,7 +279,7 @@ int JSObject::GetEmbedderFieldsStartOffset() {
// static
int JSObject::GetEmbedderFieldCount(const Map map) {
- int instance_size = map->instance_size();
+ int instance_size = map.instance_size();
if (instance_size == kVariableSizeSentinel) return 0;
// Embedder fields are located after the object header, whereas in-object
// properties are located at the end of the object. We don't have to round up
@@ -288,7 +288,7 @@ int JSObject::GetEmbedderFieldCount(const Map map) {
// kSystemPointerSize) anyway.
return (((instance_size - GetEmbedderFieldsStartOffset(map)) >>
kTaggedSizeLog2) -
- map->GetInObjectProperties()) /
+ map.GetInObjectProperties()) /
kEmbedderDataSlotSizeInTaggedSlots;
}
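
The expression above counts the tagged slots between the embedder-fields start offset and the in-object properties, then divides by the per-field slot count. A worked example with hypothetical numbers (illustrative, outside the patch):

  // 64-bit layout: instance_size = 96, embedder fields start at offset 24,
  // 4 in-object properties, kTaggedSizeLog2 = 3, and one tagged slot per
  // embedder field (no pointer compression).
  static_assert((((96 - 24) >> 3) - 4) / 1 == 5,
                "this layout has room for 5 embedder fields");
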
@@ -316,7 +316,7 @@ void JSObject::SetEmbedderField(int index, Smi value) {
bool JSObject::IsUnboxedDoubleField(FieldIndex index) {
if (!FLAG_unbox_double_fields) return false;
- return map()->IsUnboxedDoubleField(index);
+ return map().IsUnboxedDoubleField(index);
}
// Access fast-case object properties at index. The use of these routines
@@ -327,27 +327,35 @@ Object JSObject::RawFastPropertyAt(FieldIndex index) {
if (index.is_inobject()) {
return READ_FIELD(*this, index.offset());
} else {
- return property_array()->get(index.outobject_array_index());
+ return property_array().get(index.outobject_array_index());
}
}
double JSObject::RawFastDoublePropertyAt(FieldIndex index) {
DCHECK(IsUnboxedDoubleField(index));
- return READ_DOUBLE_FIELD(*this, index.offset());
+ return ReadField<double>(index.offset());
}
uint64_t JSObject::RawFastDoublePropertyAsBitsAt(FieldIndex index) {
DCHECK(IsUnboxedDoubleField(index));
- return READ_UINT64_FIELD(*this, index.offset());
+ return ReadField<uint64_t>(index.offset());
}
-void JSObject::RawFastPropertyAtPut(FieldIndex index, Object value) {
+void JSObject::RawFastInobjectPropertyAtPut(FieldIndex index, Object value,
+ WriteBarrierMode mode) {
+ DCHECK(index.is_inobject());
+ int offset = index.offset();
+ WRITE_FIELD(*this, offset, value);
+ CONDITIONAL_WRITE_BARRIER(*this, offset, value, mode);
+}
+
+void JSObject::RawFastPropertyAtPut(FieldIndex index, Object value,
+ WriteBarrierMode mode) {
if (index.is_inobject()) {
- int offset = index.offset();
- WRITE_FIELD(*this, offset, value);
- WRITE_BARRIER(*this, offset, value);
+ RawFastInobjectPropertyAtPut(index, value, mode);
} else {
- property_array()->set(index.outobject_array_index(), value);
+ DCHECK_EQ(UPDATE_WRITE_BARRIER, mode);
+ property_array().set(index.outobject_array_index(), value);
}
}
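
The refactored store path threads a WriteBarrierMode through in-object writes, while out-of-object writes still go through PropertyArray::set with the full barrier, hence the DCHECK_EQ(UPDATE_WRITE_BARRIER, mode) in the else branch. A sketch of a caller that may legitimately relax the barrier (illustrative, outside the patch):

  // Smis are immediates, not heap pointers, so skipping the barrier is safe;
  // the DCHECK inside RawFastInobjectPropertyAtPut still requires an
  // in-object field index.
  void StoreSmiField(JSObject object, FieldIndex index, Smi value) {
    object.RawFastInobjectPropertyAtPut(index, value, SKIP_WRITE_BARRIER);
  }
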
@@ -363,10 +371,10 @@ void JSObject::RawFastDoublePropertyAsBitsAtPut(FieldIndex index,
void JSObject::FastPropertyAtPut(FieldIndex index, Object value) {
if (IsUnboxedDoubleField(index)) {
- DCHECK(value->IsMutableHeapNumber());
+ DCHECK(value.IsMutableHeapNumber());
// Ensure that all bits of the double value are preserved.
RawFastDoublePropertyAsBitsAtPut(
- index, MutableHeapNumber::cast(value)->value_as_bits());
+ index, MutableHeapNumber::cast(value).value_as_bits());
} else {
RawFastPropertyAtPut(index, value);
}
@@ -379,26 +387,24 @@ void JSObject::WriteToField(int descriptor, PropertyDetails details,
DisallowHeapAllocation no_gc;
FieldIndex index = FieldIndex::ForDescriptor(map(), descriptor);
if (details.representation().IsDouble()) {
- // Nothing more to be done.
- if (value->IsUninitialized()) {
- return;
- }
// Manipulating the signaling NaN used for the hole and uninitialized
// double field sentinel in C++, e.g. with bit_cast or value()/set_value(),
// will change its value on ia32 (the x87 stack is used to return values
// and stores to the stack silently clear the signalling bit).
uint64_t bits;
- if (value->IsSmi()) {
+ if (value.IsSmi()) {
bits = bit_cast<uint64_t>(static_cast<double>(Smi::ToInt(value)));
+ } else if (value.IsUninitialized()) {
+ bits = kHoleNanInt64;
} else {
- DCHECK(value->IsHeapNumber());
- bits = HeapNumber::cast(value)->value_as_bits();
+ DCHECK(value.IsHeapNumber());
+ bits = HeapNumber::cast(value).value_as_bits();
}
if (IsUnboxedDoubleField(index)) {
RawFastDoublePropertyAsBitsAtPut(index, bits);
} else {
auto box = MutableHeapNumber::cast(RawFastPropertyAt(index));
- box->set_value_as_bits(bits);
+ box.set_value_as_bits(bits);
}
} else {
RawFastPropertyAtPut(index, value);
@@ -406,7 +412,7 @@ void JSObject::WriteToField(int descriptor, PropertyDetails details,
}
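
The x87 comment above is why WriteToField shuttles doubles as raw bits: loading the hole or uninitialized sentinel as a double on ia32 would quiet its signalling bit. A self-contained equivalent of the bit_cast step (illustrative, outside the patch):

  #include <cstdint>
  #include <cstring>

  // memcpy copies the exact bit pattern, so a signalling-NaN payload such as
  // kHoleNanInt64 survives the round trip through memory.
  uint64_t DoubleBits(double value) {
    uint64_t bits;
    std::memcpy(&bits, &value, sizeof(bits));
    return bits;
  }
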
int JSObject::GetInObjectPropertyOffset(int index) {
- return map()->GetInObjectPropertyOffset(index);
+ return map().GetInObjectPropertyOffset(index);
}
Object JSObject::InObjectPropertyAt(int index) {
@@ -425,15 +431,15 @@ Object JSObject::InObjectPropertyAtPut(int index, Object value,
void JSObject::InitializeBody(Map map, int start_offset,
Object pre_allocated_value, Object filler_value) {
- DCHECK_IMPLIES(filler_value->IsHeapObject(),
+ DCHECK_IMPLIES(filler_value.IsHeapObject(),
!ObjectInYoungGeneration(filler_value));
- DCHECK_IMPLIES(pre_allocated_value->IsHeapObject(),
+ DCHECK_IMPLIES(pre_allocated_value.IsHeapObject(),
!ObjectInYoungGeneration(pre_allocated_value));
- int size = map->instance_size();
+ int size = map.instance_size();
int offset = start_offset;
if (filler_value != pre_allocated_value) {
int end_of_pre_allocated_offset =
- size - (map->UnusedPropertyFields() * kTaggedSize);
+ size - (map.UnusedPropertyFields() * kTaggedSize);
DCHECK_LE(kHeaderSize, end_of_pre_allocated_offset);
while (offset < end_of_pre_allocated_offset) {
WRITE_FIELD(*this, offset, pre_allocated_value);
@@ -464,12 +470,12 @@ ACCESSORS(JSGlobalProxy, native_context, Object, kNativeContextOffset)
FeedbackVector JSFunction::feedback_vector() const {
DCHECK(has_feedback_vector());
- return FeedbackVector::cast(raw_feedback_cell()->value());
+ return FeedbackVector::cast(raw_feedback_cell().value());
}
ClosureFeedbackCellArray JSFunction::closure_feedback_cell_array() const {
DCHECK(has_closure_feedback_cell_array());
- return ClosureFeedbackCellArray::cast(raw_feedback_cell()->value());
+ return ClosureFeedbackCellArray::cast(raw_feedback_cell().value());
}
// Code objects that are marked for deoptimization are not considered to be
@@ -479,69 +485,69 @@ ClosureFeedbackCellArray JSFunction::closure_feedback_cell_array() const {
// TODO(jupvfranco): rename this function. Maybe RunOptimizedCode,
// or IsValidOptimizedCode.
bool JSFunction::IsOptimized() {
- return is_compiled() && code()->kind() == Code::OPTIMIZED_FUNCTION &&
- !code()->marked_for_deoptimization();
+ return is_compiled() && code().kind() == Code::OPTIMIZED_FUNCTION &&
+ !code().marked_for_deoptimization();
}
bool JSFunction::HasOptimizedCode() {
return IsOptimized() ||
- (has_feedback_vector() && feedback_vector()->has_optimized_code() &&
- !feedback_vector()->optimized_code()->marked_for_deoptimization());
+ (has_feedback_vector() && feedback_vector().has_optimized_code() &&
+ !feedback_vector().optimized_code().marked_for_deoptimization());
}
bool JSFunction::HasOptimizationMarker() {
- return has_feedback_vector() && feedback_vector()->has_optimization_marker();
+ return has_feedback_vector() && feedback_vector().has_optimization_marker();
}
void JSFunction::ClearOptimizationMarker() {
DCHECK(has_feedback_vector());
- feedback_vector()->ClearOptimizationMarker();
+ feedback_vector().ClearOptimizationMarker();
}
// Optimized code marked for deoptimization will tier back down to running
// interpreted on its next activation, and already doesn't count as IsOptimized.
bool JSFunction::IsInterpreted() {
- return is_compiled() && (code()->is_interpreter_trampoline_builtin() ||
- (code()->kind() == Code::OPTIMIZED_FUNCTION &&
- code()->marked_for_deoptimization()));
+ return is_compiled() && (code().is_interpreter_trampoline_builtin() ||
+ (code().kind() == Code::OPTIMIZED_FUNCTION &&
+ code().marked_for_deoptimization()));
}
bool JSFunction::ChecksOptimizationMarker() {
- return code()->checks_optimization_marker();
+ return code().checks_optimization_marker();
}
bool JSFunction::IsMarkedForOptimization() {
- return has_feedback_vector() && feedback_vector()->optimization_marker() ==
+ return has_feedback_vector() && feedback_vector().optimization_marker() ==
OptimizationMarker::kCompileOptimized;
}
bool JSFunction::IsMarkedForConcurrentOptimization() {
return has_feedback_vector() &&
- feedback_vector()->optimization_marker() ==
+ feedback_vector().optimization_marker() ==
OptimizationMarker::kCompileOptimizedConcurrent;
}
bool JSFunction::IsInOptimizationQueue() {
- return has_feedback_vector() && feedback_vector()->optimization_marker() ==
+ return has_feedback_vector() && feedback_vector().optimization_marker() ==
OptimizationMarker::kInOptimizationQueue;
}
void JSFunction::CompleteInobjectSlackTrackingIfActive() {
if (!has_prototype_slot()) return;
- if (has_initial_map() && initial_map()->IsInobjectSlackTrackingInProgress()) {
- initial_map()->CompleteInobjectSlackTracking(GetIsolate());
+ if (has_initial_map() && initial_map().IsInobjectSlackTrackingInProgress()) {
+ initial_map().CompleteInobjectSlackTracking(GetIsolate());
}
}
AbstractCode JSFunction::abstract_code() {
if (IsInterpreted()) {
- return AbstractCode::cast(shared()->GetBytecodeArray());
+ return AbstractCode::cast(shared().GetBytecodeArray());
} else {
return AbstractCode::cast(code());
}
}
-int JSFunction::length() { return shared()->length(); }
+int JSFunction::length() { return shared().length(); }
Code JSFunction::code() const {
return Code::cast(RELAXED_READ_FIELD(*this, kCodeOffset));
@@ -570,14 +576,14 @@ void JSFunction::set_shared(SharedFunctionInfo value, WriteBarrierMode mode) {
}
void JSFunction::ClearOptimizedCodeSlot(const char* reason) {
- if (has_feedback_vector() && feedback_vector()->has_optimized_code()) {
+ if (has_feedback_vector() && feedback_vector().has_optimized_code()) {
if (FLAG_trace_opt) {
PrintF("[evicting entry from optimizing code feedback slot (%s) for ",
reason);
ShortPrint();
PrintF("]\n");
}
- feedback_vector()->ClearOptimizedCode();
+ feedback_vector().ClearOptimizedCode();
}
}
@@ -586,17 +592,17 @@ void JSFunction::SetOptimizationMarker(OptimizationMarker marker) {
DCHECK(ChecksOptimizationMarker());
DCHECK(!HasOptimizedCode());
- feedback_vector()->SetOptimizationMarker(marker);
+ feedback_vector().SetOptimizationMarker(marker);
}
bool JSFunction::has_feedback_vector() const {
- return shared()->is_compiled() &&
- raw_feedback_cell()->value()->IsFeedbackVector();
+ return shared().is_compiled() &&
+ raw_feedback_cell().value().IsFeedbackVector();
}
bool JSFunction::has_closure_feedback_cell_array() const {
- return shared()->is_compiled() &&
- raw_feedback_cell()->value()->IsClosureFeedbackCellArray();
+ return shared().is_compiled() &&
+ raw_feedback_cell().value().IsClosureFeedbackCellArray();
}
Context JSFunction::context() {
@@ -604,57 +610,57 @@ Context JSFunction::context() {
}
bool JSFunction::has_context() const {
- return READ_FIELD(*this, kContextOffset)->IsContext();
+ return READ_FIELD(*this, kContextOffset).IsContext();
}
-JSGlobalProxy JSFunction::global_proxy() { return context()->global_proxy(); }
+JSGlobalProxy JSFunction::global_proxy() { return context().global_proxy(); }
NativeContext JSFunction::native_context() {
- return context()->native_context();
+ return context().native_context();
}
void JSFunction::set_context(Object value) {
- DCHECK(value->IsUndefined() || value->IsContext());
+ DCHECK(value.IsUndefined() || value.IsContext());
WRITE_FIELD(*this, kContextOffset, value);
WRITE_BARRIER(*this, kContextOffset, value);
}
ACCESSORS_CHECKED(JSFunction, prototype_or_initial_map, Object,
- kPrototypeOrInitialMapOffset, map()->has_prototype_slot())
+ kPrototypeOrInitialMapOffset, map().has_prototype_slot())
bool JSFunction::has_prototype_slot() const {
- return map()->has_prototype_slot();
+ return map().has_prototype_slot();
}
Map JSFunction::initial_map() { return Map::cast(prototype_or_initial_map()); }
bool JSFunction::has_initial_map() {
DCHECK(has_prototype_slot());
- return prototype_or_initial_map()->IsMap();
+ return prototype_or_initial_map().IsMap();
}
bool JSFunction::has_instance_prototype() {
DCHECK(has_prototype_slot());
- return has_initial_map() || !prototype_or_initial_map()->IsTheHole();
+ return has_initial_map() || !prototype_or_initial_map().IsTheHole();
}
bool JSFunction::has_prototype() {
DCHECK(has_prototype_slot());
- return map()->has_non_instance_prototype() || has_instance_prototype();
+ return map().has_non_instance_prototype() || has_instance_prototype();
}
bool JSFunction::has_prototype_property() {
return (has_prototype_slot() && IsConstructor()) ||
- IsGeneratorFunction(shared()->kind());
+ IsGeneratorFunction(shared().kind());
}
bool JSFunction::PrototypeRequiresRuntimeLookup() {
- return !has_prototype_property() || map()->has_non_instance_prototype();
+ return !has_prototype_property() || map().has_non_instance_prototype();
}
HeapObject JSFunction::instance_prototype() {
DCHECK(has_instance_prototype());
- if (has_initial_map()) return initial_map()->prototype();
+ if (has_initial_map()) return initial_map().prototype();
// When there is no initial map and the prototype is a JSReceiver, the
// initial map field is used for the prototype field.
return HeapObject::cast(prototype_or_initial_map());
@@ -664,19 +670,19 @@ Object JSFunction::prototype() {
DCHECK(has_prototype());
// If the function's prototype property has been set to a non-JSReceiver
// value, that value is stored in the constructor field of the map.
- if (map()->has_non_instance_prototype()) {
- Object prototype = map()->GetConstructor();
+ if (map().has_non_instance_prototype()) {
+ Object prototype = map().GetConstructor();
// The map must have a prototype in that field, not a back pointer.
- DCHECK(!prototype->IsMap());
- DCHECK(!prototype->IsFunctionTemplateInfo());
+ DCHECK(!prototype.IsMap());
+ DCHECK(!prototype.IsFunctionTemplateInfo());
return prototype;
}
return instance_prototype();
}
bool JSFunction::is_compiled() const {
- return code()->builtin_index() != Builtins::kCompileLazy &&
- shared()->is_compiled();
+ return code().builtin_index() != Builtins::kCompileLazy &&
+ shared().is_compiled();
}
bool JSFunction::NeedsResetDueToFlushedBytecode() {
@@ -686,14 +692,14 @@ bool JSFunction::NeedsResetDueToFlushedBytecode() {
Object maybe_shared = ACQUIRE_READ_FIELD(*this, kSharedFunctionInfoOffset);
Object maybe_code = RELAXED_READ_FIELD(*this, kCodeOffset);
- if (!maybe_shared->IsSharedFunctionInfo() || !maybe_code->IsCode()) {
+ if (!maybe_shared.IsSharedFunctionInfo() || !maybe_code.IsCode()) {
return false;
}
SharedFunctionInfo shared = SharedFunctionInfo::cast(maybe_shared);
Code code = Code::cast(maybe_code);
- return !shared->is_compiled() &&
- code->builtin_index() != Builtins::kCompileLazy;
+ return !shared.is_compiled() &&
+ code.builtin_index() != Builtins::kCompileLazy;
}
void JSFunction::ResetIfBytecodeFlushed() {
@@ -701,8 +707,7 @@ void JSFunction::ResetIfBytecodeFlushed() {
// Bytecode was flushed and function is now uncompiled, reset JSFunction
// by setting code to CompileLazy and clearing the feedback vector.
set_code(GetIsolate()->builtins()->builtin(i::Builtins::kCompileLazy));
- raw_feedback_cell()->set_value(
- ReadOnlyRoots(GetIsolate()).undefined_value());
+ raw_feedback_cell().reset();
}
}
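
ResetIfBytecodeFlushed pairs with NeedsResetDueToFlushedBytecode above: a function is stale when its SharedFunctionInfo has lost its bytecode while the JSFunction still points at something other than the CompileLazy stub; the new raw_feedback_cell().reset() then replaces the manual store of undefined. The predicate, distilled (illustrative, outside the patch):

  // Inputs are plain flags here; the real check reads them off the
  // SharedFunctionInfo and Code objects with acquire/relaxed loads.
  bool NeedsReset(bool shared_is_compiled, bool code_is_compile_lazy_stub) {
    return !shared_is_compiled && !code_is_compile_lazy_stub;
  }
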
@@ -718,22 +723,40 @@ ACCESSORS(JSDate, hour, Object, kHourOffset)
ACCESSORS(JSDate, min, Object, kMinOffset)
ACCESSORS(JSDate, sec, Object, kSecOffset)
+bool JSMessageObject::DidEnsureSourcePositionsAvailable() const {
+ return shared_info().IsUndefined();
+}
+
+int JSMessageObject::GetStartPosition() const {
+ DCHECK(DidEnsureSourcePositionsAvailable());
+ return start_position();
+}
+
+int JSMessageObject::GetEndPosition() const {
+ DCHECK(DidEnsureSourcePositionsAvailable());
+ return end_position();
+}
+
MessageTemplate JSMessageObject::type() const {
Object value = READ_FIELD(*this, kMessageTypeOffset);
return MessageTemplateFromInt(Smi::ToInt(value));
}
+
void JSMessageObject::set_type(MessageTemplate value) {
WRITE_FIELD(*this, kMessageTypeOffset, Smi::FromInt(static_cast<int>(value)));
}
+
ACCESSORS(JSMessageObject, argument, Object, kArgumentsOffset)
ACCESSORS(JSMessageObject, script, Script, kScriptOffset)
ACCESSORS(JSMessageObject, stack_frames, Object, kStackFramesOffset)
+ACCESSORS(JSMessageObject, shared_info, HeapObject, kSharedInfoOffset)
+ACCESSORS(JSMessageObject, bytecode_offset, Smi, kBytecodeOffsetOffset)
SMI_ACCESSORS(JSMessageObject, start_position, kStartPositionOffset)
SMI_ACCESSORS(JSMessageObject, end_position, kEndPositionOffset)
SMI_ACCESSORS(JSMessageObject, error_level, kErrorLevelOffset)
ElementsKind JSObject::GetElementsKind() const {
- ElementsKind kind = map()->elements_kind();
+ ElementsKind kind = map().elements_kind();
#if VERIFY_HEAP && DEBUG
FixedArrayBase fixed_array =
FixedArrayBase::unchecked_cast(READ_FIELD(*this, kElementsOffset));
@@ -741,22 +764,21 @@ ElementsKind JSObject::GetElementsKind() const {
// If a GC was caused while constructing this object, the elements
// pointer may point to a one pointer filler map.
if (ElementsAreSafeToExamine()) {
- Map map = fixed_array->map();
+ Map map = fixed_array.map();
if (IsSmiOrObjectElementsKind(kind)) {
DCHECK(map == GetReadOnlyRoots().fixed_array_map() ||
map == GetReadOnlyRoots().fixed_cow_array_map());
} else if (IsDoubleElementsKind(kind)) {
- DCHECK(fixed_array->IsFixedDoubleArray() ||
+ DCHECK(fixed_array.IsFixedDoubleArray() ||
fixed_array == GetReadOnlyRoots().empty_fixed_array());
} else if (kind == DICTIONARY_ELEMENTS) {
- DCHECK(fixed_array->IsFixedArray());
- DCHECK(fixed_array->IsDictionary());
+ DCHECK(fixed_array.IsFixedArray());
+ DCHECK(fixed_array.IsNumberDictionary());
} else {
- DCHECK(kind > DICTIONARY_ELEMENTS ||
- IsPackedFrozenOrSealedElementsKind(kind));
+ DCHECK(kind > DICTIONARY_ELEMENTS || IsFrozenOrSealedElementsKind(kind));
}
DCHECK(!IsSloppyArgumentsElementsKind(kind) ||
- (elements()->IsFixedArray() && elements()->length() >= 2));
+ (elements().IsFixedArray() && elements().length() >= 2));
}
#endif
return kind;
@@ -797,7 +819,11 @@ bool JSObject::HasPackedElements() {
}
bool JSObject::HasFrozenOrSealedElements() {
- return IsPackedFrozenOrSealedElementsKind(GetElementsKind());
+ return IsFrozenOrSealedElementsKind(GetElementsKind());
+}
+
+bool JSObject::HasSealedElements() {
+ return IsSealedElementsKind(GetElementsKind());
}
bool JSObject::HasFastArgumentsElements() {
@@ -824,25 +850,24 @@ bool JSObject::HasSlowStringWrapperElements() {
return GetElementsKind() == SLOW_STRING_WRAPPER_ELEMENTS;
}
-bool JSObject::HasFixedTypedArrayElements() {
+bool JSObject::HasTypedArrayElements() {
DCHECK(!elements().is_null());
- return map()->has_fixed_typed_array_elements();
+ return map().has_typed_array_elements();
}
-#define FIXED_TYPED_ELEMENTS_CHECK(Type, type, TYPE, ctype) \
- bool JSObject::HasFixed##Type##Elements() { \
- FixedArrayBase array = elements(); \
- return array->map()->instance_type() == FIXED_##TYPE##_ARRAY_TYPE; \
+#define FIXED_TYPED_ELEMENTS_CHECK(Type, type, TYPE, ctype) \
+ bool JSObject::HasFixed##Type##Elements() { \
+ return map().elements_kind() == TYPE##_ELEMENTS; \
}
TYPED_ARRAYS(FIXED_TYPED_ELEMENTS_CHECK)
#undef FIXED_TYPED_ELEMENTS_CHECK
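
With the dedicated typed-array backing stores gone, each per-type check is now a pure elements-kind comparison. Expanding the X-macro for one TYPED_ARRAYS entry shows the generated shape; this is what the preprocessor emits for (Uint8, uint8, UINT8, uint8_t):

  bool JSObject::HasFixedUint8Elements() {
    return map().elements_kind() == UINT8_ELEMENTS;
  }
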
-bool JSObject::HasNamedInterceptor() { return map()->has_named_interceptor(); }
+bool JSObject::HasNamedInterceptor() { return map().has_named_interceptor(); }
bool JSObject::HasIndexedInterceptor() {
- return map()->has_indexed_interceptor();
+ return map().has_indexed_interceptor();
}
void JSGlobalObject::set_global_dictionary(GlobalDictionary dictionary) {
@@ -865,7 +890,7 @@ void JSReceiver::initialize_properties() {
ReadOnlyRoots roots = GetReadOnlyRoots();
DCHECK(!ObjectInYoungGeneration(roots.empty_fixed_array()));
DCHECK(!ObjectInYoungGeneration(roots.empty_property_dictionary()));
- if (map()->is_dictionary_map()) {
+ if (map().is_dictionary_map()) {
WRITE_FIELD(*this, kPropertiesOrHashOffset,
roots.empty_property_dictionary());
} else {
@@ -874,10 +899,11 @@ void JSReceiver::initialize_properties() {
}
bool JSReceiver::HasFastProperties() const {
- DCHECK(
- raw_properties_or_hash()->IsSmi() ||
- (raw_properties_or_hash()->IsDictionary() == map()->is_dictionary_map()));
- return !map()->is_dictionary_map();
+ DCHECK(raw_properties_or_hash().IsSmi() ||
+ ((raw_properties_or_hash().IsGlobalDictionary() ||
+ raw_properties_or_hash().IsNameDictionary()) ==
+ map().is_dictionary_map()));
+ return !map().is_dictionary_map();
}
NameDictionary JSReceiver::property_dictionary() const {
@@ -885,7 +911,7 @@ NameDictionary JSReceiver::property_dictionary() const {
DCHECK(!HasFastProperties());
Object prop = raw_properties_or_hash();
- if (prop->IsSmi()) {
+ if (prop.IsSmi()) {
return GetReadOnlyRoots().empty_property_dictionary();
}
@@ -898,7 +924,7 @@ PropertyArray JSReceiver::property_array() const {
DCHECK(HasFastProperties());
Object prop = raw_properties_or_hash();
- if (prop->IsSmi() || prop == GetReadOnlyRoots().empty_fixed_array()) {
+ if (prop.IsSmi() || prop == GetReadOnlyRoots().empty_fixed_array()) {
return GetReadOnlyRoots().empty_property_array();
}
@@ -969,7 +995,7 @@ Maybe<PropertyAttributes> JSReceiver::GetOwnElementAttributes(
}
bool JSGlobalObject::IsDetached() {
- return global_proxy()->IsDetachedFrom(*this);
+ return global_proxy().IsDetachedFrom(*this);
}
bool JSGlobalProxy::IsDetachedFrom(JSGlobalObject global) const {
@@ -992,6 +1018,17 @@ ACCESSORS(JSAsyncFromSyncIterator, next, Object, kNextOffset)
ACCESSORS(JSStringIterator, string, String, kStringOffset)
SMI_ACCESSORS(JSStringIterator, index, kNextIndexOffset)
+// If the fast-case backing storage takes up much more memory than a dictionary
+// backing storage would, the object should have slow elements.
+// static
+static inline bool ShouldConvertToSlowElements(uint32_t used_elements,
+ uint32_t new_capacity) {
+ uint32_t size_threshold = NumberDictionary::kPreferFastElementsSizeFactor *
+ NumberDictionary::ComputeCapacity(used_elements) *
+ NumberDictionary::kEntrySize;
+ return size_threshold <= new_capacity;
+}
+
static inline bool ShouldConvertToSlowElements(JSObject object,
uint32_t capacity,
uint32_t index,
@@ -1011,13 +1048,8 @@ static inline bool ShouldConvertToSlowElements(JSObject object,
ObjectInYoungGeneration(object))) {
return false;
}
- // If the fast-case backing storage takes up much more memory than a
- // dictionary backing storage would, the object should have slow elements.
- int used_elements = object->GetFastElementsUsage();
- uint32_t size_threshold = NumberDictionary::kPreferFastElementsSizeFactor *
- NumberDictionary::ComputeCapacity(used_elements) *
- NumberDictionary::kEntrySize;
- return size_threshold <= *new_capacity;
+ return ShouldConvertToSlowElements(object.GetFastElementsUsage(),
+ *new_capacity);
}
} // namespace internal
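
Hoisting the size heuristic into its own overload makes it callable without a JSObject in hand. A quick worked example, assuming the then-current constants kPreferFastElementsSizeFactor == 3 and NumberDictionary::kEntrySize == 3 and a computed capacity of 256 for ~100 used elements (illustrative, outside the patch):

  //   size_threshold = 3 * 256 * 3 = 2304
  // Growing the fast backing store to 2304+ slots while only ~100 elements
  // are live converts the object to slow (dictionary) elements.
  static_assert(3 * 256 * 3 == 2304, "threshold arithmetic");
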
diff --git a/deps/v8/src/objects/js-objects.cc b/deps/v8/src/objects/js-objects.cc
index 5191f237f6..a0dc33909a 100644
--- a/deps/v8/src/objects/js-objects.cc
+++ b/deps/v8/src/objects/js-objects.cc
@@ -4,31 +4,31 @@
#include "src/objects/js-objects.h"
-#include "src/api-arguments-inl.h"
-#include "src/arguments.h"
-#include "src/bootstrapper.h"
-#include "src/compiler.h"
-#include "src/counters.h"
-#include "src/date.h"
-#include "src/elements.h"
-#include "src/field-type.h"
-#include "src/handles-inl.h"
+#include "src/api/api-arguments-inl.h"
+#include "src/codegen/compiler.h"
+#include "src/date/date.h"
+#include "src/execution/arguments.h"
+#include "src/execution/isolate.h"
+#include "src/handles/handles-inl.h"
+#include "src/handles/maybe-handles.h"
#include "src/heap/heap-inl.h"
#include "src/ic/ic.h"
-#include "src/isolate.h"
-#include "src/layout-descriptor.h"
-#include "src/log.h"
-#include "src/lookup.h"
-#include "src/maybe-handles.h"
-#include "src/objects-inl.h"
+#include "src/init/bootstrapper.h"
+#include "src/logging/counters.h"
+#include "src/logging/log.h"
#include "src/objects/allocation-site-inl.h"
#include "src/objects/api-callbacks.h"
#include "src/objects/arguments-inl.h"
#include "src/objects/dictionary.h"
+#include "src/objects/elements.h"
+#include "src/objects/field-type.h"
#include "src/objects/fixed-array.h"
#include "src/objects/heap-number.h"
#include "src/objects/js-array-buffer.h"
#include "src/objects/js-array-inl.h"
+#include "src/objects/layout-descriptor.h"
+#include "src/objects/lookup.h"
+#include "src/objects/objects-inl.h"
#ifdef V8_INTL_SUPPORT
#include "src/objects/js-break-iterator.h"
#include "src/objects/js-collator.h"
@@ -57,15 +57,15 @@
#include "src/objects/module.h"
#include "src/objects/oddball.h"
#include "src/objects/property-cell.h"
+#include "src/objects/property-descriptor.h"
+#include "src/objects/property.h"
#include "src/objects/prototype-info.h"
+#include "src/objects/prototype.h"
#include "src/objects/shared-function-info.h"
-#include "src/ostreams.h"
-#include "src/property-descriptor.h"
-#include "src/property.h"
-#include "src/prototype.h"
-#include "src/string-builder-inl.h"
-#include "src/string-stream.h"
-#include "src/transitions.h"
+#include "src/objects/transitions.h"
+#include "src/strings/string-builder-inl.h"
+#include "src/strings/string-stream.h"
+#include "src/utils/ostreams.h"
#include "src/wasm/wasm-objects.h"
namespace v8 {
@@ -191,19 +191,19 @@ V8_WARN_UNUSED_RESULT Maybe<bool> FastAssign(
// Non-empty strings are the only non-JSReceivers that need to be handled
// explicitly by Object.assign.
if (!source->IsJSReceiver()) {
- return Just(!source->IsString() || String::cast(*source)->length() == 0);
+ return Just(!source->IsString() || String::cast(*source).length() == 0);
}
// If the target is deprecated, the object will be updated on first store. If
// the source for that store equals the target, this will invalidate the
// cached representation of the source. Preventively upgrade the target.
// Do this on each iteration since any property load could cause deprecation.
- if (target->map()->is_deprecated()) {
+ if (target->map().is_deprecated()) {
JSObject::MigrateInstance(Handle<JSObject>::cast(target));
}
Isolate* isolate = target->GetIsolate();
- Handle<Map> map(JSReceiver::cast(*source)->map(), isolate);
+ Handle<Map> map(JSReceiver::cast(*source).map(), isolate);
if (!map->IsJSObjectMap()) return Just(false);
if (!map->OnlyHasSimpleProperties()) return Just(false);
@@ -348,7 +348,7 @@ String JSReceiver::class_name() {
if (IsJSArgumentsObject()) return roots.Arguments_string();
if (IsJSArray()) return roots.Array_string();
if (IsJSArrayBuffer()) {
- if (JSArrayBuffer::cast(*this)->is_shared()) {
+ if (JSArrayBuffer::cast(*this).is_shared()) {
return roots.SharedArrayBuffer_string();
}
return roots.ArrayBuffer_string();
@@ -360,45 +360,45 @@ String JSReceiver::class_name() {
if (IsJSMap()) return roots.Map_string();
if (IsJSMapIterator()) return roots.MapIterator_string();
if (IsJSProxy()) {
- return map()->is_callable() ? roots.Function_string()
- : roots.Object_string();
+ return map().is_callable() ? roots.Function_string()
+ : roots.Object_string();
}
if (IsJSRegExp()) return roots.RegExp_string();
if (IsJSSet()) return roots.Set_string();
if (IsJSSetIterator()) return roots.SetIterator_string();
if (IsJSTypedArray()) {
-#define SWITCH_KIND(Type, type, TYPE, ctype) \
- if (map()->elements_kind() == TYPE##_ELEMENTS) { \
- return roots.Type##Array_string(); \
+#define SWITCH_KIND(Type, type, TYPE, ctype) \
+ if (map().elements_kind() == TYPE##_ELEMENTS) { \
+ return roots.Type##Array_string(); \
}
TYPED_ARRAYS(SWITCH_KIND)
#undef SWITCH_KIND
}
if (IsJSValue()) {
- Object value = JSValue::cast(*this)->value();
- if (value->IsBoolean()) return roots.Boolean_string();
- if (value->IsString()) return roots.String_string();
- if (value->IsNumber()) return roots.Number_string();
- if (value->IsBigInt()) return roots.BigInt_string();
- if (value->IsSymbol()) return roots.Symbol_string();
- if (value->IsScript()) return roots.Script_string();
+ Object value = JSValue::cast(*this).value();
+ if (value.IsBoolean()) return roots.Boolean_string();
+ if (value.IsString()) return roots.String_string();
+ if (value.IsNumber()) return roots.Number_string();
+ if (value.IsBigInt()) return roots.BigInt_string();
+ if (value.IsSymbol()) return roots.Symbol_string();
+ if (value.IsScript()) return roots.Script_string();
UNREACHABLE();
}
if (IsJSWeakMap()) return roots.WeakMap_string();
if (IsJSWeakSet()) return roots.WeakSet_string();
if (IsJSGlobalProxy()) return roots.global_string();
- Object maybe_constructor = map()->GetConstructor();
- if (maybe_constructor->IsJSFunction()) {
+ Object maybe_constructor = map().GetConstructor();
+ if (maybe_constructor.IsJSFunction()) {
JSFunction constructor = JSFunction::cast(maybe_constructor);
- if (constructor->shared()->IsApiFunction()) {
- maybe_constructor = constructor->shared()->get_api_func_data();
+ if (constructor.shared().IsApiFunction()) {
+ maybe_constructor = constructor.shared().get_api_func_data();
}
}
- if (maybe_constructor->IsFunctionTemplateInfo()) {
+ if (maybe_constructor.IsFunctionTemplateInfo()) {
FunctionTemplateInfo info = FunctionTemplateInfo::cast(maybe_constructor);
- if (info->class_name()->IsString()) return String::cast(info->class_name());
+ if (info.class_name().IsString()) return String::cast(info.class_name());
}
return roots.Object_string();
@@ -413,23 +413,22 @@ std::pair<MaybeHandle<JSFunction>, Handle<String>> GetConstructorHelper(
// constructor on the map provides the most accurate name.
// Don't provide the info for prototypes, since their constructors are
// reclaimed and replaced by Object in OptimizeAsPrototype.
- if (!receiver->IsJSProxy() && receiver->map()->new_target_is_base() &&
- !receiver->map()->is_prototype_map()) {
- Object maybe_constructor = receiver->map()->GetConstructor();
- if (maybe_constructor->IsJSFunction()) {
+ if (!receiver->IsJSProxy() && receiver->map().new_target_is_base() &&
+ !receiver->map().is_prototype_map()) {
+ Object maybe_constructor = receiver->map().GetConstructor();
+ if (maybe_constructor.IsJSFunction()) {
JSFunction constructor = JSFunction::cast(maybe_constructor);
- String name = constructor->shared()->DebugName();
- if (name->length() != 0 &&
- !name->Equals(ReadOnlyRoots(isolate).Object_string())) {
+ String name = constructor.shared().DebugName();
+ if (name.length() != 0 &&
+ !name.Equals(ReadOnlyRoots(isolate).Object_string())) {
return std::make_pair(handle(constructor, isolate),
handle(name, isolate));
}
- } else if (maybe_constructor->IsFunctionTemplateInfo()) {
+ } else if (maybe_constructor.IsFunctionTemplateInfo()) {
FunctionTemplateInfo info = FunctionTemplateInfo::cast(maybe_constructor);
- if (info->class_name()->IsString()) {
- return std::make_pair(
- MaybeHandle<JSFunction>(),
- handle(String::cast(info->class_name()), isolate));
+ if (info.class_name().IsString()) {
+ return std::make_pair(MaybeHandle<JSFunction>(),
+ handle(String::cast(info.class_name()), isolate));
}
}
}
@@ -452,10 +451,10 @@ std::pair<MaybeHandle<JSFunction>, Handle<String>> GetConstructorHelper(
Handle<Object> maybe_constructor = JSReceiver::GetDataProperty(&it);
if (maybe_constructor->IsJSFunction()) {
JSFunction constructor = JSFunction::cast(*maybe_constructor);
- String name = constructor->shared()->DebugName();
+ String name = constructor.shared().DebugName();
- if (name->length() != 0 &&
- !name->Equals(ReadOnlyRoots(isolate).Object_string())) {
+ if (name.length() != 0 &&
+ !name.Equals(ReadOnlyRoots(isolate).Object_string())) {
return std::make_pair(handle(constructor, isolate),
handle(name, isolate));
}
@@ -480,26 +479,26 @@ Handle<String> JSReceiver::GetConstructorName(Handle<JSReceiver> receiver) {
Handle<NativeContext> JSReceiver::GetCreationContext() {
JSReceiver receiver = *this;
// Externals are JSObjects with null as a constructor.
- DCHECK(!receiver->IsExternal(GetIsolate()));
- Object constructor = receiver->map()->GetConstructor();
+ DCHECK(!receiver.IsExternal(GetIsolate()));
+ Object constructor = receiver.map().GetConstructor();
JSFunction function;
- if (constructor->IsJSFunction()) {
+ if (constructor.IsJSFunction()) {
function = JSFunction::cast(constructor);
- } else if (constructor->IsFunctionTemplateInfo()) {
+ } else if (constructor.IsFunctionTemplateInfo()) {
// Remote objects don't have a creation context.
return Handle<NativeContext>::null();
- } else if (receiver->IsJSGeneratorObject()) {
- function = JSGeneratorObject::cast(receiver)->function();
+ } else if (receiver.IsJSGeneratorObject()) {
+ function = JSGeneratorObject::cast(receiver).function();
} else {
// Functions have null as a constructor,
// but any JSFunction knows its context immediately.
- CHECK(receiver->IsJSFunction());
+ CHECK(receiver.IsJSFunction());
function = JSFunction::cast(receiver);
}
- return function->has_context()
- ? Handle<NativeContext>(function->context()->native_context(),
- receiver->GetIsolate())
+ return function.has_context()
+ ? Handle<NativeContext>(function.context().native_context(),
+ receiver.GetIsolate())
: Handle<NativeContext>::null();
}
@@ -583,50 +582,50 @@ Object SetHashAndUpdateProperties(HeapObject properties, int hash) {
DCHECK_NE(PropertyArray::kNoHashSentinel, hash);
DCHECK(PropertyArray::HashField::is_valid(hash));
- ReadOnlyRoots roots = properties->GetReadOnlyRoots();
+ ReadOnlyRoots roots = properties.GetReadOnlyRoots();
if (properties == roots.empty_fixed_array() ||
properties == roots.empty_property_array() ||
properties == roots.empty_property_dictionary()) {
return Smi::FromInt(hash);
}
- if (properties->IsPropertyArray()) {
- PropertyArray::cast(properties)->SetHash(hash);
- DCHECK_LT(0, PropertyArray::cast(properties)->length());
+ if (properties.IsPropertyArray()) {
+ PropertyArray::cast(properties).SetHash(hash);
+ DCHECK_LT(0, PropertyArray::cast(properties).length());
return properties;
}
- if (properties->IsGlobalDictionary()) {
- GlobalDictionary::cast(properties)->SetHash(hash);
+ if (properties.IsGlobalDictionary()) {
+ GlobalDictionary::cast(properties).SetHash(hash);
return properties;
}
- DCHECK(properties->IsNameDictionary());
- NameDictionary::cast(properties)->SetHash(hash);
+ DCHECK(properties.IsNameDictionary());
+ NameDictionary::cast(properties).SetHash(hash);
return properties;
}
int GetIdentityHashHelper(JSReceiver object) {
DisallowHeapAllocation no_gc;
- Object properties = object->raw_properties_or_hash();
- if (properties->IsSmi()) {
+ Object properties = object.raw_properties_or_hash();
+ if (properties.IsSmi()) {
return Smi::ToInt(properties);
}
- if (properties->IsPropertyArray()) {
- return PropertyArray::cast(properties)->Hash();
+ if (properties.IsPropertyArray()) {
+ return PropertyArray::cast(properties).Hash();
}
- if (properties->IsNameDictionary()) {
- return NameDictionary::cast(properties)->Hash();
+ if (properties.IsNameDictionary()) {
+ return NameDictionary::cast(properties).Hash();
}
- if (properties->IsGlobalDictionary()) {
- return GlobalDictionary::cast(properties)->Hash();
+ if (properties.IsGlobalDictionary()) {
+ return GlobalDictionary::cast(properties).Hash();
}
#ifdef DEBUG
- ReadOnlyRoots roots = object->GetReadOnlyRoots();
+ ReadOnlyRoots roots = object.GetReadOnlyRoots();
DCHECK(properties == roots.empty_fixed_array() ||
properties == roots.empty_property_dictionary());
#endif
@@ -646,8 +645,8 @@ void JSReceiver::SetIdentityHash(int hash) {
}
void JSReceiver::SetProperties(HeapObject properties) {
- DCHECK_IMPLIES(properties->IsPropertyArray() &&
- PropertyArray::cast(properties)->length() == 0,
+ DCHECK_IMPLIES(properties.IsPropertyArray() &&
+ PropertyArray::cast(properties).length() == 0,
properties == GetReadOnlyRoots().empty_property_array());
DisallowHeapAllocation no_gc;
int hash = GetIdentityHashHelper(*this);
@@ -679,7 +678,7 @@ Smi JSReceiver::CreateIdentityHash(Isolate* isolate, JSReceiver key) {
int hash = isolate->GenerateIdentityHash(PropertyArray::HashField::kMax);
DCHECK_NE(PropertyArray::kNoHashSentinel, hash);
- key->SetIdentityHash(hash);
+ key.SetIdentityHash(hash);
return Smi::FromInt(hash);
}
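
These helpers exist because raw_properties_or_hash is an overloaded slot: it holds a bare Smi hash when the object has no out-of-line properties, and otherwise the hash is folded into the header of the PropertyArray or dictionary. The read side, condensed with the dictionary cases omitted (illustrative, outside the patch):

  int IdentityHashSketch(Object properties) {
    if (properties.IsSmi()) return Smi::ToInt(properties);  // bare hash
    if (properties.IsPropertyArray()) {
      return PropertyArray::cast(properties).Hash();  // packed in the header
    }
    return PropertyArray::kNoHashSentinel;  // empty store: no hash assigned
  }
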
@@ -702,7 +701,7 @@ void JSReceiver::DeleteNormalizedProperty(Handle<JSReceiver> object,
if (object->IsJSGlobalObject()) {
// If we have a global object, invalidate the cell and swap in a new one.
Handle<GlobalDictionary> dictionary(
- JSGlobalObject::cast(*object)->global_dictionary(), isolate);
+ JSGlobalObject::cast(*object).global_dictionary(), isolate);
DCHECK_NE(GlobalDictionary::kNotFound, entry);
auto cell = PropertyCell::InvalidateEntry(isolate, dictionary, entry);
@@ -716,7 +715,7 @@ void JSReceiver::DeleteNormalizedProperty(Handle<JSReceiver> object,
dictionary = NameDictionary::DeleteEntry(isolate, dictionary, entry);
object->SetProperties(*dictionary);
}
- if (object->map()->is_prototype_map()) {
+ if (object->map().is_prototype_map()) {
// Invalidate prototype validity cell as this may invalidate transitioning
// store IC handlers.
JSObject::InvalidatePrototypeChains(object->map());
@@ -985,7 +984,7 @@ MaybeHandle<Object> GetPropertyWithInterceptorInternal(
// interceptor calls.
AssertNoContextChange ncc(isolate);
- if (interceptor->getter()->IsUndefined(isolate)) {
+ if (interceptor->getter().IsUndefined(isolate)) {
return isolate->factory()->undefined_value();
}
@@ -1031,7 +1030,7 @@ Maybe<PropertyAttributes> GetPropertyAttributesWithInterceptorInternal(
}
PropertyCallbackArguments args(isolate, interceptor->data(), *receiver,
*holder, Just(kDontThrow));
- if (!interceptor->query()->IsUndefined(isolate)) {
+ if (!interceptor->query().IsUndefined(isolate)) {
Handle<Object> result;
if (it->IsElement()) {
result = args.CallIndexedQuery(interceptor, it->index());
@@ -1043,7 +1042,7 @@ Maybe<PropertyAttributes> GetPropertyAttributesWithInterceptorInternal(
CHECK(result->ToInt32(&value));
return Just(static_cast<PropertyAttributes>(value));
}
- } else if (!interceptor->getter()->IsUndefined(isolate)) {
+ } else if (!interceptor->getter().IsUndefined(isolate)) {
// TODO(verwaest): Use GetPropertyWithInterceptor?
Handle<Object> result;
if (it->IsElement()) {
@@ -1066,7 +1065,7 @@ Maybe<bool> SetPropertyWithInterceptorInternal(
// interceptor calls.
AssertNoContextChange ncc(isolate);
- if (interceptor->setter()->IsUndefined(isolate)) return Just(false);
+ if (interceptor->setter().IsUndefined(isolate)) return Just(false);
Handle<JSObject> holder = it->GetHolder<JSObject>();
bool result;
@@ -1099,7 +1098,7 @@ Maybe<bool> DefinePropertyWithInterceptorInternal(
// interceptor calls.
AssertNoContextChange ncc(isolate);
- if (interceptor->definer()->IsUndefined(isolate)) return Just(false);
+ if (interceptor->definer().IsUndefined(isolate)) return Just(false);
Handle<JSObject> holder = it->GetHolder<JSObject>();
bool result;
@@ -1521,7 +1520,7 @@ Maybe<bool> GetPropertyDescriptorWithInterceptor(LookupIterator* it,
Isolate* isolate = it->isolate();
Handle<InterceptorInfo> interceptor = it->GetInterceptor();
- if (interceptor->descriptor()->IsUndefined(isolate)) return Just(false);
+ if (interceptor->descriptor().IsUndefined(isolate)) return Just(false);
Handle<Object> result;
Handle<JSObject> holder = it->GetHolder<JSObject>();
@@ -1729,7 +1728,7 @@ Maybe<bool> GenericTestIntegrityLevel(Handle<JSReceiver> receiver,
Maybe<bool> JSReceiver::TestIntegrityLevel(Handle<JSReceiver> receiver,
IntegrityLevel level) {
- if (!receiver->map()->IsCustomElementsReceiverMap()) {
+ if (!receiver->map().IsCustomElementsReceiverMap()) {
return JSObject::TestIntegrityLevel(Handle<JSObject>::cast(receiver),
level);
}
@@ -1817,7 +1816,7 @@ MaybeHandle<Object> JSReceiver::OrdinaryToPrimitive(
V8_WARN_UNUSED_RESULT Maybe<bool> FastGetOwnValuesOrEntries(
Isolate* isolate, Handle<JSReceiver> receiver, bool get_entries,
Handle<FixedArray>* result) {
- Handle<Map> map(JSReceiver::cast(*receiver)->map(), isolate);
+ Handle<Map> map(JSReceiver::cast(*receiver).map(), isolate);
if (!map->IsJSObjectMap()) return Just(false);
if (!map->OnlyHasSimpleProperties()) return Just(false);
@@ -1964,17 +1963,6 @@ MaybeHandle<FixedArray> JSReceiver::GetOwnEntries(Handle<JSReceiver> object,
try_fast_path, true);
}
-Handle<FixedArray> JSReceiver::GetOwnElementIndices(Isolate* isolate,
- Handle<JSReceiver> receiver,
- Handle<JSObject> object) {
- KeyAccumulator accumulator(isolate, KeyCollectionMode::kOwnOnly,
- ALL_PROPERTIES);
- accumulator.CollectOwnElementIndices(receiver, object);
- Handle<FixedArray> keys =
- accumulator.GetKeys(GetKeysConversion::kKeepNumbers);
- DCHECK(keys->ContainsSortedNumbers());
- return keys;
-}
Maybe<bool> JSReceiver::SetPrototype(Handle<JSReceiver> object,
Handle<Object> value, bool from_javascript,
ShouldThrow should_throw) {
@@ -1990,21 +1978,11 @@ bool JSReceiver::HasProxyInPrototype(Isolate* isolate) {
for (PrototypeIterator iter(isolate, *this, kStartAtReceiver,
PrototypeIterator::END_AT_NULL);
!iter.IsAtEnd(); iter.AdvanceIgnoringProxies()) {
- if (iter.GetCurrent()->IsJSProxy()) return true;
+ if (iter.GetCurrent().IsJSProxy()) return true;
}
return false;
}
-bool JSReceiver::HasComplexElements() {
- if (IsJSProxy()) return true;
- JSObject this_object = JSObject::cast(*this);
- if (this_object->HasIndexedInterceptor()) {
- return true;
- }
- if (!this_object->HasDictionaryElements()) return false;
- return this_object->element_dictionary()->HasComplexElements();
-}
-
// static
MaybeHandle<JSObject> JSObject::New(Handle<JSFunction> constructor,
Handle<JSReceiver> new_target,
@@ -2018,7 +1996,7 @@ MaybeHandle<JSObject> JSObject::New(Handle<JSFunction> constructor,
DCHECK(constructor->IsConstructor());
DCHECK(new_target->IsConstructor());
DCHECK(!constructor->has_initial_map() ||
- constructor->initial_map()->instance_type() != JS_FUNCTION_TYPE);
+ constructor->initial_map().instance_type() != JS_FUNCTION_TYPE);
Handle<Map> initial_map;
ASSIGN_RETURN_ON_EXCEPTION(
@@ -2063,7 +2041,7 @@ void JSObject::EnsureWritableFastElements(Handle<JSObject> object) {
object->HasFrozenOrSealedElements());
FixedArray raw_elems = FixedArray::cast(object->elements());
Isolate* isolate = object->GetIsolate();
- if (raw_elems->map() != ReadOnlyRoots(isolate).fixed_cow_array_map()) return;
+ if (raw_elems.map() != ReadOnlyRoots(isolate).fixed_cow_array_map()) return;
Handle<FixedArray> elems(raw_elems, isolate);
Handle<FixedArray> writable_elems = isolate->factory()->CopyFixedArrayWithMap(
elems, isolate->factory()->fixed_array_map());
@@ -2198,7 +2176,7 @@ bool JSObject::AllCanRead(LookupIterator* it) {
if (it->state() == LookupIterator::ACCESSOR) {
auto accessors = it->GetAccessors();
if (accessors->IsAccessorInfo()) {
- if (AccessorInfo::cast(*accessors)->all_can_read()) return true;
+ if (AccessorInfo::cast(*accessors).all_can_read()) return true;
}
} else if (it->state() == LookupIterator::INTERCEPTOR) {
if (it->GetInterceptor()->all_can_read()) return true;
@@ -2241,7 +2219,7 @@ MaybeHandle<Object> JSObject::GetPropertyWithFailedAccessCheck(
// Cross-Origin [[Get]] of Well-Known Symbols does not throw, and returns
// undefined.
Handle<Name> name = it->GetName();
- if (name->IsSymbol() && Symbol::cast(*name)->is_well_known_symbol()) {
+ if (name->IsSymbol() && Symbol::cast(*name).is_well_known_symbol()) {
return it->factory()->undefined_value();
}
@@ -2283,7 +2261,7 @@ bool JSObject::AllCanWrite(LookupIterator* it) {
if (it->state() == LookupIterator::ACCESSOR) {
Handle<Object> accessors = it->GetAccessors();
if (accessors->IsAccessorInfo()) {
- if (AccessorInfo::cast(*accessors)->all_can_write()) return true;
+ if (AccessorInfo::cast(*accessors).all_can_write()) return true;
}
}
}
@@ -2327,7 +2305,7 @@ void JSObject::SetNormalizedProperty(Handle<JSObject> object, Handle<Name> name,
int entry = dictionary->FindEntry(ReadOnlyRoots(isolate), name, hash);
if (entry == GlobalDictionary::kNotFound) {
- DCHECK_IMPLIES(global_obj->map()->is_prototype_map(),
+ DCHECK_IMPLIES(global_obj->map().is_prototype_map(),
Map::IsPrototypeChainInvalidated(global_obj->map()));
auto cell = isolate->factory()->NewPropertyCell(name);
cell->set_value(*value);
@@ -2349,7 +2327,7 @@ void JSObject::SetNormalizedProperty(Handle<JSObject> object, Handle<Name> name,
int entry = dictionary->FindEntry(isolate, name);
if (entry == NameDictionary::kNotFound) {
- DCHECK_IMPLIES(object->map()->is_prototype_map(),
+ DCHECK_IMPLIES(object->map().is_prototype_map(),
Map::IsPrototypeChainInvalidated(object->map()));
dictionary =
NameDictionary::Add(isolate, dictionary, name, value, details);
@@ -2365,11 +2343,11 @@ void JSObject::SetNormalizedProperty(Handle<JSObject> object, Handle<Name> name,
}
void JSObject::JSObjectShortPrint(StringStream* accumulator) {
- switch (map()->instance_type()) {
+ switch (map().instance_type()) {
case JS_ARRAY_TYPE: {
- double length = JSArray::cast(*this)->length()->IsUndefined()
+ double length = JSArray::cast(*this).length().IsUndefined()
? 0
- : JSArray::cast(*this)->length()->Number();
+ : JSArray::cast(*this).length().Number();
accumulator->Add("<JSArray[%u]>", static_cast<uint32_t>(length));
break;
}
@@ -2378,7 +2356,7 @@ void JSObject::JSObjectShortPrint(StringStream* accumulator) {
accumulator->Add("<JSBoundFunction");
accumulator->Add(" (BoundTargetFunction %p)>",
reinterpret_cast<void*>(
- bound_function->bound_target_function().ptr()));
+ bound_function.bound_target_function().ptr()));
break;
}
case JS_WEAK_MAP_TYPE: {
@@ -2392,9 +2370,9 @@ void JSObject::JSObjectShortPrint(StringStream* accumulator) {
case JS_REGEXP_TYPE: {
accumulator->Add("<JSRegExp");
JSRegExp regexp = JSRegExp::cast(*this);
- if (regexp->source()->IsString()) {
+ if (regexp.source().IsString()) {
accumulator->Add(" ");
- String::cast(regexp->source())->StringShortPrint(accumulator);
+ String::cast(regexp.source()).StringShortPrint(accumulator);
}
accumulator->Add(">");
@@ -2402,11 +2380,11 @@ void JSObject::JSObjectShortPrint(StringStream* accumulator) {
}
case JS_FUNCTION_TYPE: {
JSFunction function = JSFunction::cast(*this);
- Object fun_name = function->shared()->DebugName();
+ Object fun_name = function.shared().DebugName();
bool printed = false;
- if (fun_name->IsString()) {
+ if (fun_name.IsString()) {
String str = String::cast(fun_name);
- if (str->length() > 0) {
+ if (str.length() > 0) {
accumulator->Add("<JSFunction ");
accumulator->Put(str);
printed = true;
@@ -2416,10 +2394,10 @@ void JSObject::JSObjectShortPrint(StringStream* accumulator) {
accumulator->Add("<JSFunction");
}
if (FLAG_trace_file_names) {
- Object source_name = Script::cast(function->shared()->script())->name();
- if (source_name->IsString()) {
+ Object source_name = Script::cast(function.shared().script()).name();
+ if (source_name.IsString()) {
String str = String::cast(source_name);
- if (str->length() > 0) {
+ if (str.length() > 0) {
accumulator->Add(" <");
accumulator->Put(str);
accumulator->Add(">");
@@ -2427,7 +2405,7 @@ void JSObject::JSObjectShortPrint(StringStream* accumulator) {
}
}
accumulator->Add(" (sfi = %p)",
- reinterpret_cast<void*>(function->shared().ptr()));
+ reinterpret_cast<void*>(function.shared().ptr()));
accumulator->Put('>');
break;
}
@@ -2449,30 +2427,29 @@ void JSObject::JSObjectShortPrint(StringStream* accumulator) {
default: {
Map map_of_this = map();
Heap* heap = GetHeap();
- Object constructor = map_of_this->GetConstructor();
+ Object constructor = map_of_this.GetConstructor();
bool printed = false;
- if (constructor->IsHeapObject() &&
+ if (constructor.IsHeapObject() &&
!heap->Contains(HeapObject::cast(constructor))) {
accumulator->Add("!!!INVALID CONSTRUCTOR!!!");
} else {
bool global_object = IsJSGlobalProxy();
- if (constructor->IsJSFunction()) {
- if (!heap->Contains(JSFunction::cast(constructor)->shared())) {
+ if (constructor.IsJSFunction()) {
+ if (!heap->Contains(JSFunction::cast(constructor).shared())) {
accumulator->Add("!!!INVALID SHARED ON CONSTRUCTOR!!!");
} else {
String constructor_name =
- JSFunction::cast(constructor)->shared()->Name();
- if (constructor_name->length() > 0) {
+ JSFunction::cast(constructor).shared().Name();
+ if (constructor_name.length() > 0) {
accumulator->Add(global_object ? "<GlobalObject " : "<");
accumulator->Put(constructor_name);
- accumulator->Add(
- " %smap = %p",
- map_of_this->is_deprecated() ? "deprecated-" : "",
- map_of_this);
+ accumulator->Add(" %smap = %p",
+ map_of_this.is_deprecated() ? "deprecated-" : "",
+ map_of_this);
printed = true;
}
}
- } else if (constructor->IsFunctionTemplateInfo()) {
+ } else if (constructor.IsFunctionTemplateInfo()) {
accumulator->Add(global_object ? "<RemoteObject>" : "<RemoteObject>");
printed = true;
}
@@ -2482,7 +2459,7 @@ void JSObject::JSObjectShortPrint(StringStream* accumulator) {
}
if (IsJSValue()) {
accumulator->Add(" value = ");
- JSValue::cast(*this)->value()->ShortPrint(accumulator);
+ JSValue::cast(*this).value().ShortPrint(accumulator);
}
accumulator->Put('>');
break;
@@ -2512,52 +2489,52 @@ void JSObject::PrintElementsTransition(FILE* file, Handle<JSObject> object,
void JSObject::PrintInstanceMigration(FILE* file, Map original_map,
Map new_map) {
- if (new_map->is_dictionary_map()) {
+ if (new_map.is_dictionary_map()) {
PrintF(file, "[migrating to slow]\n");
return;
}
PrintF(file, "[migrating]");
- DescriptorArray o = original_map->instance_descriptors();
- DescriptorArray n = new_map->instance_descriptors();
- for (int i = 0; i < original_map->NumberOfOwnDescriptors(); i++) {
- Representation o_r = o->GetDetails(i).representation();
- Representation n_r = n->GetDetails(i).representation();
+ DescriptorArray o = original_map.instance_descriptors();
+ DescriptorArray n = new_map.instance_descriptors();
+ for (int i = 0; i < original_map.NumberOfOwnDescriptors(); i++) {
+ Representation o_r = o.GetDetails(i).representation();
+ Representation n_r = n.GetDetails(i).representation();
if (!o_r.Equals(n_r)) {
- String::cast(o->GetKey(i))->PrintOn(file);
+ String::cast(o.GetKey(i)).PrintOn(file);
PrintF(file, ":%s->%s ", o_r.Mnemonic(), n_r.Mnemonic());
- } else if (o->GetDetails(i).location() == kDescriptor &&
- n->GetDetails(i).location() == kField) {
- Name name = o->GetKey(i);
- if (name->IsString()) {
- String::cast(name)->PrintOn(file);
+ } else if (o.GetDetails(i).location() == kDescriptor &&
+ n.GetDetails(i).location() == kField) {
+ Name name = o.GetKey(i);
+ if (name.IsString()) {
+ String::cast(name).PrintOn(file);
} else {
PrintF(file, "{symbol %p}", reinterpret_cast<void*>(name.ptr()));
}
PrintF(file, " ");
}
}
- if (original_map->elements_kind() != new_map->elements_kind()) {
- PrintF(file, "elements_kind[%i->%i]", original_map->elements_kind(),
- new_map->elements_kind());
+ if (original_map.elements_kind() != new_map.elements_kind()) {
+ PrintF(file, "elements_kind[%i->%i]", original_map.elements_kind(),
+ new_map.elements_kind());
}
PrintF(file, "\n");
}
bool JSObject::IsUnmodifiedApiObject(FullObjectSlot o) {
Object object = *o;
- if (object->IsSmi()) return false;
+ if (object.IsSmi()) return false;
HeapObject heap_object = HeapObject::cast(object);
- if (!object->IsJSObject()) return false;
+ if (!object.IsJSObject()) return false;
JSObject js_object = JSObject::cast(object);
- if (!js_object->IsDroppableApiWrapper()) return false;
- Object maybe_constructor = js_object->map()->GetConstructor();
- if (!maybe_constructor->IsJSFunction()) return false;
+ if (!js_object.IsDroppableApiWrapper()) return false;
+ Object maybe_constructor = js_object.map().GetConstructor();
+ if (!maybe_constructor.IsJSFunction()) return false;
JSFunction constructor = JSFunction::cast(maybe_constructor);
- if (js_object->elements()->length() != 0) return false;
+ if (js_object.elements().length() != 0) return false;
// Check that the object is not a key in a WeakMap (over-approximation).
- if (!js_object->GetIdentityHash()->IsUndefined()) return false;
+ if (!js_object.GetIdentityHash().IsUndefined()) return false;
- return constructor->initial_map() == heap_object->map();
+ return constructor.initial_map() == heap_object.map();
}
// static
@@ -2571,16 +2548,16 @@ void JSObject::UpdatePrototypeUserRegistration(Handle<Map> old_map,
old_map->set_prototype_info(Smi::kZero);
if (FLAG_trace_prototype_users) {
PrintF("Moving prototype_info %p from map %p to map %p.\n",
- reinterpret_cast<void*>(new_map->prototype_info()->ptr()),
+ reinterpret_cast<void*>(new_map->prototype_info().ptr()),
reinterpret_cast<void*>(old_map->ptr()),
reinterpret_cast<void*>(new_map->ptr()));
}
if (was_registered) {
- if (new_map->prototype_info()->IsPrototypeInfo()) {
+ if (new_map->prototype_info().IsPrototypeInfo()) {
// The new map isn't registered with its prototype yet; reflect this fact
// in the PrototypeInfo it just inherited from the old map.
PrototypeInfo::cast(new_map->prototype_info())
- ->set_registry_slot(PrototypeInfo::UNREGISTERED);
+ .set_registry_slot(PrototypeInfo::UNREGISTERED);
}
JSObject::LazyRegisterPrototypeUser(new_map, isolate);
}
@@ -2602,6 +2579,7 @@ void JSObject::NotifyMapChange(Handle<Map> old_map, Handle<Map> new_map,
}
namespace {
+
// To migrate a fast instance to a fast map:
// - First check whether the instance needs to be rewritten. If not, simply
// change the map.
@@ -2629,31 +2607,28 @@ void MigrateFastToFast(Handle<JSObject> object, Handle<Map> new_map) {
return;
}
+ // If the map adds a new kDescriptor property, simply set the map.
PropertyDetails details = new_map->GetLastDescriptorDetails();
- int target_index = details.field_index() - new_map->GetInObjectProperties();
- int property_array_length = object->property_array()->length();
- bool have_space = old_map->UnusedPropertyFields() > 0 ||
- (details.location() == kField && target_index >= 0 &&
- property_array_length > target_index);
- // Either new_map adds an kDescriptor property, or a kField property for
- // which there is still space, and which does not require a mutable double
- // box (an out-of-object double).
- if (details.location() == kDescriptor ||
- (have_space && ((FLAG_unbox_double_fields && target_index < 0) ||
- !details.representation().IsDouble()))) {
+ if (details.location() == kDescriptor) {
object->synchronized_set_map(*new_map);
return;
}
- // If there is still space in the object, we need to allocate a mutable
- // double box.
- if (have_space) {
- FieldIndex index =
- FieldIndex::ForDescriptor(*new_map, new_map->LastAdded());
- DCHECK(details.representation().IsDouble());
- DCHECK(!new_map->IsUnboxedDoubleField(index));
- auto value = isolate->factory()->NewMutableHeapNumberWithHoleNaN();
- object->RawFastPropertyAtPut(index, *value);
+ // Check if we still have space in the {object}, in which case we
+ // can also simply set the map (modulo a special case for mutable
+ // double boxes).
+ FieldIndex index =
+ FieldIndex::ForDescriptor(*new_map, new_map->LastAdded());
+ if (index.is_inobject() ||
+ index.outobject_array_index() < object->property_array().length()) {
+ // We still need to allocate MutableHeapNumbers for double fields
+ // if either double field unboxing is disabled or the double field
+ // is in the PropertyArray backing store (where we don't support
+ // double field unboxing).
+ if (index.is_double() && !new_map->IsUnboxedDoubleField(index)) {
+ auto value = isolate->factory()->NewMutableHeapNumberWithHoleNaN();
+ object->RawFastPropertyAtPut(index, *value);
+ }
object->synchronized_set_map(*new_map);
return;
}
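The rewritten fast path above no longer precomputes target_index and have_space; it asks the FieldIndex directly whether the new field fits in existing storage. A standalone restatement of that fit check, with assumed names rather than V8's exact signatures:

#include <iostream>

struct FieldIndex {
  bool is_inobject;
  int outobject_array_index;
};

// A newly added field can reuse existing storage when it is in-object, or
// when its out-of-object index already lies inside the current PropertyArray.
bool FitsInExistingStorage(FieldIndex index, int property_array_length) {
  return index.is_inobject ||
         index.outobject_array_index < property_array_length;
}

int main() {
  std::cout << FitsInExistingStorage({true, 0}, 0) << "\n";   // 1: in-object always fits
  std::cout << FitsInExistingStorage({false, 3}, 3) << "\n";  // 0: backing store must grow
}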
@@ -2674,8 +2649,8 @@ void MigrateFastToFast(Handle<JSObject> object, Handle<Map> new_map) {
}
DCHECK_EQ(kField, details.location());
DCHECK_EQ(kData, details.kind());
- DCHECK_GE(target_index, 0); // Must be a backing store index.
- new_storage->set(target_index, *value);
+ DCHECK(!index.is_inobject()); // Must be a backing store index.
+ new_storage->set(index.outobject_array_index(), *value);
// From here on we cannot fail and we shouldn't GC anymore.
DisallowHeapAllocation no_allocation;
@@ -2808,10 +2783,10 @@ void MigrateFastToFast(Handle<JSObject> object, Handle<Map> new_map) {
// Can't use JSObject::FastPropertyAtPut() because the proper map is not
// set yet.
if (new_map->IsUnboxedDoubleField(index)) {
- DCHECK(value->IsMutableHeapNumber());
+ DCHECK(value.IsMutableHeapNumber());
// Ensure that all bits of the double value are preserved.
object->RawFastDoublePropertyAsBitsAtPut(
- index, MutableHeapNumber::cast(value)->value_as_bits());
+ index, MutableHeapNumber::cast(value).value_as_bits());
if (i < old_number_of_fields && !old_map->IsUnboxedDoubleField(index)) {
// Transition from tagged to untagged slot.
heap->ClearRecordedSlot(*object, object->RawField(index.offset()));
@@ -2985,7 +2960,7 @@ void JSObject::MigrateToMap(Handle<JSObject> object, Handle<Map> new_map,
// Ensure that no transition was inserted for prototype migrations.
DCHECK_EQ(0, TransitionsAccessor(object->GetIsolate(), old_map)
.NumberOfTransitions());
- DCHECK(new_map->GetBackPointer()->IsUndefined());
+ DCHECK(new_map->GetBackPointer().IsUndefined());
DCHECK(object->map() != *old_map);
}
} else {
@@ -3025,15 +3000,14 @@ Handle<Map> JSObject::GetElementsTransitionMap(Handle<JSObject> object,
// static
MaybeHandle<NativeContext> JSObject::GetFunctionRealm(Handle<JSObject> object) {
- DCHECK(object->map()->is_constructor());
+ DCHECK(object->map().is_constructor());
DCHECK(!object->IsJSFunction());
return object->GetCreationContext();
}
void JSObject::AllocateStorageForMap(Handle<JSObject> object, Handle<Map> map) {
- DCHECK(object->map()->GetInObjectProperties() ==
- map->GetInObjectProperties());
- ElementsKind obj_kind = object->map()->elements_kind();
+ DCHECK(object->map().GetInObjectProperties() == map->GetInObjectProperties());
+ ElementsKind obj_kind = object->map().elements_kind();
ElementsKind map_kind = map->elements_kind();
if (map_kind != obj_kind) {
ElementsKind to_kind = GetMoreGeneralElementsKind(map_kind, obj_kind);
@@ -3143,7 +3117,7 @@ void JSObject::AddProperty(Isolate* isolate, Handle<JSObject> object,
Maybe<PropertyAttributes> maybe = GetPropertyAttributes(&it);
DCHECK(maybe.IsJust());
DCHECK(!it.IsFound());
- DCHECK(object->map()->is_extensible() || name->IsPrivate());
+ DCHECK(object->map().is_extensible() || name->IsPrivate());
#endif
CHECK(Object::AddDataProperty(&it, value, attributes,
Just(ShouldThrow::kThrowOnError),
@@ -3242,7 +3216,7 @@ Maybe<bool> JSObject::DefineOwnPropertyIgnoreAttributes(
// Special case: properties of typed arrays cannot be reconfigured to
// non-writable nor to non-enumerable.
- if (it->IsElement() && object->HasFixedTypedArrayElements()) {
+ if (it->IsElement() && object->HasTypedArrayElements()) {
return Object::RedefineIncompatibleProperty(
it->isolate(), it->GetName(), value, should_throw);
}
@@ -3329,14 +3303,7 @@ void JSObject::MigrateSlowToFast(Handle<JSObject> object,
PropertyKind kind = dictionary->DetailsAt(index).kind();
if (kind == kData) {
- if (FLAG_track_constant_fields) {
- number_of_fields += 1;
- } else {
- Object value = dictionary->ValueAt(index);
- if (!value->IsJSFunction()) {
- number_of_fields += 1;
- }
- }
+ number_of_fields += 1;
}
}
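This hunk and the descriptor-building hunk just below both follow from FLAG_track_constant_fields being removed (constant field tracking is now unconditional): every dictionary data property counts as a field during the slow-to-fast migration, and such a field starts out kConst unless the map's elements kind is still transitionable. A toy restatement of that constness choice; the enum values match the source, the helper is illustrative:

enum class PropertyConstness { kMutable, kConst };

PropertyConstness ChooseConstness(bool is_transitionable_elements_kind) {
  // A field on a map whose elements kind may still transition must stay
  // mutable; otherwise start from the stronger kConst assumption.
  return is_transitionable_elements_kind ? PropertyConstness::kMutable
                                         : PropertyConstness::kConst;
}

int main() {
  return ChooseConstness(false) == PropertyConstness::kConst ? 0 : 1;
}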
@@ -3395,7 +3362,7 @@ void JSObject::MigrateSlowToFast(Handle<JSObject> object,
Name k = dictionary->NameAt(index);
// Dictionary keys are internalized upon insertion.
// TODO(jkummerow): Turn this into a DCHECK if it's not hit in the wild.
- CHECK(k->IsUniqueName());
+ CHECK(k.IsUniqueName());
Handle<Name> key(k, isolate);
// Properly mark the {new_map} if the {key} is an "interesting symbol".
@@ -3411,22 +3378,15 @@ void JSObject::MigrateSlowToFast(Handle<JSObject> object,
Descriptor d;
if (details.kind() == kData) {
- if (!FLAG_track_constant_fields && value->IsJSFunction()) {
- d = Descriptor::DataConstant(key, handle(value, isolate),
- details.attributes());
- } else {
- // Ensure that we make constant field only when elements kind is not
- // transitionable.
- PropertyConstness constness =
- FLAG_track_constant_fields && !is_transitionable_elements_kind
- ? PropertyConstness::kConst
- : PropertyConstness::kMutable;
- d = Descriptor::DataField(
- key, current_offset, details.attributes(), constness,
- // TODO(verwaest): value->OptimalRepresentation();
- Representation::Tagged(),
- MaybeObjectHandle(FieldType::Any(isolate)));
- }
+ // Ensure that we make constant field only when elements kind is not
+ // transitionable.
+ PropertyConstness constness = is_transitionable_elements_kind
+ ? PropertyConstness::kMutable
+ : PropertyConstness::kConst;
+ d = Descriptor::DataField(
+ key, current_offset, details.attributes(), constness,
+ // TODO(verwaest): value->OptimalRepresentation();
+ Representation::Tagged(), MaybeObjectHandle(FieldType::Any(isolate)));
} else {
DCHECK_EQ(kAccessor, details.kind());
d = Descriptor::AccessorConstant(key, handle(value, isolate),
@@ -3474,9 +3434,9 @@ void JSObject::MigrateSlowToFast(Handle<JSObject> object,
}
void JSObject::RequireSlowElements(NumberDictionary dictionary) {
- if (dictionary->requires_slow_elements()) return;
- dictionary->set_requires_slow_elements();
- if (map()->is_prototype_map()) {
+ if (dictionary.requires_slow_elements()) return;
+ dictionary.set_requires_slow_elements();
+ if (map().is_prototype_map()) {
// If this object is a prototype (the callee will check), invalidate any
// prototype chains involving it.
InvalidatePrototypeChains(map());
@@ -3484,7 +3444,7 @@ void JSObject::RequireSlowElements(NumberDictionary dictionary) {
}
Handle<NumberDictionary> JSObject::NormalizeElements(Handle<JSObject> object) {
- DCHECK(!object->HasFixedTypedArrayElements());
+ DCHECK(!object->HasTypedArrayElements());
Isolate* isolate = object->GetIsolate();
bool is_sloppy_arguments = object->HasSloppyArgumentsElements();
{
@@ -3492,17 +3452,17 @@ Handle<NumberDictionary> JSObject::NormalizeElements(Handle<JSObject> object) {
FixedArrayBase elements = object->elements();
if (is_sloppy_arguments) {
- elements = SloppyArgumentsElements::cast(elements)->arguments();
+ elements = SloppyArgumentsElements::cast(elements).arguments();
}
- if (elements->IsNumberDictionary()) {
+ if (elements.IsNumberDictionary()) {
return handle(NumberDictionary::cast(elements), isolate);
}
}
DCHECK(object->HasSmiOrObjectElements() || object->HasDoubleElements() ||
object->HasFastArgumentsElements() ||
- object->HasFastStringWrapperElements());
+ object->HasFastStringWrapperElements() || object->HasSealedElements());
Handle<NumberDictionary> dictionary =
object->GetElementsAccessor()->Normalize(object);
@@ -3519,7 +3479,7 @@ Handle<NumberDictionary> JSObject::NormalizeElements(Handle<JSObject> object) {
if (is_sloppy_arguments) {
SloppyArgumentsElements::cast(object->elements())
- ->set_arguments(*dictionary);
+ .set_arguments(*dictionary);
} else {
object->set_elements(*dictionary);
}
@@ -3549,7 +3509,7 @@ Maybe<bool> JSObject::DeletePropertyWithInterceptor(LookupIterator* it,
DCHECK_EQ(LookupIterator::INTERCEPTOR, it->state());
Handle<InterceptorInfo> interceptor(it->GetInterceptor());
- if (interceptor->deleter()->IsUndefined(isolate)) return Nothing<bool>();
+ if (interceptor->deleter().IsUndefined(isolate)) return Nothing<bool>();
Handle<JSObject> holder = it->GetHolder<JSObject>();
Handle<Object> receiver = it->GetReceiver();
@@ -3615,12 +3575,12 @@ bool TestDictionaryPropertiesIntegrityLevel(Dictionary dict,
PropertyAttributes level) {
DCHECK(level == SEALED || level == FROZEN);
- uint32_t capacity = dict->Capacity();
+ uint32_t capacity = dict.Capacity();
for (uint32_t i = 0; i < capacity; i++) {
Object key;
- if (!dict->ToKey(roots, i, &key)) continue;
- if (key->FilterKey(ALL_PROPERTIES)) continue;
- PropertyDetails details = dict->DetailsAt(i);
+ if (!dict.ToKey(roots, i, &key)) continue;
+ if (key.FilterKey(ALL_PROPERTIES)) continue;
+ PropertyDetails details = dict.DetailsAt(i);
if (details.IsConfigurable()) return false;
if (level == FROZEN && details.kind() == kData && !details.IsReadOnly()) {
return false;
@@ -3631,14 +3591,14 @@ bool TestDictionaryPropertiesIntegrityLevel(Dictionary dict,
bool TestFastPropertiesIntegrityLevel(Map map, PropertyAttributes level) {
DCHECK(level == SEALED || level == FROZEN);
- DCHECK(!map->IsCustomElementsReceiverMap());
- DCHECK(!map->is_dictionary_map());
+ DCHECK(!map.IsCustomElementsReceiverMap());
+ DCHECK(!map.is_dictionary_map());
- DescriptorArray descriptors = map->instance_descriptors();
- int number_of_own_descriptors = map->NumberOfOwnDescriptors();
+ DescriptorArray descriptors = map.instance_descriptors();
+ int number_of_own_descriptors = map.NumberOfOwnDescriptors();
for (int i = 0; i < number_of_own_descriptors; i++) {
- if (descriptors->GetKey(i)->IsPrivate()) continue;
- PropertyDetails details = descriptors->GetDetails(i);
+ if (descriptors.GetKey(i).IsPrivate()) continue;
+ PropertyDetails details = descriptors.GetDetails(i);
if (details.IsConfigurable()) return false;
if (level == FROZEN && details.kind() == kData && !details.IsReadOnly()) {
return false;
@@ -3648,28 +3608,28 @@ bool TestFastPropertiesIntegrityLevel(Map map, PropertyAttributes level) {
}
bool TestPropertiesIntegrityLevel(JSObject object, PropertyAttributes level) {
- DCHECK(!object->map()->IsCustomElementsReceiverMap());
+ DCHECK(!object.map().IsCustomElementsReceiverMap());
- if (object->HasFastProperties()) {
- return TestFastPropertiesIntegrityLevel(object->map(), level);
+ if (object.HasFastProperties()) {
+ return TestFastPropertiesIntegrityLevel(object.map(), level);
}
return TestDictionaryPropertiesIntegrityLevel(
- object->property_dictionary(), object->GetReadOnlyRoots(), level);
+ object.property_dictionary(), object.GetReadOnlyRoots(), level);
}
bool TestElementsIntegrityLevel(JSObject object, PropertyAttributes level) {
- DCHECK(!object->HasSloppyArgumentsElements());
+ DCHECK(!object.HasSloppyArgumentsElements());
- ElementsKind kind = object->GetElementsKind();
+ ElementsKind kind = object.GetElementsKind();
if (IsDictionaryElementsKind(kind)) {
return TestDictionaryPropertiesIntegrityLevel(
- NumberDictionary::cast(object->elements()), object->GetReadOnlyRoots(),
+ NumberDictionary::cast(object.elements()), object.GetReadOnlyRoots(),
level);
}
- if (IsFixedTypedArrayElementsKind(kind)) {
- if (level == FROZEN && JSArrayBufferView::cast(object)->byte_length() > 0)
+ if (IsTypedArrayElementsKind(kind)) {
+ if (level == FROZEN && JSArrayBufferView::cast(object).byte_length() > 0)
return false; // TypedArrays with elements can't be frozen.
return TestPropertiesIntegrityLevel(object, level);
}
@@ -3683,9 +3643,9 @@ bool TestElementsIntegrityLevel(JSObject object, PropertyAttributes level) {
}
bool FastTestIntegrityLevel(JSObject object, PropertyAttributes level) {
- DCHECK(!object->map()->IsCustomElementsReceiverMap());
+ DCHECK(!object.map().IsCustomElementsReceiverMap());
- return !object->map()->is_extensible() &&
+ return !object.map().is_extensible() &&
TestElementsIntegrityLevel(object, level) &&
TestPropertiesIntegrityLevel(object, level);
}
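The helpers above implement Object.isSealed/Object.isFrozen-style checks without allocating: SEALED demands that every own property be non-configurable, and FROZEN additionally demands that every data property be read-only, with private symbols skipped (the descriptor-array loop does this via GetKey(i).IsPrivate()). A compact model using toy types, not V8's:

#include <vector>

enum Level { SEALED, FROZEN };
enum Kind { kData, kAccessor };

struct Details {
  bool configurable;
  bool read_only;
  Kind kind;
};

bool TestIntegrityLevel(const std::vector<Details>& props, Level level) {
  for (const Details& d : props) {
    if (d.configurable) return false;
    if (level == FROZEN && d.kind == kData && !d.read_only) return false;
  }
  return true;
}

int main() {
  std::vector<Details> props = {{false, false, kData}};
  return TestIntegrityLevel(props, SEALED) ? 0 : 1;  // sealed, but not frozen
}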
@@ -3694,7 +3654,7 @@ bool FastTestIntegrityLevel(JSObject object, PropertyAttributes level) {
Maybe<bool> JSObject::TestIntegrityLevel(Handle<JSObject> object,
IntegrityLevel level) {
- if (!object->map()->IsCustomElementsReceiverMap() &&
+ if (!object->map().IsCustomElementsReceiverMap() &&
!object->HasSloppyArgumentsElements()) {
return Just(FastTestIntegrityLevel(*object, level));
}
@@ -3717,7 +3677,7 @@ Maybe<bool> JSObject::PreventExtensions(Handle<JSObject> object,
NewTypeError(MessageTemplate::kNoAccess));
}
- if (!object->map()->is_extensible()) return Just(true);
+ if (!object->map().is_extensible()) return Just(true);
if (object->IsJSGlobalProxy()) {
PrototypeIterator iter(isolate, object);
@@ -3727,13 +3687,13 @@ Maybe<bool> JSObject::PreventExtensions(Handle<JSObject> object,
should_throw);
}
- if (object->map()->has_named_interceptor() ||
- object->map()->has_indexed_interceptor()) {
+ if (object->map().has_named_interceptor() ||
+ object->map().has_indexed_interceptor()) {
RETURN_FAILURE(isolate, should_throw,
NewTypeError(MessageTemplate::kCannotPreventExt));
}
- if (!object->HasFixedTypedArrayElements()) {
+ if (!object->HasTypedArrayElements()) {
// If there are fast elements we normalize.
Handle<NumberDictionary> dictionary = NormalizeElements(object);
DCHECK(object->HasDictionaryElements() ||
@@ -3751,7 +3711,7 @@ Maybe<bool> JSObject::PreventExtensions(Handle<JSObject> object,
new_map->set_is_extensible(false);
JSObject::MigrateToMap(object, new_map);
- DCHECK(!object->map()->is_extensible());
+ DCHECK(!object->map().is_extensible());
return Just(true);
}
@@ -3765,10 +3725,10 @@ bool JSObject::IsExtensible(Handle<JSObject> object) {
if (object->IsJSGlobalProxy()) {
PrototypeIterator iter(isolate, *object);
if (iter.IsAtEnd()) return false;
- DCHECK(iter.GetCurrent()->IsJSGlobalObject());
- return iter.GetCurrent<JSObject>()->map()->is_extensible();
+ DCHECK(iter.GetCurrent().IsJSGlobalObject());
+ return iter.GetCurrent<JSObject>().map().is_extensible();
}
- return object->map()->is_extensible();
+ return object->map().is_extensible();
}
template <typename Dictionary>
@@ -3779,13 +3739,13 @@ void JSObject::ApplyAttributesToDictionary(
for (int i = 0; i < capacity; i++) {
Object k;
if (!dictionary->ToKey(roots, i, &k)) continue;
- if (k->FilterKey(ALL_PROPERTIES)) continue;
+ if (k.FilterKey(ALL_PROPERTIES)) continue;
PropertyDetails details = dictionary->DetailsAt(i);
int attrs = attributes;
// READ_ONLY is an invalid attribute for JS setters/getters.
if ((attributes & READ_ONLY) && details.kind() == kAccessor) {
Object v = dictionary->ValueAt(i);
- if (v->IsAccessorPair()) attrs &= ~READ_ONLY;
+ if (v.IsAccessorPair()) attrs &= ~READ_ONLY;
}
details = details.CopyAddAttributes(static_cast<PropertyAttributes>(attrs));
dictionary->DetailsAtPut(isolate, i, details);
@@ -3811,8 +3771,8 @@ Maybe<bool> JSObject::PreventExtensionsWithTransition(
NewTypeError(MessageTemplate::kNoAccess));
}
- if (attrs == NONE && !object->map()->is_extensible()) return Just(true);
- ElementsKind old_elements_kind = object->map()->elements_kind();
+ if (attrs == NONE && !object->map().is_extensible()) return Just(true);
+ ElementsKind old_elements_kind = object->map().elements_kind();
if (attrs != FROZEN && IsSealedElementsKind(old_elements_kind))
return Just(true);
if (old_elements_kind == PACKED_FROZEN_ELEMENTS) return Just(true);
@@ -3825,8 +3785,8 @@ Maybe<bool> JSObject::PreventExtensionsWithTransition(
PrototypeIterator::GetCurrent<JSObject>(iter), should_throw);
}
- if (object->map()->has_named_interceptor() ||
- object->map()->has_indexed_interceptor()) {
+ if (object->map().has_named_interceptor() ||
+ object->map().has_indexed_interceptor()) {
MessageTemplate message = MessageTemplate::kNone;
switch (attrs) {
case NONE:
@@ -3845,12 +3805,11 @@ Maybe<bool> JSObject::PreventExtensionsWithTransition(
}
Handle<NumberDictionary> new_element_dictionary;
- if (!object->HasFixedTypedArrayElements() &&
- !object->HasDictionaryElements() &&
+ if (!object->HasTypedArrayElements() && !object->HasDictionaryElements() &&
!object->HasSlowStringWrapperElements()) {
int length = object->IsJSArray()
? Smi::ToInt(Handle<JSArray>::cast(object)->length())
- : object->elements()->length();
+ : object->elements().length();
new_element_dictionary =
length == 0 ? isolate->factory()->empty_slow_element_dictionary()
: object->GetElementsAccessor()->Normalize(object);
@@ -3873,7 +3832,7 @@ Maybe<bool> JSObject::PreventExtensionsWithTransition(
if (!transition.is_null()) {
Handle<Map> transition_map(transition, isolate);
DCHECK(transition_map->has_dictionary_elements() ||
- transition_map->has_fixed_typed_array_elements() ||
+ transition_map->has_typed_array_elements() ||
transition_map->elements_kind() == SLOW_STRING_WRAPPER_ELEMENTS ||
transition_map->has_frozen_or_sealed_elements());
DCHECK(!transition_map->is_extensible());
@@ -3907,7 +3866,7 @@ Maybe<bool> JSObject::PreventExtensionsWithTransition(
ReadOnlyRoots roots(isolate);
if (object->IsJSGlobalObject()) {
Handle<GlobalDictionary> dictionary(
- JSGlobalObject::cast(*object)->global_dictionary(), isolate);
+ JSGlobalObject::cast(*object).global_dictionary(), isolate);
JSObject::ApplyAttributesToDictionary(isolate, roots, dictionary,
attrs);
} else {
@@ -3919,15 +3878,14 @@ Maybe<bool> JSObject::PreventExtensionsWithTransition(
}
}
- if (object->map()->has_frozen_or_sealed_elements()) {
+ if (object->map().has_frozen_or_sealed_elements()) {
return Just(true);
}
// Both seal and preventExtensions always go through without modifications to
// typed array elements. Freeze works only if there are no actual elements.
- if (object->HasFixedTypedArrayElements()) {
- if (attrs == FROZEN &&
- JSArrayBufferView::cast(*object)->byte_length() > 0) {
+ if (object->HasTypedArrayElements()) {
+ if (attrs == FROZEN && JSArrayBufferView::cast(*object).byte_length() > 0) {
isolate->Throw(*isolate->factory()->NewTypeError(
MessageTemplate::kCannotFreezeArrayBufferView));
return Nothing<bool>();
@@ -3935,8 +3893,8 @@ Maybe<bool> JSObject::PreventExtensionsWithTransition(
return Just(true);
}
- DCHECK(object->map()->has_dictionary_elements() ||
- object->map()->elements_kind() == SLOW_STRING_WRAPPER_ELEMENTS);
+ DCHECK(object->map().has_dictionary_elements() ||
+ object->map().elements_kind() == SLOW_STRING_WRAPPER_ELEMENTS);
if (!new_element_dictionary.is_null()) {
object->set_elements(*new_element_dictionary);
}
@@ -3960,6 +3918,7 @@ Handle<Object> JSObject::FastPropertyAt(Handle<JSObject> object,
FieldIndex index) {
Isolate* isolate = object->GetIsolate();
if (object->IsUnboxedDoubleField(index)) {
+ DCHECK(representation.IsDouble());
double value = object->RawFastDoublePropertyAt(index);
return isolate->factory()->NewHeapNumber(value);
}
@@ -3971,39 +3930,41 @@ Handle<Object> JSObject::FastPropertyAt(Handle<JSObject> object,
bool JSObject::HasEnumerableElements() {
// TODO(cbruni): cleanup
JSObject object = *this;
- switch (object->GetElementsKind()) {
+ switch (object.GetElementsKind()) {
case PACKED_SMI_ELEMENTS:
case PACKED_ELEMENTS:
case PACKED_FROZEN_ELEMENTS:
case PACKED_SEALED_ELEMENTS:
case PACKED_DOUBLE_ELEMENTS: {
- int length = object->IsJSArray()
- ? Smi::ToInt(JSArray::cast(object)->length())
- : object->elements()->length();
+ int length = object.IsJSArray()
+ ? Smi::ToInt(JSArray::cast(object).length())
+ : object.elements().length();
return length > 0;
}
case HOLEY_SMI_ELEMENTS:
+ case HOLEY_FROZEN_ELEMENTS:
+ case HOLEY_SEALED_ELEMENTS:
case HOLEY_ELEMENTS: {
- FixedArray elements = FixedArray::cast(object->elements());
- int length = object->IsJSArray()
- ? Smi::ToInt(JSArray::cast(object)->length())
- : elements->length();
+ FixedArray elements = FixedArray::cast(object.elements());
+ int length = object.IsJSArray()
+ ? Smi::ToInt(JSArray::cast(object).length())
+ : elements.length();
Isolate* isolate = GetIsolate();
for (int i = 0; i < length; i++) {
- if (!elements->is_the_hole(isolate, i)) return true;
+ if (!elements.is_the_hole(isolate, i)) return true;
}
return false;
}
case HOLEY_DOUBLE_ELEMENTS: {
- int length = object->IsJSArray()
- ? Smi::ToInt(JSArray::cast(object)->length())
- : object->elements()->length();
+ int length = object.IsJSArray()
+ ? Smi::ToInt(JSArray::cast(object).length())
+ : object.elements().length();
// Zero-length arrays would use the empty FixedArray...
if (length == 0) return false;
// ...so only cast to FixedDoubleArray otherwise.
- FixedDoubleArray elements = FixedDoubleArray::cast(object->elements());
+ FixedDoubleArray elements = FixedDoubleArray::cast(object.elements());
for (int i = 0; i < length; i++) {
- if (!elements->is_the_hole(i)) return true;
+ if (!elements.is_the_hole(i)) return true;
}
return false;
}
@@ -4012,12 +3973,12 @@ bool JSObject::HasEnumerableElements() {
TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE
{
- int length = object->elements()->length();
+ size_t length = JSTypedArray::cast(object).length();
return length > 0;
}
case DICTIONARY_ELEMENTS: {
- NumberDictionary elements = NumberDictionary::cast(object->elements());
- return elements->NumberOfEnumerableProperties() > 0;
+ NumberDictionary elements = NumberDictionary::cast(object.elements());
+ return elements.NumberOfEnumerableProperties() > 0;
}
case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
@@ -4025,10 +3986,10 @@ bool JSObject::HasEnumerableElements() {
return true;
case FAST_STRING_WRAPPER_ELEMENTS:
case SLOW_STRING_WRAPPER_ELEMENTS:
- if (String::cast(JSValue::cast(object)->value())->length() > 0) {
+ if (String::cast(JSValue::cast(object).value()).length() > 0) {
return true;
}
- return object->elements()->length() > 0;
+ return object.elements().length() > 0;
case NO_ELEMENTS:
return false;
}
@@ -4066,7 +4027,7 @@ MaybeHandle<Object> JSObject::DefineAccessor(LookupIterator* it,
Handle<JSObject> object = Handle<JSObject>::cast(it->GetReceiver());
// Ignore accessors on typed arrays.
- if (it->IsElement() && object->HasFixedTypedArrayElements()) {
+ if (it->IsElement() && object->HasTypedArrayElements()) {
return it->factory()->undefined_value();
}
@@ -4103,7 +4064,7 @@ MaybeHandle<Object> JSObject::SetAccessor(Handle<JSObject> object,
}
// Ignore accessors on typed arrays.
- if (it.IsElement() && object->HasFixedTypedArrayElements()) {
+ if (it.IsElement() && object->HasTypedArrayElements()) {
return it.factory()->undefined_value();
}
@@ -4122,59 +4083,59 @@ MaybeHandle<Object> JSObject::SetAccessor(Handle<JSObject> object,
Object JSObject::SlowReverseLookup(Object value) {
if (HasFastProperties()) {
- int number_of_own_descriptors = map()->NumberOfOwnDescriptors();
- DescriptorArray descs = map()->instance_descriptors();
- bool value_is_number = value->IsNumber();
+ int number_of_own_descriptors = map().NumberOfOwnDescriptors();
+ DescriptorArray descs = map().instance_descriptors();
+ bool value_is_number = value.IsNumber();
for (int i = 0; i < number_of_own_descriptors; i++) {
- PropertyDetails details = descs->GetDetails(i);
+ PropertyDetails details = descs.GetDetails(i);
if (details.location() == kField) {
DCHECK_EQ(kData, details.kind());
FieldIndex field_index = FieldIndex::ForDescriptor(map(), i);
if (IsUnboxedDoubleField(field_index)) {
if (value_is_number) {
double property = RawFastDoublePropertyAt(field_index);
- if (property == value->Number()) {
- return descs->GetKey(i);
+ if (property == value.Number()) {
+ return descs.GetKey(i);
}
}
} else {
Object property = RawFastPropertyAt(field_index);
if (field_index.is_double()) {
- DCHECK(property->IsMutableHeapNumber());
- if (value_is_number && property->Number() == value->Number()) {
- return descs->GetKey(i);
+ DCHECK(property.IsMutableHeapNumber());
+ if (value_is_number && property.Number() == value.Number()) {
+ return descs.GetKey(i);
}
} else if (property == value) {
- return descs->GetKey(i);
+ return descs.GetKey(i);
}
}
} else {
DCHECK_EQ(kDescriptor, details.location());
if (details.kind() == kData) {
- if (descs->GetStrongValue(i) == value) {
- return descs->GetKey(i);
+ if (descs.GetStrongValue(i) == value) {
+ return descs.GetKey(i);
}
}
}
}
return GetReadOnlyRoots().undefined_value();
} else if (IsJSGlobalObject()) {
- return JSGlobalObject::cast(*this)->global_dictionary()->SlowReverseLookup(
+ return JSGlobalObject::cast(*this).global_dictionary().SlowReverseLookup(
value);
} else {
- return property_dictionary()->SlowReverseLookup(value);
+ return property_dictionary().SlowReverseLookup(value);
}
}
void JSObject::PrototypeRegistryCompactionCallback(HeapObject value,
int old_index,
int new_index) {
- DCHECK(value->IsMap() && Map::cast(value)->is_prototype_map());
+ DCHECK(value.IsMap() && Map::cast(value).is_prototype_map());
Map map = Map::cast(value);
- DCHECK(map->prototype_info()->IsPrototypeInfo());
- PrototypeInfo proto_info = PrototypeInfo::cast(map->prototype_info());
- DCHECK_EQ(old_index, proto_info->registry_slot());
- proto_info->set_registry_slot(new_index);
+ DCHECK(map.prototype_info().IsPrototypeInfo());
+ PrototypeInfo proto_info = PrototypeInfo::cast(map.prototype_info());
+ DCHECK_EQ(old_index, proto_info.registry_slot());
+ proto_info.set_registry_slot(new_index);
}
// static
@@ -4189,10 +4150,10 @@ void JSObject::MakePrototypesFast(Handle<Object> receiver,
if (!current->IsJSObject()) return;
Handle<JSObject> current_obj = Handle<JSObject>::cast(current);
Map current_map = current_obj->map();
- if (current_map->is_prototype_map()) {
+ if (current_map.is_prototype_map()) {
// If the map is already marked as should be fast, we're done. Its
// prototypes will have been marked already as well.
- if (current_map->should_be_fast_prototype_map()) return;
+ if (current_map.should_be_fast_prototype_map()) return;
Handle<Map> map(current_map, isolate);
Map::SetShouldBeFastPrototypeMap(map, true, isolate);
JSObject::OptimizeAsPrototype(current_obj);
@@ -4205,8 +4166,8 @@ static bool PrototypeBenefitsFromNormalization(Handle<JSObject> object) {
if (!object->HasFastProperties()) return false;
if (object->IsJSGlobalProxy()) return false;
if (object->GetIsolate()->bootstrapper()->IsActive()) return false;
- return !object->map()->is_prototype_map() ||
- !object->map()->should_be_fast_prototype_map();
+ return !object->map().is_prototype_map() ||
+ !object->map().should_be_fast_prototype_map();
}
// static
@@ -4218,8 +4179,8 @@ void JSObject::OptimizeAsPrototype(Handle<JSObject> object,
JSObject::NormalizeProperties(object, KEEP_INOBJECT_PROPERTIES, 0,
"NormalizeAsPrototype");
}
- if (object->map()->is_prototype_map()) {
- if (object->map()->should_be_fast_prototype_map() &&
+ if (object->map().is_prototype_map()) {
+ if (object->map().should_be_fast_prototype_map() &&
!object->HasFastProperties()) {
JSObject::MigrateSlowToFast(object, 0, "OptimizeAsPrototype");
}
@@ -4228,18 +4189,18 @@ void JSObject::OptimizeAsPrototype(Handle<JSObject> object,
handle(object->map(), object->GetIsolate()),
"CopyAsPrototype");
JSObject::MigrateToMap(object, new_map);
- object->map()->set_is_prototype_map(true);
+ object->map().set_is_prototype_map(true);
// Replace the pointer to the exact constructor with the Object function
// from the same context if undetectable from JS. This is to avoid keeping
// memory alive unnecessarily.
- Object maybe_constructor = object->map()->GetConstructor();
- if (maybe_constructor->IsJSFunction()) {
+ Object maybe_constructor = object->map().GetConstructor();
+ if (maybe_constructor.IsJSFunction()) {
JSFunction constructor = JSFunction::cast(maybe_constructor);
- if (!constructor->shared()->IsApiFunction()) {
- Context context = constructor->context()->native_context();
- JSFunction object_function = context->object_function();
- object->map()->SetConstructor(object_function);
+ if (!constructor.shared().IsApiFunction()) {
+ Context context = constructor.context().native_context();
+ JSFunction object_function = context.object_function();
+ object->map().SetConstructor(object_function);
}
}
}
@@ -4247,8 +4208,8 @@ void JSObject::OptimizeAsPrototype(Handle<JSObject> object,
// static
void JSObject::ReoptimizeIfPrototype(Handle<JSObject> object) {
- if (!object->map()->is_prototype_map()) return;
- if (!object->map()->should_be_fast_prototype_map()) return;
+ if (!object->map().is_prototype_map()) return;
+ if (!object->map().should_be_fast_prototype_map()) return;
OptimizeAsPrototype(object);
}
@@ -4290,7 +4251,7 @@ void JSObject::LazyRegisterPrototypeUser(Handle<Map> user, Isolate* isolate) {
PrintF("Registering %p as a user of prototype %p (map=%p).\n",
reinterpret_cast<void*>(current_user->ptr()),
reinterpret_cast<void*>(proto->ptr()),
- reinterpret_cast<void*>(proto->map()->ptr()));
+ reinterpret_cast<void*>(proto->map().ptr()));
}
current_user = handle(proto->map(), isolate);
@@ -4304,23 +4265,23 @@ void JSObject::LazyRegisterPrototypeUser(Handle<Map> user, Isolate* isolate) {
bool JSObject::UnregisterPrototypeUser(Handle<Map> user, Isolate* isolate) {
DCHECK(user->is_prototype_map());
// If it doesn't have a PrototypeInfo, it was never registered.
- if (!user->prototype_info()->IsPrototypeInfo()) return false;
+ if (!user->prototype_info().IsPrototypeInfo()) return false;
// If it had no prototype before, see if it had users that might expect
// registration.
- if (!user->prototype()->IsJSObject()) {
+ if (!user->prototype().IsJSObject()) {
Object users =
- PrototypeInfo::cast(user->prototype_info())->prototype_users();
- return users->IsWeakArrayList();
+ PrototypeInfo::cast(user->prototype_info()).prototype_users();
+ return users.IsWeakArrayList();
}
Handle<JSObject> prototype(JSObject::cast(user->prototype()), isolate);
Handle<PrototypeInfo> user_info =
Map::GetOrCreatePrototypeInfo(user, isolate);
int slot = user_info->registry_slot();
if (slot == PrototypeInfo::UNREGISTERED) return false;
- DCHECK(prototype->map()->is_prototype_map());
- Object maybe_proto_info = prototype->map()->prototype_info();
+ DCHECK(prototype->map().is_prototype_map());
+ Object maybe_proto_info = prototype->map().prototype_info();
// User knows its registry slot, prototype info and user registry must exist.
- DCHECK(maybe_proto_info->IsPrototypeInfo());
+ DCHECK(maybe_proto_info.IsPrototypeInfo());
Handle<PrototypeInfo> proto_info(PrototypeInfo::cast(maybe_proto_info),
isolate);
Handle<WeakArrayList> prototype_users(
@@ -4341,36 +4302,35 @@ namespace {
// AccessorAssembler::InvalidateValidityCellIfPrototype() which does pre-checks
// before jumping here.
void InvalidateOnePrototypeValidityCellInternal(Map map) {
- DCHECK(map->is_prototype_map());
+ DCHECK(map.is_prototype_map());
if (FLAG_trace_prototype_users) {
PrintF("Invalidating prototype map %p 's cell\n",
reinterpret_cast<void*>(map.ptr()));
}
- Object maybe_cell = map->prototype_validity_cell();
- if (maybe_cell->IsCell()) {
+ Object maybe_cell = map.prototype_validity_cell();
+ if (maybe_cell.IsCell()) {
// Just set the value; the cell will be replaced lazily.
Cell cell = Cell::cast(maybe_cell);
- cell->set_value(Smi::FromInt(Map::kPrototypeChainInvalid));
+ cell.set_value(Smi::FromInt(Map::kPrototypeChainInvalid));
}
}
void InvalidatePrototypeChainsInternal(Map map) {
InvalidateOnePrototypeValidityCellInternal(map);
- Object maybe_proto_info = map->prototype_info();
- if (!maybe_proto_info->IsPrototypeInfo()) return;
+ Object maybe_proto_info = map.prototype_info();
+ if (!maybe_proto_info.IsPrototypeInfo()) return;
PrototypeInfo proto_info = PrototypeInfo::cast(maybe_proto_info);
- if (!proto_info->prototype_users()->IsWeakArrayList()) {
+ if (!proto_info.prototype_users().IsWeakArrayList()) {
return;
}
WeakArrayList prototype_users =
- WeakArrayList::cast(proto_info->prototype_users());
+ WeakArrayList::cast(proto_info.prototype_users());
// For now, only maps register themselves as users.
- for (int i = PrototypeUsers::kFirstIndex; i < prototype_users->length();
- ++i) {
+ for (int i = PrototypeUsers::kFirstIndex; i < prototype_users.length(); ++i) {
HeapObject heap_object;
- if (prototype_users->Get(i)->GetHeapObjectIfWeak(&heap_object) &&
- heap_object->IsMap()) {
+ if (prototype_users.Get(i)->GetHeapObjectIfWeak(&heap_object) &&
+ heap_object.IsMap()) {
// Walk the prototype chain (backwards, towards leaf objects) if
// necessary.
InvalidatePrototypeChainsInternal(Map::cast(heap_object));
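How the invalidation above propagates, as an assumed simplification: each prototype map carries a validity cell plus a weak list of maps registered as its users; invalidation flips the cell (a fresh one is created lazily later, per the comment above) and recurses towards leaf objects so dependent ICs re-validate:

#include <vector>

struct Map {
  bool prototype_chain_valid = true;
  std::vector<Map*> prototype_users;
};

void InvalidatePrototypeChains(Map* map) {
  map->prototype_chain_valid = false;  // like setting kPrototypeChainInvalid
  for (Map* user : map->prototype_users) InvalidatePrototypeChains(user);
}

int main() {
  Map proto, user;
  proto.prototype_users.push_back(&user);
  InvalidatePrototypeChains(&proto);
  return user.prototype_chain_valid ? 1 : 0;  // both maps are now invalid
}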
@@ -4397,7 +4357,7 @@ Map JSObject::InvalidatePrototypeChains(Map map) {
// static
void JSObject::InvalidatePrototypeValidityCell(JSGlobalObject global) {
DisallowHeapAllocation no_gc;
- InvalidateOnePrototypeValidityCellInternal(global->map());
+ InvalidateOnePrototypeValidityCellInternal(global.map());
}
Maybe<bool> JSObject::SetPrototype(Handle<JSObject> object,
@@ -4425,7 +4385,7 @@ Maybe<bool> JSObject::SetPrototype(Handle<JSObject> object,
// SpiderMonkey behaves this way.
if (!value->IsJSReceiver() && !value->IsNull(isolate)) return Just(true);
- bool all_extensible = object->map()->is_extensible();
+ bool all_extensible = object->map().is_extensible();
Handle<JSObject> real_receiver = object;
if (from_javascript) {
// Find the first object in the chain whose prototype object is not
@@ -4437,7 +4397,7 @@ Maybe<bool> JSObject::SetPrototype(Handle<JSObject> object,
// JSProxies.
real_receiver = PrototypeIterator::GetCurrent<JSObject>(iter);
iter.Advance();
- all_extensible = all_extensible && real_receiver->map()->is_extensible();
+ all_extensible = all_extensible && real_receiver->map().is_extensible();
}
}
Handle<Map> map(real_receiver->map(), isolate);
@@ -4524,14 +4484,14 @@ ElementsAccessor* JSObject::GetElementsAccessor() {
void JSObject::ValidateElements(JSObject object) {
#ifdef ENABLE_SLOW_DCHECKS
if (FLAG_enable_slow_asserts) {
- object->GetElementsAccessor()->Validate(object);
+ object.GetElementsAccessor()->Validate(object);
}
#endif
}
bool JSObject::WouldConvertToSlowElements(uint32_t index) {
if (!HasFastElements()) return false;
- uint32_t capacity = static_cast<uint32_t>(elements()->length());
+ uint32_t capacity = static_cast<uint32_t>(elements().length());
uint32_t new_capacity;
return ShouldConvertToSlowElements(*this, capacity, index, &new_capacity);
}
@@ -4542,23 +4502,23 @@ static bool ShouldConvertToFastElements(JSObject object,
uint32_t* new_capacity) {
// If properties with non-standard attributes or accessors were added, we
// cannot go back to fast elements.
- if (dictionary->requires_slow_elements()) return false;
+ if (dictionary.requires_slow_elements()) return false;
// Adding a property with this index will require slow elements.
if (index >= static_cast<uint32_t>(Smi::kMaxValue)) return false;
- if (object->IsJSArray()) {
- Object length = JSArray::cast(object)->length();
- if (!length->IsSmi()) return false;
+ if (object.IsJSArray()) {
+ Object length = JSArray::cast(object).length();
+ if (!length.IsSmi()) return false;
*new_capacity = static_cast<uint32_t>(Smi::ToInt(length));
- } else if (object->IsJSSloppyArgumentsObject()) {
+ } else if (object.IsJSSloppyArgumentsObject()) {
return false;
} else {
- *new_capacity = dictionary->max_number_key() + 1;
+ *new_capacity = dictionary.max_number_key() + 1;
}
*new_capacity = Max(index + 1, *new_capacity);
- uint32_t dictionary_size = static_cast<uint32_t>(dictionary->Capacity()) *
+ uint32_t dictionary_size = static_cast<uint32_t>(dictionary.Capacity()) *
NumberDictionary::kEntrySize;
// Turn fast if the dictionary only saves 50% space.
@@ -4566,24 +4526,24 @@ static bool ShouldConvertToFastElements(JSObject object,
}
static ElementsKind BestFittingFastElementsKind(JSObject object) {
- if (!object->map()->CanHaveFastTransitionableElementsKind()) {
+ if (!object.map().CanHaveFastTransitionableElementsKind()) {
return HOLEY_ELEMENTS;
}
- if (object->HasSloppyArgumentsElements()) {
+ if (object.HasSloppyArgumentsElements()) {
return FAST_SLOPPY_ARGUMENTS_ELEMENTS;
}
- if (object->HasStringWrapperElements()) {
+ if (object.HasStringWrapperElements()) {
return FAST_STRING_WRAPPER_ELEMENTS;
}
- DCHECK(object->HasDictionaryElements());
- NumberDictionary dictionary = object->element_dictionary();
+ DCHECK(object.HasDictionaryElements());
+ NumberDictionary dictionary = object.element_dictionary();
ElementsKind kind = HOLEY_SMI_ELEMENTS;
- for (int i = 0; i < dictionary->Capacity(); i++) {
- Object key = dictionary->KeyAt(i);
- if (key->IsNumber()) {
- Object value = dictionary->ValueAt(i);
- if (!value->IsNumber()) return HOLEY_ELEMENTS;
- if (!value->IsSmi()) {
+ for (int i = 0; i < dictionary.Capacity(); i++) {
+ Object key = dictionary.KeyAt(i);
+ if (key.IsNumber()) {
+ Object value = dictionary.ValueAt(i);
+ if (!value.IsNumber()) return HOLEY_ELEMENTS;
+ if (!value.IsSmi()) {
if (!FLAG_unbox_double_arrays) return HOLEY_ELEMENTS;
kind = HOLEY_DOUBLE_ELEMENTS;
}
@@ -4596,7 +4556,7 @@ static ElementsKind BestFittingFastElementsKind(JSObject object) {
void JSObject::AddDataElement(Handle<JSObject> object, uint32_t index,
Handle<Object> value,
PropertyAttributes attributes) {
- DCHECK(object->map()->is_extensible());
+ DCHECK(object->map().is_extensible());
Isolate* isolate = object->GetIsolate();
@@ -4604,14 +4564,14 @@ void JSObject::AddDataElement(Handle<JSObject> object, uint32_t index,
uint32_t new_capacity = 0;
if (object->IsJSArray()) {
- CHECK(JSArray::cast(*object)->length()->ToArrayLength(&old_length));
+ CHECK(JSArray::cast(*object).length().ToArrayLength(&old_length));
}
ElementsKind kind = object->GetElementsKind();
FixedArrayBase elements = object->elements();
ElementsKind dictionary_kind = DICTIONARY_ELEMENTS;
if (IsSloppyArgumentsElementsKind(kind)) {
- elements = SloppyArgumentsElements::cast(elements)->arguments();
+ elements = SloppyArgumentsElements::cast(elements).arguments();
dictionary_kind = SLOW_SLOPPY_ARGUMENTS_ELEMENTS;
} else if (IsStringWrapperElementsKind(kind)) {
dictionary_kind = SLOW_STRING_WRAPPER_ELEMENTS;
@@ -4619,13 +4579,13 @@ void JSObject::AddDataElement(Handle<JSObject> object, uint32_t index,
if (attributes != NONE) {
kind = dictionary_kind;
- } else if (elements->IsNumberDictionary()) {
+ } else if (elements.IsNumberDictionary()) {
kind = ShouldConvertToFastElements(
*object, NumberDictionary::cast(elements), index, &new_capacity)
? BestFittingFastElementsKind(*object)
: dictionary_kind;
} else if (ShouldConvertToSlowElements(
- *object, static_cast<uint32_t>(elements->length()), index,
+ *object, static_cast<uint32_t>(elements.length()), index,
&new_capacity)) {
kind = dictionary_kind;
}
@@ -4642,7 +4602,7 @@ void JSObject::AddDataElement(Handle<JSObject> object, uint32_t index,
if (object->IsJSArray() && index >= old_length) {
Handle<Object> new_length =
isolate->factory()->NewNumberFromUint(index + 1);
- JSArray::cast(*object)->set_length(*new_length);
+ JSArray::cast(*object).set_length(*new_length);
}
}
@@ -4665,7 +4625,7 @@ bool JSObject::UpdateAllocationSite(Handle<JSObject> object,
if (memento.is_null()) return false;
// Walk through to the Allocation Site
- site = handle(memento->GetAllocationSite(), heap->isolate());
+ site = handle(memento.GetAllocationSite(), heap->isolate());
}
return AllocationSite::DigestTransitionFeedback<update_or_check>(site,
to_kind);
@@ -4707,19 +4667,19 @@ void JSObject::TransitionElementsKind(Handle<JSObject> object,
} else {
DCHECK((IsSmiElementsKind(from_kind) && IsDoubleElementsKind(to_kind)) ||
(IsDoubleElementsKind(from_kind) && IsObjectElementsKind(to_kind)));
- uint32_t c = static_cast<uint32_t>(object->elements()->length());
+ uint32_t c = static_cast<uint32_t>(object->elements().length());
ElementsAccessor::ForKind(to_kind)->GrowCapacityAndConvert(object, c);
}
}
template <typename BackingStore>
static int HoleyElementsUsage(JSObject object, BackingStore store) {
- Isolate* isolate = object->GetIsolate();
- int limit = object->IsJSArray() ? Smi::ToInt(JSArray::cast(object)->length())
- : store->length();
+ Isolate* isolate = object.GetIsolate();
+ int limit = object.IsJSArray() ? Smi::ToInt(JSArray::cast(object).length())
+ : store.length();
int used = 0;
for (int i = 0; i < limit; ++i) {
- if (!store->is_the_hole(isolate, i)) ++used;
+ if (!store.is_the_hole(isolate, i)) ++used;
}
return used;
}
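HoleyElementsUsage above counts occupied slots up to the JSArray length (or the store length for non-arrays). The same loop over a standalone store, with std::optional standing in for the_hole:

#include <optional>
#include <vector>

int HoleyElementsUsage(const std::vector<std::optional<int>>& store, int limit) {
  int used = 0;
  for (int i = 0; i < limit && i < static_cast<int>(store.size()); ++i) {
    if (store[i].has_value()) ++used;  // nullopt plays the role of the_hole
  }
  return used;
}

int main() {
  std::vector<std::optional<int>> store = {1, std::nullopt, 3};
  return HoleyElementsUsage(store, 3) == 2 ? 0 : 1;
}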
@@ -4732,17 +4692,19 @@ int JSObject::GetFastElementsUsage() {
case PACKED_ELEMENTS:
case PACKED_FROZEN_ELEMENTS:
case PACKED_SEALED_ELEMENTS:
- return IsJSArray() ? Smi::ToInt(JSArray::cast(*this)->length())
- : store->length();
+ return IsJSArray() ? Smi::ToInt(JSArray::cast(*this).length())
+ : store.length();
case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
- store = SloppyArgumentsElements::cast(store)->arguments();
+ store = SloppyArgumentsElements::cast(store).arguments();
V8_FALLTHROUGH;
case HOLEY_SMI_ELEMENTS:
case HOLEY_ELEMENTS:
+ case HOLEY_FROZEN_ELEMENTS:
+ case HOLEY_SEALED_ELEMENTS:
case FAST_STRING_WRAPPER_ELEMENTS:
return HoleyElementsUsage(*this, FixedArray::cast(store));
case HOLEY_DOUBLE_ELEMENTS:
- if (elements()->length() == 0) return 0;
+ if (elements().length() == 0) return 0;
return HoleyElementsUsage(*this, FixedDoubleArray::cast(store));
case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
@@ -4792,7 +4754,7 @@ bool JSObject::IsApiWrapper() {
// These object types can carry information relevant for embedders. The
// *_API_* types are generated through templates which can have embedder
// fields. The other types have their embedder fields added at compile time.
- auto instance_type = map()->instance_type();
+ auto instance_type = map().instance_type();
return instance_type == JS_API_OBJECT_TYPE ||
instance_type == JS_ARRAY_BUFFER_TYPE ||
instance_type == JS_DATA_VIEW_TYPE ||
@@ -4801,7 +4763,7 @@ bool JSObject::IsApiWrapper() {
}
bool JSObject::IsDroppableApiWrapper() {
- auto instance_type = map()->instance_type();
+ auto instance_type = map().instance_type();
return instance_type == JS_API_OBJECT_TYPE ||
instance_type == JS_SPECIAL_API_OBJECT_TYPE;
}
@@ -4809,7 +4771,7 @@ bool JSObject::IsDroppableApiWrapper() {
// static
MaybeHandle<NativeContext> JSBoundFunction::GetFunctionRealm(
Handle<JSBoundFunction> function) {
- DCHECK(function->map()->is_constructor());
+ DCHECK(function->map().is_constructor());
return JSReceiver::GetFunctionRealm(
handle(function->bound_target_function(), function->GetIsolate()));
}
@@ -4821,14 +4783,14 @@ MaybeHandle<String> JSBoundFunction::GetName(Isolate* isolate,
Handle<String> target_name = prefix;
Factory* factory = isolate->factory();
// Concatenate the "bound " up to the last non-bound target.
- while (function->bound_target_function()->IsJSBoundFunction()) {
+ while (function->bound_target_function().IsJSBoundFunction()) {
ASSIGN_RETURN_ON_EXCEPTION(isolate, target_name,
factory->NewConsString(prefix, target_name),
String);
function = handle(JSBoundFunction::cast(function->bound_target_function()),
isolate);
}
- if (function->bound_target_function()->IsJSFunction()) {
+ if (function->bound_target_function().IsJSFunction()) {
Handle<JSFunction> target(
JSFunction::cast(function->bound_target_function()), isolate);
Handle<Object> name = JSFunction::GetName(isolate, target);
@@ -4842,14 +4804,14 @@ MaybeHandle<String> JSBoundFunction::GetName(Isolate* isolate,
// static
Maybe<int> JSBoundFunction::GetLength(Isolate* isolate,
Handle<JSBoundFunction> function) {
- int nof_bound_arguments = function->bound_arguments()->length();
- while (function->bound_target_function()->IsJSBoundFunction()) {
+ int nof_bound_arguments = function->bound_arguments().length();
+ while (function->bound_target_function().IsJSBoundFunction()) {
function = handle(JSBoundFunction::cast(function->bound_target_function()),
isolate);
// Make sure we never overflow {nof_bound_arguments}: the number of
// arguments of a function is strictly limited by the max length of a
// JSArray, so Smi::kMaxValue is a reasonably good overestimate.
- int length = function->bound_arguments()->length();
+ int length = function->bound_arguments().length();
if (V8_LIKELY(Smi::kMaxValue - nof_bound_arguments > length)) {
nof_bound_arguments += length;
} else {
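The V8_LIKELY branch above keeps nof_bound_arguments from overflowing while walking a chain of bound functions. The same saturating accumulation in isolation, with INT_MAX standing in for Smi::kMaxValue:

#include <climits>
#include <vector>

int AccumulateBoundArguments(const std::vector<int>& chain_lengths) {
  int total = 0;
  for (int length : chain_lengths) {
    if (INT_MAX - total > length) {
      total += length;
    } else {
      return INT_MAX;  // saturate instead of overflowing
    }
  }
  return total;
}

int main() {
  return AccumulateBoundArguments({INT_MAX - 1, 5}) == INT_MAX ? 0 : 1;
}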
@@ -4875,17 +4837,17 @@ Handle<String> JSBoundFunction::ToString(Handle<JSBoundFunction> function) {
// static
Handle<Object> JSFunction::GetName(Isolate* isolate,
Handle<JSFunction> function) {
- if (function->shared()->name_should_print_as_anonymous()) {
+ if (function->shared().name_should_print_as_anonymous()) {
return isolate->factory()->anonymous_string();
}
- return handle(function->shared()->Name(), isolate);
+ return handle(function->shared().Name(), isolate);
}
// static
Handle<NativeContext> JSFunction::GetFunctionRealm(
Handle<JSFunction> function) {
- DCHECK(function->map()->is_constructor());
- return handle(function->context()->native_context(), function->GetIsolate());
+ DCHECK(function->map().is_constructor());
+ return handle(function->context().native_context(), function->GetIsolate());
}
void JSFunction::MarkForOptimization(ConcurrencyMode mode) {
@@ -4896,11 +4858,11 @@ void JSFunction::MarkForOptimization(ConcurrencyMode mode) {
}
DCHECK(!is_compiled() || IsInterpreted());
- DCHECK(shared()->IsInterpreted());
+ DCHECK(shared().IsInterpreted());
DCHECK(!IsOptimized());
DCHECK(!HasOptimizedCode());
- DCHECK(shared()->allows_lazy_compilation() ||
- !shared()->optimization_disabled());
+ DCHECK(shared().allows_lazy_compilation() ||
+ !shared().optimization_disabled());
if (mode == ConcurrencyMode::kConcurrent) {
if (IsInOptimizationQueue()) {
@@ -4926,16 +4888,16 @@ void JSFunction::MarkForOptimization(ConcurrencyMode mode) {
// static
void JSFunction::EnsureClosureFeedbackCellArray(Handle<JSFunction> function) {
Isolate* const isolate = function->GetIsolate();
- DCHECK(function->shared()->is_compiled());
- DCHECK(function->shared()->HasFeedbackMetadata());
+ DCHECK(function->shared().is_compiled());
+ DCHECK(function->shared().HasFeedbackMetadata());
if (function->has_closure_feedback_cell_array() ||
function->has_feedback_vector()) {
return;
}
- if (function->shared()->HasAsmWasmData()) return;
+ if (function->shared().HasAsmWasmData()) return;
Handle<SharedFunctionInfo> shared(function->shared(), isolate);
- DCHECK(function->shared()->HasBytecodeArray());
+ DCHECK(function->shared().HasBytecodeArray());
Handle<HeapObject> feedback_cell_array =
ClosureFeedbackCellArray::New(isolate, shared);
// The many-closures cell is used as a way to specify that there is no
@@ -4949,20 +4911,20 @@ void JSFunction::EnsureClosureFeedbackCellArray(Handle<JSFunction> function) {
isolate->factory()->NewOneClosureCell(feedback_cell_array);
function->set_raw_feedback_cell(*feedback_cell);
} else {
- function->raw_feedback_cell()->set_value(*feedback_cell_array);
+ function->raw_feedback_cell().set_value(*feedback_cell_array);
}
}
// static
void JSFunction::EnsureFeedbackVector(Handle<JSFunction> function) {
Isolate* const isolate = function->GetIsolate();
- DCHECK(function->shared()->is_compiled());
- DCHECK(function->shared()->HasFeedbackMetadata());
+ DCHECK(function->shared().is_compiled());
+ DCHECK(function->shared().HasFeedbackMetadata());
if (function->has_feedback_vector()) return;
- if (function->shared()->HasAsmWasmData()) return;
+ if (function->shared().HasAsmWasmData()) return;
Handle<SharedFunctionInfo> shared(function->shared(), isolate);
- DCHECK(function->shared()->HasBytecodeArray());
+ DCHECK(function->shared().HasBytecodeArray());
EnsureClosureFeedbackCellArray(function);
Handle<ClosureFeedbackCellArray> closure_feedback_cell_array =
@@ -4974,15 +4936,24 @@ void JSFunction::EnsureFeedbackVector(Handle<JSFunction> function) {
// for more details.
DCHECK(function->raw_feedback_cell() !=
isolate->heap()->many_closures_cell());
- function->raw_feedback_cell()->set_value(*feedback_vector);
+ function->raw_feedback_cell().set_value(*feedback_vector);
}
// static
void JSFunction::InitializeFeedbackCell(Handle<JSFunction> function) {
- if (FLAG_lazy_feedback_allocation) {
- EnsureClosureFeedbackCellArray(function);
- } else {
+ Isolate* const isolate = function->GetIsolate();
+ bool needs_feedback_vector = !FLAG_lazy_feedback_allocation;
+  // We need a feedback vector for certain log events, for collecting type
+  // profiles, and for more precise code coverage.
+ if (FLAG_log_function_events) needs_feedback_vector = true;
+ if (!isolate->is_best_effort_code_coverage()) needs_feedback_vector = true;
+ if (isolate->is_collecting_type_profile()) needs_feedback_vector = true;
+ if (FLAG_always_opt) needs_feedback_vector = true;
+
+ if (needs_feedback_vector) {
EnsureFeedbackVector(function);
+ } else {
+ EnsureClosureFeedbackCellArray(function);
}
}
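InitializeFeedbackCell previously keyed only on FLAG_lazy_feedback_allocation; the new version forces a full feedback vector whenever some consumer needs per-slot feedback immediately (function-event logging, non-best-effort code coverage, type profiling, or --always-opt) and otherwise allocates just the cheaper closure feedback cell array. A paraphrase of that policy; the flags mirror the hunk, the struct itself is illustrative:

struct FeedbackPolicy {
  bool lazy_feedback_allocation;   // FLAG_lazy_feedback_allocation
  bool log_function_events;        // FLAG_log_function_events
  bool best_effort_code_coverage;  // isolate->is_best_effort_code_coverage()
  bool collecting_type_profile;    // isolate->is_collecting_type_profile()
  bool always_opt;                 // FLAG_always_opt

  bool NeedsFeedbackVector() const {
    return !lazy_feedback_allocation || log_function_events ||
           !best_effort_code_coverage || collecting_type_profile || always_opt;
  }
};

int main() {
  FeedbackPolicy lazy{true, false, true, false, false};
  return lazy.NeedsFeedbackVector() ? 1 : 0;  // lazy path: cell array only
}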
@@ -5014,7 +4985,7 @@ void SetInstancePrototype(Isolate* isolate, Handle<JSFunction> function,
// If the function is used as the global Array function, cache the
// updated initial maps (and transitioned versions) in the native context.
- Handle<Context> native_context(function->context()->native_context(),
+ Handle<Context> native_context(function->context().native_context(),
isolate);
Handle<Object> array_function(
native_context->get(Context::ARRAY_FUNCTION_INDEX), isolate);
@@ -5025,7 +4996,7 @@ void SetInstancePrototype(Isolate* isolate, Handle<JSFunction> function,
}
// Deoptimize all code that embeds the previous initial map.
- initial_map->dependent_code()->DeoptimizeDependentCodeGroup(
+ initial_map->dependent_code().DeoptimizeDependentCodeGroup(
isolate, DependentCode::kInitialMapChangedGroup);
} else {
// Put the value in the initial map field until an initial map is
@@ -5044,7 +5015,7 @@ void SetInstancePrototype(Isolate* isolate, Handle<JSFunction> function,
void JSFunction::SetPrototype(Handle<JSFunction> function,
Handle<Object> value) {
DCHECK(function->IsConstructor() ||
- IsGeneratorFunction(function->shared()->kind()));
+ IsGeneratorFunction(function->shared().kind()));
Isolate* isolate = function->GetIsolate();
Handle<JSReceiver> construct_prototype;
@@ -5063,8 +5034,8 @@ void JSFunction::SetPrototype(Handle<JSFunction> function,
new_map->SetConstructor(*value);
new_map->set_has_non_instance_prototype(true);
- FunctionKind kind = function->shared()->kind();
- Handle<Context> native_context(function->context()->native_context(),
+ FunctionKind kind = function->shared().kind();
+ Handle<Context> native_context(function->context().native_context(),
isolate);
construct_prototype = Handle<JSReceiver>(
@@ -5076,7 +5047,7 @@ void JSFunction::SetPrototype(Handle<JSFunction> function,
isolate);
} else {
construct_prototype = Handle<JSReceiver>::cast(value);
- function->map()->set_has_non_instance_prototype(false);
+ function->map().set_has_non_instance_prototype(false);
}
SetInstancePrototype(isolate, function, construct_prototype);
@@ -5090,22 +5061,22 @@ void JSFunction::SetInitialMap(Handle<JSFunction> function, Handle<Map> map,
map->SetConstructor(*function);
if (FLAG_trace_maps) {
LOG(function->GetIsolate(), MapEvent("InitialMap", Map(), *map, "",
- function->shared()->DebugName()));
+ function->shared().DebugName()));
}
}
void JSFunction::EnsureHasInitialMap(Handle<JSFunction> function) {
DCHECK(function->has_prototype_slot());
DCHECK(function->IsConstructor() ||
- IsResumableFunction(function->shared()->kind()));
+ IsResumableFunction(function->shared().kind()));
if (function->has_initial_map()) return;
Isolate* isolate = function->GetIsolate();
// First create a new map with the size and number of in-object properties
// suggested by the function.
InstanceType instance_type;
- if (IsResumableFunction(function->shared()->kind())) {
- instance_type = IsAsyncGeneratorFunction(function->shared()->kind())
+ if (IsResumableFunction(function->shared().kind())) {
+ instance_type = IsAsyncGeneratorFunction(function->shared().kind())
? JS_ASYNC_GENERATOR_OBJECT_TYPE
: JS_GENERATOR_OBJECT_TYPE;
} else {
@@ -5249,8 +5220,8 @@ bool FastInitializeDerivedMap(Isolate* isolate, Handle<JSFunction> new_target,
// Check that |function|'s initial map still in sync with the |constructor|,
// otherwise we must create a new initial map for |function|.
if (new_target->has_initial_map() &&
- new_target->initial_map()->GetConstructor() == *constructor) {
- DCHECK(new_target->instance_prototype()->IsJSReceiver());
+ new_target->initial_map().GetConstructor() == *constructor) {
+ DCHECK(new_target->instance_prototype().IsJSReceiver());
return true;
}
InstanceType instance_type = constructor_initial_map->instance_type();
@@ -5260,7 +5231,7 @@ bool FastInitializeDerivedMap(Isolate* isolate, Handle<JSFunction> new_target,
// Link initial map and constructor function if the new.target is actually a
// subclass constructor.
- if (!IsDerivedConstructor(new_target->shared()->kind())) return false;
+ if (!IsDerivedConstructor(new_target->shared().kind())) return false;
int instance_size;
int in_object_properties;
@@ -5282,7 +5253,7 @@ bool FastInitializeDerivedMap(Isolate* isolate, Handle<JSFunction> new_target,
map->set_new_target_is_base(false);
Handle<HeapObject> prototype(new_target->instance_prototype(), isolate);
JSFunction::SetInitialMap(new_target, map, prototype);
- DCHECK(new_target->instance_prototype()->IsJSReceiver());
+ DCHECK(new_target->instance_prototype().IsJSReceiver());
map->SetConstructor(*constructor);
map->set_construction_counter(Map::kNoSlackTracking);
map->StartInobjectSlackTracking();
@@ -5366,15 +5337,15 @@ MaybeHandle<Map> JSFunction::GetDerivedMap(Isolate* isolate,
int JSFunction::ComputeInstanceSizeWithMinSlack(Isolate* isolate) {
CHECK(has_initial_map());
- if (initial_map()->IsInobjectSlackTrackingInProgress()) {
- int slack = initial_map()->ComputeMinObjectSlack(isolate);
- return initial_map()->InstanceSizeFromSlack(slack);
+ if (initial_map().IsInobjectSlackTrackingInProgress()) {
+ int slack = initial_map().ComputeMinObjectSlack(isolate);
+ return initial_map().InstanceSizeFromSlack(slack);
}
- return initial_map()->instance_size();
+ return initial_map().instance_size();
}
void JSFunction::PrintName(FILE* out) {
- std::unique_ptr<char[]> name = shared()->DebugName()->ToCString();
+ std::unique_ptr<char[]> name = shared().DebugName().ToCString();
PrintF(out, "%s", name.get());
}
@@ -5383,7 +5354,7 @@ Handle<String> JSFunction::GetName(Handle<JSFunction> function) {
Handle<Object> name =
JSReceiver::GetDataProperty(function, isolate->factory()->name_string());
if (name->IsString()) return Handle<String>::cast(name);
- return handle(function->shared()->DebugName(), isolate);
+ return handle(function->shared().DebugName(), isolate);
}
Handle<String> JSFunction::GetDebugName(Handle<JSFunction> function) {
@@ -5447,10 +5418,10 @@ Handle<String> JSFunction::ToString(Handle<JSFunction> function) {
if (maybe_class_positions->IsClassPositions()) {
ClassPositions class_positions =
ClassPositions::cast(*maybe_class_positions);
- int start_position = class_positions->start();
- int end_position = class_positions->end();
+ int start_position = class_positions.start();
+ int end_position = class_positions.end();
Handle<String> script_source(
- String::cast(Script::cast(shared_info->script())->source()), isolate);
+ String::cast(Script::cast(shared_info->script()).source()), isolate);
return isolate->factory()->NewSubString(script_source, start_position,
end_position);
}
@@ -5554,7 +5525,7 @@ void JSFunction::ClearTypeFeedbackInfo() {
if (has_feedback_vector()) {
FeedbackVector vector = feedback_vector();
Isolate* isolate = GetIsolate();
- if (vector->ClearSlots(isolate)) {
+ if (vector.ClearSlots(isolate)) {
IC::OnFeedbackChanged(isolate, vector, FeedbackSlot::Invalid(), *this,
"ClearTypeFeedbackInfo");
}
@@ -5588,7 +5559,7 @@ Handle<PropertyCell> JSGlobalObject::EnsureEmptyPropertyCell(
PropertyCellType original_cell_type = cell->property_details().cell_type();
DCHECK(original_cell_type == PropertyCellType::kInvalidated ||
original_cell_type == PropertyCellType::kUninitialized);
- DCHECK(cell->value()->IsTheHole(isolate));
+ DCHECK(cell->value().IsTheHole(isolate));
if (original_cell_type == PropertyCellType::kInvalidated) {
cell = PropertyCell::InvalidateEntry(isolate, dictionary, entry);
}
@@ -5640,8 +5611,8 @@ Address JSDate::GetField(Address raw_object, Address smi_index) {
Object object(raw_object);
Smi index(smi_index);
return JSDate::cast(object)
- ->DoGetField(static_cast<FieldIndex>(index->value()))
- ->ptr();
+ .DoGetField(static_cast<FieldIndex>(index.value()))
+ .ptr();
}
Object JSDate::DoGetField(FieldIndex index) {
@@ -5651,10 +5622,10 @@ Object JSDate::DoGetField(FieldIndex index) {
if (index < kFirstUncachedField) {
Object stamp = cache_stamp();
- if (stamp != date_cache->stamp() && stamp->IsSmi()) {
+ if (stamp != date_cache->stamp() && stamp.IsSmi()) {
// Since the stamp is not NaN, the value is also not NaN.
int64_t local_time_ms =
- date_cache->ToLocal(static_cast<int64_t>(value()->Number()));
+ date_cache->ToLocal(static_cast<int64_t>(value().Number()));
SetCachedFields(local_time_ms, date_cache);
}
switch (index) {
@@ -5678,10 +5649,10 @@ Object JSDate::DoGetField(FieldIndex index) {
}
if (index >= kFirstUTCField) {
- return GetUTCField(index, value()->Number(), date_cache);
+ return GetUTCField(index, value().Number(), date_cache);
}
- double time = value()->Number();
+ double time = value().Number();
if (std::isnan(time)) return GetReadOnlyRoots().nan_value();
int64_t local_time_ms = date_cache->ToLocal(static_cast<int64_t>(time));
@@ -5787,7 +5758,27 @@ void JSDate::SetCachedFields(int64_t local_time_ms, DateCache* date_cache) {
set_sec(Smi::FromInt(sec), SKIP_WRITE_BARRIER);
}
+// static
+void JSMessageObject::EnsureSourcePositionsAvailable(
+ Isolate* isolate, Handle<JSMessageObject> message) {
+ if (!message->DidEnsureSourcePositionsAvailable()) {
+ DCHECK_EQ(message->start_position(), -1);
+ DCHECK_GE(message->bytecode_offset().value(), 0);
+ Handle<SharedFunctionInfo> shared_info(
+ SharedFunctionInfo::cast(message->shared_info()), isolate);
+ SharedFunctionInfo::EnsureSourcePositionsAvailable(isolate, shared_info);
+ DCHECK(shared_info->HasBytecodeArray());
+ int position = shared_info->abstract_code().SourcePosition(
+ message->bytecode_offset().value());
+ DCHECK_GE(position, 0);
+ message->set_start_position(position);
+ message->set_end_position(position + 1);
+ message->set_shared_info(ReadOnlyRoots(isolate).undefined_value());
+ }
+}
+
int JSMessageObject::GetLineNumber() const {
+ DCHECK(DidEnsureSourcePositionsAvailable());
if (start_position() == -1) return Message::kNoLineNumberInfo;
Handle<Script> the_script(script(), GetIsolate());
@@ -5803,6 +5794,7 @@ int JSMessageObject::GetLineNumber() const {
}
int JSMessageObject::GetColumnNumber() const {
+ DCHECK(DidEnsureSourcePositionsAvailable());
if (start_position() == -1) return -1;
Handle<Script> the_script(script(), GetIsolate());
@@ -5827,6 +5819,7 @@ Handle<String> JSMessageObject::GetSourceLine() const {
Script::PositionInfo info;
const Script::OffsetFlag offset_flag = Script::WITH_OFFSET;
+ DCHECK(DidEnsureSourcePositionsAvailable());
if (!Script::GetPositionInfo(the_script, start_position(), &info,
offset_flag)) {
return isolate->factory()->empty_string();
@@ -5836,10 +5829,5 @@ Handle<String> JSMessageObject::GetSourceLine() const {
return isolate->factory()->NewSubString(src, info.line_start, info.line_end);
}
-// Explicit instantiation definitions.
-template void JSObject::ApplyAttributesToDictionary(
- Isolate* isolate, ReadOnlyRoots roots, Handle<NumberDictionary> dictionary,
- const PropertyAttributes attributes);
-
} // namespace internal
} // namespace v8
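
The JSMessageObject change above makes source positions lazy: a freshly created message carries only a SharedFunctionInfo and a bytecode offset, and the positions are materialized (and the shared_info slot cleared) on first request. A minimal caller sketch of the resulting ensure-then-query contract; this is not a verbatim V8 call site, and the helper name ReportMessagePosition is invented for illustration:

    // Assumes the usual V8-internal includes (isolate.h, js-objects-inl.h).
    void ReportMessagePosition(Isolate* isolate, Handle<JSMessageObject> message) {
      // Must run before any position query; a no-op on second and later calls.
      JSMessageObject::EnsureSourcePositionsAvailable(isolate, message);
      int line = message->GetLineNumber();      // 1-based, or kNoLineNumberInfo
      int column = message->GetColumnNumber();  // -1 when unavailable
      PrintF("at line %d, column %d\n", line, column);
    }
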
diff --git a/deps/v8/src/objects/js-objects.h b/deps/v8/src/objects/js-objects.h
index c67f70c207..5ac1751c48 100644
--- a/deps/v8/src/objects/js-objects.h
+++ b/deps/v8/src/objects/js-objects.h
@@ -5,10 +5,10 @@
#ifndef V8_OBJECTS_JS_OBJECTS_H_
#define V8_OBJECTS_JS_OBJECTS_H_
-#include "src/objects.h"
#include "src/objects/embedder-data-slot.h"
+#include "src/objects/objects.h"
#include "src/objects/property-array.h"
-#include "torque-generated/class-definitions-from-dsl.h"
+#include "torque-generated/field-offsets-tq.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -259,19 +259,12 @@ class JSReceiver : public HeapObject {
Handle<JSReceiver> object, PropertyFilter filter,
bool try_fast_path = true);
- V8_WARN_UNUSED_RESULT static Handle<FixedArray> GetOwnElementIndices(
- Isolate* isolate, Handle<JSReceiver> receiver, Handle<JSObject> object);
-
static const int kHashMask = PropertyArray::HashField::kMask;
DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize,
TORQUE_GENERATED_JSRECEIVER_FIELDS)
- static const int kHeaderSize = kSize;
-
bool HasProxyInPrototype(Isolate* isolate);
- bool HasComplexElements();
-
V8_WARN_UNUSED_RESULT static MaybeHandle<FixedArray> GetPrivateEntries(
Isolate* isolate, Handle<JSReceiver> receiver);
@@ -345,8 +338,9 @@ class JSObject : public JSReceiver {
// Returns true if an object has elements of PACKED_ELEMENTS
inline bool HasPackedElements();
inline bool HasFrozenOrSealedElements();
+ inline bool HasSealedElements();
- inline bool HasFixedTypedArrayElements();
+ inline bool HasTypedArrayElements();
inline bool HasFixedUint8ClampedElements();
inline bool HasFixedArrayElements();
@@ -641,7 +635,12 @@ class JSObject : public JSReceiver {
inline uint64_t RawFastDoublePropertyAsBitsAt(FieldIndex index);
inline void FastPropertyAtPut(FieldIndex index, Object value);
- inline void RawFastPropertyAtPut(FieldIndex index, Object value);
+ inline void RawFastPropertyAtPut(
+ FieldIndex index, Object value,
+ WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
+ inline void RawFastInobjectPropertyAtPut(
+ FieldIndex index, Object value,
+ WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
inline void RawFastDoublePropertyAsBitsAtPut(FieldIndex index, uint64_t bits);
inline void WriteToField(int descriptor, PropertyDetails details,
Object value);
@@ -879,15 +878,8 @@ class JSIteratorResult : public JSObject {
DECL_ACCESSORS(done, Object)
// Layout description.
-#define JS_ITERATOR_RESULT_FIELDS(V) \
- V(kValueOffset, kTaggedSize) \
- V(kDoneOffset, kTaggedSize) \
- /* Total size. */ \
- V(kSize, 0)
-
DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
- JS_ITERATOR_RESULT_FIELDS)
-#undef JS_ITERATOR_RESULT_FIELDS
+ TORQUE_GENERATED_JSITERATOR_RESULT_FIELDS)
// Indices of in-object properties.
static const int kValueIndex = 0;
@@ -1209,15 +1201,8 @@ class JSGlobalObject : public JSObject {
DECL_VERIFIER(JSGlobalObject)
// Layout description.
-#define JS_GLOBAL_OBJECT_FIELDS(V) \
- V(kNativeContextOffset, kTaggedSize) \
- V(kGlobalProxyOffset, kTaggedSize) \
- /* Header size. */ \
- V(kHeaderSize, 0) \
- V(kSize, 0)
-
- DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize, JS_GLOBAL_OBJECT_FIELDS)
-#undef JS_GLOBAL_OBJECT_FIELDS
+ DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
+ TORQUE_GENERATED_JSGLOBAL_OBJECT_FIELDS)
OBJECT_CONSTRUCTORS(JSGlobalObject, JSObject);
};
@@ -1356,23 +1341,30 @@ class JSMessageObject : public JSObject {
// [stack_frames]: an array of stack frames for this error object.
DECL_ACCESSORS(stack_frames, Object)
- // [start_position]: the start position in the script for the error message.
- inline int start_position() const;
- inline void set_start_position(int value);
+ // Initializes the source positions in the object if possible. Does nothing if
+ // called more than once. If called when stack space is exhausted, then the
+ // source positions will not be set and calling it again when there is more
+ // stack space will not have any effect.
+ static void EnsureSourcePositionsAvailable(Isolate* isolate,
+ Handle<JSMessageObject> message);
- // [end_position]: the end position in the script for the error message.
- inline int end_position() const;
- inline void set_end_position(int value);
+ // Gets the start and end positions for the message.
+ // EnsureSourcePositionsAvailable must have been called before calling these.
+ inline int GetStartPosition() const;
+ inline int GetEndPosition() const;
// Returns the line number for the error message (1-based), or
// Message::kNoLineNumberInfo if the line cannot be determined.
+ // EnsureSourcePositionsAvailable must have been called before calling this.
V8_EXPORT_PRIVATE int GetLineNumber() const;
// Returns the offset of the given position within the containing line.
+ // EnsureSourcePositionsAvailable must have been called before calling this.
V8_EXPORT_PRIVATE int GetColumnNumber() const;
// Returns the source code line containing the given source
// position, or the empty string if the position is invalid.
+ // EnsureSourcePositionsAvailable must have been called before calling this.
Handle<String> GetSourceLine() const;
inline int error_level() const;
@@ -1393,6 +1385,27 @@ class JSMessageObject : public JSObject {
kPointerFieldsEndOffset, kSize>;
OBJECT_CONSTRUCTORS(JSMessageObject, JSObject);
+
+ private:
+ friend class Factory;
+
+ inline bool DidEnsureSourcePositionsAvailable() const;
+
+ // [shared]: optional SharedFunctionInfo that can be used to reconstruct the
+ // source position if not available when the message was generated.
+ DECL_ACCESSORS(shared_info, HeapObject)
+
+ // [bytecode_offset]: optional offset used along with |shared| to generate
+ // source positions.
+ DECL_ACCESSORS(bytecode_offset, Smi)
+
+ // [start_position]: the start position in the script for the error message.
+ inline int start_position() const;
+ inline void set_start_position(int value);
+
+ // [end_position]: the end position in the script for the error message.
+ inline int end_position() const;
+ inline void set_end_position(int value);
};
// The [Async-from-Sync Iterator] object
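
Throughout these headers, hand-written V(offset, size) field lists are replaced by Torque-generated equivalents (TORQUE_GENERATED_*_FIELDS); both forms feed DEFINE_FIELD_OFFSET_CONSTANTS, which lays the fields out as consecutive enum constants. A simplified, self-contained sketch of that expansion (the macro bodies and the kTaggedSize/kHeaderSize values are illustrative assumptions, not V8's actual definitions):

    constexpr int kTaggedSize = 8;   // assumption: 64-bit tagged values
    constexpr int kHeaderSize = 24;  // assumption: illustrative header size

    #define JS_ITERATOR_RESULT_FIELDS(V) \
      V(kValueOffset, kTaggedSize)       \
      V(kDoneOffset, kTaggedSize)        \
      V(kSize, 0) /* total size */

    // Each entry emits Name and NameEnd = Name + Size - 1, so the next
    // enumerator (NameEnd + 1) starts exactly Size bytes later.
    #define DEFINE_ONE_FIELD_OFFSET(Name, Size) Name, Name##End = Name + (Size)-1,
    #define DEFINE_FIELD_OFFSET_CONSTANTS(StartOffset, LIST_MACRO) \
      enum { LIST_MACRO##_StartOffset = (StartOffset)-1,           \
             LIST_MACRO(DEFINE_ONE_FIELD_OFFSET) };

    DEFINE_FIELD_OFFSET_CONSTANTS(kHeaderSize, JS_ITERATOR_RESULT_FIELDS)

    static_assert(kValueOffset == kHeaderSize, "first field follows the header");
    static_assert(kDoneOffset == kValueOffset + kTaggedSize, "fields are packed");
    static_assert(kSize == kHeaderSize + 2 * kTaggedSize, "kSize is the total size");

The generated list yields the same constants as the deleted hand-written one as long as field order and sizes match, which is why the replacement is layout-neutral.
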
diff --git a/deps/v8/src/objects/js-plural-rules-inl.h b/deps/v8/src/objects/js-plural-rules-inl.h
index f7248431e8..1924bdc4ff 100644
--- a/deps/v8/src/objects/js-plural-rules-inl.h
+++ b/deps/v8/src/objects/js-plural-rules-inl.h
@@ -9,9 +9,9 @@
#ifndef V8_OBJECTS_JS_PLURAL_RULES_INL_H_
#define V8_OBJECTS_JS_PLURAL_RULES_INL_H_
-#include "src/api-inl.h"
-#include "src/objects-inl.h"
+#include "src/api/api-inl.h"
#include "src/objects/js-plural-rules.h"
+#include "src/objects/objects-inl.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -24,9 +24,9 @@ OBJECT_CONSTRUCTORS_IMPL(JSPluralRules, JSObject)
ACCESSORS(JSPluralRules, locale, String, kLocaleOffset)
SMI_ACCESSORS(JSPluralRules, flags, kFlagsOffset)
ACCESSORS(JSPluralRules, icu_plural_rules, Managed<icu::PluralRules>,
- kICUPluralRulesOffset)
+ kIcuPluralRulesOffset)
ACCESSORS(JSPluralRules, icu_decimal_format, Managed<icu::DecimalFormat>,
- kICUDecimalFormatOffset)
+ kIcuDecimalFormatOffset)
inline void JSPluralRules::set_type(Type type) {
DCHECK_LT(type, Type::COUNT);
diff --git a/deps/v8/src/objects/js-plural-rules.cc b/deps/v8/src/objects/js-plural-rules.cc
index da349dcd81..8daf5db64a 100644
--- a/deps/v8/src/objects/js-plural-rules.cc
+++ b/deps/v8/src/objects/js-plural-rules.cc
@@ -8,7 +8,7 @@
#include "src/objects/js-plural-rules.h"
-#include "src/isolate-inl.h"
+#include "src/execution/isolate-inl.h"
#include "src/objects/intl-objects.h"
#include "src/objects/js-plural-rules-inl.h"
#include "unicode/decimfmt.h"
@@ -164,9 +164,24 @@ MaybeHandle<JSPluralRules> JSPluralRules::Initialize(
CHECK_NOT_NULL(icu_decimal_format.get());
// 9. Perform ? SetNumberFormatDigitOptions(pluralRules, options, 0, 3).
- Maybe<bool> done = Intl::SetNumberFormatDigitOptions(
- isolate, icu_decimal_format.get(), options, 0, 3);
- MAYBE_RETURN(done, MaybeHandle<JSPluralRules>());
+ Maybe<Intl::NumberFormatDigitOptions> maybe_digit_options =
+ Intl::SetNumberFormatDigitOptions(isolate, options, 0, 3);
+ MAYBE_RETURN(maybe_digit_options, MaybeHandle<JSPluralRules>());
+ Intl::NumberFormatDigitOptions digit_options = maybe_digit_options.FromJust();
+
+ icu_decimal_format->setRoundingMode(icu::DecimalFormat::kRoundHalfUp);
+ icu_decimal_format->setMinimumIntegerDigits(
+ digit_options.minimum_integer_digits);
+ icu_decimal_format->setMinimumFractionDigits(
+ digit_options.minimum_fraction_digits);
+ icu_decimal_format->setMaximumFractionDigits(
+ digit_options.maximum_fraction_digits);
+ if (digit_options.minimum_significant_digits > 0) {
+ icu_decimal_format->setMinimumSignificantDigits(
+ digit_options.minimum_significant_digits);
+ icu_decimal_format->setMaximumSignificantDigits(
+ digit_options.maximum_significant_digits);
+ }
Handle<Managed<icu::PluralRules>> managed_plural_rules =
Managed<icu::PluralRules>::FromUniquePtr(isolate, 0,
@@ -184,11 +199,11 @@ MaybeHandle<JSPluralRules> JSPluralRules::Initialize(
MaybeHandle<String> JSPluralRules::ResolvePlural(
Isolate* isolate, Handle<JSPluralRules> plural_rules, double number) {
- icu::PluralRules* icu_plural_rules = plural_rules->icu_plural_rules()->raw();
+ icu::PluralRules* icu_plural_rules = plural_rules->icu_plural_rules().raw();
CHECK_NOT_NULL(icu_plural_rules);
icu::DecimalFormat* icu_decimal_format =
- plural_rules->icu_decimal_format()->raw();
+ plural_rules->icu_decimal_format().raw();
CHECK_NOT_NULL(icu_decimal_format);
// Currently, PluralRules doesn't implement all the options for rounding that
@@ -247,7 +262,7 @@ Handle<JSObject> JSPluralRules::ResolvedOptions(
"type");
icu::DecimalFormat* icu_decimal_format =
- plural_rules->icu_decimal_format()->raw();
+ plural_rules->icu_decimal_format().raw();
CHECK_NOT_NULL(icu_decimal_format);
// This is a safe upcast as icu::DecimalFormat inherits from
@@ -281,7 +296,7 @@ Handle<JSObject> JSPluralRules::ResolvedOptions(
// 6. Let pluralCategories be a List of Strings representing the
// possible results of PluralRuleSelect for the selected locale pr.
- icu::PluralRules* icu_plural_rules = plural_rules->icu_plural_rules()->raw();
+ icu::PluralRules* icu_plural_rules = plural_rules->icu_plural_rules().raw();
CHECK_NOT_NULL(icu_plural_rules);
UErrorCode status = U_ZERO_ERROR;
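
In the new Initialize path, the resolved digit options land directly on the icu::DecimalFormat that backs plural selection, so rounding happens before a category is chosen. A standalone sketch of that effect using plain ICU (outside V8; the half-up rounding is approximated with std::floor rather than ICU's formatter):

    #include <unicode/locid.h>
    #include <unicode/plurrule.h>
    #include <unicode/unistr.h>

    #include <cmath>
    #include <memory>

    // Plural selection is sensitive to maximumFractionDigits because the
    // category is chosen for the rounded value, not the raw double.
    icu::UnicodeString SelectEnglishPlural(double number, int max_fraction_digits) {
      UErrorCode status = U_ZERO_ERROR;
      std::unique_ptr<icu::PluralRules> rules(
          icu::PluralRules::forLocale(icu::Locale::getEnglish(), status));
      if (U_FAILURE(status)) return icu::UnicodeString("other");
      // Half-up rounding, mirroring kRoundHalfUp in the hunk above.
      double scale = std::pow(10.0, max_fraction_digits);
      double rounded = std::floor(number * scale + 0.5) / scale;
      return rules->select(rounded);  // 1.0 -> "one", 1.5 -> "other" (en)
    }
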
diff --git a/deps/v8/src/objects/js-plural-rules.h b/deps/v8/src/objects/js-plural-rules.h
index 70c63a9a8f..249090bdf6 100644
--- a/deps/v8/src/objects/js-plural-rules.h
+++ b/deps/v8/src/objects/js-plural-rules.h
@@ -12,16 +12,17 @@
#include <set>
#include <string>
+#include "src/execution/isolate.h"
#include "src/heap/factory.h"
-#include "src/isolate.h"
-#include "src/objects.h"
#include "src/objects/intl-objects.h"
#include "src/objects/managed.h"
+#include "src/objects/objects.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
namespace U_ICU_NAMESPACE {
+class DecimalFormat;
class PluralRules;
} // namespace U_ICU_NAMESPACE
@@ -69,16 +70,8 @@ class JSPluralRules : public JSObject {
STATIC_ASSERT(Type::ORDINAL <= TypeBits::kMax);
// Layout description.
-#define JS_PLURAL_RULES_FIELDS(V) \
- V(kLocaleOffset, kTaggedSize) \
- V(kFlagsOffset, kTaggedSize) \
- V(kICUPluralRulesOffset, kTaggedSize) \
- V(kICUDecimalFormatOffset, kTaggedSize) \
- /* Total size. */ \
- V(kSize, 0)
-
- DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize, JS_PLURAL_RULES_FIELDS)
-#undef JS_PLURAL_RULES_FIELDS
+ DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
+ TORQUE_GENERATED_JSPLURAL_RULES_FIELDS)
DECL_ACCESSORS(locale, String)
DECL_INT_ACCESSORS(flags)
diff --git a/deps/v8/src/objects/js-promise-inl.h b/deps/v8/src/objects/js-promise-inl.h
index a423c0281c..ecfeb53306 100644
--- a/deps/v8/src/objects/js-promise-inl.h
+++ b/deps/v8/src/objects/js-promise-inl.h
@@ -7,8 +7,8 @@
#include "src/objects/js-promise.h"
-#include "src/objects-inl.h" // Needed for write barriers
-#include "src/objects.h"
+#include "src/objects/objects-inl.h" // Needed for write barriers
+#include "src/objects/objects.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
diff --git a/deps/v8/src/objects/js-proxy-inl.h b/deps/v8/src/objects/js-proxy-inl.h
index e0d0835f06..f33628b5c2 100644
--- a/deps/v8/src/objects/js-proxy-inl.h
+++ b/deps/v8/src/objects/js-proxy-inl.h
@@ -7,7 +7,7 @@
#include "src/objects/js-proxy.h"
-#include "src/objects-inl.h" // Needed for write barriers
+#include "src/objects/objects-inl.h" // Needed for write barriers
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -22,7 +22,7 @@ CAST_ACCESSOR(JSProxy)
ACCESSORS(JSProxy, target, Object, kTargetOffset)
ACCESSORS(JSProxy, handler, Object, kHandlerOffset)
-bool JSProxy::IsRevoked() const { return !handler()->IsJSReceiver(); }
+bool JSProxy::IsRevoked() const { return !handler().IsJSReceiver(); }
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/objects/js-proxy.h b/deps/v8/src/objects/js-proxy.h
index 68fbb333b9..c4f98927e9 100644
--- a/deps/v8/src/objects/js-proxy.h
+++ b/deps/v8/src/objects/js-proxy.h
@@ -6,7 +6,7 @@
#define V8_OBJECTS_JS_PROXY_H_
#include "src/objects/js-objects.h"
-#include "torque-generated/builtin-definitions-from-dsl.h"
+#include "torque-generated/builtin-definitions-tq.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
diff --git a/deps/v8/src/objects/js-regexp-inl.h b/deps/v8/src/objects/js-regexp-inl.h
index 8322a3c258..93e6ee008d 100644
--- a/deps/v8/src/objects/js-regexp-inl.h
+++ b/deps/v8/src/objects/js-regexp-inl.h
@@ -7,7 +7,7 @@
#include "src/objects/js-regexp.h"
-#include "src/objects-inl.h" // Needed for write barriers
+#include "src/objects/objects-inl.h" // Needed for write barriers
#include "src/objects/smi.h"
#include "src/objects/string.h"
@@ -28,9 +28,9 @@ ACCESSORS(JSRegExp, last_index, Object, kLastIndexOffset)
JSRegExp::Type JSRegExp::TypeTag() const {
Object data = this->data();
- if (data->IsUndefined()) return JSRegExp::NOT_COMPILED;
- Smi smi = Smi::cast(FixedArray::cast(data)->get(kTagIndex));
- return static_cast<JSRegExp::Type>(smi->value());
+ if (data.IsUndefined()) return JSRegExp::NOT_COMPILED;
+ Smi smi = Smi::cast(FixedArray::cast(data).get(kTagIndex));
+ return static_cast<JSRegExp::Type>(smi.value());
}
int JSRegExp::CaptureCount() {
@@ -45,21 +45,21 @@ int JSRegExp::CaptureCount() {
}
JSRegExp::Flags JSRegExp::GetFlags() {
- DCHECK(this->data()->IsFixedArray());
+ DCHECK(this->data().IsFixedArray());
Object data = this->data();
- Smi smi = Smi::cast(FixedArray::cast(data)->get(kFlagsIndex));
- return Flags(smi->value());
+ Smi smi = Smi::cast(FixedArray::cast(data).get(kFlagsIndex));
+ return Flags(smi.value());
}
String JSRegExp::Pattern() {
- DCHECK(this->data()->IsFixedArray());
+ DCHECK(this->data().IsFixedArray());
Object data = this->data();
- String pattern = String::cast(FixedArray::cast(data)->get(kSourceIndex));
+ String pattern = String::cast(FixedArray::cast(data).get(kSourceIndex));
return pattern;
}
Object JSRegExp::CaptureNameMap() {
- DCHECK(this->data()->IsFixedArray());
+ DCHECK(this->data().IsFixedArray());
DCHECK_EQ(TypeTag(), IRREGEXP);
Object value = DataAt(kIrregexpCaptureNameMapIndex);
DCHECK_NE(value, Smi::FromInt(JSRegExp::kUninitializedValue));
@@ -68,24 +68,24 @@ Object JSRegExp::CaptureNameMap() {
Object JSRegExp::DataAt(int index) const {
DCHECK(TypeTag() != NOT_COMPILED);
- return FixedArray::cast(data())->get(index);
+ return FixedArray::cast(data()).get(index);
}
void JSRegExp::SetDataAt(int index, Object value) {
DCHECK(TypeTag() != NOT_COMPILED);
DCHECK_GE(index,
kDataIndex); // Only implementation data can be set this way.
- FixedArray::cast(data())->set(index, value);
+ FixedArray::cast(data()).set(index, value);
}
bool JSRegExp::HasCompiledCode() const {
if (TypeTag() != IRREGEXP) return false;
#ifdef DEBUG
- DCHECK(DataAt(kIrregexpLatin1CodeIndex)->IsCode() ||
- DataAt(kIrregexpLatin1CodeIndex)->IsByteArray() ||
+ DCHECK(DataAt(kIrregexpLatin1CodeIndex).IsCode() ||
+ DataAt(kIrregexpLatin1CodeIndex).IsByteArray() ||
DataAt(kIrregexpLatin1CodeIndex) == Smi::FromInt(kUninitializedValue));
- DCHECK(DataAt(kIrregexpUC16CodeIndex)->IsCode() ||
- DataAt(kIrregexpUC16CodeIndex)->IsByteArray() ||
+ DCHECK(DataAt(kIrregexpUC16CodeIndex).IsCode() ||
+ DataAt(kIrregexpUC16CodeIndex).IsByteArray() ||
DataAt(kIrregexpUC16CodeIndex) == Smi::FromInt(kUninitializedValue));
#endif // DEBUG
Smi uninitialized = Smi::FromInt(kUninitializedValue);
diff --git a/deps/v8/src/objects/js-regexp-string-iterator-inl.h b/deps/v8/src/objects/js-regexp-string-iterator-inl.h
index 82565f0de9..08e2f99d7e 100644
--- a/deps/v8/src/objects/js-regexp-string-iterator-inl.h
+++ b/deps/v8/src/objects/js-regexp-string-iterator-inl.h
@@ -7,7 +7,7 @@
#include "src/objects/js-regexp-string-iterator.h"
-#include "src/objects-inl.h" // Needed for write barriers
+#include "src/objects/objects-inl.h" // Needed for write barriers
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
diff --git a/deps/v8/src/objects/js-regexp.h b/deps/v8/src/objects/js-regexp.h
index a48900d81b..e525c66e3e 100644
--- a/deps/v8/src/objects/js-regexp.h
+++ b/deps/v8/src/objects/js-regexp.h
@@ -167,10 +167,10 @@ class JSRegExp : public JSObject {
// Descriptor array index to important methods in the prototype.
static const int kExecFunctionDescriptorIndex = 1;
static const int kSymbolMatchFunctionDescriptorIndex = 13;
- static const int kSymbolReplaceFunctionDescriptorIndex = 14;
- static const int kSymbolSearchFunctionDescriptorIndex = 15;
- static const int kSymbolSplitFunctionDescriptorIndex = 16;
- static const int kSymbolMatchAllFunctionDescriptorIndex = 17;
+ static const int kSymbolMatchAllFunctionDescriptorIndex = 14;
+ static const int kSymbolReplaceFunctionDescriptorIndex = 15;
+ static const int kSymbolSearchFunctionDescriptorIndex = 16;
+ static const int kSymbolSplitFunctionDescriptorIndex = 17;
// The uninitialized value for a regexp code object.
static const int kUninitializedValue = -1;
diff --git a/deps/v8/src/objects/js-relative-time-format-inl.h b/deps/v8/src/objects/js-relative-time-format-inl.h
index 6b8b4550ac..1ff66b1a12 100644
--- a/deps/v8/src/objects/js-relative-time-format-inl.h
+++ b/deps/v8/src/objects/js-relative-time-format-inl.h
@@ -9,8 +9,8 @@
#ifndef V8_OBJECTS_JS_RELATIVE_TIME_FORMAT_INL_H_
#define V8_OBJECTS_JS_RELATIVE_TIME_FORMAT_INL_H_
-#include "src/objects-inl.h"
#include "src/objects/js-relative-time-format.h"
+#include "src/objects/objects-inl.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -23,7 +23,7 @@ OBJECT_CONSTRUCTORS_IMPL(JSRelativeTimeFormat, JSObject)
// Base relative time format accessors.
ACCESSORS(JSRelativeTimeFormat, locale, String, kLocaleOffset)
ACCESSORS(JSRelativeTimeFormat, icu_formatter,
- Managed<icu::RelativeDateTimeFormatter>, kICUFormatterOffset)
+ Managed<icu::RelativeDateTimeFormatter>, kIcuFormatterOffset)
SMI_ACCESSORS(JSRelativeTimeFormat, flags, kFlagsOffset)
inline void JSRelativeTimeFormat::set_style(Style style) {
diff --git a/deps/v8/src/objects/js-relative-time-format.cc b/deps/v8/src/objects/js-relative-time-format.cc
index 5b89e83057..59a3bf7ea0 100644
--- a/deps/v8/src/objects/js-relative-time-format.cc
+++ b/deps/v8/src/objects/js-relative-time-format.cc
@@ -12,12 +12,12 @@
#include <memory>
#include <string>
+#include "src/execution/isolate.h"
#include "src/heap/factory.h"
-#include "src/isolate.h"
-#include "src/objects-inl.h"
#include "src/objects/intl-objects.h"
#include "src/objects/js-number-format.h"
#include "src/objects/js-relative-time-format-inl.h"
+#include "src/objects/objects-inl.h"
#include "unicode/numfmt.h"
#include "unicode/reldatefmt.h"
@@ -88,8 +88,21 @@ MaybeHandle<JSRelativeTimeFormat> JSRelativeTimeFormat::Initialize(
MAYBE_RETURN(maybe_locale_matcher, MaybeHandle<JSRelativeTimeFormat>());
Intl::MatcherOption matcher = maybe_locale_matcher.FromJust();
- // 7. Let localeData be %RelativeTimeFormat%.[[LocaleData]].
- // 8. Let r be
+ // 7. Let _numberingSystem_ be ? GetOption(_options_, `"numberingSystem"`,
+ // `"string"`, *undefined*, *undefined*).
+ std::unique_ptr<char[]> numbering_system_str = nullptr;
+ Maybe<bool> maybe_numberingSystem = Intl::GetNumberingSystem(
+ isolate, options, "Intl.RelativeTimeFormat", &numbering_system_str);
+ // 8. If _numberingSystem_ is not *undefined*, then
+ // a. If _numberingSystem_ does not match the
+ // `(3*8alphanum) *("-" (3*8alphanum))` sequence, throw a *RangeError*
+ // exception.
+ MAYBE_RETURN(maybe_numberingSystem, MaybeHandle<JSRelativeTimeFormat>());
+
+ // 9. Set _opt_.[[nu]] to _numberingSystem_.
+
+ // 10. Let localeData be %RelativeTimeFormat%.[[LocaleData]].
+ // 11. Let r be
// ResolveLocale(%RelativeTimeFormat%.[[AvailableLocales]],
// requestedLocales, opt,
// %RelativeTimeFormat%.[[RelevantExtensionKeys]], localeData).
@@ -97,14 +110,24 @@ MaybeHandle<JSRelativeTimeFormat> JSRelativeTimeFormat::Initialize(
Intl::ResolveLocale(isolate, JSRelativeTimeFormat::GetAvailableLocales(),
requested_locales, matcher, {"nu"});
- // 9. Let locale be r.[[Locale]].
- // 10. Set relativeTimeFormat.[[Locale]] to locale.
- // 11. Let dataLocale be r.[[DataLocale]].
- Handle<String> locale_str =
- isolate->factory()->NewStringFromAsciiChecked(r.locale.c_str());
+ // 12. Let locale be r.[[Locale]].
+ // 13. Set relativeTimeFormat.[[Locale]] to locale.
+ // 14. Let dataLocale be r.[[DataLocale]].
+ icu::Locale icu_locale = r.icu_locale;
+ UErrorCode status = U_ZERO_ERROR;
+ if (numbering_system_str != nullptr) {
+ icu_locale.setUnicodeKeywordValue("nu", numbering_system_str.get(), status);
+ CHECK(U_SUCCESS(status));
+ }
+
+ Maybe<std::string> maybe_locale_str = Intl::ToLanguageTag(icu_locale);
+ MAYBE_RETURN(maybe_locale_str, MaybeHandle<JSRelativeTimeFormat>());
+
+ Handle<String> locale_str = isolate->factory()->NewStringFromAsciiChecked(
+ maybe_locale_str.FromJust().c_str());
relative_time_format_holder->set_locale(*locale_str);
- // 12. Let s be ? GetOption(options, "style", "string",
+ // 15. Let s be ? GetOption(options, "style", "string",
// «"long", "short", "narrow"», "long").
Maybe<Style> maybe_style = Intl::GetStringOption<Style>(
isolate, options, "style", "Intl.RelativeTimeFormat",
@@ -113,10 +136,10 @@ MaybeHandle<JSRelativeTimeFormat> JSRelativeTimeFormat::Initialize(
MAYBE_RETURN(maybe_style, MaybeHandle<JSRelativeTimeFormat>());
Style style_enum = maybe_style.FromJust();
- // 13. Set relativeTimeFormat.[[Style]] to s.
+ // 16. Set relativeTimeFormat.[[Style]] to s.
relative_time_format_holder->set_style(style_enum);
- // 14. Let numeric be ? GetOption(options, "numeric", "string",
+ // 17. Let numeric be ? GetOption(options, "numeric", "string",
// «"always", "auto"», "always").
Maybe<Numeric> maybe_numeric = Intl::GetStringOption<Numeric>(
isolate, options, "numeric", "Intl.RelativeTimeFormat",
@@ -124,12 +147,9 @@ MaybeHandle<JSRelativeTimeFormat> JSRelativeTimeFormat::Initialize(
MAYBE_RETURN(maybe_numeric, MaybeHandle<JSRelativeTimeFormat>());
Numeric numeric_enum = maybe_numeric.FromJust();
- // 15. Set relativeTimeFormat.[[Numeric]] to numeric.
+ // 18. Set relativeTimeFormat.[[Numeric]] to numeric.
relative_time_format_holder->set_numeric(numeric_enum);
- icu::Locale icu_locale = r.icu_locale;
- UErrorCode status = U_ZERO_ERROR;
-
// 19. Let relativeTimeFormat.[[NumberFormat]] be
// ? Construct(%NumberFormat%, « nfLocale, nfOptions »).
icu::NumberFormat* number_format =
@@ -177,7 +197,7 @@ Handle<JSObject> JSRelativeTimeFormat::ResolvedOptions(
format_holder->StyleAsString(), NONE);
JSObject::AddProperty(isolate, result, factory->numeric_string(),
format_holder->NumericAsString(), NONE);
- std::string locale_str(format_holder->locale()->ToCString().get());
+ std::string locale_str(format_holder->locale().ToCString().get());
icu::Locale icu_locale = Intl::CreateICULocale(locale_str);
std::string numbering_system = Intl::GetNumberingSystem(icu_locale);
JSObject::AddProperty(
@@ -293,7 +313,7 @@ MaybeHandle<T> FormatCommon(
isolate->factory()->NewStringFromAsciiChecked(func_name)),
T);
}
- icu::RelativeDateTimeFormatter* formatter = format->icu_formatter()->raw();
+ icu::RelativeDateTimeFormatter* formatter = format->icu_formatter().raw();
CHECK_NOT_NULL(formatter);
URelativeDateTimeUnit unit_enum;
if (!GetURelativeDateTimeUnit(unit, &unit_enum)) {
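
The new numberingSystem handling validates the option and then attaches it to the ICU locale as the BCP 47 "nu" keyword, so the resolved locale string reflects it. A minimal standalone sketch of that locale plumbing (plain ICU C++; requires ICU 63 or newer for these two calls):

    #include <unicode/locid.h>

    #include <iostream>
    #include <string>

    int main() {
      UErrorCode status = U_ZERO_ERROR;
      icu::Locale locale("en");
      // Attach the numbering system as the Unicode "nu" extension keyword.
      locale.setUnicodeKeywordValue("nu", "arab", status);
      std::string tag = locale.toLanguageTag<std::string>(status);
      if (U_SUCCESS(status)) std::cout << tag << "\n";  // "en-u-nu-arab"
      return 0;
    }
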
diff --git a/deps/v8/src/objects/js-relative-time-format.h b/deps/v8/src/objects/js-relative-time-format.h
index 8c8ef7bbce..740336c29c 100644
--- a/deps/v8/src/objects/js-relative-time-format.h
+++ b/deps/v8/src/objects/js-relative-time-format.h
@@ -12,10 +12,10 @@
#include <set>
#include <string>
+#include "src/execution/isolate.h"
#include "src/heap/factory.h"
-#include "src/isolate.h"
-#include "src/objects.h"
#include "src/objects/managed.h"
+#include "src/objects/objects.h"
#include "unicode/uversion.h"
// Has to be the last include (doesn't have include guards):
@@ -109,16 +109,8 @@ class JSRelativeTimeFormat : public JSObject {
DECL_VERIFIER(JSRelativeTimeFormat)
// Layout description.
-#define JS_RELATIVE_TIME_FORMAT_FIELDS(V) \
- V(kLocaleOffset, kTaggedSize) \
- V(kICUFormatterOffset, kTaggedSize) \
- V(kFlagsOffset, kTaggedSize) \
- /* Header size. */ \
- V(kSize, 0)
-
DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
- JS_RELATIVE_TIME_FORMAT_FIELDS)
-#undef JS_RELATIVE_TIME_FORMAT_FIELDS
+ TORQUE_GENERATED_JSRELATIVE_TIME_FORMAT_FIELDS)
private:
static Style getStyle(const char* str);
diff --git a/deps/v8/src/objects/js-segment-iterator-inl.h b/deps/v8/src/objects/js-segment-iterator-inl.h
index 0c1a3e4eec..24a827c030 100644
--- a/deps/v8/src/objects/js-segment-iterator-inl.h
+++ b/deps/v8/src/objects/js-segment-iterator-inl.h
@@ -9,8 +9,8 @@
#ifndef V8_OBJECTS_JS_SEGMENT_ITERATOR_INL_H_
#define V8_OBJECTS_JS_SEGMENT_ITERATOR_INL_H_
-#include "src/objects-inl.h"
#include "src/objects/js-segment-iterator.h"
+#include "src/objects/objects-inl.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -22,7 +22,7 @@ OBJECT_CONSTRUCTORS_IMPL(JSSegmentIterator, JSObject)
// Base segment iterator accessors.
ACCESSORS(JSSegmentIterator, icu_break_iterator, Managed<icu::BreakIterator>,
- kICUBreakIteratorOffset)
+ kIcuBreakIteratorOffset)
ACCESSORS(JSSegmentIterator, unicode_string, Managed<icu::UnicodeString>,
kUnicodeStringOffset)
diff --git a/deps/v8/src/objects/js-segment-iterator.cc b/deps/v8/src/objects/js-segment-iterator.cc
index 570c71dd21..3d2b19ca5c 100644
--- a/deps/v8/src/objects/js-segment-iterator.cc
+++ b/deps/v8/src/objects/js-segment-iterator.cc
@@ -12,12 +12,12 @@
#include <memory>
#include <string>
+#include "src/execution/isolate.h"
#include "src/heap/factory.h"
-#include "src/isolate.h"
-#include "src/objects-inl.h"
#include "src/objects/intl-objects.h"
#include "src/objects/js-segment-iterator-inl.h"
#include "src/objects/managed.h"
+#include "src/objects/objects-inl.h"
#include "unicode/brkiter.h"
namespace v8 {
@@ -26,7 +26,7 @@ namespace internal {
MaybeHandle<String> JSSegmentIterator::GetSegment(Isolate* isolate,
int32_t start,
int32_t end) const {
- return Intl::ToString(isolate, *(unicode_string()->raw()), start, end);
+ return Intl::ToString(isolate, *(unicode_string().raw()), start, end);
}
Handle<String> JSSegmentIterator::GranularityAsString() const {
@@ -80,7 +80,7 @@ Handle<Object> JSSegmentIterator::BreakType() const {
if (!is_break_type_set()) {
return GetReadOnlyRoots().undefined_value_handle();
}
- icu::BreakIterator* break_iterator = icu_break_iterator()->raw();
+ icu::BreakIterator* break_iterator = icu_break_iterator().raw();
int32_t rule_status = break_iterator->getRuleStatus();
switch (granularity()) {
case JSSegmenter::Granularity::GRAPHEME:
@@ -128,7 +128,7 @@ Handle<Object> JSSegmentIterator::BreakType() const {
Handle<Object> JSSegmentIterator::Index(
Isolate* isolate, Handle<JSSegmentIterator> segment_iterator) {
icu::BreakIterator* icu_break_iterator =
- segment_iterator->icu_break_iterator()->raw();
+ segment_iterator->icu_break_iterator().raw();
CHECK_NOT_NULL(icu_break_iterator);
return isolate->factory()->NewNumberFromInt(icu_break_iterator->current());
}
@@ -138,7 +138,7 @@ MaybeHandle<JSReceiver> JSSegmentIterator::Next(
Isolate* isolate, Handle<JSSegmentIterator> segment_iterator) {
Factory* factory = isolate->factory();
icu::BreakIterator* icu_break_iterator =
- segment_iterator->icu_break_iterator()->raw();
+ segment_iterator->icu_break_iterator().raw();
// 3. Let _previousIndex be iterator.[[SegmentIteratorIndex]].
int32_t prev = icu_break_iterator->current();
// 4. Let done be AdvanceSegmentIterator(iterator, forwards).
@@ -192,7 +192,7 @@ Maybe<bool> JSSegmentIterator::Following(
Handle<Object> from_obj) {
Factory* factory = isolate->factory();
icu::BreakIterator* icu_break_iterator =
- segment_iterator->icu_break_iterator()->raw();
+ segment_iterator->icu_break_iterator().raw();
// 3. If from is not undefined,
if (!from_obj->IsUndefined()) {
// a. Let from be ? ToIndex(from).
@@ -244,7 +244,7 @@ Maybe<bool> JSSegmentIterator::Preceding(
Handle<Object> from_obj) {
Factory* factory = isolate->factory();
icu::BreakIterator* icu_break_iterator =
- segment_iterator->icu_break_iterator()->raw();
+ segment_iterator->icu_break_iterator().raw();
// 3. If from is not undefined,
if (!from_obj->IsUndefined()) {
// a. Let from be ? ToIndex(from).
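
JSSegmentIterator is a thin cursor over the managed icu::BreakIterator: Index reads current(), and Next/Following/Preceding move the cursor and hand back the [previous, current) slice of the managed string. The same traversal in standalone ICU C++ (word granularity chosen arbitrarily):

    #include <unicode/brkiter.h>
    #include <unicode/locid.h>
    #include <unicode/unistr.h>

    #include <memory>

    int main() {
      UErrorCode status = U_ZERO_ERROR;
      std::unique_ptr<icu::BreakIterator> it(
          icu::BreakIterator::createWordInstance(icu::Locale::getEnglish(), status));
      if (U_FAILURE(status)) return 1;
      icu::UnicodeString text("hello world");
      it->setText(text);
      // As in Next(): remember the previous boundary, advance, and the
      // half-open range [prev, pos) is one segment.
      int32_t prev = it->first();
      for (int32_t pos = it->next(); pos != icu::BreakIterator::DONE;
           prev = pos, pos = it->next()) {
        icu::UnicodeString segment;
        text.extractBetween(prev, pos, segment);  // "hello", " ", "world"
      }
      return 0;
    }
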
diff --git a/deps/v8/src/objects/js-segment-iterator.h b/deps/v8/src/objects/js-segment-iterator.h
index 0535704a68..cadb99e79d 100644
--- a/deps/v8/src/objects/js-segment-iterator.h
+++ b/deps/v8/src/objects/js-segment-iterator.h
@@ -9,11 +9,11 @@
#ifndef V8_OBJECTS_JS_SEGMENT_ITERATOR_H_
#define V8_OBJECTS_JS_SEGMENT_ITERATOR_H_
+#include "src/execution/isolate.h"
#include "src/heap/factory.h"
-#include "src/isolate.h"
-#include "src/objects.h"
#include "src/objects/js-segmenter.h"
#include "src/objects/managed.h"
+#include "src/objects/objects.h"
#include "unicode/uversion.h"
// Has to be the last include (doesn't have include guards):
@@ -90,16 +90,8 @@ class JSSegmentIterator : public JSObject {
DECL_INT_ACCESSORS(flags)
// Layout description.
-#define SEGMENTER_FIELDS(V) \
- /* Pointer fields. */ \
- V(kICUBreakIteratorOffset, kTaggedSize) \
- V(kUnicodeStringOffset, kTaggedSize) \
- V(kFlagsOffset, kTaggedSize) \
- /* Total Size */ \
- V(kSize, 0)
-
- DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize, SEGMENTER_FIELDS)
-#undef SEGMENTER_FIELDS
+ DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
+ TORQUE_GENERATED_JSSEGMENT_ITERATOR_FIELDS)
OBJECT_CONSTRUCTORS(JSSegmentIterator, JSObject);
};
diff --git a/deps/v8/src/objects/js-segmenter-inl.h b/deps/v8/src/objects/js-segmenter-inl.h
index 05935fa905..b4adf4c8e6 100644
--- a/deps/v8/src/objects/js-segmenter-inl.h
+++ b/deps/v8/src/objects/js-segmenter-inl.h
@@ -9,8 +9,8 @@
#ifndef V8_OBJECTS_JS_SEGMENTER_INL_H_
#define V8_OBJECTS_JS_SEGMENTER_INL_H_
-#include "src/objects-inl.h"
#include "src/objects/js-segmenter.h"
+#include "src/objects/objects-inl.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -23,7 +23,7 @@ OBJECT_CONSTRUCTORS_IMPL(JSSegmenter, JSObject)
// Base segmenter accessors.
ACCESSORS(JSSegmenter, locale, String, kLocaleOffset)
ACCESSORS(JSSegmenter, icu_break_iterator, Managed<icu::BreakIterator>,
- kICUBreakIteratorOffset)
+ kIcuBreakIteratorOffset)
SMI_ACCESSORS(JSSegmenter, flags, kFlagsOffset)
inline void JSSegmenter::set_granularity(Granularity granularity) {
diff --git a/deps/v8/src/objects/js-segmenter.cc b/deps/v8/src/objects/js-segmenter.cc
index 7548b65f23..5321334678 100644
--- a/deps/v8/src/objects/js-segmenter.cc
+++ b/deps/v8/src/objects/js-segmenter.cc
@@ -12,12 +12,12 @@
#include <memory>
#include <string>
+#include "src/execution/isolate.h"
#include "src/heap/factory.h"
-#include "src/isolate.h"
-#include "src/objects-inl.h"
#include "src/objects/intl-objects.h"
#include "src/objects/js-segmenter-inl.h"
#include "src/objects/managed.h"
+#include "src/objects/objects-inl.h"
#include "unicode/brkiter.h"
namespace v8 {
diff --git a/deps/v8/src/objects/js-segmenter.h b/deps/v8/src/objects/js-segmenter.h
index 4fd509eb0c..423dd67497 100644
--- a/deps/v8/src/objects/js-segmenter.h
+++ b/deps/v8/src/objects/js-segmenter.h
@@ -12,10 +12,10 @@
#include <set>
#include <string>
+#include "src/execution/isolate.h"
#include "src/heap/factory.h"
-#include "src/isolate.h"
-#include "src/objects.h"
#include "src/objects/managed.h"
+#include "src/objects/objects.h"
#include "unicode/uversion.h"
// Has to be the last include (doesn't have include guards):
@@ -78,16 +78,8 @@ class JSSegmenter : public JSObject {
DECL_VERIFIER(JSSegmenter)
// Layout description.
-#define JS_SEGMENTER_FIELDS(V) \
- V(kJSSegmenterOffset, kTaggedSize) \
- V(kLocaleOffset, kTaggedSize) \
- V(kICUBreakIteratorOffset, kTaggedSize) \
- V(kFlagsOffset, kTaggedSize) \
- /* Header size. */ \
- V(kSize, 0)
-
- DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize, JS_SEGMENTER_FIELDS)
-#undef JS_SEGMENTER_FIELDS
+ DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
+ TORQUE_GENERATED_JSSEGMENTER_FIELDS)
private:
static Granularity GetGranularity(const char* str);
diff --git a/deps/v8/src/objects/js-weak-refs-inl.h b/deps/v8/src/objects/js-weak-refs-inl.h
index a08cb08fcf..6632a31002 100644
--- a/deps/v8/src/objects/js-weak-refs-inl.h
+++ b/deps/v8/src/objects/js-weak-refs-inl.h
@@ -7,7 +7,7 @@
#include "src/objects/js-weak-refs.h"
-#include "src/api-inl.h"
+#include "src/api/api-inl.h"
#include "src/heap/heap-write-barrier-inl.h"
#include "src/objects/microtask-inl.h"
#include "src/objects/smi-inl.h"
@@ -70,14 +70,14 @@ void JSFinalizationGroup::Register(
// Add to active_cells.
weak_cell->set_next(finalization_group->active_cells());
- if (finalization_group->active_cells()->IsWeakCell()) {
- WeakCell::cast(finalization_group->active_cells())->set_prev(*weak_cell);
+ if (finalization_group->active_cells().IsWeakCell()) {
+ WeakCell::cast(finalization_group->active_cells()).set_prev(*weak_cell);
}
finalization_group->set_active_cells(*weak_cell);
if (!key->IsUndefined(isolate)) {
Handle<ObjectHashTable> key_map;
- if (finalization_group->key_map()->IsUndefined(isolate)) {
+ if (finalization_group->key_map().IsUndefined(isolate)) {
key_map = ObjectHashTable::New(isolate, 1);
} else {
key_map =
@@ -85,12 +85,12 @@ void JSFinalizationGroup::Register(
}
Object value = key_map->Lookup(key);
- if (value->IsWeakCell()) {
+ if (value.IsWeakCell()) {
WeakCell existing_weak_cell = WeakCell::cast(value);
- existing_weak_cell->set_key_list_prev(*weak_cell);
+ existing_weak_cell.set_key_list_prev(*weak_cell);
weak_cell->set_key_list_next(existing_weak_cell);
} else {
- DCHECK(value->IsTheHole(isolate));
+ DCHECK(value.IsTheHole(isolate));
}
key_map = ObjectHashTable::Put(key_map, key, weak_cell);
finalization_group->set_key_map(*key_map);
@@ -103,17 +103,17 @@ void JSFinalizationGroup::Unregister(
// Iterate through the doubly linked list of WeakCells associated with the
// key. Each WeakCell will be in the "active_cells" or "cleared_cells" list of
// its FinalizationGroup; remove it from there.
- if (!finalization_group->key_map()->IsUndefined(isolate)) {
+ if (!finalization_group->key_map().IsUndefined(isolate)) {
Handle<ObjectHashTable> key_map =
handle(ObjectHashTable::cast(finalization_group->key_map()), isolate);
Object value = key_map->Lookup(key);
Object undefined = ReadOnlyRoots(isolate).undefined_value();
- while (value->IsWeakCell()) {
+ while (value.IsWeakCell()) {
WeakCell weak_cell = WeakCell::cast(value);
- weak_cell->RemoveFromFinalizationGroupCells(isolate);
- value = weak_cell->key_list_next();
- weak_cell->set_key_list_prev(undefined);
- weak_cell->set_key_list_next(undefined);
+ weak_cell.RemoveFromFinalizationGroupCells(isolate);
+ value = weak_cell.key_list_next();
+ weak_cell.set_key_list_prev(undefined);
+ weak_cell.set_key_list_next(undefined);
}
bool was_present;
key_map = ObjectHashTable::Remove(isolate, key_map, key, &was_present);
@@ -122,7 +122,7 @@ void JSFinalizationGroup::Unregister(
}
bool JSFinalizationGroup::NeedsCleanup() const {
- return cleared_cells()->IsWeakCell();
+ return cleared_cells().IsWeakCell();
}
bool JSFinalizationGroup::scheduled_for_cleanup() const {
@@ -138,23 +138,23 @@ Object JSFinalizationGroup::PopClearedCellHoldings(
Handle<JSFinalizationGroup> finalization_group, Isolate* isolate) {
Handle<WeakCell> weak_cell =
handle(WeakCell::cast(finalization_group->cleared_cells()), isolate);
- DCHECK(weak_cell->prev()->IsUndefined(isolate));
+ DCHECK(weak_cell->prev().IsUndefined(isolate));
finalization_group->set_cleared_cells(weak_cell->next());
weak_cell->set_next(ReadOnlyRoots(isolate).undefined_value());
- if (finalization_group->cleared_cells()->IsWeakCell()) {
+ if (finalization_group->cleared_cells().IsWeakCell()) {
WeakCell cleared_cells_head =
WeakCell::cast(finalization_group->cleared_cells());
- DCHECK_EQ(cleared_cells_head->prev(), *weak_cell);
- cleared_cells_head->set_prev(ReadOnlyRoots(isolate).undefined_value());
+ DCHECK_EQ(cleared_cells_head.prev(), *weak_cell);
+ cleared_cells_head.set_prev(ReadOnlyRoots(isolate).undefined_value());
} else {
- DCHECK(finalization_group->cleared_cells()->IsUndefined(isolate));
+ DCHECK(finalization_group->cleared_cells().IsUndefined(isolate));
}
// Also remove the WeakCell from the key_map (if it's there).
- if (!weak_cell->key()->IsUndefined(isolate)) {
- if (weak_cell->key_list_prev()->IsUndefined(isolate) &&
- weak_cell->key_list_next()->IsUndefined(isolate)) {
+ if (!weak_cell->key().IsUndefined(isolate)) {
+ if (weak_cell->key_list_prev().IsUndefined(isolate) &&
+ weak_cell->key_list_next().IsUndefined(isolate)) {
// weak_cell is the only one associated with its key; remove the key
// from the hash table.
Handle<ObjectHashTable> key_map =
@@ -164,7 +164,7 @@ Object JSFinalizationGroup::PopClearedCellHoldings(
key_map = ObjectHashTable::Remove(isolate, key_map, key, &was_present);
DCHECK(was_present);
finalization_group->set_key_map(*key_map);
- } else if (weak_cell->key_list_prev()->IsUndefined()) {
+ } else if (weak_cell->key_list_prev().IsUndefined()) {
// weak_cell is the list head for its key; we need to change the value of
// the key in the hash table.
Handle<ObjectHashTable> key_map =
@@ -180,10 +180,10 @@ Object JSFinalizationGroup::PopClearedCellHoldings(
} else {
// weak_cell is somewhere in the middle of its key list.
WeakCell prev = WeakCell::cast(weak_cell->key_list_prev());
- prev->set_key_list_next(weak_cell->key_list_next());
- if (!weak_cell->key_list_next()->IsUndefined()) {
+ prev.set_key_list_next(weak_cell->key_list_next());
+ if (!weak_cell->key_list_next().IsUndefined()) {
WeakCell next = WeakCell::cast(weak_cell->key_list_next());
- next->set_key_list_prev(weak_cell->key_list_prev());
+ next.set_key_list_prev(weak_cell->key_list_prev());
}
}
}
@@ -200,41 +200,41 @@ void WeakCell::Nullify(
// only called for WeakCells which haven't been unregistered yet, so they will
// be in the active_cells list. (The caller must guard against calling this
// for unregistered WeakCells by checking that the target is not undefined.)
- DCHECK(target()->IsJSReceiver());
+ DCHECK(target().IsJSReceiver());
set_target(ReadOnlyRoots(isolate).undefined_value());
JSFinalizationGroup fg = JSFinalizationGroup::cast(finalization_group());
- if (prev()->IsWeakCell()) {
- DCHECK_NE(fg->active_cells(), *this);
+ if (prev().IsWeakCell()) {
+ DCHECK_NE(fg.active_cells(), *this);
WeakCell prev_cell = WeakCell::cast(prev());
- prev_cell->set_next(next());
+ prev_cell.set_next(next());
gc_notify_updated_slot(prev_cell, prev_cell.RawField(WeakCell::kNextOffset),
next());
} else {
- DCHECK_EQ(fg->active_cells(), *this);
- fg->set_active_cells(next());
+ DCHECK_EQ(fg.active_cells(), *this);
+ fg.set_active_cells(next());
gc_notify_updated_slot(
fg, fg.RawField(JSFinalizationGroup::kActiveCellsOffset), next());
}
- if (next()->IsWeakCell()) {
+ if (next().IsWeakCell()) {
WeakCell next_cell = WeakCell::cast(next());
- next_cell->set_prev(prev());
+ next_cell.set_prev(prev());
gc_notify_updated_slot(next_cell, next_cell.RawField(WeakCell::kPrevOffset),
prev());
}
set_prev(ReadOnlyRoots(isolate).undefined_value());
- Object cleared_head = fg->cleared_cells();
- if (cleared_head->IsWeakCell()) {
+ Object cleared_head = fg.cleared_cells();
+ if (cleared_head.IsWeakCell()) {
WeakCell cleared_head_cell = WeakCell::cast(cleared_head);
- cleared_head_cell->set_prev(*this);
+ cleared_head_cell.set_prev(*this);
gc_notify_updated_slot(cleared_head_cell,
cleared_head_cell.RawField(WeakCell::kPrevOffset),
*this);
}
- set_next(fg->cleared_cells());
+ set_next(fg.cleared_cells());
gc_notify_updated_slot(*this, RawField(WeakCell::kNextOffset), next());
- fg->set_cleared_cells(*this);
+ fg.set_cleared_cells(*this);
gc_notify_updated_slot(
fg, fg.RawField(JSFinalizationGroup::kClearedCellsOffset), *this);
}
@@ -245,24 +245,24 @@ void WeakCell::RemoveFromFinalizationGroupCells(Isolate* isolate) {
// It's important to set_target to undefined here. This guards that we won't
// call Nullify (which assumes that the WeakCell is in active_cells).
- DCHECK(target()->IsUndefined() || target()->IsJSReceiver());
+ DCHECK(target().IsUndefined() || target().IsJSReceiver());
set_target(ReadOnlyRoots(isolate).undefined_value());
JSFinalizationGroup fg = JSFinalizationGroup::cast(finalization_group());
- if (fg->active_cells() == *this) {
- DCHECK(prev()->IsUndefined(isolate));
- fg->set_active_cells(next());
- } else if (fg->cleared_cells() == *this) {
- DCHECK(!prev()->IsWeakCell());
- fg->set_cleared_cells(next());
+ if (fg.active_cells() == *this) {
+ DCHECK(prev().IsUndefined(isolate));
+ fg.set_active_cells(next());
+ } else if (fg.cleared_cells() == *this) {
+ DCHECK(!prev().IsWeakCell());
+ fg.set_cleared_cells(next());
} else {
- DCHECK(prev()->IsWeakCell());
+ DCHECK(prev().IsWeakCell());
WeakCell prev_cell = WeakCell::cast(prev());
- prev_cell->set_next(next());
+ prev_cell.set_next(next());
}
- if (next()->IsWeakCell()) {
+ if (next().IsWeakCell()) {
WeakCell next_cell = WeakCell::cast(next());
- next_cell->set_prev(prev());
+ next_cell.set_prev(prev());
}
set_prev(ReadOnlyRoots(isolate).undefined_value());
set_next(ReadOnlyRoots(isolate).undefined_value());
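
Nullify and RemoveFromFinalizationGroupCells above are list surgery on two intrusive doubly linked lists (active_cells and cleared_cells) whose end marker is the undefined value. A simplified sketch of the unlink step with plain pointers standing in for tagged undefined (no write barriers or GC slot notifications, which the real code must emit):

    #include <cassert>

    struct Cell {
      Cell* prev = nullptr;  // nullptr stands in for the undefined sentinel
      Cell* next = nullptr;
    };

    struct Group {
      Cell* active_cells = nullptr;
      Cell* cleared_cells = nullptr;
    };

    // Mirrors WeakCell::RemoveFromFinalizationGroupCells: a cell is either the
    // head of one of the two lists or sits in the middle of one.
    void Unlink(Group* fg, Cell* cell) {
      if (fg->active_cells == cell) {
        assert(cell->prev == nullptr);
        fg->active_cells = cell->next;
      } else if (fg->cleared_cells == cell) {
        assert(cell->prev == nullptr);
        fg->cleared_cells = cell->next;
      } else {
        assert(cell->prev != nullptr);
        cell->prev->next = cell->next;
      }
      if (cell->next != nullptr) cell->next->prev = cell->prev;
      cell->prev = nullptr;
      cell->next = nullptr;
    }
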
diff --git a/deps/v8/src/objects/keys.cc b/deps/v8/src/objects/keys.cc
new file mode 100644
index 0000000000..d3a1f6bdc2
--- /dev/null
+++ b/deps/v8/src/objects/keys.cc
@@ -0,0 +1,1017 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/objects/keys.h"
+
+#include "src/api/api-arguments-inl.h"
+#include "src/execution/isolate-inl.h"
+#include "src/handles/handles-inl.h"
+#include "src/heap/factory.h"
+#include "src/objects/api-callbacks.h"
+#include "src/objects/elements-inl.h"
+#include "src/objects/field-index-inl.h"
+#include "src/objects/hash-table-inl.h"
+#include "src/objects/module-inl.h"
+#include "src/objects/objects-inl.h"
+#include "src/objects/ordered-hash-table-inl.h"
+#include "src/objects/property-descriptor.h"
+#include "src/objects/prototype.h"
+#include "src/utils/identity-map.h"
+
+namespace v8 {
+namespace internal {
+
+namespace {
+
+static bool ContainsOnlyValidKeys(Handle<FixedArray> array) {
+ int len = array->length();
+ for (int i = 0; i < len; i++) {
+ Object e = array->get(i);
+ if (!(e.IsName() || e.IsNumber())) return false;
+ }
+ return true;
+}
+
+} // namespace
+
+// static
+MaybeHandle<FixedArray> KeyAccumulator::GetKeys(
+ Handle<JSReceiver> object, KeyCollectionMode mode, PropertyFilter filter,
+ GetKeysConversion keys_conversion, bool is_for_in, bool skip_indices) {
+ Isolate* isolate = object->GetIsolate();
+ FastKeyAccumulator accumulator(isolate, object, mode, filter, is_for_in,
+ skip_indices);
+ return accumulator.GetKeys(keys_conversion);
+}
+
+Handle<FixedArray> KeyAccumulator::GetKeys(GetKeysConversion convert) {
+ if (keys_.is_null()) {
+ return isolate_->factory()->empty_fixed_array();
+ }
+ if (mode_ == KeyCollectionMode::kOwnOnly &&
+ keys_->map() == ReadOnlyRoots(isolate_).fixed_array_map()) {
+ return Handle<FixedArray>::cast(keys_);
+ }
+ USE(ContainsOnlyValidKeys);
+ Handle<FixedArray> result =
+ OrderedHashSet::ConvertToKeysArray(isolate(), keys(), convert);
+ DCHECK(ContainsOnlyValidKeys(result));
+ return result;
+}
+
+Handle<OrderedHashSet> KeyAccumulator::keys() {
+ return Handle<OrderedHashSet>::cast(keys_);
+}
+
+void KeyAccumulator::AddKey(Object key, AddKeyConversion convert) {
+ AddKey(handle(key, isolate_), convert);
+}
+
+void KeyAccumulator::AddKey(Handle<Object> key, AddKeyConversion convert) {
+ if (filter_ == PRIVATE_NAMES_ONLY) {
+ if (!key->IsSymbol()) return;
+ if (!Symbol::cast(*key).is_private_name()) return;
+ } else if (key->IsSymbol()) {
+ if (filter_ & SKIP_SYMBOLS) return;
+
+ if (Symbol::cast(*key).is_private()) return;
+ } else if (filter_ & SKIP_STRINGS) {
+ return;
+ }
+
+ if (IsShadowed(key)) return;
+ if (keys_.is_null()) {
+ keys_ = OrderedHashSet::Allocate(isolate_, 16);
+ }
+ uint32_t index;
+ if (convert == CONVERT_TO_ARRAY_INDEX && key->IsString() &&
+ Handle<String>::cast(key)->AsArrayIndex(&index)) {
+ key = isolate_->factory()->NewNumberFromUint(index);
+ }
+ Handle<OrderedHashSet> new_set = OrderedHashSet::Add(isolate(), keys(), key);
+ if (*new_set != *keys_) {
+ // The keys_ Set is converted directly to a FixedArray in GetKeys which can
+ // be left-trimmed. Hence the previous Set should not keep a pointer to the
+ // new one.
+ keys_->set(OrderedHashSet::NextTableIndex(), Smi::kZero);
+ keys_ = new_set;
+ }
+}
+
+void KeyAccumulator::AddKeys(Handle<FixedArray> array,
+ AddKeyConversion convert) {
+ int add_length = array->length();
+ for (int i = 0; i < add_length; i++) {
+ Handle<Object> current(array->get(i), isolate_);
+ AddKey(current, convert);
+ }
+}
+
+void KeyAccumulator::AddKeys(Handle<JSObject> array_like,
+ AddKeyConversion convert) {
+ DCHECK(array_like->IsJSArray() || array_like->HasSloppyArgumentsElements());
+ ElementsAccessor* accessor = array_like->GetElementsAccessor();
+ accessor->AddElementsToKeyAccumulator(array_like, this, convert);
+}
+
+MaybeHandle<FixedArray> FilterProxyKeys(KeyAccumulator* accumulator,
+ Handle<JSProxy> owner,
+ Handle<FixedArray> keys,
+ PropertyFilter filter) {
+ if (filter == ALL_PROPERTIES) {
+ // Nothing to do.
+ return keys;
+ }
+ Isolate* isolate = accumulator->isolate();
+ int store_position = 0;
+ for (int i = 0; i < keys->length(); ++i) {
+ Handle<Name> key(Name::cast(keys->get(i)), isolate);
+ if (key->FilterKey(filter)) continue; // Skip this key.
+ if (filter & ONLY_ENUMERABLE) {
+ PropertyDescriptor desc;
+ Maybe<bool> found =
+ JSProxy::GetOwnPropertyDescriptor(isolate, owner, key, &desc);
+ MAYBE_RETURN(found, MaybeHandle<FixedArray>());
+ if (!found.FromJust()) continue;
+ if (!desc.enumerable()) {
+ accumulator->AddShadowingKey(key);
+ continue;
+ }
+ }
+ // Keep this key.
+ if (store_position != i) {
+ keys->set(store_position, *key);
+ }
+ store_position++;
+ }
+ return FixedArray::ShrinkOrEmpty(isolate, keys, store_position);
+}
+
+// Returns "nothing" in case of exception, "true" on success.
+Maybe<bool> KeyAccumulator::AddKeysFromJSProxy(Handle<JSProxy> proxy,
+ Handle<FixedArray> keys) {
+ // Postpone the enumerable check for for-in to the ForInFilter step.
+ if (!is_for_in_) {
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate_, keys, FilterProxyKeys(this, proxy, keys, filter_),
+ Nothing<bool>());
+ if (mode_ == KeyCollectionMode::kOwnOnly) {
+ // If we collect only the keys from a JSProxy do not sort or deduplicate.
+ keys_ = keys;
+ return Just(true);
+ }
+ }
+ AddKeys(keys, is_for_in_ ? CONVERT_TO_ARRAY_INDEX : DO_NOT_CONVERT);
+ return Just(true);
+}
+
+Maybe<bool> KeyAccumulator::CollectKeys(Handle<JSReceiver> receiver,
+ Handle<JSReceiver> object) {
+ // Proxies have no hidden prototype and we should not trigger the
+ // [[GetPrototypeOf]] trap on the last iteration when using
+ // AdvanceFollowingProxies.
+ if (mode_ == KeyCollectionMode::kOwnOnly && object->IsJSProxy()) {
+ MAYBE_RETURN(CollectOwnJSProxyKeys(receiver, Handle<JSProxy>::cast(object)),
+ Nothing<bool>());
+ return Just(true);
+ }
+
+ PrototypeIterator::WhereToEnd end = mode_ == KeyCollectionMode::kOwnOnly
+ ? PrototypeIterator::END_AT_NON_HIDDEN
+ : PrototypeIterator::END_AT_NULL;
+ for (PrototypeIterator iter(isolate_, object, kStartAtReceiver, end);
+ !iter.IsAtEnd();) {
+ // Start the shadow checks only after the first prototype has added
+ // shadowing keys.
+ if (HasShadowingKeys()) skip_shadow_check_ = false;
+ Handle<JSReceiver> current =
+ PrototypeIterator::GetCurrent<JSReceiver>(iter);
+ Maybe<bool> result = Just(false); // Dummy initialization.
+ if (current->IsJSProxy()) {
+ result = CollectOwnJSProxyKeys(receiver, Handle<JSProxy>::cast(current));
+ } else {
+ DCHECK(current->IsJSObject());
+ result = CollectOwnKeys(receiver, Handle<JSObject>::cast(current));
+ }
+ MAYBE_RETURN(result, Nothing<bool>());
+ if (!result.FromJust()) break; // |false| means "stop iterating".
+ // Iterate through proxies but ignore access checks for the ALL_CAN_READ
+ // case on API objects for OWN_ONLY keys handled in CollectOwnKeys.
+ if (!iter.AdvanceFollowingProxiesIgnoringAccessChecks()) {
+ return Nothing<bool>();
+ }
+ if (!last_non_empty_prototype_.is_null() &&
+ *last_non_empty_prototype_ == *current) {
+ break;
+ }
+ }
+ return Just(true);
+}
+
+bool KeyAccumulator::HasShadowingKeys() { return !shadowing_keys_.is_null(); }
+
+bool KeyAccumulator::IsShadowed(Handle<Object> key) {
+ if (!HasShadowingKeys() || skip_shadow_check_) return false;
+ return shadowing_keys_->Has(isolate_, key);
+}
+
+void KeyAccumulator::AddShadowingKey(Object key) {
+ if (mode_ == KeyCollectionMode::kOwnOnly) return;
+ AddShadowingKey(handle(key, isolate_));
+}
+void KeyAccumulator::AddShadowingKey(Handle<Object> key) {
+ if (mode_ == KeyCollectionMode::kOwnOnly) return;
+ if (shadowing_keys_.is_null()) {
+ shadowing_keys_ = ObjectHashSet::New(isolate_, 16);
+ }
+ shadowing_keys_ = ObjectHashSet::Add(isolate(), shadowing_keys_, key);
+}
+
+namespace {
+
+void TrySettingEmptyEnumCache(JSReceiver object) {
+ Map map = object.map();
+ DCHECK_EQ(kInvalidEnumCacheSentinel, map.EnumLength());
+ if (!map.OnlyHasSimpleProperties()) return;
+ if (map.IsJSProxyMap()) return;
+ if (map.NumberOfEnumerableProperties() > 0) return;
+ DCHECK(object.IsJSObject());
+ map.SetEnumLength(0);
+}
+
+bool CheckAndInitalizeEmptyEnumCache(JSReceiver object) {
+ if (object.map().EnumLength() == kInvalidEnumCacheSentinel) {
+ TrySettingEmptyEnumCache(object);
+ }
+ if (object.map().EnumLength() != 0) return false;
+ DCHECK(object.IsJSObject());
+ return !JSObject::cast(object).HasEnumerableElements();
+}
+} // namespace
+
+void FastKeyAccumulator::Prepare() {
+ DisallowHeapAllocation no_gc;
+ // Directly go for the fast path for OWN_ONLY keys.
+ if (mode_ == KeyCollectionMode::kOwnOnly) return;
+ // Fully walk the prototype chain and find the last prototype with keys.
+ is_receiver_simple_enum_ = false;
+ has_empty_prototype_ = true;
+ JSReceiver last_prototype;
+ for (PrototypeIterator iter(isolate_, *receiver_); !iter.IsAtEnd();
+ iter.Advance()) {
+ JSReceiver current = iter.GetCurrent<JSReceiver>();
+ bool has_no_properties = CheckAndInitalizeEmptyEnumCache(current);
+ if (has_no_properties) continue;
+ last_prototype = current;
+ has_empty_prototype_ = false;
+ }
+ if (has_empty_prototype_) {
+ is_receiver_simple_enum_ =
+ receiver_->map().EnumLength() != kInvalidEnumCacheSentinel &&
+ !JSObject::cast(*receiver_).HasEnumerableElements();
+ } else if (!last_prototype.is_null()) {
+ last_non_empty_prototype_ = handle(last_prototype, isolate_);
+ }
+}
+
+namespace {
+
+Handle<FixedArray> ReduceFixedArrayTo(Isolate* isolate,
+ Handle<FixedArray> array, int length) {
+ DCHECK_LE(length, array->length());
+ if (array->length() == length) return array;
+ return isolate->factory()->CopyFixedArrayUpTo(array, length);
+}
+
+// Initializes and directly returns the enum cache. Users of this function
+// have to make sure to never directly leak the enum cache.
+Handle<FixedArray> GetFastEnumPropertyKeys(Isolate* isolate,
+ Handle<JSObject> object) {
+ Handle<Map> map(object->map(), isolate);
+ Handle<FixedArray> keys(map->instance_descriptors().enum_cache().keys(),
+ isolate);
+
+ // Check if the {map} has a valid enum length, which implies that it
+ // must have a valid enum cache as well.
+ int enum_length = map->EnumLength();
+ if (enum_length != kInvalidEnumCacheSentinel) {
+ DCHECK(map->OnlyHasSimpleProperties());
+ DCHECK_LE(enum_length, keys->length());
+ DCHECK_EQ(enum_length, map->NumberOfEnumerableProperties());
+ isolate->counters()->enum_cache_hits()->Increment();
+ return ReduceFixedArrayTo(isolate, keys, enum_length);
+ }
+
+ // Determine the actual number of enumerable properties of the {map}.
+ enum_length = map->NumberOfEnumerableProperties();
+
+ // Check if there's already a shared enum cache on the {map}s
+ // DescriptorArray with sufficient number of entries.
+ if (enum_length <= keys->length()) {
+ if (map->OnlyHasSimpleProperties()) map->SetEnumLength(enum_length);
+ isolate->counters()->enum_cache_hits()->Increment();
+ return ReduceFixedArrayTo(isolate, keys, enum_length);
+ }
+
+ Handle<DescriptorArray> descriptors =
+ Handle<DescriptorArray>(map->instance_descriptors(), isolate);
+ isolate->counters()->enum_cache_misses()->Increment();
+ int nod = map->NumberOfOwnDescriptors();
+
+ // Create the keys array.
+ int index = 0;
+ bool fields_only = true;
+ keys = isolate->factory()->NewFixedArray(enum_length);
+ for (int i = 0; i < nod; i++) {
+ DisallowHeapAllocation no_gc;
+ PropertyDetails details = descriptors->GetDetails(i);
+ if (details.IsDontEnum()) continue;
+ Object key = descriptors->GetKey(i);
+ if (key.IsSymbol()) continue;
+ keys->set(index, key);
+ if (details.location() != kField) fields_only = false;
+ index++;
+ }
+ DCHECK_EQ(index, keys->length());
+
+ // Optionally also create the indices array.
+ Handle<FixedArray> indices = isolate->factory()->empty_fixed_array();
+ if (fields_only) {
+ indices = isolate->factory()->NewFixedArray(enum_length);
+ index = 0;
+ for (int i = 0; i < nod; i++) {
+ DisallowHeapAllocation no_gc;
+ PropertyDetails details = descriptors->GetDetails(i);
+ if (details.IsDontEnum()) continue;
+ Object key = descriptors->GetKey(i);
+ if (key.IsSymbol()) continue;
+ DCHECK_EQ(kData, details.kind());
+ DCHECK_EQ(kField, details.location());
+ FieldIndex field_index = FieldIndex::ForDescriptor(*map, i);
+ indices->set(index, Smi::FromInt(field_index.GetLoadByFieldIndex()));
+ index++;
+ }
+ DCHECK_EQ(index, indices->length());
+ }
+
+ DescriptorArray::InitializeOrChangeEnumCache(descriptors, isolate, keys,
+ indices);
+ if (map->OnlyHasSimpleProperties()) map->SetEnumLength(enum_length);
+
+ return keys;
+}
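+
+// Usage sketch (illustrative; it mirrors the call in
+// GetOwnKeysWithUninitializedEnumCache() below): the returned array may alias
+// the shared enum cache, so it has to be copied before escaping to user code.
+//
+//   Handle<FixedArray> keys = GetFastEnumPropertyKeys(isolate, object);
+//   keys = isolate->factory()->CopyFixedArray(keys);  // don't leak the cache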
+
+template <bool fast_properties>
+MaybeHandle<FixedArray> GetOwnKeysWithElements(Isolate* isolate,
+ Handle<JSObject> object,
+ GetKeysConversion convert,
+ bool skip_indices) {
+ Handle<FixedArray> keys;
+ ElementsAccessor* accessor = object->GetElementsAccessor();
+ if (fast_properties) {
+ keys = GetFastEnumPropertyKeys(isolate, object);
+ } else {
+ // TODO(cbruni): preallocate big enough array to also hold elements.
+ keys = KeyAccumulator::GetOwnEnumPropertyKeys(isolate, object);
+ }
+
+ MaybeHandle<FixedArray> result;
+ if (skip_indices) {
+ result = keys;
+ } else {
+ result =
+ accessor->PrependElementIndices(object, keys, convert, ONLY_ENUMERABLE);
+ }
+
+ if (FLAG_trace_for_in_enumerate) {
+ PrintF("| strings=%d symbols=0 elements=%u || prototypes>=1 ||\n",
+ keys->length(), result.ToHandleChecked()->length() - keys->length());
+ }
+ return result;
+}
+
+} // namespace
+
+MaybeHandle<FixedArray> FastKeyAccumulator::GetKeys(
+ GetKeysConversion keys_conversion) {
+ if (filter_ == ENUMERABLE_STRINGS) {
+ Handle<FixedArray> keys;
+ if (GetKeysFast(keys_conversion).ToHandle(&keys)) {
+ return keys;
+ }
+ if (isolate_->has_pending_exception()) return MaybeHandle<FixedArray>();
+ }
+
+ return GetKeysSlow(keys_conversion);
+}
+
+MaybeHandle<FixedArray> FastKeyAccumulator::GetKeysFast(
+ GetKeysConversion keys_conversion) {
+ bool own_only = has_empty_prototype_ || mode_ == KeyCollectionMode::kOwnOnly;
+ Map map = receiver_->map();
+ if (!own_only || map.IsCustomElementsReceiverMap()) {
+ return MaybeHandle<FixedArray>();
+ }
+
+ // From this point on we are certain to only collect own keys.
+ DCHECK(receiver_->IsJSObject());
+ Handle<JSObject> object = Handle<JSObject>::cast(receiver_);
+
+ // Do not try to use the enum-cache for dict-mode objects.
+ if (map.is_dictionary_map()) {
+ return GetOwnKeysWithElements<false>(isolate_, object, keys_conversion,
+ skip_indices_);
+ }
+ int enum_length = receiver_->map().EnumLength();
+ if (enum_length == kInvalidEnumCacheSentinel) {
+ Handle<FixedArray> keys;
+ // Try initializing the enum cache and return own properties.
+ if (GetOwnKeysWithUninitializedEnumCache().ToHandle(&keys)) {
+ if (FLAG_trace_for_in_enumerate) {
+ PrintF("| strings=%d symbols=0 elements=0 || prototypes>=1 ||\n",
+ keys->length());
+ }
+ is_receiver_simple_enum_ =
+ object->map().EnumLength() != kInvalidEnumCacheSentinel;
+ return keys;
+ }
+ }
+  // The properties-only case failed, most likely because there are elements
+  // on the receiver.
+ return GetOwnKeysWithElements<true>(isolate_, object, keys_conversion,
+ skip_indices_);
+}
+
+MaybeHandle<FixedArray>
+FastKeyAccumulator::GetOwnKeysWithUninitializedEnumCache() {
+ Handle<JSObject> object = Handle<JSObject>::cast(receiver_);
+  // Uninitialized enum cache.
+ Map map = object->map();
+ if (object->elements() != ReadOnlyRoots(isolate_).empty_fixed_array() &&
+ object->elements() !=
+ ReadOnlyRoots(isolate_).empty_slow_element_dictionary()) {
+ // Assume that there are elements.
+ return MaybeHandle<FixedArray>();
+ }
+ int number_of_own_descriptors = map.NumberOfOwnDescriptors();
+ if (number_of_own_descriptors == 0) {
+ map.SetEnumLength(0);
+ return isolate_->factory()->empty_fixed_array();
+ }
+ // We have no elements but possibly enumerable property keys, hence we can
+ // directly initialize the enum cache.
+ Handle<FixedArray> keys = GetFastEnumPropertyKeys(isolate_, object);
+ if (is_for_in_) return keys;
+ // Do not leak the enum cache as it might end up as an elements backing store.
+ return isolate_->factory()->CopyFixedArray(keys);
+}
+
+MaybeHandle<FixedArray> FastKeyAccumulator::GetKeysSlow(
+ GetKeysConversion keys_conversion) {
+ KeyAccumulator accumulator(isolate_, mode_, filter_);
+ accumulator.set_is_for_in(is_for_in_);
+ accumulator.set_skip_indices(skip_indices_);
+ accumulator.set_last_non_empty_prototype(last_non_empty_prototype_);
+
+ MAYBE_RETURN(accumulator.CollectKeys(receiver_, receiver_),
+ MaybeHandle<FixedArray>());
+ return accumulator.GetKeys(keys_conversion);
+}
+
+namespace {
+
+enum IndexedOrNamed { kIndexed, kNamed };
+
+void FilterForEnumerableProperties(Handle<JSReceiver> receiver,
+ Handle<JSObject> object,
+ Handle<InterceptorInfo> interceptor,
+ KeyAccumulator* accumulator,
+ Handle<JSObject> result,
+ IndexedOrNamed type) {
+ DCHECK(result->IsJSArray() || result->HasSloppyArgumentsElements());
+ ElementsAccessor* accessor = result->GetElementsAccessor();
+
+ uint32_t length = accessor->GetCapacity(*result, result->elements());
+ for (uint32_t i = 0; i < length; i++) {
+ if (!accessor->HasEntry(*result, i)) continue;
+
+    // |args| is invalid after args.Call(); create a new one in every
+    // iteration.
+ PropertyCallbackArguments args(accumulator->isolate(), interceptor->data(),
+ *receiver, *object, Just(kDontThrow));
+
+ Handle<Object> element = accessor->Get(result, i);
+ Handle<Object> attributes;
+ if (type == kIndexed) {
+ uint32_t number;
+ CHECK(element->ToUint32(&number));
+ attributes = args.CallIndexedQuery(interceptor, number);
+ } else {
+ CHECK(element->IsName());
+ attributes =
+ args.CallNamedQuery(interceptor, Handle<Name>::cast(element));
+ }
+
+ if (!attributes.is_null()) {
+ int32_t value;
+ CHECK(attributes->ToInt32(&value));
+ if ((value & DONT_ENUM) == 0) {
+ accumulator->AddKey(element, DO_NOT_CONVERT);
+ }
+ }
+ }
+}
+
+// Returns |true| on success, |nothing| on exception.
+Maybe<bool> CollectInterceptorKeysInternal(Handle<JSReceiver> receiver,
+ Handle<JSObject> object,
+ Handle<InterceptorInfo> interceptor,
+ KeyAccumulator* accumulator,
+ IndexedOrNamed type) {
+ Isolate* isolate = accumulator->isolate();
+ PropertyCallbackArguments enum_args(isolate, interceptor->data(), *receiver,
+ *object, Just(kDontThrow));
+
+ Handle<JSObject> result;
+ if (!interceptor->enumerator().IsUndefined(isolate)) {
+ if (type == kIndexed) {
+ result = enum_args.CallIndexedEnumerator(interceptor);
+ } else {
+ DCHECK_EQ(type, kNamed);
+ result = enum_args.CallNamedEnumerator(interceptor);
+ }
+ }
+ RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate, Nothing<bool>());
+ if (result.is_null()) return Just(true);
+
+ if ((accumulator->filter() & ONLY_ENUMERABLE) &&
+ !interceptor->query().IsUndefined(isolate)) {
+ FilterForEnumerableProperties(receiver, object, interceptor, accumulator,
+ result, type);
+ } else {
+ accumulator->AddKeys(
+ result, type == kIndexed ? CONVERT_TO_ARRAY_INDEX : DO_NOT_CONVERT);
+ }
+ return Just(true);
+}
+
+Maybe<bool> CollectInterceptorKeys(Handle<JSReceiver> receiver,
+ Handle<JSObject> object,
+ KeyAccumulator* accumulator,
+ IndexedOrNamed type) {
+ Isolate* isolate = accumulator->isolate();
+ if (type == kIndexed) {
+ if (!object->HasIndexedInterceptor()) return Just(true);
+ } else {
+ if (!object->HasNamedInterceptor()) return Just(true);
+ }
+ Handle<InterceptorInfo> interceptor(type == kIndexed
+ ? object->GetIndexedInterceptor()
+ : object->GetNamedInterceptor(),
+ isolate);
+ if ((accumulator->filter() & ONLY_ALL_CAN_READ) &&
+ !interceptor->all_can_read()) {
+ return Just(true);
+ }
+ return CollectInterceptorKeysInternal(receiver, object, interceptor,
+ accumulator, type);
+}
+
+} // namespace
+
+Maybe<bool> KeyAccumulator::CollectOwnElementIndices(
+ Handle<JSReceiver> receiver, Handle<JSObject> object) {
+ if (filter_ & SKIP_STRINGS || skip_indices_) return Just(true);
+
+ ElementsAccessor* accessor = object->GetElementsAccessor();
+ accessor->CollectElementIndices(object, this);
+
+ return CollectInterceptorKeys(receiver, object, this, kIndexed);
+}
+
+namespace {
+
+template <bool skip_symbols>
+int CollectOwnPropertyNamesInternal(Handle<JSObject> object,
+ KeyAccumulator* keys,
+ Handle<DescriptorArray> descs,
+ int start_index, int limit) {
+ int first_skipped = -1;
+ PropertyFilter filter = keys->filter();
+ KeyCollectionMode mode = keys->mode();
+ for (int i = start_index; i < limit; i++) {
+ bool is_shadowing_key = false;
+ PropertyDetails details = descs->GetDetails(i);
+
+ if ((details.attributes() & filter) != 0) {
+ if (mode == KeyCollectionMode::kIncludePrototypes) {
+ is_shadowing_key = true;
+ } else {
+ continue;
+ }
+ }
+
+ if (filter & ONLY_ALL_CAN_READ) {
+ if (details.kind() != kAccessor) continue;
+ Object accessors = descs->GetStrongValue(i);
+ if (!accessors.IsAccessorInfo()) continue;
+ if (!AccessorInfo::cast(accessors).all_can_read()) continue;
+ }
+
+ Name key = descs->GetKey(i);
+ if (skip_symbols == key.IsSymbol()) {
+ if (first_skipped == -1) first_skipped = i;
+ continue;
+ }
+ if (key.FilterKey(keys->filter())) continue;
+
+ if (is_shadowing_key) {
+ keys->AddShadowingKey(key);
+ } else {
+ keys->AddKey(key, DO_NOT_CONVERT);
+ }
+ }
+ return first_skipped;
+}
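+
+// For illustration: the returned |first_skipped| index lets a caller run the
+// <true> (string) pass first and restart the <false> (symbol) pass at the
+// first skipped symbol instead of at descriptor 0; see the two-pass use in
+// CollectOwnPropertyNames() below.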
+
+template <class T>
+Handle<FixedArray> GetOwnEnumPropertyDictionaryKeys(Isolate* isolate,
+ KeyCollectionMode mode,
+ KeyAccumulator* accumulator,
+ Handle<JSObject> object,
+ T raw_dictionary) {
+ Handle<T> dictionary(raw_dictionary, isolate);
+ if (dictionary->NumberOfElements() == 0) {
+ return isolate->factory()->empty_fixed_array();
+ }
+ int length = dictionary->NumberOfEnumerableProperties();
+ Handle<FixedArray> storage = isolate->factory()->NewFixedArray(length);
+ T::CopyEnumKeysTo(isolate, dictionary, storage, mode, accumulator);
+ return storage;
+}
+} // namespace
+
+Maybe<bool> KeyAccumulator::CollectOwnPropertyNames(Handle<JSReceiver> receiver,
+ Handle<JSObject> object) {
+ if (filter_ == ENUMERABLE_STRINGS) {
+ Handle<FixedArray> enum_keys;
+ if (object->HasFastProperties()) {
+ enum_keys = KeyAccumulator::GetOwnEnumPropertyKeys(isolate_, object);
+      // If the number of enumerable properties equals the number of own
+      // descriptors, there are no non-enumerable properties that would need
+      // to be recorded as shadowing keys.
+ Map map = object->map();
+ int nof_descriptors = map.NumberOfOwnDescriptors();
+ if (enum_keys->length() != nof_descriptors) {
+ Handle<DescriptorArray> descs =
+ Handle<DescriptorArray>(map.instance_descriptors(), isolate_);
+ for (int i = 0; i < nof_descriptors; i++) {
+ PropertyDetails details = descs->GetDetails(i);
+ if (!details.IsDontEnum()) continue;
+ Object key = descs->GetKey(i);
+ this->AddShadowingKey(key);
+ }
+ }
+ } else if (object->IsJSGlobalObject()) {
+ enum_keys = GetOwnEnumPropertyDictionaryKeys(
+ isolate_, mode_, this, object,
+ JSGlobalObject::cast(*object).global_dictionary());
+ } else {
+ enum_keys = GetOwnEnumPropertyDictionaryKeys(
+ isolate_, mode_, this, object, object->property_dictionary());
+ }
+ if (object->IsJSModuleNamespace()) {
+ // Simulate [[GetOwnProperty]] for establishing enumerability, which
+ // throws for uninitialized exports.
+ for (int i = 0, n = enum_keys->length(); i < n; ++i) {
+ Handle<String> key(String::cast(enum_keys->get(i)), isolate_);
+ if (Handle<JSModuleNamespace>::cast(object)
+ ->GetExport(isolate(), key)
+ .is_null()) {
+ return Nothing<bool>();
+ }
+ }
+ }
+ AddKeys(enum_keys, DO_NOT_CONVERT);
+ } else {
+ if (object->HasFastProperties()) {
+ int limit = object->map().NumberOfOwnDescriptors();
+ Handle<DescriptorArray> descs(object->map().instance_descriptors(),
+ isolate_);
+ // First collect the strings,
+ int first_symbol =
+ CollectOwnPropertyNamesInternal<true>(object, this, descs, 0, limit);
+ // then the symbols.
+ if (first_symbol != -1) {
+ CollectOwnPropertyNamesInternal<false>(object, this, descs,
+ first_symbol, limit);
+ }
+ } else if (object->IsJSGlobalObject()) {
+ GlobalDictionary::CollectKeysTo(
+ handle(JSGlobalObject::cast(*object).global_dictionary(), isolate_),
+ this);
+ } else {
+ NameDictionary::CollectKeysTo(
+ handle(object->property_dictionary(), isolate_), this);
+ }
+ }
+ // Add the property keys from the interceptor.
+ return CollectInterceptorKeys(receiver, object, this, kNamed);
+}
+
+void KeyAccumulator::CollectPrivateNames(Handle<JSReceiver> receiver,
+ Handle<JSObject> object) {
+ if (object->HasFastProperties()) {
+ int limit = object->map().NumberOfOwnDescriptors();
+ Handle<DescriptorArray> descs(object->map().instance_descriptors(),
+ isolate_);
+ CollectOwnPropertyNamesInternal<false>(object, this, descs, 0, limit);
+ } else if (object->IsJSGlobalObject()) {
+ GlobalDictionary::CollectKeysTo(
+ handle(JSGlobalObject::cast(*object).global_dictionary(), isolate_),
+ this);
+ } else {
+ NameDictionary::CollectKeysTo(
+ handle(object->property_dictionary(), isolate_), this);
+ }
+}
+
+Maybe<bool> KeyAccumulator::CollectAccessCheckInterceptorKeys(
+ Handle<AccessCheckInfo> access_check_info, Handle<JSReceiver> receiver,
+ Handle<JSObject> object) {
+ if (!skip_indices_) {
+ MAYBE_RETURN((CollectInterceptorKeysInternal(
+ receiver, object,
+ handle(InterceptorInfo::cast(
+ access_check_info->indexed_interceptor()),
+ isolate_),
+ this, kIndexed)),
+ Nothing<bool>());
+ }
+ MAYBE_RETURN(
+ (CollectInterceptorKeysInternal(
+ receiver, object,
+ handle(InterceptorInfo::cast(access_check_info->named_interceptor()),
+ isolate_),
+ this, kNamed)),
+ Nothing<bool>());
+ return Just(true);
+}
+
+// Returns |true| on success, |false| if prototype walking should be stopped,
+// |nothing| if an exception was thrown.
+Maybe<bool> KeyAccumulator::CollectOwnKeys(Handle<JSReceiver> receiver,
+ Handle<JSObject> object) {
+ // Check access rights if required.
+ if (object->IsAccessCheckNeeded() &&
+ !isolate_->MayAccess(handle(isolate_->context(), isolate_), object)) {
+ // The cross-origin spec says that [[Enumerate]] shall return an empty
+ // iterator when it doesn't have access...
+ if (mode_ == KeyCollectionMode::kIncludePrototypes) {
+ return Just(false);
+ }
+ // ...whereas [[OwnPropertyKeys]] shall return whitelisted properties.
+ DCHECK_EQ(KeyCollectionMode::kOwnOnly, mode_);
+ Handle<AccessCheckInfo> access_check_info;
+ {
+ DisallowHeapAllocation no_gc;
+ AccessCheckInfo maybe_info = AccessCheckInfo::Get(isolate_, object);
+ if (!maybe_info.is_null()) {
+ access_check_info = handle(maybe_info, isolate_);
+ }
+ }
+ // We always have both kinds of interceptors or none.
+ if (!access_check_info.is_null() &&
+ access_check_info->named_interceptor() != Object()) {
+ MAYBE_RETURN(CollectAccessCheckInterceptorKeys(access_check_info,
+ receiver, object),
+ Nothing<bool>());
+ return Just(false);
+ }
+ filter_ = static_cast<PropertyFilter>(filter_ | ONLY_ALL_CAN_READ);
+ }
+ if (filter_ & PRIVATE_NAMES_ONLY) {
+ CollectPrivateNames(receiver, object);
+ return Just(true);
+ }
+
+ MAYBE_RETURN(CollectOwnElementIndices(receiver, object), Nothing<bool>());
+ MAYBE_RETURN(CollectOwnPropertyNames(receiver, object), Nothing<bool>());
+ return Just(true);
+}
+
+// static
+Handle<FixedArray> KeyAccumulator::GetOwnEnumPropertyKeys(
+ Isolate* isolate, Handle<JSObject> object) {
+ if (object->HasFastProperties()) {
+ return GetFastEnumPropertyKeys(isolate, object);
+ } else if (object->IsJSGlobalObject()) {
+ return GetOwnEnumPropertyDictionaryKeys(
+ isolate, KeyCollectionMode::kOwnOnly, nullptr, object,
+ JSGlobalObject::cast(*object).global_dictionary());
+ } else {
+ return GetOwnEnumPropertyDictionaryKeys(
+ isolate, KeyCollectionMode::kOwnOnly, nullptr, object,
+ object->property_dictionary());
+ }
+}
+
+namespace {
+
+class NameComparator {
+ public:
+ explicit NameComparator(Isolate* isolate) : isolate_(isolate) {}
+
+ bool operator()(uint32_t hash1, uint32_t hash2, const Handle<Name>& key1,
+ const Handle<Name>& key2) const {
+ return Name::Equals(isolate_, key1, key2);
+ }
+
+ private:
+ Isolate* isolate_;
+};
+
+} // namespace
+
+// ES6 #sec-proxy-object-internal-methods-and-internal-slots-ownpropertykeys
+// Returns |true| on success, |nothing| in case of exception.
+Maybe<bool> KeyAccumulator::CollectOwnJSProxyKeys(Handle<JSReceiver> receiver,
+ Handle<JSProxy> proxy) {
+ STACK_CHECK(isolate_, Nothing<bool>());
+ if (filter_ == PRIVATE_NAMES_ONLY) {
+ NameDictionary::CollectKeysTo(
+ handle(proxy->property_dictionary(), isolate_), this);
+ return Just(true);
+ }
+
+ // 1. Let handler be the value of the [[ProxyHandler]] internal slot of O.
+ Handle<Object> handler(proxy->handler(), isolate_);
+ // 2. If handler is null, throw a TypeError exception.
+ // 3. Assert: Type(handler) is Object.
+ if (proxy->IsRevoked()) {
+ isolate_->Throw(*isolate_->factory()->NewTypeError(
+ MessageTemplate::kProxyRevoked, isolate_->factory()->ownKeys_string()));
+ return Nothing<bool>();
+ }
+ // 4. Let target be the value of the [[ProxyTarget]] internal slot of O.
+ Handle<JSReceiver> target(JSReceiver::cast(proxy->target()), isolate_);
+ // 5. Let trap be ? GetMethod(handler, "ownKeys").
+ Handle<Object> trap;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate_, trap,
+ Object::GetMethod(Handle<JSReceiver>::cast(handler),
+ isolate_->factory()->ownKeys_string()),
+ Nothing<bool>());
+ // 6. If trap is undefined, then
+ if (trap->IsUndefined(isolate_)) {
+ // 6a. Return target.[[OwnPropertyKeys]]().
+ return CollectOwnJSProxyTargetKeys(proxy, target);
+ }
+ // 7. Let trapResultArray be Call(trap, handler, «target»).
+ Handle<Object> trap_result_array;
+ Handle<Object> args[] = {target};
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate_, trap_result_array,
+ Execution::Call(isolate_, trap, handler, arraysize(args), args),
+ Nothing<bool>());
+ // 8. Let trapResult be ? CreateListFromArrayLike(trapResultArray,
+ // «String, Symbol»).
+ Handle<FixedArray> trap_result;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate_, trap_result,
+ Object::CreateListFromArrayLike(isolate_, trap_result_array,
+ ElementTypes::kStringAndSymbol),
+ Nothing<bool>());
+ // 9. If trapResult contains any duplicate entries, throw a TypeError
+ // exception. Combine with step 18
+ // 18. Let uncheckedResultKeys be a new List which is a copy of trapResult.
+ Zone set_zone(isolate_->allocator(), ZONE_NAME);
+ ZoneAllocationPolicy alloc(&set_zone);
+ const int kPresent = 1;
+ const int kGone = 0;
+ base::TemplateHashMapImpl<Handle<Name>, int, NameComparator,
+ ZoneAllocationPolicy>
+ unchecked_result_keys(ZoneHashMap::kDefaultHashMapCapacity,
+ NameComparator(isolate_), alloc);
+ int unchecked_result_keys_size = 0;
+ for (int i = 0; i < trap_result->length(); ++i) {
+ Handle<Name> key(Name::cast(trap_result->get(i)), isolate_);
+ auto entry = unchecked_result_keys.LookupOrInsert(key, key->Hash(), alloc);
+ if (entry->value != kPresent) {
+ entry->value = kPresent;
+ unchecked_result_keys_size++;
+ } else {
+      // Found a duplicate entry; throw a TypeError (step 9).
+ isolate_->Throw(*isolate_->factory()->NewTypeError(
+ MessageTemplate::kProxyOwnKeysDuplicateEntries));
+ return Nothing<bool>();
+ }
+ }
+ // 10. Let extensibleTarget be ? IsExtensible(target).
+ Maybe<bool> maybe_extensible = JSReceiver::IsExtensible(target);
+ MAYBE_RETURN(maybe_extensible, Nothing<bool>());
+ bool extensible_target = maybe_extensible.FromJust();
+ // 11. Let targetKeys be ? target.[[OwnPropertyKeys]]().
+ Handle<FixedArray> target_keys;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate_, target_keys,
+ JSReceiver::OwnPropertyKeys(target),
+ Nothing<bool>());
+ // 12, 13. (Assert)
+ // 14. Let targetConfigurableKeys be an empty List.
+ // To save memory, we're re-using target_keys and will modify it in-place.
+ Handle<FixedArray> target_configurable_keys = target_keys;
+ // 15. Let targetNonconfigurableKeys be an empty List.
+ Handle<FixedArray> target_nonconfigurable_keys =
+ isolate_->factory()->NewFixedArray(target_keys->length());
+ int nonconfigurable_keys_length = 0;
+ // 16. Repeat, for each element key of targetKeys:
+ for (int i = 0; i < target_keys->length(); ++i) {
+ // 16a. Let desc be ? target.[[GetOwnProperty]](key).
+ PropertyDescriptor desc;
+ Maybe<bool> found = JSReceiver::GetOwnPropertyDescriptor(
+ isolate_, target, handle(target_keys->get(i), isolate_), &desc);
+ MAYBE_RETURN(found, Nothing<bool>());
+ // 16b. If desc is not undefined and desc.[[Configurable]] is false, then
+ if (found.FromJust() && !desc.configurable()) {
+ // 16b i. Append key as an element of targetNonconfigurableKeys.
+ target_nonconfigurable_keys->set(nonconfigurable_keys_length,
+ target_keys->get(i));
+ nonconfigurable_keys_length++;
+ // The key was moved, null it out in the original list.
+ target_keys->set(i, Smi::kZero);
+ } else {
+ // 16c. Else,
+ // 16c i. Append key as an element of targetConfigurableKeys.
+ // (No-op, just keep it in |target_keys|.)
+ }
+ }
+ // 17. If extensibleTarget is true and targetNonconfigurableKeys is empty,
+ // then:
+ if (extensible_target && nonconfigurable_keys_length == 0) {
+ // 17a. Return trapResult.
+ return AddKeysFromJSProxy(proxy, trap_result);
+ }
+ // 18. (Done in step 9)
+ // 19. Repeat, for each key that is an element of targetNonconfigurableKeys:
+ for (int i = 0; i < nonconfigurable_keys_length; ++i) {
+ Object raw_key = target_nonconfigurable_keys->get(i);
+ Handle<Name> key(Name::cast(raw_key), isolate_);
+ // 19a. If key is not an element of uncheckedResultKeys, throw a
+ // TypeError exception.
+ auto found = unchecked_result_keys.Lookup(key, key->Hash());
+ if (found == nullptr || found->value == kGone) {
+ isolate_->Throw(*isolate_->factory()->NewTypeError(
+ MessageTemplate::kProxyOwnKeysMissing, key));
+ return Nothing<bool>();
+ }
+ // 19b. Remove key from uncheckedResultKeys.
+ found->value = kGone;
+ unchecked_result_keys_size--;
+ }
+ // 20. If extensibleTarget is true, return trapResult.
+ if (extensible_target) {
+ return AddKeysFromJSProxy(proxy, trap_result);
+ }
+ // 21. Repeat, for each key that is an element of targetConfigurableKeys:
+ for (int i = 0; i < target_configurable_keys->length(); ++i) {
+ Object raw_key = target_configurable_keys->get(i);
+ if (raw_key.IsSmi()) continue; // Zapped entry, was nonconfigurable.
+ Handle<Name> key(Name::cast(raw_key), isolate_);
+ // 21a. If key is not an element of uncheckedResultKeys, throw a
+ // TypeError exception.
+ auto found = unchecked_result_keys.Lookup(key, key->Hash());
+ if (found == nullptr || found->value == kGone) {
+ isolate_->Throw(*isolate_->factory()->NewTypeError(
+ MessageTemplate::kProxyOwnKeysMissing, key));
+ return Nothing<bool>();
+ }
+ // 21b. Remove key from uncheckedResultKeys.
+ found->value = kGone;
+ unchecked_result_keys_size--;
+ }
+ // 22. If uncheckedResultKeys is not empty, throw a TypeError exception.
+ if (unchecked_result_keys_size != 0) {
+ DCHECK_GT(unchecked_result_keys_size, 0);
+ isolate_->Throw(*isolate_->factory()->NewTypeError(
+ MessageTemplate::kProxyOwnKeysNonExtensible));
+ return Nothing<bool>();
+ }
+ // 23. Return trapResult.
+ return AddKeysFromJSProxy(proxy, trap_result);
+}
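+
+// For illustration, the invariants enforced above are observable from JS: an
+// ownKeys trap that returns duplicate keys throws (step 9), one that omits a
+// non-configurable key of the target throws (step 19a), and, for a
+// non-extensible target, one that omits or invents keys throws (steps 21a
+// and 22).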
+
+Maybe<bool> KeyAccumulator::CollectOwnJSProxyTargetKeys(
+ Handle<JSProxy> proxy, Handle<JSReceiver> target) {
+ // TODO(cbruni): avoid creating another KeyAccumulator
+ Handle<FixedArray> keys;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate_, keys,
+ KeyAccumulator::GetKeys(
+ target, KeyCollectionMode::kOwnOnly, ALL_PROPERTIES,
+ GetKeysConversion::kConvertToString, is_for_in_, skip_indices_),
+ Nothing<bool>());
+  return AddKeysFromJSProxy(proxy, keys);
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/objects/keys.h b/deps/v8/src/objects/keys.h
new file mode 100644
index 0000000000..69f61a886e
--- /dev/null
+++ b/deps/v8/src/objects/keys.h
@@ -0,0 +1,175 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_KEYS_H_
+#define V8_OBJECTS_KEYS_H_
+
+#include "src/objects/hash-table.h"
+#include "src/objects/js-objects.h"
+#include "src/objects/objects.h"
+
+namespace v8 {
+namespace internal {
+
+class JSProxy;
+
+enum AddKeyConversion { DO_NOT_CONVERT, CONVERT_TO_ARRAY_INDEX };
+
+// This is a helper class for JSReceiver::GetKeys which collects and sorts keys.
+// GetKeys needs to sort keys per prototype level, first showing the integer
+// indices from elements then the strings from the properties. However, this
+// does not apply to proxies which are in full control of how the keys are
+// sorted.
+//
+// For performance reasons the KeyAccumulator internally collects all keys in
+// a single OrderedHashSet (|keys_|), which deduplicates them cheaply while
+// preserving insertion order. Non-enumerable keys encountered while walking
+// the prototype chain are recorded separately in |shadowing_keys_| so that
+// enumerable keys of the same name further down the chain can be filtered
+// out (see IsShadowed() and AddShadowingKey()).
+class KeyAccumulator final {
+ public:
+ KeyAccumulator(Isolate* isolate, KeyCollectionMode mode,
+ PropertyFilter filter)
+ : isolate_(isolate), mode_(mode), filter_(filter) {}
+ ~KeyAccumulator() = default;
+
+ static MaybeHandle<FixedArray> GetKeys(
+ Handle<JSReceiver> object, KeyCollectionMode mode, PropertyFilter filter,
+ GetKeysConversion keys_conversion = GetKeysConversion::kKeepNumbers,
+ bool is_for_in = false, bool skip_indices = false);
+
+ Handle<FixedArray> GetKeys(
+ GetKeysConversion convert = GetKeysConversion::kKeepNumbers);
+ Maybe<bool> CollectKeys(Handle<JSReceiver> receiver,
+ Handle<JSReceiver> object);
+ Maybe<bool> CollectOwnElementIndices(Handle<JSReceiver> receiver,
+ Handle<JSObject> object);
+ Maybe<bool> CollectOwnPropertyNames(Handle<JSReceiver> receiver,
+ Handle<JSObject> object);
+ void CollectPrivateNames(Handle<JSReceiver> receiver,
+ Handle<JSObject> object);
+ Maybe<bool> CollectAccessCheckInterceptorKeys(
+ Handle<AccessCheckInfo> access_check_info, Handle<JSReceiver> receiver,
+ Handle<JSObject> object);
+
+  // Might return the object's enum_cache directly; copy the result before
+  // using it as an elements backing store for a JSObject.
+ // Does not throw for uninitialized exports in module namespace objects, so
+ // this has to be checked separately.
+ static Handle<FixedArray> GetOwnEnumPropertyKeys(Isolate* isolate,
+ Handle<JSObject> object);
+
+ void AddKey(Object key, AddKeyConversion convert = DO_NOT_CONVERT);
+ void AddKey(Handle<Object> key, AddKeyConversion convert = DO_NOT_CONVERT);
+ void AddKeys(Handle<FixedArray> array, AddKeyConversion convert);
+ void AddKeys(Handle<JSObject> array_like, AddKeyConversion convert);
+
+ Isolate* isolate() { return isolate_; }
+ // Filter keys based on their property descriptors.
+ PropertyFilter filter() { return filter_; }
+ // The collection mode defines whether we collect the keys from the prototype
+ // chain or only look at the receiver.
+ KeyCollectionMode mode() { return mode_; }
+ // In case of for-in loops we have to treat JSProxy keys differently and
+ // deduplicate them. Additionally we convert JSProxy keys back to array
+ // indices.
+ void set_is_for_in(bool value) { is_for_in_ = value; }
+ void set_skip_indices(bool value) { skip_indices_ = value; }
+ // The last_non_empty_prototype is used to limit the prototypes for which
+ // we have to keep track of non-enumerable keys that can shadow keys
+ // repeated on the prototype chain.
+ void set_last_non_empty_prototype(Handle<JSReceiver> object) {
+ last_non_empty_prototype_ = object;
+ }
+ // Shadowing keys are used to filter keys. This happens when non-enumerable
+ // keys appear again on the prototype chain.
+ void AddShadowingKey(Object key);
+ void AddShadowingKey(Handle<Object> key);
+
+ private:
+ Maybe<bool> CollectOwnKeys(Handle<JSReceiver> receiver,
+ Handle<JSObject> object);
+ Maybe<bool> CollectOwnJSProxyKeys(Handle<JSReceiver> receiver,
+ Handle<JSProxy> proxy);
+ Maybe<bool> CollectOwnJSProxyTargetKeys(Handle<JSProxy> proxy,
+ Handle<JSReceiver> target);
+ Maybe<bool> AddKeysFromJSProxy(Handle<JSProxy> proxy,
+ Handle<FixedArray> keys);
+ bool IsShadowed(Handle<Object> key);
+ bool HasShadowingKeys();
+ Handle<OrderedHashSet> keys();
+
+ Isolate* isolate_;
+  // |keys_| is either a Handle<OrderedHashSet> or, in the case of own JSProxy
+  // keys, a Handle<FixedArray>. The OrderedHashSet is converted in place to
+  // the result list, a FixedArray containing all collected keys.
+ Handle<FixedArray> keys_;
+ Handle<JSReceiver> last_non_empty_prototype_;
+ Handle<ObjectHashSet> shadowing_keys_;
+ KeyCollectionMode mode_;
+ PropertyFilter filter_;
+ bool is_for_in_ = false;
+ bool skip_indices_ = false;
+  // Keys on the first receiver (the object itself) cannot be shadowed, so
+  // the shadow check can be skipped until a shadowing key is added.
+ bool skip_shadow_check_ = true;
+
+ DISALLOW_COPY_AND_ASSIGN(KeyAccumulator);
+};
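+
+// A minimal usage sketch (illustrative; |isolate| and |receiver| are assumed
+// to be in scope, and the pattern mirrors FastKeyAccumulator::GetKeysSlow()
+// in keys.cc):
+//
+//   KeyAccumulator accumulator(isolate, KeyCollectionMode::kOwnOnly,
+//                              ENUMERABLE_STRINGS);
+//   MAYBE_RETURN(accumulator.CollectKeys(receiver, receiver),
+//                MaybeHandle<FixedArray>());
+//   Handle<FixedArray> keys = accumulator.GetKeys();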
+
+// The FastKeyAccumulator handles the cases where there are no elements on
+// the prototype chain and forwards the complex/slow cases to the normal
+// KeyAccumulator. This significantly speeds up the common OWN_ONLY case, in
+// which we do not have to walk the prototype chain.
+class FastKeyAccumulator {
+ public:
+ FastKeyAccumulator(Isolate* isolate, Handle<JSReceiver> receiver,
+ KeyCollectionMode mode, PropertyFilter filter,
+ bool is_for_in = false, bool skip_indices = false)
+ : isolate_(isolate),
+ receiver_(receiver),
+ mode_(mode),
+ filter_(filter),
+ is_for_in_(is_for_in),
+ skip_indices_(skip_indices) {
+ Prepare();
+ }
+
+ bool is_receiver_simple_enum() { return is_receiver_simple_enum_; }
+ bool has_empty_prototype() { return has_empty_prototype_; }
+
+ MaybeHandle<FixedArray> GetKeys(
+ GetKeysConversion convert = GetKeysConversion::kKeepNumbers);
+
+ private:
+ void Prepare();
+ MaybeHandle<FixedArray> GetKeysFast(GetKeysConversion convert);
+ MaybeHandle<FixedArray> GetKeysSlow(GetKeysConversion convert);
+
+ MaybeHandle<FixedArray> GetOwnKeysWithUninitializedEnumCache();
+
+ Isolate* isolate_;
+ Handle<JSReceiver> receiver_;
+ Handle<JSReceiver> last_non_empty_prototype_;
+ KeyCollectionMode mode_;
+ PropertyFilter filter_;
+ bool is_for_in_ = false;
+ bool skip_indices_ = false;
+ bool is_receiver_simple_enum_ = false;
+ bool has_empty_prototype_ = false;
+
+ DISALLOW_COPY_AND_ASSIGN(FastKeyAccumulator);
+};
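+
+// A minimal usage sketch (illustrative; the constructor runs Prepare(), after
+// which GetKeys() picks the fast or the slow path automatically):
+//
+//   FastKeyAccumulator fast_keys(isolate, receiver,
+//                                KeyCollectionMode::kOwnOnly,
+//                                ENUMERABLE_STRINGS);
+//   Handle<FixedArray> keys;
+//   if (!fast_keys.GetKeys().ToHandle(&keys)) {
+//     // An exception is pending on the isolate.
+//   }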
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_OBJECTS_KEYS_H_
diff --git a/deps/v8/src/objects/layout-descriptor-inl.h b/deps/v8/src/objects/layout-descriptor-inl.h
new file mode 100644
index 0000000000..49683da267
--- /dev/null
+++ b/deps/v8/src/objects/layout-descriptor-inl.h
@@ -0,0 +1,253 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_LAYOUT_DESCRIPTOR_INL_H_
+#define V8_OBJECTS_LAYOUT_DESCRIPTOR_INL_H_
+
+#include "src/objects/layout-descriptor.h"
+
+#include "src/handles/handles-inl.h"
+#include "src/objects/descriptor-array-inl.h"
+#include "src/objects/objects-inl.h"
+#include "src/objects/smi.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+LayoutDescriptor::LayoutDescriptor(Address ptr)
+ : ByteArray(ptr, AllowInlineSmiStorage::kAllowBeingASmi) {
+ SLOW_DCHECK(IsLayoutDescriptor());
+}
+CAST_ACCESSOR(LayoutDescriptor)
+
+LayoutDescriptor LayoutDescriptor::FromSmi(Smi smi) {
+ return LayoutDescriptor::cast(smi);
+}
+
+Handle<LayoutDescriptor> LayoutDescriptor::New(Isolate* isolate, int length) {
+ if (length <= kBitsInSmiLayout) {
+ // The whole bit vector fits into a smi.
+ return handle(LayoutDescriptor::FromSmi(Smi::zero()), isolate);
+ }
+ int backing_store_length = GetSlowModeBackingStoreLength(length);
+ Handle<LayoutDescriptor> result =
+ Handle<LayoutDescriptor>::cast(isolate->factory()->NewByteArray(
+ backing_store_length, AllocationType::kOld));
+ memset(reinterpret_cast<void*>(result->GetDataStartAddress()), 0,
+ result->DataSize());
+ return result;
+}
+
+bool LayoutDescriptor::InobjectUnboxedField(int inobject_properties,
+ PropertyDetails details) {
+ if (details.location() != kField || !details.representation().IsDouble()) {
+ return false;
+ }
+ // We care only about in-object properties.
+ return details.field_index() < inobject_properties;
+}
+
+LayoutDescriptor LayoutDescriptor::FastPointerLayout() {
+ return LayoutDescriptor::FromSmi(Smi::zero());
+}
+
+bool LayoutDescriptor::GetIndexes(int field_index, int* layout_word_index,
+ int* layout_bit_index) {
+ if (static_cast<unsigned>(field_index) >= static_cast<unsigned>(capacity())) {
+ return false;
+ }
+
+ *layout_word_index = field_index / kBitsPerLayoutWord;
+ CHECK((!IsSmi() && (*layout_word_index < length())) ||
+ (IsSmi() && (*layout_word_index < 1)));
+
+ *layout_bit_index = field_index % kBitsPerLayoutWord;
+ return true;
+}
+
+LayoutDescriptor LayoutDescriptor::SetRawData(int field_index) {
+ return SetTagged(field_index, false);
+}
+
+LayoutDescriptor LayoutDescriptor::SetTagged(int field_index, bool tagged) {
+ int layout_word_index = 0;
+ int layout_bit_index = 0;
+
+ CHECK(GetIndexes(field_index, &layout_word_index, &layout_bit_index));
+ uint32_t layout_mask = static_cast<uint32_t>(1) << layout_bit_index;
+
+ if (IsSlowLayout()) {
+ uint32_t value = get_layout_word(layout_word_index);
+ if (tagged) {
+ value &= ~layout_mask;
+ } else {
+ value |= layout_mask;
+ }
+ set_layout_word(layout_word_index, value);
+ return *this;
+ } else {
+ uint32_t value = static_cast<uint32_t>(Smi::ToInt(*this));
+ if (tagged) {
+ value &= ~layout_mask;
+ } else {
+ value |= layout_mask;
+ }
+ return LayoutDescriptor::FromSmi(Smi::FromInt(static_cast<int>(value)));
+ }
+}
+
+bool LayoutDescriptor::IsTagged(int field_index) {
+ if (IsFastPointerLayout()) return true;
+
+ int layout_word_index;
+ int layout_bit_index;
+
+ if (!GetIndexes(field_index, &layout_word_index, &layout_bit_index)) {
+    // Out-of-bounds queries are considered tagged.
+ return true;
+ }
+ uint32_t layout_mask = static_cast<uint32_t>(1) << layout_bit_index;
+
+ if (IsSlowLayout()) {
+ uint32_t value = get_layout_word(layout_word_index);
+ return (value & layout_mask) == 0;
+ } else {
+ uint32_t value = static_cast<uint32_t>(Smi::ToInt(*this));
+ return (value & layout_mask) == 0;
+ }
+}
+
+bool LayoutDescriptor::IsFastPointerLayout() {
+ return *this == FastPointerLayout();
+}
+
+bool LayoutDescriptor::IsFastPointerLayout(Object layout_descriptor) {
+ return layout_descriptor == FastPointerLayout();
+}
+
+bool LayoutDescriptor::IsSlowLayout() { return !IsSmi(); }
+
+int LayoutDescriptor::capacity() {
+ return IsSlowLayout() ? (length() * kBitsPerByte) : kBitsInSmiLayout;
+}
+
+LayoutDescriptor LayoutDescriptor::cast_gc_safe(Object object) {
+ // The map word of the object can be a forwarding pointer during
+ // object evacuation phase of GC. Since the layout descriptor methods
+ // for checking whether a field is tagged or not do not depend on the
+ // object map, it should be safe.
+ return LayoutDescriptor::unchecked_cast(object);
+}
+
+int LayoutDescriptor::GetSlowModeBackingStoreLength(int length) {
+ DCHECK_LT(0, length);
+  // We allocate kTaggedSize-rounded blocks of memory anyway, so we increase
+  // the length of the allocated array to utilize that "lost" space, which can
+  // also help to avoid layout descriptor reallocations.
+ return RoundUp(length, kBitsPerByte * kTaggedSize) / kBitsPerByte;
+}
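+
+// Worked example (assuming kBitsPerByte == 8 and kTaggedSize == 8, i.e. no
+// pointer compression): a request for 40 bits yields
+// RoundUp(40, 64) / 8 == 8 bytes, one full tagged word of backing store.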
+
+int LayoutDescriptor::CalculateCapacity(Map map, DescriptorArray descriptors,
+ int num_descriptors) {
+ int inobject_properties = map.GetInObjectProperties();
+ if (inobject_properties == 0) return 0;
+
+ DCHECK_LE(num_descriptors, descriptors.number_of_descriptors());
+
+ int layout_descriptor_length;
+ const int kMaxWordsPerField = kDoubleSize / kTaggedSize;
+
+ if (num_descriptors <= kBitsInSmiLayout / kMaxWordsPerField) {
+ // Even in the "worst" case (all fields are doubles) it would fit into
+ // a Smi, so no need to calculate length.
+ layout_descriptor_length = kBitsInSmiLayout;
+
+ } else {
+ layout_descriptor_length = 0;
+
+ for (int i = 0; i < num_descriptors; i++) {
+ PropertyDetails details = descriptors.GetDetails(i);
+ if (!InobjectUnboxedField(inobject_properties, details)) continue;
+ int field_index = details.field_index();
+ int field_width_in_words = details.field_width_in_words();
+ layout_descriptor_length =
+ Max(layout_descriptor_length, field_index + field_width_in_words);
+ }
+ }
+ layout_descriptor_length = Min(layout_descriptor_length, inobject_properties);
+ return layout_descriptor_length;
+}
+
+LayoutDescriptor LayoutDescriptor::Initialize(
+ LayoutDescriptor layout_descriptor, Map map, DescriptorArray descriptors,
+ int num_descriptors) {
+ DisallowHeapAllocation no_allocation;
+ int inobject_properties = map.GetInObjectProperties();
+
+ for (int i = 0; i < num_descriptors; i++) {
+ PropertyDetails details = descriptors.GetDetails(i);
+ if (!InobjectUnboxedField(inobject_properties, details)) {
+ DCHECK(details.location() != kField ||
+ layout_descriptor.IsTagged(details.field_index()));
+ continue;
+ }
+ int field_index = details.field_index();
+ layout_descriptor = layout_descriptor.SetRawData(field_index);
+ if (details.field_width_in_words() > 1) {
+ layout_descriptor = layout_descriptor.SetRawData(field_index + 1);
+ }
+ }
+ return layout_descriptor;
+}
+
+int LayoutDescriptor::number_of_layout_words() {
+ return length() / kUInt32Size;
+}
+
+uint32_t LayoutDescriptor::get_layout_word(int index) const {
+ return get_uint32(index);
+}
+
+void LayoutDescriptor::set_layout_word(int index, uint32_t value) {
+ set_uint32(index, value);
+}
+
+// LayoutDescriptorHelper is a helper class for querying whether the
+// in-object property at a given offset is a double or not.
+LayoutDescriptorHelper::LayoutDescriptorHelper(Map map)
+ : all_fields_tagged_(true),
+ header_size_(0),
+ layout_descriptor_(LayoutDescriptor::FastPointerLayout()) {
+ if (!FLAG_unbox_double_fields) return;
+
+ layout_descriptor_ = map.layout_descriptor_gc_safe();
+ if (layout_descriptor_.IsFastPointerLayout()) {
+ return;
+ }
+
+ header_size_ = map.GetInObjectPropertiesStartInWords() * kTaggedSize;
+ DCHECK_GE(header_size_, 0);
+
+ all_fields_tagged_ = false;
+}
+
+bool LayoutDescriptorHelper::IsTagged(int offset_in_bytes) {
+ DCHECK(IsAligned(offset_in_bytes, kTaggedSize));
+ if (all_fields_tagged_) return true;
+ // Object headers do not contain non-tagged fields.
+ if (offset_in_bytes < header_size_) return true;
+ int field_index = (offset_in_bytes - header_size_) / kTaggedSize;
+
+ return layout_descriptor_.IsTagged(field_index);
+}
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_LAYOUT_DESCRIPTOR_INL_H_
diff --git a/deps/v8/src/objects/layout-descriptor.cc b/deps/v8/src/objects/layout-descriptor.cc
new file mode 100644
index 0000000000..76421aaf4f
--- /dev/null
+++ b/deps/v8/src/objects/layout-descriptor.cc
@@ -0,0 +1,288 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/objects/layout-descriptor.h"
+
+#include <sstream>
+
+#include "src/base/bits.h"
+#include "src/handles/handles-inl.h"
+#include "src/objects/objects-inl.h"
+
+namespace v8 {
+namespace internal {
+
+Handle<LayoutDescriptor> LayoutDescriptor::New(
+ Isolate* isolate, Handle<Map> map, Handle<DescriptorArray> descriptors,
+ int num_descriptors) {
+ if (!FLAG_unbox_double_fields) return handle(FastPointerLayout(), isolate);
+
+ int layout_descriptor_length =
+ CalculateCapacity(*map, *descriptors, num_descriptors);
+
+ if (layout_descriptor_length == 0) {
+ // No double fields were found, use fast pointer layout.
+ return handle(FastPointerLayout(), isolate);
+ }
+
+  // Initially, the layout descriptor corresponds to an object with all
+  // fields tagged.
+ Handle<LayoutDescriptor> layout_descriptor_handle =
+ LayoutDescriptor::New(isolate, layout_descriptor_length);
+
+ LayoutDescriptor layout_descriptor = Initialize(
+ *layout_descriptor_handle, *map, *descriptors, num_descriptors);
+
+ return handle(layout_descriptor, isolate);
+}
+
+Handle<LayoutDescriptor> LayoutDescriptor::ShareAppend(
+ Isolate* isolate, Handle<Map> map, PropertyDetails details) {
+ DCHECK(map->owns_descriptors());
+ Handle<LayoutDescriptor> layout_descriptor(map->GetLayoutDescriptor(),
+ isolate);
+
+ if (!InobjectUnboxedField(map->GetInObjectProperties(), details)) {
+ DCHECK(details.location() != kField ||
+ layout_descriptor->IsTagged(details.field_index()));
+ return layout_descriptor;
+ }
+ int field_index = details.field_index();
+ layout_descriptor = LayoutDescriptor::EnsureCapacity(
+ isolate, layout_descriptor, field_index + details.field_width_in_words());
+
+ DisallowHeapAllocation no_allocation;
+ LayoutDescriptor layout_desc = *layout_descriptor;
+ layout_desc = layout_desc.SetRawData(field_index);
+ if (details.field_width_in_words() > 1) {
+ layout_desc = layout_desc.SetRawData(field_index + 1);
+ }
+ return handle(layout_desc, isolate);
+}
+
+Handle<LayoutDescriptor> LayoutDescriptor::AppendIfFastOrUseFull(
+ Isolate* isolate, Handle<Map> map, PropertyDetails details,
+ Handle<LayoutDescriptor> full_layout_descriptor) {
+ DisallowHeapAllocation no_allocation;
+ LayoutDescriptor layout_descriptor = map->layout_descriptor();
+ if (layout_descriptor.IsSlowLayout()) {
+ return full_layout_descriptor;
+ }
+ if (!InobjectUnboxedField(map->GetInObjectProperties(), details)) {
+ DCHECK(details.location() != kField ||
+ layout_descriptor.IsTagged(details.field_index()));
+ return handle(layout_descriptor, isolate);
+ }
+ int field_index = details.field_index();
+ int new_capacity = field_index + details.field_width_in_words();
+ if (new_capacity > layout_descriptor.capacity()) {
+ // Current map's layout descriptor runs out of space, so use the full
+ // layout descriptor.
+ return full_layout_descriptor;
+ }
+
+ layout_descriptor = layout_descriptor.SetRawData(field_index);
+ if (details.field_width_in_words() > 1) {
+ layout_descriptor = layout_descriptor.SetRawData(field_index + 1);
+ }
+ return handle(layout_descriptor, isolate);
+}
+
+Handle<LayoutDescriptor> LayoutDescriptor::EnsureCapacity(
+ Isolate* isolate, Handle<LayoutDescriptor> layout_descriptor,
+ int new_capacity) {
+ int old_capacity = layout_descriptor->capacity();
+ if (new_capacity <= old_capacity) {
+ return layout_descriptor;
+ }
+ Handle<LayoutDescriptor> new_layout_descriptor =
+ LayoutDescriptor::New(isolate, new_capacity);
+ DCHECK(new_layout_descriptor->IsSlowLayout());
+
+ if (layout_descriptor->IsSlowLayout()) {
+ memcpy(new_layout_descriptor->GetDataStartAddress(),
+ layout_descriptor->GetDataStartAddress(),
+ layout_descriptor->DataSize());
+ return new_layout_descriptor;
+ } else {
+ // Fast layout.
+ uint32_t value = static_cast<uint32_t>(Smi::ToInt(*layout_descriptor));
+ new_layout_descriptor->set_layout_word(0, value);
+ return new_layout_descriptor;
+ }
+}
+
+bool LayoutDescriptor::IsTagged(int field_index, int max_sequence_length,
+ int* out_sequence_length) {
+ DCHECK_GT(max_sequence_length, 0);
+ if (IsFastPointerLayout()) {
+ *out_sequence_length = max_sequence_length;
+ return true;
+ }
+
+ int layout_word_index;
+ int layout_bit_index;
+
+ if (!GetIndexes(field_index, &layout_word_index, &layout_bit_index)) {
+ // Out of bounds queries are considered tagged.
+ *out_sequence_length = max_sequence_length;
+ return true;
+ }
+ uint32_t layout_mask = static_cast<uint32_t>(1) << layout_bit_index;
+
+ uint32_t value = IsSlowLayout() ? get_layout_word(layout_word_index)
+ : static_cast<uint32_t>(Smi::ToInt(*this));
+
+ bool is_tagged = (value & layout_mask) == 0;
+ if (!is_tagged) value = ~value; // Count set bits instead of cleared bits.
+ value = value & ~(layout_mask - 1); // Clear bits we are not interested in.
+ int sequence_length;
+ if (IsSlowLayout()) {
+ sequence_length = base::bits::CountTrailingZeros(value) - layout_bit_index;
+
+ if (layout_bit_index + sequence_length == kBitsPerLayoutWord) {
+      // This is a contiguous sequence till the end of the current word;
+      // proceed counting in the subsequent words.
+ ++layout_word_index;
+ int num_words = number_of_layout_words();
+ for (; layout_word_index < num_words; layout_word_index++) {
+ value = get_layout_word(layout_word_index);
+ bool cur_is_tagged = (value & 1) == 0;
+ if (cur_is_tagged != is_tagged) break;
+ if (!is_tagged) value = ~value; // Count set bits instead.
+ int cur_sequence_length = base::bits::CountTrailingZeros(value);
+ sequence_length += cur_sequence_length;
+ if (sequence_length >= max_sequence_length) break;
+ if (cur_sequence_length != kBitsPerLayoutWord) break;
+ }
+ if (is_tagged && (field_index + sequence_length == capacity())) {
+ // The contiguous sequence of tagged fields lasts till the end of the
+ // layout descriptor which means that all the fields starting from
+ // field_index are tagged.
+ sequence_length = std::numeric_limits<int>::max();
+ }
+ }
+ } else { // Fast layout.
+ sequence_length = Min(base::bits::CountTrailingZeros(value),
+ static_cast<unsigned>(kBitsInSmiLayout)) -
+ layout_bit_index;
+ if (is_tagged && (field_index + sequence_length == capacity())) {
+ // The contiguous sequence of tagged fields lasts till the end of the
+ // layout descriptor which means that all the fields starting from
+ // field_index are tagged.
+ sequence_length = std::numeric_limits<int>::max();
+ }
+ }
+ *out_sequence_length = Min(sequence_length, max_sequence_length);
+ return is_tagged;
+}
+
+Handle<LayoutDescriptor> LayoutDescriptor::NewForTesting(Isolate* isolate,
+ int length) {
+ return New(isolate, length);
+}
+
+LayoutDescriptor LayoutDescriptor::SetTaggedForTesting(int field_index,
+ bool tagged) {
+ return SetTagged(field_index, tagged);
+}
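+
+// A round-trip sketch using the testing hooks above (illustrative; the length
+// and field indices are arbitrary, |isolate| is assumed to be in scope):
+//
+//   Handle<LayoutDescriptor> layout =
+//       LayoutDescriptor::NewForTesting(isolate, 64);  // > kBitsInSmiLayout,
+//                                                      // so slow form
+//   LayoutDescriptor updated = layout->SetTaggedForTesting(1, false);
+//   DCHECK(!updated.IsTagged(1));   // field 1 now holds raw (double) data
+//   DCHECK(updated.IsTagged(0));    // untouched fields stay tagged
+//   DCHECK(updated.IsTagged(999));  // out-of-bounds queries read as tagged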
+
+bool LayoutDescriptorHelper::IsTagged(
+ int offset_in_bytes, int end_offset,
+ int* out_end_of_contiguous_region_offset) {
+ DCHECK(IsAligned(offset_in_bytes, kTaggedSize));
+ DCHECK(IsAligned(end_offset, kTaggedSize));
+ DCHECK(offset_in_bytes < end_offset);
+ if (all_fields_tagged_) {
+ *out_end_of_contiguous_region_offset = end_offset;
+ DCHECK(offset_in_bytes < *out_end_of_contiguous_region_offset);
+ return true;
+ }
+ int max_sequence_length = (end_offset - offset_in_bytes) / kTaggedSize;
+ int field_index = Max(0, (offset_in_bytes - header_size_) / kTaggedSize);
+ int sequence_length;
+ bool tagged = layout_descriptor_.IsTagged(field_index, max_sequence_length,
+ &sequence_length);
+ DCHECK_GT(sequence_length, 0);
+ if (offset_in_bytes < header_size_) {
+ // Object headers do not contain non-tagged fields. Check if the contiguous
+ // region continues after the header.
+ if (tagged) {
+ // First field is tagged, calculate end offset from there.
+ *out_end_of_contiguous_region_offset =
+ header_size_ + sequence_length * kTaggedSize;
+
+ } else {
+ *out_end_of_contiguous_region_offset = header_size_;
+ }
+ DCHECK(offset_in_bytes < *out_end_of_contiguous_region_offset);
+ return true;
+ }
+ *out_end_of_contiguous_region_offset =
+ offset_in_bytes + sequence_length * kTaggedSize;
+ DCHECK(offset_in_bytes < *out_end_of_contiguous_region_offset);
+ return tagged;
+}
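+
+// Typical iteration sketch (illustrative; |start| and |instance_size| stand
+// in for the caller's tagged-aligned object bounds):
+//
+//   LayoutDescriptorHelper helper(map);
+//   for (int offset = start; offset < instance_size;) {
+//     int end_of_region;
+//     if (helper.IsTagged(offset, instance_size, &end_of_region)) {
+//       // Visit the tagged slots in [offset, end_of_region).
+//     }
+//     offset = end_of_region;
+//   }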
+
+LayoutDescriptor LayoutDescriptor::Trim(Heap* heap, Map map,
+ DescriptorArray descriptors,
+ int num_descriptors) {
+ DisallowHeapAllocation no_allocation;
+ // Fast mode descriptors are never shared and therefore always fully
+ // correspond to their map.
+ if (!IsSlowLayout()) return *this;
+
+ int layout_descriptor_length =
+ CalculateCapacity(map, descriptors, num_descriptors);
+  // It must not become a fast-mode descriptor here, because otherwise it
+  // would already have to be the fast pointer layout descriptor, yet it is in
+  // slow mode now.
+ DCHECK_LT(kBitsInSmiLayout, layout_descriptor_length);
+
+ // Trim, clean and reinitialize this slow-mode layout descriptor.
+ int new_backing_store_length =
+ GetSlowModeBackingStoreLength(layout_descriptor_length);
+ int backing_store_length = length();
+ if (new_backing_store_length != backing_store_length) {
+ DCHECK_LT(new_backing_store_length, backing_store_length);
+ int delta = backing_store_length - new_backing_store_length;
+ heap->RightTrimFixedArray(*this, delta);
+ }
+ memset(GetDataStartAddress(), 0, DataSize());
+ LayoutDescriptor layout_descriptor =
+ Initialize(*this, map, descriptors, num_descriptors);
+ DCHECK_EQ(*this, layout_descriptor);
+ return layout_descriptor;
+}
+
+bool LayoutDescriptor::IsConsistentWithMap(Map map, bool check_tail) {
+ if (FLAG_unbox_double_fields) {
+ DescriptorArray descriptors = map.instance_descriptors();
+ int nof_descriptors = map.NumberOfOwnDescriptors();
+ int last_field_index = 0;
+ for (int i = 0; i < nof_descriptors; i++) {
+ PropertyDetails details = descriptors.GetDetails(i);
+ if (details.location() != kField) continue;
+ FieldIndex field_index = FieldIndex::ForDescriptor(map, i);
+ bool tagged_expected =
+ !field_index.is_inobject() || !details.representation().IsDouble();
+ for (int bit = 0; bit < details.field_width_in_words(); bit++) {
+ bool tagged_actual = IsTagged(details.field_index() + bit);
+ DCHECK_EQ(tagged_expected, tagged_actual);
+ if (tagged_actual != tagged_expected) return false;
+ }
+ last_field_index =
+ Max(last_field_index,
+ details.field_index() + details.field_width_in_words());
+ }
+ if (check_tail) {
+ int n = capacity();
+ for (int i = last_field_index; i < n; i++) {
+ DCHECK(IsTagged(i));
+ }
+ }
+ }
+ return true;
+}
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/objects/layout-descriptor.h b/deps/v8/src/objects/layout-descriptor.h
new file mode 100644
index 0000000000..2311594ff6
--- /dev/null
+++ b/deps/v8/src/objects/layout-descriptor.h
@@ -0,0 +1,175 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_LAYOUT_DESCRIPTOR_H_
+#define V8_OBJECTS_LAYOUT_DESCRIPTOR_H_
+
+#include <iosfwd>
+
+#include "src/objects/fixed-array.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+// LayoutDescriptor is a bit vector defining which fields contain non-tagged
+// values. It is either a byte array (slow form) or a Smi if the length fits
+// (fast form).
+// Each bit in the layout represents a FIELD. The bits are referenced by
+// field_index which is a field number. If the bit is set then the corresponding
+// field contains a non-tagged value and therefore must be skipped by GC.
+// Otherwise the field is considered tagged. If the queried bit lies "outside"
+// of the descriptor then the field is also considered tagged.
+// Once a layout descriptor is created, it is only allowed to append
+// properties to it. GC uses layout descriptors to iterate objects. Avoid
+// heap pointers in a layout descriptor because they can lead to data races
+// when GC moves objects in parallel.
+class V8_EXPORT_PRIVATE LayoutDescriptor : public ByteArray {
+ public:
+ V8_INLINE bool IsTagged(int field_index);
+
+ // Queries the contiguous region of fields that are either tagged or not.
+ // Returns true if the given field is tagged or false otherwise and writes
+ // the length of the contiguous region to |out_sequence_length|.
+ // If the sequence is longer than |max_sequence_length| then
+ // |out_sequence_length| is set to |max_sequence_length|.
+ bool IsTagged(int field_index, int max_sequence_length,
+ int* out_sequence_length);
+
+ // Returns true if this is a layout of the object having only tagged fields.
+ V8_INLINE bool IsFastPointerLayout();
+ V8_INLINE static bool IsFastPointerLayout(Object layout_descriptor);
+
+ // Returns true if the layout descriptor is in non-Smi form.
+ V8_INLINE bool IsSlowLayout();
+
+ DECL_CAST(LayoutDescriptor)
+
+ V8_INLINE static LayoutDescriptor cast_gc_safe(Object object);
+
+  // Builds a layout descriptor optimized for the given |map| from the first
+  // |num_descriptors| elements of the given descriptors array. The |map|'s
+  // own descriptors may differ.
+ static Handle<LayoutDescriptor> New(Isolate* isolate, Handle<Map> map,
+ Handle<DescriptorArray> descriptors,
+ int num_descriptors);
+
+  // Modifies |map|'s layout descriptor, or creates a new one if necessary,
+  // by appending a property with |details| to it.
+ static Handle<LayoutDescriptor> ShareAppend(Isolate* isolate, Handle<Map> map,
+ PropertyDetails details);
+
+  // Creates a new layout descriptor by appending a property with |details|
+  // to |map|'s layout descriptor; if the result is still fast, returns it.
+  // Otherwise |full_layout_descriptor| is returned.
+ static Handle<LayoutDescriptor> AppendIfFastOrUseFull(
+ Isolate* isolate, Handle<Map> map, PropertyDetails details,
+ Handle<LayoutDescriptor> full_layout_descriptor);
+
+ // Layout descriptor that corresponds to an object all fields of which are
+ // tagged (FastPointerLayout).
+ V8_INLINE static LayoutDescriptor FastPointerLayout();
+
+ // Check that this layout descriptor corresponds to given map.
+ bool IsConsistentWithMap(Map map, bool check_tail = false);
+
+  // Trims this layout descriptor to the given number of descriptors. This
+  // happens only when the corresponding descriptors array is trimmed. Fast
+  // descriptors are returned unchanged; slow-mode descriptors are
+  // right-trimmed and reinitialized.
+ LayoutDescriptor Trim(Heap* heap, Map map, DescriptorArray descriptors,
+ int num_descriptors);
+
+#ifdef OBJECT_PRINT
+ // For our gdb macros, we should perhaps change these in the future.
+ void Print();
+
+ void ShortPrint(std::ostream& os);
+ void Print(std::ostream& os); // NOLINT
+#endif
+
+  // Capacity of the layout descriptor in bits.
+ V8_INLINE int capacity();
+
+ static Handle<LayoutDescriptor> NewForTesting(Isolate* isolate, int length);
+ LayoutDescriptor SetTaggedForTesting(int field_index, bool tagged);
+
+ private:
+ // Exclude sign-bit to simplify encoding.
+ static constexpr int kBitsInSmiLayout =
+ SmiValuesAre32Bits() ? 32 : kSmiValueSize - 1;
+
+ static const int kBitsPerLayoutWord = 32;
+
+ V8_INLINE int number_of_layout_words();
+ V8_INLINE uint32_t get_layout_word(int index) const;
+ V8_INLINE void set_layout_word(int index, uint32_t value);
+
+ V8_INLINE static Handle<LayoutDescriptor> New(Isolate* isolate, int length);
+ V8_INLINE static LayoutDescriptor FromSmi(Smi smi);
+
+ V8_INLINE static bool InobjectUnboxedField(int inobject_properties,
+ PropertyDetails details);
+
+ // Calculates minimal layout descriptor capacity required for given
+ // |map|, |descriptors| and |num_descriptors|.
+ V8_INLINE static int CalculateCapacity(Map map, DescriptorArray descriptors,
+ int num_descriptors);
+
+  // Calculates the length of the slow-mode backing store array from the
+  // given layout descriptor length.
+ V8_INLINE static int GetSlowModeBackingStoreLength(int length);
+
+ // Fills in clean |layout_descriptor| according to given |map|, |descriptors|
+ // and |num_descriptors|.
+ V8_INLINE static LayoutDescriptor Initialize(
+ LayoutDescriptor layout_descriptor, Map map, DescriptorArray descriptors,
+ int num_descriptors);
+
+ static Handle<LayoutDescriptor> EnsureCapacity(
+ Isolate* isolate, Handle<LayoutDescriptor> layout_descriptor,
+ int new_capacity);
+
+ // Returns false if requested field_index is out of bounds.
+ V8_INLINE bool GetIndexes(int field_index, int* layout_word_index,
+ int* layout_bit_index);
+
+ V8_INLINE V8_WARN_UNUSED_RESULT LayoutDescriptor SetRawData(int field_index);
+
+ V8_INLINE V8_WARN_UNUSED_RESULT LayoutDescriptor SetTagged(int field_index,
+ bool tagged);
+
+ OBJECT_CONSTRUCTORS(LayoutDescriptor, ByteArray);
+};
+
+// LayoutDescriptorHelper is a helper class for querying a layout descriptor
+// about whether the field at a given offset is tagged or not.
+class LayoutDescriptorHelper {
+ public:
+ inline explicit LayoutDescriptorHelper(Map map);
+
+ bool all_fields_tagged() { return all_fields_tagged_; }
+ inline bool IsTagged(int offset_in_bytes);
+
+ // Queries the contiguous region of fields that are all tagged or all
+ // untagged. Returns true if the fields starting at |offset_in_bytes| are
+ // tagged and false otherwise, and writes the offset of the end of the
+ // contiguous region to |out_end_of_contiguous_region_offset|. The
+ // |end_offset| value is the upper bound for
+ // |out_end_of_contiguous_region_offset|.
+ V8_EXPORT_PRIVATE bool IsTagged(int offset_in_bytes, int end_offset,
+ int* out_end_of_contiguous_region_offset);
+
+ private:
+ bool all_fields_tagged_;
+ int header_size_;
+ LayoutDescriptor layout_descriptor_;
+};
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_LAYOUT_DESCRIPTOR_H_
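To make the `IsTagged(offset_in_bytes, end_offset, out_end)` contract above concrete, here is a minimal, self-contained sketch of walking an object's fields as contiguous tagged/untagged regions. `ToyLayout` and its per-field bitmap are invented stand-ins, not V8 code; a real layout descriptor packs the same information into Smi bits or a ByteArray as declared above.

```cpp
#include <cstddef>
#include <iostream>
#include <vector>

// Toy stand-in for a layout bitmap: entry i is true when field i is untagged
// raw data, false when it is a tagged pointer the GC must visit.
struct ToyLayout {
  std::vector<bool> untagged;

  // Mirrors the IsTagged(offset, end, out_end) contract: report whether the
  // field at |index| is tagged and find the end of the contiguous run of
  // fields with the same taggedness, capped at |end_index|.
  bool IsTagged(size_t index, size_t end_index, size_t* out_run_end) const {
    const bool tagged = !untagged[index];
    size_t i = index + 1;
    while (i < end_index && (!untagged[i]) == tagged) i++;
    *out_run_end = i;
    return tagged;
  }
};

int main() {
  // Fields: [tagged, tagged, untagged double, tagged].
  ToyLayout layout{{false, false, true, false}};
  const size_t end = layout.untagged.size();
  for (size_t i = 0; i < end;) {
    size_t run_end;
    const bool tagged = layout.IsTagged(i, end, &run_end);
    std::cout << (tagged ? "tagged" : "raw") << " fields [" << i << ", "
              << run_end << ")\n";
    i = run_end;  // a visitor would scan only the tagged runs
  }
}
```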
diff --git a/deps/v8/src/objects/literal-objects-inl.h b/deps/v8/src/objects/literal-objects-inl.h
index fafbb17f88..1ddb333cff 100644
--- a/deps/v8/src/objects/literal-objects-inl.h
+++ b/deps/v8/src/objects/literal-objects-inl.h
@@ -7,7 +7,7 @@
#include "src/objects/literal-objects.h"
-#include "src/objects-inl.h"
+#include "src/objects/objects-inl.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -70,7 +70,7 @@ void ArrayBoilerplateDescription::set_elements_kind(ElementsKind kind) {
}
bool ArrayBoilerplateDescription::is_empty() const {
- return constant_elements()->length() == 0;
+ return constant_elements().length() == 0;
}
} // namespace internal
diff --git a/deps/v8/src/objects/literal-objects.cc b/deps/v8/src/objects/literal-objects.cc
index d699ac7345..bfdbd9317b 100644
--- a/deps/v8/src/objects/literal-objects.cc
+++ b/deps/v8/src/objects/literal-objects.cc
@@ -4,13 +4,13 @@
#include "src/objects/literal-objects.h"
-#include "src/accessors.h"
#include "src/ast/ast.h"
+#include "src/builtins/accessors.h"
+#include "src/execution/isolate.h"
#include "src/heap/factory.h"
-#include "src/isolate.h"
-#include "src/objects-inl.h"
#include "src/objects/hash-table-inl.h"
#include "src/objects/literal-objects-inl.h"
+#include "src/objects/objects-inl.h"
#include "src/objects/smi.h"
#include "src/objects/struct-inl.h"
@@ -113,7 +113,7 @@ void AddToDescriptorArrayTemplate(
value_kind == ClassBoilerplate::kSetter);
Object raw_accessor = descriptor_array_template->GetStrongValue(entry);
AccessorPair pair;
- if (raw_accessor->IsAccessorPair()) {
+ if (raw_accessor.IsAccessorPair()) {
pair = AccessorPair::cast(raw_accessor);
} else {
Handle<AccessorPair> new_pair = isolate->factory()->NewAccessorPair();
@@ -122,9 +122,9 @@ void AddToDescriptorArrayTemplate(
descriptor_array_template->Set(entry, &d);
pair = *new_pair;
}
- pair->set(value_kind == ClassBoilerplate::kGetter ? ACCESSOR_GETTER
- : ACCESSOR_SETTER,
- *value);
+ pair.set(value_kind == ClassBoilerplate::kGetter ? ACCESSOR_GETTER
+ : ACCESSOR_SETTER,
+ *value);
}
}
}
@@ -165,7 +165,7 @@ constexpr int ComputeEnumerationIndex(int value_index) {
}
inline int GetExistingValueIndex(Object value) {
- return value->IsSmi() ? Smi::ToInt(value) : -1;
+ return value.IsSmi() ? Smi::ToInt(value) : -1;
}
template <typename Dictionary, typename Key>
@@ -215,13 +215,13 @@ void AddToDictionaryTemplate(Isolate* isolate, Handle<Dictionary> dictionary,
Object existing_value = dictionary->ValueAt(entry);
if (value_kind == ClassBoilerplate::kData) {
// Computed value is a normal method.
- if (existing_value->IsAccessorPair()) {
+ if (existing_value.IsAccessorPair()) {
AccessorPair current_pair = AccessorPair::cast(existing_value);
int existing_getter_index =
- GetExistingValueIndex(current_pair->getter());
+ GetExistingValueIndex(current_pair.getter());
int existing_setter_index =
- GetExistingValueIndex(current_pair->setter());
+ GetExistingValueIndex(current_pair.setter());
// At least one of the accessors must already be defined.
DCHECK(existing_getter_index >= 0 || existing_setter_index >= 0);
if (existing_getter_index < key_index &&
@@ -243,7 +243,7 @@ void AddToDictionaryTemplate(Isolate* isolate, Handle<Dictionary> dictionary,
// and then it was overwritten by the current computed method which
// in turn was later overwritten by the setter method. So we clear
// the getter.
- current_pair->set_getter(*isolate->factory()->null_value());
+ current_pair.set_getter(*isolate->factory()->null_value());
} else if (existing_setter_index < key_index) {
DCHECK_LT(key_index, existing_getter_index);
@@ -251,19 +251,18 @@ void AddToDictionaryTemplate(Isolate* isolate, Handle<Dictionary> dictionary,
// and then it was overwritten by the current computed method which
// in turn was later overwritten by the getter method. So we clear
// the setter.
- current_pair->set_setter(*isolate->factory()->null_value());
+ current_pair.set_setter(*isolate->factory()->null_value());
}
}
} else {
// Overwrite existing value if it was defined before the computed one
// (AccessorInfo "length" property is always defined before).
- DCHECK_IMPLIES(!existing_value->IsSmi(),
- existing_value->IsAccessorInfo());
- DCHECK_IMPLIES(!existing_value->IsSmi(),
- AccessorInfo::cast(existing_value)->name() ==
+ DCHECK_IMPLIES(!existing_value.IsSmi(),
+ existing_value.IsAccessorInfo());
+ DCHECK_IMPLIES(!existing_value.IsSmi(),
+ AccessorInfo::cast(existing_value).name() ==
*isolate->factory()->length_string());
- if (!existing_value->IsSmi() ||
- Smi::ToInt(existing_value) < key_index) {
+ if (!existing_value.IsSmi() || Smi::ToInt(existing_value) < key_index) {
PropertyDetails details(kData, DONT_ENUM, PropertyCellType::kNoCell,
enum_order);
dictionary->DetailsAtPut(isolate, entry, details);
@@ -274,14 +273,14 @@ void AddToDictionaryTemplate(Isolate* isolate, Handle<Dictionary> dictionary,
AccessorComponent component = value_kind == ClassBoilerplate::kGetter
? ACCESSOR_GETTER
: ACCESSOR_SETTER;
- if (existing_value->IsAccessorPair()) {
+ if (existing_value.IsAccessorPair()) {
// Update respective component of existing AccessorPair.
AccessorPair current_pair = AccessorPair::cast(existing_value);
int existing_component_index =
- GetExistingValueIndex(current_pair->get(component));
+ GetExistingValueIndex(current_pair.get(component));
if (existing_component_index < key_index) {
- current_pair->set(component, value);
+ current_pair.set(component, value);
}
} else {
@@ -380,7 +379,7 @@ class ObjectDescriptor {
AddToDictionaryTemplate(isolate, properties_dictionary_template_, name,
value_index, value_kind, value);
} else {
- *temp_handle_.location() = value->ptr();
+ *temp_handle_.location() = value.ptr();
AddToDescriptorArrayTemplate(isolate, descriptor_array_template_, name,
value_kind, temp_handle_);
}
@@ -526,6 +525,11 @@ Handle<ClassBoilerplate> ClassBoilerplate::BuildClassBoilerplate(
for (int i = 0; i < expr->properties()->length(); i++) {
ClassLiteral::Property* property = expr->properties()->at(i);
+ // Private members are not processed using the class boilerplate.
+ if (property->is_private()) {
+ continue;
+ }
+
ClassBoilerplate::ValueKind value_kind;
switch (property->kind()) {
case ClassLiteral::Property::METHOD:
diff --git a/deps/v8/src/objects/lookup-cache-inl.h b/deps/v8/src/objects/lookup-cache-inl.h
new file mode 100644
index 0000000000..0894d6c4c7
--- /dev/null
+++ b/deps/v8/src/objects/lookup-cache-inl.h
@@ -0,0 +1,43 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_LOOKUP_CACHE_INL_H_
+#define V8_OBJECTS_LOOKUP_CACHE_INL_H_
+
+#include "src/objects/lookup-cache.h"
+
+#include "src/objects/objects-inl.h"
+
+namespace v8 {
+namespace internal {
+
+// static
+int DescriptorLookupCache::Hash(Map source, Name name) {
+ DCHECK(name.IsUniqueName());
+ // Uses only lower 32 bits if pointers are larger.
+ uint32_t source_hash = static_cast<uint32_t>(source.ptr()) >> kTaggedSizeLog2;
+ uint32_t name_hash = name.hash_field();
+ return (source_hash ^ name_hash) % kLength;
+}
+
+int DescriptorLookupCache::Lookup(Map source, Name name) {
+ int index = Hash(source, name);
+ Key& key = keys_[index];
+ if ((key.source == source) && (key.name == name)) return results_[index];
+ return kAbsent;
+}
+
+void DescriptorLookupCache::Update(Map source, Name name, int result) {
+ DCHECK_NE(result, kAbsent);
+ int index = Hash(source, name);
+ Key& key = keys_[index];
+ key.source = source;
+ key.name = name;
+ results_[index] = result;
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_OBJECTS_LOOKUP_CACHE_INL_H_
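The hash above mixes the map's address with the name's hash and reduces the result modulo the table size. A small self-contained sketch of the same shape (the constants and the `Hash` function below are illustrative stand-ins, not the V8 definitions) shows why the address is shifted right by `kTaggedSizeLog2` first: tagged pointers are aligned, so their low bits carry no entropy.

```cpp
#include <cstdint>
#include <cstdio>

// Illustrative constants: tagged values are 8-byte aligned, so the low
// kTaggedSizeLog2 bits of a map address are always zero and are shifted
// away before being mixed with the name hash.
constexpr int kTaggedSizeLog2 = 3;
constexpr int kLength = 64;  // table size; the modulus below must match it

int Hash(uintptr_t map_address, uint32_t name_hash) {
  uint32_t source_hash =
      static_cast<uint32_t>(map_address) >> kTaggedSizeLog2;  // drop alignment
  return static_cast<int>((source_hash ^ name_hash) % kLength);
}

int main() {
  // Three consecutive aligned "map addresses". Without the shift, the low
  // three bits of every bucket index would come from the name hash alone,
  // so for a given name only 1/8 of the table could ever be used.
  const uintptr_t maps[] = {0x10008, 0x10010, 0x10018};
  for (uintptr_t m : maps) std::printf("bucket = %d\n", Hash(m, 0x9e3779b9u));
}
```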
diff --git a/deps/v8/src/objects/lookup-cache.cc b/deps/v8/src/objects/lookup-cache.cc
new file mode 100644
index 0000000000..3f251912d6
--- /dev/null
+++ b/deps/v8/src/objects/lookup-cache.cc
@@ -0,0 +1,15 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/objects/lookup-cache.h"
+
+namespace v8 {
+namespace internal {
+
+void DescriptorLookupCache::Clear() {
+ for (int index = 0; index < kLength; index++) keys_[index].source = Map();
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/objects/lookup-cache.h b/deps/v8/src/objects/lookup-cache.h
new file mode 100644
index 0000000000..a2016d23df
--- /dev/null
+++ b/deps/v8/src/objects/lookup-cache.h
@@ -0,0 +1,60 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_LOOKUP_CACHE_H_
+#define V8_OBJECTS_LOOKUP_CACHE_H_
+
+#include "src/objects/map.h"
+#include "src/objects/name.h"
+#include "src/objects/objects.h"
+
+namespace v8 {
+namespace internal {
+
+// Cache for mapping (map, property name) into a descriptor index.
+// The cache contains both positive and negative results; a descriptor index
+// equal to kNotFound means the property is known to be absent, while kAbsent
+// means the pair is not in the cache. Cleared at startup and prior to any GC.
+class DescriptorLookupCache {
+ public:
+ // Looks up the descriptor index for (map, name).
+ // Returns kAbsent if the pair is not in the cache.
+ inline int Lookup(Map source, Name name);
+
+ // Update an element in the cache.
+ inline void Update(Map source, Name name, int result);
+
+ // Clear the cache.
+ void Clear();
+
+ static const int kAbsent = -2;
+
+ private:
+ DescriptorLookupCache() {
+ for (int i = 0; i < kLength; ++i) {
+ keys_[i].source = Map();
+ keys_[i].name = Name();
+ results_[i] = kAbsent;
+ }
+ }
+
+ static inline int Hash(Map source, Name name);
+
+ static const int kLength = 64;
+ struct Key {
+ Map source;
+ Name name;
+ };
+
+ Key keys_[kLength];
+ int results_[kLength];
+
+ friend class Isolate;
+ DISALLOW_COPY_AND_ASSIGN(DescriptorLookupCache);
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_OBJECTS_LOOKUP_CACHE_H_
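Since `DescriptorLookupCache`'s constructor is private (only `Isolate` creates one), here is a self-contained toy with the same design: a direct-mapped table where a colliding `Update` simply evicts the previous entry, and `Lookup` re-verifies the full key before trusting the cached result. Everything below (`ToyDescriptorCache`, the string keys) is invented for illustration.

```cpp
#include <cstdint>
#include <cstdio>
#include <functional>
#include <string>

// Toy model of the same design: a 64-entry direct-mapped cache from
// (map id, name) to a descriptor index. A colliding Update overwrites the
// old entry; Lookup compares the full key, so a stale slot is just a miss.
class ToyDescriptorCache {
 public:
  static const int kAbsent = -2;    // "pair is not in the cache"
  static const int kNotFound = -1;  // cached negative result

  int Lookup(uint32_t map_id, const std::string& name) const {
    const Entry& e = entries_[Hash(map_id, name)];
    if (e.map_id == map_id && e.name == name) return e.result;
    return kAbsent;
  }

  void Update(uint32_t map_id, const std::string& name, int result) {
    Entry& e = entries_[Hash(map_id, name)];
    e.map_id = map_id;
    e.name = name;
    e.result = result;
  }

 private:
  static const int kLength = 64;
  struct Entry {
    uint32_t map_id = 0;
    std::string name;
    int result = kAbsent;
  };
  static int Hash(uint32_t map_id, const std::string& name) {
    return static_cast<int>(
        (map_id ^ std::hash<std::string>{}(name)) % kLength);
  }
  Entry entries_[kLength];
};

int main() {
  ToyDescriptorCache cache;
  std::printf("%d\n", cache.Lookup(1, "x"));  // -2: never cached
  cache.Update(1, "x", 5);                    // positive result
  cache.Update(1, "y", ToyDescriptorCache::kNotFound);  // negative result
  // Prints "5 -1" unless the two keys happen to collide in the table.
  std::printf("%d %d\n", cache.Lookup(1, "x"), cache.Lookup(1, "y"));
}
```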
diff --git a/deps/v8/src/objects/lookup-inl.h b/deps/v8/src/objects/lookup-inl.h
new file mode 100644
index 0000000000..5b2dbff258
--- /dev/null
+++ b/deps/v8/src/objects/lookup-inl.h
@@ -0,0 +1,194 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_LOOKUP_INL_H_
+#define V8_OBJECTS_LOOKUP_INL_H_
+
+#include "src/objects/lookup.h"
+
+#include "src/handles/handles-inl.h"
+#include "src/heap/factory-inl.h"
+#include "src/objects/api-callbacks.h"
+#include "src/objects/map-inl.h"
+#include "src/objects/name-inl.h"
+#include "src/objects/objects-inl.h"
+
+namespace v8 {
+namespace internal {
+
+LookupIterator::LookupIterator(Isolate* isolate, Handle<Object> receiver,
+ Handle<Name> name, Configuration configuration)
+ : LookupIterator(isolate, receiver, name, GetRoot(isolate, receiver),
+ configuration) {}
+
+LookupIterator::LookupIterator(Handle<Object> receiver, Handle<Name> name,
+ Handle<JSReceiver> holder,
+ Configuration configuration)
+ : LookupIterator(holder->GetIsolate(), receiver, name, holder,
+ configuration) {}
+
+LookupIterator::LookupIterator(Isolate* isolate, Handle<Object> receiver,
+ Handle<Name> name, Handle<JSReceiver> holder,
+ Configuration configuration)
+ : configuration_(ComputeConfiguration(configuration, name)),
+ interceptor_state_(InterceptorState::kUninitialized),
+ property_details_(PropertyDetails::Empty()),
+ isolate_(isolate),
+ name_(isolate_->factory()->InternalizeName(name)),
+ receiver_(receiver),
+ initial_holder_(holder),
+ // kMaxUInt32 isn't a valid index.
+ index_(kMaxUInt32),
+ number_(static_cast<uint32_t>(DescriptorArray::kNotFound)) {
+#ifdef DEBUG
+ uint32_t index; // Assert that the name is not an array index.
+ DCHECK(!name->AsArrayIndex(&index));
+#endif // DEBUG
+ Start<false>();
+}
+
+LookupIterator::LookupIterator(Isolate* isolate, Handle<Object> receiver,
+ uint32_t index, Configuration configuration)
+ : LookupIterator(isolate, receiver, index,
+ GetRoot(isolate, receiver, index), configuration) {}
+
+LookupIterator LookupIterator::PropertyOrElement(Isolate* isolate,
+ Handle<Object> receiver,
+ Handle<Name> name,
+ Handle<JSReceiver> holder,
+ Configuration configuration) {
+ uint32_t index;
+ if (name->AsArrayIndex(&index)) {
+ LookupIterator it =
+ LookupIterator(isolate, receiver, index, holder, configuration);
+ it.name_ = name;
+ return it;
+ }
+ return LookupIterator(isolate, receiver, name, holder, configuration);
+}
+
+LookupIterator LookupIterator::PropertyOrElement(Isolate* isolate,
+ Handle<Object> receiver,
+ Handle<Name> name,
+ Configuration configuration) {
+ uint32_t index;
+ if (name->AsArrayIndex(&index)) {
+ LookupIterator it = LookupIterator(isolate, receiver, index, configuration);
+ it.name_ = name;
+ return it;
+ }
+ return LookupIterator(isolate, receiver, name, configuration);
+}
+
+Handle<Name> LookupIterator::GetName() {
+ if (name_.is_null()) {
+ DCHECK(IsElement());
+ name_ = factory()->Uint32ToString(index_);
+ }
+ return name_;
+}
+
+bool LookupIterator::is_dictionary_holder() const {
+ return !holder_->HasFastProperties();
+}
+
+Handle<Map> LookupIterator::transition_map() const {
+ DCHECK_EQ(TRANSITION, state_);
+ return Handle<Map>::cast(transition_);
+}
+
+Handle<PropertyCell> LookupIterator::transition_cell() const {
+ DCHECK_EQ(TRANSITION, state_);
+ return Handle<PropertyCell>::cast(transition_);
+}
+
+template <class T>
+Handle<T> LookupIterator::GetHolder() const {
+ DCHECK(IsFound());
+ return Handle<T>::cast(holder_);
+}
+
+bool LookupIterator::ExtendingNonExtensible(Handle<JSReceiver> receiver) {
+ DCHECK(receiver.is_identical_to(GetStoreTarget<JSReceiver>()));
+ return !receiver->map().is_extensible() &&
+ (IsElement() || !name_->IsPrivate());
+}
+
+bool LookupIterator::IsCacheableTransition() {
+ DCHECK_EQ(TRANSITION, state_);
+ return transition_->IsPropertyCell() ||
+ (transition_map()->is_dictionary_map() &&
+ !GetStoreTarget<JSReceiver>()->HasFastProperties()) ||
+ transition_map()->GetBackPointer().IsMap();
+}
+
+void LookupIterator::UpdateProtector() {
+ if (IsElement()) return;
+ // This list must be kept in sync with
+ // CodeStubAssembler::CheckForAssociatedProtector!
+ ReadOnlyRoots roots(heap());
+ if (*name_ == roots.is_concat_spreadable_symbol() ||
+ *name_ == roots.constructor_string() || *name_ == roots.next_string() ||
+ *name_ == roots.species_symbol() || *name_ == roots.iterator_symbol() ||
+ *name_ == roots.resolve_string() || *name_ == roots.then_string()) {
+ InternalUpdateProtector();
+ }
+}
+
+int LookupIterator::descriptor_number() const {
+ DCHECK(!IsElement());
+ DCHECK(has_property_);
+ DCHECK(holder_->HasFastProperties());
+ return number_;
+}
+
+int LookupIterator::dictionary_entry() const {
+ DCHECK(!IsElement());
+ DCHECK(has_property_);
+ DCHECK(!holder_->HasFastProperties());
+ return number_;
+}
+
+LookupIterator::Configuration LookupIterator::ComputeConfiguration(
+ Configuration configuration, Handle<Name> name) {
+ return name->IsPrivate() ? OWN_SKIP_INTERCEPTOR : configuration;
+}
+
+Handle<JSReceiver> LookupIterator::GetRoot(Isolate* isolate,
+ Handle<Object> receiver,
+ uint32_t index) {
+ if (receiver->IsJSReceiver()) return Handle<JSReceiver>::cast(receiver);
+ return GetRootForNonJSReceiver(isolate, receiver, index);
+}
+
+template <class T>
+Handle<T> LookupIterator::GetStoreTarget() const {
+ DCHECK(receiver_->IsJSReceiver());
+ if (receiver_->IsJSGlobalProxy()) {
+ Map map = JSGlobalProxy::cast(*receiver_).map();
+ if (map.has_hidden_prototype()) {
+ return handle(JSGlobalObject::cast(map.prototype()), isolate_);
+ }
+ }
+ return Handle<T>::cast(receiver_);
+}
+
+template <bool is_element>
+InterceptorInfo LookupIterator::GetInterceptor(JSObject holder) {
+ return is_element ? holder.GetIndexedInterceptor()
+ : holder.GetNamedInterceptor();
+}
+
+inline Handle<InterceptorInfo> LookupIterator::GetInterceptor() const {
+ DCHECK_EQ(INTERCEPTOR, state_);
+ InterceptorInfo result =
+ IsElement() ? GetInterceptor<true>(JSObject::cast(*holder_))
+ : GetInterceptor<false>(JSObject::cast(*holder_));
+ return handle(result, isolate_);
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_OBJECTS_LOOKUP_INL_H_
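The `PropertyOrElement` helpers above hinge on one rule: a key that is a canonical array index takes the element path (with the original name cached on the iterator), while everything else takes the named path. The sketch below paraphrases that classification; the digit rules are an illustrative approximation of `Name::AsArrayIndex`, not the exact V8 implementation.

```cpp
#include <cstdint>
#include <iostream>
#include <string>

// Approximate classification: a canonical array index is a base-10 integer
// in [0, 2^32 - 2] with no leading zeros. kMaxUInt32 is excluded because it
// is the array length cap, not a valid index.
bool AsArrayIndex(const std::string& key, uint32_t* index) {
  if (key.empty() || key.size() > 10) return false;
  if (key.size() > 1 && key[0] == '0') return false;  // "01" is a name
  uint64_t value = 0;
  for (char c : key) {
    if (c < '0' || c > '9') return false;
    value = value * 10 + static_cast<uint64_t>(c - '0');
  }
  if (value >= 0xFFFFFFFFull) return false;  // kMaxUInt32 isn't a valid index
  *index = static_cast<uint32_t>(value);
  return true;
}

int main() {
  for (const std::string key : {"0", "42", "01", "4294967295", "length"}) {
    uint32_t index;
    if (AsArrayIndex(key, &index))
      std::cout << key << " -> element " << index << "\n";
    else
      std::cout << key << " -> named property\n";
  }
}
```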
diff --git a/deps/v8/src/objects/lookup.cc b/deps/v8/src/objects/lookup.cc
new file mode 100644
index 0000000000..744cf67482
--- /dev/null
+++ b/deps/v8/src/objects/lookup.cc
@@ -0,0 +1,1215 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/objects/lookup.h"
+
+#include "src/deoptimizer/deoptimizer.h"
+#include "src/execution/isolate-inl.h"
+#include "src/init/bootstrapper.h"
+#include "src/logging/counters.h"
+#include "src/objects/elements.h"
+#include "src/objects/field-type.h"
+#include "src/objects/hash-table-inl.h"
+#include "src/objects/heap-number-inl.h"
+#include "src/objects/struct-inl.h"
+
+namespace v8 {
+namespace internal {
+
+// static
+LookupIterator LookupIterator::PropertyOrElement(
+ Isolate* isolate, Handle<Object> receiver, Handle<Object> key,
+ bool* success, Handle<JSReceiver> holder, Configuration configuration) {
+ uint32_t index = 0;
+ if (key->ToArrayIndex(&index)) {
+ *success = true;
+ return LookupIterator(isolate, receiver, index, holder, configuration);
+ }
+
+ Handle<Name> name;
+ *success = Object::ToName(isolate, key).ToHandle(&name);
+ if (!*success) {
+ DCHECK(isolate->has_pending_exception());
+ // Return an unusable dummy.
+ return LookupIterator(isolate, receiver,
+ isolate->factory()->empty_string());
+ }
+
+ if (name->AsArrayIndex(&index)) {
+ LookupIterator it(isolate, receiver, index, holder, configuration);
+ // Here we try to avoid having to rebuild the string later
+ // by storing it on the indexed LookupIterator.
+ it.name_ = name;
+ return it;
+ }
+
+ return LookupIterator(receiver, name, holder, configuration);
+}
+
+// static
+LookupIterator LookupIterator::PropertyOrElement(Isolate* isolate,
+ Handle<Object> receiver,
+ Handle<Object> key,
+ bool* success,
+ Configuration configuration) {
+ // TODO(mslekova): come up with better way to avoid duplication
+ uint32_t index = 0;
+ if (key->ToArrayIndex(&index)) {
+ *success = true;
+ return LookupIterator(isolate, receiver, index, configuration);
+ }
+
+ Handle<Name> name;
+ *success = Object::ToName(isolate, key).ToHandle(&name);
+ if (!*success) {
+ DCHECK(isolate->has_pending_exception());
+ // Return an unusable dummy.
+ return LookupIterator(isolate, receiver,
+ isolate->factory()->empty_string());
+ }
+
+ if (name->AsArrayIndex(&index)) {
+ LookupIterator it(isolate, receiver, index, configuration);
+ // Here we try to avoid having to rebuild the string later
+ // by storing it on the indexed LookupIterator.
+ it.name_ = name;
+ return it;
+ }
+
+ return LookupIterator(isolate, receiver, name, configuration);
+}
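This overload's `bool* success` convention deserves a note: `Object::ToName` can throw, but the factory must still return an iterator, so it hands back an unusable dummy and flags the failure. Below is a self-contained toy of the same calling convention; `ToyIterator` and `MakeIterator` are invented names.

```cpp
#include <iostream>
#include <string>

// Toy model of the |success| out-parameter convention: when key conversion
// fails (ToName can throw), the factory still has to return something, so it
// returns an unusable dummy and sets *success to false. Callers must check
// the flag before using the result.
struct ToyIterator {
  std::string name;
  bool usable;
};

ToyIterator MakeIterator(const std::string& raw_key, bool* success) {
  if (raw_key.empty()) {  // stand-in for "ToName threw an exception"
    *success = false;
    return ToyIterator{"", false};  // dummy, like the empty_string() iterator
  }
  *success = true;
  return ToyIterator{raw_key, true};
}

int main() {
  bool success;
  ToyIterator it = MakeIterator("", &success);
  if (!success) std::cout << "conversion failed; propagate the exception\n";
  it = MakeIterator("x", &success);
  if (success) std::cout << "look up " << it.name << "\n";
}
```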
+
+// TODO(ishell): Consider removing this way of LookupIterator creation.
+// static
+LookupIterator LookupIterator::ForTransitionHandler(
+ Isolate* isolate, Handle<Object> receiver, Handle<Name> name,
+ Handle<Object> value, MaybeHandle<Map> maybe_transition_map) {
+ Handle<Map> transition_map;
+ if (!maybe_transition_map.ToHandle(&transition_map) ||
+ !transition_map->IsPrototypeValidityCellValid()) {
+ // This map is not a valid transition handler, so full lookup is required.
+ return LookupIterator(isolate, receiver, name);
+ }
+
+ PropertyDetails details = PropertyDetails::Empty();
+ bool has_property;
+ if (transition_map->is_dictionary_map()) {
+ details = PropertyDetails(kData, NONE, PropertyCellType::kNoCell);
+ has_property = false;
+ } else {
+ details = transition_map->GetLastDescriptorDetails();
+ has_property = true;
+ }
+#ifdef DEBUG
+ if (name->IsPrivate()) {
+ DCHECK_EQ(DONT_ENUM, details.attributes());
+ } else {
+ DCHECK_EQ(NONE, details.attributes());
+ }
+#endif
+ LookupIterator it(isolate, receiver, name, transition_map, details,
+ has_property);
+
+ if (!transition_map->is_dictionary_map()) {
+ int descriptor_number = transition_map->LastAdded();
+ Handle<Map> new_map =
+ Map::PrepareForDataProperty(isolate, transition_map, descriptor_number,
+ PropertyConstness::kConst, value);
+ // Reload information; this is a no-op if nothing changed.
+ it.property_details_ =
+ new_map->instance_descriptors().GetDetails(descriptor_number);
+ it.transition_ = new_map;
+ }
+ return it;
+}
+
+LookupIterator::LookupIterator(Isolate* isolate, Handle<Object> receiver,
+ Handle<Name> name, Handle<Map> transition_map,
+ PropertyDetails details, bool has_property)
+ : configuration_(DEFAULT),
+ state_(TRANSITION),
+ has_property_(has_property),
+ interceptor_state_(InterceptorState::kUninitialized),
+ property_details_(details),
+ isolate_(isolate),
+ name_(name),
+ transition_(transition_map),
+ receiver_(receiver),
+ initial_holder_(GetRoot(isolate, receiver)),
+ index_(kMaxUInt32),
+ number_(static_cast<uint32_t>(DescriptorArray::kNotFound)) {
+ holder_ = initial_holder_;
+}
+
+template <bool is_element>
+void LookupIterator::Start() {
+ DisallowHeapAllocation no_gc;
+
+ has_property_ = false;
+ state_ = NOT_FOUND;
+ holder_ = initial_holder_;
+
+ JSReceiver holder = *holder_;
+ Map map = holder.map();
+
+ state_ = LookupInHolder<is_element>(map, holder);
+ if (IsFound()) return;
+
+ NextInternal<is_element>(map, holder);
+}
+
+template void LookupIterator::Start<true>();
+template void LookupIterator::Start<false>();
+
+void LookupIterator::Next() {
+ DCHECK_NE(JSPROXY, state_);
+ DCHECK_NE(TRANSITION, state_);
+ DisallowHeapAllocation no_gc;
+ has_property_ = false;
+
+ JSReceiver holder = *holder_;
+ Map map = holder.map();
+
+ if (map.IsSpecialReceiverMap()) {
+ state_ = IsElement() ? LookupInSpecialHolder<true>(map, holder)
+ : LookupInSpecialHolder<false>(map, holder);
+ if (IsFound()) return;
+ }
+
+ IsElement() ? NextInternal<true>(map, holder)
+ : NextInternal<false>(map, holder);
+}
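`Start()` and `Next()` drive a small state machine over the prototype chain: probe the current holder and, if nothing is found, advance to the next holder until the chain ends. Stripped of interceptors, access checks, and proxies, the control flow reduces to the following self-contained sketch (`ToyObject` is invented).

```cpp
#include <iostream>
#include <map>
#include <string>

// Toy model of the Start()/NextInternal() loop: probe each holder in the
// prototype chain in turn; stop at the first hit (state_ == DATA) or report
// a miss once the chain is exhausted (state_ == NOT_FOUND).
struct ToyObject {
  std::map<std::string, int> properties;
  const ToyObject* prototype;
};

bool Lookup(const ToyObject* receiver, const std::string& name, int* out) {
  for (const ToyObject* holder = receiver; holder != nullptr;
       holder = holder->prototype) {          // NextHolder()
    auto it = holder->properties.find(name);  // LookupInHolder()
    if (it != holder->properties.end()) {     // found: DATA
      *out = it->second;
      return true;
    }
  }
  return false;  // NOT_FOUND
}

int main() {
  ToyObject proto{{{"x", 1}}, nullptr};
  ToyObject obj{{{"y", 2}}, &proto};
  int value;
  std::cout << (Lookup(&obj, "x", &value) ? value : -1) << "\n";  // 1
  std::cout << (Lookup(&obj, "z", &value) ? value : -1) << "\n";  // -1
}
```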
+
+template <bool is_element>
+void LookupIterator::NextInternal(Map map, JSReceiver holder) {
+ do {
+ JSReceiver maybe_holder = NextHolder(map);
+ if (maybe_holder.is_null()) {
+ if (interceptor_state_ == InterceptorState::kSkipNonMasking) {
+ RestartLookupForNonMaskingInterceptors<is_element>();
+ return;
+ }
+ state_ = NOT_FOUND;
+ if (holder != *holder_) holder_ = handle(holder, isolate_);
+ return;
+ }
+ holder = maybe_holder;
+ map = holder.map();
+ state_ = LookupInHolder<is_element>(map, holder);
+ } while (!IsFound());
+
+ holder_ = handle(holder, isolate_);
+}
+
+template <bool is_element>
+void LookupIterator::RestartInternal(InterceptorState interceptor_state) {
+ interceptor_state_ = interceptor_state;
+ property_details_ = PropertyDetails::Empty();
+ number_ = static_cast<uint32_t>(DescriptorArray::kNotFound);
+ Start<is_element>();
+}
+
+template void LookupIterator::RestartInternal<true>(InterceptorState);
+template void LookupIterator::RestartInternal<false>(InterceptorState);
+
+// static
+Handle<JSReceiver> LookupIterator::GetRootForNonJSReceiver(
+ Isolate* isolate, Handle<Object> receiver, uint32_t index) {
+ // Strings are the only primitives whose wrapper carries own properties
+ // (their elements) directly, so a wrapper only needs to be created for
+ // string receivers; all other cases can skip it.
+ if (receiver->IsString() &&
+ index < static_cast<uint32_t>(String::cast(*receiver).length())) {
+ // TODO(verwaest): Speed this up. Perhaps use a cached wrapper on the native
+ // context, ensuring that we don't leak it into JS?
+ Handle<JSFunction> constructor = isolate->string_function();
+ Handle<JSObject> result = isolate->factory()->NewJSObject(constructor);
+ Handle<JSValue>::cast(result)->set_value(*receiver);
+ return result;
+ }
+ auto root =
+ handle(receiver->GetPrototypeChainRootMap(isolate).prototype(), isolate);
+ if (root->IsNull(isolate)) {
+ isolate->PushStackTraceAndDie(reinterpret_cast<void*>(receiver->ptr()));
+ }
+ return Handle<JSReceiver>::cast(root);
+}
+
+Handle<Map> LookupIterator::GetReceiverMap() const {
+ if (receiver_->IsNumber()) return factory()->heap_number_map();
+ return handle(Handle<HeapObject>::cast(receiver_)->map(), isolate_);
+}
+
+bool LookupIterator::HasAccess() const {
+ DCHECK_EQ(ACCESS_CHECK, state_);
+ return isolate_->MayAccess(handle(isolate_->context(), isolate_),
+ GetHolder<JSObject>());
+}
+
+template <bool is_element>
+void LookupIterator::ReloadPropertyInformation() {
+ state_ = BEFORE_PROPERTY;
+ interceptor_state_ = InterceptorState::kUninitialized;
+ state_ = LookupInHolder<is_element>(holder_->map(), *holder_);
+ DCHECK(IsFound() || !holder_->HasFastProperties());
+}
+
+namespace {
+
+bool IsTypedArrayFunctionInAnyContext(Isolate* isolate, JSReceiver holder) {
+ static uint32_t context_slots[] = {
+#define TYPED_ARRAY_CONTEXT_SLOTS(Type, type, TYPE, ctype) \
+ Context::TYPE##_ARRAY_FUN_INDEX,
+
+ TYPED_ARRAYS(TYPED_ARRAY_CONTEXT_SLOTS)
+#undef TYPED_ARRAY_CONTEXT_SLOTS
+ };
+
+ if (!holder.IsJSFunction()) return false;
+
+ return std::any_of(
+ std::begin(context_slots), std::end(context_slots),
+ [=](uint32_t slot) { return isolate->IsInAnyContext(holder, slot); });
+}
+
+} // namespace
+
+void LookupIterator::InternalUpdateProtector() {
+ if (isolate_->bootstrapper()->IsActive()) return;
+
+ ReadOnlyRoots roots(heap());
+ if (*name_ == roots.constructor_string()) {
+ if (!isolate_->IsArraySpeciesLookupChainIntact() &&
+ !isolate_->IsPromiseSpeciesLookupChainIntact() &&
+ !isolate_->IsRegExpSpeciesLookupChainIntact() &&
+ !isolate_->IsTypedArraySpeciesLookupChainIntact()) {
+ return;
+ }
+ // Setting the constructor property could change an instance's @@species.
+ if (holder_->IsJSArray()) {
+ if (!isolate_->IsArraySpeciesLookupChainIntact()) return;
+ isolate_->CountUsage(
+ v8::Isolate::UseCounterFeature::kArrayInstanceConstructorModified);
+ isolate_->InvalidateArraySpeciesProtector();
+ return;
+ } else if (holder_->IsJSPromise()) {
+ if (!isolate_->IsPromiseSpeciesLookupChainIntact()) return;
+ isolate_->InvalidatePromiseSpeciesProtector();
+ return;
+ } else if (holder_->IsJSRegExp()) {
+ if (!isolate_->IsRegExpSpeciesLookupChainIntact()) return;
+ isolate_->InvalidateRegExpSpeciesProtector();
+ return;
+ } else if (holder_->IsJSTypedArray()) {
+ if (!isolate_->IsTypedArraySpeciesLookupChainIntact()) return;
+ isolate_->InvalidateTypedArraySpeciesProtector();
+ return;
+ }
+ if (holder_->map().is_prototype_map()) {
+ DisallowHeapAllocation no_gc;
+ // Setting the constructor of any prototype with the @@species protector
+ // (of any realm) also needs to invalidate the protector.
+ // For typed arrays, we check the prototype of this holder, since
+ // TypedArrays have a different prototype for each element type, and
+ // those prototypes all point to the same TYPED_ARRAY_PROTOTYPE parent.
+ if (isolate_->IsInAnyContext(*holder_,
+ Context::INITIAL_ARRAY_PROTOTYPE_INDEX)) {
+ if (!isolate_->IsArraySpeciesLookupChainIntact()) return;
+ isolate_->CountUsage(
+ v8::Isolate::UseCounterFeature::kArrayPrototypeConstructorModified);
+ isolate_->InvalidateArraySpeciesProtector();
+ } else if (isolate_->IsInAnyContext(*holder_,
+ Context::PROMISE_PROTOTYPE_INDEX)) {
+ if (!isolate_->IsPromiseSpeciesLookupChainIntact()) return;
+ isolate_->InvalidatePromiseSpeciesProtector();
+ } else if (isolate_->IsInAnyContext(*holder_,
+ Context::REGEXP_PROTOTYPE_INDEX)) {
+ if (!isolate_->IsRegExpSpeciesLookupChainIntact()) return;
+ isolate_->InvalidateRegExpSpeciesProtector();
+ } else if (isolate_->IsInAnyContext(
+ holder_->map().prototype(),
+ Context::TYPED_ARRAY_PROTOTYPE_INDEX)) {
+ if (!isolate_->IsTypedArraySpeciesLookupChainIntact()) return;
+ isolate_->InvalidateTypedArraySpeciesProtector();
+ }
+ }
+ } else if (*name_ == roots.next_string()) {
+ if (isolate_->IsInAnyContext(
+ *holder_, Context::INITIAL_ARRAY_ITERATOR_PROTOTYPE_INDEX)) {
+ // Setting the next property of %ArrayIteratorPrototype% also needs to
+ // invalidate the array iterator protector.
+ if (!isolate_->IsArrayIteratorLookupChainIntact()) return;
+ isolate_->InvalidateArrayIteratorProtector();
+ } else if (isolate_->IsInAnyContext(
+ *holder_, Context::INITIAL_MAP_ITERATOR_PROTOTYPE_INDEX)) {
+ if (!isolate_->IsMapIteratorLookupChainIntact()) return;
+ isolate_->InvalidateMapIteratorProtector();
+ } else if (isolate_->IsInAnyContext(
+ *holder_, Context::INITIAL_SET_ITERATOR_PROTOTYPE_INDEX)) {
+ if (!isolate_->IsSetIteratorLookupChainIntact()) return;
+ isolate_->InvalidateSetIteratorProtector();
+ } else if (isolate_->IsInAnyContext(
+ *receiver_,
+ Context::INITIAL_STRING_ITERATOR_PROTOTYPE_INDEX)) {
+ // Setting the next property of %StringIteratorPrototype% invalidates the
+ // string iterator protector.
+ if (!isolate_->IsStringIteratorLookupChainIntact()) return;
+ isolate_->InvalidateStringIteratorProtector();
+ }
+ } else if (*name_ == roots.species_symbol()) {
+ if (!isolate_->IsArraySpeciesLookupChainIntact() &&
+ !isolate_->IsPromiseSpeciesLookupChainIntact() &&
+ !isolate_->IsRegExpSpeciesLookupChainIntact() &&
+ !isolate_->IsTypedArraySpeciesLookupChainIntact()) {
+ return;
+ }
+ // Setting the Symbol.species property of any Array, Promise, RegExp or
+ // TypedArray constructor invalidates the @@species protector.
+ if (isolate_->IsInAnyContext(*holder_, Context::ARRAY_FUNCTION_INDEX)) {
+ if (!isolate_->IsArraySpeciesLookupChainIntact()) return;
+ isolate_->CountUsage(
+ v8::Isolate::UseCounterFeature::kArraySpeciesModified);
+ isolate_->InvalidateArraySpeciesProtector();
+ } else if (isolate_->IsInAnyContext(*holder_,
+ Context::PROMISE_FUNCTION_INDEX)) {
+ if (!isolate_->IsPromiseSpeciesLookupChainIntact()) return;
+ isolate_->InvalidatePromiseSpeciesProtector();
+ } else if (isolate_->IsInAnyContext(*holder_,
+ Context::REGEXP_FUNCTION_INDEX)) {
+ if (!isolate_->IsRegExpSpeciesLookupChainIntact()) return;
+ isolate_->InvalidateRegExpSpeciesProtector();
+ } else if (IsTypedArrayFunctionInAnyContext(isolate_, *holder_)) {
+ if (!isolate_->IsTypedArraySpeciesLookupChainIntact()) return;
+ isolate_->InvalidateTypedArraySpeciesProtector();
+ }
+ } else if (*name_ == roots.is_concat_spreadable_symbol()) {
+ if (!isolate_->IsIsConcatSpreadableLookupChainIntact()) return;
+ isolate_->InvalidateIsConcatSpreadableProtector();
+ } else if (*name_ == roots.iterator_symbol()) {
+ if (holder_->IsJSArray()) {
+ if (!isolate_->IsArrayIteratorLookupChainIntact()) return;
+ isolate_->InvalidateArrayIteratorProtector();
+ } else if (isolate_->IsInAnyContext(
+ *holder_, Context::INITIAL_ITERATOR_PROTOTYPE_INDEX)) {
+ if (isolate_->IsMapIteratorLookupChainIntact()) {
+ isolate_->InvalidateMapIteratorProtector();
+ }
+ if (isolate_->IsSetIteratorLookupChainIntact()) {
+ isolate_->InvalidateSetIteratorProtector();
+ }
+ } else if (isolate_->IsInAnyContext(*holder_,
+ Context::INITIAL_SET_PROTOTYPE_INDEX)) {
+ if (!isolate_->IsSetIteratorLookupChainIntact()) return;
+ isolate_->InvalidateSetIteratorProtector();
+ } else if (isolate_->IsInAnyContext(
+ *receiver_, Context::INITIAL_STRING_PROTOTYPE_INDEX)) {
+ // Setting the Symbol.iterator property of String.prototype invalidates
+ // the string iterator protector. Symbol.iterator can also be set on a
+ // String wrapper, but not on a primitive string; we only support the
+ // protector for primitive strings.
+ if (!isolate_->IsStringIteratorLookupChainIntact()) return;
+ isolate_->InvalidateStringIteratorProtector();
+ }
+ } else if (*name_ == roots.resolve_string()) {
+ if (!isolate_->IsPromiseResolveLookupChainIntact()) return;
+ // Setting the "resolve" property on any %Promise% intrinsic object
+ // invalidates the Promise.resolve protector.
+ if (isolate_->IsInAnyContext(*holder_, Context::PROMISE_FUNCTION_INDEX)) {
+ isolate_->InvalidatePromiseResolveProtector();
+ }
+ } else if (*name_ == roots.then_string()) {
+ if (!isolate_->IsPromiseThenLookupChainIntact()) return;
+ // Setting the "then" property on any JSPromise instance or on the
+ // initial %PromisePrototype% invalidates the Promise#then protector.
+ // Also setting the "then" property on the initial %ObjectPrototype%
+ // invalidates the Promise#then protector, since we use this protector
+ // to guard the fast-path in AsyncGeneratorResolve, where we can skip
+ // the ResolvePromise step and go directly to FulfillPromise if we
+ // know that the Object.prototype doesn't contain a "then" method.
+ if (holder_->IsJSPromise() ||
+ isolate_->IsInAnyContext(*holder_,
+ Context::INITIAL_OBJECT_PROTOTYPE_INDEX) ||
+ isolate_->IsInAnyContext(*holder_, Context::PROMISE_PROTOTYPE_INDEX)) {
+ isolate_->InvalidatePromiseThenProtector();
+ }
+ }
+}
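`InternalUpdateProtector` implements one half of V8's "protector" pattern: a one-way flag guards a global invariant (e.g. "no one has touched `Array.prototype[Symbol.iterator]`"), fast paths check it cheaply, and any store that might break the invariant invalidates it permanently. A self-contained model of the pattern, with invented names:

```cpp
#include <iostream>

// Toy protector: a one-way boolean guarding a global invariant. Fast paths
// consult it cheaply; any mutation that could break the invariant flips it
// once, permanently routing callers to the slow path.
class ToyProtector {
 public:
  bool IsIntact() const { return intact_; }
  void Invalidate() { intact_ = false; }  // one-way; never re-armed

 private:
  bool intact_ = true;
};

ToyProtector array_iterator_protector;

int SumFast() { return 42; }  // hypothetical specialized fast path
int SumSlow() { return 42; }  // hypothetical generic, monkey-patch-safe path

int Sum() {
  // Mirrors the callers of InternalUpdateProtector: check once, and take
  // the fast path only while the invariant still holds.
  return array_iterator_protector.IsIntact() ? SumFast() : SumSlow();
}

int main() {
  std::cout << Sum() << "\n";
  array_iterator_protector.Invalidate();  // e.g. someone redefined .next
  std::cout << Sum() << "\n";
}
```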
+
+void LookupIterator::PrepareForDataProperty(Handle<Object> value) {
+ DCHECK(state_ == DATA || state_ == ACCESSOR);
+ DCHECK(HolderIsReceiverOrHiddenPrototype());
+
+ Handle<JSReceiver> holder = GetHolder<JSReceiver>();
+ // JSProxy does not have fast properties so we do an early return.
+ DCHECK_IMPLIES(holder->IsJSProxy(), !holder->HasFastProperties());
+ DCHECK_IMPLIES(holder->IsJSProxy(), name()->IsPrivate());
+ if (holder->IsJSProxy()) return;
+
+ Handle<JSObject> holder_obj = Handle<JSObject>::cast(holder);
+
+ if (IsElement()) {
+ ElementsKind kind = holder_obj->GetElementsKind();
+ ElementsKind to = value->OptimalElementsKind();
+ if (IsHoleyElementsKind(kind)) to = GetHoleyElementsKind(to);
+ to = GetMoreGeneralElementsKind(kind, to);
+
+ if (kind != to) {
+ JSObject::TransitionElementsKind(holder_obj, to);
+ }
+
+ // Copy the backing store if it is copy-on-write.
+ if (IsSmiOrObjectElementsKind(to) || IsSealedElementsKind(to)) {
+ JSObject::EnsureWritableFastElements(holder_obj);
+ }
+ return;
+ }
+
+ if (holder_obj->IsJSGlobalObject()) {
+ Handle<GlobalDictionary> dictionary(
+ JSGlobalObject::cast(*holder_obj).global_dictionary(), isolate());
+ Handle<PropertyCell> cell(dictionary->CellAt(dictionary_entry()),
+ isolate());
+ property_details_ = cell->property_details();
+ PropertyCell::PrepareForValue(isolate(), dictionary, dictionary_entry(),
+ value, property_details_);
+ return;
+ }
+ if (!holder_obj->HasFastProperties()) return;
+
+ PropertyConstness new_constness = PropertyConstness::kConst;
+ if (constness() == PropertyConstness::kConst) {
+ DCHECK_EQ(kData, property_details_.kind());
+ // Check whether the current value matches the new value; if not, the
+ // property must become mutable.
+ if (!IsConstFieldValueEqualTo(*value))
+ new_constness = PropertyConstness::kMutable;
+ }
+
+ Handle<Map> old_map(holder_obj->map(), isolate_);
+ Handle<Map> new_map = Map::PrepareForDataProperty(
+ isolate(), old_map, descriptor_number(), new_constness, value);
+
+ if (old_map.is_identical_to(new_map)) {
+ // Update the property details if the representation was None.
+ if (constness() != new_constness || representation().IsNone()) {
+ property_details_ =
+ new_map->instance_descriptors().GetDetails(descriptor_number());
+ }
+ return;
+ }
+
+ JSObject::MigrateToMap(holder_obj, new_map);
+ ReloadPropertyInformation<false>();
+}
+
+void LookupIterator::ReconfigureDataProperty(Handle<Object> value,
+ PropertyAttributes attributes) {
+ DCHECK(state_ == DATA || state_ == ACCESSOR);
+ DCHECK(HolderIsReceiverOrHiddenPrototype());
+
+ Handle<JSReceiver> holder = GetHolder<JSReceiver>();
+
+ // Property details can never change for private properties.
+ if (holder->IsJSProxy()) {
+ DCHECK(name()->IsPrivate());
+ return;
+ }
+
+ Handle<JSObject> holder_obj = Handle<JSObject>::cast(holder);
+ if (IsElement()) {
+ DCHECK(!holder_obj->HasTypedArrayElements());
+ DCHECK(attributes != NONE || !holder_obj->HasFastElements());
+ Handle<FixedArrayBase> elements(holder_obj->elements(), isolate());
+ holder_obj->GetElementsAccessor()->Reconfigure(holder_obj, elements,
+ number_, value, attributes);
+ ReloadPropertyInformation<true>();
+ } else if (holder_obj->HasFastProperties()) {
+ Handle<Map> old_map(holder_obj->map(), isolate_);
+ Handle<Map> new_map = Map::ReconfigureExistingProperty(
+ isolate_, old_map, descriptor_number(), i::kData, attributes);
+ // Force mutable to avoid changing constant value by reconfiguring
+ // kData -> kAccessor -> kData.
+ new_map =
+ Map::PrepareForDataProperty(isolate(), new_map, descriptor_number(),
+ PropertyConstness::kMutable, value);
+ JSObject::MigrateToMap(holder_obj, new_map);
+ ReloadPropertyInformation<false>();
+ }
+
+ if (!IsElement() && !holder_obj->HasFastProperties()) {
+ PropertyDetails details(kData, attributes, PropertyCellType::kMutable);
+ if (holder_obj->map().is_prototype_map() &&
+ (property_details_.attributes() & READ_ONLY) == 0 &&
+ (attributes & READ_ONLY) != 0) {
+ // Invalidate prototype validity cell when a property is reconfigured
+ // from writable to read-only as this may invalidate transitioning store
+ // IC handlers.
+ JSObject::InvalidatePrototypeChains(holder->map());
+ }
+ if (holder_obj->IsJSGlobalObject()) {
+ Handle<GlobalDictionary> dictionary(
+ JSGlobalObject::cast(*holder_obj).global_dictionary(), isolate());
+
+ Handle<PropertyCell> cell = PropertyCell::PrepareForValue(
+ isolate(), dictionary, dictionary_entry(), value, details);
+ cell->set_value(*value);
+ property_details_ = cell->property_details();
+ } else {
+ Handle<NameDictionary> dictionary(holder_obj->property_dictionary(),
+ isolate());
+ PropertyDetails original_details =
+ dictionary->DetailsAt(dictionary_entry());
+ int enumeration_index = original_details.dictionary_index();
+ DCHECK_GT(enumeration_index, 0);
+ details = details.set_index(enumeration_index);
+ dictionary->SetEntry(isolate(), dictionary_entry(), *name(), *value,
+ details);
+ property_details_ = details;
+ }
+ state_ = DATA;
+ }
+
+ WriteDataValue(value, true);
+
+#if VERIFY_HEAP
+ if (FLAG_verify_heap) {
+ holder->HeapObjectVerify(isolate());
+ }
+#endif
+}
+
+// Can only be called when the receiver is a JSObject. JSProxy has to be handled
+// via a trap. Adding properties to primitive values is not observable.
+void LookupIterator::PrepareTransitionToDataProperty(
+ Handle<JSReceiver> receiver, Handle<Object> value,
+ PropertyAttributes attributes, StoreOrigin store_origin) {
+ DCHECK_IMPLIES(receiver->IsJSProxy(), name()->IsPrivate());
+ DCHECK(receiver.is_identical_to(GetStoreTarget<JSReceiver>()));
+ if (state_ == TRANSITION) return;
+
+ if (!IsElement() && name()->IsPrivate()) {
+ attributes = static_cast<PropertyAttributes>(attributes | DONT_ENUM);
+ }
+
+ DCHECK(state_ != LookupIterator::ACCESSOR ||
+ (GetAccessors()->IsAccessorInfo() &&
+ AccessorInfo::cast(*GetAccessors()).is_special_data_property()));
+ DCHECK_NE(INTEGER_INDEXED_EXOTIC, state_);
+ DCHECK(state_ == NOT_FOUND || !HolderIsReceiverOrHiddenPrototype());
+
+ Handle<Map> map(receiver->map(), isolate_);
+
+ // Dictionary maps can always have additional data properties.
+ if (map->is_dictionary_map()) {
+ state_ = TRANSITION;
+ if (map->IsJSGlobalObjectMap()) {
+ // Install a property cell.
+ Handle<JSGlobalObject> global = Handle<JSGlobalObject>::cast(receiver);
+ int entry;
+ Handle<PropertyCell> cell = JSGlobalObject::EnsureEmptyPropertyCell(
+ global, name(), PropertyCellType::kUninitialized, &entry);
+ Handle<GlobalDictionary> dictionary(global->global_dictionary(),
+ isolate_);
+ DCHECK(cell->value().IsTheHole(isolate_));
+ DCHECK(!value->IsTheHole(isolate_));
+ transition_ = cell;
+ // Assign an enumeration index to the property and bump the dictionary's
+ // next enumeration index.
+ int index = dictionary->NextEnumerationIndex();
+ dictionary->SetNextEnumerationIndex(index + 1);
+ property_details_ = PropertyDetails(
+ kData, attributes, PropertyCellType::kUninitialized, index);
+ PropertyCellType new_type =
+ PropertyCell::UpdatedType(isolate(), cell, value, property_details_);
+ property_details_ = property_details_.set_cell_type(new_type);
+ cell->set_property_details(property_details_);
+ number_ = entry;
+ has_property_ = true;
+ } else {
+ // Don't set enumeration index (it will be set during value store).
+ property_details_ =
+ PropertyDetails(kData, attributes, PropertyCellType::kNoCell);
+ transition_ = map;
+ }
+ return;
+ }
+
+ Handle<Map> transition =
+ Map::TransitionToDataProperty(isolate_, map, name_, value, attributes,
+ PropertyConstness::kConst, store_origin);
+ state_ = TRANSITION;
+ transition_ = transition;
+
+ if (transition->is_dictionary_map()) {
+ // Don't set enumeration index (it will be set during value store).
+ property_details_ =
+ PropertyDetails(kData, attributes, PropertyCellType::kNoCell);
+ } else {
+ property_details_ = transition->GetLastDescriptorDetails();
+ has_property_ = true;
+ }
+}
+
+void LookupIterator::ApplyTransitionToDataProperty(
+ Handle<JSReceiver> receiver) {
+ DCHECK_EQ(TRANSITION, state_);
+
+ DCHECK(receiver.is_identical_to(GetStoreTarget<JSReceiver>()));
+ holder_ = receiver;
+ if (receiver->IsJSGlobalObject()) {
+ JSObject::InvalidatePrototypeChains(receiver->map());
+ state_ = DATA;
+ return;
+ }
+ Handle<Map> transition = transition_map();
+ bool simple_transition = transition->GetBackPointer() == receiver->map();
+
+ if (configuration_ == DEFAULT && !transition->is_dictionary_map() &&
+ !transition->IsPrototypeValidityCellValid()) {
+ // Only LookupIterator instances with DEFAULT (full prototype chain)
+ // configuration can produce valid transition handler maps.
+ Handle<Object> validity_cell =
+ Map::GetOrCreatePrototypeChainValidityCell(transition, isolate());
+ transition->set_prototype_validity_cell(*validity_cell);
+ }
+
+ if (!receiver->IsJSProxy()) {
+ JSObject::MigrateToMap(Handle<JSObject>::cast(receiver), transition);
+ }
+
+ if (simple_transition) {
+ int number = transition->LastAdded();
+ number_ = static_cast<uint32_t>(number);
+ property_details_ = transition->GetLastDescriptorDetails();
+ state_ = DATA;
+ } else if (receiver->map().is_dictionary_map()) {
+ Handle<NameDictionary> dictionary(receiver->property_dictionary(),
+ isolate_);
+ int entry;
+ if (receiver->map().is_prototype_map() && receiver->IsJSObject()) {
+ JSObject::InvalidatePrototypeChains(receiver->map());
+ }
+ dictionary = NameDictionary::Add(isolate(), dictionary, name(),
+ isolate_->factory()->uninitialized_value(),
+ property_details_, &entry);
+ receiver->SetProperties(*dictionary);
+ // Reload details containing proper enumeration index value.
+ property_details_ = dictionary->DetailsAt(entry);
+ number_ = entry;
+ has_property_ = true;
+ state_ = DATA;
+
+ } else {
+ ReloadPropertyInformation<false>();
+ }
+}
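The `simple_transition` test above (`transition->GetBackPointer() == receiver->map()`) checks that the new map was reached by appending exactly one property to the old map, in which case the freshly added descriptor is `LastAdded()`. Below is a self-contained model of that back-pointer relationship; `ToyMap` is invented for illustration.

```cpp
#include <iostream>
#include <memory>
#include <string>
#include <vector>

// Toy model of a map transition: adding a property produces a new map whose
// back pointer is the old map. When new_map->back_pointer == old_map, the
// property is the last descriptor ("simple transition"), so its index is
// just descriptors.size() - 1, i.e. LastAdded().
struct ToyMap {
  std::vector<std::string> descriptors;  // property names, in order
  const ToyMap* back_pointer;
};

std::unique_ptr<ToyMap> TransitionToDataProperty(const ToyMap* old_map,
                                                 const std::string& name) {
  auto new_map = std::make_unique<ToyMap>();
  new_map->descriptors = old_map->descriptors;
  new_map->descriptors.push_back(name);  // appended => "last added"
  new_map->back_pointer = old_map;
  return new_map;
}

int main() {
  ToyMap root{{}, nullptr};
  std::unique_ptr<ToyMap> m = TransitionToDataProperty(&root, "x");
  bool simple_transition = (m->back_pointer == &root);
  std::cout << std::boolalpha << simple_transition << " last_added="
            << m->descriptors.back() << "\n";  // true last_added=x
}
```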
+
+void LookupIterator::Delete() {
+ Handle<JSReceiver> holder = Handle<JSReceiver>::cast(holder_);
+ if (IsElement()) {
+ Handle<JSObject> object = Handle<JSObject>::cast(holder);
+ ElementsAccessor* accessor = object->GetElementsAccessor();
+ accessor->Delete(object, number_);
+ } else {
+ DCHECK(!name()->IsPrivateName());
+ bool is_prototype_map = holder->map().is_prototype_map();
+ RuntimeCallTimerScope stats_scope(
+ isolate_, is_prototype_map
+ ? RuntimeCallCounterId::kPrototypeObject_DeleteProperty
+ : RuntimeCallCounterId::kObject_DeleteProperty);
+
+ PropertyNormalizationMode mode =
+ is_prototype_map ? KEEP_INOBJECT_PROPERTIES : CLEAR_INOBJECT_PROPERTIES;
+
+ if (holder->HasFastProperties()) {
+ JSObject::NormalizeProperties(Handle<JSObject>::cast(holder), mode, 0,
+ "DeletingProperty");
+ ReloadPropertyInformation<false>();
+ }
+ JSReceiver::DeleteNormalizedProperty(holder, number_);
+ if (holder->IsJSObject()) {
+ JSObject::ReoptimizeIfPrototype(Handle<JSObject>::cast(holder));
+ }
+ }
+ state_ = NOT_FOUND;
+}
+
+void LookupIterator::TransitionToAccessorProperty(
+ Handle<Object> getter, Handle<Object> setter,
+ PropertyAttributes attributes) {
+ DCHECK(!getter->IsNull(isolate_) || !setter->IsNull(isolate_));
+ // Can only be called when the receiver is a JSObject. JSProxy has to be
+ // handled via a trap. Adding properties to primitive values is not
+ // observable.
+ Handle<JSObject> receiver = GetStoreTarget<JSObject>();
+ if (!IsElement() && name()->IsPrivate()) {
+ attributes = static_cast<PropertyAttributes>(attributes | DONT_ENUM);
+ }
+
+ if (!IsElement() && !receiver->map().is_dictionary_map()) {
+ Handle<Map> old_map(receiver->map(), isolate_);
+
+ if (!holder_.is_identical_to(receiver)) {
+ holder_ = receiver;
+ state_ = NOT_FOUND;
+ } else if (state_ == INTERCEPTOR) {
+ LookupInRegularHolder<false>(*old_map, *holder_);
+ }
+ int descriptor =
+ IsFound() ? static_cast<int>(number_) : DescriptorArray::kNotFound;
+
+ Handle<Map> new_map = Map::TransitionToAccessorProperty(
+ isolate_, old_map, name_, descriptor, getter, setter, attributes);
+ bool simple_transition = new_map->GetBackPointer() == receiver->map();
+ JSObject::MigrateToMap(receiver, new_map);
+
+ if (simple_transition) {
+ int number = new_map->LastAdded();
+ number_ = static_cast<uint32_t>(number);
+ property_details_ = new_map->GetLastDescriptorDetails();
+ state_ = ACCESSOR;
+ return;
+ }
+
+ ReloadPropertyInformation<false>();
+ if (!new_map->is_dictionary_map()) return;
+ }
+
+ Handle<AccessorPair> pair;
+ if (state() == ACCESSOR && GetAccessors()->IsAccessorPair()) {
+ pair = Handle<AccessorPair>::cast(GetAccessors());
+ // If the component and attributes are identical, nothing has to be done.
+ if (pair->Equals(*getter, *setter)) {
+ if (property_details().attributes() == attributes) {
+ if (!IsElement()) JSObject::ReoptimizeIfPrototype(receiver);
+ return;
+ }
+ } else {
+ pair = AccessorPair::Copy(isolate(), pair);
+ pair->SetComponents(*getter, *setter);
+ }
+ } else {
+ pair = factory()->NewAccessorPair();
+ pair->SetComponents(*getter, *setter);
+ }
+
+ TransitionToAccessorPair(pair, attributes);
+
+#if VERIFY_HEAP
+ if (FLAG_verify_heap) {
+ receiver->JSObjectVerify(isolate());
+ }
+#endif
+}
+
+void LookupIterator::TransitionToAccessorPair(Handle<Object> pair,
+ PropertyAttributes attributes) {
+ Handle<JSObject> receiver = GetStoreTarget<JSObject>();
+ holder_ = receiver;
+
+ PropertyDetails details(kAccessor, attributes, PropertyCellType::kMutable);
+
+ if (IsElement()) {
+ // TODO(verwaest): Move code into the element accessor.
+ isolate_->CountUsage(v8::Isolate::kIndexAccessor);
+ Handle<NumberDictionary> dictionary = JSObject::NormalizeElements(receiver);
+
+ dictionary = NumberDictionary::Set(isolate_, dictionary, index_, pair,
+ receiver, details);
+ receiver->RequireSlowElements(*dictionary);
+
+ if (receiver->HasSlowArgumentsElements()) {
+ FixedArray parameter_map = FixedArray::cast(receiver->elements());
+ uint32_t length = parameter_map.length() - 2;
+ if (number_ < length) {
+ parameter_map.set(number_ + 2, ReadOnlyRoots(heap()).the_hole_value());
+ }
+ FixedArray::cast(receiver->elements()).set(1, *dictionary);
+ } else {
+ receiver->set_elements(*dictionary);
+ }
+
+ ReloadPropertyInformation<true>();
+ } else {
+ PropertyNormalizationMode mode = CLEAR_INOBJECT_PROPERTIES;
+ if (receiver->map().is_prototype_map()) {
+ JSObject::InvalidatePrototypeChains(receiver->map());
+ mode = KEEP_INOBJECT_PROPERTIES;
+ }
+
+ // Normalize object to make this operation simple.
+ JSObject::NormalizeProperties(receiver, mode, 0,
+ "TransitionToAccessorPair");
+
+ JSObject::SetNormalizedProperty(receiver, name_, pair, details);
+ JSObject::ReoptimizeIfPrototype(receiver);
+
+ ReloadPropertyInformation<false>();
+ }
+}
+
+bool LookupIterator::HolderIsReceiver() const {
+ DCHECK(has_property_ || state_ == INTERCEPTOR || state_ == JSPROXY);
+ // Optimization that only works if configuration_ is not mutable.
+ if (!check_prototype_chain()) return true;
+ return *receiver_ == *holder_;
+}
+
+bool LookupIterator::HolderIsReceiverOrHiddenPrototype() const {
+ DCHECK(has_property_ || state_ == INTERCEPTOR || state_ == JSPROXY);
+ // Optimization that only works if configuration_ is not mutable.
+ if (!check_prototype_chain()) return true;
+ DisallowHeapAllocation no_gc;
+ if (*receiver_ == *holder_) return true;
+ if (!receiver_->IsJSReceiver()) return false;
+ JSReceiver current = JSReceiver::cast(*receiver_);
+ JSReceiver object = *holder_;
+ if (!current.map().has_hidden_prototype()) return false;
+ // JSProxies do not occur as hidden prototypes.
+ if (object.IsJSProxy()) return false;
+ PrototypeIterator iter(isolate(), current, kStartAtPrototype,
+ PrototypeIterator::END_AT_NON_HIDDEN);
+ while (!iter.IsAtEnd()) {
+ if (iter.GetCurrent<JSReceiver>() == object) return true;
+ iter.Advance();
+ }
+ return false;
+}
+
+Handle<Object> LookupIterator::FetchValue() const {
+ Object result;
+ if (IsElement()) {
+ Handle<JSObject> holder = GetHolder<JSObject>();
+ ElementsAccessor* accessor = holder->GetElementsAccessor();
+ return accessor->Get(holder, number_);
+ } else if (holder_->IsJSGlobalObject()) {
+ Handle<JSGlobalObject> holder = GetHolder<JSGlobalObject>();
+ result = holder->global_dictionary().ValueAt(number_);
+ } else if (!holder_->HasFastProperties()) {
+ result = holder_->property_dictionary().ValueAt(number_);
+ } else if (property_details_.location() == kField) {
+ DCHECK_EQ(kData, property_details_.kind());
+ Handle<JSObject> holder = GetHolder<JSObject>();
+ FieldIndex field_index = FieldIndex::ForDescriptor(holder->map(), number_);
+ return JSObject::FastPropertyAt(holder, property_details_.representation(),
+ field_index);
+ } else {
+ result = holder_->map().instance_descriptors().GetStrongValue(number_);
+ }
+ return handle(result, isolate_);
+}
+
+bool LookupIterator::IsConstFieldValueEqualTo(Object value) const {
+ DCHECK(!IsElement());
+ DCHECK(holder_->HasFastProperties());
+ DCHECK_EQ(kField, property_details_.location());
+ DCHECK_EQ(PropertyConstness::kConst, property_details_.constness());
+ Handle<JSObject> holder = GetHolder<JSObject>();
+ FieldIndex field_index = FieldIndex::ForDescriptor(holder->map(), number_);
+ if (property_details_.representation().IsDouble()) {
+ if (!value.IsNumber()) return false;
+ uint64_t bits;
+ if (holder->IsUnboxedDoubleField(field_index)) {
+ bits = holder->RawFastDoublePropertyAsBitsAt(field_index);
+ } else {
+ Object current_value = holder->RawFastPropertyAt(field_index);
+ DCHECK(current_value.IsMutableHeapNumber());
+ bits = MutableHeapNumber::cast(current_value).value_as_bits();
+ }
+ // Use the bit representation of the double to check for the hole double,
+ // since manipulating the signaling NaN used for the hole in C++, e.g. with
+ // bit_cast or value(), will change its value on ia32 (the x87 stack is
+ // used to return values and stores to the stack silently clear the
+ // signaling bit).
+ if (bits == kHoleNanInt64) {
+ // Uninitialized double field.
+ return true;
+ }
+ return Object::SameNumberValue(bit_cast<double>(bits), value.Number());
+ } else {
+ Object current_value = holder->RawFastPropertyAt(field_index);
+ if (current_value.IsUninitialized(isolate()) || current_value == value) {
+ return true;
+ }
+ return current_value.IsNumber() && value.IsNumber() &&
+ Object::SameNumberValue(current_value.Number(), value.Number());
+ }
+}
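The bit-level comparison above exists because the hole is encoded as a signaling NaN: as a C++ `double` it compares unequal even to itself and can be silently quieted by the x87 FPU, while its bit pattern is stable. A self-contained illustration follows; the sentinel value below is made up (V8's real constant is `kHoleNanInt64`).

```cpp
#include <cstdint>
#include <cstring>
#include <iostream>

// Compare doubles by bit pattern, not by value: NaN != NaN as a double, and
// moving a signaling NaN through the x87 stack can quietly rewrite it, but
// its 64-bit pattern read via memcpy is stable and exact.
uint64_t DoubleToBits(double value) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof(bits));  // portable bit_cast
  return bits;
}

int main() {
  const uint64_t kToyHoleNan = 0x7FF7FFFFDEADBEEFull;  // made-up sentinel NaN
  double hole;
  std::memcpy(&hole, &kToyHoleNan, sizeof(hole));

  std::cout << std::boolalpha;
  std::cout << (hole == hole) << "\n";                       // false: NaN
  std::cout << (DoubleToBits(hole) == kToyHoleNan) << "\n";  // true
}
```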
+
+int LookupIterator::GetFieldDescriptorIndex() const {
+ DCHECK(has_property_);
+ DCHECK(holder_->HasFastProperties());
+ DCHECK_EQ(kField, property_details_.location());
+ DCHECK_EQ(kData, property_details_.kind());
+ return descriptor_number();
+}
+
+int LookupIterator::GetAccessorIndex() const {
+ DCHECK(has_property_);
+ DCHECK(holder_->HasFastProperties());
+ DCHECK_EQ(kDescriptor, property_details_.location());
+ DCHECK_EQ(kAccessor, property_details_.kind());
+ return descriptor_number();
+}
+
+Handle<Map> LookupIterator::GetFieldOwnerMap() const {
+ DCHECK(has_property_);
+ DCHECK(holder_->HasFastProperties());
+ DCHECK_EQ(kField, property_details_.location());
+ DCHECK(!IsElement());
+ Map holder_map = holder_->map();
+ return handle(holder_map.FindFieldOwner(isolate(), descriptor_number()),
+ isolate_);
+}
+
+FieldIndex LookupIterator::GetFieldIndex() const {
+ DCHECK(has_property_);
+ DCHECK(holder_->HasFastProperties());
+ DCHECK_EQ(kField, property_details_.location());
+ DCHECK(!IsElement());
+ return FieldIndex::ForDescriptor(holder_->map(), descriptor_number());
+}
+
+Handle<FieldType> LookupIterator::GetFieldType() const {
+ DCHECK(has_property_);
+ DCHECK(holder_->HasFastProperties());
+ DCHECK_EQ(kField, property_details_.location());
+ return handle(
+ holder_->map().instance_descriptors().GetFieldType(descriptor_number()),
+ isolate_);
+}
+
+Handle<PropertyCell> LookupIterator::GetPropertyCell() const {
+ DCHECK(!IsElement());
+ Handle<JSGlobalObject> holder = GetHolder<JSGlobalObject>();
+ return handle(holder->global_dictionary().CellAt(dictionary_entry()),
+ isolate_);
+}
+
+Handle<Object> LookupIterator::GetAccessors() const {
+ DCHECK_EQ(ACCESSOR, state_);
+ return FetchValue();
+}
+
+Handle<Object> LookupIterator::GetDataValue() const {
+ DCHECK_EQ(DATA, state_);
+ Handle<Object> value = FetchValue();
+ return value;
+}
+
+void LookupIterator::WriteDataValue(Handle<Object> value,
+ bool initializing_store) {
+ DCHECK_EQ(DATA, state_);
+ Handle<JSReceiver> holder = GetHolder<JSReceiver>();
+ if (IsElement()) {
+ Handle<JSObject> object = Handle<JSObject>::cast(holder);
+ ElementsAccessor* accessor = object->GetElementsAccessor();
+ accessor->Set(object, number_, *value);
+ } else if (holder->HasFastProperties()) {
+ if (property_details_.location() == kField) {
+ // For a PropertyConstness::kConst field, check that the existing value is
+ // equal to |value|.
+ DCHECK_IMPLIES(!initializing_store && property_details_.constness() ==
+ PropertyConstness::kConst,
+ IsConstFieldValueEqualTo(*value));
+ JSObject::cast(*holder).WriteToField(descriptor_number(),
+ property_details_, *value);
+ } else {
+ DCHECK_EQ(kDescriptor, property_details_.location());
+ DCHECK_EQ(PropertyConstness::kConst, property_details_.constness());
+ }
+ } else if (holder->IsJSGlobalObject()) {
+ GlobalDictionary dictionary =
+ JSGlobalObject::cast(*holder).global_dictionary();
+ dictionary.CellAt(dictionary_entry()).set_value(*value);
+ } else {
+ DCHECK_IMPLIES(holder->IsJSProxy(), name()->IsPrivate());
+ NameDictionary dictionary = holder->property_dictionary();
+ dictionary.ValueAtPut(dictionary_entry(), *value);
+ }
+}
+
+template <bool is_element>
+bool LookupIterator::SkipInterceptor(JSObject holder) {
+ auto info = GetInterceptor<is_element>(holder);
+ if (!is_element && name_->IsSymbol() && !info.can_intercept_symbols()) {
+ return true;
+ }
+ if (info.non_masking()) {
+ switch (interceptor_state_) {
+ case InterceptorState::kUninitialized:
+ interceptor_state_ = InterceptorState::kSkipNonMasking;
+ V8_FALLTHROUGH;
+ case InterceptorState::kSkipNonMasking:
+ return true;
+ case InterceptorState::kProcessNonMasking:
+ return false;
+ }
+ }
+ return interceptor_state_ == InterceptorState::kProcessNonMasking;
+}
+
+JSReceiver LookupIterator::NextHolder(Map map) {
+ DisallowHeapAllocation no_gc;
+ if (map.prototype() == ReadOnlyRoots(heap()).null_value()) {
+ return JSReceiver();
+ }
+ if (!check_prototype_chain() && !map.has_hidden_prototype()) {
+ return JSReceiver();
+ }
+ return JSReceiver::cast(map.prototype());
+}
+
+LookupIterator::State LookupIterator::NotFound(JSReceiver const holder) const {
+ DCHECK(!IsElement());
+ if (!holder.IsJSTypedArray() || !name_->IsString()) return NOT_FOUND;
+ return IsSpecialIndex(String::cast(*name_)) ? INTEGER_INDEXED_EXOTIC
+ : NOT_FOUND;
+}
+
+namespace {
+
+template <bool is_element>
+bool HasInterceptor(Map map) {
+ return is_element ? map.has_indexed_interceptor()
+ : map.has_named_interceptor();
+}
+
+} // namespace
+
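+// Lookup in a "special" holder (a proxy, a holder requiring access checks,
+// one with interceptors, or a global object) is a fall-through state
+// machine: each call resumes after the previously reported state, so a
+// single holder can report ACCESS_CHECK, then INTERCEPTOR, and only then
+// the property itself.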
+template <bool is_element>
+LookupIterator::State LookupIterator::LookupInSpecialHolder(
+ Map const map, JSReceiver const holder) {
+ STATIC_ASSERT(INTERCEPTOR == BEFORE_PROPERTY);
+ switch (state_) {
+ case NOT_FOUND:
+ if (map.IsJSProxyMap()) {
+ if (is_element || !name_->IsPrivate()) return JSPROXY;
+ }
+ if (map.is_access_check_needed()) {
+ if (is_element || !name_->IsPrivate()) return ACCESS_CHECK;
+ }
+ V8_FALLTHROUGH;
+ case ACCESS_CHECK:
+ if (check_interceptor() && HasInterceptor<is_element>(map) &&
+ !SkipInterceptor<is_element>(JSObject::cast(holder))) {
+ if (is_element || !name_->IsPrivate()) return INTERCEPTOR;
+ }
+ V8_FALLTHROUGH;
+ case INTERCEPTOR:
+ if (!is_element && map.IsJSGlobalObjectMap()) {
+ GlobalDictionary dict =
+ JSGlobalObject::cast(holder).global_dictionary();
+ int number = dict.FindEntry(isolate(), name_);
+ if (number == GlobalDictionary::kNotFound) return NOT_FOUND;
+ number_ = static_cast<uint32_t>(number);
+ PropertyCell cell = dict.CellAt(number_);
+ if (cell.value().IsTheHole(isolate_)) return NOT_FOUND;
+ property_details_ = cell.property_details();
+ has_property_ = true;
+ switch (property_details_.kind()) {
+ case v8::internal::kData:
+ return DATA;
+ case v8::internal::kAccessor:
+ return ACCESSOR;
+ }
+ }
+ return LookupInRegularHolder<is_element>(map, holder);
+ case ACCESSOR:
+ case DATA:
+ return NOT_FOUND;
+ case INTEGER_INDEXED_EXOTIC:
+ case JSPROXY:
+ case TRANSITION:
+ UNREACHABLE();
+ }
+ UNREACHABLE();
+}
+
+template <bool is_element>
+LookupIterator::State LookupIterator::LookupInRegularHolder(
+ Map const map, JSReceiver const holder) {
+ DisallowHeapAllocation no_gc;
+ if (interceptor_state_ == InterceptorState::kProcessNonMasking) {
+ return NOT_FOUND;
+ }
+
+ if (is_element) {
+ JSObject js_object = JSObject::cast(holder);
+ ElementsAccessor* accessor = js_object.GetElementsAccessor();
+ FixedArrayBase backing_store = js_object.elements();
+ number_ =
+ accessor->GetEntryForIndex(isolate_, js_object, backing_store, index_);
+ if (number_ == kMaxUInt32) {
+ return holder.IsJSTypedArray() ? INTEGER_INDEXED_EXOTIC : NOT_FOUND;
+ }
+ property_details_ = accessor->GetDetails(js_object, number_);
+ if (map.has_frozen_or_sealed_elements()) {
+ PropertyAttributes attrs = map.has_sealed_elements() ? SEALED : FROZEN;
+ property_details_ = property_details_.CopyAddAttributes(attrs);
+ }
+ } else if (!map.is_dictionary_map()) {
+ DescriptorArray descriptors = map.instance_descriptors();
+ int number = descriptors.SearchWithCache(isolate_, *name_, map);
+ if (number == DescriptorArray::kNotFound) return NotFound(holder);
+ number_ = static_cast<uint32_t>(number);
+ property_details_ = descriptors.GetDetails(number_);
+ } else {
+ DCHECK_IMPLIES(holder.IsJSProxy(), name()->IsPrivate());
+ NameDictionary dict = holder.property_dictionary();
+ int number = dict.FindEntry(isolate(), name_);
+ if (number == NameDictionary::kNotFound) return NotFound(holder);
+ number_ = static_cast<uint32_t>(number);
+ property_details_ = dict.DetailsAt(number_);
+ }
+ has_property_ = true;
+ switch (property_details_.kind()) {
+ case v8::internal::kData:
+ return DATA;
+ case v8::internal::kAccessor:
+ return ACCESSOR;
+ }
+
+ UNREACHABLE();
+}
+
+Handle<InterceptorInfo> LookupIterator::GetInterceptorForFailedAccessCheck()
+ const {
+ DCHECK_EQ(ACCESS_CHECK, state_);
+ DisallowHeapAllocation no_gc;
+ AccessCheckInfo access_check_info =
+ AccessCheckInfo::Get(isolate_, Handle<JSObject>::cast(holder_));
+ if (!access_check_info.is_null()) {
+ Object interceptor = IsElement() ? access_check_info.indexed_interceptor()
+ : access_check_info.named_interceptor();
+ if (interceptor != Object()) {
+ return handle(InterceptorInfo::cast(interceptor), isolate_);
+ }
+ }
+ return Handle<InterceptorInfo>();
+}
+
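+// A "cached" accessor property is an API getter whose FunctionTemplateInfo
+// names a private symbol under which the computed value is stored as a
+// plain data property. When such a name exists, the lookup restarts against
+// that symbol and must land in the DATA state.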
+bool LookupIterator::TryLookupCachedProperty() {
+ return state() == LookupIterator::ACCESSOR &&
+ GetAccessors()->IsAccessorPair() && LookupCachedProperty();
+}
+
+bool LookupIterator::LookupCachedProperty() {
+ DCHECK_EQ(state(), LookupIterator::ACCESSOR);
+ DCHECK(GetAccessors()->IsAccessorPair());
+
+ AccessorPair accessor_pair = AccessorPair::cast(*GetAccessors());
+ Handle<Object> getter(accessor_pair.getter(), isolate());
+ MaybeHandle<Name> maybe_name =
+ FunctionTemplateInfo::TryGetCachedPropertyName(isolate(), getter);
+ if (maybe_name.is_null()) return false;
+
+ // We have found a cached property! Modify the iterator accordingly.
+ name_ = maybe_name.ToHandleChecked();
+ Restart();
+ CHECK_EQ(state(), LookupIterator::DATA);
+ return true;
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/objects/lookup.h b/deps/v8/src/objects/lookup.h
new file mode 100644
index 0000000000..820b8ef9b0
--- /dev/null
+++ b/deps/v8/src/objects/lookup.h
@@ -0,0 +1,281 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_LOOKUP_H_
+#define V8_OBJECTS_LOOKUP_H_
+
+#include "src/common/globals.h"
+#include "src/execution/isolate.h"
+#include "src/heap/factory.h"
+#include "src/objects/descriptor-array.h"
+#include "src/objects/js-objects.h"
+#include "src/objects/map.h"
+#include "src/objects/objects.h"
+
+namespace v8 {
+namespace internal {
+
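+// LookupIterator walks a receiver and, depending on the configuration, its
+// prototype chain, surfacing access checks, interceptors, proxies and,
+// finally, data or accessor properties. A typical read loop (a sketch of
+// the common pattern; |isolate|, |object| and |name| are assumed handles):
+//
+//   LookupIterator it(isolate, object, name);
+//   for (; it.IsFound(); it.Next()) {
+//     switch (it.state()) {
+//       case LookupIterator::DATA:
+//         return it.GetDataValue();
+//       case LookupIterator::ACCESSOR:
+//         // ... invoke the getter from it.GetAccessors() ...
+//       default:
+//         break;
+//     }
+//   }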
+class V8_EXPORT_PRIVATE LookupIterator final {
+ public:
+ enum Configuration {
+ // Configuration bits.
+ kInterceptor = 1 << 0,
+ kPrototypeChain = 1 << 1,
+
+ // Convenience combinations of bits.
+ OWN_SKIP_INTERCEPTOR = 0,
+ OWN = kInterceptor,
+ PROTOTYPE_CHAIN_SKIP_INTERCEPTOR = kPrototypeChain,
+ PROTOTYPE_CHAIN = kPrototypeChain | kInterceptor,
+ DEFAULT = PROTOTYPE_CHAIN
+ };
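+  // For example, OWN consults interceptors on the receiver itself but does
+  // not walk the prototype chain, while PROTOTYPE_CHAIN_SKIP_INTERCEPTOR
+  // walks the prototype chain but bypasses interceptors.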
+
+ enum State {
+ ACCESS_CHECK,
+ INTEGER_INDEXED_EXOTIC,
+ INTERCEPTOR,
+ JSPROXY,
+ NOT_FOUND,
+ ACCESSOR,
+ DATA,
+ TRANSITION,
+ // Set state_ to BEFORE_PROPERTY to ensure that the next lookup will be a
+ // PROPERTY lookup.
+ BEFORE_PROPERTY = INTERCEPTOR
+ };
+
+ inline LookupIterator(Isolate* isolate, Handle<Object> receiver,
+ Handle<Name> name,
+ Configuration configuration = DEFAULT);
+
+ inline LookupIterator(Handle<Object> receiver, Handle<Name> name,
+ Handle<JSReceiver> holder,
+ Configuration configuration = DEFAULT);
+
+ inline LookupIterator(Isolate* isolate, Handle<Object> receiver,
+ Handle<Name> name, Handle<JSReceiver> holder,
+ Configuration configuration = DEFAULT);
+
+ inline LookupIterator(Isolate* isolate, Handle<Object> receiver,
+ uint32_t index, Configuration configuration = DEFAULT);
+
+ LookupIterator(Isolate* isolate, Handle<Object> receiver, uint32_t index,
+ Handle<JSReceiver> holder,
+ Configuration configuration = DEFAULT)
+ : configuration_(configuration),
+ interceptor_state_(InterceptorState::kUninitialized),
+ property_details_(PropertyDetails::Empty()),
+ isolate_(isolate),
+ receiver_(receiver),
+ initial_holder_(holder),
+ index_(index),
+ number_(static_cast<uint32_t>(DescriptorArray::kNotFound)) {
+ // kMaxUInt32 isn't a valid index.
+ DCHECK_NE(kMaxUInt32, index_);
+ Start<true>();
+ }
+
+ static inline LookupIterator PropertyOrElement(
+ Isolate* isolate, Handle<Object> receiver, Handle<Name> name,
+ Configuration configuration = DEFAULT);
+
+ static inline LookupIterator PropertyOrElement(
+ Isolate* isolate, Handle<Object> receiver, Handle<Name> name,
+ Handle<JSReceiver> holder, Configuration configuration = DEFAULT);
+
+ static LookupIterator PropertyOrElement(
+ Isolate* isolate, Handle<Object> receiver, Handle<Object> key,
+ bool* success, Handle<JSReceiver> holder,
+ Configuration configuration = DEFAULT);
+
+ static LookupIterator PropertyOrElement(
+ Isolate* isolate, Handle<Object> receiver, Handle<Object> key,
+ bool* success, Configuration configuration = DEFAULT);
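+  // The two overloads above convert |key| into either an array index or a
+  // name and set |*success| to false if that conversion fails (see the
+  // definitions in lookup.cc for the exact semantics).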
+
+ static LookupIterator ForTransitionHandler(
+ Isolate* isolate, Handle<Object> receiver, Handle<Name> name,
+ Handle<Object> value, MaybeHandle<Map> maybe_transition_map);
+
+ void Restart() {
+ InterceptorState state = InterceptorState::kUninitialized;
+ IsElement() ? RestartInternal<true>(state) : RestartInternal<false>(state);
+ }
+
+ Isolate* isolate() const { return isolate_; }
+ State state() const { return state_; }
+
+ Handle<Name> name() const {
+ DCHECK(!IsElement());
+ return name_;
+ }
+ inline Handle<Name> GetName();
+ uint32_t index() const { return index_; }
+
+ bool IsElement() const { return index_ != kMaxUInt32; }
+
+ bool IsFound() const { return state_ != NOT_FOUND; }
+ void Next();
+ void NotFound() {
+ has_property_ = false;
+ state_ = NOT_FOUND;
+ }
+
+ Heap* heap() const { return isolate_->heap(); }
+ Factory* factory() const { return isolate_->factory(); }
+ Handle<Object> GetReceiver() const { return receiver_; }
+
+ template <class T>
+ inline Handle<T> GetStoreTarget() const;
+ inline bool is_dictionary_holder() const;
+ inline Handle<Map> transition_map() const;
+ inline Handle<PropertyCell> transition_cell() const;
+ template <class T>
+ inline Handle<T> GetHolder() const;
+
+ bool HolderIsReceiver() const;
+ bool HolderIsReceiverOrHiddenPrototype() const;
+
+ bool check_prototype_chain() const {
+ return (configuration_ & kPrototypeChain) != 0;
+ }
+
+ /* ACCESS_CHECK */
+ bool HasAccess() const;
+
+ /* PROPERTY */
+ inline bool ExtendingNonExtensible(Handle<JSReceiver> receiver);
+ void PrepareForDataProperty(Handle<Object> value);
+ void PrepareTransitionToDataProperty(Handle<JSReceiver> receiver,
+ Handle<Object> value,
+ PropertyAttributes attributes,
+ StoreOrigin store_origin);
+ inline bool IsCacheableTransition();
+ void ApplyTransitionToDataProperty(Handle<JSReceiver> receiver);
+ void ReconfigureDataProperty(Handle<Object> value,
+ PropertyAttributes attributes);
+ void Delete();
+ void TransitionToAccessorProperty(Handle<Object> getter,
+ Handle<Object> setter,
+ PropertyAttributes attributes);
+ void TransitionToAccessorPair(Handle<Object> pair,
+ PropertyAttributes attributes);
+ PropertyDetails property_details() const {
+ DCHECK(has_property_);
+ return property_details_;
+ }
+ PropertyAttributes property_attributes() const {
+ return property_details().attributes();
+ }
+ bool IsConfigurable() const { return property_details().IsConfigurable(); }
+ bool IsReadOnly() const { return property_details().IsReadOnly(); }
+ bool IsEnumerable() const { return property_details().IsEnumerable(); }
+ Representation representation() const {
+ return property_details().representation();
+ }
+ PropertyLocation location() const { return property_details().location(); }
+ PropertyConstness constness() const { return property_details().constness(); }
+ Handle<Map> GetFieldOwnerMap() const;
+ FieldIndex GetFieldIndex() const;
+ Handle<FieldType> GetFieldType() const;
+ int GetFieldDescriptorIndex() const;
+ int GetAccessorIndex() const;
+ Handle<PropertyCell> GetPropertyCell() const;
+ Handle<Object> GetAccessors() const;
+ inline Handle<InterceptorInfo> GetInterceptor() const;
+ Handle<InterceptorInfo> GetInterceptorForFailedAccessCheck() const;
+ Handle<Object> GetDataValue() const;
+ void WriteDataValue(Handle<Object> value, bool initializing_store);
+ inline void UpdateProtector();
+
+  // Looks up a 'cached' private property for an accessor.
+  // If not found, returns false and leaves the LookupIterator unmodified.
+ bool TryLookupCachedProperty();
+ bool LookupCachedProperty();
+
+ private:
+ // For |ForTransitionHandler|.
+ LookupIterator(Isolate* isolate, Handle<Object> receiver, Handle<Name> name,
+ Handle<Map> transition_map, PropertyDetails details,
+ bool has_property);
+
+ void InternalUpdateProtector();
+
+ enum class InterceptorState {
+ kUninitialized,
+ kSkipNonMasking,
+ kProcessNonMasking
+ };
+
+ Handle<Map> GetReceiverMap() const;
+
+ V8_WARN_UNUSED_RESULT inline JSReceiver NextHolder(Map map);
+
+ template <bool is_element>
+ V8_EXPORT_PRIVATE void Start();
+ template <bool is_element>
+ void NextInternal(Map map, JSReceiver holder);
+ template <bool is_element>
+ inline State LookupInHolder(Map map, JSReceiver holder) {
+ return map.IsSpecialReceiverMap()
+ ? LookupInSpecialHolder<is_element>(map, holder)
+ : LookupInRegularHolder<is_element>(map, holder);
+ }
+ template <bool is_element>
+ State LookupInRegularHolder(Map map, JSReceiver holder);
+ template <bool is_element>
+ State LookupInSpecialHolder(Map map, JSReceiver holder);
+ template <bool is_element>
+ void RestartLookupForNonMaskingInterceptors() {
+ RestartInternal<is_element>(InterceptorState::kProcessNonMasking);
+ }
+ template <bool is_element>
+ void RestartInternal(InterceptorState interceptor_state);
+ Handle<Object> FetchValue() const;
+ bool IsConstFieldValueEqualTo(Object value) const;
+ template <bool is_element>
+ void ReloadPropertyInformation();
+
+ template <bool is_element>
+ bool SkipInterceptor(JSObject holder);
+ template <bool is_element>
+ static inline InterceptorInfo GetInterceptor(JSObject holder);
+
+ bool check_interceptor() const {
+ return (configuration_ & kInterceptor) != 0;
+ }
+ inline int descriptor_number() const;
+ inline int dictionary_entry() const;
+
+ static inline Configuration ComputeConfiguration(Configuration configuration,
+ Handle<Name> name);
+
+ static Handle<JSReceiver> GetRootForNonJSReceiver(
+ Isolate* isolate, Handle<Object> receiver, uint32_t index = kMaxUInt32);
+ static inline Handle<JSReceiver> GetRoot(Isolate* isolate,
+ Handle<Object> receiver,
+ uint32_t index = kMaxUInt32);
+
+ State NotFound(JSReceiver const holder) const;
+
+ // If configuration_ becomes mutable, update
+ // HolderIsReceiverOrHiddenPrototype.
+ const Configuration configuration_;
+ State state_;
+ bool has_property_;
+ InterceptorState interceptor_state_;
+ PropertyDetails property_details_;
+ Isolate* const isolate_;
+ Handle<Name> name_;
+ Handle<Object> transition_;
+ const Handle<Object> receiver_;
+ Handle<JSReceiver> holder_;
+ const Handle<JSReceiver> initial_holder_;
+ const uint32_t index_;
+ uint32_t number_;
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_OBJECTS_LOOKUP_H_
diff --git a/deps/v8/src/objects/managed.h b/deps/v8/src/objects/managed.h
index f1d42380dc..9653efa1c2 100644
--- a/deps/v8/src/objects/managed.h
+++ b/deps/v8/src/objects/managed.h
@@ -6,10 +6,10 @@
#define V8_OBJECTS_MANAGED_H_
#include <memory>
-#include "src/global-handles.h"
-#include "src/handles.h"
+#include "src/execution/isolate.h"
+#include "src/handles/global-handles.h"
+#include "src/handles/handles.h"
#include "src/heap/factory.h"
-#include "src/isolate.h"
#include "src/objects/foreign.h"
namespace v8 {
@@ -59,7 +59,7 @@ class Managed : public Foreign {
// Get a reference to the shared pointer to the C++ object.
V8_INLINE const std::shared_ptr<CppType>& get() { return *GetSharedPtrPtr(); }
- static Managed cast(Object obj) { return Managed(obj->ptr()); }
+ static Managed cast(Object obj) { return Managed(obj.ptr()); }
static Managed unchecked_cast(Object obj) { return bit_cast<Managed>(obj); }
// Allocate a new {CppType} and wrap it in a {Managed<CppType>}.
diff --git a/deps/v8/src/objects/map-inl.h b/deps/v8/src/objects/map-inl.h
index 792e12d126..8c26196fb5 100644
--- a/deps/v8/src/objects/map-inl.h
+++ b/deps/v8/src/objects/map-inl.h
@@ -7,19 +7,19 @@
#include "src/objects/map.h"
-#include "src/field-type.h"
#include "src/heap/heap-write-barrier-inl.h"
-#include "src/layout-descriptor-inl.h"
-#include "src/objects-inl.h"
#include "src/objects/api-callbacks-inl.h"
#include "src/objects/cell-inl.h"
#include "src/objects/descriptor-array-inl.h"
+#include "src/objects/field-type.h"
#include "src/objects/instance-type-inl.h"
+#include "src/objects/layout-descriptor-inl.h"
+#include "src/objects/objects-inl.h"
+#include "src/objects/property.h"
#include "src/objects/prototype-info-inl.h"
#include "src/objects/shared-function-info.h"
#include "src/objects/templates-inl.h"
-#include "src/property.h"
-#include "src/transitions.h"
+#include "src/objects/transitions-inl.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -31,17 +31,18 @@ OBJECT_CONSTRUCTORS_IMPL(Map, HeapObject)
CAST_ACCESSOR(Map)
DescriptorArray Map::instance_descriptors() const {
- return DescriptorArray::cast(READ_FIELD(*this, kDescriptorsOffset));
+ return DescriptorArray::cast(READ_FIELD(*this, kInstanceDescriptorsOffset));
}
DescriptorArray Map::synchronized_instance_descriptors() const {
- return DescriptorArray::cast(ACQUIRE_READ_FIELD(*this, kDescriptorsOffset));
+ return DescriptorArray::cast(
+ ACQUIRE_READ_FIELD(*this, kInstanceDescriptorsOffset));
}
void Map::set_synchronized_instance_descriptors(DescriptorArray value,
WriteBarrierMode mode) {
- RELEASE_WRITE_FIELD(*this, kDescriptorsOffset, value);
- CONDITIONAL_WRITE_BARRIER(*this, kDescriptorsOffset, value, mode);
+ RELEASE_WRITE_FIELD(*this, kInstanceDescriptorsOffset, value);
+ CONDITIONAL_WRITE_BARRIER(*this, kInstanceDescriptorsOffset, value, mode);
}
// A freshly allocated layout descriptor can be set on an existing map.
@@ -75,14 +76,14 @@ BIT_FIELD_ACCESSORS(Map, relaxed_bit_field, has_prototype_slot,
// |bit_field2| fields.
BIT_FIELD_ACCESSORS(Map, bit_field2, is_extensible, Map::IsExtensibleBit)
BIT_FIELD_ACCESSORS(Map, bit_field2, is_prototype_map, Map::IsPrototypeMapBit)
-BIT_FIELD_ACCESSORS(Map, bit_field2, is_in_retained_map_list,
- Map::IsInRetainedMapListBit)
+BIT_FIELD_ACCESSORS(Map, bit_field2, has_hidden_prototype,
+ Map::HasHiddenPrototypeBit)
// |bit_field3| fields.
BIT_FIELD_ACCESSORS(Map, bit_field3, owns_descriptors, Map::OwnsDescriptorsBit)
-BIT_FIELD_ACCESSORS(Map, bit_field3, has_hidden_prototype,
- Map::HasHiddenPrototypeBit)
BIT_FIELD_ACCESSORS(Map, bit_field3, is_deprecated, Map::IsDeprecatedBit)
+BIT_FIELD_ACCESSORS(Map, bit_field3, is_in_retained_map_list,
+ Map::IsInRetainedMapListBit)
BIT_FIELD_ACCESSORS(Map, bit_field3, is_migration_target,
Map::IsMigrationTargetBit)
BIT_FIELD_ACCESSORS(Map, bit_field3, is_immutable_proto,
@@ -97,18 +98,18 @@ BIT_FIELD_ACCESSORS(Map, bit_field3, construction_counter,
InterceptorInfo Map::GetNamedInterceptor() {
DCHECK(has_named_interceptor());
FunctionTemplateInfo info = GetFunctionTemplateInfo();
- return InterceptorInfo::cast(info->GetNamedPropertyHandler());
+ return InterceptorInfo::cast(info.GetNamedPropertyHandler());
}
InterceptorInfo Map::GetIndexedInterceptor() {
DCHECK(has_indexed_interceptor());
FunctionTemplateInfo info = GetFunctionTemplateInfo();
- return InterceptorInfo::cast(info->GetIndexedPropertyHandler());
+ return InterceptorInfo::cast(info.GetIndexedPropertyHandler());
}
bool Map::IsMostGeneralFieldType(Representation representation,
FieldType field_type) {
- return !representation.IsHeapObject() || field_type->IsAny();
+ return !representation.IsHeapObject() || field_type.IsAny();
}
bool Map::CanHaveFastTransitionableElementsKind(InstanceType instance_type) {
@@ -122,26 +123,23 @@ bool Map::CanHaveFastTransitionableElementsKind() const {
// static
void Map::GeneralizeIfCanHaveTransitionableFastElementsKind(
- Isolate* isolate, InstanceType instance_type, PropertyConstness* constness,
+ Isolate* isolate, InstanceType instance_type,
Representation* representation, Handle<FieldType>* field_type) {
if (CanHaveFastTransitionableElementsKind(instance_type)) {
// We don't support propagation of field generalization through elements
// kind transitions because they are inserted into the transition tree
// before field transitions. In order to avoid complexity of handling
// such a case we ensure that all maps with transitionable elements kinds
- // have the most general field type.
- if (representation->IsHeapObject()) {
- // The field type is either already Any or should become Any if it was
- // something else.
- *field_type = FieldType::Any(isolate);
- }
+ // have the most general field representation and type.
+ *field_type = FieldType::Any(isolate);
+ *representation = Representation::Tagged();
}
}
bool Map::IsUnboxedDoubleField(FieldIndex index) const {
if (!FLAG_unbox_double_fields) return false;
- if (index.is_hidden_field() || !index.is_inobject()) return false;
- return !layout_descriptor()->IsTagged(index.property_index());
+ if (!index.is_inobject()) return false;
+ return !layout_descriptor().IsTagged(index.property_index());
}
bool Map::TooManyFastProperties(StoreOrigin store_origin) const {
@@ -163,7 +161,7 @@ bool Map::TooManyFastProperties(StoreOrigin store_origin) const {
}
PropertyDetails Map::GetLastDescriptorDetails() const {
- return instance_descriptors()->GetDetails(LastAdded());
+ return instance_descriptors().GetDetails(LastAdded());
}
int Map::LastAdded() const {
@@ -177,7 +175,7 @@ int Map::NumberOfOwnDescriptors() const {
}
void Map::SetNumberOfOwnDescriptors(int number) {
- DCHECK_LE(number, instance_descriptors()->number_of_descriptors());
+ DCHECK_LE(number, instance_descriptors().number_of_descriptors());
CHECK_LE(static_cast<unsigned>(number),
static_cast<unsigned>(kMaxNumberOfDescriptors));
set_bit_field3(NumberOfOwnDescriptorsBits::update(bit_field3(), number));
@@ -200,9 +198,8 @@ FixedArrayBase Map::GetInitialElements() const {
result = GetReadOnlyRoots().empty_fixed_array();
} else if (has_fast_sloppy_arguments_elements()) {
result = GetReadOnlyRoots().empty_sloppy_arguments_elements();
- } else if (has_fixed_typed_array_elements()) {
- result =
- GetReadOnlyRoots().EmptyFixedTypedArrayForTypedArray(elements_kind());
+ } else if (has_typed_array_elements()) {
+ result = GetReadOnlyRoots().empty_byte_array();
} else if (has_dictionary_elements()) {
result = GetReadOnlyRoots().empty_slow_element_dictionary();
} else {
@@ -293,12 +290,11 @@ Handle<Map> Map::AddMissingTransitionsForTesting(
}
InstanceType Map::instance_type() const {
- return static_cast<InstanceType>(
- READ_UINT16_FIELD(*this, kInstanceTypeOffset));
+ return static_cast<InstanceType>(ReadField<uint16_t>(kInstanceTypeOffset));
}
void Map::set_instance_type(InstanceType value) {
- WRITE_UINT16_FIELD(*this, kInstanceTypeOffset, value);
+ WriteField<uint16_t>(kInstanceTypeOffset, value);
}
int Map::UnusedPropertyFields() const {
@@ -373,19 +369,19 @@ void Map::SetOutOfObjectUnusedPropertyFields(int value) {
void Map::CopyUnusedPropertyFields(Map map) {
set_used_or_unused_instance_size_in_words(
- map->used_or_unused_instance_size_in_words());
- DCHECK_EQ(UnusedPropertyFields(), map->UnusedPropertyFields());
+ map.used_or_unused_instance_size_in_words());
+ DCHECK_EQ(UnusedPropertyFields(), map.UnusedPropertyFields());
}
void Map::CopyUnusedPropertyFieldsAdjustedForInstanceSize(Map map) {
- int value = map->used_or_unused_instance_size_in_words();
+ int value = map.used_or_unused_instance_size_in_words();
if (value >= JSValue::kFieldsAdded) {
// Unused in-object fields. Adjust the offset from the object’s start
// so it matches the distance to the object’s end.
- value += instance_size_in_words() - map->instance_size_in_words();
+ value += instance_size_in_words() - map.instance_size_in_words();
}
set_used_or_unused_instance_size_in_words(value);
- DCHECK_EQ(UnusedPropertyFields(), map->UnusedPropertyFields());
+ DCHECK_EQ(UnusedPropertyFields(), map.UnusedPropertyFields());
}
void Map::AccountAddedPropertyField() {
@@ -420,10 +416,10 @@ void Map::AccountAddedOutOfObjectPropertyField(int unused_in_property_array) {
DCHECK_EQ(unused_in_property_array, UnusedPropertyFields());
}
-byte Map::bit_field() const { return READ_BYTE_FIELD(*this, kBitFieldOffset); }
+byte Map::bit_field() const { return ReadField<byte>(kBitFieldOffset); }
void Map::set_bit_field(byte value) {
- WRITE_BYTE_FIELD(*this, kBitFieldOffset, value);
+ WriteField<byte>(kBitFieldOffset, value);
}
byte Map::relaxed_bit_field() const {
@@ -434,12 +430,10 @@ void Map::set_relaxed_bit_field(byte value) {
RELAXED_WRITE_BYTE_FIELD(*this, kBitFieldOffset, value);
}
-byte Map::bit_field2() const {
- return READ_BYTE_FIELD(*this, kBitField2Offset);
-}
+byte Map::bit_field2() const { return ReadField<byte>(kBitField2Offset); }
void Map::set_bit_field2(byte value) {
- WRITE_BYTE_FIELD(*this, kBitField2Offset, value);
+ WriteField<byte>(kBitField2Offset, value);
}
bool Map::is_abandoned_prototype_map() const {
@@ -447,8 +441,8 @@ bool Map::is_abandoned_prototype_map() const {
}
bool Map::should_be_fast_prototype_map() const {
- if (!prototype_info()->IsPrototypeInfo()) return false;
- return PrototypeInfo::cast(prototype_info())->should_be_fast_map();
+ if (!prototype_info().IsPrototypeInfo()) return false;
+ return PrototypeInfo::cast(prototype_info()).should_be_fast_map();
}
void Map::set_elements_kind(ElementsKind elements_kind) {
@@ -492,8 +486,8 @@ bool Map::has_fast_string_wrapper_elements() const {
return elements_kind() == FAST_STRING_WRAPPER_ELEMENTS;
}
-bool Map::has_fixed_typed_array_elements() const {
- return IsFixedTypedArrayElementsKind(elements_kind());
+bool Map::has_typed_array_elements() const {
+ return IsTypedArrayElementsKind(elements_kind());
}
bool Map::has_dictionary_elements() const {
@@ -501,7 +495,7 @@ bool Map::has_dictionary_elements() const {
}
bool Map::has_frozen_or_sealed_elements() const {
- return IsPackedFrozenOrSealedElementsKind(elements_kind());
+ return IsFrozenOrSealedElementsKind(elements_kind());
}
bool Map::has_sealed_elements() const {
@@ -531,7 +525,7 @@ bool Map::is_stable() const { return !IsUnstableBit::decode(bit_field3()); }
bool Map::CanBeDeprecated() const {
int descriptor = LastAdded();
for (int i = 0; i <= descriptor; i++) {
- PropertyDetails details = instance_descriptors()->GetDetails(i);
+ PropertyDetails details = instance_descriptors().GetDetails(i);
if (details.representation().IsNone()) return true;
if (details.representation().IsSmi()) return true;
if (details.representation().IsDouble()) return true;
@@ -546,7 +540,7 @@ bool Map::CanBeDeprecated() const {
void Map::NotifyLeafMapLayoutChange(Isolate* isolate) {
if (is_stable()) {
mark_unstable();
- dependent_code()->DeoptimizeDependentCodeGroup(
+ dependent_code().DeoptimizeDependentCodeGroup(
isolate, DependentCode::kPrototypeCheckGroup);
}
}
@@ -581,7 +575,7 @@ HeapObject Map::prototype() const {
}
void Map::set_prototype(HeapObject value, WriteBarrierMode mode) {
- DCHECK(value->IsNull() || value->IsJSReceiver());
+ DCHECK(value.IsNull() || value.IsJSReceiver());
WRITE_FIELD(*this, kPrototypeOffset, value);
CONDITIONAL_WRITE_BARRIER(*this, kPrototypeOffset, value, mode);
}
@@ -608,13 +602,13 @@ void Map::UpdateDescriptors(Isolate* isolate, DescriptorArray descriptors,
int number_of_own_descriptors) {
SetInstanceDescriptors(isolate, descriptors, number_of_own_descriptors);
if (FLAG_unbox_double_fields) {
- if (layout_descriptor()->IsSlowLayout()) {
+ if (layout_descriptor().IsSlowLayout()) {
set_layout_descriptor(layout_desc);
}
#ifdef VERIFY_HEAP
// TODO(ishell): remove these checks from VERIFY_HEAP mode.
if (FLAG_verify_heap) {
- CHECK(layout_descriptor()->IsConsistentWithMap(*this));
+ CHECK(layout_descriptor().IsConsistentWithMap(*this));
CHECK_EQ(Map::GetVisitorId(*this), visitor_id());
}
#else
@@ -627,14 +621,14 @@ void Map::UpdateDescriptors(Isolate* isolate, DescriptorArray descriptors,
void Map::InitializeDescriptors(Isolate* isolate, DescriptorArray descriptors,
LayoutDescriptor layout_desc) {
SetInstanceDescriptors(isolate, descriptors,
- descriptors->number_of_descriptors());
+ descriptors.number_of_descriptors());
if (FLAG_unbox_double_fields) {
set_layout_descriptor(layout_desc);
#ifdef VERIFY_HEAP
// TODO(ishell): remove these checks from VERIFY_HEAP mode.
if (FLAG_verify_heap) {
- CHECK(layout_descriptor()->IsConsistentWithMap(*this));
+ CHECK(layout_descriptor().IsConsistentWithMap(*this));
}
#else
SLOW_DCHECK(layout_descriptor()->IsConsistentWithMap(*this));
@@ -666,11 +660,11 @@ LayoutDescriptor Map::GetLayoutDescriptor() const {
void Map::AppendDescriptor(Isolate* isolate, Descriptor* desc) {
DescriptorArray descriptors = instance_descriptors();
int number_of_own_descriptors = NumberOfOwnDescriptors();
- DCHECK(descriptors->number_of_descriptors() == number_of_own_descriptors);
+ DCHECK(descriptors.number_of_descriptors() == number_of_own_descriptors);
{
// The following two operations need to happen before the marking write
// barrier.
- descriptors->Append(desc);
+ descriptors.Append(desc);
SetNumberOfOwnDescriptors(number_of_own_descriptors + 1);
MarkingBarrierForDescriptorArray(isolate->heap(), *this, descriptors,
number_of_own_descriptors + 1);
@@ -694,7 +688,7 @@ void Map::AppendDescriptor(Isolate* isolate, Descriptor* desc) {
HeapObject Map::GetBackPointer() const {
Object object = constructor_or_backpointer();
- if (object->IsMap()) {
+ if (object.IsMap()) {
return Map::cast(object);
}
return GetReadOnlyRoots().undefined_value();
@@ -723,10 +717,10 @@ void Map::set_prototype_info(Object value, WriteBarrierMode mode) {
void Map::SetBackPointer(Object value, WriteBarrierMode mode) {
CHECK_GE(instance_type(), FIRST_JS_RECEIVER_TYPE);
- CHECK(value->IsMap());
- CHECK(GetBackPointer()->IsUndefined());
- CHECK_IMPLIES(value->IsMap(), Map::cast(value)->GetConstructor() ==
- constructor_or_backpointer());
+ CHECK(value.IsMap());
+ CHECK(GetBackPointer().IsUndefined());
+ CHECK_IMPLIES(value.IsMap(), Map::cast(value).GetConstructor() ==
+ constructor_or_backpointer());
set_constructor_or_backpointer(value, mode);
}
@@ -737,34 +731,34 @@ ACCESSORS(Map, constructor_or_backpointer, Object,
bool Map::IsPrototypeValidityCellValid() const {
Object validity_cell = prototype_validity_cell();
- Object value = validity_cell->IsSmi() ? Smi::cast(validity_cell)
- : Cell::cast(validity_cell)->value();
+ Object value = validity_cell.IsSmi() ? Smi::cast(validity_cell)
+ : Cell::cast(validity_cell).value();
return value == Smi::FromInt(Map::kPrototypeChainValid);
}
Object Map::GetConstructor() const {
Object maybe_constructor = constructor_or_backpointer();
// Follow any back pointers.
- while (maybe_constructor->IsMap()) {
+ while (maybe_constructor.IsMap()) {
maybe_constructor =
- Map::cast(maybe_constructor)->constructor_or_backpointer();
+ Map::cast(maybe_constructor).constructor_or_backpointer();
}
return maybe_constructor;
}
FunctionTemplateInfo Map::GetFunctionTemplateInfo() const {
Object constructor = GetConstructor();
- if (constructor->IsJSFunction()) {
- DCHECK(JSFunction::cast(constructor)->shared()->IsApiFunction());
- return JSFunction::cast(constructor)->shared()->get_api_func_data();
+ if (constructor.IsJSFunction()) {
+ DCHECK(JSFunction::cast(constructor).shared().IsApiFunction());
+ return JSFunction::cast(constructor).shared().get_api_func_data();
}
- DCHECK(constructor->IsFunctionTemplateInfo());
+ DCHECK(constructor.IsFunctionTemplateInfo());
return FunctionTemplateInfo::cast(constructor);
}
void Map::SetConstructor(Object constructor, WriteBarrierMode mode) {
// Never overwrite a back pointer with a constructor.
- CHECK(!constructor_or_backpointer()->IsMap());
+ CHECK(!constructor_or_backpointer().IsMap());
set_constructor_or_backpointer(constructor, mode);
}
@@ -780,7 +774,7 @@ bool Map::IsInobjectSlackTrackingInProgress() const {
void Map::InobjectSlackTrackingStep(Isolate* isolate) {
// Slack tracking should only be performed on an initial map.
- DCHECK(GetBackPointer()->IsUndefined());
+ DCHECK(GetBackPointer().IsUndefined());
if (!IsInobjectSlackTrackingInProgress()) return;
int counter = construction_counter();
set_construction_counter(counter - 1);
@@ -813,7 +807,7 @@ int NormalizedMapCache::GetIndex(Handle<Map> map) {
bool HeapObject::IsNormalizedMapCache() const {
if (!IsWeakFixedArray()) return false;
- if (WeakFixedArray::cast(*this)->length() != NormalizedMapCache::kEntries) {
+ if (WeakFixedArray::cast(*this).length() != NormalizedMapCache::kEntries) {
return false;
}
return true;
diff --git a/deps/v8/src/objects/map-updater.cc b/deps/v8/src/objects/map-updater.cc
new file mode 100644
index 0000000000..855fdabdf3
--- /dev/null
+++ b/deps/v8/src/objects/map-updater.cc
@@ -0,0 +1,805 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/objects/map-updater.h"
+
+#include "src/execution/isolate.h"
+#include "src/handles/handles.h"
+#include "src/objects/field-type.h"
+#include "src/objects/objects-inl.h"
+#include "src/objects/objects.h"
+#include "src/objects/property-details.h"
+#include "src/objects/transitions.h"
+
+namespace v8 {
+namespace internal {
+
+namespace {
+
+inline bool EqualImmutableValues(Object obj1, Object obj2) {
+ if (obj1 == obj2) return true; // Valid for both kData and kAccessor kinds.
+ // TODO(ishell): compare AccessorPairs.
+ return false;
+}
+
+} // namespace
+
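+// A MapUpdater migrates |old_map_| to a compatible, up-to-date map: it finds
+// the root of old_map_'s transition tree, follows still-compatible
+// transitions as far as possible, and constructs new maps for whatever
+// remains, replaying a recorded integrity level transition last.
+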
+MapUpdater::MapUpdater(Isolate* isolate, Handle<Map> old_map)
+ : isolate_(isolate),
+ old_map_(old_map),
+ old_descriptors_(old_map->instance_descriptors(), isolate_),
+ old_nof_(old_map_->NumberOfOwnDescriptors()),
+ new_elements_kind_(old_map_->elements_kind()),
+ is_transitionable_fast_elements_kind_(
+ IsTransitionableFastElementsKind(new_elements_kind_)) {
+ // We shouldn't try to update remote objects.
+ DCHECK(
+ !old_map->FindRootMap(isolate).GetConstructor().IsFunctionTemplateInfo());
+}
+
+Name MapUpdater::GetKey(int descriptor) const {
+ return old_descriptors_->GetKey(descriptor);
+}
+
+PropertyDetails MapUpdater::GetDetails(int descriptor) const {
+ DCHECK_LE(0, descriptor);
+ if (descriptor == modified_descriptor_) {
+ PropertyAttributes attributes = new_attributes_;
+    // If the original map was sealed or frozen, use the old attributes so
+    // that we follow the same transition path as before.
+ // Note that the user could not have changed the attributes because
+ // both seal and freeze make the properties non-configurable.
+ if (integrity_level_ == SEALED || integrity_level_ == FROZEN) {
+ attributes = old_descriptors_->GetDetails(descriptor).attributes();
+ }
+ return PropertyDetails(new_kind_, attributes, new_location_, new_constness_,
+ new_representation_);
+ }
+ return old_descriptors_->GetDetails(descriptor);
+}
+
+Object MapUpdater::GetValue(int descriptor) const {
+ DCHECK_LE(0, descriptor);
+ if (descriptor == modified_descriptor_) {
+ DCHECK_EQ(kDescriptor, new_location_);
+ return *new_value_;
+ }
+ DCHECK_EQ(kDescriptor, GetDetails(descriptor).location());
+ return old_descriptors_->GetStrongValue(descriptor);
+}
+
+FieldType MapUpdater::GetFieldType(int descriptor) const {
+ DCHECK_LE(0, descriptor);
+ if (descriptor == modified_descriptor_) {
+ DCHECK_EQ(kField, new_location_);
+ return *new_field_type_;
+ }
+ DCHECK_EQ(kField, GetDetails(descriptor).location());
+ return old_descriptors_->GetFieldType(descriptor);
+}
+
+Handle<FieldType> MapUpdater::GetOrComputeFieldType(
+ int descriptor, PropertyLocation location,
+ Representation representation) const {
+ DCHECK_LE(0, descriptor);
+ // |location| is just a pre-fetched GetDetails(descriptor).location().
+ DCHECK_EQ(location, GetDetails(descriptor).location());
+ if (location == kField) {
+ return handle(GetFieldType(descriptor), isolate_);
+ } else {
+ return GetValue(descriptor).OptimalType(isolate_, representation);
+ }
+}
+
+Handle<FieldType> MapUpdater::GetOrComputeFieldType(
+ Handle<DescriptorArray> descriptors, int descriptor,
+ PropertyLocation location, Representation representation) {
+ // |location| is just a pre-fetched GetDetails(descriptor).location().
+ DCHECK_EQ(descriptors->GetDetails(descriptor).location(), location);
+ if (location == kField) {
+ return handle(descriptors->GetFieldType(descriptor), isolate_);
+ } else {
+ return descriptors->GetStrongValue(descriptor)
+ .OptimalType(isolate_, representation);
+ }
+}
+
+Handle<Map> MapUpdater::ReconfigureToDataField(int descriptor,
+ PropertyAttributes attributes,
+ PropertyConstness constness,
+ Representation representation,
+ Handle<FieldType> field_type) {
+ DCHECK_EQ(kInitialized, state_);
+ DCHECK_LE(0, descriptor);
+ DCHECK(!old_map_->is_dictionary_map());
+ modified_descriptor_ = descriptor;
+ new_kind_ = kData;
+ new_attributes_ = attributes;
+ new_location_ = kField;
+
+ PropertyDetails old_details =
+ old_descriptors_->GetDetails(modified_descriptor_);
+
+  // If the property kind is not being reconfigured, merge the result with
+  // the representation/field type from the old descriptor.
+ if (old_details.kind() == new_kind_) {
+ new_constness_ = GeneralizeConstness(constness, old_details.constness());
+
+ Representation old_representation = old_details.representation();
+ new_representation_ = representation.generalize(old_representation);
+
+ Handle<FieldType> old_field_type =
+ GetOrComputeFieldType(old_descriptors_, modified_descriptor_,
+ old_details.location(), new_representation_);
+
+ new_field_type_ =
+ Map::GeneralizeFieldType(old_representation, old_field_type,
+ new_representation_, field_type, isolate_);
+ } else {
+    // We don't know whether this is the first property kind reconfiguration,
+    // nor which value this property held previously, so we cannot treat the
+    // property as constant.
+ new_constness_ = PropertyConstness::kMutable;
+ new_representation_ = representation;
+ new_field_type_ = field_type;
+ }
+
+ Map::GeneralizeIfCanHaveTransitionableFastElementsKind(
+ isolate_, old_map_->instance_type(), &new_representation_,
+ &new_field_type_);
+
+ if (TryReconfigureToDataFieldInplace() == kEnd) return result_map_;
+ if (FindRootMap() == kEnd) return result_map_;
+ if (FindTargetMap() == kEnd) return result_map_;
+ if (ConstructNewMap() == kAtIntegrityLevelSource) {
+ ConstructNewMapWithIntegrityLevelTransition();
+ }
+ DCHECK_EQ(kEnd, state_);
+ return result_map_;
+}
+
+Handle<Map> MapUpdater::ReconfigureElementsKind(ElementsKind elements_kind) {
+ DCHECK_EQ(kInitialized, state_);
+ new_elements_kind_ = elements_kind;
+ is_transitionable_fast_elements_kind_ =
+ IsTransitionableFastElementsKind(new_elements_kind_);
+
+ if (FindRootMap() == kEnd) return result_map_;
+ if (FindTargetMap() == kEnd) return result_map_;
+ if (ConstructNewMap() == kAtIntegrityLevelSource) {
+ ConstructNewMapWithIntegrityLevelTransition();
+ }
+ DCHECK_EQ(kEnd, state_);
+ return result_map_;
+}
+
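+// Update() migrates a deprecated map: locate the root map, walk to the
+// deepest compatible target map, then construct whatever transitions are
+// still missing (replaying a recorded integrity level transition, if any,
+// as the final step).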
+Handle<Map> MapUpdater::Update() {
+ DCHECK_EQ(kInitialized, state_);
+ DCHECK(old_map_->is_deprecated());
+
+ if (FindRootMap() == kEnd) return result_map_;
+ if (FindTargetMap() == kEnd) return result_map_;
+ if (ConstructNewMap() == kAtIntegrityLevelSource) {
+ ConstructNewMapWithIntegrityLevelTransition();
+ }
+ DCHECK_EQ(kEnd, state_);
+ if (FLAG_fast_map_update) {
+ TransitionsAccessor(isolate_, old_map_).SetMigrationTarget(*result_map_);
+ }
+ return result_map_;
+}
+
+void MapUpdater::GeneralizeField(Handle<Map> map, int modify_index,
+ PropertyConstness new_constness,
+ Representation new_representation,
+ Handle<FieldType> new_field_type) {
+ Map::GeneralizeField(isolate_, map, modify_index, new_constness,
+ new_representation, new_field_type);
+
+ DCHECK(*old_descriptors_ == old_map_->instance_descriptors() ||
+ *old_descriptors_ == integrity_source_map_->instance_descriptors());
+}
+
+MapUpdater::State MapUpdater::CopyGeneralizeAllFields(const char* reason) {
+ result_map_ = Map::CopyGeneralizeAllFields(
+ isolate_, old_map_, new_elements_kind_, modified_descriptor_, new_kind_,
+ new_attributes_, reason);
+ state_ = kEnd;
+ return state_; // Done.
+}
+
+MapUpdater::State MapUpdater::TryReconfigureToDataFieldInplace() {
+ // Updating deprecated maps in-place doesn't make sense.
+ if (old_map_->is_deprecated()) return state_;
+
+ if (new_representation_.IsNone()) return state_; // Not done yet.
+
+ PropertyDetails old_details =
+ old_descriptors_->GetDetails(modified_descriptor_);
+ Representation old_representation = old_details.representation();
+ if (!old_representation.CanBeInPlaceChangedTo(new_representation_)) {
+ return state_; // Not done yet.
+ }
+
+ DCHECK_EQ(new_kind_, old_details.kind());
+ DCHECK_EQ(new_attributes_, old_details.attributes());
+ DCHECK_EQ(kField, old_details.location());
+ if (FLAG_trace_generalization) {
+ old_map_->PrintGeneralization(
+ isolate_, stdout, "uninitialized field", modified_descriptor_, old_nof_,
+ old_nof_, false, old_representation, new_representation_,
+ old_details.constness(), new_constness_,
+ handle(old_descriptors_->GetFieldType(modified_descriptor_), isolate_),
+ MaybeHandle<Object>(), new_field_type_, MaybeHandle<Object>());
+ }
+ Handle<Map> field_owner(
+ old_map_->FindFieldOwner(isolate_, modified_descriptor_), isolate_);
+
+ GeneralizeField(field_owner, modified_descriptor_, new_constness_,
+ new_representation_, new_field_type_);
+ // Check that the descriptor array was updated.
+ DCHECK(old_descriptors_->GetDetails(modified_descriptor_)
+ .representation()
+ .Equals(new_representation_));
+ DCHECK(old_descriptors_->GetFieldType(modified_descriptor_)
+ .NowIs(new_field_type_));
+
+ result_map_ = old_map_;
+ state_ = kEnd;
+ return state_; // Done.
+}
+
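+// Illustration (assumed shapes): given a back pointer chain
+//   {} -> {a} -> {a}|sealed -> {a}|frozen == old_map_,
+// this records the most restrictive level (FROZEN) and its transition
+// symbol, and leaves |integrity_source_map_| at the last extensible map,
+// {a}. Any interleaved non-integrity-level transition aborts the walk.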
+bool MapUpdater::TrySaveIntegrityLevelTransitions() {
+ // Figure out the most restrictive integrity level transition (it should
+ // be the last one in the transition tree).
+ Handle<Map> previous =
+ handle(Map::cast(old_map_->GetBackPointer()), isolate_);
+ Symbol integrity_level_symbol;
+ TransitionsAccessor last_transitions(isolate_, previous);
+ if (!last_transitions.HasIntegrityLevelTransitionTo(
+ *old_map_, &integrity_level_symbol, &integrity_level_)) {
+    // The last transition was not an integrity level transition; bail out.
+ // This can happen in the following cases:
+ // - there are private symbol transitions following the integrity level
+ // transitions (see crbug.com/v8/8854).
+ // - there is a getter added in addition to an existing setter (or a setter
+ // in addition to an existing getter).
+ return false;
+ }
+ integrity_level_symbol_ = handle(integrity_level_symbol, isolate_);
+ integrity_source_map_ = previous;
+
+ // Now walk up the back pointer chain and skip all integrity level
+ // transitions. If we encounter any non-integrity level transition interleaved
+ // with integrity level transitions, just bail out.
+ while (!integrity_source_map_->is_extensible()) {
+ previous =
+ handle(Map::cast(integrity_source_map_->GetBackPointer()), isolate_);
+ TransitionsAccessor transitions(isolate_, previous);
+ if (!transitions.HasIntegrityLevelTransitionTo(*integrity_source_map_)) {
+ return false;
+ }
+ integrity_source_map_ = previous;
+ }
+
+  // Integrity level transitions never change the number of descriptors.
+ CHECK_EQ(old_map_->NumberOfOwnDescriptors(),
+ integrity_source_map_->NumberOfOwnDescriptors());
+
+ has_integrity_level_transition_ = true;
+ old_descriptors_ =
+ handle(integrity_source_map_->instance_descriptors(), isolate_);
+ return true;
+}
+
+MapUpdater::State MapUpdater::FindRootMap() {
+ DCHECK_EQ(kInitialized, state_);
+ // Check the state of the root map.
+ root_map_ = handle(old_map_->FindRootMap(isolate_), isolate_);
+ ElementsKind from_kind = root_map_->elements_kind();
+ ElementsKind to_kind = new_elements_kind_;
+
+ if (root_map_->is_deprecated()) {
+ state_ = kEnd;
+ result_map_ = handle(
+ JSFunction::cast(root_map_->GetConstructor()).initial_map(), isolate_);
+ result_map_ = Map::AsElementsKind(isolate_, result_map_, to_kind);
+ DCHECK(result_map_->is_dictionary_map());
+ return state_;
+ }
+
+ if (!old_map_->EquivalentToForTransition(*root_map_)) {
+ return CopyGeneralizeAllFields("GenAll_NotEquivalent");
+ } else if (old_map_->is_extensible() != root_map_->is_extensible()) {
+ DCHECK(!old_map_->is_extensible());
+ DCHECK(root_map_->is_extensible());
+    // There is an integrity level transition in the tree; record it so that
+    // we can replay it later.
+ if (!TrySaveIntegrityLevelTransitions()) {
+ return CopyGeneralizeAllFields("GenAll_PrivateSymbolsOnNonExtensible");
+ }
+
+ // We want to build transitions to the original element kind (before
+ // the seal transitions), so change {to_kind} accordingly.
+ DCHECK(to_kind == DICTIONARY_ELEMENTS ||
+ to_kind == SLOW_STRING_WRAPPER_ELEMENTS ||
+ IsTypedArrayElementsKind(to_kind) ||
+ IsFrozenOrSealedElementsKind(to_kind));
+ to_kind = integrity_source_map_->elements_kind();
+ }
+
+ // TODO(ishell): Add a test for SLOW_SLOPPY_ARGUMENTS_ELEMENTS.
+ if (from_kind != to_kind && to_kind != DICTIONARY_ELEMENTS &&
+ to_kind != SLOW_STRING_WRAPPER_ELEMENTS &&
+ to_kind != SLOW_SLOPPY_ARGUMENTS_ELEMENTS &&
+ !(IsTransitionableFastElementsKind(from_kind) &&
+ IsMoreGeneralElementsKindTransition(from_kind, to_kind))) {
+ return CopyGeneralizeAllFields("GenAll_InvalidElementsTransition");
+ }
+
+ int root_nof = root_map_->NumberOfOwnDescriptors();
+ if (modified_descriptor_ >= 0 && modified_descriptor_ < root_nof) {
+ PropertyDetails old_details =
+ old_descriptors_->GetDetails(modified_descriptor_);
+ if (old_details.kind() != new_kind_ ||
+ old_details.attributes() != new_attributes_) {
+ return CopyGeneralizeAllFields("GenAll_RootModification1");
+ }
+ if (old_details.location() != kField) {
+ return CopyGeneralizeAllFields("GenAll_RootModification2");
+ }
+ if (!new_representation_.fits_into(old_details.representation())) {
+ return CopyGeneralizeAllFields("GenAll_RootModification4");
+ }
+
+ DCHECK_EQ(kData, old_details.kind());
+ DCHECK_EQ(kData, new_kind_);
+ DCHECK_EQ(kField, new_location_);
+
+    // Modify the root map in place. GeneralizeField is a no-op if {old_map_}
+    // is already general enough to hold the requested {new_constness_} and
+    // {new_field_type_}.
+ GeneralizeField(old_map_, modified_descriptor_, new_constness_,
+ old_details.representation(), new_field_type_);
+ }
+
+ // From here on, use the map with correct elements kind as root map.
+ root_map_ = Map::AsElementsKind(isolate_, root_map_, to_kind);
+ state_ = kAtRootMap;
+ return state_; // Not done yet.
+}
+
+MapUpdater::State MapUpdater::FindTargetMap() {
+ DCHECK_EQ(kAtRootMap, state_);
+ target_map_ = root_map_;
+
+ int root_nof = root_map_->NumberOfOwnDescriptors();
+ for (int i = root_nof; i < old_nof_; ++i) {
+ PropertyDetails old_details = GetDetails(i);
+ Map transition = TransitionsAccessor(isolate_, target_map_)
+ .SearchTransition(GetKey(i), old_details.kind(),
+ old_details.attributes());
+ if (transition.is_null()) break;
+ Handle<Map> tmp_map(transition, isolate_);
+
+ Handle<DescriptorArray> tmp_descriptors(tmp_map->instance_descriptors(),
+ isolate_);
+
+ // Check if target map is incompatible.
+ PropertyDetails tmp_details = tmp_descriptors->GetDetails(i);
+ DCHECK_EQ(old_details.kind(), tmp_details.kind());
+ DCHECK_EQ(old_details.attributes(), tmp_details.attributes());
+ if (old_details.kind() == kAccessor &&
+ !EqualImmutableValues(GetValue(i),
+ tmp_descriptors->GetStrongValue(i))) {
+ // TODO(ishell): mutable accessors are not implemented yet.
+ return CopyGeneralizeAllFields("GenAll_Incompatible");
+ }
+ if (!IsGeneralizableTo(old_details.location(), tmp_details.location())) {
+ break;
+ }
+ Representation tmp_representation = tmp_details.representation();
+ if (!old_details.representation().fits_into(tmp_representation)) {
+ break;
+ }
+
+ if (tmp_details.location() == kField) {
+ Handle<FieldType> old_field_type =
+ GetOrComputeFieldType(i, old_details.location(), tmp_representation);
+ GeneralizeField(tmp_map, i, old_details.constness(), tmp_representation,
+ old_field_type);
+ } else {
+ // kDescriptor: Check that the value matches.
+ if (!EqualImmutableValues(GetValue(i),
+ tmp_descriptors->GetStrongValue(i))) {
+ break;
+ }
+ }
+ DCHECK(!tmp_map->is_deprecated());
+ target_map_ = tmp_map;
+ }
+
+ // Directly change the map if the target map is more general.
+ int target_nof = target_map_->NumberOfOwnDescriptors();
+ if (target_nof == old_nof_) {
+#ifdef DEBUG
+ if (modified_descriptor_ >= 0) {
+ DescriptorArray target_descriptors = target_map_->instance_descriptors();
+ PropertyDetails details =
+ target_descriptors.GetDetails(modified_descriptor_);
+ DCHECK_EQ(new_kind_, details.kind());
+ DCHECK_EQ(GetDetails(modified_descriptor_).attributes(),
+ details.attributes());
+ DCHECK(IsGeneralizableTo(new_constness_, details.constness()));
+ DCHECK_EQ(new_location_, details.location());
+ DCHECK(new_representation_.fits_into(details.representation()));
+ if (new_location_ == kField) {
+ DCHECK_EQ(kField, details.location());
+ DCHECK(new_field_type_->NowIs(
+ target_descriptors.GetFieldType(modified_descriptor_)));
+ } else {
+ DCHECK(details.location() == kField ||
+ EqualImmutableValues(
+ *new_value_,
+ target_descriptors.GetStrongValue(modified_descriptor_)));
+ }
+ }
+#endif
+ if (*target_map_ != *old_map_) {
+ old_map_->NotifyLeafMapLayoutChange(isolate_);
+ }
+ if (!has_integrity_level_transition_) {
+ result_map_ = target_map_;
+ state_ = kEnd;
+ return state_; // Done.
+ }
+
+ // We try to replay the integrity level transition here.
+ Map transition = TransitionsAccessor(isolate_, target_map_)
+ .SearchSpecial(*integrity_level_symbol_);
+ if (!transition.is_null()) {
+ result_map_ = handle(transition, isolate_);
+ state_ = kEnd;
+ return state_; // Done.
+ }
+ }
+
+ // Find the last compatible target map in the transition tree.
+ for (int i = target_nof; i < old_nof_; ++i) {
+ PropertyDetails old_details = GetDetails(i);
+ Map transition = TransitionsAccessor(isolate_, target_map_)
+ .SearchTransition(GetKey(i), old_details.kind(),
+ old_details.attributes());
+ if (transition.is_null()) break;
+ Handle<Map> tmp_map(transition, isolate_);
+ Handle<DescriptorArray> tmp_descriptors(tmp_map->instance_descriptors(),
+ isolate_);
+#ifdef DEBUG
+ // Check that target map is compatible.
+ PropertyDetails tmp_details = tmp_descriptors->GetDetails(i);
+ DCHECK_EQ(old_details.kind(), tmp_details.kind());
+ DCHECK_EQ(old_details.attributes(), tmp_details.attributes());
+#endif
+ if (old_details.kind() == kAccessor &&
+ !EqualImmutableValues(GetValue(i),
+ tmp_descriptors->GetStrongValue(i))) {
+ return CopyGeneralizeAllFields("GenAll_Incompatible");
+ }
+ DCHECK(!tmp_map->is_deprecated());
+ target_map_ = tmp_map;
+ }
+
+ state_ = kAtTargetMap;
+ return state_; // Not done yet.
+}
+
+Handle<DescriptorArray> MapUpdater::BuildDescriptorArray() {
+ InstanceType instance_type = old_map_->instance_type();
+ int target_nof = target_map_->NumberOfOwnDescriptors();
+ Handle<DescriptorArray> target_descriptors(
+ target_map_->instance_descriptors(), isolate_);
+
+  // Allocate a new descriptor array large enough to hold the required
+  // descriptors and at least as large as the old descriptor array.
+ int new_slack =
+ std::max<int>(old_nof_, old_descriptors_->number_of_descriptors()) -
+ old_nof_;
+ Handle<DescriptorArray> new_descriptors =
+ DescriptorArray::Allocate(isolate_, old_nof_, new_slack);
+ DCHECK(new_descriptors->number_of_all_descriptors() >
+ target_descriptors->number_of_all_descriptors() ||
+ new_descriptors->number_of_slack_descriptors() > 0 ||
+ new_descriptors->number_of_descriptors() ==
+ old_descriptors_->number_of_descriptors());
+ DCHECK(new_descriptors->number_of_descriptors() == old_nof_);
+
+ int root_nof = root_map_->NumberOfOwnDescriptors();
+
+  // We passed the root modification check in FindRootMap(), so the root
+  // descriptors are either unmodified or already more general than
+  // requested. Take the first |root_nof| entries as is.
+ // 0 -> |root_nof|
+ int current_offset = 0;
+ for (int i = 0; i < root_nof; ++i) {
+ PropertyDetails old_details = old_descriptors_->GetDetails(i);
+ if (old_details.location() == kField) {
+ current_offset += old_details.field_width_in_words();
+ }
+ Descriptor d(handle(GetKey(i), isolate_),
+ MaybeObjectHandle(old_descriptors_->GetValue(i), isolate_),
+ old_details);
+ new_descriptors->Set(i, &d);
+ }
+
+ // Merge "updated" old_descriptor entries with target_descriptor entries.
+ // |root_nof| -> |target_nof|
+ for (int i = root_nof; i < target_nof; ++i) {
+ Handle<Name> key(GetKey(i), isolate_);
+ PropertyDetails old_details = GetDetails(i);
+ PropertyDetails target_details = target_descriptors->GetDetails(i);
+
+ PropertyKind next_kind = old_details.kind();
+ PropertyAttributes next_attributes = old_details.attributes();
+ DCHECK_EQ(next_kind, target_details.kind());
+ DCHECK_EQ(next_attributes, target_details.attributes());
+
+ PropertyConstness next_constness = GeneralizeConstness(
+ old_details.constness(), target_details.constness());
+
+    // Note: a failed value equality check does not invalidate per-object
+    // property constness.
+ PropertyLocation next_location =
+ old_details.location() == kField ||
+ target_details.location() == kField ||
+ !EqualImmutableValues(target_descriptors->GetStrongValue(i),
+ GetValue(i))
+ ? kField
+ : kDescriptor;
+
+ // Ensure that mutable values are stored in fields.
+ DCHECK_IMPLIES(next_constness == PropertyConstness::kMutable,
+ next_location == kField);
+
+ Representation next_representation =
+ old_details.representation().generalize(
+ target_details.representation());
+
+ if (next_location == kField) {
+ Handle<FieldType> old_field_type =
+ GetOrComputeFieldType(i, old_details.location(), next_representation);
+
+ Handle<FieldType> target_field_type =
+ GetOrComputeFieldType(target_descriptors, i,
+ target_details.location(), next_representation);
+
+ Handle<FieldType> next_field_type = Map::GeneralizeFieldType(
+ old_details.representation(), old_field_type, next_representation,
+ target_field_type, isolate_);
+
+ Map::GeneralizeIfCanHaveTransitionableFastElementsKind(
+ isolate_, instance_type, &next_representation, &next_field_type);
+
+ MaybeObjectHandle wrapped_type(
+ Map::WrapFieldType(isolate_, next_field_type));
+ Descriptor d;
+ if (next_kind == kData) {
+ d = Descriptor::DataField(key, current_offset, next_attributes,
+ next_constness, next_representation,
+ wrapped_type);
+ } else {
+ // TODO(ishell): mutable accessors are not implemented yet.
+ UNIMPLEMENTED();
+ }
+ current_offset += d.GetDetails().field_width_in_words();
+ new_descriptors->Set(i, &d);
+ } else {
+ DCHECK_EQ(kDescriptor, next_location);
+ DCHECK_EQ(PropertyConstness::kConst, next_constness);
+
+ Handle<Object> value(GetValue(i), isolate_);
+ DCHECK_EQ(kAccessor, next_kind);
+ Descriptor d = Descriptor::AccessorConstant(key, value, next_attributes);
+ new_descriptors->Set(i, &d);
+ }
+ }
+
+ // Take "updated" old_descriptor entries.
+ // |target_nof| -> |old_nof|
+ for (int i = target_nof; i < old_nof_; ++i) {
+ PropertyDetails old_details = GetDetails(i);
+ Handle<Name> key(GetKey(i), isolate_);
+
+ PropertyKind next_kind = old_details.kind();
+ PropertyAttributes next_attributes = old_details.attributes();
+ PropertyConstness next_constness = old_details.constness();
+ PropertyLocation next_location = old_details.location();
+ Representation next_representation = old_details.representation();
+
+ Descriptor d;
+ if (next_location == kField) {
+ Handle<FieldType> next_field_type =
+ GetOrComputeFieldType(i, old_details.location(), next_representation);
+
+      // If |new_elements_kind_| is still transitionable, then the old map's
+      // elements kind is also transitionable and therefore the old
+      // descriptor array must already hold the most general field type.
+ CHECK_IMPLIES(
+ is_transitionable_fast_elements_kind_,
+ Map::IsMostGeneralFieldType(next_representation, *next_field_type));
+
+ MaybeObjectHandle wrapped_type(
+ Map::WrapFieldType(isolate_, next_field_type));
+ if (next_kind == kData) {
+ d = Descriptor::DataField(key, current_offset, next_attributes,
+ next_constness, next_representation,
+ wrapped_type);
+ } else {
+ // TODO(ishell): mutable accessors are not implemented yet.
+ UNIMPLEMENTED();
+ }
+ current_offset += d.GetDetails().field_width_in_words();
+ new_descriptors->Set(i, &d);
+ } else {
+ DCHECK_EQ(kDescriptor, next_location);
+ DCHECK_EQ(PropertyConstness::kConst, next_constness);
+
+ Handle<Object> value(GetValue(i), isolate_);
+ if (next_kind == kData) {
+ d = Descriptor::DataConstant(key, value, next_attributes);
+ } else {
+ DCHECK_EQ(kAccessor, next_kind);
+ d = Descriptor::AccessorConstant(key, value, next_attributes);
+ }
+ new_descriptors->Set(i, &d);
+ }
+ }
+
+ new_descriptors->Sort();
+ return new_descriptors;
+}
+
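+// The split map is the deepest map on the root's existing transition chain
+// whose descriptors still agree with |descriptors| (same kind, attributes,
+// constness, location and representation, and compatible field types or
+// values). Everything past it is rebuilt by ConstructNewMap().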
+Handle<Map> MapUpdater::FindSplitMap(Handle<DescriptorArray> descriptors) {
+ DisallowHeapAllocation no_allocation;
+
+ int root_nof = root_map_->NumberOfOwnDescriptors();
+ Map current = *root_map_;
+ for (int i = root_nof; i < old_nof_; i++) {
+ Name name = descriptors->GetKey(i);
+ PropertyDetails details = descriptors->GetDetails(i);
+ Map next =
+ TransitionsAccessor(isolate_, current, &no_allocation)
+ .SearchTransition(name, details.kind(), details.attributes());
+ if (next.is_null()) break;
+ DescriptorArray next_descriptors = next.instance_descriptors();
+
+ PropertyDetails next_details = next_descriptors.GetDetails(i);
+ DCHECK_EQ(details.kind(), next_details.kind());
+ DCHECK_EQ(details.attributes(), next_details.attributes());
+ if (details.constness() != next_details.constness()) break;
+ if (details.location() != next_details.location()) break;
+ if (!details.representation().Equals(next_details.representation())) break;
+
+ if (next_details.location() == kField) {
+ FieldType next_field_type = next_descriptors.GetFieldType(i);
+ if (!descriptors->GetFieldType(i).NowIs(next_field_type)) {
+ break;
+ }
+ } else {
+ if (!EqualImmutableValues(descriptors->GetStrongValue(i),
+ next_descriptors.GetStrongValue(i))) {
+ break;
+ }
+ }
+ current = next;
+ }
+ return handle(current, isolate_);
+}
+
+MapUpdater::State MapUpdater::ConstructNewMap() {
+ Handle<DescriptorArray> new_descriptors = BuildDescriptorArray();
+
+ Handle<Map> split_map = FindSplitMap(new_descriptors);
+ int split_nof = split_map->NumberOfOwnDescriptors();
+ if (old_nof_ == split_nof) {
+ CHECK(has_integrity_level_transition_);
+ state_ = kAtIntegrityLevelSource;
+ return state_;
+ }
+
+ PropertyDetails split_details = GetDetails(split_nof);
+ TransitionsAccessor transitions(isolate_, split_map);
+
+ // Invalidate a transition target at |key|.
+ Map maybe_transition = transitions.SearchTransition(
+ GetKey(split_nof), split_details.kind(), split_details.attributes());
+ if (!maybe_transition.is_null()) {
+ maybe_transition.DeprecateTransitionTree(isolate_);
+ }
+
+  // If |maybe_transition| is not null then the transition array already
+  // contains an entry for the given descriptor. This means that the
+  // transition can be inserted regardless of whether the transition array
+  // is full or not.
+ if (maybe_transition.is_null() && !transitions.CanHaveMoreTransitions()) {
+ return CopyGeneralizeAllFields("GenAll_CantHaveMoreTransitions");
+ }
+
+ old_map_->NotifyLeafMapLayoutChange(isolate_);
+
+ if (FLAG_trace_generalization && modified_descriptor_ >= 0) {
+ PropertyDetails old_details =
+ old_descriptors_->GetDetails(modified_descriptor_);
+ PropertyDetails new_details =
+ new_descriptors->GetDetails(modified_descriptor_);
+ MaybeHandle<FieldType> old_field_type;
+ MaybeHandle<FieldType> new_field_type;
+ MaybeHandle<Object> old_value;
+ MaybeHandle<Object> new_value;
+ if (old_details.location() == kField) {
+ old_field_type = handle(
+ old_descriptors_->GetFieldType(modified_descriptor_), isolate_);
+ } else {
+ old_value = handle(old_descriptors_->GetStrongValue(modified_descriptor_),
+ isolate_);
+ }
+ if (new_details.location() == kField) {
+ new_field_type =
+ handle(new_descriptors->GetFieldType(modified_descriptor_), isolate_);
+ } else {
+ new_value = handle(new_descriptors->GetStrongValue(modified_descriptor_),
+ isolate_);
+ }
+
+ old_map_->PrintGeneralization(
+ isolate_, stdout, "", modified_descriptor_, split_nof, old_nof_,
+ old_details.location() == kDescriptor && new_location_ == kField,
+ old_details.representation(), new_details.representation(),
+ old_details.constness(), new_details.constness(), old_field_type,
+ old_value, new_field_type, new_value);
+ }
+
+ Handle<LayoutDescriptor> new_layout_descriptor =
+ LayoutDescriptor::New(isolate_, split_map, new_descriptors, old_nof_);
+
+ Handle<Map> new_map = Map::AddMissingTransitions(
+ isolate_, split_map, new_descriptors, new_layout_descriptor);
+
+  // The deprecated part of the transition tree is no longer reachable, so
+  // replace the current instance descriptors in the "survived" part of the
+  // tree with the new descriptors to maintain the descriptors sharing
+  // invariant.
+ split_map->ReplaceDescriptors(isolate_, *new_descriptors,
+ *new_layout_descriptor);
+
+ if (has_integrity_level_transition_) {
+ target_map_ = new_map;
+ state_ = kAtIntegrityLevelSource;
+ } else {
+ result_map_ = new_map;
+ state_ = kEnd;
+ }
+ return state_; // Done.
+}
+
+MapUpdater::State MapUpdater::ConstructNewMapWithIntegrityLevelTransition() {
+ DCHECK_EQ(kAtIntegrityLevelSource, state_);
+
+ TransitionsAccessor transitions(isolate_, target_map_);
+ if (!transitions.CanHaveMoreTransitions()) {
+ return CopyGeneralizeAllFields("GenAll_CantHaveMoreTransitions");
+ }
+
+ result_map_ = Map::CopyForPreventExtensions(
+ isolate_, target_map_, integrity_level_, integrity_level_symbol_,
+ "CopyForPreventExtensions",
+ old_map_->elements_kind() == DICTIONARY_ELEMENTS);
+ DCHECK_IMPLIES(old_map_->elements_kind() == DICTIONARY_ELEMENTS,
+ result_map_->elements_kind() == DICTIONARY_ELEMENTS);
+
+ state_ = kEnd;
+ return state_;
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/objects/map-updater.h b/deps/v8/src/objects/map-updater.h
new file mode 100644
index 0000000000..3ba86eacbc
--- /dev/null
+++ b/deps/v8/src/objects/map-updater.h
@@ -0,0 +1,205 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_MAP_UPDATER_H_
+#define V8_OBJECTS_MAP_UPDATER_H_
+
+#include "src/common/globals.h"
+#include "src/handles/handles.h"
+#include "src/objects/elements-kind.h"
+#include "src/objects/field-type.h"
+#include "src/objects/map.h"
+#include "src/objects/property-details.h"
+
+namespace v8 {
+namespace internal {
+
+// The |MapUpdater| class implements all sorts of map reconfigurations
+// including changes of elements kind, property attributes, property kind,
+// property location and field representation/type changes. It ensures that
+// the reconfigured map and all the intermediate maps are properly integrated
+// into the existing transition tree.
+//
+// To avoid high degrees of polymorphism, and to stabilize quickly, on every
+// rewrite the new type is deduced by merging the current type with any
+// potential new (partial) version of the type in the transition tree.
+// To do this, on each rewrite:
+// - Search the root of the transition tree using FindRootMap, remember
+// the integrity level (preventExtensions/seal/freeze) transitions.
+// - Find/create a |root_map| with requested |new_elements_kind|.
+// - Find |target_map|, the newest matching version of this map using the
+// "updated" |old_map|'s descriptor array (i.e. whose entry at |modify_index|
+//   is considered to be of |new_kind| and to have |new_attributes|) to walk
+// the transition tree. If there was an integrity level transition on the path
+// to the old map, use the descriptor array of the map preceding the first
+// integrity level transition (|integrity_source_map|), and try to replay
+// the integrity level transition afterwards.
+// - Merge/generalize the "updated" descriptor array of the |old_map| and
+// descriptor array of the |target_map|.
+// - Generalize the |modify_index| descriptor using |new_representation| and
+// |new_field_type|.
+// - Walk the tree again starting from the root towards |target_map|. Stop at
+//   |split_map|, the first map whose descriptor array does not match the merged
+// descriptor array.
+// - If |target_map| == |split_map|, and there are no integrity level
+// transitions, |target_map| is in the expected state. Return it.
+// - Otherwise, invalidate the outdated transition target from |target_map|, and
+// replace its transition tree with a new branch for the updated descriptors.
+// - If the |old_map| had an integrity level transition, create the new map for it.
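+//
+// A minimal usage sketch (illustrative only; |old_map| stands for whatever
+// map handle the caller wants to bring up to date):
+//
+//   MapUpdater updater(isolate, old_map);
+//   Handle<Map> updated = updater.Update();
+//
+// The reconfiguration entry points, ReconfigureToDataField() and
+// ReconfigureElementsKind(), are driven the same way, typically with one
+// request per MapUpdater instance.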
+class MapUpdater {
+ public:
+ MapUpdater(Isolate* isolate, Handle<Map> old_map);
+
+  // Prepares for reconfiguring a property at |descriptor| to a data field
+  // with the given |attributes| and |representation|/|field_type| and
+  // performs steps 1-5.
+ Handle<Map> ReconfigureToDataField(int descriptor,
+ PropertyAttributes attributes,
+ PropertyConstness constness,
+ Representation representation,
+ Handle<FieldType> field_type);
+
+  // Prepares for reconfiguring the elements kind and performs steps 1-5.
+ Handle<Map> ReconfigureElementsKind(ElementsKind elements_kind);
+
+  // Prepares for updating a deprecated map to the most up-to-date
+  // non-deprecated version and performs steps 1-5.
+ Handle<Map> Update();
+
+ private:
+ enum State {
+ kInitialized,
+ kAtRootMap,
+ kAtTargetMap,
+ kAtIntegrityLevelSource,
+ kEnd
+ };
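+
+  // Expected flow through these states (sketch): kInitialized -> kAtRootMap
+  // -> kAtTargetMap -> kEnd, with a detour through kAtIntegrityLevelSource
+  // when the old map had an integrity level transition; see ConstructNewMap()
+  // and ConstructNewMapWithIntegrityLevelTransition().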
+
+  // Tries to reconfigure the property in place without rebuilding the
+  // transition tree and creating new maps. See the implementation for details.
+ State TryReconfigureToDataFieldInplace();
+
+ // Step 1.
+ // - Search the root of the transition tree using FindRootMap.
+ // - Find/create a |root_map_| with requested |new_elements_kind_|.
+ State FindRootMap();
+
+ // Step 2.
+ // - Find |target_map|, the newest matching version of this map using the
+ // "updated" |old_map|'s descriptor array (i.e. whose entry at
+ // |modify_index| is considered to be of |new_kind| and having
+  //   |modify_index| is considered to be of |new_kind| and to have
+ // level transition on the path to the old map, use the descriptor array
+ // of the map preceding the first integrity level transition
+ // (|integrity_source_map|), and try to replay the integrity level
+ // transition afterwards.
+ State FindTargetMap();
+
+ // Step 3.
+ // - Merge/generalize the "updated" descriptor array of the |old_map_| and
+ // descriptor array of the |target_map_|.
+ // - Generalize the |modified_descriptor_| using |new_representation| and
+ // |new_field_type_|.
+ Handle<DescriptorArray> BuildDescriptorArray();
+
+ // Step 4.
+ // - Walk the tree again starting from the root towards |target_map|. Stop at
+  //   |split_map|, the first map whose descriptor array does not match the
+ // merged descriptor array.
+ Handle<Map> FindSplitMap(Handle<DescriptorArray> descriptors);
+
+ // Step 5.
+ // - If |target_map| == |split_map|, |target_map| is in the expected state.
+ // Return it.
+ // - Otherwise, invalidate the outdated transition target from |target_map|,
+ // and replace its transition tree with a new branch for the updated
+ // descriptors.
+ State ConstructNewMap();
+
+  // Step 6 (if there was an integrity level transition).
+  // - If the |old_map| had an integrity level transition, create the new map
+  //   for it.
+ State ConstructNewMapWithIntegrityLevelTransition();
+
+  // When a requested reconfiguration cannot be done, the result is a copy
+ // of |old_map_| where every field has |Tagged| representation and |Any|
+ // field type. This map is disconnected from the transition tree.
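+  // (Illustrative sketch: a field tracked as, say, Smi with an exact field
+  // type leaves this fallback as a Tagged field of type Any.)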
+ State CopyGeneralizeAllFields(const char* reason);
+
+  // Returns the name of a |descriptor| property.
+ inline Name GetKey(int descriptor) const;
+
+  // Returns property details of a |descriptor| in the "updated"
+  // |old_descriptors_| array.
+ inline PropertyDetails GetDetails(int descriptor) const;
+
+  // Returns the value of a |descriptor| with kDescriptor location in the
+  // "updated" |old_descriptors_| array.
+ inline Object GetValue(int descriptor) const;
+
+  // Returns the field type for a |descriptor| with kField location in the
+  // "updated" |old_descriptors_| array.
+ inline FieldType GetFieldType(int descriptor) const;
+
+  // If a |descriptor| property in the "updated" |old_descriptors_| has kField
+  // location then returns its field type; otherwise computes the optimal
+  // field type for the descriptor's value and |representation|. The |location|
+ // value must be a pre-fetched location for |descriptor|.
+ inline Handle<FieldType> GetOrComputeFieldType(
+ int descriptor, PropertyLocation location,
+ Representation representation) const;
+
+  // If a |descriptor| property in the given |descriptors| array has kField
+  // location then returns its field type; otherwise computes the optimal
+  // field type for the descriptor's value and |representation|.
+ // The |location| value must be a pre-fetched location for |descriptor|.
+ inline Handle<FieldType> GetOrComputeFieldType(
+ Handle<DescriptorArray> descriptors, int descriptor,
+ PropertyLocation location, Representation representation);
+
+ void GeneralizeField(Handle<Map> map, int modify_index,
+ PropertyConstness new_constness,
+ Representation new_representation,
+ Handle<FieldType> new_field_type);
+
+ bool TrySaveIntegrityLevelTransitions();
+
+ Isolate* isolate_;
+ Handle<Map> old_map_;
+ Handle<DescriptorArray> old_descriptors_;
+ Handle<Map> root_map_;
+ Handle<Map> target_map_;
+ Handle<Map> result_map_;
+ int old_nof_;
+
+ // Information about integrity level transitions.
+ bool has_integrity_level_transition_ = false;
+ PropertyAttributes integrity_level_ = NONE;
+ Handle<Symbol> integrity_level_symbol_;
+ Handle<Map> integrity_source_map_;
+
+ State state_ = kInitialized;
+ ElementsKind new_elements_kind_;
+ bool is_transitionable_fast_elements_kind_;
+
+ // If |modified_descriptor_| is not equal to -1 then the fields below form
+ // an "update" of the |old_map_|'s descriptors.
+ int modified_descriptor_ = -1;
+ PropertyKind new_kind_ = kData;
+ PropertyAttributes new_attributes_ = NONE;
+ PropertyConstness new_constness_ = PropertyConstness::kMutable;
+ PropertyLocation new_location_ = kField;
+ Representation new_representation_ = Representation::None();
+
+ // Data specific to kField location.
+ Handle<FieldType> new_field_type_;
+
+ // Data specific to kDescriptor location.
+ Handle<Object> new_value_;
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_OBJECTS_MAP_UPDATER_H_
diff --git a/deps/v8/src/objects/map.cc b/deps/v8/src/objects/map.cc
index 80ea74a176..43d8c305c5 100644
--- a/deps/v8/src/objects/map.cc
+++ b/deps/v8/src/objects/map.cc
@@ -4,24 +4,24 @@
#include "src/objects/map.h"
-#include "src/bootstrapper.h"
-#include "src/counters-inl.h"
-#include "src/field-type.h"
-#include "src/frames.h"
-#include "src/handles-inl.h"
+#include "src/execution/frames.h"
+#include "src/execution/isolate.h"
+#include "src/handles/handles-inl.h"
+#include "src/handles/maybe-handles.h"
#include "src/heap/heap-write-barrier-inl.h"
-#include "src/isolate.h"
-#include "src/layout-descriptor.h"
-#include "src/log.h"
-#include "src/map-updater.h"
-#include "src/maybe-handles.h"
+#include "src/init/bootstrapper.h"
+#include "src/logging/counters-inl.h"
+#include "src/logging/log.h"
#include "src/objects/descriptor-array.h"
+#include "src/objects/field-type.h"
#include "src/objects/js-objects.h"
+#include "src/objects/layout-descriptor.h"
+#include "src/objects/map-updater.h"
#include "src/objects/maybe-object.h"
#include "src/objects/oddball.h"
-#include "src/ostreams.h"
-#include "src/property.h"
-#include "src/transitions-inl.h"
+#include "src/objects/property.h"
+#include "src/objects/transitions-inl.h"
+#include "src/utils/ostreams.h"
#include "src/zone/zone-containers.h"
namespace v8 {
@@ -34,12 +34,12 @@ Map Map::GetPrototypeChainRootMap(Isolate* isolate) const {
}
int constructor_function_index = GetConstructorFunctionIndex();
if (constructor_function_index != Map::kNoConstructorFunctionIndex) {
- Context native_context = isolate->context()->native_context();
+ Context native_context = isolate->context().native_context();
JSFunction constructor_function =
- JSFunction::cast(native_context->get(constructor_function_index));
- return constructor_function->initial_map();
+ JSFunction::cast(native_context.get(constructor_function_index));
+ return constructor_function.initial_map();
}
- return ReadOnlyRoots(isolate).null_value()->map();
+ return ReadOnlyRoots(isolate).null_value().map();
}
// static
@@ -73,9 +73,9 @@ void Map::PrintReconfiguration(Isolate* isolate, FILE* file, int modify_index,
PropertyAttributes attributes) {
OFStream os(file);
os << "[reconfiguring]";
- Name name = instance_descriptors()->GetKey(modify_index);
- if (name->IsString()) {
- String::cast(name)->PrintOn(file);
+ Name name = instance_descriptors().GetKey(modify_index);
+ if (name.IsString()) {
+ String::cast(name).PrintOn(file);
} else {
os << "{symbol " << reinterpret_cast<void*>(name.ptr()) << "}";
}
@@ -88,7 +88,7 @@ void Map::PrintReconfiguration(Isolate* isolate, FILE* file, int modify_index,
VisitorId Map::GetVisitorId(Map map) {
STATIC_ASSERT(kVisitorIdCount <= 256);
- const int instance_type = map->instance_type();
+ const int instance_type = map.instance_type();
if (instance_type < FIRST_NONSTRING_TYPE) {
switch (instance_type & kStringRepresentationMask) {
@@ -301,7 +301,7 @@ VisitorId Map::GetVisitorId(Map map) {
case WASM_TABLE_TYPE:
case JS_BOUND_FUNCTION_TYPE: {
const bool has_raw_data_fields =
- (FLAG_unbox_double_fields && !map->HasFastPointerLayout()) ||
+ (FLAG_unbox_double_fields && !map.HasFastPointerLayout()) ||
(COMPRESS_POINTERS_BOOL && JSObject::GetEmbedderFieldCount(map) > 0);
return has_raw_data_fields ? kVisitJSObject : kVisitJSObjectFast;
}
@@ -325,21 +325,6 @@ VisitorId Map::GetVisitorId(Map map) {
case BIGINT_TYPE:
return kVisitBigInt;
- case FIXED_UINT8_ARRAY_TYPE:
- case FIXED_INT8_ARRAY_TYPE:
- case FIXED_UINT16_ARRAY_TYPE:
- case FIXED_INT16_ARRAY_TYPE:
- case FIXED_UINT32_ARRAY_TYPE:
- case FIXED_INT32_ARRAY_TYPE:
- case FIXED_FLOAT32_ARRAY_TYPE:
- case FIXED_UINT8_CLAMPED_ARRAY_TYPE:
- case FIXED_BIGUINT64_ARRAY_TYPE:
- case FIXED_BIGINT64_ARRAY_TYPE:
- return kVisitFixedTypedArrayBase;
-
- case FIXED_FLOAT64_ARRAY_TYPE:
- return kVisitFixedFloat64Array;
-
case ALLOCATION_SITE_TYPE:
return kVisitAllocationSite;
@@ -349,6 +334,9 @@ VisitorId Map::GetVisitorId(Map map) {
if (instance_type == PROTOTYPE_INFO_TYPE) {
return kVisitPrototypeInfo;
}
+ if (instance_type == WASM_CAPI_FUNCTION_DATA_TYPE) {
+ return kVisitWasmCapiFunctionData;
+ }
return kVisitStruct;
case LOAD_HANDLER_TYPE:
@@ -364,13 +352,14 @@ void Map::PrintGeneralization(
Isolate* isolate, FILE* file, const char* reason, int modify_index,
int split, int descriptors, bool descriptor_to_field,
Representation old_representation, Representation new_representation,
+ PropertyConstness old_constness, PropertyConstness new_constness,
MaybeHandle<FieldType> old_field_type, MaybeHandle<Object> old_value,
MaybeHandle<FieldType> new_field_type, MaybeHandle<Object> new_value) {
OFStream os(file);
os << "[generalizing]";
- Name name = instance_descriptors()->GetKey(modify_index);
- if (name->IsString()) {
- String::cast(name)->PrintOn(file);
+ Name name = instance_descriptors().GetKey(modify_index);
+ if (name.IsString()) {
+ String::cast(name).PrintOn(file);
} else {
os << "{symbol " << reinterpret_cast<void*>(name.ptr()) << "}";
}
@@ -384,7 +373,7 @@ void Map::PrintGeneralization(
} else {
old_field_type.ToHandleChecked()->PrintTo(os);
}
- os << "}";
+ os << ";" << old_constness << "}";
}
os << "->" << new_representation.Mnemonic() << "{";
if (new_field_type.is_null()) {
@@ -392,7 +381,7 @@ void Map::PrintGeneralization(
} else {
new_field_type.ToHandleChecked()->PrintTo(os);
}
- os << "} (";
+ os << ";" << new_constness << "} (";
if (strlen(reason) > 0) {
os << reason;
} else {
@@ -429,9 +418,9 @@ MaybeHandle<Map> Map::CopyWithField(Isolate* isolate, Handle<Map> map,
PropertyConstness constness,
Representation representation,
TransitionFlag flag) {
- DCHECK(DescriptorArray::kNotFound ==
- map->instance_descriptors()->Search(*name,
- map->NumberOfOwnDescriptors()));
+ DCHECK(
+ DescriptorArray::kNotFound ==
+ map->instance_descriptors().Search(*name, map->NumberOfOwnDescriptors()));
// Ensure the descriptor array does not get too big.
if (map->NumberOfOwnDescriptors() >= kMaxNumberOfDescriptors) {
@@ -447,13 +436,11 @@ MaybeHandle<Map> Map::CopyWithField(Isolate* isolate, Handle<Map> map,
type = FieldType::Any(isolate);
} else {
Map::GeneralizeIfCanHaveTransitionableFastElementsKind(
- isolate, map->instance_type(), &constness, &representation, &type);
+ isolate, map->instance_type(), &representation, &type);
}
MaybeObjectHandle wrapped_type = WrapFieldType(isolate, type);
- DCHECK_IMPLIES(!FLAG_track_constant_fields,
- constness == PropertyConstness::kMutable);
Descriptor d = Descriptor::DataField(name, index, attributes, constness,
representation, wrapped_type);
Handle<Map> new_map = Map::CopyAddDescriptor(isolate, map, &d, flag);
@@ -471,23 +458,15 @@ MaybeHandle<Map> Map::CopyWithConstant(Isolate* isolate, Handle<Map> map,
return MaybeHandle<Map>();
}
- if (FLAG_track_constant_fields) {
- Representation representation = constant->OptimalRepresentation();
- Handle<FieldType> type = constant->OptimalType(isolate, representation);
- return CopyWithField(isolate, map, name, type, attributes,
- PropertyConstness::kConst, representation, flag);
- } else {
- // Allocate new instance descriptors with (name, constant) added.
- Descriptor d =
- Descriptor::DataConstant(isolate, name, 0, constant, attributes);
- Handle<Map> new_map = Map::CopyAddDescriptor(isolate, map, &d, flag);
- return new_map;
- }
+ Representation representation = constant->OptimalRepresentation();
+ Handle<FieldType> type = constant->OptimalType(isolate, representation);
+ return CopyWithField(isolate, map, name, type, attributes,
+ PropertyConstness::kConst, representation, flag);
}
bool Map::TransitionRemovesTaggedField(Map target) const {
int inobject = NumberOfFields();
- int target_inobject = target->NumberOfFields();
+ int target_inobject = target.NumberOfFields();
for (int i = target_inobject; i < inobject; i++) {
FieldIndex index = FieldIndex::ForPropertyIndex(*this, i);
if (!IsUnboxedDoubleField(index)) return true;
@@ -497,11 +476,11 @@ bool Map::TransitionRemovesTaggedField(Map target) const {
bool Map::TransitionChangesTaggedFieldToUntaggedField(Map target) const {
int inobject = NumberOfFields();
- int target_inobject = target->NumberOfFields();
+ int target_inobject = target.NumberOfFields();
int limit = Min(inobject, target_inobject);
for (int i = 0; i < limit; i++) {
FieldIndex index = FieldIndex::ForPropertyIndex(target, i);
- if (!IsUnboxedDoubleField(index) && target->IsUnboxedDoubleField(index)) {
+ if (!IsUnboxedDoubleField(index) && target.IsUnboxedDoubleField(index)) {
return true;
}
}
@@ -514,9 +493,9 @@ bool Map::TransitionRequiresSynchronizationWithGC(Map target) const {
}
bool Map::InstancesNeedRewriting(Map target) const {
- int target_number_of_fields = target->NumberOfFields();
- int target_inobject = target->GetInObjectProperties();
- int target_unused = target->UnusedPropertyFields();
+ int target_number_of_fields = target.NumberOfFields();
+ int target_inobject = target.GetInObjectProperties();
+ int target_unused = target.UnusedPropertyFields();
int old_number_of_fields;
return InstancesNeedRewriting(target, target_number_of_fields,
@@ -534,11 +513,11 @@ bool Map::InstancesNeedRewriting(Map target, int target_number_of_fields,
// If smi descriptors were replaced by double descriptors, rewrite.
DescriptorArray old_desc = instance_descriptors();
- DescriptorArray new_desc = target->instance_descriptors();
+ DescriptorArray new_desc = target.instance_descriptors();
int limit = NumberOfOwnDescriptors();
for (int i = 0; i < limit; i++) {
- if (new_desc->GetDetails(i).representation().IsDouble() !=
- old_desc->GetDetails(i).representation().IsDouble()) {
+ if (new_desc.GetDetails(i).representation().IsDouble() !=
+ old_desc.GetDetails(i).representation().IsDouble()) {
return true;
}
}
@@ -562,7 +541,7 @@ int Map::NumberOfFields() const {
DescriptorArray descriptors = instance_descriptors();
int result = 0;
for (int i = 0; i < NumberOfOwnDescriptors(); i++) {
- if (descriptors->GetDetails(i).location() == kField) result++;
+ if (descriptors.GetDetails(i).location() == kField) result++;
}
return result;
}
@@ -572,7 +551,7 @@ Map::FieldCounts Map::GetFieldCounts() const {
int mutable_count = 0;
int const_count = 0;
for (int i = 0; i < NumberOfOwnDescriptors(); i++) {
- PropertyDetails details = descriptors->GetDetails(i);
+ PropertyDetails details = descriptors.GetDetails(i);
if (details.location() == kField) {
switch (details.constness()) {
case PropertyConstness::kMutable:
@@ -631,14 +610,15 @@ Handle<Map> Map::CopyGeneralizeAllFields(Isolate* isolate, Handle<Map> map,
MaybeHandle<FieldType> field_type = FieldType::None(isolate);
if (details.location() == kField) {
field_type = handle(
- map->instance_descriptors()->GetFieldType(modify_index), isolate);
+ map->instance_descriptors().GetFieldType(modify_index), isolate);
}
map->PrintGeneralization(
isolate, stdout, reason, modify_index,
new_map->NumberOfOwnDescriptors(), new_map->NumberOfOwnDescriptors(),
details.location() == kDescriptor, details.representation(),
- Representation::Tagged(), field_type, MaybeHandle<Object>(),
- FieldType::Any(isolate), MaybeHandle<Object>());
+ Representation::Tagged(), details.constness(), details.constness(),
+ field_type, MaybeHandle<Object>(), FieldType::Any(isolate),
+ MaybeHandle<Object>());
}
}
new_map->set_elements_kind(elements_kind);
@@ -651,14 +631,14 @@ void Map::DeprecateTransitionTree(Isolate* isolate) {
TransitionsAccessor transitions(isolate, *this, &no_gc);
int num_transitions = transitions.NumberOfTransitions();
for (int i = 0; i < num_transitions; ++i) {
- transitions.GetTarget(i)->DeprecateTransitionTree(isolate);
+ transitions.GetTarget(i).DeprecateTransitionTree(isolate);
}
- DCHECK(!constructor_or_backpointer()->IsFunctionTemplateInfo());
+ DCHECK(!constructor_or_backpointer().IsFunctionTemplateInfo());
set_is_deprecated(true);
if (FLAG_trace_maps) {
LOG(isolate, MapEvent("Deprecate", *this, Map()));
}
- dependent_code()->DeoptimizeDependentCodeGroup(
+ dependent_code().DeoptimizeDependentCodeGroup(
isolate, DependentCode::kTransitionGroup);
NotifyLeafMapLayoutChange(isolate);
}
@@ -668,7 +648,7 @@ void Map::DeprecateTransitionTree(Isolate* isolate) {
void Map::ReplaceDescriptors(Isolate* isolate, DescriptorArray new_descriptors,
LayoutDescriptor new_layout_descriptor) {
// Don't overwrite the empty descriptor array or initial map's descriptors.
- if (NumberOfOwnDescriptors() == 0 || GetBackPointer()->IsUndefined(isolate)) {
+ if (NumberOfOwnDescriptors() == 0 || GetBackPointer().IsUndefined(isolate)) {
return;
}
@@ -678,13 +658,13 @@ void Map::ReplaceDescriptors(Isolate* isolate, DescriptorArray new_descriptors,
// all its elements.
Map current = *this;
MarkingBarrierForDescriptorArray(isolate->heap(), current, to_replace,
- to_replace->number_of_descriptors());
- while (current->instance_descriptors() == to_replace) {
- Object next = current->GetBackPointer();
- if (next->IsUndefined(isolate)) break; // Stop overwriting at initial map.
- current->SetEnumLength(kInvalidEnumCacheSentinel);
- current->UpdateDescriptors(isolate, new_descriptors, new_layout_descriptor,
- current->NumberOfOwnDescriptors());
+ to_replace.number_of_descriptors());
+ while (current.instance_descriptors() == to_replace) {
+ Object next = current.GetBackPointer();
+ if (next.IsUndefined(isolate)) break; // Stop overwriting at initial map.
+ current.SetEnumLength(kInvalidEnumCacheSentinel);
+ current.UpdateDescriptors(isolate, new_descriptors, new_layout_descriptor,
+ current.NumberOfOwnDescriptors());
current = Map::cast(next);
}
set_owns_descriptors(false);
@@ -693,13 +673,13 @@ void Map::ReplaceDescriptors(Isolate* isolate, DescriptorArray new_descriptors,
Map Map::FindRootMap(Isolate* isolate) const {
Map result = *this;
while (true) {
- Object back = result->GetBackPointer();
- if (back->IsUndefined(isolate)) {
+ Object back = result.GetBackPointer();
+ if (back.IsUndefined(isolate)) {
// Initial map always owns descriptors and doesn't have unused entries
// in the descriptor array.
- DCHECK(result->owns_descriptors());
- DCHECK_EQ(result->NumberOfOwnDescriptors(),
- result->instance_descriptors()->number_of_descriptors());
+ DCHECK(result.owns_descriptors());
+ DCHECK_EQ(result.NumberOfOwnDescriptors(),
+ result.instance_descriptors().number_of_descriptors());
return result;
}
result = Map::cast(back);
@@ -708,13 +688,13 @@ Map Map::FindRootMap(Isolate* isolate) const {
Map Map::FindFieldOwner(Isolate* isolate, int descriptor) const {
DisallowHeapAllocation no_allocation;
- DCHECK_EQ(kField, instance_descriptors()->GetDetails(descriptor).location());
+ DCHECK_EQ(kField, instance_descriptors().GetDetails(descriptor).location());
Map result = *this;
while (true) {
- Object back = result->GetBackPointer();
- if (back->IsUndefined(isolate)) break;
+ Object back = result.GetBackPointer();
+ if (back.IsUndefined(isolate)) break;
const Map parent = Map::cast(back);
- if (parent->NumberOfOwnDescriptors() <= descriptor) break;
+ if (parent.NumberOfOwnDescriptors() <= descriptor) break;
result = parent;
}
return result;
@@ -727,7 +707,7 @@ void Map::UpdateFieldType(Isolate* isolate, int descriptor, Handle<Name> name,
DCHECK(new_wrapped_type->IsSmi() || new_wrapped_type->IsWeak());
// We store raw pointers in the queue, so no allocations are allowed.
DisallowHeapAllocation no_allocation;
- PropertyDetails details = instance_descriptors()->GetDetails(descriptor);
+ PropertyDetails details = instance_descriptors().GetDetails(descriptor);
if (details.location() != kField) return;
DCHECK_EQ(kData, details.kind());
@@ -745,8 +725,8 @@ void Map::UpdateFieldType(Isolate* isolate, int descriptor, Handle<Name> name,
Map target = transitions.GetTarget(i);
backlog.push(target);
}
- DescriptorArray descriptors = current->instance_descriptors();
- PropertyDetails details = descriptors->GetDetails(descriptor);
+ DescriptorArray descriptors = current.instance_descriptors();
+ PropertyDetails details = descriptors.GetDetails(descriptor);
// It is allowed to change representation here only from None
// to something or from Smi or HeapObject to Tagged.
@@ -756,19 +736,17 @@ void Map::UpdateFieldType(Isolate* isolate, int descriptor, Handle<Name> name,
// Skip if already updated the shared descriptor.
if (new_constness != details.constness() ||
!new_representation.Equals(details.representation()) ||
- descriptors->GetFieldType(descriptor) != *new_wrapped_type.object()) {
- DCHECK_IMPLIES(!FLAG_track_constant_fields,
- new_constness == PropertyConstness::kMutable);
+ descriptors.GetFieldType(descriptor) != *new_wrapped_type.object()) {
Descriptor d = Descriptor::DataField(
- name, descriptors->GetFieldIndex(descriptor), details.attributes(),
+ name, descriptors.GetFieldIndex(descriptor), details.attributes(),
new_constness, new_representation, new_wrapped_type);
- descriptors->Replace(descriptor, &d);
+ descriptors.Replace(descriptor, &d);
}
}
}
bool FieldTypeIsCleared(Representation rep, FieldType type) {
- return type->IsNone() && rep.IsHeapObject();
+ return type.IsNone() && rep.IsHeapObject();
}
// static
@@ -833,15 +811,16 @@ void Map::GeneralizeField(Isolate* isolate, Handle<Map> map, int modify_index,
MaybeObjectHandle wrapped_type(WrapFieldType(isolate, new_field_type));
field_owner->UpdateFieldType(isolate, modify_index, name, new_constness,
new_representation, wrapped_type);
- field_owner->dependent_code()->DeoptimizeDependentCodeGroup(
+ field_owner->dependent_code().DeoptimizeDependentCodeGroup(
isolate, DependentCode::kFieldOwnerGroup);
if (FLAG_trace_generalization) {
map->PrintGeneralization(
isolate, stdout, "field type generalization", modify_index,
map->NumberOfOwnDescriptors(), map->NumberOfOwnDescriptors(), false,
- details.representation(), details.representation(), old_field_type,
- MaybeHandle<Object>(), new_field_type, MaybeHandle<Object>());
+ details.representation(), details.representation(), old_constness,
+ new_constness, old_field_type, MaybeHandle<Object>(), new_field_type,
+ MaybeHandle<Object>());
}
}
@@ -877,7 +856,7 @@ Map SearchMigrationTarget(Isolate* isolate, Map old_map) {
do {
target = TransitionsAccessor(isolate, target, &no_allocation)
.GetMigrationTarget();
- } while (!target.is_null() && target->is_deprecated());
+ } while (!target.is_null() && target.is_deprecated());
if (target.is_null()) return Map();
// TODO(ishell): if this validation ever becomes a bottleneck consider adding a
@@ -888,12 +867,12 @@ Map SearchMigrationTarget(Isolate* isolate, Map old_map) {
// types instead of old_map's types.
// Go to slow map updating if the old_map has fast properties with cleared
// field types.
- int old_nof = old_map->NumberOfOwnDescriptors();
- DescriptorArray old_descriptors = old_map->instance_descriptors();
+ int old_nof = old_map.NumberOfOwnDescriptors();
+ DescriptorArray old_descriptors = old_map.instance_descriptors();
for (int i = 0; i < old_nof; i++) {
- PropertyDetails old_details = old_descriptors->GetDetails(i);
+ PropertyDetails old_details = old_descriptors.GetDetails(i);
if (old_details.location() == kField && old_details.kind() == kData) {
- FieldType old_type = old_descriptors->GetFieldType(i);
+ FieldType old_type = old_descriptors.GetFieldType(i);
if (FieldTypeIsCleared(old_details.representation(), old_type)) {
return Map();
}
@@ -947,8 +926,8 @@ IntegrityLevelTransitionInfo DetectIntegrityLevelTransitions(
// Figure out the most restrictive integrity level transition (it should
// be the last one in the transition tree).
- DCHECK(!map->is_extensible());
- Map previous = Map::cast(map->GetBackPointer());
+ DCHECK(!map.is_extensible());
+ Map previous = Map::cast(map.GetBackPointer());
TransitionsAccessor last_transitions(isolate, previous, no_allocation);
if (!last_transitions.HasIntegrityLevelTransitionTo(
map, &(info.integrity_level_symbol), &(info.integrity_level))) {
@@ -965,8 +944,8 @@ IntegrityLevelTransitionInfo DetectIntegrityLevelTransitions(
// Now walk up the back pointer chain and skip all integrity level
// transitions. If we encounter any non-integrity level transition interleaved
// with integrity level transitions, just bail out.
- while (!source_map->is_extensible()) {
- previous = Map::cast(source_map->GetBackPointer());
+ while (!source_map.is_extensible()) {
+ previous = Map::cast(source_map.GetBackPointer());
TransitionsAccessor transitions(isolate, previous, no_allocation);
if (!transitions.HasIntegrityLevelTransitionTo(source_map)) {
return info;
@@ -975,7 +954,7 @@ IntegrityLevelTransitionInfo DetectIntegrityLevelTransitions(
}
// Integrity-level transitions never change number of descriptors.
- CHECK_EQ(map->NumberOfOwnDescriptors(), source_map->NumberOfOwnDescriptors());
+ CHECK_EQ(map.NumberOfOwnDescriptors(), source_map.NumberOfOwnDescriptors());
info.has_integrity_level_transition = true;
info.integrity_level_source_map = source_map;
@@ -989,26 +968,25 @@ Map Map::TryUpdateSlow(Isolate* isolate, Map old_map) {
DisallowDeoptimization no_deoptimization(isolate);
// Check the state of the root map.
- Map root_map = old_map->FindRootMap(isolate);
- if (root_map->is_deprecated()) {
- JSFunction constructor = JSFunction::cast(root_map->GetConstructor());
- DCHECK(constructor->has_initial_map());
- DCHECK(constructor->initial_map()->is_dictionary_map());
- if (constructor->initial_map()->elements_kind() !=
- old_map->elements_kind()) {
+ Map root_map = old_map.FindRootMap(isolate);
+ if (root_map.is_deprecated()) {
+ JSFunction constructor = JSFunction::cast(root_map.GetConstructor());
+ DCHECK(constructor.has_initial_map());
+ DCHECK(constructor.initial_map().is_dictionary_map());
+ if (constructor.initial_map().elements_kind() != old_map.elements_kind()) {
return Map();
}
- return constructor->initial_map();
+ return constructor.initial_map();
}
- if (!old_map->EquivalentToForTransition(root_map)) return Map();
+ if (!old_map.EquivalentToForTransition(root_map)) return Map();
- ElementsKind from_kind = root_map->elements_kind();
- ElementsKind to_kind = old_map->elements_kind();
+ ElementsKind from_kind = root_map.elements_kind();
+ ElementsKind to_kind = old_map.elements_kind();
IntegrityLevelTransitionInfo info(old_map);
- if (root_map->is_extensible() != old_map->is_extensible()) {
- DCHECK(!old_map->is_extensible());
- DCHECK(root_map->is_extensible());
+ if (root_map.is_extensible() != old_map.is_extensible()) {
+ DCHECK(!old_map.is_extensible());
+ DCHECK(root_map.is_extensible());
info = DetectIntegrityLevelTransitions(old_map, isolate, &no_allocation);
// Bail out if there were some private symbol transitions mixed up
// with the integrity level transitions.
@@ -1017,18 +995,19 @@ Map Map::TryUpdateSlow(Isolate* isolate, Map old_map) {
// the integrity level transition sets the elements to dictionary mode.
DCHECK(to_kind == DICTIONARY_ELEMENTS ||
to_kind == SLOW_STRING_WRAPPER_ELEMENTS ||
- IsFixedTypedArrayElementsKind(to_kind));
- to_kind = info.integrity_level_source_map->elements_kind();
+ IsTypedArrayElementsKind(to_kind) ||
+ IsHoleyFrozenOrSealedElementsKind(to_kind));
+ to_kind = info.integrity_level_source_map.elements_kind();
}
if (from_kind != to_kind) {
// Try to follow existing elements kind transitions.
- root_map = root_map->LookupElementsTransitionMap(isolate, to_kind);
+ root_map = root_map.LookupElementsTransitionMap(isolate, to_kind);
if (root_map.is_null()) return Map();
// From here on, use the map with correct elements kind as root map.
}
// Replay the transitions as they were before the integrity level transition.
- Map result = root_map->TryReplayPropertyTransitions(
+ Map result = root_map.TryReplayPropertyTransitions(
isolate, info.integrity_level_source_map);
if (result.is_null()) return Map();
@@ -1039,9 +1018,9 @@ Map Map::TryUpdateSlow(Isolate* isolate, Map old_map) {
}
DCHECK_IMPLIES(!result.is_null(),
- old_map->elements_kind() == result->elements_kind());
+ old_map.elements_kind() == result.elements_kind());
DCHECK_IMPLIES(!result.is_null(),
- old_map->instance_type() == result->instance_type());
+ old_map.instance_type() == result.instance_type());
return result;
}
@@ -1051,21 +1030,21 @@ Map Map::TryReplayPropertyTransitions(Isolate* isolate, Map old_map) {
int root_nof = NumberOfOwnDescriptors();
- int old_nof = old_map->NumberOfOwnDescriptors();
- DescriptorArray old_descriptors = old_map->instance_descriptors();
+ int old_nof = old_map.NumberOfOwnDescriptors();
+ DescriptorArray old_descriptors = old_map.instance_descriptors();
Map new_map = *this;
for (int i = root_nof; i < old_nof; ++i) {
- PropertyDetails old_details = old_descriptors->GetDetails(i);
+ PropertyDetails old_details = old_descriptors.GetDetails(i);
Map transition =
TransitionsAccessor(isolate, new_map, &no_allocation)
- .SearchTransition(old_descriptors->GetKey(i), old_details.kind(),
+ .SearchTransition(old_descriptors.GetKey(i), old_details.kind(),
old_details.attributes());
if (transition.is_null()) return Map();
new_map = transition;
- DescriptorArray new_descriptors = new_map->instance_descriptors();
+ DescriptorArray new_descriptors = new_map.instance_descriptors();
- PropertyDetails new_details = new_descriptors->GetDetails(i);
+ PropertyDetails new_details = new_descriptors.GetDetails(i);
DCHECK_EQ(old_details.kind(), new_details.kind());
DCHECK_EQ(old_details.attributes(), new_details.attributes());
if (!IsGeneralizableTo(old_details.constness(), new_details.constness())) {
@@ -1077,46 +1056,37 @@ Map Map::TryReplayPropertyTransitions(Isolate* isolate, Map old_map) {
}
if (new_details.location() == kField) {
if (new_details.kind() == kData) {
- FieldType new_type = new_descriptors->GetFieldType(i);
+ FieldType new_type = new_descriptors.GetFieldType(i);
// Cleared field types need special treatment. They represent lost
// knowledge, so we must first generalize the new_type to "Any".
if (FieldTypeIsCleared(new_details.representation(), new_type)) {
return Map();
}
DCHECK_EQ(kData, old_details.kind());
- if (old_details.location() == kField) {
- FieldType old_type = old_descriptors->GetFieldType(i);
- if (FieldTypeIsCleared(old_details.representation(), old_type) ||
- !old_type->NowIs(new_type)) {
- return Map();
- }
- } else {
- DCHECK_EQ(kDescriptor, old_details.location());
- DCHECK(!FLAG_track_constant_fields);
- Object old_value = old_descriptors->GetStrongValue(i);
- if (!new_type->NowContains(old_value)) {
- return Map();
- }
+ DCHECK_EQ(kField, old_details.location());
+ FieldType old_type = old_descriptors.GetFieldType(i);
+ if (FieldTypeIsCleared(old_details.representation(), old_type) ||
+ !old_type.NowIs(new_type)) {
+ return Map();
}
-
} else {
DCHECK_EQ(kAccessor, new_details.kind());
#ifdef DEBUG
- FieldType new_type = new_descriptors->GetFieldType(i);
- DCHECK(new_type->IsAny());
+ FieldType new_type = new_descriptors.GetFieldType(i);
+ DCHECK(new_type.IsAny());
#endif
UNREACHABLE();
}
} else {
DCHECK_EQ(kDescriptor, new_details.location());
if (old_details.location() == kField ||
- old_descriptors->GetStrongValue(i) !=
- new_descriptors->GetStrongValue(i)) {
+ old_descriptors.GetStrongValue(i) !=
+ new_descriptors.GetStrongValue(i)) {
return Map();
}
}
}
- if (new_map->NumberOfOwnDescriptors() != old_nof) return Map();
+ if (new_map.NumberOfOwnDescriptors() != old_nof) return Map();
return new_map;
}
@@ -1168,11 +1138,11 @@ void Map::EnsureDescriptorSlack(Isolate* isolate, Handle<Map> map, int slack) {
descriptors->number_of_descriptors());
Map current = *map;
- while (current->instance_descriptors() == *descriptors) {
- Object next = current->GetBackPointer();
- if (next->IsUndefined(isolate)) break; // Stop overwriting at initial map.
- current->UpdateDescriptors(isolate, *new_descriptors, layout_descriptor,
- current->NumberOfOwnDescriptors());
+ while (current.instance_descriptors() == *descriptors) {
+ Object next = current.GetBackPointer();
+ if (next.IsUndefined(isolate)) break; // Stop overwriting at initial map.
+ current.UpdateDescriptors(isolate, *new_descriptors, layout_descriptor,
+ current.NumberOfOwnDescriptors());
current = Map::cast(next);
}
map->UpdateDescriptors(isolate, *new_descriptors, layout_descriptor,
@@ -1182,7 +1152,7 @@ void Map::EnsureDescriptorSlack(Isolate* isolate, Handle<Map> map, int slack) {
// static
Handle<Map> Map::GetObjectCreateMap(Isolate* isolate,
Handle<HeapObject> prototype) {
- Handle<Map> map(isolate->native_context()->object_function()->initial_map(),
+ Handle<Map> map(isolate->native_context()->object_function().initial_map(),
isolate);
if (map->prototype() == *prototype) return map;
if (prototype->IsNull(isolate)) {
@@ -1190,7 +1160,7 @@ Handle<Map> Map::GetObjectCreateMap(Isolate* isolate,
}
if (prototype->IsJSObject()) {
Handle<JSObject> js_prototype = Handle<JSObject>::cast(prototype);
- if (!js_prototype->map()->is_prototype_map()) {
+ if (!js_prototype->map().is_prototype_map()) {
JSObject::OptimizeAsPrototype(js_prototype);
}
Handle<PrototypeInfo> info =
@@ -1212,7 +1182,7 @@ Handle<Map> Map::GetObjectCreateMap(Isolate* isolate,
// static
MaybeHandle<Map> Map::TryGetObjectCreateMap(Isolate* isolate,
Handle<HeapObject> prototype) {
- Handle<Map> map(isolate->native_context()->object_function()->initial_map(),
+ Handle<Map> map(isolate->native_context()->object_function().initial_map(),
isolate);
if (map->prototype() == *prototype) return map;
if (prototype->IsNull(isolate)) {
@@ -1220,7 +1190,7 @@ MaybeHandle<Map> Map::TryGetObjectCreateMap(Isolate* isolate,
}
if (!prototype->IsJSObject()) return MaybeHandle<Map>();
Handle<JSObject> js_prototype = Handle<JSObject>::cast(prototype);
- if (!js_prototype->map()->is_prototype_map()) return MaybeHandle<Map>();
+ if (!js_prototype->map().is_prototype_map()) return MaybeHandle<Map>();
Handle<PrototypeInfo> info =
Map::GetOrCreatePrototypeInfo(js_prototype, isolate);
if (!info->HasObjectCreateMap()) return MaybeHandle<Map>();
@@ -1235,6 +1205,15 @@ static bool ContainsMap(MapHandles const& maps, Map map) {
return false;
}
+static bool HasElementsKind(MapHandles const& maps,
+ ElementsKind elements_kind) {
+ for (Handle<Map> current : maps) {
+ if (!current.is_null() && current->elements_kind() == elements_kind)
+ return true;
+ }
+ return false;
+}
+
Map Map::FindElementsKindTransitionedMap(Isolate* isolate,
MapHandles const& candidates) {
DisallowHeapAllocation no_allocation;
@@ -1250,22 +1229,25 @@ Map Map::FindElementsKindTransitionedMap(Isolate* isolate,
// Check the state of the root map.
Map root_map = FindRootMap(isolate);
if (!EquivalentToForElementsKindTransition(root_map)) return Map();
- root_map = root_map->LookupElementsTransitionMap(isolate, kind);
+ root_map = root_map.LookupElementsTransitionMap(isolate, kind);
DCHECK(!root_map.is_null());
// Starting from the next existing elements kind transition try to
// replay the property transitions that do not involve instance rewriting
// (ElementsTransitionAndStoreStub does not support that).
- for (root_map = root_map->ElementsTransitionMap();
- !root_map.is_null() && root_map->has_fast_elements();
- root_map = root_map->ElementsTransitionMap()) {
- Map current = root_map->TryReplayPropertyTransitions(isolate, *this);
+ for (root_map = root_map.ElementsTransitionMap();
+ !root_map.is_null() && root_map.has_fast_elements();
+ root_map = root_map.ElementsTransitionMap()) {
+    // If root_map's elements kind doesn't match any of the elements kinds in
+    // the candidates, there is no need to do any additional work.
+ if (!HasElementsKind(candidates, root_map.elements_kind())) continue;
+ Map current = root_map.TryReplayPropertyTransitions(isolate, *this);
if (current.is_null()) continue;
if (InstancesNeedRewriting(current)) continue;
if (ContainsMap(candidates, current) &&
- (packed || !IsFastPackedElementsKind(current->elements_kind()))) {
+ (packed || !IsFastPackedElementsKind(current.elements_kind()))) {
transition = current;
- packed = packed && IsFastPackedElementsKind(current->elements_kind());
+ packed = packed && IsFastPackedElementsKind(current.elements_kind());
}
}
}
@@ -1275,25 +1257,25 @@ Map Map::FindElementsKindTransitionedMap(Isolate* isolate,
static Map FindClosestElementsTransition(Isolate* isolate, Map map,
ElementsKind to_kind) {
// Ensure we are requested to search elements kind transition "near the root".
- DCHECK_EQ(map->FindRootMap(isolate)->NumberOfOwnDescriptors(),
- map->NumberOfOwnDescriptors());
+ DCHECK_EQ(map.FindRootMap(isolate).NumberOfOwnDescriptors(),
+ map.NumberOfOwnDescriptors());
Map current_map = map;
- ElementsKind kind = map->elements_kind();
+ ElementsKind kind = map.elements_kind();
while (kind != to_kind) {
- Map next_map = current_map->ElementsTransitionMap();
+ Map next_map = current_map.ElementsTransitionMap();
if (next_map.is_null()) return current_map;
- kind = next_map->elements_kind();
+ kind = next_map.elements_kind();
current_map = next_map;
}
- DCHECK_EQ(to_kind, current_map->elements_kind());
+ DCHECK_EQ(to_kind, current_map.elements_kind());
return current_map;
}
Map Map::LookupElementsTransitionMap(Isolate* isolate, ElementsKind to_kind) {
Map to_map = FindClosestElementsTransition(isolate, *this, to_kind);
- if (to_map->elements_kind() == to_kind) return to_map;
+ if (to_map.elements_kind() == to_kind) return to_map;
return Map();
}
@@ -1314,24 +1296,24 @@ Handle<Map> Map::TransitionElementsTo(Isolate* isolate, Handle<Map> map,
ElementsKind from_kind = map->elements_kind();
if (from_kind == to_kind) return map;
- Context native_context = isolate->context()->native_context();
+ Context native_context = isolate->context().native_context();
if (from_kind == FAST_SLOPPY_ARGUMENTS_ELEMENTS) {
- if (*map == native_context->fast_aliased_arguments_map()) {
+ if (*map == native_context.fast_aliased_arguments_map()) {
DCHECK_EQ(SLOW_SLOPPY_ARGUMENTS_ELEMENTS, to_kind);
- return handle(native_context->slow_aliased_arguments_map(), isolate);
+ return handle(native_context.slow_aliased_arguments_map(), isolate);
}
} else if (from_kind == SLOW_SLOPPY_ARGUMENTS_ELEMENTS) {
- if (*map == native_context->slow_aliased_arguments_map()) {
+ if (*map == native_context.slow_aliased_arguments_map()) {
DCHECK_EQ(FAST_SLOPPY_ARGUMENTS_ELEMENTS, to_kind);
- return handle(native_context->fast_aliased_arguments_map(), isolate);
+ return handle(native_context.fast_aliased_arguments_map(), isolate);
}
} else if (IsFastElementsKind(from_kind) && IsFastElementsKind(to_kind)) {
// Reuse map transitions for JSArrays.
DisallowHeapAllocation no_gc;
- if (native_context->GetInitialJSArrayMap(from_kind) == *map) {
+ if (native_context.GetInitialJSArrayMap(from_kind) == *map) {
Object maybe_transitioned_map =
- native_context->get(Context::ArrayMapIndex(to_kind));
- if (maybe_transitioned_map->IsMap()) {
+ native_context.get(Context::ArrayMapIndex(to_kind));
+ if (maybe_transitioned_map.IsMap()) {
return handle(Map::cast(maybe_transitioned_map), isolate);
}
}
@@ -1341,8 +1323,8 @@ Handle<Map> Map::TransitionElementsTo(Isolate* isolate, Handle<Map> map,
// Check if we can go back in the elements kind transition chain.
if (IsHoleyElementsKind(from_kind) &&
to_kind == GetPackedElementsKind(from_kind) &&
- map->GetBackPointer()->IsMap() &&
- Map::cast(map->GetBackPointer())->elements_kind() == to_kind) {
+ map->GetBackPointer().IsMap() &&
+ Map::cast(map->GetBackPointer()).elements_kind() == to_kind) {
return handle(Map::cast(map->GetBackPointer()), isolate);
}
@@ -1410,8 +1392,8 @@ int Map::NumberOfEnumerableProperties() const {
DescriptorArray descs = instance_descriptors();
int limit = NumberOfOwnDescriptors();
for (int i = 0; i < limit; i++) {
- if ((descs->GetDetails(i).attributes() & ONLY_ENUMERABLE) == 0 &&
- !descs->GetKey(i)->FilterKey(ENUMERABLE_STRINGS)) {
+ if ((descs.GetDetails(i).attributes() & ONLY_ENUMERABLE) == 0 &&
+ !descs.GetKey(i).FilterKey(ENUMERABLE_STRINGS)) {
result++;
}
}
@@ -1423,7 +1405,7 @@ int Map::NextFreePropertyIndex() const {
int number_of_own_descriptors = NumberOfOwnDescriptors();
DescriptorArray descs = instance_descriptors();
for (int i = 0; i < number_of_own_descriptors; i++) {
- PropertyDetails details = descs->GetDetails(i);
+ PropertyDetails details = descs.GetDetails(i);
if (details.location() == kField) {
int candidate = details.field_index() + details.field_width_in_words();
if (candidate > free_index) free_index = candidate;
@@ -1448,20 +1430,20 @@ bool Map::DictionaryElementsInPrototypeChainOnly(Isolate* isolate) {
for (PrototypeIterator iter(isolate, *this); !iter.IsAtEnd();
iter.Advance()) {
// Be conservative, don't walk into proxies.
- if (iter.GetCurrent()->IsJSProxy()) return true;
+ if (iter.GetCurrent().IsJSProxy()) return true;
// String wrappers have non-configurable, non-writable elements.
- if (iter.GetCurrent()->IsStringWrapper()) return true;
+ if (iter.GetCurrent().IsStringWrapper()) return true;
JSObject current = iter.GetCurrent<JSObject>();
- if (current->HasDictionaryElements() &&
- current->element_dictionary()->requires_slow_elements()) {
+ if (current.HasDictionaryElements() &&
+ current.element_dictionary().requires_slow_elements()) {
return true;
}
- if (current->HasSlowArgumentsElements()) {
- FixedArray parameter_map = FixedArray::cast(current->elements());
- Object arguments = parameter_map->get(1);
- if (NumberDictionary::cast(arguments)->requires_slow_elements()) {
+ if (current.HasSlowArgumentsElements()) {
+ FixedArray parameter_map = FixedArray::cast(current.elements());
+ Object arguments = parameter_map.get(1);
+ if (NumberDictionary::cast(arguments).requires_slow_elements()) {
return true;
}
}
@@ -1486,6 +1468,7 @@ Handle<Map> Map::RawCopy(Isolate* isolate, Handle<Map> map, int instance_size,
new_bit_field3 =
EnumLengthBits::update(new_bit_field3, kInvalidEnumCacheSentinel);
new_bit_field3 = IsDeprecatedBit::update(new_bit_field3, false);
+ new_bit_field3 = IsInRetainedMapListBit::update(new_bit_field3, false);
if (!map->is_dictionary_map()) {
new_bit_field3 = IsUnstableBit::update(new_bit_field3, false);
}
@@ -1517,26 +1500,29 @@ Handle<Map> Map::Normalize(Isolate* isolate, Handle<Map> fast_map,
// applied to the shared map, dependent code and weak cell cache.
Handle<Map> fresh = Map::CopyNormalized(isolate, fast_map, mode);
+ STATIC_ASSERT(Map::kPrototypeValidityCellOffset ==
+ Map::kDependentCodeOffset + kTaggedSize);
+ DCHECK_EQ(0, memcmp(reinterpret_cast<void*>(fresh->address()),
+ reinterpret_cast<void*>(new_map->address()),
+ Map::kBitField3Offset));
+ // The IsInRetainedMapListBit might be different if the {new_map}
+ // that we got from the {cache} was already embedded into optimized
+ // code somewhere.
+ DCHECK_EQ(fresh->bit_field3() & ~IsInRetainedMapListBit::kMask,
+ new_map->bit_field3() & ~IsInRetainedMapListBit::kMask);
+ int offset = Map::kBitField3Offset + kInt32Size;
+ DCHECK_EQ(0, memcmp(reinterpret_cast<void*>(fresh->address() + offset),
+ reinterpret_cast<void*>(new_map->address() + offset),
+ Map::kDependentCodeOffset - offset));
+ offset = Map::kPrototypeValidityCellOffset + kTaggedSize;
if (new_map->is_prototype_map()) {
// For prototype maps, the PrototypeInfo is not copied.
- DCHECK_EQ(0, memcmp(reinterpret_cast<void*>(fresh->address()),
- reinterpret_cast<void*>(new_map->address()),
- kTransitionsOrPrototypeInfoOffset));
+ STATIC_ASSERT(Map::kTransitionsOrPrototypeInfoOffset ==
+ Map::kPrototypeValidityCellOffset + kTaggedSize);
+ offset = kTransitionsOrPrototypeInfoOffset + kTaggedSize;
DCHECK_EQ(fresh->raw_transitions(),
MaybeObject::FromObject(Smi::kZero));
- STATIC_ASSERT(kDescriptorsOffset ==
- kTransitionsOrPrototypeInfoOffset + kTaggedSize);
- DCHECK_EQ(0, memcmp(fresh->RawField(kDescriptorsOffset).ToVoidPtr(),
- new_map->RawField(kDescriptorsOffset).ToVoidPtr(),
- kDependentCodeOffset - kDescriptorsOffset));
- } else {
- DCHECK_EQ(0, memcmp(reinterpret_cast<void*>(fresh->address()),
- reinterpret_cast<void*>(new_map->address()),
- Map::kDependentCodeOffset));
}
- STATIC_ASSERT(Map::kPrototypeValidityCellOffset ==
- Map::kDependentCodeOffset + kTaggedSize);
- int offset = Map::kPrototypeValidityCellOffset + kTaggedSize;
DCHECK_EQ(0, memcmp(reinterpret_cast<void*>(fresh->address() + offset),
reinterpret_cast<void*>(new_map->address() + offset),
Map::kSize - offset));
@@ -1600,8 +1586,8 @@ void EnsureInitialMap(Isolate* isolate, Handle<Map> map) {
// Function's initial map is a sloppy function map. Same holds for
// GeneratorFunction / AsyncFunction and its initial map.
Object constructor = map->GetConstructor();
- DCHECK(constructor->IsJSFunction());
- DCHECK(*map == JSFunction::cast(constructor)->initial_map() ||
+ DCHECK(constructor.IsJSFunction());
+ DCHECK(*map == JSFunction::cast(constructor).initial_map() ||
*map == *isolate->strict_function_map() ||
*map == *isolate->strict_function_with_name_map() ||
*map == *isolate->generator_function_map() ||
@@ -1617,7 +1603,7 @@ void EnsureInitialMap(Isolate* isolate, Handle<Map> map) {
// does not contain descriptors that do not belong to the map.
DCHECK(map->owns_descriptors());
DCHECK_EQ(map->NumberOfOwnDescriptors(),
- map->instance_descriptors()->number_of_descriptors());
+ map->instance_descriptors().number_of_descriptors());
}
} // namespace
@@ -1673,7 +1659,7 @@ Handle<Map> Map::ShareDescriptor(Isolate* isolate, Handle<Map> map,
// array, implying that its NumberOfOwnDescriptors equals the number of
// descriptors in the descriptor array.
DCHECK_EQ(map->NumberOfOwnDescriptors(),
- map->instance_descriptors()->number_of_descriptors());
+ map->instance_descriptors().number_of_descriptors());
Handle<Map> result = CopyDropDescriptors(isolate, map);
Handle<Name> name = descriptor->GetKey();
@@ -1720,24 +1706,14 @@ void Map::ConnectTransition(Isolate* isolate, Handle<Map> parent,
child->may_have_interesting_symbols());
DCHECK_IMPLIES(parent->may_have_interesting_symbols(),
child->may_have_interesting_symbols());
- // Do not track transitions during bootstrap except for element transitions.
- if (isolate->bootstrapper()->IsActive() &&
- !name.is_identical_to(isolate->factory()->elements_transition_symbol())) {
- if (FLAG_trace_maps) {
- LOG(isolate,
- MapEvent("Transition", *parent, *child,
- child->is_prototype_map() ? "prototype" : "", *name));
- }
- return;
- }
- if (!parent->GetBackPointer()->IsUndefined(isolate)) {
+ if (!parent->GetBackPointer().IsUndefined(isolate)) {
parent->set_owns_descriptors(false);
} else {
// |parent| is initial map and it must keep the ownership, there must be no
// descriptors in the descriptors array that do not belong to the map.
DCHECK(parent->owns_descriptors());
DCHECK_EQ(parent->NumberOfOwnDescriptors(),
- parent->instance_descriptors()->number_of_descriptors());
+ parent->instance_descriptors().number_of_descriptors());
}
if (parent->is_prototype_map()) {
DCHECK(child->is_prototype_map());
@@ -1864,7 +1840,7 @@ void Map::InstallDescriptors(Isolate* isolate, Handle<Map> parent,
#ifdef VERIFY_HEAP
// TODO(ishell): remove these checks from VERIFY_HEAP mode.
if (FLAG_verify_heap) {
- CHECK(child->layout_descriptor()->IsConsistentWithMap(*child));
+ CHECK(child->layout_descriptor().IsConsistentWithMap(*child));
}
#else
SLOW_DCHECK(child->layout_descriptor()->IsConsistentWithMap(*child));
@@ -1891,14 +1867,14 @@ Handle<Map> Map::CopyAsElementsKind(Isolate* isolate, Handle<Map> map,
Map maybe_elements_transition_map;
if (flag == INSERT_TRANSITION) {
// Ensure we are requested to add elements kind transition "near the root".
- DCHECK_EQ(map->FindRootMap(isolate)->NumberOfOwnDescriptors(),
+ DCHECK_EQ(map->FindRootMap(isolate).NumberOfOwnDescriptors(),
map->NumberOfOwnDescriptors());
maybe_elements_transition_map = map->ElementsTransitionMap();
- DCHECK(maybe_elements_transition_map.is_null() ||
- (maybe_elements_transition_map->elements_kind() ==
- DICTIONARY_ELEMENTS &&
- kind == DICTIONARY_ELEMENTS));
+ DCHECK(
+ maybe_elements_transition_map.is_null() ||
+ (maybe_elements_transition_map.elements_kind() == DICTIONARY_ELEMENTS &&
+ kind == DICTIONARY_ELEMENTS));
DCHECK(!IsFastElementsKind(kind) ||
IsMoreGeneralElementsKindTransition(map->elements_kind(), kind));
DCHECK(kind != map->elements_kind());
@@ -2025,25 +2001,29 @@ Handle<Map> Map::Create(Isolate* isolate, int inobject_properties) {
return copy;
}
-Handle<Map> Map::CopyForPreventExtensions(Isolate* isolate, Handle<Map> map,
- PropertyAttributes attrs_to_add,
- Handle<Symbol> transition_marker,
- const char* reason) {
+Handle<Map> Map::CopyForPreventExtensions(
+ Isolate* isolate, Handle<Map> map, PropertyAttributes attrs_to_add,
+ Handle<Symbol> transition_marker, const char* reason,
+ bool old_map_is_dictionary_elements_kind) {
int num_descriptors = map->NumberOfOwnDescriptors();
Handle<DescriptorArray> new_desc = DescriptorArray::CopyUpToAddAttributes(
isolate, handle(map->instance_descriptors(), isolate), num_descriptors,
attrs_to_add);
Handle<LayoutDescriptor> new_layout_descriptor(map->GetLayoutDescriptor(),
isolate);
+ // Do not track transitions during bootstrapping.
+ TransitionFlag flag =
+ isolate->bootstrapper()->IsActive() ? OMIT_TRANSITION : INSERT_TRANSITION;
Handle<Map> new_map = CopyReplaceDescriptors(
- isolate, map, new_desc, new_layout_descriptor, INSERT_TRANSITION,
- transition_marker, reason, SPECIAL_TRANSITION);
+ isolate, map, new_desc, new_layout_descriptor, flag, transition_marker,
+ reason, SPECIAL_TRANSITION);
new_map->set_is_extensible(false);
- if (!IsFixedTypedArrayElementsKind(map->elements_kind())) {
+ if (!IsTypedArrayElementsKind(map->elements_kind())) {
ElementsKind new_kind = IsStringWrapperElementsKind(map->elements_kind())
? SLOW_STRING_WRAPPER_ELEMENTS
: DICTIONARY_ELEMENTS;
- if (FLAG_enable_sealed_frozen_elements_kind) {
+ if (FLAG_enable_sealed_frozen_elements_kind &&
+ !old_map_is_dictionary_elements_kind) {
switch (map->elements_kind()) {
case PACKED_ELEMENTS:
if (attrs_to_add == SEALED) {
@@ -2057,6 +2037,18 @@ Handle<Map> Map::CopyForPreventExtensions(Isolate* isolate, Handle<Map> map,
new_kind = PACKED_FROZEN_ELEMENTS;
}
break;
+ case HOLEY_ELEMENTS:
+ if (attrs_to_add == SEALED) {
+ new_kind = HOLEY_SEALED_ELEMENTS;
+ } else if (attrs_to_add == FROZEN) {
+ new_kind = HOLEY_FROZEN_ELEMENTS;
+ }
+ break;
+ case HOLEY_SEALED_ELEMENTS:
+ if (attrs_to_add == FROZEN) {
+ new_kind = HOLEY_FROZEN_ELEMENTS;
+ }
+ break;
default:
break;
}
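
The switch above picks the most specific elements kind that sealing or freezing still allows. A minimal standalone sketch of that selection, using stand-in enums instead of V8's real ElementsKind values (as in the hunk, the slow fallback is chosen first and upgraded only when a fast sealed/frozen kind exists; in the real code the fallback is DICTIONARY_ELEMENTS, or SLOW_STRING_WRAPPER_ELEMENTS for string wrappers):

#include <cassert>

enum class Attrs { kSealed, kFrozen };
enum class Kind {
  kPacked, kHoley, kPackedSealed, kPackedFrozen,
  kHoleySealed, kHoleyFrozen, kDictionary
};

// Mirrors the shape of the hunk: start from the slow fallback and upgrade
// to a fast sealed/frozen kind where one exists.
Kind KindForPreventExtensions(Kind old_kind, Attrs attrs) {
  Kind new_kind = Kind::kDictionary;  // fallback when no fast kind fits
  switch (old_kind) {
    case Kind::kPacked:
      new_kind = (attrs == Attrs::kSealed) ? Kind::kPackedSealed
                                           : Kind::kPackedFrozen;
      break;
    case Kind::kHoley:
      new_kind = (attrs == Attrs::kSealed) ? Kind::kHoleySealed
                                           : Kind::kHoleyFrozen;
      break;
    case Kind::kHoleySealed:
      // Already sealed: only freezing tightens the kind further.
      if (attrs == Attrs::kFrozen) new_kind = Kind::kHoleyFrozen;
      break;
    default:
      break;
  }
  return new_kind;
}

int main() {
  assert(KindForPreventExtensions(Kind::kHoley, Attrs::kSealed) ==
         Kind::kHoleySealed);
  assert(KindForPreventExtensions(Kind::kHoleySealed, Attrs::kFrozen) ==
         Kind::kHoleyFrozen);
  return 0;
}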
@@ -2070,12 +2062,12 @@ namespace {
bool CanHoldValue(DescriptorArray descriptors, int descriptor,
PropertyConstness constness, Object value) {
- PropertyDetails details = descriptors->GetDetails(descriptor);
+ PropertyDetails details = descriptors.GetDetails(descriptor);
if (details.location() == kField) {
if (details.kind() == kData) {
return IsGeneralizableTo(constness, details.constness()) &&
- value->FitsRepresentation(details.representation()) &&
- descriptors->GetFieldType(descriptor)->NowContains(value);
+ value.FitsRepresentation(details.representation()) &&
+ descriptors.GetFieldType(descriptor).NowContains(value);
} else {
DCHECK_EQ(kAccessor, details.kind());
return false;
@@ -2084,15 +2076,8 @@ bool CanHoldValue(DescriptorArray descriptors, int descriptor,
} else {
DCHECK_EQ(kDescriptor, details.location());
DCHECK_EQ(PropertyConstness::kConst, details.constness());
- if (details.kind() == kData) {
- DCHECK(!FLAG_track_constant_fields);
- DCHECK(descriptors->GetStrongValue(descriptor) != value ||
- value->FitsRepresentation(details.representation()));
- return descriptors->GetStrongValue(descriptor) == value;
- } else {
- DCHECK_EQ(kAccessor, details.kind());
- return false;
- }
+ DCHECK_EQ(kAccessor, details.kind());
+ return false;
}
UNREACHABLE();
}
@@ -2107,7 +2092,7 @@ Handle<Map> UpdateDescriptorForValue(Isolate* isolate, Handle<Map> map,
}
PropertyAttributes attributes =
- map->instance_descriptors()->GetDetails(descriptor).attributes();
+ map->instance_descriptors().GetDetails(descriptor).attributes();
Representation representation = value->OptimalRepresentation();
Handle<FieldType> type = value->OptimalType(isolate, representation);
@@ -2154,26 +2139,23 @@ Handle<Map> Map::TransitionToDataProperty(Isolate* isolate, Handle<Map> map,
Handle<Map> transition(maybe_transition, isolate);
int descriptor = transition->LastAdded();
- DCHECK_EQ(attributes, transition->instance_descriptors()
- ->GetDetails(descriptor)
- .attributes());
+ DCHECK_EQ(
+ attributes,
+ transition->instance_descriptors().GetDetails(descriptor).attributes());
return UpdateDescriptorForValue(isolate, transition, descriptor, constness,
value);
}
- TransitionFlag flag = INSERT_TRANSITION;
+ // Do not track transitions during bootstrapping.
+ TransitionFlag flag =
+ isolate->bootstrapper()->IsActive() ? OMIT_TRANSITION : INSERT_TRANSITION;
MaybeHandle<Map> maybe_map;
if (!map->TooManyFastProperties(store_origin)) {
- if (!FLAG_track_constant_fields && value->IsJSFunction()) {
- maybe_map =
- Map::CopyWithConstant(isolate, map, name, value, attributes, flag);
- } else {
- Representation representation = value->OptimalRepresentation();
- Handle<FieldType> type = value->OptimalType(isolate, representation);
- maybe_map = Map::CopyWithField(isolate, map, name, type, attributes,
- constness, representation, flag);
- }
+ Representation representation = value->OptimalRepresentation();
+ Handle<FieldType> type = value->OptimalType(isolate, representation);
+ maybe_map = Map::CopyWithField(isolate, map, name, type, attributes,
+ constness, representation, flag);
}
Handle<Map> result;
@@ -2185,18 +2167,18 @@ Handle<Map> Map::TransitionToDataProperty(Isolate* isolate, Handle<Map> map,
ScopedVector<char> name_buffer(100);
name->NameShortPrint(name_buffer);
buffer.reset(new ScopedVector<char>(128));
- SNPrintF(*buffer, "TooManyFastProperties %s", name_buffer.start());
- reason = buffer->start();
+ SNPrintF(*buffer, "TooManyFastProperties %s", name_buffer.begin());
+ reason = buffer->begin();
}
#endif
Handle<Object> maybe_constructor(map->GetConstructor(), isolate);
if (FLAG_feedback_normalization && map->new_target_is_base() &&
maybe_constructor->IsJSFunction() &&
- !JSFunction::cast(*maybe_constructor)->shared()->native()) {
+ !JSFunction::cast(*maybe_constructor).shared().native()) {
Handle<JSFunction> constructor =
Handle<JSFunction>::cast(maybe_constructor);
DCHECK_NE(*constructor,
- constructor->context()->native_context()->object_function());
+ constructor->context().native_context().object_function());
Handle<Map> initial_map(constructor->initial_map(), isolate);
result = Map::Normalize(isolate, initial_map, CLEAR_INOBJECT_PROPERTIES,
reason);
@@ -2205,7 +2187,7 @@ Handle<Map> Map::TransitionToDataProperty(Isolate* isolate, Handle<Map> map,
JSFunction::SetInitialMap(constructor, result, prototype);
// Deoptimize all code that embeds the previous initial map.
- initial_map->dependent_code()->DeoptimizeDependentCodeGroup(
+ initial_map->dependent_code().DeoptimizeDependentCodeGroup(
isolate, DependentCode::kInitialMapChangedGroup);
if (!result->EquivalentToForNormalization(*map,
CLEAR_INOBJECT_PROPERTIES)) {
@@ -2226,7 +2208,7 @@ Handle<Map> Map::ReconfigureExistingProperty(Isolate* isolate, Handle<Map> map,
// Dictionaries have to be reconfigured in-place.
DCHECK(!map->is_dictionary_map());
- if (!map->GetBackPointer()->IsMap()) {
+ if (!map->GetBackPointer().IsMap()) {
// There is no benefit from reconstructing transition tree for maps without
// back pointers.
return CopyGeneralizeAllFields(isolate, map, map->elements_kind(),
@@ -2241,7 +2223,7 @@ Handle<Map> Map::ReconfigureExistingProperty(Isolate* isolate, Handle<Map> map,
MapUpdater mu(isolate, map);
DCHECK_EQ(kData, kind); // Only kData case is supported so far.
Handle<Map> new_map = mu.ReconfigureToDataField(
- descriptor, attributes, kDefaultFieldConstness, Representation::None(),
+ descriptor, attributes, PropertyConstness::kConst, Representation::None(),
FieldType::None(isolate));
return new_map;
}
@@ -2277,12 +2259,12 @@ Handle<Map> Map::TransitionToAccessorProperty(Isolate* isolate, Handle<Map> map,
Handle<Map> transition(maybe_transition, isolate);
DescriptorArray descriptors = transition->instance_descriptors();
int descriptor = transition->LastAdded();
- DCHECK(descriptors->GetKey(descriptor)->Equals(*name));
+ DCHECK(descriptors.GetKey(descriptor).Equals(*name));
- DCHECK_EQ(kAccessor, descriptors->GetDetails(descriptor).kind());
- DCHECK_EQ(attributes, descriptors->GetDetails(descriptor).attributes());
+ DCHECK_EQ(kAccessor, descriptors.GetDetails(descriptor).kind());
+ DCHECK_EQ(attributes, descriptors.GetDetails(descriptor).attributes());
- Handle<Object> maybe_pair(descriptors->GetStrongValue(descriptor), isolate);
+ Handle<Object> maybe_pair(descriptors.GetStrongValue(descriptor), isolate);
if (!maybe_pair->IsAccessorPair()) {
return Map::Normalize(isolate, map, mode,
"TransitionToAccessorFromNonPair");
@@ -2303,7 +2285,7 @@ Handle<Map> Map::TransitionToAccessorProperty(Isolate* isolate, Handle<Map> map,
if (descriptor != map->LastAdded()) {
return Map::Normalize(isolate, map, mode, "AccessorsOverwritingNonLast");
}
- PropertyDetails old_details = old_descriptors->GetDetails(descriptor);
+ PropertyDetails old_details = old_descriptors.GetDetails(descriptor);
if (old_details.kind() != kAccessor) {
return Map::Normalize(isolate, map, mode,
"AccessorsOverwritingNonAccessors");
@@ -2313,7 +2295,7 @@ Handle<Map> Map::TransitionToAccessorProperty(Isolate* isolate, Handle<Map> map,
return Map::Normalize(isolate, map, mode, "AccessorsWithAttributes");
}
- Handle<Object> maybe_pair(old_descriptors->GetStrongValue(descriptor),
+ Handle<Object> maybe_pair(old_descriptors.GetStrongValue(descriptor),
isolate);
if (!maybe_pair->IsAccessorPair()) {
return Map::Normalize(isolate, map, mode, "AccessorsOverwritingNonPair");
@@ -2324,12 +2306,12 @@ Handle<Map> Map::TransitionToAccessorProperty(Isolate* isolate, Handle<Map> map,
bool overwriting_accessor = false;
if (!getter->IsNull(isolate) &&
- !current_pair->get(ACCESSOR_GETTER)->IsNull(isolate) &&
+ !current_pair->get(ACCESSOR_GETTER).IsNull(isolate) &&
current_pair->get(ACCESSOR_GETTER) != *getter) {
overwriting_accessor = true;
}
if (!setter->IsNull(isolate) &&
- !current_pair->get(ACCESSOR_SETTER)->IsNull(isolate) &&
+ !current_pair->get(ACCESSOR_SETTER).IsNull(isolate) &&
current_pair->get(ACCESSOR_SETTER) != *setter) {
overwriting_accessor = true;
}
@@ -2349,7 +2331,9 @@ Handle<Map> Map::TransitionToAccessorProperty(Isolate* isolate, Handle<Map> map,
pair->SetComponents(*getter, *setter);
- TransitionFlag flag = INSERT_TRANSITION;
+ // Do not track transitions during bootstrapping.
+ TransitionFlag flag =
+ isolate->bootstrapper()->IsActive() ? OMIT_TRANSITION : INSERT_TRANSITION;
Descriptor d = Descriptor::AccessorConstant(name, pair, attributes);
return Map::CopyInsertDescriptor(isolate, map, &d, flag);
}
@@ -2361,7 +2345,7 @@ Handle<Map> Map::CopyAddDescriptor(Isolate* isolate, Handle<Map> map,
// Share descriptors only if map owns descriptors and it is not an initial map.
if (flag == INSERT_TRANSITION && map->owns_descriptors() &&
- !map->GetBackPointer()->IsUndefined(isolate) &&
+ !map->GetBackPointer().IsUndefined(isolate) &&
TransitionsAccessor(isolate, map).CanHaveMoreTransitions()) {
return ShareDescriptor(isolate, map, descriptors, descriptor);
}
@@ -2444,31 +2428,31 @@ int Map::Hash() {
namespace {
bool CheckEquivalent(const Map first, const Map second) {
- return first->GetConstructor() == second->GetConstructor() &&
- first->prototype() == second->prototype() &&
- first->instance_type() == second->instance_type() &&
- first->bit_field() == second->bit_field() &&
- first->is_extensible() == second->is_extensible() &&
- first->new_target_is_base() == second->new_target_is_base() &&
- first->has_hidden_prototype() == second->has_hidden_prototype();
+ return first.GetConstructor() == second.GetConstructor() &&
+ first.prototype() == second.prototype() &&
+ first.instance_type() == second.instance_type() &&
+ first.bit_field() == second.bit_field() &&
+ first.is_extensible() == second.is_extensible() &&
+ first.new_target_is_base() == second.new_target_is_base() &&
+ first.has_hidden_prototype() == second.has_hidden_prototype();
}
} // namespace
bool Map::EquivalentToForTransition(const Map other) const {
- CHECK_EQ(GetConstructor(), other->GetConstructor());
- CHECK_EQ(instance_type(), other->instance_type());
- CHECK_EQ(has_hidden_prototype(), other->has_hidden_prototype());
+ CHECK_EQ(GetConstructor(), other.GetConstructor());
+ CHECK_EQ(instance_type(), other.instance_type());
+ CHECK_EQ(has_hidden_prototype(), other.has_hidden_prototype());
- if (bit_field() != other->bit_field()) return false;
- if (new_target_is_base() != other->new_target_is_base()) return false;
- if (prototype() != other->prototype()) return false;
+ if (bit_field() != other.bit_field()) return false;
+ if (new_target_is_base() != other.new_target_is_base()) return false;
+ if (prototype() != other.prototype()) return false;
if (instance_type() == JS_FUNCTION_TYPE) {
// JSFunctions require more checks to ensure that sloppy function is
// not equivalent to strict function.
- int nof = Min(NumberOfOwnDescriptors(), other->NumberOfOwnDescriptors());
- return instance_descriptors()->IsEqualUpTo(other->instance_descriptors(),
- nof);
+ int nof = Min(NumberOfOwnDescriptors(), other.NumberOfOwnDescriptors());
+ return instance_descriptors().IsEqualUpTo(other.instance_descriptors(),
+ nof);
}
return true;
}
@@ -2482,10 +2466,10 @@ bool Map::EquivalentToForElementsKindTransition(const Map other) const {
DescriptorArray descriptors = instance_descriptors();
int nof = NumberOfOwnDescriptors();
for (int i = 0; i < nof; i++) {
- PropertyDetails details = descriptors->GetDetails(i);
+ PropertyDetails details = descriptors.GetDetails(i);
if (details.location() == kField) {
DCHECK(IsMostGeneralFieldType(details.representation(),
- descriptors->GetFieldType(i)));
+ descriptors.GetFieldType(i)));
}
}
#endif
@@ -2495,15 +2479,15 @@ bool Map::EquivalentToForElementsKindTransition(const Map other) const {
bool Map::EquivalentToForNormalization(const Map other,
PropertyNormalizationMode mode) const {
int properties =
- mode == CLEAR_INOBJECT_PROPERTIES ? 0 : other->GetInObjectProperties();
- return CheckEquivalent(*this, other) && bit_field2() == other->bit_field2() &&
+ mode == CLEAR_INOBJECT_PROPERTIES ? 0 : other.GetInObjectProperties();
+ return CheckEquivalent(*this, other) && bit_field2() == other.bit_field2() &&
GetInObjectProperties() == properties &&
JSObject::GetEmbedderFieldCount(*this) ==
JSObject::GetEmbedderFieldCount(other);
}
static void GetMinInobjectSlack(Map map, void* data) {
- int slack = map->UnusedPropertyFields();
+ int slack = map.UnusedPropertyFields();
if (*reinterpret_cast<int*>(data) > slack) {
*reinterpret_cast<int*>(data) = slack;
}
@@ -2512,7 +2496,7 @@ static void GetMinInobjectSlack(Map map, void* data) {
int Map::ComputeMinObjectSlack(Isolate* isolate) {
DisallowHeapAllocation no_gc;
// Has to be an initial map.
- DCHECK(GetBackPointer()->IsUndefined(isolate));
+ DCHECK(GetBackPointer().IsUndefined(isolate));
int slack = UnusedPropertyFields();
TransitionsAccessor transitions(isolate, *this, &no_gc);
@@ -2525,22 +2509,22 @@ static void ShrinkInstanceSize(Map map, void* data) {
DCHECK_GE(slack, 0);
#ifdef DEBUG
int old_visitor_id = Map::GetVisitorId(map);
- int new_unused = map->UnusedPropertyFields() - slack;
+ int new_unused = map.UnusedPropertyFields() - slack;
#endif
- map->set_instance_size(map->InstanceSizeFromSlack(slack));
- map->set_construction_counter(Map::kNoSlackTracking);
+ map.set_instance_size(map.InstanceSizeFromSlack(slack));
+ map.set_construction_counter(Map::kNoSlackTracking);
DCHECK_EQ(old_visitor_id, Map::GetVisitorId(map));
- DCHECK_EQ(new_unused, map->UnusedPropertyFields());
+ DCHECK_EQ(new_unused, map.UnusedPropertyFields());
}
static void StopSlackTracking(Map map, void* data) {
- map->set_construction_counter(Map::kNoSlackTracking);
+ map.set_construction_counter(Map::kNoSlackTracking);
}
void Map::CompleteInobjectSlackTracking(Isolate* isolate) {
DisallowHeapAllocation no_gc;
// Has to be an initial map.
- DCHECK(GetBackPointer()->IsUndefined(isolate));
+ DCHECK(GetBackPointer().IsUndefined(isolate));
int slack = ComputeMinObjectSlack(isolate);
TransitionsAccessor transitions(isolate, *this, &no_gc);
@@ -2563,12 +2547,12 @@ void Map::SetInstanceDescriptors(Isolate* isolate, DescriptorArray descriptors,
// static
Handle<PrototypeInfo> Map::GetOrCreatePrototypeInfo(Handle<JSObject> prototype,
Isolate* isolate) {
- Object maybe_proto_info = prototype->map()->prototype_info();
- if (maybe_proto_info->IsPrototypeInfo()) {
+ Object maybe_proto_info = prototype->map().prototype_info();
+ if (maybe_proto_info.IsPrototypeInfo()) {
return handle(PrototypeInfo::cast(maybe_proto_info), isolate);
}
Handle<PrototypeInfo> proto_info = isolate->factory()->NewPrototypeInfo();
- prototype->map()->set_prototype_info(*proto_info);
+ prototype->map().set_prototype_info(*proto_info);
return proto_info;
}
@@ -2576,7 +2560,7 @@ Handle<PrototypeInfo> Map::GetOrCreatePrototypeInfo(Handle<JSObject> prototype,
Handle<PrototypeInfo> Map::GetOrCreatePrototypeInfo(Handle<Map> prototype_map,
Isolate* isolate) {
Object maybe_proto_info = prototype_map->prototype_info();
- if (maybe_proto_info->IsPrototypeInfo()) {
+ if (maybe_proto_info.IsPrototypeInfo()) {
return handle(PrototypeInfo::cast(maybe_proto_info), isolate);
}
Handle<PrototypeInfo> proto_info = isolate->factory()->NewPrototypeInfo();
@@ -2587,7 +2571,7 @@ Handle<PrototypeInfo> Map::GetOrCreatePrototypeInfo(Handle<Map> prototype_map,
// static
void Map::SetShouldBeFastPrototypeMap(Handle<Map> map, bool value,
Isolate* isolate) {
- if (value == false && !map->prototype_info()->IsPrototypeInfo()) {
+ if (value == false && !map->prototype_info().IsPrototypeInfo()) {
// "False" is the implicit default value, so there's nothing to do.
return;
}
@@ -2605,7 +2589,7 @@ Handle<Object> Map::GetOrCreatePrototypeChainValidityCell(Handle<Map> map,
maybe_prototype = isolate->global_object();
} else {
maybe_prototype =
- handle(map->GetPrototypeChainRootMap(isolate)->prototype(), isolate);
+ handle(map->GetPrototypeChainRootMap(isolate).prototype(), isolate);
}
if (!maybe_prototype->IsJSObject()) {
return handle(Smi::FromInt(Map::kPrototypeChainValid), isolate);
@@ -2616,9 +2600,9 @@ Handle<Object> Map::GetOrCreatePrototypeChainValidityCell(Handle<Map> map,
JSObject::LazyRegisterPrototypeUser(handle(prototype->map(), isolate),
isolate);
- Object maybe_cell = prototype->map()->prototype_validity_cell();
+ Object maybe_cell = prototype->map().prototype_validity_cell();
// Return existing cell if it's still valid.
- if (maybe_cell->IsCell()) {
+ if (maybe_cell.IsCell()) {
Handle<Cell> cell(Cell::cast(maybe_cell), isolate);
if (cell->value() == Smi::FromInt(Map::kPrototypeChainValid)) {
return cell;
@@ -2627,17 +2611,17 @@ Handle<Object> Map::GetOrCreatePrototypeChainValidityCell(Handle<Map> map,
// Otherwise create a new cell.
Handle<Cell> cell = isolate->factory()->NewCell(
handle(Smi::FromInt(Map::kPrototypeChainValid), isolate));
- prototype->map()->set_prototype_validity_cell(*cell);
+ prototype->map().set_prototype_validity_cell(*cell);
return cell;
}
// static
bool Map::IsPrototypeChainInvalidated(Map map) {
- DCHECK(map->is_prototype_map());
- Object maybe_cell = map->prototype_validity_cell();
- if (maybe_cell->IsCell()) {
+ DCHECK(map.is_prototype_map());
+ Object maybe_cell = map.prototype_validity_cell();
+ if (maybe_cell.IsCell()) {
Cell cell = Cell::cast(maybe_cell);
- return cell->value() != Smi::FromInt(Map::kPrototypeChainValid);
+ return cell.value() != Smi::FromInt(Map::kPrototypeChainValid);
}
return true;
}
@@ -2649,27 +2633,13 @@ void Map::SetPrototype(Isolate* isolate, Handle<Map> map,
RuntimeCallTimerScope stats_scope(isolate, *map,
RuntimeCallCounterId::kMap_SetPrototype);
- bool is_hidden = false;
if (prototype->IsJSObject()) {
Handle<JSObject> prototype_jsobj = Handle<JSObject>::cast(prototype);
JSObject::OptimizeAsPrototype(prototype_jsobj, enable_prototype_setup_mode);
-
- Object maybe_constructor = prototype_jsobj->map()->GetConstructor();
- if (maybe_constructor->IsJSFunction()) {
- JSFunction constructor = JSFunction::cast(maybe_constructor);
- Object data = constructor->shared()->function_data();
- is_hidden = (data->IsFunctionTemplateInfo() &&
- FunctionTemplateInfo::cast(data)->hidden_prototype()) ||
- prototype->IsJSGlobalObject();
- } else if (maybe_constructor->IsFunctionTemplateInfo()) {
- is_hidden =
- FunctionTemplateInfo::cast(maybe_constructor)->hidden_prototype() ||
- prototype->IsJSGlobalObject();
- }
} else {
DCHECK(prototype->IsNull(isolate) || prototype->IsJSProxy());
}
- map->set_has_hidden_prototype(is_hidden);
+ map->set_has_hidden_prototype(prototype->IsJSGlobalObject());
WriteBarrierMode wb_mode =
prototype->IsNull(isolate) ? SKIP_WRITE_BARRIER : UPDATE_WRITE_BARRIER;
@@ -2711,7 +2681,7 @@ MaybeHandle<Map> NormalizedMapCache::Get(Handle<Map> fast_map,
}
Map normalized_map = Map::cast(heap_object);
- if (!normalized_map->EquivalentToForNormalization(*fast_map, mode)) {
+ if (!normalized_map.EquivalentToForNormalization(*fast_map, mode)) {
return MaybeHandle<Map>();
}
return handle(normalized_map, GetIsolate());
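
Most hunks in this file are mechanical: this V8 version finishes migrating Object and its subclasses from pointer-like handles to plain value types, so member access switches from `->` to `.`. A minimal sketch of that pattern (not V8's actual classes), including the transitional operator-> shim that the maybe-object.h hunks further down also show being used for incremental migration:

#include <cstdint>
#include <iostream>

using Address = std::uintptr_t;

class TaggedValue {
 public:
  explicit TaggedValue(Address ptr) : ptr_(ptr) {}
  Address ptr() const { return ptr_; }

  // Transitional shim: makes "v->ptr()" behave like "v.ptr()" so call sites
  // can be migrated one at a time, after which the shim is deleted.
  const TaggedValue* operator->() const { return this; }

 private:
  Address ptr_;  // the only data member: one tagged word, cheap to copy
};

int main() {
  TaggedValue v(0x42);
  std::cout << v.ptr() << " == " << v->ptr() << "\n";  // both spellings compile
  return 0;
}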
diff --git a/deps/v8/src/objects/map.h b/deps/v8/src/objects/map.h
index 96c09e1664..814f8ed3be 100644
--- a/deps/v8/src/objects/map.h
+++ b/deps/v8/src/objects/map.h
@@ -5,10 +5,11 @@
#ifndef V8_OBJECTS_MAP_H_
#define V8_OBJECTS_MAP_H_
-#include "src/globals.h"
-#include "src/objects.h"
+#include "src/common/globals.h"
#include "src/objects/code.h"
#include "src/objects/heap-object.h"
+#include "src/objects/objects.h"
+#include "torque-generated/field-offsets-tq.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -41,8 +42,6 @@ enum InstanceType : uint16_t;
V(FeedbackCell) \
V(FeedbackVector) \
V(FixedArray) \
- V(FixedFloat64Array) \
- V(FixedTypedArrayBase) \
V(FreeSpace) \
V(JSApiObject) \
V(JSArrayBuffer) \
@@ -72,6 +71,7 @@ enum InstanceType : uint16_t;
V(TransitionArray) \
V(UncompiledDataWithoutPreparseData) \
V(UncompiledDataWithPreparseData) \
+ V(WasmCapiFunctionData) \
V(WasmInstanceObject) \
V(WeakArray) \
V(WeakCell)
@@ -140,7 +140,7 @@ using MapHandles = std::vector<Handle<Map>>;
// | Byte | [bit_field2] |
// | | - is_extensible (bit 0) |
// | | - is_prototype_map (bit 1) |
-// | | - is_in_retained_map_list (bit 2) |
+// | | - has_hidden_prototype (bit 2) |
// | | - elements_kind (bits 3..7) |
// +----+----------+---------------------------------------------+
// | Int | [bit_field3] |
@@ -148,7 +148,7 @@ using MapHandles = std::vector<Handle<Map>>;
// | | - number_of_own_descriptors (bit 10..19) |
// | | - is_dictionary_map (bit 20) |
// | | - owns_descriptors (bit 21) |
-// | | - has_hidden_prototype (bit 22) |
+// | | - is_in_retained_map_list (bit 22) |
// | | - is_deprecated (bit 23) |
// | | - is_unstable (bit 24) |
// | | - is_migration_target (bit 25) |
@@ -165,11 +165,6 @@ using MapHandles = std::vector<Handle<Map>>;
// +---------------+---------------------------------------------+
// | TaggedPointer | [constructor_or_backpointer] |
// +---------------+---------------------------------------------+
-// | TaggedPointer | If Map is a prototype map: |
-// | | [prototype_info] |
-// | | Else: |
-// | | [raw_transitions] |
-// +---------------+---------------------------------------------+
// | TaggedPointer | [instance_descriptors] |
// +*************************************************************+
// ! TaggedPointer ! [layout_descriptors] !
@@ -179,6 +174,13 @@ using MapHandles = std::vector<Handle<Map>>;
// +*************************************************************+
// | TaggedPointer | [dependent_code] |
// +---------------+---------------------------------------------+
+// | TaggedPointer | [prototype_validity_cell] |
+// +---------------+---------------------------------------------+
+// | TaggedPointer | If Map is a prototype map: |
+// | | [prototype_info] |
+// | | Else: |
+// | | [raw_transitions] |
+// +---------------+---------------------------------------------+
class Map : public HeapObject {
public:
@@ -263,10 +265,10 @@ class Map : public HeapObject {
DECL_PRIMITIVE_ACCESSORS(bit_field2, byte)
// Bit positions for |bit_field2|.
-#define MAP_BIT_FIELD2_FIELDS(V, _) \
- V(IsExtensibleBit, bool, 1, _) \
- V(IsPrototypeMapBit, bool, 1, _) \
- V(IsInRetainedMapListBit, bool, 1, _) \
+#define MAP_BIT_FIELD2_FIELDS(V, _) \
+ V(IsExtensibleBit, bool, 1, _) \
+ V(IsPrototypeMapBit, bool, 1, _) \
+ V(HasHiddenPrototypeBit, bool, 1, _) \
V(ElementsKindBits, ElementsKind, 5, _)
DEFINE_BIT_FIELDS(MAP_BIT_FIELD2_FIELDS)
@@ -287,7 +289,7 @@ class Map : public HeapObject {
V(NumberOfOwnDescriptorsBits, int, kDescriptorIndexBitCount, _) \
V(IsDictionaryMapBit, bool, 1, _) \
V(OwnsDescriptorsBit, bool, 1, _) \
- V(HasHiddenPrototypeBit, bool, 1, _) \
+ V(IsInRetainedMapListBit, bool, 1, _) \
V(IsDeprecatedBit, bool, 1, _) \
V(IsUnstableBit, bool, 1, _) \
V(IsMigrationTargetBit, bool, 1, _) \
@@ -419,7 +421,7 @@ class Map : public HeapObject {
inline bool has_sloppy_arguments_elements() const;
inline bool has_fast_sloppy_arguments_elements() const;
inline bool has_fast_string_wrapper_elements() const;
- inline bool has_fixed_typed_array_elements() const;
+ inline bool has_typed_array_elements() const;
inline bool has_dictionary_elements() const;
inline bool has_frozen_or_sealed_elements() const;
inline bool has_sealed_elements() const;
@@ -518,17 +520,16 @@ class Map : public HeapObject {
static inline bool IsMostGeneralFieldType(Representation representation,
FieldType field_type);
- // Generalizes constness, representation and field_type if objects with given
- // instance type can have fast elements that can be transitioned by stubs or
- // optimized code to more general elements kind.
+ // Generalizes representation and field_type if objects with given
+ // instance type can have fast elements that can be transitioned by
+ // stubs or optimized code to more general elements kind.
// This generalization is necessary in order to ensure that elements kind
// transitions performed by stubs / optimized code don't silently transition
- // PropertyConstness::kMutable fields back to VariableMode::kConst state or
+ // fields with representation "Tagged" back to "Smi" or "HeapObject" or
// fields with HeapObject representation and "Any" type back to "Class" type.
static inline void GeneralizeIfCanHaveTransitionableFastElementsKind(
Isolate* isolate, InstanceType instance_type,
- PropertyConstness* constness, Representation* representation,
- Handle<FieldType>* field_type);
+ Representation* representation, Handle<FieldType>* field_type);
V8_EXPORT_PRIVATE static Handle<Map> ReconfigureProperty(
Isolate* isolate, Handle<Map> map, int modify_index,
@@ -724,9 +725,8 @@ class Map : public HeapObject {
V8_EXPORT_PRIVATE static Handle<Map> CopyForPreventExtensions(
Isolate* isolate, Handle<Map> map, PropertyAttributes attrs_to_add,
- Handle<Symbol> transition_marker, const char* reason);
-
- static Handle<Map> FixProxy(Handle<Map> map, InstanceType type, int size);
+ Handle<Symbol> transition_marker, const char* reason,
+ bool old_map_is_dictionary_elements_kind = false);
// Maximal number of fast properties. Used to restrict the number of map
// transitions to avoid an explosion in the number of maps for objects used as
@@ -829,34 +829,8 @@ class Map : public HeapObject {
static const int kMaxPreAllocatedPropertyFields = 255;
- // Layout description.
-#define MAP_FIELDS(V) \
- /* Raw data fields. */ \
- V(kInstanceSizeInWordsOffset, kUInt8Size) \
- V(kInObjectPropertiesStartOrConstructorFunctionIndexOffset, kUInt8Size) \
- V(kUsedOrUnusedInstanceSizeInWordsOffset, kUInt8Size) \
- V(kVisitorIdOffset, kUInt8Size) \
- V(kInstanceTypeOffset, kUInt16Size) \
- V(kBitFieldOffset, kUInt8Size) \
- V(kBitField2Offset, kUInt8Size) \
- V(kBitField3Offset, kUInt32Size) \
- /* Adds padding to make tagged fields kTaggedSize-aligned. */ \
- V(kOptionalPaddingOffset, OBJECT_POINTER_PADDING(kOptionalPaddingOffset)) \
- /* Pointer fields. */ \
- V(kPointerFieldsBeginOffset, 0) \
- V(kPrototypeOffset, kTaggedSize) \
- V(kConstructorOrBackPointerOffset, kTaggedSize) \
- V(kTransitionsOrPrototypeInfoOffset, kTaggedSize) \
- V(kDescriptorsOffset, kTaggedSize) \
- V(kLayoutDescriptorOffset, FLAG_unbox_double_fields ? kTaggedSize : 0) \
- V(kDependentCodeOffset, kTaggedSize) \
- V(kPrototypeValidityCellOffset, kTaggedSize) \
- V(kPointerFieldsEndOffset, 0) \
- /* Total size. */ \
- V(kSize, 0)
-
- DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize, MAP_FIELDS)
-#undef MAP_FIELDS
+ DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize,
+ TORQUE_GENERATED_MAP_FIELDS)
STATIC_ASSERT(kInstanceTypeOffset == Internals::kMapInstanceTypeOffset);
@@ -987,6 +961,7 @@ class Map : public HeapObject {
Isolate* isolate, FILE* file, const char* reason, int modify_index,
int split, int descriptors, bool constant_to_field,
Representation old_representation, Representation new_representation,
+ PropertyConstness old_constness, PropertyConstness new_constness,
MaybeHandle<FieldType> old_field_type, MaybeHandle<Object> old_value,
MaybeHandle<FieldType> new_field_type, MaybeHandle<Object> new_value);
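
The bit_field2/bit_field3 hunks above swap HasHiddenPrototypeBit and IsInRetainedMapListBit between the two words. A standalone sketch of the BitField pattern those V(...) macro lists expand into (simplified; the helper template here is illustrative, not V8's exact utility), showing why moving a flag between words is just moving its definition to another list:

#include <cassert>
#include <cstdint>

template <typename T, int kShift, int kSize, typename U = uint32_t>
struct BitField {
  static constexpr U kMask = ((U{1} << kSize) - 1) << kShift;
  static constexpr U encode(T value) { return static_cast<U>(value) << kShift; }
  static constexpr T decode(U word) {
    return static_cast<T>((word & kMask) >> kShift);
  }
  static constexpr U update(U word, T value) {
    return (word & ~kMask) | encode(value);
  }
};

// Mirrors the bit_field2 layout comment: bit 0, bit 1, bit 2, 5 bits of kind.
using IsExtensibleBit = BitField<bool, 0, 1>;
using IsPrototypeMapBit = BitField<bool, 1, 1>;
using HasHiddenPrototypeBit = BitField<bool, 2, 1>;
using ElementsKindBits = BitField<int, 3, 5>;

int main() {
  uint32_t bit_field2 = 0;
  bit_field2 = HasHiddenPrototypeBit::update(bit_field2, true);
  bit_field2 = ElementsKindBits::update(bit_field2, 17);
  assert(HasHiddenPrototypeBit::decode(bit_field2));
  assert(ElementsKindBits::decode(bit_field2) == 17);
  return 0;
}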
diff --git a/deps/v8/src/objects/maybe-object-inl.h b/deps/v8/src/objects/maybe-object-inl.h
index 72c328d29b..02f5b485ce 100644
--- a/deps/v8/src/objects/maybe-object-inl.h
+++ b/deps/v8/src/objects/maybe-object-inl.h
@@ -8,109 +8,54 @@
#include "src/objects/maybe-object.h"
#ifdef V8_COMPRESS_POINTERS
-#include "src/isolate.h"
+#include "src/execution/isolate.h"
#endif
-#include "src/objects/heap-object-inl.h"
-#include "src/objects/slots-inl.h"
#include "src/objects/smi-inl.h"
+#include "src/objects/tagged-impl-inl.h"
namespace v8 {
namespace internal {
-bool MaybeObject::ToSmi(Smi* value) {
- if (HAS_SMI_TAG(ptr_)) {
- *value = Smi::cast(Object(ptr_));
- return true;
- }
- return false;
-}
-
-Smi MaybeObject::ToSmi() const {
- DCHECK(HAS_SMI_TAG(ptr_));
- return Smi::cast(Object(ptr_));
-}
-
-bool MaybeObject::IsStrongOrWeak() const {
- if (IsSmi() || IsCleared()) {
- return false;
- }
- return true;
-}
-
-bool MaybeObject::GetHeapObject(HeapObject* result) const {
- if (IsSmi() || IsCleared()) {
- return false;
- }
- *result = GetHeapObject();
- return true;
-}
-
-bool MaybeObject::GetHeapObject(HeapObject* result,
- HeapObjectReferenceType* reference_type) const {
- if (IsSmi() || IsCleared()) {
- return false;
- }
- *reference_type = HasWeakHeapObjectTag(ptr_)
- ? HeapObjectReferenceType::WEAK
- : HeapObjectReferenceType::STRONG;
- *result = GetHeapObject();
- return true;
-}
-
-bool MaybeObject::IsStrong() const {
- return !HasWeakHeapObjectTag(ptr_) && !IsSmi();
-}
-
-bool MaybeObject::GetHeapObjectIfStrong(HeapObject* result) const {
- if (!HasWeakHeapObjectTag(ptr_) && !IsSmi()) {
- *result = HeapObject::cast(Object(ptr_));
- return true;
- }
- return false;
-}
+//
+// MaybeObject implementation.
+//
-HeapObject MaybeObject::GetHeapObjectAssumeStrong() const {
- DCHECK(IsStrong());
- return HeapObject::cast(Object(ptr_));
+// static
+MaybeObject MaybeObject::FromSmi(Smi smi) {
+ DCHECK(HAS_SMI_TAG(smi.ptr()));
+ return MaybeObject(smi.ptr());
}
-bool MaybeObject::IsWeak() const {
- return HasWeakHeapObjectTag(ptr_) && !IsCleared();
+// static
+MaybeObject MaybeObject::FromObject(Object object) {
+ DCHECK(!HAS_WEAK_HEAP_OBJECT_TAG(object.ptr()));
+ return MaybeObject(object.ptr());
}
-bool MaybeObject::IsWeakOrCleared() const { return HasWeakHeapObjectTag(ptr_); }
-
-bool MaybeObject::GetHeapObjectIfWeak(HeapObject* result) const {
- if (IsWeak()) {
- *result = GetHeapObject();
- return true;
- }
- return false;
+MaybeObject MaybeObject::MakeWeak(MaybeObject object) {
+ DCHECK(object.IsStrongOrWeak());
+ return MaybeObject(object.ptr() | kWeakHeapObjectMask);
}
-HeapObject MaybeObject::GetHeapObjectAssumeWeak() const {
- DCHECK(IsWeak());
- return GetHeapObject();
-}
+//
+// HeapObjectReference implementation.
+//
-HeapObject MaybeObject::GetHeapObject() const {
- DCHECK(!IsSmi());
- DCHECK(!IsCleared());
- return HeapObject::cast(Object(ptr_ & ~kWeakHeapObjectMask));
-}
+HeapObjectReference::HeapObjectReference(Object object)
+ : MaybeObject(object.ptr()) {}
-Object MaybeObject::GetHeapObjectOrSmi() const {
- if (IsSmi()) {
- return Object(ptr_);
- }
- return GetHeapObject();
+// static
+HeapObjectReference HeapObjectReference::Strong(Object object) {
+ DCHECK(!object.IsSmi());
+ DCHECK(!HasWeakHeapObjectTag(object));
+ return HeapObjectReference(object);
}
-bool MaybeObject::IsObject() const { return IsSmi() || IsStrong(); }
-
-MaybeObject MaybeObject::MakeWeak(MaybeObject object) {
- DCHECK(object.IsStrongOrWeak());
- return MaybeObject(object.ptr_ | kWeakHeapObjectMask);
+// static
+HeapObjectReference HeapObjectReference::Weak(Object object) {
+ DCHECK(!object.IsSmi());
+ DCHECK(!HasWeakHeapObjectTag(object));
+ return HeapObjectReference(object.ptr() | kWeakHeapObjectMask);
}
// static
@@ -137,18 +82,18 @@ void HeapObjectReference::Update(THeapObjectSlot slot, HeapObject value) {
"Only FullHeapObjectSlot and HeapObjectSlot are expected here");
Address old_value = (*slot).ptr();
DCHECK(!HAS_SMI_TAG(old_value));
- Address new_value = value->ptr();
+ Address new_value = value.ptr();
DCHECK(Internals::HasHeapObjectTag(new_value));
#ifdef DEBUG
- bool weak_before = HasWeakHeapObjectTag(old_value);
+ bool weak_before = HAS_WEAK_HEAP_OBJECT_TAG(old_value);
#endif
slot.store(
HeapObjectReference(new_value | (old_value & kWeakHeapObjectMask)));
#ifdef DEBUG
- bool weak_after = HasWeakHeapObjectTag((*slot).ptr());
+ bool weak_after = HAS_WEAK_HEAP_OBJECT_TAG((*slot).ptr());
DCHECK_EQ(weak_before, weak_after);
#endif
}
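
Everything in this file manipulates one tagged word. A self-contained sketch of the weak-bit scheme, with a stand-in constant rather than the real kWeakHeapObjectMask: heap pointers are aligned, so a low bit is free to mark a reference as weak, and HeapObjectReference::Update above retargets a slot while preserving that bit.

#include <cassert>
#include <cstdint>

using Address = std::uintptr_t;
constexpr Address kWeakMask = 0b10;  // stand-in for kWeakHeapObjectMask

inline Address MakeWeak(Address strong) { return strong | kWeakMask; }
inline bool IsWeak(Address ref) { return (ref & kWeakMask) != 0; }
inline Address ToStrongPointer(Address ref) { return ref & ~kWeakMask; }

// Replace the target of a slot while keeping its strong/weak flavor, the
// way HeapObjectReference::Update does.
inline void Update(Address* slot, Address new_object) {
  *slot = new_object | (*slot & kWeakMask);
}

int main() {
  alignas(8) static int heap_object_a, heap_object_b;
  Address a = reinterpret_cast<Address>(&heap_object_a);
  Address b = reinterpret_cast<Address>(&heap_object_b);

  Address slot = MakeWeak(a);
  assert(IsWeak(slot) && ToStrongPointer(slot) == a);
  Update(&slot, b);                                    // retarget the slot...
  assert(IsWeak(slot) && ToStrongPointer(slot) == b);  // ...weakness is kept
  return 0;
}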
diff --git a/deps/v8/src/objects/maybe-object.h b/deps/v8/src/objects/maybe-object.h
index e62099b2d5..a1645c0604 100644
--- a/deps/v8/src/objects/maybe-object.h
+++ b/deps/v8/src/objects/maybe-object.h
@@ -5,121 +5,31 @@
#ifndef V8_OBJECTS_MAYBE_OBJECT_H_
#define V8_OBJECTS_MAYBE_OBJECT_H_
-#include "include/v8-internal.h"
-#include "include/v8.h"
-#include "src/globals.h"
-#include "src/objects.h"
-#include "src/objects/smi.h"
+#include "src/objects/tagged-impl.h"
namespace v8 {
namespace internal {
-class HeapObject;
-class Isolate;
-class StringStream;
-
// A MaybeObject is either a SMI, a strong reference to a HeapObject, a weak
// reference to a HeapObject, or a cleared weak reference. It's used for
// implementing in-place weak references (see design doc: goo.gl/j6SdcK )
-class MaybeObject {
+class MaybeObject : public TaggedImpl<HeapObjectReferenceType::WEAK, Address> {
public:
- MaybeObject() : ptr_(kNullAddress) {}
- explicit MaybeObject(Address ptr) : ptr_(ptr) {}
-
- bool operator==(const MaybeObject& other) const { return ptr_ == other.ptr_; }
- bool operator!=(const MaybeObject& other) const { return ptr_ != other.ptr_; }
-
- Address ptr() const { return ptr_; }
-
- // Enable incremental transition of client code.
- MaybeObject* operator->() { return this; }
- const MaybeObject* operator->() const { return this; }
-
- bool IsSmi() const { return HAS_SMI_TAG(ptr_); }
- inline bool ToSmi(Smi* value);
- inline Smi ToSmi() const;
-
- bool IsCleared() const {
- return static_cast<uint32_t>(ptr_) == kClearedWeakHeapObjectLower32;
- }
-
- inline bool IsStrongOrWeak() const;
- inline bool IsStrong() const;
-
- // If this MaybeObject is a strong pointer to a HeapObject, returns true and
- // sets *result. Otherwise returns false.
- inline bool GetHeapObjectIfStrong(HeapObject* result) const;
-
- // DCHECKs that this MaybeObject is a strong pointer to a HeapObject and
- // returns the HeapObject.
- inline HeapObject GetHeapObjectAssumeStrong() const;
-
- inline bool IsWeak() const;
- inline bool IsWeakOrCleared() const;
-
- // If this MaybeObject is a weak pointer to a HeapObject, returns true and
- // sets *result. Otherwise returns false.
- inline bool GetHeapObjectIfWeak(HeapObject* result) const;
-
- // DCHECKs that this MaybeObject is a weak pointer to a HeapObject and
- // returns the HeapObject.
- inline HeapObject GetHeapObjectAssumeWeak() const;
+ constexpr MaybeObject() : TaggedImpl(kNullAddress) {}
+ constexpr explicit MaybeObject(Address ptr) : TaggedImpl(ptr) {}
- // If this MaybeObject is a strong or weak pointer to a HeapObject, returns
- // true and sets *result. Otherwise returns false.
- inline bool GetHeapObject(HeapObject* result) const;
- inline bool GetHeapObject(HeapObject* result,
- HeapObjectReferenceType* reference_type) const;
+ // These operator->() overloads are required for handlified code.
+ constexpr const MaybeObject* operator->() const { return this; }
- // DCHECKs that this MaybeObject is a strong or a weak pointer to a HeapObject
- // and returns the HeapObject.
- inline HeapObject GetHeapObject() const;
+ V8_INLINE static MaybeObject FromSmi(Smi smi);
- // DCHECKs that this MaybeObject is a strong or a weak pointer to a HeapObject
- // or a SMI and returns the HeapObject or SMI.
- inline Object GetHeapObjectOrSmi() const;
+ V8_INLINE static MaybeObject FromObject(Object object);
- inline bool IsObject() const;
- template <typename T>
- T cast() const {
- DCHECK(!HasWeakHeapObjectTag(ptr_));
- return T::cast(Object(ptr_));
- }
-
- static MaybeObject FromSmi(Smi smi) {
- DCHECK(HAS_SMI_TAG(smi->ptr()));
- return MaybeObject(smi->ptr());
- }
-
- static MaybeObject FromObject(Object object) {
- DCHECK(!HasWeakHeapObjectTag(object.ptr()));
- return MaybeObject(object.ptr());
- }
-
- static inline MaybeObject MakeWeak(MaybeObject object);
+ V8_INLINE static MaybeObject MakeWeak(MaybeObject object);
#ifdef VERIFY_HEAP
static void VerifyMaybeObjectPointer(Isolate* isolate, MaybeObject p);
#endif
-
- // Prints this object without details.
- void ShortPrint(FILE* out = stdout);
-
- // Prints this object without details to a message accumulator.
- void ShortPrint(StringStream* accumulator);
-
- void ShortPrint(std::ostream& os);
-
-#ifdef OBJECT_PRINT
- void Print();
- void Print(std::ostream& os);
-#else
- void Print() { ShortPrint(); }
- void Print(std::ostream& os) { ShortPrint(os); }
-#endif
-
- private:
- Address ptr_;
};
// A HeapObjectReference is either a strong reference to a HeapObject, a weak
@@ -127,19 +37,11 @@ class MaybeObject {
class HeapObjectReference : public MaybeObject {
public:
explicit HeapObjectReference(Address address) : MaybeObject(address) {}
- explicit HeapObjectReference(Object object) : MaybeObject(object->ptr()) {}
-
- static HeapObjectReference Strong(Object object) {
- DCHECK(!object->IsSmi());
- DCHECK(!HasWeakHeapObjectTag(object));
- return HeapObjectReference(object);
- }
-
- static HeapObjectReference Weak(Object object) {
- DCHECK(!object->IsSmi());
- DCHECK(!HasWeakHeapObjectTag(object));
- return HeapObjectReference(object->ptr() | kWeakHeapObjectMask);
- }
+ V8_INLINE explicit HeapObjectReference(Object object);
+
+ V8_INLINE static HeapObjectReference Strong(Object object);
+
+ V8_INLINE static HeapObjectReference Weak(Object object);
V8_INLINE static HeapObjectReference ClearedValue(Isolate* isolate);
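
The header now derives MaybeObject from TaggedImpl, so the smi/strong/weak predicates deleted above live in one shared base parameterized by whether the word may carry a weak tag. A rough sketch of that shape, with illustrative names and tag bits, not V8's exact template:

#include <cstdint>

using Address = std::uintptr_t;
enum class RefType { STRONG, WEAK };

template <RefType kRefType, typename StorageType>
class TaggedImpl {
 public:
  constexpr explicit TaggedImpl(StorageType ptr) : ptr_(ptr) {}
  constexpr StorageType ptr() const { return ptr_; }
  constexpr bool IsSmi() const { return (ptr_ & 1) == 0; }  // stand-in tag
  constexpr bool IsWeak() const {
    // Only weak-capable instantiations can ever answer "true" here.
    return kRefType == RefType::WEAK && (ptr_ & 0b10) != 0;
  }

 protected:
  StorageType ptr_;
};

// Object wraps a word that is always strong; MaybeObject may be weak.
class Object : public TaggedImpl<RefType::STRONG, Address> {
 public:
  constexpr explicit Object(Address ptr) : TaggedImpl(ptr) {}
};

class MaybeObject : public TaggedImpl<RefType::WEAK, Address> {
 public:
  constexpr explicit MaybeObject(Address ptr) : TaggedImpl(ptr) {}
};

int main() {
  static_assert(!Object(0x100).IsWeak(), "strong refs are never weak");
  static_assert(MaybeObject(0x102).IsWeak(), "weak bit is honored");
  return 0;
}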
diff --git a/deps/v8/src/objects/microtask-inl.h b/deps/v8/src/objects/microtask-inl.h
index 036b227056..91fa5890cb 100644
--- a/deps/v8/src/objects/microtask-inl.h
+++ b/deps/v8/src/objects/microtask-inl.h
@@ -7,8 +7,8 @@
#include "src/objects/microtask.h"
-#include "src/contexts-inl.h"
#include "src/heap/heap-write-barrier-inl.h"
+#include "src/objects/contexts-inl.h"
#include "src/objects/foreign-inl.h"
#include "src/objects/js-objects-inl.h"
diff --git a/deps/v8/src/objects/microtask.h b/deps/v8/src/objects/microtask.h
index b3c81533a6..d631bf6903 100644
--- a/deps/v8/src/objects/microtask.h
+++ b/deps/v8/src/objects/microtask.h
@@ -5,7 +5,7 @@
#ifndef V8_OBJECTS_MICROTASK_H_
#define V8_OBJECTS_MICROTASK_H_
-#include "src/objects.h"
+#include "src/objects/objects.h"
#include "src/objects/struct.h"
// Has to be the last include (doesn't have include guards):
diff --git a/deps/v8/src/objects/module-inl.h b/deps/v8/src/objects/module-inl.h
index 09e19343e1..a3bc31b63a 100644
--- a/deps/v8/src/objects/module-inl.h
+++ b/deps/v8/src/objects/module-inl.h
@@ -7,7 +7,7 @@
#include "src/objects/module.h"
-#include "src/objects-inl.h" // Needed for write barriers
+#include "src/objects/objects-inl.h" // Needed for write barriers
#include "src/objects/scope-info.h"
// Has to be the last include (doesn't have include guards):
@@ -40,7 +40,7 @@ SMI_ACCESSORS(Module, hash, kHashOffset)
ModuleInfo Module::info() const {
return (status() >= kEvaluating)
? ModuleInfo::cast(code())
- : GetSharedFunctionInfo()->scope_info()->ModuleDescriptorInfo();
+ : GetSharedFunctionInfo().scope_info().ModuleDescriptorInfo();
}
CAST_ACCESSOR(JSModuleNamespace)
@@ -84,12 +84,12 @@ FixedArray ModuleInfo::module_request_positions() const {
#ifdef DEBUG
bool ModuleInfo::Equals(ModuleInfo other) const {
- return regular_exports() == other->regular_exports() &&
- regular_imports() == other->regular_imports() &&
- special_exports() == other->special_exports() &&
- namespace_imports() == other->namespace_imports() &&
- module_requests() == other->module_requests() &&
- module_request_positions() == other->module_request_positions();
+ return regular_exports() == other.regular_exports() &&
+ regular_imports() == other.regular_imports() &&
+ special_exports() == other.special_exports() &&
+ namespace_imports() == other.namespace_imports() &&
+ module_requests() == other.module_requests() &&
+ module_request_positions() == other.module_request_positions();
}
#endif
diff --git a/deps/v8/src/objects/module.cc b/deps/v8/src/objects/module.cc
index 4e2ae75b06..ea40989df1 100644
--- a/deps/v8/src/objects/module.cc
+++ b/deps/v8/src/objects/module.cc
@@ -7,15 +7,15 @@
#include "src/objects/module.h"
-#include "src/accessors.h"
-#include "src/api-inl.h"
+#include "src/api/api-inl.h"
#include "src/ast/modules.h"
-#include "src/objects-inl.h"
+#include "src/builtins/accessors.h"
#include "src/objects/cell-inl.h"
#include "src/objects/hash-table-inl.h"
#include "src/objects/js-generator-inl.h"
#include "src/objects/module-inl.h"
-#include "src/ostreams.h"
+#include "src/utils/ostreams.h"
+#include "src/objects/objects-inl.h"
namespace v8 {
namespace internal {
@@ -120,7 +120,7 @@ void Module::CreateIndirectExport(Isolate* isolate, Handle<Module> module,
Handle<String> name,
Handle<ModuleInfoEntry> entry) {
Handle<ObjectHashTable> exports(module->exports(), isolate);
- DCHECK(exports->Lookup(name)->IsTheHole(isolate));
+ DCHECK(exports->Lookup(name).IsTheHole(isolate));
exports = ObjectHashTable::Put(exports, name, entry);
module->set_exports(*exports);
}
@@ -130,12 +130,12 @@ void Module::CreateExport(Isolate* isolate, Handle<Module> module,
DCHECK_LT(0, names->length());
Handle<Cell> cell =
isolate->factory()->NewCell(isolate->factory()->undefined_value());
- module->regular_exports()->set(ExportIndex(cell_index), *cell);
+ module->regular_exports().set(ExportIndex(cell_index), *cell);
Handle<ObjectHashTable> exports(module->exports(), isolate);
for (int i = 0, n = names->length(); i < n; ++i) {
Handle<String> name(String::cast(names->get(i)), isolate);
- DCHECK(exports->Lookup(name)->IsTheHole(isolate));
+ DCHECK(exports->Lookup(name).IsTheHole(isolate));
exports = ObjectHashTable::Put(exports, name, cell);
}
module->set_exports(*exports);
@@ -146,21 +146,20 @@ Cell Module::GetCell(int cell_index) {
Object cell;
switch (ModuleDescriptor::GetCellIndexKind(cell_index)) {
case ModuleDescriptor::kImport:
- cell = regular_imports()->get(ImportIndex(cell_index));
+ cell = regular_imports().get(ImportIndex(cell_index));
break;
case ModuleDescriptor::kExport:
- cell = regular_exports()->get(ExportIndex(cell_index));
+ cell = regular_exports().get(ExportIndex(cell_index));
break;
case ModuleDescriptor::kInvalid:
UNREACHABLE();
- break;
}
return Cell::cast(cell);
}
Handle<Object> Module::LoadVariable(Isolate* isolate, Handle<Module> module,
int cell_index) {
- return handle(module->GetCell(cell_index)->value(), isolate);
+ return handle(module->GetCell(cell_index).value(), isolate);
}
void Module::StoreVariable(Handle<Module> module, int cell_index,
@@ -168,7 +167,7 @@ void Module::StoreVariable(Handle<Module> module, int cell_index,
DisallowHeapAllocation no_gc;
DCHECK_EQ(ModuleDescriptor::GetCellIndexKind(cell_index),
ModuleDescriptor::kExport);
- module->GetCell(cell_index)->set_value(*value);
+ module->GetCell(cell_index).set_value(*value);
}
#ifdef DEBUG
@@ -177,7 +176,7 @@ void Module::PrintStatusTransition(Status new_status) {
StdoutStream os;
os << "Changing module status from " << status() << " to " << new_status
<< " for ";
- script()->GetNameOrSourceURL()->Print(os);
+ script().GetNameOrSourceURL().Print(os);
#ifndef OBJECT_PRINT
os << "\n";
#endif // OBJECT_PRINT
@@ -216,24 +215,24 @@ void Module::Reset(Isolate* isolate, Handle<Module> module) {
DCHECK(module->status() == kPreInstantiating ||
module->status() == kInstantiating);
- DCHECK(module->exception()->IsTheHole(isolate));
- DCHECK(module->import_meta()->IsTheHole(isolate));
+ DCHECK(module->exception().IsTheHole(isolate));
+ DCHECK(module->import_meta().IsTheHole(isolate));
// The namespace object cannot exist, because it would have been created
// by RunInitializationCode, which is called only after this module's SCC
// succeeds instantiation.
- DCHECK(!module->module_namespace()->IsJSModuleNamespace());
+ DCHECK(!module->module_namespace().IsJSModuleNamespace());
Handle<ObjectHashTable> exports =
- ObjectHashTable::New(isolate, module->info()->RegularExportCount());
+ ObjectHashTable::New(isolate, module->info().RegularExportCount());
Handle<FixedArray> regular_exports =
- factory->NewFixedArray(module->regular_exports()->length());
+ factory->NewFixedArray(module->regular_exports().length());
Handle<FixedArray> regular_imports =
- factory->NewFixedArray(module->regular_imports()->length());
+ factory->NewFixedArray(module->regular_imports().length());
Handle<FixedArray> requested_modules =
- factory->NewFixedArray(module->requested_modules()->length());
+ factory->NewFixedArray(module->requested_modules().length());
if (module->status() == kInstantiating) {
- module->set_code(JSFunction::cast(module->code())->shared());
+ module->set_code(JSFunction::cast(module->code()).shared());
}
#ifdef DEBUG
module->PrintStatusTransition(kUninstantiated);
@@ -249,9 +248,9 @@ void Module::Reset(Isolate* isolate, Handle<Module> module) {
void Module::RecordError(Isolate* isolate) {
DisallowHeapAllocation no_alloc;
- DCHECK(exception()->IsTheHole(isolate));
+ DCHECK(exception().IsTheHole(isolate));
Object the_exception = isolate->pending_exception();
- DCHECK(!the_exception->IsTheHole(isolate));
+ DCHECK(!the_exception.IsTheHole(isolate));
set_code(info());
#ifdef DEBUG
@@ -264,7 +263,7 @@ void Module::RecordError(Isolate* isolate) {
Object Module::GetException() {
DisallowHeapAllocation no_alloc;
DCHECK_EQ(status(), Module::kErrored);
- DCHECK(!exception()->IsTheHole());
+ DCHECK(!exception().IsTheHole());
return exception();
}
@@ -275,14 +274,14 @@ SharedFunctionInfo Module::GetSharedFunctionInfo() const {
switch (status()) {
case kUninstantiated:
case kPreInstantiating:
- DCHECK(code()->IsSharedFunctionInfo());
+ DCHECK(code().IsSharedFunctionInfo());
return SharedFunctionInfo::cast(code());
case kInstantiating:
- DCHECK(code()->IsJSFunction());
- return JSFunction::cast(code())->shared();
+ DCHECK(code().IsJSFunction());
+ return JSFunction::cast(code()).shared();
case kInstantiated:
- DCHECK(code()->IsJSGeneratorObject());
- return JSGeneratorObject::cast(code())->function()->shared();
+ DCHECK(code().IsJSGeneratorObject());
+ return JSGeneratorObject::cast(code()).function().shared();
case kEvaluating:
case kEvaluated:
case kErrored:
@@ -297,9 +296,9 @@ MaybeHandle<Cell> Module::ResolveImport(Isolate* isolate, Handle<Module> module,
MessageLocation loc, bool must_resolve,
Module::ResolveSet* resolve_set) {
Handle<Module> requested_module(
- Module::cast(module->requested_modules()->get(module_request)), isolate);
+ Module::cast(module->requested_modules().get(module_request)), isolate);
Handle<String> specifier(
- String::cast(module->info()->module_requests()->get(module_request)),
+ String::cast(module->info().module_requests().get(module_request)),
isolate);
MaybeHandle<Cell> result =
Module::ResolveExport(isolate, requested_module, specifier, name, loc,
@@ -315,7 +314,7 @@ MaybeHandle<Cell> Module::ResolveExport(Isolate* isolate, Handle<Module> module,
Module::ResolveSet* resolve_set) {
DCHECK_GE(module->status(), kPreInstantiating);
DCHECK_NE(module->status(), kEvaluating);
- Handle<Object> object(module->exports()->Lookup(export_name), isolate);
+ Handle<Object> object(module->exports().Lookup(export_name), isolate);
if (object->IsCell()) {
// Already resolved (e.g. because it's a local export).
return Handle<Cell>::cast(object);
@@ -363,7 +362,7 @@ MaybeHandle<Cell> Module::ResolveExport(Isolate* isolate, Handle<Module> module,
// The export table may have changed but the entry in question should be
// unchanged.
Handle<ObjectHashTable> exports(module->exports(), isolate);
- DCHECK(exports->Lookup(export_name)->IsModuleInfoEntry());
+ DCHECK(exports->Lookup(export_name).IsModuleInfoEntry());
exports = ObjectHashTable::Put(exports, export_name, cell);
module->set_exports(*exports);
@@ -384,12 +383,12 @@ MaybeHandle<Cell> Module::ResolveExportUsingStarExports(
// Go through all star exports looking for the given name. If multiple star
// exports provide the name, make sure they all map it to the same cell.
Handle<Cell> unique_cell;
- Handle<FixedArray> special_exports(module->info()->special_exports(),
+ Handle<FixedArray> special_exports(module->info().special_exports(),
isolate);
for (int i = 0, n = special_exports->length(); i < n; ++i) {
i::Handle<i::ModuleInfoEntry> entry(
i::ModuleInfoEntry::cast(special_exports->get(i)), isolate);
- if (!entry->export_name()->IsUndefined(isolate)) {
+ if (!entry->export_name().IsUndefined(isolate)) {
continue; // Indirect export.
}
@@ -415,7 +414,7 @@ MaybeHandle<Cell> Module::ResolveExportUsingStarExports(
if (!unique_cell.is_null()) {
// Found a unique star export for this name.
Handle<ObjectHashTable> exports(module->exports(), isolate);
- DCHECK(exports->Lookup(export_name)->IsTheHole(isolate));
+ DCHECK(exports->Lookup(export_name).IsTheHole(isolate));
exports = ObjectHashTable::Put(exports, export_name, unique_cell);
module->set_exports(*exports);
return unique_cell;
@@ -439,7 +438,7 @@ bool Module::Instantiate(Isolate* isolate, Handle<Module> module,
if (FLAG_trace_module_status) {
StdoutStream os;
os << "Instantiating module ";
- module->script()->GetNameOrSourceURL()->Print(os);
+ module->script().GetNameOrSourceURL().Print(os);
#ifndef OBJECT_PRINT
os << "\n";
#endif // OBJECT_PRINT
@@ -532,7 +531,7 @@ bool Module::PrepareInstantiate(Isolate* isolate, Handle<Module> module,
bool Module::RunInitializationCode(Isolate* isolate, Handle<Module> module) {
DCHECK_EQ(module->status(), kInstantiating);
Handle<JSFunction> function(JSFunction::cast(module->code()), isolate);
- DCHECK_EQ(MODULE_SCOPE, function->shared()->scope_info()->scope_type());
+ DCHECK_EQ(MODULE_SCOPE, function->shared().scope_info().scope_type());
Handle<Object> receiver = isolate->factory()->undefined_value();
Handle<Object> argv[] = {module};
MaybeHandle<Object> maybe_generator =
@@ -637,7 +636,7 @@ bool Module::FinishInstantiate(Isolate* isolate, Handle<Module> module,
.ToHandle(&cell)) {
return false;
}
- module->regular_imports()->set(ImportIndex(entry->cell_index()), *cell);
+ module->regular_imports().set(ImportIndex(entry->cell_index()), *cell);
}
// Resolve indirect exports.
@@ -664,7 +663,7 @@ MaybeHandle<Object> Module::Evaluate(Isolate* isolate, Handle<Module> module) {
if (FLAG_trace_module_status) {
StdoutStream os;
os << "Evaluating module ";
- module->script()->GetNameOrSourceURL()->Print(os);
+ module->script().GetNameOrSourceURL().Print(os);
#ifndef OBJECT_PRINT
os << "\n";
#endif // OBJECT_PRINT
@@ -710,7 +709,7 @@ MaybeHandle<Object> Module::Evaluate(Isolate* isolate, Handle<Module> module,
Handle<JSGeneratorObject> generator(JSGeneratorObject::cast(module->code()),
isolate);
module->set_code(
- generator->function()->shared()->scope_info()->ModuleDescriptorInfo());
+ generator->function().shared().scope_info().ModuleDescriptorInfo());
module->SetStatus(kEvaluating);
module->set_dfs_index(*dfs_index);
module->set_dfs_ancestor_index(*dfs_index);
@@ -748,10 +747,10 @@ MaybeHandle<Object> Module::Evaluate(Isolate* isolate, Handle<Module> module,
ASSIGN_RETURN_ON_EXCEPTION(
isolate, result, Execution::Call(isolate, resume, generator, 0, nullptr),
Object);
- DCHECK(JSIteratorResult::cast(*result)->done()->BooleanValue(isolate));
+ DCHECK(JSIteratorResult::cast(*result).done().BooleanValue(isolate));
CHECK(MaybeTransitionComponent(isolate, module, stack, kEvaluated));
- return handle(JSIteratorResult::cast(*result)->value(), isolate);
+ return handle(JSIteratorResult::cast(*result).value(), isolate);
}
namespace {
@@ -760,7 +759,7 @@ void FetchStarExports(Isolate* isolate, Handle<Module> module, Zone* zone,
UnorderedModuleSet* visited) {
DCHECK_GE(module->status(), Module::kInstantiating);
- if (module->module_namespace()->IsJSModuleNamespace()) return; // Shortcut.
+ if (module->module_namespace().IsJSModuleNamespace()) return; // Shortcut.
bool cycle = !visited->insert(module).second;
if (cycle) return;
@@ -771,17 +770,16 @@ void FetchStarExports(Isolate* isolate, Handle<Module> module, Zone* zone,
// Maybe split special_exports into indirect_exports and star_exports.
ReadOnlyRoots roots(isolate);
- Handle<FixedArray> special_exports(module->info()->special_exports(),
- isolate);
+ Handle<FixedArray> special_exports(module->info().special_exports(), isolate);
for (int i = 0, n = special_exports->length(); i < n; ++i) {
Handle<ModuleInfoEntry> entry(
ModuleInfoEntry::cast(special_exports->get(i)), isolate);
- if (!entry->export_name()->IsUndefined(roots)) {
+ if (!entry->export_name().IsUndefined(roots)) {
continue; // Indirect export.
}
Handle<Module> requested_module(
- Module::cast(module->requested_modules()->get(entry->module_request())),
+ Module::cast(module->requested_modules().get(entry->module_request())),
isolate);
// Recurse.
@@ -799,7 +797,7 @@ void FetchStarExports(Isolate* isolate, Handle<Module> module, Zone* zone,
Handle<String> name(String::cast(key), isolate);
if (name->Equals(roots.default_string())) continue;
- if (!exports->Lookup(name)->IsTheHole(roots)) continue;
+ if (!exports->Lookup(name).IsTheHole(roots)) continue;
Handle<Cell> cell(Cell::cast(requested_exports->ValueAt(i)), isolate);
auto insert_result = more_exports.insert(std::make_pair(name, cell));
@@ -834,7 +832,7 @@ Handle<JSModuleNamespace> Module::GetModuleNamespace(Isolate* isolate,
Handle<Module> module,
int module_request) {
Handle<Module> requested_module(
- Module::cast(module->requested_modules()->get(module_request)), isolate);
+ Module::cast(module->requested_modules().get(module_request)), isolate);
return Module::GetModuleNamespace(isolate, requested_module);
}
@@ -900,7 +898,7 @@ Handle<JSModuleNamespace> Module::GetModuleNamespace(Isolate* isolate,
MaybeHandle<Object> JSModuleNamespace::GetExport(Isolate* isolate,
Handle<String> name) {
- Handle<Object> object(module()->exports()->Lookup(name), isolate);
+ Handle<Object> object(module().exports().Lookup(name), isolate);
if (object->IsTheHole(isolate)) {
return isolate->factory()->undefined_value();
}
@@ -922,7 +920,7 @@ Maybe<PropertyAttributes> JSModuleNamespace::GetPropertyAttributes(
Isolate* isolate = it->isolate();
- Handle<Object> lookup(object->module()->exports()->Lookup(name), isolate);
+ Handle<Object> lookup(object->module().exports().Lookup(name), isolate);
if (lookup->IsTheHole(isolate)) {
return Just(ABSENT);
}
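
Several hunks in module.cc lean on one invariant: the module's single code slot is reinterpreted as its status advances (see GetSharedFunctionInfo and Reset above, and the set_code calls in Evaluate and RecordError). A condensed standalone sketch of that mapping, with strings standing in for the real heap types:

#include <cassert>
#include <string>

enum Status {
  kUninstantiated, kPreInstantiating, kInstantiating,
  kInstantiated, kEvaluating, kEvaluated, kErrored
};

// What the code() slot holds at each stage of the module's lifetime.
std::string CodeSlotContents(Status status) {
  switch (status) {
    case kUninstantiated:
    case kPreInstantiating:
      return "SharedFunctionInfo";  // the raw compiled module function
    case kInstantiating:
      return "JSFunction";          // closure for the initialization code
    case kInstantiated:
      return "JSGeneratorObject";   // suspended module body
    case kEvaluating:
    case kEvaluated:
    case kErrored:
      return "ModuleInfo";          // only the descriptor metadata is kept
  }
  return "unreachable";
}

int main() {
  assert(CodeSlotContents(kInstantiating) == "JSFunction");
  assert(CodeSlotContents(kErrored) == "ModuleInfo");
  return 0;
}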
diff --git a/deps/v8/src/objects/module.h b/deps/v8/src/objects/module.h
index 5137d92351..a1672dce7e 100644
--- a/deps/v8/src/objects/module.h
+++ b/deps/v8/src/objects/module.h
@@ -5,9 +5,9 @@
#ifndef V8_OBJECTS_MODULE_H_
#define V8_OBJECTS_MODULE_H_
-#include "src/objects.h"
#include "src/objects/fixed-array.h"
#include "src/objects/js-objects.h"
+#include "src/objects/objects.h"
#include "src/objects/struct.h"
// Has to be the last include (doesn't have include guards):
diff --git a/deps/v8/src/objects/name-inl.h b/deps/v8/src/objects/name-inl.h
index af1724b76d..b3e04bbd50 100644
--- a/deps/v8/src/objects/name-inl.h
+++ b/deps/v8/src/objects/name-inl.h
@@ -45,28 +45,26 @@ void Symbol::set_is_private_name() {
}
bool Name::IsUniqueName() const {
- uint32_t type = map()->instance_type();
+ uint32_t type = map().instance_type();
bool result = (type & (kIsNotStringMask | kIsNotInternalizedMask)) !=
(kStringTag | kNotInternalizedTag);
SLOW_DCHECK(result == HeapObject::IsUniqueName());
return result;
}
-uint32_t Name::hash_field() {
- return READ_UINT32_FIELD(*this, kHashFieldOffset);
-}
+uint32_t Name::hash_field() { return ReadField<uint32_t>(kHashFieldOffset); }
void Name::set_hash_field(uint32_t value) {
- WRITE_UINT32_FIELD(*this, kHashFieldOffset, value);
+ WriteField<uint32_t>(kHashFieldOffset, value);
}
bool Name::Equals(Name other) {
if (other == *this) return true;
- if ((this->IsInternalizedString() && other->IsInternalizedString()) ||
- this->IsSymbol() || other->IsSymbol()) {
+ if ((this->IsInternalizedString() && other.IsInternalizedString()) ||
+ this->IsSymbol() || other.IsSymbol()) {
return false;
}
- return String::cast(*this)->SlowEquals(String::cast(other));
+ return String::cast(*this).SlowEquals(String::cast(other));
}
bool Name::Equals(Isolate* isolate, Handle<Name> one, Handle<Name> two) {
@@ -90,26 +88,26 @@ uint32_t Name::Hash() {
uint32_t field = hash_field();
if (IsHashFieldComputed(field)) return field >> kHashShift;
// Slow case: compute hash code and set it. Has to be a string.
- return String::cast(*this)->ComputeAndSetHash();
+ return String::cast(*this).ComputeAndSetHash();
}
bool Name::IsInterestingSymbol() const {
- return IsSymbol() && Symbol::cast(*this)->is_interesting_symbol();
+ return IsSymbol() && Symbol::cast(*this).is_interesting_symbol();
}
bool Name::IsPrivate() {
- return this->IsSymbol() && Symbol::cast(*this)->is_private();
+ return this->IsSymbol() && Symbol::cast(*this).is_private();
}
bool Name::IsPrivateName() {
bool is_private_name =
- this->IsSymbol() && Symbol::cast(*this)->is_private_name();
+ this->IsSymbol() && Symbol::cast(*this).is_private_name();
DCHECK_IMPLIES(is_private_name, IsPrivate());
return is_private_name;
}
bool Name::AsArrayIndex(uint32_t* index) {
- return IsString() && String::cast(*this)->AsArrayIndex(index);
+ return IsString() && String::cast(*this).AsArrayIndex(index);
}
// static
diff --git a/deps/v8/src/objects/name.h b/deps/v8/src/objects/name.h
index c17f73f775..8b2a8f0a01 100644
--- a/deps/v8/src/objects/name.h
+++ b/deps/v8/src/objects/name.h
@@ -5,9 +5,9 @@
#ifndef V8_OBJECTS_NAME_H_
#define V8_OBJECTS_NAME_H_
-#include "src/objects.h"
#include "src/objects/heap-object.h"
-#include "torque-generated/class-definitions-from-dsl.h"
+#include "src/objects/objects.h"
+#include "torque-generated/field-offsets-tq.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -71,8 +71,6 @@ class Name : public HeapObject {
DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize,
TORQUE_GENERATED_NAME_FIELDS)
- static const int kHeaderSize = kSize;
-
// Mask constant for checking if a name has a computed hash code
// and if it is a string that is an array index. The least significant bit
// indicates whether a hash code has been computed. If the hash code has
diff --git a/deps/v8/src/objects/object-list-macros.h b/deps/v8/src/objects/object-list-macros.h
new file mode 100644
index 0000000000..78452de502
--- /dev/null
+++ b/deps/v8/src/objects/object-list-macros.h
@@ -0,0 +1,270 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_OBJECT_LIST_MACROS_H_
+#define V8_OBJECTS_OBJECT_LIST_MACROS_H_
+
+namespace v8 {
+namespace internal {
+
+class AbstractCode;
+class AccessorPair;
+class AccessCheckInfo;
+class AllocationSite;
+class ByteArray;
+class CachedTemplateObject;
+class Cell;
+class ClosureFeedbackCellArray;
+class ConsString;
+class DependentCode;
+class ElementsAccessor;
+class EnumCache;
+class FixedArrayBase;
+class FixedDoubleArray;
+class FreeSpace;
+class FunctionLiteral;
+class FunctionTemplateInfo;
+class JSAsyncGeneratorObject;
+class JSGlobalProxy;
+class JSPromise;
+class JSProxy;
+class JSProxyRevocableResult;
+class KeyAccumulator;
+class LayoutDescriptor;
+class LookupIterator;
+class FieldType;
+class Module;
+class ModuleInfoEntry;
+class MutableHeapNumber;
+class ObjectHashTable;
+class ObjectTemplateInfo;
+class ObjectVisitor;
+class PreparseData;
+class PropertyArray;
+class PropertyCell;
+class PropertyDescriptor;
+class PrototypeInfo;
+class ReadOnlyRoots;
+class RegExpMatchInfo;
+class RootVisitor;
+class SafepointEntry;
+class ScriptContextTable;
+class SharedFunctionInfo;
+class StringStream;
+class Symbol;
+class FeedbackCell;
+class FeedbackMetadata;
+class FeedbackVector;
+class UncompiledData;
+class TemplateInfo;
+class TransitionArray;
+class TemplateList;
+class WasmInstanceObject;
+class WasmMemoryObject;
+template <typename T>
+class ZoneForwardList;
+
+#define OBJECT_TYPE_LIST(V) \
+ V(LayoutDescriptor) \
+ V(Primitive) \
+ V(Number) \
+ V(Numeric)
+
+#define HEAP_OBJECT_ORDINARY_TYPE_LIST_BASE(V) \
+ V(AbstractCode) \
+ V(AccessCheckNeeded) \
+ V(AllocationSite) \
+ V(ArrayList) \
+ V(BigInt) \
+ V(BigIntWrapper) \
+ V(ObjectBoilerplateDescription) \
+ V(Boolean) \
+ V(BooleanWrapper) \
+ V(BreakPoint) \
+ V(BreakPointInfo) \
+ V(ByteArray) \
+ V(BytecodeArray) \
+ V(CachedTemplateObject) \
+ V(CallHandlerInfo) \
+ V(Callable) \
+ V(Cell) \
+ V(ClassBoilerplate) \
+ V(Code) \
+ V(CodeDataContainer) \
+ V(CompilationCacheTable) \
+ V(ConsString) \
+ V(Constructor) \
+ V(Context) \
+ V(CoverageInfo) \
+ V(ClosureFeedbackCellArray) \
+ V(DataHandler) \
+ V(DeoptimizationData) \
+ V(DependentCode) \
+ V(DescriptorArray) \
+ V(EmbedderDataArray) \
+ V(EphemeronHashTable) \
+ V(ExternalOneByteString) \
+ V(ExternalString) \
+ V(ExternalTwoByteString) \
+ V(FeedbackCell) \
+ V(FeedbackMetadata) \
+ V(FeedbackVector) \
+ V(Filler) \
+ V(FixedArray) \
+ V(FixedArrayBase) \
+ V(FixedArrayExact) \
+ V(FixedDoubleArray) \
+ V(Foreign) \
+ V(FrameArray) \
+ V(FreeSpace) \
+ V(Function) \
+ V(GlobalDictionary) \
+ V(HandlerTable) \
+ V(HeapNumber) \
+ V(InternalizedString) \
+ V(JSArgumentsObject) \
+ V(JSArgumentsObjectWithLength) \
+ V(JSArray) \
+ V(JSArrayBuffer) \
+ V(JSArrayBufferView) \
+ V(JSArrayIterator) \
+ V(JSAsyncFromSyncIterator) \
+ V(JSAsyncFunctionObject) \
+ V(JSAsyncGeneratorObject) \
+ V(JSBoundFunction) \
+ V(JSCollection) \
+ V(JSContextExtensionObject) \
+ V(JSDataView) \
+ V(JSDate) \
+ V(JSError) \
+ V(JSFunction) \
+ V(JSGeneratorObject) \
+ V(JSGlobalObject) \
+ V(JSGlobalProxy) \
+ V(JSMap) \
+ V(JSMapIterator) \
+ V(JSMessageObject) \
+ V(JSModuleNamespace) \
+ V(JSObject) \
+ V(JSPromise) \
+ V(JSProxy) \
+ V(JSReceiver) \
+ V(JSRegExp) \
+ V(JSRegExpResult) \
+ V(JSRegExpStringIterator) \
+ V(JSSet) \
+ V(JSSetIterator) \
+ V(JSSloppyArgumentsObject) \
+ V(JSStringIterator) \
+ V(JSTypedArray) \
+ V(JSValue) \
+ V(JSWeakRef) \
+ V(JSWeakCollection) \
+ V(JSFinalizationGroup) \
+ V(JSFinalizationGroupCleanupIterator) \
+ V(JSWeakMap) \
+ V(JSWeakSet) \
+ V(LoadHandler) \
+ V(Map) \
+ V(MapCache) \
+ V(Microtask) \
+ V(ModuleInfo) \
+ V(MutableHeapNumber) \
+ V(Name) \
+ V(NameDictionary) \
+ V(NativeContext) \
+ V(NormalizedMapCache) \
+ V(NumberDictionary) \
+ V(NumberWrapper) \
+ V(ObjectHashSet) \
+ V(ObjectHashTable) \
+ V(Oddball) \
+ V(OrderedHashMap) \
+ V(OrderedHashSet) \
+ V(OrderedNameDictionary) \
+ V(PreparseData) \
+ V(PromiseReactionJobTask) \
+ V(PropertyArray) \
+ V(PropertyCell) \
+ V(PropertyDescriptorObject) \
+ V(RegExpMatchInfo) \
+ V(ScopeInfo) \
+ V(ScriptContextTable) \
+ V(ScriptWrapper) \
+ V(SeqOneByteString) \
+ V(SeqString) \
+ V(SeqTwoByteString) \
+ V(SharedFunctionInfo) \
+ V(SimpleNumberDictionary) \
+ V(SlicedString) \
+ V(SloppyArgumentsElements) \
+ V(SmallOrderedHashMap) \
+ V(SmallOrderedHashSet) \
+ V(SmallOrderedNameDictionary) \
+ V(StoreHandler) \
+ V(String) \
+ V(StringSet) \
+ V(StringTable) \
+ V(StringWrapper) \
+ V(Struct) \
+ V(Symbol) \
+ V(SymbolWrapper) \
+ V(TemplateInfo) \
+ V(TemplateList) \
+ V(ThinString) \
+ V(TransitionArray) \
+ V(UncompiledData) \
+ V(UncompiledDataWithPreparseData) \
+ V(UncompiledDataWithoutPreparseData) \
+ V(Undetectable) \
+ V(UniqueName) \
+ V(WasmExceptionObject) \
+ V(WasmGlobalObject) \
+ V(WasmInstanceObject) \
+ V(WasmMemoryObject) \
+ V(WasmModuleObject) \
+ V(WasmTableObject) \
+ V(WeakFixedArray) \
+ V(WeakArrayList) \
+ V(WeakCell)
+
+#ifdef V8_INTL_SUPPORT
+#define HEAP_OBJECT_ORDINARY_TYPE_LIST(V) \
+ HEAP_OBJECT_ORDINARY_TYPE_LIST_BASE(V) \
+ V(JSV8BreakIterator) \
+ V(JSCollator) \
+ V(JSDateTimeFormat) \
+ V(JSListFormat) \
+ V(JSLocale) \
+ V(JSNumberFormat) \
+ V(JSPluralRules) \
+ V(JSRelativeTimeFormat) \
+ V(JSSegmentIterator) \
+ V(JSSegmenter)
+#else
+#define HEAP_OBJECT_ORDINARY_TYPE_LIST(V) HEAP_OBJECT_ORDINARY_TYPE_LIST_BASE(V)
+#endif // V8_INTL_SUPPORT
+
+#define HEAP_OBJECT_TEMPLATE_TYPE_LIST(V) V(HashTable)
+
+#define HEAP_OBJECT_TYPE_LIST(V) \
+ HEAP_OBJECT_ORDINARY_TYPE_LIST(V) \
+ HEAP_OBJECT_TEMPLATE_TYPE_LIST(V)
+
+#define ODDBALL_LIST(V) \
+ V(Undefined, undefined_value) \
+ V(Null, null_value) \
+ V(TheHole, the_hole_value) \
+ V(Exception, exception) \
+ V(Uninitialized, uninitialized_value) \
+ V(True, true_value) \
+ V(False, false_value) \
+ V(ArgumentsMarker, arguments_marker) \
+ V(OptimizedOut, optimized_out) \
+ V(StaleRegister, stale_register)
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_OBJECTS_OBJECT_LIST_MACROS_H_
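
The lists above are X-macros: a consumer defines the parameter macro once and
the list expands it per entry. A minimal sketch of a hypothetical consumer (in
the tree, src/objects/objects.h feeds declaration macros such as
IS_TYPE_FUNCTION_DECL through these lists):

    // Hypothetical consumer: declare one Is##Type() predicate per list entry.
    #define DECL_IS_TYPE(Type) bool Is##Type() const;

    class HeapObjectSketch {
     public:
      HEAP_OBJECT_ORDINARY_TYPE_LIST_BASE(DECL_IS_TYPE)
    };

    #undef DECL_IS_TYPE

Because printers, verifiers, and type checks all consume the same list, adding
a new object type means extending it in exactly one place.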
diff --git a/deps/v8/src/objects/object-macros-undef.h b/deps/v8/src/objects/object-macros-undef.h
index b5d076564d..c8ebf57ce7 100644
--- a/deps/v8/src/objects/object-macros-undef.h
+++ b/deps/v8/src/objects/object-macros-undef.h
@@ -57,46 +57,26 @@
#undef RELAXED_WRITE_WEAK_FIELD
#undef WRITE_BARRIER
#undef WEAK_WRITE_BARRIER
+#undef EPHEMERON_KEY_WRITE_BARRIER
#undef CONDITIONAL_WRITE_BARRIER
#undef CONDITIONAL_WEAK_WRITE_BARRIER
-#undef READ_DOUBLE_FIELD
-#undef WRITE_DOUBLE_FIELD
-#undef READ_INT_FIELD
-#undef WRITE_INT_FIELD
+#undef CONDITIONAL_EPHEMERON_KEY_WRITE_BARRIER
#undef ACQUIRE_READ_INT32_FIELD
-#undef READ_UINT8_FIELD
-#undef WRITE_UINT8_FIELD
#undef RELAXED_WRITE_INT8_FIELD
-#undef READ_INT8_FIELD
#undef RELAXED_READ_INT8_FIELD
-#undef WRITE_INT8_FIELD
-#undef READ_UINT16_FIELD
-#undef WRITE_UINT16_FIELD
-#undef READ_INT16_FIELD
-#undef WRITE_INT16_FIELD
#undef RELAXED_READ_INT16_FIELD
#undef RELAXED_WRITE_INT16_FIELD
-#undef READ_UINT32_FIELD
#undef RELAXED_READ_UINT32_FIELD
-#undef WRITE_UINT32_FIELD
#undef RELAXED_WRITE_UINT32_FIELD
-#undef READ_INT32_FIELD
#undef RELAXED_READ_INT32_FIELD
-#undef WRITE_INT32_FIELD
#undef RELEASE_WRITE_INT32_FIELD
#undef RELAXED_WRITE_INT32_FIELD
-#undef READ_FLOAT_FIELD
-#undef WRITE_FLOAT_FIELD
-#undef READ_INTPTR_FIELD
-#undef WRITE_INTPTR_FIELD
-#undef READ_UINTPTR_FIELD
-#undef WRITE_UINTPTR_FIELD
-#undef READ_UINT64_FIELD
-#undef WRITE_UINT64_FIELD
-#undef READ_BYTE_FIELD
#undef RELAXED_READ_BYTE_FIELD
-#undef WRITE_BYTE_FIELD
#undef RELAXED_WRITE_BYTE_FIELD
+#undef DECL_PRINTER
#undef DECL_VERIFIER
+#undef EXPORT_DECL_VERIFIER
#undef DEFINE_DEOPT_ELEMENT_ACCESSORS
#undef DEFINE_DEOPT_ENTRY_ACCESSORS
+#undef TQ_OBJECT_CONSTRUCTORS
+#undef TQ_OBJECT_CONSTRUCTORS_IMPL
diff --git a/deps/v8/src/objects/object-macros.h b/deps/v8/src/objects/object-macros.h
index da5c157bbc..1f499d4fba 100644
--- a/deps/v8/src/objects/object-macros.h
+++ b/deps/v8/src/objects/object-macros.h
@@ -14,15 +14,13 @@
// for fields that can be written to and read from multiple threads at the same
// time. See comments in src/base/atomicops.h for the memory ordering semantics.
-#include <src/v8memory.h>
+#include "src/common/v8memory.h"
// Since this changes visibility, it should always be last in a class
// definition.
#define OBJECT_CONSTRUCTORS(Type, ...) \
public: \
constexpr Type() : __VA_ARGS__() {} \
- Type* operator->() { return this; } \
- const Type* operator->() const { return this; } \
\
protected: \
explicit inline Type(Address ptr)
@@ -80,15 +78,13 @@
#define CAST_ACCESSOR(Type) \
Type Type::cast(Object object) { return Type(object.ptr()); }
-#define INT_ACCESSORS(holder, name, offset) \
- int holder::name() const { return READ_INT_FIELD(*this, offset); } \
- void holder::set_##name(int value) { WRITE_INT_FIELD(*this, offset, value); }
+#define INT_ACCESSORS(holder, name, offset) \
+ int holder::name() const { return ReadField<int>(offset); } \
+ void holder::set_##name(int value) { WriteField<int>(offset, value); }
-#define INT32_ACCESSORS(holder, name, offset) \
- int32_t holder::name() const { return READ_INT32_FIELD(*this, offset); } \
- void holder::set_##name(int32_t value) { \
- WRITE_INT32_FIELD(*this, offset, value); \
- }
+#define INT32_ACCESSORS(holder, name, offset) \
+ int32_t holder::name() const { return ReadField<int32_t>(offset); } \
+ void holder::set_##name(int32_t value) { WriteField<int32_t>(offset, value); }
#define RELAXED_INT32_ACCESSORS(holder, name, offset) \
int32_t holder::name() const { \
@@ -98,20 +94,20 @@
RELAXED_WRITE_INT32_FIELD(*this, offset, value); \
}
-#define UINT16_ACCESSORS(holder, name, offset) \
- uint16_t holder::name() const { return READ_UINT16_FIELD(*this, offset); } \
- void holder::set_##name(int value) { \
- DCHECK_GE(value, 0); \
- DCHECK_LE(value, static_cast<uint16_t>(-1)); \
- WRITE_UINT16_FIELD(*this, offset, value); \
+#define UINT16_ACCESSORS(holder, name, offset) \
+ uint16_t holder::name() const { return ReadField<uint16_t>(offset); } \
+ void holder::set_##name(int value) { \
+ DCHECK_GE(value, 0); \
+ DCHECK_LE(value, static_cast<uint16_t>(-1)); \
+ WriteField<uint16_t>(offset, value); \
}
-#define UINT8_ACCESSORS(holder, name, offset) \
- uint8_t holder::name() const { return READ_UINT8_FIELD(*this, offset); } \
- void holder::set_##name(int value) { \
- DCHECK_GE(value, 0); \
- DCHECK_LE(value, static_cast<uint8_t>(-1)); \
- WRITE_UINT8_FIELD(*this, offset, value); \
+#define UINT8_ACCESSORS(holder, name, offset) \
+ uint8_t holder::name() const { return ReadField<uint8_t>(offset); } \
+ void holder::set_##name(int value) { \
+ DCHECK_GE(value, 0); \
+ DCHECK_LE(value, static_cast<uint8_t>(-1)); \
+ WriteField<uint8_t>(offset, value); \
}
#define ACCESSORS_CHECKED2(holder, name, type, offset, get_condition, \
@@ -227,9 +223,9 @@
return instance_type == forinstancetype; \
}
-#define TYPE_CHECKER(type, ...) \
- bool HeapObject::Is##type() const { \
- return InstanceTypeChecker::Is##type(map()->instance_type()); \
+#define TYPE_CHECKER(type, ...) \
+ bool HeapObject::Is##type() const { \
+ return InstanceTypeChecker::Is##type(map().instance_type()); \
}
#define RELAXED_INT16_ACCESSORS(holder, name, offset) \
@@ -276,50 +272,50 @@
#define RELAXED_WRITE_WEAK_FIELD(p, offset, value) \
MaybeObjectSlot(FIELD_ADDR(p, offset)).Relaxed_Store(value)
-#define WRITE_BARRIER(object, offset, value) \
- do { \
- DCHECK_NOT_NULL(GetHeapFromWritableObject(object)); \
- MarkingBarrier(object, (object)->RawField(offset), value); \
- GenerationalBarrier(object, (object)->RawField(offset), value); \
+#define WRITE_BARRIER(object, offset, value) \
+ do { \
+ DCHECK_NOT_NULL(GetHeapFromWritableObject(object)); \
+ MarkingBarrier(object, (object).RawField(offset), value); \
+ GenerationalBarrier(object, (object).RawField(offset), value); \
} while (false)
-#define WEAK_WRITE_BARRIER(object, offset, value) \
- do { \
- DCHECK_NOT_NULL(GetHeapFromWritableObject(object)); \
- MarkingBarrier(object, (object)->RawMaybeWeakField(offset), value); \
- GenerationalBarrier(object, (object)->RawMaybeWeakField(offset), value); \
+#define WEAK_WRITE_BARRIER(object, offset, value) \
+ do { \
+ DCHECK_NOT_NULL(GetHeapFromWritableObject(object)); \
+ MarkingBarrier(object, (object).RawMaybeWeakField(offset), value); \
+ GenerationalBarrier(object, (object).RawMaybeWeakField(offset), value); \
} while (false)
-#define EPHEMERON_KEY_WRITE_BARRIER(object, offset, value) \
- do { \
- DCHECK_NOT_NULL(GetHeapFromWritableObject(object)); \
- EphemeronHashTable table = EphemeronHashTable::cast(object); \
- MarkingBarrier(object, (object)->RawField(offset), value); \
- GenerationalEphemeronKeyBarrier(table, (object)->RawField(offset), value); \
+#define EPHEMERON_KEY_WRITE_BARRIER(object, offset, value) \
+ do { \
+ DCHECK_NOT_NULL(GetHeapFromWritableObject(object)); \
+ EphemeronHashTable table = EphemeronHashTable::cast(object); \
+ MarkingBarrier(object, (object).RawField(offset), value); \
+ GenerationalEphemeronKeyBarrier(table, (object).RawField(offset), value); \
} while (false)
-#define CONDITIONAL_WRITE_BARRIER(object, offset, value, mode) \
- do { \
- DCHECK_NOT_NULL(GetHeapFromWritableObject(object)); \
- DCHECK_NE(mode, UPDATE_EPHEMERON_KEY_WRITE_BARRIER); \
- if (mode != SKIP_WRITE_BARRIER) { \
- if (mode == UPDATE_WRITE_BARRIER) { \
- MarkingBarrier(object, (object)->RawField(offset), value); \
- } \
- GenerationalBarrier(object, (object)->RawField(offset), value); \
- } \
+#define CONDITIONAL_WRITE_BARRIER(object, offset, value, mode) \
+ do { \
+ DCHECK_NOT_NULL(GetHeapFromWritableObject(object)); \
+ DCHECK_NE(mode, UPDATE_EPHEMERON_KEY_WRITE_BARRIER); \
+ if (mode != SKIP_WRITE_BARRIER) { \
+ if (mode == UPDATE_WRITE_BARRIER) { \
+ MarkingBarrier(object, (object).RawField(offset), value); \
+ } \
+ GenerationalBarrier(object, (object).RawField(offset), value); \
+ } \
} while (false)
-#define CONDITIONAL_WEAK_WRITE_BARRIER(object, offset, value, mode) \
- do { \
- DCHECK_NOT_NULL(GetHeapFromWritableObject(object)); \
- DCHECK_NE(mode, UPDATE_EPHEMERON_KEY_WRITE_BARRIER); \
- if (mode != SKIP_WRITE_BARRIER) { \
- if (mode == UPDATE_WRITE_BARRIER) { \
- MarkingBarrier(object, (object)->RawMaybeWeakField(offset), value); \
- } \
- GenerationalBarrier(object, (object)->RawMaybeWeakField(offset), value); \
- } \
+#define CONDITIONAL_WEAK_WRITE_BARRIER(object, offset, value, mode) \
+ do { \
+ DCHECK_NOT_NULL(GetHeapFromWritableObject(object)); \
+ DCHECK_NE(mode, UPDATE_EPHEMERON_KEY_WRITE_BARRIER); \
+ if (mode != SKIP_WRITE_BARRIER) { \
+ if (mode == UPDATE_WRITE_BARRIER) { \
+ MarkingBarrier(object, (object).RawMaybeWeakField(offset), value); \
+ } \
+ GenerationalBarrier(object, (object).RawMaybeWeakField(offset), value); \
+ } \
} while (false)
#define CONDITIONAL_EPHEMERON_KEY_WRITE_BARRIER(object, offset, value, mode) \
@@ -329,60 +325,24 @@
EphemeronHashTable table = EphemeronHashTable::cast(object); \
if (mode != SKIP_WRITE_BARRIER) { \
if (mode == UPDATE_WRITE_BARRIER) { \
- MarkingBarrier(object, (object)->RawField(offset), value); \
+ MarkingBarrier(object, (object).RawField(offset), value); \
} \
- GenerationalEphemeronKeyBarrier(table, (object)->RawField(offset), \
+ GenerationalEphemeronKeyBarrier(table, (object).RawField(offset), \
value); \
} \
} while (false)
-#define READ_DOUBLE_FIELD(p, offset) ReadDoubleValue(FIELD_ADDR(p, offset))
-
-#define WRITE_DOUBLE_FIELD(p, offset, value) \
- WriteDoubleValue(FIELD_ADDR(p, offset), value)
-
-#define READ_INT_FIELD(p, offset) \
- (*reinterpret_cast<const int*>(FIELD_ADDR(p, offset)))
-
-#define WRITE_INT_FIELD(p, offset, value) \
- (*reinterpret_cast<int*>(FIELD_ADDR(p, offset)) = value)
-
#define ACQUIRE_READ_INT32_FIELD(p, offset) \
static_cast<int32_t>(base::Acquire_Load( \
reinterpret_cast<const base::Atomic32*>(FIELD_ADDR(p, offset))))
-#define READ_UINT8_FIELD(p, offset) \
- (*reinterpret_cast<const uint8_t*>(FIELD_ADDR(p, offset)))
-
-#define WRITE_UINT8_FIELD(p, offset, value) \
- (*reinterpret_cast<uint8_t*>(FIELD_ADDR(p, offset)) = value)
-
#define RELAXED_WRITE_INT8_FIELD(p, offset, value) \
base::Relaxed_Store(reinterpret_cast<base::Atomic8*>(FIELD_ADDR(p, offset)), \
static_cast<base::Atomic8>(value));
-
-#define READ_INT8_FIELD(p, offset) \
- (*reinterpret_cast<const int8_t*>(FIELD_ADDR(p, offset)))
-
#define RELAXED_READ_INT8_FIELD(p, offset) \
static_cast<int8_t>(base::Relaxed_Load( \
reinterpret_cast<const base::Atomic8*>(FIELD_ADDR(p, offset))))
-#define WRITE_INT8_FIELD(p, offset, value) \
- (*reinterpret_cast<int8_t*>(FIELD_ADDR(p, offset)) = value)
-
-#define READ_UINT16_FIELD(p, offset) \
- (*reinterpret_cast<const uint16_t*>(FIELD_ADDR(p, offset)))
-
-#define WRITE_UINT16_FIELD(p, offset, value) \
- (*reinterpret_cast<uint16_t*>(FIELD_ADDR(p, offset)) = value)
-
-#define READ_INT16_FIELD(p, offset) \
- (*reinterpret_cast<const int16_t*>(FIELD_ADDR(p, offset)))
-
-#define WRITE_INT16_FIELD(p, offset, value) \
- (*reinterpret_cast<int16_t*>(FIELD_ADDR(p, offset)) = value)
-
#define RELAXED_READ_INT16_FIELD(p, offset) \
static_cast<int16_t>(base::Relaxed_Load( \
reinterpret_cast<const base::Atomic16*>(FIELD_ADDR(p, offset))))
@@ -392,31 +352,19 @@
reinterpret_cast<base::Atomic16*>(FIELD_ADDR(p, offset)), \
static_cast<base::Atomic16>(value));
-#define READ_UINT32_FIELD(p, offset) \
- (*reinterpret_cast<const uint32_t*>(FIELD_ADDR(p, offset)))
-
#define RELAXED_READ_UINT32_FIELD(p, offset) \
static_cast<uint32_t>(base::Relaxed_Load( \
reinterpret_cast<const base::Atomic32*>(FIELD_ADDR(p, offset))))
-#define WRITE_UINT32_FIELD(p, offset, value) \
- (*reinterpret_cast<uint32_t*>(FIELD_ADDR(p, offset)) = value)
-
#define RELAXED_WRITE_UINT32_FIELD(p, offset, value) \
base::Relaxed_Store( \
reinterpret_cast<base::Atomic32*>(FIELD_ADDR(p, offset)), \
static_cast<base::Atomic32>(value));
-#define READ_INT32_FIELD(p, offset) \
- (*reinterpret_cast<const int32_t*>(FIELD_ADDR(p, offset)))
-
#define RELAXED_READ_INT32_FIELD(p, offset) \
static_cast<int32_t>(base::Relaxed_Load( \
reinterpret_cast<const base::Atomic32*>(FIELD_ADDR(p, offset))))
-#define WRITE_INT32_FIELD(p, offset, value) \
- (*reinterpret_cast<int32_t*>(FIELD_ADDR(p, offset)) = value)
-
#define RELEASE_WRITE_INT32_FIELD(p, offset, value) \
base::Release_Store( \
reinterpret_cast<base::Atomic32*>(FIELD_ADDR(p, offset)), \
@@ -427,72 +375,20 @@
reinterpret_cast<base::Atomic32*>(FIELD_ADDR(p, offset)), \
static_cast<base::Atomic32>(value));
-#define READ_FLOAT_FIELD(p, offset) \
- (*reinterpret_cast<const float*>(FIELD_ADDR(p, offset)))
-
-#define WRITE_FLOAT_FIELD(p, offset, value) \
- (*reinterpret_cast<float*>(FIELD_ADDR(p, offset)) = value)
-
-// TODO(ishell, v8:8875): When pointer compression is enabled 8-byte size fields
-// (external pointers, doubles and BigInt data) are only kTaggedSize aligned so
-// we have to use unaligned pointer friendly way of accessing them in order to
-// avoid undefined behavior in C++ code.
-#ifdef V8_COMPRESS_POINTERS
-
-#define READ_INTPTR_FIELD(p, offset) \
- ReadUnalignedValue<intptr_t>(FIELD_ADDR(p, offset))
-
-#define WRITE_INTPTR_FIELD(p, offset, value) \
- WriteUnalignedValue<intptr_t>(FIELD_ADDR(p, offset), value)
-
-#define READ_UINTPTR_FIELD(p, offset) \
- ReadUnalignedValue<uintptr_t>(FIELD_ADDR(p, offset))
-
-#define WRITE_UINTPTR_FIELD(p, offset, value) \
- WriteUnalignedValue<uintptr_t>(FIELD_ADDR(p, offset), value)
-
-#define READ_UINT64_FIELD(p, offset) \
- ReadUnalignedValue<uint64_t>(FIELD_ADDR(p, offset))
-
-#define WRITE_UINT64_FIELD(p, offset, value) \
- WriteUnalignedValue<uint64_t>(FIELD_ADDR(p, offset), value)
-
-#else // V8_COMPRESS_POINTERS
-
-#define READ_INTPTR_FIELD(p, offset) \
- (*reinterpret_cast<const intptr_t*>(FIELD_ADDR(p, offset)))
-
-#define WRITE_INTPTR_FIELD(p, offset, value) \
- (*reinterpret_cast<intptr_t*>(FIELD_ADDR(p, offset)) = value)
-
-#define READ_UINTPTR_FIELD(p, offset) \
- (*reinterpret_cast<const uintptr_t*>(FIELD_ADDR(p, offset)))
-
-#define WRITE_UINTPTR_FIELD(p, offset, value) \
- (*reinterpret_cast<uintptr_t*>(FIELD_ADDR(p, offset)) = value)
-
-#define READ_UINT64_FIELD(p, offset) \
- (*reinterpret_cast<const uint64_t*>(FIELD_ADDR(p, offset)))
-
-#define WRITE_UINT64_FIELD(p, offset, value) \
- (*reinterpret_cast<uint64_t*>(FIELD_ADDR(p, offset)) = value)
-
-#endif // V8_COMPRESS_POINTERS
-
-#define READ_BYTE_FIELD(p, offset) \
- (*reinterpret_cast<const byte*>(FIELD_ADDR(p, offset)))
-
#define RELAXED_READ_BYTE_FIELD(p, offset) \
static_cast<byte>(base::Relaxed_Load( \
reinterpret_cast<const base::Atomic8*>(FIELD_ADDR(p, offset))))
-#define WRITE_BYTE_FIELD(p, offset, value) \
- (*reinterpret_cast<byte*>(FIELD_ADDR(p, offset)) = value)
-
#define RELAXED_WRITE_BYTE_FIELD(p, offset, value) \
base::Relaxed_Store(reinterpret_cast<base::Atomic8*>(FIELD_ADDR(p, offset)), \
static_cast<base::Atomic8>(value));
+#ifdef OBJECT_PRINT
+#define DECL_PRINTER(Name) void Name##Print(std::ostream& os); // NOLINT
+#else
+#define DECL_PRINTER(Name)
+#endif
+
#ifdef VERIFY_HEAP
#define DECL_VERIFIER(Name) void Name##Verify(Isolate* isolate);
#define EXPORT_DECL_VERIFIER(Name) \
@@ -515,3 +411,15 @@
void DeoptimizationData::Set##name(int i, type value) { \
set(IndexForEntry(i) + k##name##Offset, value); \
}
+
+#define TQ_OBJECT_CONSTRUCTORS(Type) \
+ public: \
+ constexpr Type() = default; \
+ \
+ protected: \
+ inline explicit Type(Address ptr); \
+ friend class TorqueGenerated##Type<Type, Super>;
+
+#define TQ_OBJECT_CONSTRUCTORS_IMPL(Type) \
+ inline Type::Type(Address ptr) \
+ : TorqueGenerated##Type<Type, Type::Super>(ptr) {}
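
The READ_*_FIELD/WRITE_*_FIELD macros deleted above are subsumed by two
templated helpers on HeapObject. A simplified sketch of their shape (the real
helpers additionally use unaligned-friendly accesses where pointer compression
leaves 8-byte fields only kTaggedSize-aligned, per the TODO removed earlier in
this file):

    // Simplified: field_address(offset) is FIELD_ADDR, i.e.
    // ptr() - kHeapObjectTag + offset.
    template <class T>
    T HeapObject::ReadField(size_t offset) const {
      return *reinterpret_cast<const T*>(field_address(offset));
    }

    template <class T>
    void HeapObject::WriteField(size_t offset, T value) const {
      *reinterpret_cast<T*>(field_address(offset)) = value;
    }

    // INT_ACCESSORS(Foo, bar, kBarOffset) therefore now expands to:
    //   int Foo::bar() const { return ReadField<int>(kBarOffset); }
    //   void Foo::set_bar(int value) { WriteField<int>(kBarOffset, value); }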
diff --git a/deps/v8/src/objects/objects-body-descriptors-inl.h b/deps/v8/src/objects/objects-body-descriptors-inl.h
new file mode 100644
index 0000000000..8626165647
--- /dev/null
+++ b/deps/v8/src/objects/objects-body-descriptors-inl.h
@@ -0,0 +1,1116 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_OBJECTS_BODY_DESCRIPTORS_INL_H_
+#define V8_OBJECTS_OBJECTS_BODY_DESCRIPTORS_INL_H_
+
+#include "src/objects/objects-body-descriptors.h"
+
+#include <algorithm>
+
+#include "src/codegen/reloc-info.h"
+#include "src/objects/cell.h"
+#include "src/objects/data-handler.h"
+#include "src/objects/feedback-vector.h"
+#include "src/objects/foreign-inl.h"
+#include "src/objects/hash-table.h"
+#include "src/objects/js-collection.h"
+#include "src/objects/js-weak-refs.h"
+#include "src/objects/oddball.h"
+#include "src/objects/ordered-hash-table.h"
+#include "src/objects/transitions.h"
+#include "src/wasm/wasm-objects-inl.h"
+
+namespace v8 {
+namespace internal {
+
+template <int start_offset>
+int FlexibleBodyDescriptor<start_offset>::SizeOf(Map map, HeapObject object) {
+ return object.SizeFromMap(map);
+}
+
+template <int start_offset>
+int FlexibleWeakBodyDescriptor<start_offset>::SizeOf(Map map,
+ HeapObject object) {
+ return object.SizeFromMap(map);
+}
+
+bool BodyDescriptorBase::IsValidJSObjectSlotImpl(Map map, HeapObject obj,
+ int offset) {
+#ifdef V8_COMPRESS_POINTERS
+ STATIC_ASSERT(kEmbedderDataSlotSize == 2 * kTaggedSize);
+ int embedder_fields_offset = JSObject::GetEmbedderFieldsStartOffset(map);
+ int inobject_fields_offset = map.GetInObjectPropertyOffset(0);
+  // |embedder_fields_offset| may be greater than |inobject_fields_offset| if
+  // the object does not have embedder fields, but the check handles this
+  // case properly.
+ if (embedder_fields_offset <= offset && offset < inobject_fields_offset) {
+ // offset points to embedder fields area:
+ // [embedder_fields_offset, inobject_fields_offset).
+ STATIC_ASSERT(base::bits::IsPowerOfTwo(kEmbedderDataSlotSize));
+ return ((offset - embedder_fields_offset) & (kEmbedderDataSlotSize - 1)) ==
+ EmbedderDataSlot::kTaggedPayloadOffset;
+ }
+#else
+ // We store raw aligned pointers as Smis, so it's safe to treat the whole
+ // embedder field area as tagged slots.
+ STATIC_ASSERT(kEmbedderDataSlotSize == kTaggedSize);
+#endif
+ if (!FLAG_unbox_double_fields || map.HasFastPointerLayout()) {
+ return true;
+ } else {
+ DCHECK(FLAG_unbox_double_fields);
+ DCHECK(IsAligned(offset, kSystemPointerSize));
+
+ LayoutDescriptorHelper helper(map);
+ DCHECK(!helper.all_fields_tagged());
+ return helper.IsTagged(offset);
+ }
+}
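
Under pointer compression an embedder data slot spans two tagged words, only
one of which holds a tagged payload, so the check above reduces slot validity
to the offset's position within its slot via a power-of-two mask. The same
arithmetic as a standalone sketch, with hypothetical constants:

    constexpr int kSlotSize = 8;       // Two 4-byte tagged words per slot.
    constexpr int kPayloadOffset = 0;  // Payload word's offset within a slot.

    // True iff `offset`, relative to the start of the embedder fields area,
    // lands on the tagged payload word of a slot (kSlotSize is a power of 2).
    constexpr bool IsTaggedPayload(int offset) {
      return (offset & (kSlotSize - 1)) == kPayloadOffset;
    }

    static_assert(IsTaggedPayload(0) && IsTaggedPayload(8), "payload words");
    static_assert(!IsTaggedPayload(4), "raw words are skipped");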
+
+template <typename ObjectVisitor>
+void BodyDescriptorBase::IterateJSObjectBodyImpl(Map map, HeapObject obj,
+ int start_offset,
+ int end_offset,
+ ObjectVisitor* v) {
+#ifdef V8_COMPRESS_POINTERS
+ STATIC_ASSERT(kEmbedderDataSlotSize == 2 * kTaggedSize);
+ int header_size = JSObject::GetHeaderSize(map);
+ int inobject_fields_offset = map.GetInObjectPropertyOffset(0);
+ // We are always requested to process header and embedder fields.
+ DCHECK_LE(inobject_fields_offset, end_offset);
+ // Embedder fields are located between header and inobject properties.
+ if (header_size < inobject_fields_offset) {
+ // There are embedder fields.
+ IteratePointers(obj, start_offset, header_size, v);
+    // Iterate only the tagged payload of the embedder slots and skip the
+    // raw payload.
+ DCHECK_EQ(header_size, JSObject::GetEmbedderFieldsStartOffset(map));
+ for (int offset = header_size + EmbedderDataSlot::kTaggedPayloadOffset;
+ offset < inobject_fields_offset; offset += kEmbedderDataSlotSize) {
+ IteratePointer(obj, offset, v);
+ }
+ // Proceed processing inobject properties.
+ start_offset = inobject_fields_offset;
+ }
+#else
+ // We store raw aligned pointers as Smis, so it's safe to iterate the whole
+ // embedder field area as tagged slots.
+ STATIC_ASSERT(kEmbedderDataSlotSize == kTaggedSize);
+#endif
+ if (!FLAG_unbox_double_fields || map.HasFastPointerLayout()) {
+ IteratePointers(obj, start_offset, end_offset, v);
+ } else {
+ DCHECK(FLAG_unbox_double_fields);
+ DCHECK(IsAligned(start_offset, kSystemPointerSize) &&
+ IsAligned(end_offset, kSystemPointerSize));
+
+ LayoutDescriptorHelper helper(map);
+ DCHECK(!helper.all_fields_tagged());
+ for (int offset = start_offset; offset < end_offset;) {
+ int end_of_region_offset;
+ if (helper.IsTagged(offset, end_offset, &end_of_region_offset)) {
+ IteratePointers(obj, offset, end_of_region_offset, v);
+ }
+ offset = end_of_region_offset;
+ }
+ }
+}
+
+template <typename ObjectVisitor>
+DISABLE_CFI_PERF void BodyDescriptorBase::IteratePointers(HeapObject obj,
+ int start_offset,
+ int end_offset,
+ ObjectVisitor* v) {
+ v->VisitPointers(obj, obj.RawField(start_offset), obj.RawField(end_offset));
+}
+
+template <typename ObjectVisitor>
+void BodyDescriptorBase::IteratePointer(HeapObject obj, int offset,
+ ObjectVisitor* v) {
+ v->VisitPointer(obj, obj.RawField(offset));
+}
+
+template <typename ObjectVisitor>
+DISABLE_CFI_PERF void BodyDescriptorBase::IterateMaybeWeakPointers(
+ HeapObject obj, int start_offset, int end_offset, ObjectVisitor* v) {
+ v->VisitPointers(obj, obj.RawMaybeWeakField(start_offset),
+ obj.RawMaybeWeakField(end_offset));
+}
+
+template <typename ObjectVisitor>
+void BodyDescriptorBase::IterateMaybeWeakPointer(HeapObject obj, int offset,
+ ObjectVisitor* v) {
+ v->VisitPointer(obj, obj.RawMaybeWeakField(offset));
+}
+
+template <typename ObjectVisitor>
+DISABLE_CFI_PERF void BodyDescriptorBase::IterateCustomWeakPointers(
+ HeapObject obj, int start_offset, int end_offset, ObjectVisitor* v) {
+ v->VisitCustomWeakPointers(obj, obj.RawField(start_offset),
+ obj.RawField(end_offset));
+}
+
+template <typename ObjectVisitor>
+DISABLE_CFI_PERF void BodyDescriptorBase::IterateEphemeron(HeapObject obj,
+ int index,
+ int key_offset,
+ int value_offset,
+ ObjectVisitor* v) {
+ v->VisitEphemeron(obj, index, obj.RawField(key_offset),
+ obj.RawField(value_offset));
+}
+
+template <typename ObjectVisitor>
+void BodyDescriptorBase::IterateCustomWeakPointer(HeapObject obj, int offset,
+ ObjectVisitor* v) {
+ v->VisitCustomWeakPointer(obj, obj.RawField(offset));
+}
+
+class JSObject::BodyDescriptor final : public BodyDescriptorBase {
+ public:
+ static const int kStartOffset = JSReceiver::kPropertiesOrHashOffset;
+
+ static bool IsValidSlot(Map map, HeapObject obj, int offset) {
+ if (offset < kStartOffset) return false;
+ return IsValidJSObjectSlotImpl(map, obj, offset);
+ }
+
+ template <typename ObjectVisitor>
+ static inline void IterateBody(Map map, HeapObject obj, int object_size,
+ ObjectVisitor* v) {
+ IterateJSObjectBodyImpl(map, obj, kStartOffset, object_size, v);
+ }
+
+ static inline int SizeOf(Map map, HeapObject object) {
+ return map.instance_size();
+ }
+};
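
Every descriptor in this file implements the same three-function contract:
IsValidSlot says whether an offset may hold a tagged value, IterateBody hands
the tagged slots to an ObjectVisitor, and SizeOf derives the object's size
from its map. A minimal sketch for a hypothetical fixed-size object with
exactly two tagged fields after the header:

    // Hypothetical object; mirrors the pattern used throughout this file.
    class HypotheticalObject::BodyDescriptor final : public BodyDescriptorBase {
     public:
      static const int kEndOfTaggedFields =
          HeapObject::kHeaderSize + 2 * kTaggedSize;

      static bool IsValidSlot(Map map, HeapObject obj, int offset) {
        return offset >= HeapObject::kHeaderSize && offset < kEndOfTaggedFields;
      }

      template <typename ObjectVisitor>
      static inline void IterateBody(Map map, HeapObject obj, int object_size,
                                     ObjectVisitor* v) {
        IteratePointers(obj, HeapObject::kHeaderSize, kEndOfTaggedFields, v);
      }

      static inline int SizeOf(Map map, HeapObject object) {
        return map.instance_size();  // Fixed-size, so the map knows the size.
      }
    };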
+
+class JSObject::FastBodyDescriptor final : public BodyDescriptorBase {
+ public:
+ static const int kStartOffset = JSReceiver::kPropertiesOrHashOffset;
+
+ static bool IsValidSlot(Map map, HeapObject obj, int offset) {
+ return offset >= kStartOffset;
+ }
+
+ template <typename ObjectVisitor>
+ static inline void IterateBody(Map map, HeapObject obj, int object_size,
+ ObjectVisitor* v) {
+ IteratePointers(obj, kStartOffset, object_size, v);
+ }
+
+ static inline int SizeOf(Map map, HeapObject object) {
+ return map.instance_size();
+ }
+};
+
+class WeakCell::BodyDescriptor final : public BodyDescriptorBase {
+ public:
+ static bool IsValidSlot(Map map, HeapObject obj, int offset) {
+ return offset >= HeapObject::kHeaderSize;
+ }
+
+ template <typename ObjectVisitor>
+ static inline void IterateBody(Map map, HeapObject obj, int object_size,
+ ObjectVisitor* v) {
+ IteratePointers(obj, HeapObject::kHeaderSize, kTargetOffset, v);
+ IterateCustomWeakPointer(obj, kTargetOffset, v);
+ IteratePointers(obj, kTargetOffset + kTaggedSize, object_size, v);
+ }
+
+ static inline int SizeOf(Map map, HeapObject object) {
+ return map.instance_size();
+ }
+};
+
+class JSWeakRef::BodyDescriptor final : public BodyDescriptorBase {
+ public:
+ static bool IsValidSlot(Map map, HeapObject obj, int offset) {
+ return IsValidJSObjectSlotImpl(map, obj, offset);
+ }
+
+ template <typename ObjectVisitor>
+ static inline void IterateBody(Map map, HeapObject obj, int object_size,
+ ObjectVisitor* v) {
+ IteratePointers(obj, JSReceiver::kPropertiesOrHashOffset, kTargetOffset, v);
+ IterateCustomWeakPointer(obj, kTargetOffset, v);
+ IterateJSObjectBodyImpl(map, obj, kTargetOffset + kTaggedSize, object_size,
+ v);
+ }
+
+ static inline int SizeOf(Map map, HeapObject object) {
+ return map.instance_size();
+ }
+};
+
+class SharedFunctionInfo::BodyDescriptor final : public BodyDescriptorBase {
+ public:
+ static bool IsValidSlot(Map map, HeapObject obj, int offset) {
+ static_assert(kEndOfWeakFieldsOffset == kStartOfStrongFieldsOffset,
+                  "Leverage that strong fields directly follow weak fields "
+ "to call FixedBodyDescriptor<...>::IsValidSlot below");
+ return FixedBodyDescriptor<kStartOfWeakFieldsOffset,
+ kEndOfStrongFieldsOffset,
+ kAlignedSize>::IsValidSlot(map, obj, offset);
+ }
+
+ template <typename ObjectVisitor>
+ static inline void IterateBody(Map map, HeapObject obj, int object_size,
+ ObjectVisitor* v) {
+ IterateCustomWeakPointer(obj, kFunctionDataOffset, v);
+ IteratePointers(obj, SharedFunctionInfo::kStartOfStrongFieldsOffset,
+ SharedFunctionInfo::kEndOfStrongFieldsOffset, v);
+ }
+
+ static inline int SizeOf(Map map, HeapObject object) {
+ return map.instance_size();
+ }
+};
+
+class AllocationSite::BodyDescriptor final : public BodyDescriptorBase {
+ public:
+ STATIC_ASSERT(AllocationSite::kCommonPointerFieldEndOffset ==
+ AllocationSite::kPretenureDataOffset);
+ STATIC_ASSERT(AllocationSite::kPretenureDataOffset + kInt32Size ==
+ AllocationSite::kPretenureCreateCountOffset);
+ STATIC_ASSERT(AllocationSite::kPretenureCreateCountOffset + kInt32Size ==
+ AllocationSite::kWeakNextOffset);
+
+ static bool IsValidSlot(Map map, HeapObject obj, int offset) {
+ if (offset >= AllocationSite::kStartOffset &&
+ offset < AllocationSite::kCommonPointerFieldEndOffset) {
+ return true;
+ }
+    // Check for the weak_next offset.
+ if (map.instance_size() == AllocationSite::kSizeWithWeakNext &&
+ offset == AllocationSite::kWeakNextOffset) {
+ return true;
+ }
+ return false;
+ }
+
+ template <typename ObjectVisitor>
+ static inline void IterateBody(Map map, HeapObject obj, int object_size,
+ ObjectVisitor* v) {
+ // Iterate over all the common pointer fields
+ IteratePointers(obj, AllocationSite::kStartOffset,
+ AllocationSite::kCommonPointerFieldEndOffset, v);
+    // Skip PretenureDataOffset and PretenureCreateCount, which are Int32 fields.
+    // Visit weak_next only if the object has a weak_next field.
+ if (object_size == AllocationSite::kSizeWithWeakNext) {
+ IterateCustomWeakPointers(obj, AllocationSite::kWeakNextOffset,
+ AllocationSite::kSizeWithWeakNext, v);
+ }
+ }
+
+ static inline int SizeOf(Map map, HeapObject object) {
+ return map.instance_size();
+ }
+};
+
+class JSArrayBuffer::BodyDescriptor final : public BodyDescriptorBase {
+ public:
+ static bool IsValidSlot(Map map, HeapObject obj, int offset) {
+ if (offset < kEndOfTaggedFieldsOffset) return true;
+ if (offset < kHeaderSize) return false;
+ return IsValidJSObjectSlotImpl(map, obj, offset);
+ }
+
+ template <typename ObjectVisitor>
+ static inline void IterateBody(Map map, HeapObject obj, int object_size,
+ ObjectVisitor* v) {
+ // JSArrayBuffer instances contain raw data that the GC does not know about.
+ IteratePointers(obj, kPropertiesOrHashOffset, kEndOfTaggedFieldsOffset, v);
+ IterateJSObjectBodyImpl(map, obj, kHeaderSize, object_size, v);
+ }
+
+ static inline int SizeOf(Map map, HeapObject object) {
+ return map.instance_size();
+ }
+};
+
+class JSTypedArray::BodyDescriptor final : public BodyDescriptorBase {
+ public:
+ static bool IsValidSlot(Map map, HeapObject obj, int offset) {
+ if (offset < kEndOfTaggedFieldsOffset) return true;
+ // TODO(v8:4153): Remove this.
+ if (offset == kBasePointerOffset) return true;
+ if (offset < kHeaderSize) return false;
+ return IsValidJSObjectSlotImpl(map, obj, offset);
+ }
+
+ template <typename ObjectVisitor>
+ static inline void IterateBody(Map map, HeapObject obj, int object_size,
+ ObjectVisitor* v) {
+ // JSTypedArray contains raw data that the GC does not know about.
+ IteratePointers(obj, kPropertiesOrHashOffset, kEndOfTaggedFieldsOffset, v);
+ // TODO(v8:4153): Remove this.
+ IteratePointer(obj, kBasePointerOffset, v);
+ IterateJSObjectBodyImpl(map, obj, kHeaderSize, object_size, v);
+ }
+
+ static inline int SizeOf(Map map, HeapObject object) {
+ return map.instance_size();
+ }
+};
+
+class JSDataView::BodyDescriptor final : public BodyDescriptorBase {
+ public:
+ static bool IsValidSlot(Map map, HeapObject obj, int offset) {
+ if (offset < kEndOfTaggedFieldsOffset) return true;
+ if (offset < kHeaderSize) return false;
+ return IsValidJSObjectSlotImpl(map, obj, offset);
+ }
+
+ template <typename ObjectVisitor>
+ static inline void IterateBody(Map map, HeapObject obj, int object_size,
+ ObjectVisitor* v) {
+ // JSDataView contains raw data that the GC does not know about.
+ IteratePointers(obj, kPropertiesOrHashOffset, kEndOfTaggedFieldsOffset, v);
+ IterateJSObjectBodyImpl(map, obj, kHeaderSize, object_size, v);
+ }
+
+ static inline int SizeOf(Map map, HeapObject object) {
+ return map.instance_size();
+ }
+};
+
+template <typename Derived>
+class V8_EXPORT_PRIVATE SmallOrderedHashTable<Derived>::BodyDescriptor final
+ : public BodyDescriptorBase {
+ public:
+ static bool IsValidSlot(Map map, HeapObject obj, int offset) {
+ Derived table = Derived::cast(obj);
+ // Only data table part contains tagged values.
+ return (offset >= DataTableStartOffset()) &&
+ (offset < table.GetBucketsStartOffset());
+ }
+
+ template <typename ObjectVisitor>
+ static inline void IterateBody(Map map, HeapObject obj, int object_size,
+ ObjectVisitor* v) {
+ Derived table = Derived::cast(obj);
+ int start_offset = DataTableStartOffset();
+ int end_offset = table.GetBucketsStartOffset();
+ IteratePointers(obj, start_offset, end_offset, v);
+ }
+
+ static inline int SizeOf(Map map, HeapObject obj) {
+ Derived table = Derived::cast(obj);
+ return table.SizeFor(table.Capacity());
+ }
+};
+
+class ByteArray::BodyDescriptor final : public BodyDescriptorBase {
+ public:
+ static bool IsValidSlot(Map map, HeapObject obj, int offset) { return false; }
+
+ template <typename ObjectVisitor>
+ static inline void IterateBody(Map map, HeapObject obj, int object_size,
+ ObjectVisitor* v) {}
+
+ static inline int SizeOf(Map map, HeapObject obj) {
+ return ByteArray::SizeFor(ByteArray::cast(obj).synchronized_length());
+ }
+};
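
Variable-sized objects like ByteArray derive SizeOf from a length read with
acquire semantics (synchronized_length), since a concurrent marker may ask for
the size while the main thread is still writing the object. A sketch of the
accessor pair, assuming a Smi-encoded length field (a simplification of V8's
SYNCHRONIZED_SMI_ACCESSORS macro):

    // Sketch class; the real accessors come from SYNCHRONIZED_SMI_ACCESSORS.
    int ByteArraySketch::synchronized_length() const {
      // Acquire-load; pairs with the release-store in the synchronized setter.
      return Smi::ToInt(ACQUIRE_READ_FIELD(*this, kLengthOffset));
    }

    void ByteArraySketch::synchronized_set_length(int value) {
      RELEASE_WRITE_FIELD(*this, kLengthOffset, Smi::FromInt(value));
    }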
+
+class BytecodeArray::BodyDescriptor final : public BodyDescriptorBase {
+ public:
+ static bool IsValidSlot(Map map, HeapObject obj, int offset) {
+ return offset >= kConstantPoolOffset &&
+ offset <= kSourcePositionTableOffset;
+ }
+
+ template <typename ObjectVisitor>
+ static inline void IterateBody(Map map, HeapObject obj, int object_size,
+ ObjectVisitor* v) {
+ IteratePointer(obj, kConstantPoolOffset, v);
+ IteratePointer(obj, kHandlerTableOffset, v);
+ IteratePointer(obj, kSourcePositionTableOffset, v);
+ }
+
+ static inline int SizeOf(Map map, HeapObject obj) {
+ return BytecodeArray::SizeFor(
+ BytecodeArray::cast(obj).synchronized_length());
+ }
+};
+
+class BigInt::BodyDescriptor final : public BodyDescriptorBase {
+ public:
+ static bool IsValidSlot(Map map, HeapObject obj, int offset) { return false; }
+
+ template <typename ObjectVisitor>
+ static inline void IterateBody(Map map, HeapObject obj, int object_size,
+ ObjectVisitor* v) {}
+
+ static inline int SizeOf(Map map, HeapObject obj) {
+ return BigInt::SizeFor(BigInt::cast(obj).synchronized_length());
+ }
+};
+
+class FixedDoubleArray::BodyDescriptor final : public BodyDescriptorBase {
+ public:
+ static bool IsValidSlot(Map map, HeapObject obj, int offset) { return false; }
+
+ template <typename ObjectVisitor>
+ static inline void IterateBody(Map map, HeapObject obj, int object_size,
+ ObjectVisitor* v) {}
+
+ static inline int SizeOf(Map map, HeapObject obj) {
+ return FixedDoubleArray::SizeFor(
+ FixedDoubleArray::cast(obj).synchronized_length());
+ }
+};
+
+class FeedbackMetadata::BodyDescriptor final : public BodyDescriptorBase {
+ public:
+ static bool IsValidSlot(Map map, HeapObject obj, int offset) { return false; }
+
+ template <typename ObjectVisitor>
+ static inline void IterateBody(Map map, HeapObject obj, int object_size,
+ ObjectVisitor* v) {}
+
+ static inline int SizeOf(Map map, HeapObject obj) {
+ return FeedbackMetadata::SizeFor(
+ FeedbackMetadata::cast(obj).synchronized_slot_count());
+ }
+};
+
+class FeedbackVector::BodyDescriptor final : public BodyDescriptorBase {
+ public:
+ static bool IsValidSlot(Map map, HeapObject obj, int offset) {
+ return offset == kSharedFunctionInfoOffset ||
+ offset == kOptimizedCodeWeakOrSmiOffset ||
+ offset == kClosureFeedbackCellArrayOffset ||
+ offset >= kFeedbackSlotsOffset;
+ }
+
+ template <typename ObjectVisitor>
+ static inline void IterateBody(Map map, HeapObject obj, int object_size,
+ ObjectVisitor* v) {
+ IteratePointer(obj, kSharedFunctionInfoOffset, v);
+ IterateMaybeWeakPointer(obj, kOptimizedCodeWeakOrSmiOffset, v);
+ IteratePointer(obj, kClosureFeedbackCellArrayOffset, v);
+ IterateMaybeWeakPointers(obj, kFeedbackSlotsOffset, object_size, v);
+ }
+
+ static inline int SizeOf(Map map, HeapObject obj) {
+ return FeedbackVector::SizeFor(FeedbackVector::cast(obj).length());
+ }
+};
+
+class PreparseData::BodyDescriptor final : public BodyDescriptorBase {
+ public:
+ static bool IsValidSlot(Map map, HeapObject obj, int offset) {
+ return offset >= PreparseData::cast(obj).inner_start_offset();
+ }
+
+ template <typename ObjectVisitor>
+ static inline void IterateBody(Map map, HeapObject obj, int object_size,
+ ObjectVisitor* v) {
+ PreparseData data = PreparseData::cast(obj);
+ int start_offset = data.inner_start_offset();
+ int end_offset = start_offset + data.children_length() * kTaggedSize;
+ IteratePointers(obj, start_offset, end_offset, v);
+ }
+
+ static inline int SizeOf(Map map, HeapObject obj) {
+ PreparseData data = PreparseData::cast(obj);
+ return PreparseData::SizeFor(data.data_length(), data.children_length());
+ }
+};
+
+class PrototypeInfo::BodyDescriptor final : public BodyDescriptorBase {
+ public:
+ static bool IsValidSlot(Map map, HeapObject obj, int offset) {
+ return offset >= HeapObject::kHeaderSize;
+ }
+
+ template <typename ObjectVisitor>
+ static inline void IterateBody(Map map, HeapObject obj, int object_size,
+ ObjectVisitor* v) {
+ IteratePointers(obj, HeapObject::kHeaderSize, kObjectCreateMapOffset, v);
+ IterateMaybeWeakPointer(obj, kObjectCreateMapOffset, v);
+ IteratePointers(obj, kObjectCreateMapOffset + kTaggedSize, object_size, v);
+ }
+
+ static inline int SizeOf(Map map, HeapObject obj) {
+ return obj.SizeFromMap(map);
+ }
+};
+
+class JSWeakCollection::BodyDescriptorImpl final : public BodyDescriptorBase {
+ public:
+ STATIC_ASSERT(kTableOffset + kTaggedSize == kSizeOfAllWeakCollections);
+
+ static bool IsValidSlot(Map map, HeapObject obj, int offset) {
+ return IsValidJSObjectSlotImpl(map, obj, offset);
+ }
+
+ template <typename ObjectVisitor>
+ static inline void IterateBody(Map map, HeapObject obj, int object_size,
+ ObjectVisitor* v) {
+ IterateJSObjectBodyImpl(map, obj, kPropertiesOrHashOffset, object_size, v);
+ }
+
+ static inline int SizeOf(Map map, HeapObject object) {
+ return map.instance_size();
+ }
+};
+
+class Foreign::BodyDescriptor final : public BodyDescriptorBase {
+ public:
+ static bool IsValidSlot(Map map, HeapObject obj, int offset) { return false; }
+
+ template <typename ObjectVisitor>
+ static inline void IterateBody(Map map, HeapObject obj, int object_size,
+ ObjectVisitor* v) {
+ v->VisitExternalReference(
+ Foreign::cast(obj), reinterpret_cast<Address*>(
+ obj.RawField(kForeignAddressOffset).address()));
+ }
+
+ static inline int SizeOf(Map map, HeapObject object) { return kSize; }
+};
+
+class ExternalOneByteString::BodyDescriptor final : public BodyDescriptorBase {
+ public:
+ static bool IsValidSlot(Map map, HeapObject obj, int offset) { return false; }
+
+ template <typename ObjectVisitor>
+ static inline void IterateBody(Map map, HeapObject obj, int object_size,
+ ObjectVisitor* v) {}
+
+ static inline int SizeOf(Map map, HeapObject object) { return kSize; }
+};
+
+class ExternalTwoByteString::BodyDescriptor final : public BodyDescriptorBase {
+ public:
+ static bool IsValidSlot(Map map, HeapObject obj, int offset) { return false; }
+
+ template <typename ObjectVisitor>
+ static inline void IterateBody(Map map, HeapObject obj, int object_size,
+ ObjectVisitor* v) {}
+
+ static inline int SizeOf(Map map, HeapObject object) { return kSize; }
+};
+
+class Code::BodyDescriptor final : public BodyDescriptorBase {
+ public:
+ STATIC_ASSERT(kRelocationInfoOffset + kTaggedSize ==
+ kDeoptimizationDataOffset);
+ STATIC_ASSERT(kDeoptimizationDataOffset + kTaggedSize ==
+ kSourcePositionTableOffset);
+ STATIC_ASSERT(kSourcePositionTableOffset + kTaggedSize ==
+ kCodeDataContainerOffset);
+ STATIC_ASSERT(kCodeDataContainerOffset + kTaggedSize == kDataStart);
+
+ static bool IsValidSlot(Map map, HeapObject obj, int offset) {
+ // Slots in code can't be invalid because we never trim code objects.
+ return true;
+ }
+
+ static constexpr int kRelocModeMask =
+ RelocInfo::ModeMask(RelocInfo::CODE_TARGET) |
+ RelocInfo::ModeMask(RelocInfo::RELATIVE_CODE_TARGET) |
+ RelocInfo::ModeMask(RelocInfo::FULL_EMBEDDED_OBJECT) |
+ RelocInfo::ModeMask(RelocInfo::COMPRESSED_EMBEDDED_OBJECT) |
+ RelocInfo::ModeMask(RelocInfo::EXTERNAL_REFERENCE) |
+ RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE) |
+ RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE_ENCODED) |
+ RelocInfo::ModeMask(RelocInfo::OFF_HEAP_TARGET) |
+ RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY);
+
+ template <typename ObjectVisitor>
+ static inline void IterateBody(Map map, HeapObject obj, ObjectVisitor* v) {
+    // The GC does not directly visit data/code in the header or the body.
+ IteratePointers(obj, kRelocationInfoOffset, kDataStart, v);
+
+ RelocIterator it(Code::cast(obj), kRelocModeMask);
+ v->VisitRelocInfo(&it);
+ }
+
+ template <typename ObjectVisitor>
+ static inline void IterateBody(Map map, HeapObject obj, int object_size,
+ ObjectVisitor* v) {
+ IterateBody(map, obj, v);
+ }
+
+ static inline int SizeOf(Map map, HeapObject object) {
+ return Code::unchecked_cast(object).CodeSize();
+ }
+};
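
Code is the one object whose body walk consults relocation info in addition to
header fields: kRelocModeMask selects the RelocInfo modes that can carry
GC-visible pointers, and RelocIterator yields only entries whose mode bit is
in the mask. ModeMask is one bit per enum value, so the selection logic
amounts to the following sketch (enumerators are illustrative, not the real
set):

    enum Mode : int { kCodeTarget, kEmbeddedObject, kVeneerPool };

    constexpr int ModeMask(Mode mode) { return 1 << mode; }

    constexpr int kMask = ModeMask(kCodeTarget) | ModeMask(kEmbeddedObject);

    // The iterator skips entries whose mode bit is not set in the mask.
    constexpr bool Selected(Mode mode) { return (kMask & ModeMask(mode)) != 0; }

    static_assert(Selected(kCodeTarget), "visited");
    static_assert(!Selected(kVeneerPool), "skipped in this sketch's mask");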
+
+class SeqOneByteString::BodyDescriptor final : public BodyDescriptorBase {
+ public:
+ static bool IsValidSlot(Map map, HeapObject obj, int offset) { return false; }
+
+ template <typename ObjectVisitor>
+ static inline void IterateBody(Map map, HeapObject obj, int object_size,
+ ObjectVisitor* v) {}
+
+ static inline int SizeOf(Map map, HeapObject obj) {
+ SeqOneByteString string = SeqOneByteString::cast(obj);
+ return string.SizeFor(string.synchronized_length());
+ }
+};
+
+class SeqTwoByteString::BodyDescriptor final : public BodyDescriptorBase {
+ public:
+ static bool IsValidSlot(Map map, HeapObject obj, int offset) { return false; }
+
+ template <typename ObjectVisitor>
+ static inline void IterateBody(Map map, HeapObject obj, int object_size,
+ ObjectVisitor* v) {}
+
+ static inline int SizeOf(Map map, HeapObject obj) {
+ SeqTwoByteString string = SeqTwoByteString::cast(obj);
+ return string.SizeFor(string.synchronized_length());
+ }
+};
+
+class WasmInstanceObject::BodyDescriptor final : public BodyDescriptorBase {
+ public:
+ static bool IsValidSlot(Map map, HeapObject obj, int offset) {
+ SLOW_DCHECK(std::is_sorted(std::begin(kTaggedFieldOffsets),
+ std::end(kTaggedFieldOffsets)));
+ STATIC_ASSERT(sizeof(*kTaggedFieldOffsets) == sizeof(uint16_t));
+ if (offset < int{8 * sizeof(*kTaggedFieldOffsets)} &&
+ std::binary_search(std::begin(kTaggedFieldOffsets),
+ std::end(kTaggedFieldOffsets),
+ static_cast<uint16_t>(offset))) {
+ return true;
+ }
+ return IsValidJSObjectSlotImpl(map, obj, offset);
+ }
+
+ template <typename ObjectVisitor>
+ static inline void IterateBody(Map map, HeapObject obj, int object_size,
+ ObjectVisitor* v) {
+ IteratePointers(obj, kPropertiesOrHashOffset, JSObject::kHeaderSize, v);
+ for (uint16_t offset : kTaggedFieldOffsets) {
+ IteratePointer(obj, offset, v);
+ }
+ IterateJSObjectBodyImpl(map, obj, kSize, object_size, v);
+ }
+
+ static inline int SizeOf(Map map, HeapObject object) {
+ return map.instance_size();
+ }
+};
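
WasmInstanceObject mixes tagged and raw fields, so its descriptor keeps the
tagged offsets in a sorted uint16_t table and answers IsValidSlot with a
binary search before falling back to the generic JSObject check. The lookup
idiom, with a hypothetical table:

    #include <algorithm>
    #include <cstdint>
    #include <iterator>

    // Hypothetical offsets; the real table is kTaggedFieldOffsets above.
    constexpr uint16_t kOffsets[] = {8, 16, 24, 40};

    bool IsTaggedOffset(int offset) {
      // binary_search needs a sorted range; the SLOW_DCHECK above verifies it.
      return std::binary_search(std::begin(kOffsets), std::end(kOffsets),
                                static_cast<uint16_t>(offset));
    }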
+
+class Map::BodyDescriptor final : public BodyDescriptorBase {
+ public:
+ static bool IsValidSlot(Map map, HeapObject obj, int offset) {
+ static_assert(
+ Map::kEndOfStrongFieldsOffset == Map::kStartOfWeakFieldsOffset,
+ "Leverage that weak fields directly follow strong fields for the "
+ "check below");
+ return offset >= Map::kStartOfStrongFieldsOffset &&
+ offset < Map::kEndOfWeakFieldsOffset;
+ }
+
+ template <typename ObjectVisitor>
+ static inline void IterateBody(Map map, HeapObject obj, int object_size,
+ ObjectVisitor* v) {
+ IteratePointers(obj, Map::kStartOfStrongFieldsOffset,
+ Map::kEndOfStrongFieldsOffset, v);
+ IterateMaybeWeakPointer(obj, kTransitionsOrPrototypeInfoOffset, v);
+ }
+
+ static inline int SizeOf(Map map, HeapObject obj) { return Map::kSize; }
+};
+
+class DataHandler::BodyDescriptor final : public BodyDescriptorBase {
+ public:
+ static bool IsValidSlot(Map map, HeapObject obj, int offset) {
+ return offset >= HeapObject::kHeaderSize;
+ }
+
+ template <typename ObjectVisitor>
+ static inline void IterateBody(Map map, HeapObject obj, int object_size,
+ ObjectVisitor* v) {
+ static_assert(kSmiHandlerOffset < kData1Offset,
+ "Field order must be in sync with this iteration code");
+ static_assert(kData1Offset < kSizeWithData1,
+ "Field order must be in sync with this iteration code");
+ IteratePointers(obj, kSmiHandlerOffset, kData1Offset, v);
+ IterateMaybeWeakPointers(obj, kData1Offset, object_size, v);
+ }
+
+ static inline int SizeOf(Map map, HeapObject object) {
+ return object.SizeFromMap(map);
+ }
+};
+
+class NativeContext::BodyDescriptor final : public BodyDescriptorBase {
+ public:
+ static bool IsValidSlot(Map map, HeapObject obj, int offset) {
+ return offset < NativeContext::kEndOfTaggedFieldsOffset;
+ }
+
+ template <typename ObjectVisitor>
+ static inline void IterateBody(Map map, HeapObject obj, int object_size,
+ ObjectVisitor* v) {
+ IteratePointers(obj, NativeContext::kStartOfStrongFieldsOffset,
+ NativeContext::kEndOfStrongFieldsOffset, v);
+ IterateCustomWeakPointers(obj, NativeContext::kStartOfWeakFieldsOffset,
+ NativeContext::kEndOfWeakFieldsOffset, v);
+ }
+
+ static inline int SizeOf(Map map, HeapObject object) {
+ return NativeContext::kSize;
+ }
+};
+
+class CodeDataContainer::BodyDescriptor final : public BodyDescriptorBase {
+ public:
+ static bool IsValidSlot(Map map, HeapObject obj, int offset) {
+ return offset >= CodeDataContainer::kHeaderSize &&
+ offset < CodeDataContainer::kSize;
+ }
+
+ template <typename ObjectVisitor>
+ static inline void IterateBody(Map map, HeapObject obj, int object_size,
+ ObjectVisitor* v) {
+ IteratePointers(obj, CodeDataContainer::kHeaderSize,
+ CodeDataContainer::kPointerFieldsStrongEndOffset, v);
+ IterateCustomWeakPointers(
+ obj, CodeDataContainer::kPointerFieldsStrongEndOffset,
+ CodeDataContainer::kPointerFieldsWeakEndOffset, v);
+ }
+
+ static inline int SizeOf(Map map, HeapObject object) {
+ return CodeDataContainer::kSize;
+ }
+};
+
+class EmbedderDataArray::BodyDescriptor final : public BodyDescriptorBase {
+ public:
+ static bool IsValidSlot(Map map, HeapObject obj, int offset) {
+#ifdef V8_COMPRESS_POINTERS
+ STATIC_ASSERT(kEmbedderDataSlotSize == 2 * kTaggedSize);
+ STATIC_ASSERT(base::bits::IsPowerOfTwo(kEmbedderDataSlotSize));
+ return (offset < EmbedderDataArray::kHeaderSize) ||
+ (((offset - EmbedderDataArray::kHeaderSize) &
+ (kEmbedderDataSlotSize - 1)) ==
+ EmbedderDataSlot::kTaggedPayloadOffset);
+#else
+ STATIC_ASSERT(kEmbedderDataSlotSize == kTaggedSize);
+ // We store raw aligned pointers as Smis, so it's safe to iterate the whole
+ // array.
+ return true;
+#endif
+ }
+
+ template <typename ObjectVisitor>
+ static inline void IterateBody(Map map, HeapObject obj, int object_size,
+ ObjectVisitor* v) {
+#ifdef V8_COMPRESS_POINTERS
+ STATIC_ASSERT(kEmbedderDataSlotSize == 2 * kTaggedSize);
+    // Iterate only the tagged payload of the embedder slots and skip the
+    // raw payload.
+ for (int offset = EmbedderDataArray::OffsetOfElementAt(0) +
+ EmbedderDataSlot::kTaggedPayloadOffset;
+ offset < object_size; offset += kEmbedderDataSlotSize) {
+ IteratePointer(obj, offset, v);
+ }
+#else
+ // We store raw aligned pointers as Smis, so it's safe to iterate the whole
+ // array.
+ STATIC_ASSERT(kEmbedderDataSlotSize == kTaggedSize);
+ IteratePointers(obj, EmbedderDataArray::kHeaderSize, object_size, v);
+#endif
+ }
+
+ static inline int SizeOf(Map map, HeapObject object) {
+ return object.SizeFromMap(map);
+ }
+};
+
+template <typename Op, typename ReturnType, typename T1, typename T2,
+ typename T3, typename T4>
+ReturnType BodyDescriptorApply(InstanceType type, T1 p1, T2 p2, T3 p3, T4 p4) {
+ if (type < FIRST_NONSTRING_TYPE) {
+ switch (type & kStringRepresentationMask) {
+ case kSeqStringTag:
+ return ReturnType();
+ case kConsStringTag:
+ return Op::template apply<ConsString::BodyDescriptor>(p1, p2, p3, p4);
+ case kThinStringTag:
+ return Op::template apply<ThinString::BodyDescriptor>(p1, p2, p3, p4);
+ case kSlicedStringTag:
+ return Op::template apply<SlicedString::BodyDescriptor>(p1, p2, p3, p4);
+ case kExternalStringTag:
+ if ((type & kStringEncodingMask) == kOneByteStringTag) {
+ return Op::template apply<ExternalOneByteString::BodyDescriptor>(
+ p1, p2, p3, p4);
+ } else {
+ return Op::template apply<ExternalTwoByteString::BodyDescriptor>(
+ p1, p2, p3, p4);
+ }
+ }
+ UNREACHABLE();
+ }
+
+ switch (type) {
+ case EMBEDDER_DATA_ARRAY_TYPE:
+ return Op::template apply<EmbedderDataArray::BodyDescriptor>(p1, p2, p3,
+ p4);
+ case FIXED_ARRAY_TYPE:
+ case OBJECT_BOILERPLATE_DESCRIPTION_TYPE:
+ case CLOSURE_FEEDBACK_CELL_ARRAY_TYPE:
+ case HASH_TABLE_TYPE:
+ case ORDERED_HASH_MAP_TYPE:
+ case ORDERED_HASH_SET_TYPE:
+ case ORDERED_NAME_DICTIONARY_TYPE:
+ case NAME_DICTIONARY_TYPE:
+ case GLOBAL_DICTIONARY_TYPE:
+ case NUMBER_DICTIONARY_TYPE:
+ case SIMPLE_NUMBER_DICTIONARY_TYPE:
+ case STRING_TABLE_TYPE:
+ case SCOPE_INFO_TYPE:
+ case SCRIPT_CONTEXT_TABLE_TYPE:
+ return Op::template apply<FixedArray::BodyDescriptor>(p1, p2, p3, p4);
+ case EPHEMERON_HASH_TABLE_TYPE:
+ return Op::template apply<EphemeronHashTable::BodyDescriptor>(p1, p2, p3,
+ p4);
+ case AWAIT_CONTEXT_TYPE:
+ case BLOCK_CONTEXT_TYPE:
+ case CATCH_CONTEXT_TYPE:
+ case DEBUG_EVALUATE_CONTEXT_TYPE:
+ case EVAL_CONTEXT_TYPE:
+ case FUNCTION_CONTEXT_TYPE:
+ case MODULE_CONTEXT_TYPE:
+ case SCRIPT_CONTEXT_TYPE:
+ case WITH_CONTEXT_TYPE:
+ return Op::template apply<Context::BodyDescriptor>(p1, p2, p3, p4);
+ case NATIVE_CONTEXT_TYPE:
+ return Op::template apply<NativeContext::BodyDescriptor>(p1, p2, p3, p4);
+ case WEAK_FIXED_ARRAY_TYPE:
+ return Op::template apply<WeakFixedArray::BodyDescriptor>(p1, p2, p3, p4);
+ case WEAK_ARRAY_LIST_TYPE:
+ return Op::template apply<WeakArrayList::BodyDescriptor>(p1, p2, p3, p4);
+ case FIXED_DOUBLE_ARRAY_TYPE:
+ return ReturnType();
+ case FEEDBACK_METADATA_TYPE:
+ return Op::template apply<FeedbackMetadata::BodyDescriptor>(p1, p2, p3,
+ p4);
+ case PROPERTY_ARRAY_TYPE:
+ return Op::template apply<PropertyArray::BodyDescriptor>(p1, p2, p3, p4);
+ case DESCRIPTOR_ARRAY_TYPE:
+ return Op::template apply<DescriptorArray::BodyDescriptor>(p1, p2, p3,
+ p4);
+ case TRANSITION_ARRAY_TYPE:
+ return Op::template apply<TransitionArray::BodyDescriptor>(p1, p2, p3,
+ p4);
+ case FEEDBACK_CELL_TYPE:
+ return Op::template apply<FeedbackCell::BodyDescriptor>(p1, p2, p3, p4);
+ case FEEDBACK_VECTOR_TYPE:
+ return Op::template apply<FeedbackVector::BodyDescriptor>(p1, p2, p3, p4);
+ case JS_OBJECT_TYPE:
+ case JS_ERROR_TYPE:
+ case JS_ARGUMENTS_TYPE:
+ case JS_ASYNC_FROM_SYNC_ITERATOR_TYPE:
+ case JS_PROMISE_TYPE:
+ case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
+ case JS_GENERATOR_OBJECT_TYPE:
+ case JS_ASYNC_FUNCTION_OBJECT_TYPE:
+ case JS_ASYNC_GENERATOR_OBJECT_TYPE:
+ case JS_VALUE_TYPE:
+ case JS_DATE_TYPE:
+ case JS_ARRAY_TYPE:
+ case JS_ARRAY_ITERATOR_TYPE:
+ case JS_MODULE_NAMESPACE_TYPE:
+ case JS_SET_TYPE:
+ case JS_MAP_TYPE:
+ case JS_SET_KEY_VALUE_ITERATOR_TYPE:
+ case JS_SET_VALUE_ITERATOR_TYPE:
+ case JS_MAP_KEY_ITERATOR_TYPE:
+ case JS_MAP_KEY_VALUE_ITERATOR_TYPE:
+ case JS_MAP_VALUE_ITERATOR_TYPE:
+ case JS_STRING_ITERATOR_TYPE:
+ case JS_REGEXP_STRING_ITERATOR_TYPE:
+ case JS_REGEXP_TYPE:
+ case JS_GLOBAL_PROXY_TYPE:
+ case JS_GLOBAL_OBJECT_TYPE:
+ case JS_API_OBJECT_TYPE:
+ case JS_SPECIAL_API_OBJECT_TYPE:
+ case JS_MESSAGE_OBJECT_TYPE:
+ case JS_BOUND_FUNCTION_TYPE:
+ case JS_FINALIZATION_GROUP_CLEANUP_ITERATOR_TYPE:
+ case JS_FINALIZATION_GROUP_TYPE:
+#ifdef V8_INTL_SUPPORT
+ case JS_INTL_V8_BREAK_ITERATOR_TYPE:
+ case JS_INTL_COLLATOR_TYPE:
+ case JS_INTL_DATE_TIME_FORMAT_TYPE:
+ case JS_INTL_LIST_FORMAT_TYPE:
+ case JS_INTL_LOCALE_TYPE:
+ case JS_INTL_NUMBER_FORMAT_TYPE:
+ case JS_INTL_PLURAL_RULES_TYPE:
+ case JS_INTL_RELATIVE_TIME_FORMAT_TYPE:
+ case JS_INTL_SEGMENT_ITERATOR_TYPE:
+ case JS_INTL_SEGMENTER_TYPE:
+#endif // V8_INTL_SUPPORT
+ case WASM_EXCEPTION_TYPE:
+ case WASM_GLOBAL_TYPE:
+ case WASM_MEMORY_TYPE:
+ case WASM_MODULE_TYPE:
+ case WASM_TABLE_TYPE:
+ return Op::template apply<JSObject::BodyDescriptor>(p1, p2, p3, p4);
+ case WASM_INSTANCE_TYPE:
+ return Op::template apply<WasmInstanceObject::BodyDescriptor>(p1, p2, p3,
+ p4);
+ case JS_WEAK_MAP_TYPE:
+ case JS_WEAK_SET_TYPE:
+ return Op::template apply<JSWeakCollection::BodyDescriptor>(p1, p2, p3,
+ p4);
+ case JS_ARRAY_BUFFER_TYPE:
+ return Op::template apply<JSArrayBuffer::BodyDescriptor>(p1, p2, p3, p4);
+ case JS_DATA_VIEW_TYPE:
+ return Op::template apply<JSDataView::BodyDescriptor>(p1, p2, p3, p4);
+ case JS_TYPED_ARRAY_TYPE:
+ return Op::template apply<JSTypedArray::BodyDescriptor>(p1, p2, p3, p4);
+ case JS_FUNCTION_TYPE:
+ return Op::template apply<JSFunction::BodyDescriptor>(p1, p2, p3, p4);
+ case WEAK_CELL_TYPE:
+ return Op::template apply<WeakCell::BodyDescriptor>(p1, p2, p3, p4);
+ case JS_WEAK_REF_TYPE:
+ return Op::template apply<JSWeakRef::BodyDescriptor>(p1, p2, p3, p4);
+ case ODDBALL_TYPE:
+ return Op::template apply<Oddball::BodyDescriptor>(p1, p2, p3, p4);
+ case JS_PROXY_TYPE:
+ return Op::template apply<JSProxy::BodyDescriptor>(p1, p2, p3, p4);
+ case FOREIGN_TYPE:
+ return Op::template apply<Foreign::BodyDescriptor>(p1, p2, p3, p4);
+ case MAP_TYPE:
+ return Op::template apply<Map::BodyDescriptor>(p1, p2, p3, p4);
+ case CODE_TYPE:
+ return Op::template apply<Code::BodyDescriptor>(p1, p2, p3, p4);
+ case CELL_TYPE:
+ return Op::template apply<Cell::BodyDescriptor>(p1, p2, p3, p4);
+ case PROPERTY_CELL_TYPE:
+ return Op::template apply<PropertyCell::BodyDescriptor>(p1, p2, p3, p4);
+ case SYMBOL_TYPE:
+ return Op::template apply<Symbol::BodyDescriptor>(p1, p2, p3, p4);
+ case BYTECODE_ARRAY_TYPE:
+ return Op::template apply<BytecodeArray::BodyDescriptor>(p1, p2, p3, p4);
+ case SMALL_ORDERED_HASH_SET_TYPE:
+ return Op::template apply<
+ SmallOrderedHashTable<SmallOrderedHashSet>::BodyDescriptor>(p1, p2,
+ p3, p4);
+ case SMALL_ORDERED_HASH_MAP_TYPE:
+ return Op::template apply<
+ SmallOrderedHashTable<SmallOrderedHashMap>::BodyDescriptor>(p1, p2,
+ p3, p4);
+ case SMALL_ORDERED_NAME_DICTIONARY_TYPE:
+ return Op::template apply<
+ SmallOrderedHashTable<SmallOrderedNameDictionary>::BodyDescriptor>(
+ p1, p2, p3, p4);
+ case CODE_DATA_CONTAINER_TYPE:
+ return Op::template apply<CodeDataContainer::BodyDescriptor>(p1, p2, p3,
+ p4);
+ case PREPARSE_DATA_TYPE:
+ return Op::template apply<PreparseData::BodyDescriptor>(p1, p2, p3, p4);
+ case UNCOMPILED_DATA_WITHOUT_PREPARSE_DATA_TYPE:
+ return Op::template apply<
+ UncompiledDataWithoutPreparseData::BodyDescriptor>(p1, p2, p3, p4);
+ case UNCOMPILED_DATA_WITH_PREPARSE_DATA_TYPE:
+ return Op::template apply<UncompiledDataWithPreparseData::BodyDescriptor>(
+ p1, p2, p3, p4);
+ case HEAP_NUMBER_TYPE:
+ case MUTABLE_HEAP_NUMBER_TYPE:
+ case FILLER_TYPE:
+ case BYTE_ARRAY_TYPE:
+ case FREE_SPACE_TYPE:
+ case BIGINT_TYPE:
+ return ReturnType();
+
+ case SHARED_FUNCTION_INFO_TYPE: {
+ return Op::template apply<SharedFunctionInfo::BodyDescriptor>(p1, p2, p3,
+ p4);
+ }
+ case ALLOCATION_SITE_TYPE:
+ return Op::template apply<AllocationSite::BodyDescriptor>(p1, p2, p3, p4);
+
+#define MAKE_STRUCT_CASE(TYPE, Name, name) case TYPE:
+ STRUCT_LIST(MAKE_STRUCT_CASE)
+#undef MAKE_STRUCT_CASE
+ if (type == PROTOTYPE_INFO_TYPE) {
+ return Op::template apply<PrototypeInfo::BodyDescriptor>(p1, p2, p3,
+ p4);
+ } else if (type == WASM_CAPI_FUNCTION_DATA_TYPE) {
+ return Op::template apply<WasmCapiFunctionData::BodyDescriptor>(p1, p2,
+ p3, p4);
+ } else {
+ return Op::template apply<StructBodyDescriptor>(p1, p2, p3, p4);
+ }
+ case CALL_HANDLER_INFO_TYPE:
+ return Op::template apply<StructBodyDescriptor>(p1, p2, p3, p4);
+ case LOAD_HANDLER_TYPE:
+ case STORE_HANDLER_TYPE:
+ return Op::template apply<DataHandler::BodyDescriptor>(p1, p2, p3, p4);
+ default:
+ PrintF("Unknown type: %d\n", type);
+ UNREACHABLE();
+ }
+}
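+
+// Note: an Op passed to BodyDescriptorApply is expected to provide a static
+// template method apply<BodyDescriptor>(p1, p2, p3, p4). CallIterateBody
+// below is one such functor.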
+
+template <typename ObjectVisitor>
+void HeapObject::IterateFast(ObjectVisitor* v) {
+ BodyDescriptorBase::IteratePointer(*this, kMapOffset, v);
+ IterateBodyFast(v);
+}
+
+template <typename ObjectVisitor>
+void HeapObject::IterateBodyFast(ObjectVisitor* v) {
+ Map m = map();
+ IterateBodyFast(m, SizeFromMap(m), v);
+}
+
+struct CallIterateBody {
+ template <typename BodyDescriptor, typename ObjectVisitor>
+ static void apply(Map map, HeapObject obj, int object_size,
+ ObjectVisitor* v) {
+ BodyDescriptor::IterateBody(map, obj, object_size, v);
+ }
+};
+
+template <typename ObjectVisitor>
+void HeapObject::IterateBodyFast(Map map, int object_size, ObjectVisitor* v) {
+ BodyDescriptorApply<CallIterateBody, void>(map.instance_type(), map, *this,
+ object_size, v);
+}
+
+class EphemeronHashTable::BodyDescriptor final : public BodyDescriptorBase {
+ public:
+ static bool IsValidSlot(Map map, HeapObject obj, int offset) {
+ return (offset >= EphemeronHashTable::kHeaderSize);
+ }
+
+ template <typename ObjectVisitor>
+ static inline void IterateBody(Map map, HeapObject obj, int object_size,
+ ObjectVisitor* v) {
+ int entries_start = EphemeronHashTable::kHeaderSize +
+ EphemeronHashTable::kElementsStartIndex * kTaggedSize;
+ IteratePointers(obj, EphemeronHashTable::kHeaderSize, entries_start, v);
+ EphemeronHashTable table = EphemeronHashTable::unchecked_cast(obj);
+ int entries = table.Capacity();
+ for (int i = 0; i < entries; ++i) {
+ const int key_index = EphemeronHashTable::EntryToIndex(i);
+ const int value_index = EphemeronHashTable::EntryToValueIndex(i);
+ IterateEphemeron(obj, i, OffsetOfElementAt(key_index),
+ OffsetOfElementAt(value_index), v);
+ }
+ }
+
+ static inline int SizeOf(Map map, HeapObject object) {
+ return object.SizeFromMap(map);
+ }
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_OBJECTS_OBJECTS_BODY_DESCRIPTORS_INL_H_
diff --git a/deps/v8/src/objects/objects-body-descriptors.h b/deps/v8/src/objects/objects-body-descriptors.h
new file mode 100644
index 0000000000..728708f436
--- /dev/null
+++ b/deps/v8/src/objects/objects-body-descriptors.h
@@ -0,0 +1,186 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_OBJECTS_BODY_DESCRIPTORS_H_
+#define V8_OBJECTS_OBJECTS_BODY_DESCRIPTORS_H_
+
+#include "src/objects/map.h"
+#include "src/objects/objects.h"
+
+namespace v8 {
+namespace internal {
+
+// This is the base class for object body descriptors.
+//
+// Each BodyDescriptor subclass must provide the following methods:
+//
+// 1) Returns true if the object contains a tagged value at the given offset.
+//    It is used for filtering invalid slots. If the offset points outside
+// of the object or to the map word, the result is UNDEFINED (!!!).
+//
+// static bool IsValidSlot(Map map, HeapObject obj, int offset);
+//
+//
+// 2) Iterates the object's body using a stateful object visitor.
+//
+// template <typename ObjectVisitor>
+// static inline void IterateBody(Map map, HeapObject obj, int object_size,
+// ObjectVisitor* v);
+class BodyDescriptorBase {
+ public:
+ template <typename ObjectVisitor>
+ static inline void IteratePointers(HeapObject obj, int start_offset,
+ int end_offset, ObjectVisitor* v);
+
+ template <typename ObjectVisitor>
+ static inline void IteratePointer(HeapObject obj, int offset,
+ ObjectVisitor* v);
+
+ template <typename ObjectVisitor>
+ static inline void IterateCustomWeakPointers(HeapObject obj, int start_offset,
+ int end_offset,
+ ObjectVisitor* v);
+
+ template <typename ObjectVisitor>
+ static inline void IterateCustomWeakPointer(HeapObject obj, int offset,
+ ObjectVisitor* v);
+
+ template <typename ObjectVisitor>
+ static inline void IterateEphemeron(HeapObject obj, int index, int key_offset,
+ int value_offset, ObjectVisitor* v);
+
+ template <typename ObjectVisitor>
+ static inline void IterateMaybeWeakPointers(HeapObject obj, int start_offset,
+ int end_offset, ObjectVisitor* v);
+
+ template <typename ObjectVisitor>
+ static inline void IterateMaybeWeakPointer(HeapObject obj, int offset,
+ ObjectVisitor* v);
+
+ protected:
+ // Returns true for all header and embedder fields.
+ static inline bool IsValidJSObjectSlotImpl(Map map, HeapObject obj,
+ int offset);
+
+ // Returns true for all header and embedder fields.
+ static inline bool IsValidEmbedderJSObjectSlotImpl(Map map, HeapObject obj,
+ int offset);
+
+ // Treats all header and embedder fields in the range as tagged.
+ template <typename ObjectVisitor>
+ static inline void IterateJSObjectBodyImpl(Map map, HeapObject obj,
+ int start_offset, int end_offset,
+ ObjectVisitor* v);
+};
+
+// This class describes the body of a fixed-size object whose pointer fields
+// are all located in the [start_offset, end_offset) interval.
+template <int start_offset, int end_offset, int size>
+class FixedBodyDescriptor final : public BodyDescriptorBase {
+ public:
+ static const int kStartOffset = start_offset;
+ static const int kEndOffset = end_offset;
+ static const int kSize = size;
+
+ static bool IsValidSlot(Map map, HeapObject obj, int offset) {
+ return offset >= kStartOffset && offset < kEndOffset;
+ }
+
+ template <typename ObjectVisitor>
+ static inline void IterateBody(Map map, HeapObject obj, ObjectVisitor* v) {
+ IteratePointers(obj, start_offset, end_offset, v);
+ }
+
+ template <typename ObjectVisitor>
+ static inline void IterateBody(Map map, HeapObject obj, int object_size,
+ ObjectVisitor* v) {
+ IterateBody(map, obj, v);
+ }
+
+ static inline int SizeOf(Map map, HeapObject object) { return kSize; }
+};
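+
+// As a sketch (all names hypothetical), an object whose tagged fields occupy
+// [kPointerFieldsStartOffset, kPointerFieldsEndOffset) of a kSize-byte body
+// could declare:
+//
+//   using BodyDescriptor = FixedBodyDescriptor<kPointerFieldsStartOffset,
+//                                              kPointerFieldsEndOffset,
+//                                              kSize>;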
+
+// This class describes the body of a variable-size object whose pointer
+// fields are all located in the [start_offset, object_size) interval.
+template <int start_offset>
+class FlexibleBodyDescriptor final : public BodyDescriptorBase {
+ public:
+ static const int kStartOffset = start_offset;
+
+ static bool IsValidSlot(Map map, HeapObject obj, int offset) {
+ return (offset >= kStartOffset);
+ }
+
+ template <typename ObjectVisitor>
+ static inline void IterateBody(Map map, HeapObject obj, int object_size,
+ ObjectVisitor* v) {
+ IteratePointers(obj, start_offset, object_size, v);
+ }
+
+ static inline int SizeOf(Map map, HeapObject object);
+};
+
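+// Structs iterate every tagged field from the end of the HeapObject header
+// to the end of the object: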
+using StructBodyDescriptor = FlexibleBodyDescriptor<HeapObject::kHeaderSize>;
+
+template <int start_offset>
+class FlexibleWeakBodyDescriptor final : public BodyDescriptorBase {
+ public:
+ static const int kStartOffset = start_offset;
+
+ static bool IsValidSlot(Map map, HeapObject obj, int offset) {
+ return (offset >= kStartOffset);
+ }
+
+ template <typename ObjectVisitor>
+ static inline void IterateBody(Map map, HeapObject obj, int object_size,
+ ObjectVisitor* v) {
+ IterateMaybeWeakPointers(obj, start_offset, object_size, v);
+ }
+
+ static inline int SizeOf(Map map, HeapObject object);
+};
+
+// This class describes the body of an object whose parent class also has a
+// body descriptor. It represents the union of the parent's body descriptor
+// and a new descriptor for the child -- so both the parent's and the child's
+// slots are iterated. The parent must be fixed-size, and its slots must be
+// disjoint from the child's.
+template <class ParentBodyDescriptor, class ChildBodyDescriptor>
+class SubclassBodyDescriptor final : public BodyDescriptorBase {
+ public:
+  // The parent must end at or before the child's start offset, to make sure
+  // that their slots are disjoint.
+ STATIC_ASSERT(ParentBodyDescriptor::kSize <=
+ ChildBodyDescriptor::kStartOffset);
+
+ static bool IsValidSlot(Map map, HeapObject obj, int offset) {
+ return ParentBodyDescriptor::IsValidSlot(map, obj, offset) ||
+ ChildBodyDescriptor::IsValidSlot(map, obj, offset);
+ }
+
+ template <typename ObjectVisitor>
+ static inline void IterateBody(Map map, HeapObject obj, ObjectVisitor* v) {
+ ParentBodyDescriptor::IterateBody(map, obj, v);
+ ChildBodyDescriptor::IterateBody(map, obj, v);
+ }
+
+ template <typename ObjectVisitor>
+ static inline void IterateBody(Map map, HeapObject obj, int object_size,
+ ObjectVisitor* v) {
+ ParentBodyDescriptor::IterateBody(map, obj, object_size, v);
+ ChildBodyDescriptor::IterateBody(map, obj, object_size, v);
+ }
+
+ static inline int SizeOf(Map map, HeapObject object) {
+ // The child should know its full size.
+ return ChildBodyDescriptor::SizeOf(map, object);
+ }
+};
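+
+// As a sketch (names hypothetical), a fixed-size class Derived that appends
+// tagged fields after its fixed-size parent Base could combine the two
+// descriptors as:
+//
+//   using BodyDescriptor = SubclassBodyDescriptor<
+//       Base::BodyDescriptor,
+//       FixedBodyDescriptor<Base::kSize, Derived::kSize, Derived::kSize>>;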
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_OBJECTS_OBJECTS_BODY_DESCRIPTORS_H_
diff --git a/deps/v8/src/objects/objects-definitions.h b/deps/v8/src/objects/objects-definitions.h
new file mode 100644
index 0000000000..90824c68ef
--- /dev/null
+++ b/deps/v8/src/objects/objects-definitions.h
@@ -0,0 +1,400 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_OBJECTS_DEFINITIONS_H_
+#define V8_OBJECTS_OBJECTS_DEFINITIONS_H_
+
+#include "src/init/heap-symbols.h"
+
+namespace v8 {
+
+namespace internal {
+
+// All Maps have a field instance_type containing an InstanceType.
+// It describes the type of the instances.
+//
+// As an example, a JavaScript object is a heap object and its map
+// instance_type is JS_OBJECT_TYPE.
+//
+// The names of the string instance types are intended to systematically
+// mirror their encoding in the instance_type field of the map. The default
+// encoding is considered TWO_BYTE. It is not mentioned in the name. ONE_BYTE
+// encoding is mentioned explicitly in the name. Likewise, the default
+// representation is considered sequential. It is not mentioned in the
+// name. The other representations (e.g. CONS, EXTERNAL) are explicitly
+// mentioned. Finally, the string is either a STRING_TYPE (if it is a normal
+// string) or an INTERNALIZED_STRING_TYPE (if it is an internalized string).
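+//
+// For example, CONS_ONE_BYTE_STRING_TYPE names a one-byte-encoded cons
+// string, while plain STRING_TYPE names a sequential two-byte string.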
+//
+// NOTE: The following depend on the string types having instance_types that
+// are less than those of all other types:
+// HeapObject::Size, HeapObject::IterateBody, the typeof operator, and
+// Object::IsString.
+//
+// NOTE: Everything following JS_VALUE_TYPE is considered a
+// JSObject for GC purposes. The first four entries here have typeof
+// 'object', whereas JS_FUNCTION_TYPE has typeof 'function'.
+//
+// NOTE: The list had to be split in two because of conditional items from
+// the INTL namespace. They can't simply be appended to the end because of
+// the checks we do in tests (which expect JS_FUNCTION_TYPE to be last).
+#define INSTANCE_TYPE_LIST_BEFORE_INTL(V) \
+ V(INTERNALIZED_STRING_TYPE) \
+ V(EXTERNAL_INTERNALIZED_STRING_TYPE) \
+ V(ONE_BYTE_INTERNALIZED_STRING_TYPE) \
+ V(EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE) \
+ V(UNCACHED_EXTERNAL_INTERNALIZED_STRING_TYPE) \
+ V(UNCACHED_EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE) \
+ V(STRING_TYPE) \
+ V(CONS_STRING_TYPE) \
+ V(EXTERNAL_STRING_TYPE) \
+ V(SLICED_STRING_TYPE) \
+ V(THIN_STRING_TYPE) \
+ V(ONE_BYTE_STRING_TYPE) \
+ V(CONS_ONE_BYTE_STRING_TYPE) \
+ V(EXTERNAL_ONE_BYTE_STRING_TYPE) \
+ V(SLICED_ONE_BYTE_STRING_TYPE) \
+ V(THIN_ONE_BYTE_STRING_TYPE) \
+ V(UNCACHED_EXTERNAL_STRING_TYPE) \
+ V(UNCACHED_EXTERNAL_ONE_BYTE_STRING_TYPE) \
+ \
+ V(SYMBOL_TYPE) \
+ V(HEAP_NUMBER_TYPE) \
+ V(BIGINT_TYPE) \
+ V(ODDBALL_TYPE) \
+ \
+ V(MAP_TYPE) \
+ V(CODE_TYPE) \
+ V(MUTABLE_HEAP_NUMBER_TYPE) \
+ V(FOREIGN_TYPE) \
+ V(BYTE_ARRAY_TYPE) \
+ V(BYTECODE_ARRAY_TYPE) \
+ V(FREE_SPACE_TYPE) \
+ \
+ V(FIXED_DOUBLE_ARRAY_TYPE) \
+ V(FEEDBACK_METADATA_TYPE) \
+ V(FILLER_TYPE) \
+ \
+ V(ACCESS_CHECK_INFO_TYPE) \
+ V(ACCESSOR_INFO_TYPE) \
+ V(ACCESSOR_PAIR_TYPE) \
+ V(ALIASED_ARGUMENTS_ENTRY_TYPE) \
+ V(ALLOCATION_MEMENTO_TYPE) \
+ V(ASM_WASM_DATA_TYPE) \
+ V(ASYNC_GENERATOR_REQUEST_TYPE) \
+ V(CLASS_POSITIONS_TYPE) \
+ V(DEBUG_INFO_TYPE) \
+ V(ENUM_CACHE_TYPE) \
+ V(FUNCTION_TEMPLATE_INFO_TYPE) \
+ V(FUNCTION_TEMPLATE_RARE_DATA_TYPE) \
+ V(INTERCEPTOR_INFO_TYPE) \
+ V(INTERPRETER_DATA_TYPE) \
+ V(MODULE_INFO_ENTRY_TYPE) \
+ V(MODULE_TYPE) \
+ V(OBJECT_TEMPLATE_INFO_TYPE) \
+ V(PROMISE_CAPABILITY_TYPE) \
+ V(PROMISE_REACTION_TYPE) \
+ V(PROTOTYPE_INFO_TYPE) \
+ V(SCRIPT_TYPE) \
+ V(SOURCE_POSITION_TABLE_WITH_FRAME_CACHE_TYPE) \
+ V(STACK_FRAME_INFO_TYPE) \
+ V(STACK_TRACE_FRAME_TYPE) \
+ V(TEMPLATE_OBJECT_DESCRIPTION_TYPE) \
+ V(TUPLE2_TYPE) \
+ V(TUPLE3_TYPE) \
+ V(ARRAY_BOILERPLATE_DESCRIPTION_TYPE) \
+ V(WASM_CAPI_FUNCTION_DATA_TYPE) \
+ V(WASM_DEBUG_INFO_TYPE) \
+ V(WASM_EXCEPTION_TAG_TYPE) \
+ V(WASM_EXPORTED_FUNCTION_DATA_TYPE) \
+ V(WASM_JS_FUNCTION_DATA_TYPE) \
+ \
+ V(CALLABLE_TASK_TYPE) \
+ V(CALLBACK_TASK_TYPE) \
+ V(PROMISE_FULFILL_REACTION_JOB_TASK_TYPE) \
+ V(PROMISE_REJECT_REACTION_JOB_TASK_TYPE) \
+ V(PROMISE_RESOLVE_THENABLE_JOB_TASK_TYPE) \
+ V(FINALIZATION_GROUP_CLEANUP_JOB_TASK_TYPE) \
+ \
+ V(ALLOCATION_SITE_TYPE) \
+ V(EMBEDDER_DATA_ARRAY_TYPE) \
+ \
+ V(FIXED_ARRAY_TYPE) \
+ V(OBJECT_BOILERPLATE_DESCRIPTION_TYPE) \
+ V(CLOSURE_FEEDBACK_CELL_ARRAY_TYPE) \
+ V(HASH_TABLE_TYPE) \
+ V(ORDERED_HASH_MAP_TYPE) \
+ V(ORDERED_HASH_SET_TYPE) \
+ V(ORDERED_NAME_DICTIONARY_TYPE) \
+ V(NAME_DICTIONARY_TYPE) \
+ V(GLOBAL_DICTIONARY_TYPE) \
+ V(NUMBER_DICTIONARY_TYPE) \
+ V(SIMPLE_NUMBER_DICTIONARY_TYPE) \
+ V(STRING_TABLE_TYPE) \
+ V(EPHEMERON_HASH_TABLE_TYPE) \
+ V(SCOPE_INFO_TYPE) \
+ V(SCRIPT_CONTEXT_TABLE_TYPE) \
+ \
+ V(AWAIT_CONTEXT_TYPE) \
+ V(BLOCK_CONTEXT_TYPE) \
+ V(CATCH_CONTEXT_TYPE) \
+ V(DEBUG_EVALUATE_CONTEXT_TYPE) \
+ V(EVAL_CONTEXT_TYPE) \
+ V(FUNCTION_CONTEXT_TYPE) \
+ V(MODULE_CONTEXT_TYPE) \
+ V(NATIVE_CONTEXT_TYPE) \
+ V(SCRIPT_CONTEXT_TYPE) \
+ V(WITH_CONTEXT_TYPE) \
+ \
+ V(WEAK_FIXED_ARRAY_TYPE) \
+ V(TRANSITION_ARRAY_TYPE) \
+ \
+ V(CALL_HANDLER_INFO_TYPE) \
+ V(CELL_TYPE) \
+ V(CODE_DATA_CONTAINER_TYPE) \
+ V(DESCRIPTOR_ARRAY_TYPE) \
+ V(FEEDBACK_CELL_TYPE) \
+ V(FEEDBACK_VECTOR_TYPE) \
+ V(LOAD_HANDLER_TYPE) \
+ V(PREPARSE_DATA_TYPE) \
+ V(PROPERTY_ARRAY_TYPE) \
+ V(PROPERTY_CELL_TYPE) \
+ V(SHARED_FUNCTION_INFO_TYPE) \
+ V(SMALL_ORDERED_HASH_MAP_TYPE) \
+ V(SMALL_ORDERED_HASH_SET_TYPE) \
+ V(SMALL_ORDERED_NAME_DICTIONARY_TYPE) \
+ V(STORE_HANDLER_TYPE) \
+ V(UNCOMPILED_DATA_WITHOUT_PREPARSE_DATA_TYPE) \
+ V(UNCOMPILED_DATA_WITH_PREPARSE_DATA_TYPE) \
+ V(WEAK_ARRAY_LIST_TYPE) \
+ V(WEAK_CELL_TYPE) \
+ \
+ V(JS_PROXY_TYPE) \
+ V(JS_GLOBAL_OBJECT_TYPE) \
+ V(JS_GLOBAL_PROXY_TYPE) \
+ V(JS_MODULE_NAMESPACE_TYPE) \
+ V(JS_SPECIAL_API_OBJECT_TYPE) \
+ V(JS_VALUE_TYPE) \
+ V(JS_API_OBJECT_TYPE) \
+ V(JS_OBJECT_TYPE) \
+ \
+ V(JS_ARGUMENTS_TYPE) \
+ V(JS_ARRAY_BUFFER_TYPE) \
+ V(JS_ARRAY_ITERATOR_TYPE) \
+ V(JS_ARRAY_TYPE) \
+ V(JS_ASYNC_FROM_SYNC_ITERATOR_TYPE) \
+ V(JS_ASYNC_FUNCTION_OBJECT_TYPE) \
+ V(JS_ASYNC_GENERATOR_OBJECT_TYPE) \
+ V(JS_CONTEXT_EXTENSION_OBJECT_TYPE) \
+ V(JS_DATE_TYPE) \
+ V(JS_ERROR_TYPE) \
+ V(JS_GENERATOR_OBJECT_TYPE) \
+ V(JS_MAP_TYPE) \
+ V(JS_MAP_KEY_ITERATOR_TYPE) \
+ V(JS_MAP_KEY_VALUE_ITERATOR_TYPE) \
+ V(JS_MAP_VALUE_ITERATOR_TYPE) \
+ V(JS_MESSAGE_OBJECT_TYPE) \
+ V(JS_PROMISE_TYPE) \
+ V(JS_REGEXP_TYPE) \
+ V(JS_REGEXP_STRING_ITERATOR_TYPE) \
+ V(JS_SET_TYPE) \
+ V(JS_SET_KEY_VALUE_ITERATOR_TYPE) \
+ V(JS_SET_VALUE_ITERATOR_TYPE) \
+ V(JS_STRING_ITERATOR_TYPE) \
+ V(JS_WEAK_REF_TYPE) \
+ V(JS_FINALIZATION_GROUP_CLEANUP_ITERATOR_TYPE) \
+ V(JS_FINALIZATION_GROUP_TYPE) \
+ V(JS_WEAK_MAP_TYPE) \
+ V(JS_WEAK_SET_TYPE) \
+ V(JS_TYPED_ARRAY_TYPE) \
+ V(JS_DATA_VIEW_TYPE)
+
+#define INSTANCE_TYPE_LIST_AFTER_INTL(V) \
+ V(WASM_EXCEPTION_TYPE) \
+ V(WASM_GLOBAL_TYPE) \
+ V(WASM_INSTANCE_TYPE) \
+ V(WASM_MEMORY_TYPE) \
+ V(WASM_MODULE_TYPE) \
+ V(WASM_TABLE_TYPE) \
+ V(JS_BOUND_FUNCTION_TYPE) \
+ V(JS_FUNCTION_TYPE)
+
+#ifdef V8_INTL_SUPPORT
+#define INSTANCE_TYPE_LIST(V) \
+ INSTANCE_TYPE_LIST_BEFORE_INTL(V) \
+ V(JS_INTL_V8_BREAK_ITERATOR_TYPE) \
+ V(JS_INTL_COLLATOR_TYPE) \
+ V(JS_INTL_DATE_TIME_FORMAT_TYPE) \
+ V(JS_INTL_LIST_FORMAT_TYPE) \
+ V(JS_INTL_LOCALE_TYPE) \
+ V(JS_INTL_NUMBER_FORMAT_TYPE) \
+ V(JS_INTL_PLURAL_RULES_TYPE) \
+ V(JS_INTL_RELATIVE_TIME_FORMAT_TYPE) \
+ V(JS_INTL_SEGMENT_ITERATOR_TYPE) \
+ V(JS_INTL_SEGMENTER_TYPE) \
+ INSTANCE_TYPE_LIST_AFTER_INTL(V)
+#else
+#define INSTANCE_TYPE_LIST(V) \
+ INSTANCE_TYPE_LIST_BEFORE_INTL(V) \
+ INSTANCE_TYPE_LIST_AFTER_INTL(V)
+#endif // V8_INTL_SUPPORT
+
+// Since string types are not consecutive, this macro is used to
+// iterate over them.
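+// For illustration (STRING_TYPE_CASE is hypothetical), a case-generating
+// expansion could look like:
+//
+//   #define STRING_TYPE_CASE(TYPE, size, name, Name) case TYPE:
+//     STRING_TYPE_LIST(STRING_TYPE_CASE)
+//   #undef STRING_TYPE_CASE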
+#define STRING_TYPE_LIST(V) \
+ V(STRING_TYPE, kVariableSizeSentinel, string, String) \
+ V(ONE_BYTE_STRING_TYPE, kVariableSizeSentinel, one_byte_string, \
+ OneByteString) \
+ V(CONS_STRING_TYPE, ConsString::kSize, cons_string, ConsString) \
+ V(CONS_ONE_BYTE_STRING_TYPE, ConsString::kSize, cons_one_byte_string, \
+ ConsOneByteString) \
+ V(SLICED_STRING_TYPE, SlicedString::kSize, sliced_string, SlicedString) \
+ V(SLICED_ONE_BYTE_STRING_TYPE, SlicedString::kSize, sliced_one_byte_string, \
+ SlicedOneByteString) \
+ V(EXTERNAL_STRING_TYPE, ExternalTwoByteString::kSize, external_string, \
+ ExternalString) \
+ V(EXTERNAL_ONE_BYTE_STRING_TYPE, ExternalOneByteString::kSize, \
+ external_one_byte_string, ExternalOneByteString) \
+ V(UNCACHED_EXTERNAL_STRING_TYPE, ExternalTwoByteString::kUncachedSize, \
+ uncached_external_string, UncachedExternalString) \
+ V(UNCACHED_EXTERNAL_ONE_BYTE_STRING_TYPE, \
+ ExternalOneByteString::kUncachedSize, uncached_external_one_byte_string, \
+ UncachedExternalOneByteString) \
+ \
+ V(INTERNALIZED_STRING_TYPE, kVariableSizeSentinel, internalized_string, \
+ InternalizedString) \
+ V(ONE_BYTE_INTERNALIZED_STRING_TYPE, kVariableSizeSentinel, \
+ one_byte_internalized_string, OneByteInternalizedString) \
+ V(EXTERNAL_INTERNALIZED_STRING_TYPE, ExternalTwoByteString::kSize, \
+ external_internalized_string, ExternalInternalizedString) \
+ V(EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE, ExternalOneByteString::kSize, \
+ external_one_byte_internalized_string, ExternalOneByteInternalizedString) \
+ V(UNCACHED_EXTERNAL_INTERNALIZED_STRING_TYPE, \
+ ExternalTwoByteString::kUncachedSize, \
+ uncached_external_internalized_string, UncachedExternalInternalizedString) \
+ V(UNCACHED_EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE, \
+ ExternalOneByteString::kUncachedSize, \
+ uncached_external_one_byte_internalized_string, \
+ UncachedExternalOneByteInternalizedString) \
+ V(THIN_STRING_TYPE, ThinString::kSize, thin_string, ThinString) \
+ V(THIN_ONE_BYTE_STRING_TYPE, ThinString::kSize, thin_one_byte_string, \
+ ThinOneByteString)
+
+// A struct is a simple object with a set of object-valued fields. Including
+// an object type in this list causes the compiler to generate most of the
+// boilerplate code for the class, including allocation and garbage
+// collection routines, casts and predicates. All you need to define are the
+// class, its methods and the object verification routines. Easy, no?
+//
+// Note that for subtle reasons related to the ordering or numerical values of
+// type tags, elements in this list have to be added to the INSTANCE_TYPE_LIST
+// manually.
+#define STRUCT_LIST_GENERATOR(V, _) \
+ V(_, ACCESS_CHECK_INFO_TYPE, AccessCheckInfo, access_check_info) \
+ V(_, ACCESSOR_INFO_TYPE, AccessorInfo, accessor_info) \
+ V(_, ACCESSOR_PAIR_TYPE, AccessorPair, accessor_pair) \
+ V(_, ALIASED_ARGUMENTS_ENTRY_TYPE, AliasedArgumentsEntry, \
+ aliased_arguments_entry) \
+ V(_, ALLOCATION_MEMENTO_TYPE, AllocationMemento, allocation_memento) \
+ V(_, ASM_WASM_DATA_TYPE, AsmWasmData, asm_wasm_data) \
+ V(_, ASYNC_GENERATOR_REQUEST_TYPE, AsyncGeneratorRequest, \
+ async_generator_request) \
+ V(_, CLASS_POSITIONS_TYPE, ClassPositions, class_positions) \
+ V(_, DEBUG_INFO_TYPE, DebugInfo, debug_info) \
+ V(_, ENUM_CACHE_TYPE, EnumCache, enum_cache) \
+ V(_, FUNCTION_TEMPLATE_INFO_TYPE, FunctionTemplateInfo, \
+ function_template_info) \
+ V(_, FUNCTION_TEMPLATE_RARE_DATA_TYPE, FunctionTemplateRareData, \
+ function_template_rare_data) \
+ V(_, INTERCEPTOR_INFO_TYPE, InterceptorInfo, interceptor_info) \
+ V(_, INTERPRETER_DATA_TYPE, InterpreterData, interpreter_data) \
+ V(_, MODULE_INFO_ENTRY_TYPE, ModuleInfoEntry, module_info_entry) \
+ V(_, MODULE_TYPE, Module, module) \
+ V(_, OBJECT_TEMPLATE_INFO_TYPE, ObjectTemplateInfo, object_template_info) \
+ V(_, PROMISE_CAPABILITY_TYPE, PromiseCapability, promise_capability) \
+ V(_, PROMISE_REACTION_TYPE, PromiseReaction, promise_reaction) \
+ V(_, PROTOTYPE_INFO_TYPE, PrototypeInfo, prototype_info) \
+ V(_, SCRIPT_TYPE, Script, script) \
+ V(_, SOURCE_POSITION_TABLE_WITH_FRAME_CACHE_TYPE, \
+ SourcePositionTableWithFrameCache, source_position_table_with_frame_cache) \
+ V(_, STACK_FRAME_INFO_TYPE, StackFrameInfo, stack_frame_info) \
+ V(_, STACK_TRACE_FRAME_TYPE, StackTraceFrame, stack_trace_frame) \
+ V(_, TEMPLATE_OBJECT_DESCRIPTION_TYPE, TemplateObjectDescription, \
+ template_object_description) \
+ V(_, TUPLE2_TYPE, Tuple2, tuple2) \
+ V(_, TUPLE3_TYPE, Tuple3, tuple3) \
+ V(_, ARRAY_BOILERPLATE_DESCRIPTION_TYPE, ArrayBoilerplateDescription, \
+ array_boilerplate_description) \
+ V(_, WASM_CAPI_FUNCTION_DATA_TYPE, WasmCapiFunctionData, \
+ wasm_capi_function_data) \
+ V(_, WASM_DEBUG_INFO_TYPE, WasmDebugInfo, wasm_debug_info) \
+ V(_, WASM_EXCEPTION_TAG_TYPE, WasmExceptionTag, wasm_exception_tag) \
+ V(_, WASM_EXPORTED_FUNCTION_DATA_TYPE, WasmExportedFunctionData, \
+ wasm_exported_function_data) \
+ V(_, WASM_JS_FUNCTION_DATA_TYPE, WasmJSFunctionData, wasm_js_function_data) \
+ V(_, CALLABLE_TASK_TYPE, CallableTask, callable_task) \
+ V(_, CALLBACK_TASK_TYPE, CallbackTask, callback_task) \
+ V(_, PROMISE_FULFILL_REACTION_JOB_TASK_TYPE, PromiseFulfillReactionJobTask, \
+ promise_fulfill_reaction_job_task) \
+ V(_, PROMISE_REJECT_REACTION_JOB_TASK_TYPE, PromiseRejectReactionJobTask, \
+ promise_reject_reaction_job_task) \
+ V(_, PROMISE_RESOLVE_THENABLE_JOB_TASK_TYPE, PromiseResolveThenableJobTask, \
+ promise_resolve_thenable_job_task) \
+ V(_, FINALIZATION_GROUP_CLEANUP_JOB_TASK_TYPE, \
+ FinalizationGroupCleanupJobTask, finalization_group_cleanup_job_task)
+
+// Adapts one STRUCT_LIST_GENERATOR entry to the STRUCT_LIST entry
+#define STRUCT_LIST_ADAPTER(V, NAME, Name, name) V(NAME, Name, name)
+
+// Produces (NAME, Name, name) entries.
+#define STRUCT_LIST(V) STRUCT_LIST_GENERATOR(STRUCT_LIST_ADAPTER, V)
+
+// Adapts one STRUCT_LIST_GENERATOR entry to the STRUCT_MAPS_LIST entry
+#define STRUCT_MAPS_LIST_ADAPTER(V, NAME, Name, name) \
+ V(Map, name##_map, Name##Map)
+
+// Produces (Map, struct_name_map, StructNameMap) entries
+#define STRUCT_MAPS_LIST(V) STRUCT_LIST_GENERATOR(STRUCT_MAPS_LIST_ADAPTER, V)
+
+//
+// The following macros define the list of allocation site objects and the
+// list of their maps.
+//
+#define ALLOCATION_SITE_LIST(V, _) \
+ V(_, ALLOCATION_SITE_TYPE, AllocationSite, WithWeakNext, allocation_site) \
+ V(_, ALLOCATION_SITE_TYPE, AllocationSite, WithoutWeakNext, \
+ allocation_site_without_weaknext)
+
+// Adapts one ALLOCATION_SITE_LIST entry to the ALLOCATION_SITE_MAPS_LIST entry
+#define ALLOCATION_SITE_MAPS_LIST_ADAPTER(V, TYPE, Name, Size, name_size) \
+ V(Map, name_size##_map, Name##Size##Map)
+
+// Produces (Map, allocation_site_name_map, AllocationSiteNameMap) entries
+#define ALLOCATION_SITE_MAPS_LIST(V) \
+ ALLOCATION_SITE_LIST(ALLOCATION_SITE_MAPS_LIST_ADAPTER, V)
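+
+// For example, the first ALLOCATION_SITE_LIST entry expands to
+// V(Map, allocation_site_map, AllocationSiteWithWeakNextMap).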
+
+//
+// The following macros define the list of data handler objects and the list
+// of their maps.
+//
+#define DATA_HANDLER_LIST(V, _) \
+ V(_, LOAD_HANDLER_TYPE, LoadHandler, 1, load_handler1) \
+ V(_, LOAD_HANDLER_TYPE, LoadHandler, 2, load_handler2) \
+ V(_, LOAD_HANDLER_TYPE, LoadHandler, 3, load_handler3) \
+ V(_, STORE_HANDLER_TYPE, StoreHandler, 0, store_handler0) \
+ V(_, STORE_HANDLER_TYPE, StoreHandler, 1, store_handler1) \
+ V(_, STORE_HANDLER_TYPE, StoreHandler, 2, store_handler2) \
+ V(_, STORE_HANDLER_TYPE, StoreHandler, 3, store_handler3)
+
+// Adapts one DATA_HANDLER_LIST entry to the DATA_HANDLER_MAPS_LIST entry.
+#define DATA_HANDLER_MAPS_LIST_ADAPTER(V, TYPE, Name, Size, name_size) \
+ V(Map, name_size##_map, Name##Size##Map)
+
+// Produces (Map, handler_name_map, HandlerNameMap) entries
+#define DATA_HANDLER_MAPS_LIST(V) \
+ DATA_HANDLER_LIST(DATA_HANDLER_MAPS_LIST_ADAPTER, V)
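+
+// For example, the first DATA_HANDLER_LIST entry expands to
+// V(Map, load_handler1_map, LoadHandler1Map).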
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_OBJECTS_OBJECTS_DEFINITIONS_H_
diff --git a/deps/v8/src/objects/objects-inl.h b/deps/v8/src/objects/objects-inl.h
new file mode 100644
index 0000000000..ce92d64f2f
--- /dev/null
+++ b/deps/v8/src/objects/objects-inl.h
@@ -0,0 +1,1039 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Review notes:
+//
+// - The use of macros in these inline functions may seem superfluous
+// but it is absolutely needed to make sure gcc generates optimal
+// code. gcc is not happy when attempting to inline too deep.
+//
+
+#ifndef V8_OBJECTS_OBJECTS_INL_H_
+#define V8_OBJECTS_OBJECTS_INL_H_
+
+#include "src/objects/objects.h"
+
+#include "src/base/bits.h"
+#include "src/builtins/builtins.h"
+#include "src/common/v8memory.h"
+#include "src/handles/handles-inl.h"
+#include "src/heap/factory.h"
+#include "src/heap/heap-write-barrier-inl.h"
+#include "src/numbers/conversions.h"
+#include "src/numbers/double.h"
+#include "src/objects/bigint.h"
+#include "src/objects/heap-number-inl.h"
+#include "src/objects/heap-object.h"
+#include "src/objects/js-proxy-inl.h" // TODO(jkummerow): Drop.
+#include "src/objects/keys.h"
+#include "src/objects/literal-objects.h"
+#include "src/objects/lookup-inl.h" // TODO(jkummerow): Drop.
+#include "src/objects/oddball.h"
+#include "src/objects/property-details.h"
+#include "src/objects/property.h"
+#include "src/objects/regexp-match-info.h"
+#include "src/objects/scope-info.h"
+#include "src/objects/shared-function-info.h"
+#include "src/objects/slots-inl.h"
+#include "src/objects/smi-inl.h"
+#include "src/objects/tagged-impl-inl.h"
+#include "src/objects/templates.h"
+#include "src/sanitizer/tsan.h"
+#include "torque-generated/class-definitions-tq-inl.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+PropertyDetails::PropertyDetails(Smi smi) { value_ = smi.value(); }
+
+Smi PropertyDetails::AsSmi() const {
+  // Ensure the upper 2 bits have the same value by sign-extending bit 30.
+  // This is necessary to be able to use the 31st bit of the property details.
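+  // For example, if bit 30 of value_ is set, (value_ << 1) >> 1 sets bit 31
+  // as well, so the top two bits agree and all 31 payload bits survive the
+  // round trip.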
+ int value = value_ << 1;
+ return Smi::FromInt(value >> 1);
+}
+
+int PropertyDetails::field_width_in_words() const {
+ DCHECK_EQ(location(), kField);
+ if (!FLAG_unbox_double_fields) return 1;
+ if (kDoubleSize == kTaggedSize) return 1;
+ return representation().IsDouble() ? kDoubleSize / kTaggedSize : 1;
+}
+
+bool HeapObject::IsSloppyArgumentsElements() const {
+ return IsFixedArrayExact();
+}
+
+bool HeapObject::IsJSSloppyArgumentsObject() const {
+ return IsJSArgumentsObject();
+}
+
+bool HeapObject::IsJSGeneratorObject() const {
+ return map().instance_type() == JS_GENERATOR_OBJECT_TYPE ||
+ IsJSAsyncFunctionObject() || IsJSAsyncGeneratorObject();
+}
+
+bool HeapObject::IsDataHandler() const {
+ return IsLoadHandler() || IsStoreHandler();
+}
+
+bool HeapObject::IsClassBoilerplate() const { return IsFixedArrayExact(); }
+
+#define IS_TYPE_FUNCTION_DEF(type_) \
+ bool Object::Is##type_() const { \
+ return IsHeapObject() && HeapObject::cast(*this).Is##type_(); \
+ }
+HEAP_OBJECT_TYPE_LIST(IS_TYPE_FUNCTION_DEF)
+#undef IS_TYPE_FUNCTION_DEF
+
+#define IS_TYPE_FUNCTION_DEF(Type, Value) \
+ bool Object::Is##Type(Isolate* isolate) const { \
+ return Is##Type(ReadOnlyRoots(isolate->heap())); \
+ } \
+ bool Object::Is##Type(ReadOnlyRoots roots) const { \
+ return *this == roots.Value(); \
+ } \
+ bool Object::Is##Type() const { \
+ return IsHeapObject() && HeapObject::cast(*this).Is##Type(); \
+ } \
+ bool HeapObject::Is##Type(Isolate* isolate) const { \
+ return Object::Is##Type(isolate); \
+ } \
+ bool HeapObject::Is##Type(ReadOnlyRoots roots) const { \
+ return Object::Is##Type(roots); \
+ } \
+ bool HeapObject::Is##Type() const { return Is##Type(GetReadOnlyRoots()); }
+ODDBALL_LIST(IS_TYPE_FUNCTION_DEF)
+#undef IS_TYPE_FUNCTION_DEF
+
+bool Object::IsNullOrUndefined(Isolate* isolate) const {
+ return IsNullOrUndefined(ReadOnlyRoots(isolate));
+}
+
+bool Object::IsNullOrUndefined(ReadOnlyRoots roots) const {
+ return IsNull(roots) || IsUndefined(roots);
+}
+
+bool Object::IsNullOrUndefined() const {
+ return IsHeapObject() && HeapObject::cast(*this).IsNullOrUndefined();
+}
+
+bool Object::IsZero() const { return *this == Smi::zero(); }
+
+bool Object::IsNoSharedNameSentinel() const {
+ return *this == SharedFunctionInfo::kNoSharedNameSentinel;
+}
+
+bool HeapObject::IsNullOrUndefined(Isolate* isolate) const {
+ return Object::IsNullOrUndefined(isolate);
+}
+
+bool HeapObject::IsNullOrUndefined(ReadOnlyRoots roots) const {
+ return Object::IsNullOrUndefined(roots);
+}
+
+bool HeapObject::IsNullOrUndefined() const {
+ return IsNullOrUndefined(GetReadOnlyRoots());
+}
+
+bool HeapObject::IsUniqueName() const {
+ return IsInternalizedString() || IsSymbol();
+}
+
+bool HeapObject::IsFunction() const {
+ STATIC_ASSERT(LAST_FUNCTION_TYPE == LAST_TYPE);
+ return map().instance_type() >= FIRST_FUNCTION_TYPE;
+}
+
+bool HeapObject::IsCallable() const { return map().is_callable(); }
+
+bool HeapObject::IsConstructor() const { return map().is_constructor(); }
+
+bool HeapObject::IsModuleInfo() const {
+ return map() == GetReadOnlyRoots().module_info_map();
+}
+
+bool HeapObject::IsTemplateInfo() const {
+ return IsObjectTemplateInfo() || IsFunctionTemplateInfo();
+}
+
+bool HeapObject::IsConsString() const {
+ if (!IsString()) return false;
+ return StringShape(String::cast(*this)).IsCons();
+}
+
+bool HeapObject::IsThinString() const {
+ if (!IsString()) return false;
+ return StringShape(String::cast(*this)).IsThin();
+}
+
+bool HeapObject::IsSlicedString() const {
+ if (!IsString()) return false;
+ return StringShape(String::cast(*this)).IsSliced();
+}
+
+bool HeapObject::IsSeqString() const {
+ if (!IsString()) return false;
+ return StringShape(String::cast(*this)).IsSequential();
+}
+
+bool HeapObject::IsSeqOneByteString() const {
+ if (!IsString()) return false;
+ return StringShape(String::cast(*this)).IsSequential() &&
+ String::cast(*this).IsOneByteRepresentation();
+}
+
+bool HeapObject::IsSeqTwoByteString() const {
+ if (!IsString()) return false;
+ return StringShape(String::cast(*this)).IsSequential() &&
+ String::cast(*this).IsTwoByteRepresentation();
+}
+
+bool HeapObject::IsExternalString() const {
+ if (!IsString()) return false;
+ return StringShape(String::cast(*this)).IsExternal();
+}
+
+bool HeapObject::IsExternalOneByteString() const {
+ if (!IsString()) return false;
+ return StringShape(String::cast(*this)).IsExternal() &&
+ String::cast(*this).IsOneByteRepresentation();
+}
+
+bool HeapObject::IsExternalTwoByteString() const {
+ if (!IsString()) return false;
+ return StringShape(String::cast(*this)).IsExternal() &&
+ String::cast(*this).IsTwoByteRepresentation();
+}
+
+bool Object::IsNumber() const { return IsSmi() || IsHeapNumber(); }
+
+bool Object::IsNumeric() const { return IsNumber() || IsBigInt(); }
+
+bool HeapObject::IsFiller() const {
+ InstanceType instance_type = map().instance_type();
+ return instance_type == FREE_SPACE_TYPE || instance_type == FILLER_TYPE;
+}
+
+bool HeapObject::IsJSWeakCollection() const {
+ return IsJSWeakMap() || IsJSWeakSet();
+}
+
+bool HeapObject::IsJSCollection() const { return IsJSMap() || IsJSSet(); }
+
+bool HeapObject::IsPromiseReactionJobTask() const {
+ return IsPromiseFulfillReactionJobTask() || IsPromiseRejectReactionJobTask();
+}
+
+bool HeapObject::IsFrameArray() const { return IsFixedArrayExact(); }
+
+bool HeapObject::IsArrayList() const {
+ return map() == GetReadOnlyRoots().array_list_map() ||
+ *this == GetReadOnlyRoots().empty_fixed_array();
+}
+
+bool HeapObject::IsRegExpMatchInfo() const { return IsFixedArrayExact(); }
+
+bool Object::IsLayoutDescriptor() const { return IsSmi() || IsByteArray(); }
+
+bool HeapObject::IsDeoptimizationData() const {
+ // Must be a fixed array.
+ if (!IsFixedArrayExact()) return false;
+
+  // There's no sure way to tell a fixed array apart from a deoptimization
+  // data array. Since this is used for asserts, we check that the length
+  // is either zero or the fixed header size plus a multiple of the entry
+  // size.
+ int length = FixedArray::cast(*this).length();
+ if (length == 0) return true;
+
+ length -= DeoptimizationData::kFirstDeoptEntryIndex;
+ return length >= 0 && length % DeoptimizationData::kDeoptEntrySize == 0;
+}
+
+bool HeapObject::IsHandlerTable() const {
+ if (!IsFixedArrayExact()) return false;
+ // There's actually no way to see the difference between a fixed array and
+ // a handler table array.
+ return true;
+}
+
+bool HeapObject::IsTemplateList() const {
+ if (!IsFixedArrayExact()) return false;
+ // There's actually no way to see the difference between a fixed array and
+ // a template list.
+ if (FixedArray::cast(*this).length() < 1) return false;
+ return true;
+}
+
+bool HeapObject::IsDependentCode() const {
+ if (!IsWeakFixedArray()) return false;
+ // There's actually no way to see the difference between a weak fixed array
+ // and a dependent codes array.
+ return true;
+}
+
+bool HeapObject::IsAbstractCode() const {
+ return IsBytecodeArray() || IsCode();
+}
+
+bool HeapObject::IsStringWrapper() const {
+ return IsJSValue() && JSValue::cast(*this).value().IsString();
+}
+
+bool HeapObject::IsBooleanWrapper() const {
+ return IsJSValue() && JSValue::cast(*this).value().IsBoolean();
+}
+
+bool HeapObject::IsScriptWrapper() const {
+ return IsJSValue() && JSValue::cast(*this).value().IsScript();
+}
+
+bool HeapObject::IsNumberWrapper() const {
+ return IsJSValue() && JSValue::cast(*this).value().IsNumber();
+}
+
+bool HeapObject::IsBigIntWrapper() const {
+ return IsJSValue() && JSValue::cast(*this).value().IsBigInt();
+}
+
+bool HeapObject::IsSymbolWrapper() const {
+ return IsJSValue() && JSValue::cast(*this).value().IsSymbol();
+}
+
+bool HeapObject::IsJSArrayBufferView() const {
+ return IsJSDataView() || IsJSTypedArray();
+}
+
+bool HeapObject::IsStringSet() const { return IsHashTable(); }
+
+bool HeapObject::IsObjectHashSet() const { return IsHashTable(); }
+
+bool HeapObject::IsCompilationCacheTable() const { return IsHashTable(); }
+
+bool HeapObject::IsMapCache() const { return IsHashTable(); }
+
+bool HeapObject::IsObjectHashTable() const { return IsHashTable(); }
+
+bool Object::IsHashTableBase() const { return IsHashTable(); }
+
+bool Object::IsSmallOrderedHashTable() const {
+ return IsSmallOrderedHashSet() || IsSmallOrderedHashMap() ||
+ IsSmallOrderedNameDictionary();
+}
+
+bool Object::IsPrimitive() const {
+ return IsSmi() || HeapObject::cast(*this).map().IsPrimitiveMap();
+}
+
+// static
+Maybe<bool> Object::IsArray(Handle<Object> object) {
+ if (object->IsSmi()) return Just(false);
+ Handle<HeapObject> heap_object = Handle<HeapObject>::cast(object);
+ if (heap_object->IsJSArray()) return Just(true);
+ if (!heap_object->IsJSProxy()) return Just(false);
+ return JSProxy::IsArray(Handle<JSProxy>::cast(object));
+}
+
+bool HeapObject::IsUndetectable() const { return map().is_undetectable(); }
+
+bool HeapObject::IsAccessCheckNeeded() const {
+ if (IsJSGlobalProxy()) {
+ const JSGlobalProxy proxy = JSGlobalProxy::cast(*this);
+ JSGlobalObject global = proxy.GetIsolate()->context().global_object();
+ return proxy.IsDetachedFrom(global);
+ }
+ return map().is_access_check_needed();
+}
+
+bool HeapObject::IsStruct() const {
+ switch (map().instance_type()) {
+#define MAKE_STRUCT_CASE(TYPE, Name, name) \
+ case TYPE: \
+ return true;
+ STRUCT_LIST(MAKE_STRUCT_CASE)
+#undef MAKE_STRUCT_CASE
+ // It is hard to include ALLOCATION_SITE_TYPE in STRUCT_LIST because
+ // that macro is used for many things and AllocationSite needs a few
+ // special cases.
+ case ALLOCATION_SITE_TYPE:
+ return true;
+ case LOAD_HANDLER_TYPE:
+ case STORE_HANDLER_TYPE:
+ return true;
+ case FEEDBACK_CELL_TYPE:
+ return true;
+ case CALL_HANDLER_INFO_TYPE:
+ return true;
+ default:
+ return false;
+ }
+}
+
+#define MAKE_STRUCT_PREDICATE(NAME, Name, name) \
+ bool Object::Is##Name() const { \
+ return IsHeapObject() && HeapObject::cast(*this).Is##Name(); \
+ } \
+ TYPE_CHECKER(Name)
+STRUCT_LIST(MAKE_STRUCT_PREDICATE)
+#undef MAKE_STRUCT_PREDICATE
+
+double Object::Number() const {
+ DCHECK(IsNumber());
+ return IsSmi() ? static_cast<double>(Smi(this->ptr()).value())
+ : HeapNumber::unchecked_cast(*this).value();
+}
+
+// static
+bool Object::SameNumberValue(double value1, double value2) {
+ // SameNumberValue(NaN, NaN) is true.
+ if (value1 != value2) {
+ return std::isnan(value1) && std::isnan(value2);
+ }
+ // SameNumberValue(0.0, -0.0) is false.
+ return (std::signbit(value1) == std::signbit(value2));
+}
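+
+// Thus SameNumberValue(NaN, NaN) is true, SameNumberValue(0.0, -0.0) is
+// false, and SameNumberValue(1.0, 1.0) is true.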
+
+bool Object::IsNaN() const {
+ return this->IsHeapNumber() && std::isnan(HeapNumber::cast(*this).value());
+}
+
+bool Object::IsMinusZero() const {
+ return this->IsHeapNumber() &&
+ i::IsMinusZero(HeapNumber::cast(*this).value());
+}
+
+OBJECT_CONSTRUCTORS_IMPL(RegExpMatchInfo, FixedArray)
+OBJECT_CONSTRUCTORS_IMPL(ScopeInfo, FixedArray)
+OBJECT_CONSTRUCTORS_IMPL(BigIntBase, HeapObject)
+OBJECT_CONSTRUCTORS_IMPL(BigInt, BigIntBase)
+OBJECT_CONSTRUCTORS_IMPL(FreshlyAllocatedBigInt, BigIntBase)
+
+// ------------------------------------
+// Cast operations
+
+CAST_ACCESSOR(BigInt)
+CAST_ACCESSOR(RegExpMatchInfo)
+CAST_ACCESSOR(ScopeInfo)
+
+bool Object::HasValidElements() {
+ // Dictionary is covered under FixedArray. ByteArray is used
+ // for the JSTypedArray backing stores.
+ return IsFixedArray() || IsFixedDoubleArray() || IsByteArray();
+}
+
+bool Object::FilterKey(PropertyFilter filter) {
+ DCHECK(!IsPropertyCell());
+ if (filter == PRIVATE_NAMES_ONLY) {
+ if (!IsSymbol()) return true;
+ return !Symbol::cast(*this).is_private_name();
+ } else if (IsSymbol()) {
+ if (filter & SKIP_SYMBOLS) return true;
+
+ if (Symbol::cast(*this).is_private()) return true;
+ } else {
+ if (filter & SKIP_STRINGS) return true;
+ }
+ return false;
+}
+
+Representation Object::OptimalRepresentation() {
+ if (!FLAG_track_fields) return Representation::Tagged();
+ if (IsSmi()) {
+ return Representation::Smi();
+ } else if (FLAG_track_double_fields && IsHeapNumber()) {
+ return Representation::Double();
+ } else if (FLAG_track_computed_fields && IsUninitialized()) {
+ return Representation::None();
+ } else if (FLAG_track_heap_object_fields) {
+ DCHECK(IsHeapObject());
+ return Representation::HeapObject();
+ } else {
+ return Representation::Tagged();
+ }
+}
+
+ElementsKind Object::OptimalElementsKind() {
+ if (IsSmi()) return PACKED_SMI_ELEMENTS;
+ if (IsNumber()) return PACKED_DOUBLE_ELEMENTS;
+ return PACKED_ELEMENTS;
+}
+
+bool Object::FitsRepresentation(Representation representation) {
+ if (FLAG_track_fields && representation.IsSmi()) {
+ return IsSmi();
+ } else if (FLAG_track_double_fields && representation.IsDouble()) {
+ return IsMutableHeapNumber() || IsNumber();
+ } else if (FLAG_track_heap_object_fields && representation.IsHeapObject()) {
+ return IsHeapObject();
+ } else if (FLAG_track_fields && representation.IsNone()) {
+ return false;
+ }
+ return true;
+}
+
+bool Object::ToUint32(uint32_t* value) const {
+ if (IsSmi()) {
+ int num = Smi::ToInt(*this);
+ if (num < 0) return false;
+ *value = static_cast<uint32_t>(num);
+ return true;
+ }
+ if (IsHeapNumber()) {
+ double num = HeapNumber::cast(*this).value();
+ return DoubleToUint32IfEqualToSelf(num, value);
+ }
+ return false;
+}
+
+// static
+MaybeHandle<JSReceiver> Object::ToObject(Isolate* isolate,
+ Handle<Object> object,
+ const char* method_name) {
+ if (object->IsJSReceiver()) return Handle<JSReceiver>::cast(object);
+ return ToObjectImpl(isolate, object, method_name);
+}
+
+// static
+MaybeHandle<Name> Object::ToName(Isolate* isolate, Handle<Object> input) {
+ if (input->IsName()) return Handle<Name>::cast(input);
+ return ConvertToName(isolate, input);
+}
+
+// static
+MaybeHandle<Object> Object::ToPropertyKey(Isolate* isolate,
+ Handle<Object> value) {
+ if (value->IsSmi() || HeapObject::cast(*value).IsName()) return value;
+ return ConvertToPropertyKey(isolate, value);
+}
+
+// static
+MaybeHandle<Object> Object::ToPrimitive(Handle<Object> input,
+ ToPrimitiveHint hint) {
+ if (input->IsPrimitive()) return input;
+ return JSReceiver::ToPrimitive(Handle<JSReceiver>::cast(input), hint);
+}
+
+// static
+MaybeHandle<Object> Object::ToNumber(Isolate* isolate, Handle<Object> input) {
+ if (input->IsNumber()) return input; // Shortcut.
+ return ConvertToNumberOrNumeric(isolate, input, Conversion::kToNumber);
+}
+
+// static
+MaybeHandle<Object> Object::ToNumeric(Isolate* isolate, Handle<Object> input) {
+ if (input->IsNumber() || input->IsBigInt()) return input; // Shortcut.
+ return ConvertToNumberOrNumeric(isolate, input, Conversion::kToNumeric);
+}
+
+// static
+MaybeHandle<Object> Object::ToInteger(Isolate* isolate, Handle<Object> input) {
+ if (input->IsSmi()) return input;
+ return ConvertToInteger(isolate, input);
+}
+
+// static
+MaybeHandle<Object> Object::ToInt32(Isolate* isolate, Handle<Object> input) {
+ if (input->IsSmi()) return input;
+ return ConvertToInt32(isolate, input);
+}
+
+// static
+MaybeHandle<Object> Object::ToUint32(Isolate* isolate, Handle<Object> input) {
+ if (input->IsSmi()) return handle(Smi::cast(*input).ToUint32Smi(), isolate);
+ return ConvertToUint32(isolate, input);
+}
+
+// static
+MaybeHandle<String> Object::ToString(Isolate* isolate, Handle<Object> input) {
+ if (input->IsString()) return Handle<String>::cast(input);
+ return ConvertToString(isolate, input);
+}
+
+// static
+MaybeHandle<Object> Object::ToLength(Isolate* isolate, Handle<Object> input) {
+ if (input->IsSmi()) {
+ int value = std::max(Smi::ToInt(*input), 0);
+ return handle(Smi::FromInt(value), isolate);
+ }
+ return ConvertToLength(isolate, input);
+}
+
+// static
+MaybeHandle<Object> Object::ToIndex(Isolate* isolate, Handle<Object> input,
+ MessageTemplate error_index) {
+ if (input->IsSmi() && Smi::ToInt(*input) >= 0) return input;
+ return ConvertToIndex(isolate, input, error_index);
+}
+
+MaybeHandle<Object> Object::GetProperty(Isolate* isolate, Handle<Object> object,
+ Handle<Name> name) {
+ LookupIterator it(isolate, object, name);
+ if (!it.IsFound()) return it.factory()->undefined_value();
+ return GetProperty(&it);
+}
+
+MaybeHandle<Object> Object::GetElement(Isolate* isolate, Handle<Object> object,
+ uint32_t index) {
+ LookupIterator it(isolate, object, index);
+ if (!it.IsFound()) return it.factory()->undefined_value();
+ return GetProperty(&it);
+}
+
+MaybeHandle<Object> Object::SetElement(Isolate* isolate, Handle<Object> object,
+ uint32_t index, Handle<Object> value,
+ ShouldThrow should_throw) {
+ LookupIterator it(isolate, object, index);
+ MAYBE_RETURN_NULL(
+ SetProperty(&it, value, StoreOrigin::kMaybeKeyed, Just(should_throw)));
+ return value;
+}
+
+ObjectSlot HeapObject::RawField(int byte_offset) const {
+ return ObjectSlot(FIELD_ADDR(*this, byte_offset));
+}
+
+MaybeObjectSlot HeapObject::RawMaybeWeakField(int byte_offset) const {
+ return MaybeObjectSlot(FIELD_ADDR(*this, byte_offset));
+}
+
+MapWord MapWord::FromMap(const Map map) { return MapWord(map.ptr()); }
+
+Map MapWord::ToMap() const { return Map::unchecked_cast(Object(value_)); }
+
+bool MapWord::IsForwardingAddress() const { return HAS_SMI_TAG(value_); }
+
+MapWord MapWord::FromForwardingAddress(HeapObject object) {
+ return MapWord(object.ptr() - kHeapObjectTag);
+}
+
+HeapObject MapWord::ToForwardingAddress() {
+ DCHECK(IsForwardingAddress());
+ return HeapObject::FromAddress(value_);
+}
+
+#ifdef VERIFY_HEAP
+void HeapObject::VerifyObjectField(Isolate* isolate, int offset) {
+ VerifyPointer(isolate, READ_FIELD(*this, offset));
+ STATIC_ASSERT(!COMPRESS_POINTERS_BOOL || kTaggedSize == kInt32Size);
+}
+
+void HeapObject::VerifyMaybeObjectField(Isolate* isolate, int offset) {
+ MaybeObject::VerifyMaybeObjectPointer(isolate,
+ READ_WEAK_FIELD(*this, offset));
+ STATIC_ASSERT(!COMPRESS_POINTERS_BOOL || kTaggedSize == kInt32Size);
+}
+
+void HeapObject::VerifySmiField(int offset) {
+ CHECK(READ_FIELD(*this, offset).IsSmi());
+ STATIC_ASSERT(!COMPRESS_POINTERS_BOOL || kTaggedSize == kInt32Size);
+}
+
+#endif
+
+ReadOnlyRoots HeapObject::GetReadOnlyRoots() const {
+ return ReadOnlyHeap::GetReadOnlyRoots(*this);
+}
+
+Map HeapObject::map() const { return map_word().ToMap(); }
+
+void HeapObject::set_map(Map value) {
+ if (!value.is_null()) {
+#ifdef VERIFY_HEAP
+ GetHeapFromWritableObject(*this)->VerifyObjectLayoutChange(*this, value);
+#endif
+ }
+ set_map_word(MapWord::FromMap(value));
+ if (!value.is_null()) {
+ // TODO(1600) We are passing kNullAddress as a slot because maps can never
+ // be on an evacuation candidate.
+ MarkingBarrier(*this, ObjectSlot(kNullAddress), value);
+ }
+}
+
+Map HeapObject::synchronized_map() const {
+ return synchronized_map_word().ToMap();
+}
+
+void HeapObject::synchronized_set_map(Map value) {
+ if (!value.is_null()) {
+#ifdef VERIFY_HEAP
+ GetHeapFromWritableObject(*this)->VerifyObjectLayoutChange(*this, value);
+#endif
+ }
+ synchronized_set_map_word(MapWord::FromMap(value));
+ if (!value.is_null()) {
+ // TODO(1600) We are passing kNullAddress as a slot because maps can never
+ // be on an evacuation candidate.
+ MarkingBarrier(*this, ObjectSlot(kNullAddress), value);
+ }
+}
+
+// Unsafe accessor omitting write barrier.
+void HeapObject::set_map_no_write_barrier(Map value) {
+ if (!value.is_null()) {
+#ifdef VERIFY_HEAP
+ GetHeapFromWritableObject(*this)->VerifyObjectLayoutChange(*this, value);
+#endif
+ }
+ set_map_word(MapWord::FromMap(value));
+}
+
+void HeapObject::set_map_after_allocation(Map value, WriteBarrierMode mode) {
+ set_map_word(MapWord::FromMap(value));
+ if (mode != SKIP_WRITE_BARRIER) {
+ DCHECK(!value.is_null());
+ // TODO(1600) We are passing kNullAddress as a slot because maps can never
+ // be on an evacuation candidate.
+ MarkingBarrier(*this, ObjectSlot(kNullAddress), value);
+ }
+}
+
+MapWordSlot HeapObject::map_slot() const {
+ return MapWordSlot(FIELD_ADDR(*this, kMapOffset));
+}
+
+MapWord HeapObject::map_word() const {
+ return MapWord(map_slot().Relaxed_Load().ptr());
+}
+
+void HeapObject::set_map_word(MapWord map_word) {
+ map_slot().Relaxed_Store(Object(map_word.value_));
+}
+
+MapWord HeapObject::synchronized_map_word() const {
+ return MapWord(map_slot().Acquire_Load().ptr());
+}
+
+void HeapObject::synchronized_set_map_word(MapWord map_word) {
+ map_slot().Release_Store(Object(map_word.value_));
+}
+
+int HeapObject::Size() const { return SizeFromMap(map()); }
+
+inline bool IsSpecialReceiverInstanceType(InstanceType instance_type) {
+ return instance_type <= LAST_SPECIAL_RECEIVER_TYPE;
+}
+
+// This should be in objects/map-inl.h, but can't, because of a cyclic
+// dependency.
+bool Map::IsSpecialReceiverMap() const {
+ bool result = IsSpecialReceiverInstanceType(instance_type());
+ DCHECK_IMPLIES(!result,
+ !has_named_interceptor() && !is_access_check_needed());
+ return result;
+}
+
+inline bool IsCustomElementsReceiverInstanceType(InstanceType instance_type) {
+ return instance_type <= LAST_CUSTOM_ELEMENTS_RECEIVER;
+}
+
+// This should be in objects/map-inl.h, but can't, because of a cyclic
+// dependency.
+bool Map::IsCustomElementsReceiverMap() const {
+ return IsCustomElementsReceiverInstanceType(instance_type());
+}
+
+bool Object::ToArrayLength(uint32_t* index) const {
+ return Object::ToUint32(index);
+}
+
+bool Object::ToArrayIndex(uint32_t* index) const {
+ return Object::ToUint32(index) && *index != kMaxUInt32;
+}
+
+int RegExpMatchInfo::NumberOfCaptureRegisters() {
+ DCHECK_GE(length(), kLastMatchOverhead);
+ Object obj = get(kNumberOfCapturesIndex);
+ return Smi::ToInt(obj);
+}
+
+void RegExpMatchInfo::SetNumberOfCaptureRegisters(int value) {
+ DCHECK_GE(length(), kLastMatchOverhead);
+ set(kNumberOfCapturesIndex, Smi::FromInt(value));
+}
+
+String RegExpMatchInfo::LastSubject() {
+ DCHECK_GE(length(), kLastMatchOverhead);
+ return String::cast(get(kLastSubjectIndex));
+}
+
+void RegExpMatchInfo::SetLastSubject(String value) {
+ DCHECK_GE(length(), kLastMatchOverhead);
+ set(kLastSubjectIndex, value);
+}
+
+Object RegExpMatchInfo::LastInput() {
+ DCHECK_GE(length(), kLastMatchOverhead);
+ return get(kLastInputIndex);
+}
+
+void RegExpMatchInfo::SetLastInput(Object value) {
+ DCHECK_GE(length(), kLastMatchOverhead);
+ set(kLastInputIndex, value);
+}
+
+int RegExpMatchInfo::Capture(int i) {
+ DCHECK_LT(i, NumberOfCaptureRegisters());
+ Object obj = get(kFirstCaptureIndex + i);
+ return Smi::ToInt(obj);
+}
+
+void RegExpMatchInfo::SetCapture(int i, int value) {
+ DCHECK_LT(i, NumberOfCaptureRegisters());
+ set(kFirstCaptureIndex + i, Smi::FromInt(value));
+}
+
+WriteBarrierMode HeapObject::GetWriteBarrierMode(
+ const DisallowHeapAllocation& promise) {
+ return GetWriteBarrierModeForObject(*this, &promise);
+}
+
+// static
+AllocationAlignment HeapObject::RequiredAlignment(Map map) {
+ // TODO(bmeurer, v8:4153): We should think about requiring double alignment
+ // in general for ByteArray, since they are used as backing store for typed
+ // arrays now.
+#ifdef V8_COMPRESS_POINTERS
+ // TODO(ishell, v8:8875): Consider using aligned allocations once the
+ // allocation alignment inconsistency is fixed. For now we keep using
+ // unaligned access since both x64 and arm64 architectures (where pointer
+ // compression is supported) allow unaligned access to doubles and full words.
+#endif // V8_COMPRESS_POINTERS
+#ifdef V8_HOST_ARCH_32_BIT
+ int instance_type = map.instance_type();
+ if (instance_type == FIXED_DOUBLE_ARRAY_TYPE) return kDoubleAligned;
+ if (instance_type == HEAP_NUMBER_TYPE) return kDoubleUnaligned;
+#endif // V8_HOST_ARCH_32_BIT
+ return kWordAligned;
+}
+
+Address HeapObject::GetFieldAddress(int field_offset) const {
+ return FIELD_ADDR(*this, field_offset);
+}
+
+// static
+Maybe<bool> Object::GreaterThan(Isolate* isolate, Handle<Object> x,
+ Handle<Object> y) {
+ Maybe<ComparisonResult> result = Compare(isolate, x, y);
+ if (result.IsJust()) {
+ switch (result.FromJust()) {
+ case ComparisonResult::kGreaterThan:
+ return Just(true);
+ case ComparisonResult::kLessThan:
+ case ComparisonResult::kEqual:
+ case ComparisonResult::kUndefined:
+ return Just(false);
+ }
+ }
+ return Nothing<bool>();
+}
+
+// static
+Maybe<bool> Object::GreaterThanOrEqual(Isolate* isolate, Handle<Object> x,
+ Handle<Object> y) {
+ Maybe<ComparisonResult> result = Compare(isolate, x, y);
+ if (result.IsJust()) {
+ switch (result.FromJust()) {
+ case ComparisonResult::kEqual:
+ case ComparisonResult::kGreaterThan:
+ return Just(true);
+ case ComparisonResult::kLessThan:
+ case ComparisonResult::kUndefined:
+ return Just(false);
+ }
+ }
+ return Nothing<bool>();
+}
+
+// static
+Maybe<bool> Object::LessThan(Isolate* isolate, Handle<Object> x,
+ Handle<Object> y) {
+ Maybe<ComparisonResult> result = Compare(isolate, x, y);
+ if (result.IsJust()) {
+ switch (result.FromJust()) {
+ case ComparisonResult::kLessThan:
+ return Just(true);
+ case ComparisonResult::kEqual:
+ case ComparisonResult::kGreaterThan:
+ case ComparisonResult::kUndefined:
+ return Just(false);
+ }
+ }
+ return Nothing<bool>();
+}
+
+// static
+Maybe<bool> Object::LessThanOrEqual(Isolate* isolate, Handle<Object> x,
+ Handle<Object> y) {
+ Maybe<ComparisonResult> result = Compare(isolate, x, y);
+ if (result.IsJust()) {
+ switch (result.FromJust()) {
+ case ComparisonResult::kEqual:
+ case ComparisonResult::kLessThan:
+ return Just(true);
+ case ComparisonResult::kGreaterThan:
+ case ComparisonResult::kUndefined:
+ return Just(false);
+ }
+ }
+ return Nothing<bool>();
+}
+
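+// Note on kUndefined in the four helpers above: Compare() yields
+// ComparisonResult::kUndefined when either operand converts to NaN, so all
+// of LessThan, LessThanOrEqual, GreaterThan and GreaterThanOrEqual answer
+// Just(false). This matches JS, where every relational comparison involving
+// NaN is false.
+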
+MaybeHandle<Object> Object::GetPropertyOrElement(Isolate* isolate,
+ Handle<Object> object,
+ Handle<Name> name) {
+ LookupIterator it = LookupIterator::PropertyOrElement(isolate, object, name);
+ return GetProperty(&it);
+}
+
+MaybeHandle<Object> Object::SetPropertyOrElement(
+ Isolate* isolate, Handle<Object> object, Handle<Name> name,
+ Handle<Object> value, Maybe<ShouldThrow> should_throw,
+ StoreOrigin store_origin) {
+ LookupIterator it = LookupIterator::PropertyOrElement(isolate, object, name);
+ MAYBE_RETURN_NULL(SetProperty(&it, value, store_origin, should_throw));
+ return value;
+}
+
+MaybeHandle<Object> Object::GetPropertyOrElement(Handle<Object> receiver,
+ Handle<Name> name,
+ Handle<JSReceiver> holder) {
+ LookupIterator it = LookupIterator::PropertyOrElement(holder->GetIsolate(),
+ receiver, name, holder);
+ return GetProperty(&it);
+}
+
+// static
+Object Object::GetSimpleHash(Object object) {
+ DisallowHeapAllocation no_gc;
+ if (object.IsSmi()) {
+ uint32_t hash = ComputeUnseededHash(Smi::ToInt(object));
+ return Smi::FromInt(hash & Smi::kMaxValue);
+ }
+ if (object.IsHeapNumber()) {
+ double num = HeapNumber::cast(object).value();
+ if (std::isnan(num)) return Smi::FromInt(Smi::kMaxValue);
+ // Use ComputeUnseededHash for all values in Signed32 range, including -0,
+ // which is considered equal to 0 because collections use SameValueZero.
+ uint32_t hash;
+ // Check range before conversion to avoid undefined behavior.
+ if (num >= kMinInt && num <= kMaxInt && FastI2D(FastD2I(num)) == num) {
+ hash = ComputeUnseededHash(FastD2I(num));
+ } else {
+ hash = ComputeLongHash(double_to_uint64(num));
+ }
+ return Smi::FromInt(hash & Smi::kMaxValue);
+ }
+ if (object.IsName()) {
+ uint32_t hash = Name::cast(object).Hash();
+ return Smi::FromInt(hash);
+ }
+ if (object.IsOddball()) {
+ uint32_t hash = Oddball::cast(object).to_string().Hash();
+ return Smi::FromInt(hash);
+ }
+ if (object.IsBigInt()) {
+ uint32_t hash = BigInt::cast(object).Hash();
+ return Smi::FromInt(hash & Smi::kMaxValue);
+ }
+ if (object.IsSharedFunctionInfo()) {
+ uint32_t hash = SharedFunctionInfo::cast(object).Hash();
+ return Smi::FromInt(hash & Smi::kMaxValue);
+ }
+ DCHECK(object.IsJSReceiver());
+ return object;
+}
+
+Object Object::GetHash() {
+ DisallowHeapAllocation no_gc;
+ Object hash = GetSimpleHash(*this);
+ if (hash.IsSmi()) return hash;
+
+ DCHECK(IsJSReceiver());
+ JSReceiver receiver = JSReceiver::cast(*this);
+ return receiver.GetIdentityHash();
+}
+
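+// Usage sketch for the hashing above (variable names hypothetical):
+//
+//   Object h1 = Object::GetSimpleHash(some_name);   // content-based Smi hash
+//   Object h2 = some_receiver.GetHash();            // defers to the
+//                                                   // receiver's identity
+//                                                   // hash
+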
+Handle<Object> ObjectHashTableShape::AsHandle(Handle<Object> key) {
+ return key;
+}
+
+Relocatable::Relocatable(Isolate* isolate) {
+ isolate_ = isolate;
+ prev_ = isolate->relocatable_top();
+ isolate->set_relocatable_top(this);
+}
+
+Relocatable::~Relocatable() {
+ DCHECK_EQ(isolate_->relocatable_top(), this);
+ isolate_->set_relocatable_top(prev_);
+}
+
+// Predictably converts HeapObject or Address to uint32 by calculating the
+// offset of the address within its MemoryChunk.
+static inline uint32_t ObjectAddressForHashing(Address object) {
+ uint32_t value = static_cast<uint32_t>(object);
+ return value & kPageAlignmentMask;
+}
+
+static inline Handle<Object> MakeEntryPair(Isolate* isolate, uint32_t index,
+ Handle<Object> value) {
+ Handle<Object> key = isolate->factory()->Uint32ToString(index);
+ Handle<FixedArray> entry_storage =
+ isolate->factory()->NewUninitializedFixedArray(2);
+ {
+ entry_storage->set(0, *key, SKIP_WRITE_BARRIER);
+ entry_storage->set(1, *value, SKIP_WRITE_BARRIER);
+ }
+ return isolate->factory()->NewJSArrayWithElements(entry_storage,
+ PACKED_ELEMENTS, 2);
+}
+
+static inline Handle<Object> MakeEntryPair(Isolate* isolate, Handle<Object> key,
+ Handle<Object> value) {
+ Handle<FixedArray> entry_storage =
+ isolate->factory()->NewUninitializedFixedArray(2);
+ {
+ entry_storage->set(0, *key, SKIP_WRITE_BARRIER);
+ entry_storage->set(1, *value, SKIP_WRITE_BARRIER);
+ }
+ return isolate->factory()->NewJSArrayWithElements(entry_storage,
+ PACKED_ELEMENTS, 2);
+}
+
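+// Both MakeEntryPair overloads above produce the same shape: a packed,
+// two-element JSArray [key, value], as used for Object.entries-style
+// results. The index-keyed variant first stringifies the index, e.g. index 0
+// with a value v yields the array ["0", v].
+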
+bool ScopeInfo::IsAsmModule() const {
+ return IsAsmModuleField::decode(Flags());
+}
+
+bool ScopeInfo::HasSimpleParameters() const {
+ return HasSimpleParametersField::decode(Flags());
+}
+
+#define FIELD_ACCESSORS(name) \
+ void ScopeInfo::Set##name(int value) { set(k##name, Smi::FromInt(value)); } \
+ int ScopeInfo::name() const { \
+ if (length() > 0) { \
+ return Smi::ToInt(get(k##name)); \
+ } else { \
+ return 0; \
+ } \
+ }
+FOR_EACH_SCOPE_INFO_NUMERIC_FIELD(FIELD_ACCESSORS)
+#undef FIELD_ACCESSORS
+
+FreshlyAllocatedBigInt FreshlyAllocatedBigInt::cast(Object object) {
+ SLOW_DCHECK(object.IsBigInt());
+ return FreshlyAllocatedBigInt(object.ptr());
+}
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_OBJECTS_INL_H_
diff --git a/deps/v8/src/objects/objects.cc b/deps/v8/src/objects/objects.cc
new file mode 100644
index 0000000000..8cc22fa0e5
--- /dev/null
+++ b/deps/v8/src/objects/objects.cc
@@ -0,0 +1,8200 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/objects/objects.h"
+
+#include <algorithm>
+#include <cmath>
+#include <memory>
+#include <sstream>
+#include <vector>
+
+#include "src/objects/objects-inl.h"
+
+#include "src/api/api-arguments-inl.h"
+#include "src/api/api-natives.h"
+#include "src/api/api.h"
+#include "src/ast/ast.h"
+#include "src/ast/scopes.h"
+#include "src/base/bits.h"
+#include "src/base/debug/stack_trace.h"
+#include "src/base/overflowing-math.h"
+#include "src/base/utils/random-number-generator.h"
+#include "src/builtins/accessors.h"
+#include "src/builtins/builtins.h"
+#include "src/codegen/compiler.h"
+#include "src/common/globals.h"
+#include "src/date/date.h"
+#include "src/debug/debug.h"
+#include "src/execution/arguments.h"
+#include "src/execution/execution.h"
+#include "src/execution/frames-inl.h"
+#include "src/execution/isolate-inl.h"
+#include "src/execution/message-template.h"
+#include "src/execution/microtask-queue.h"
+#include "src/heap/heap-inl.h"
+#include "src/heap/read-only-heap.h"
+#include "src/ic/ic.h"
+#include "src/init/bootstrapper.h"
+#include "src/logging/counters-inl.h"
+#include "src/logging/counters.h"
+#include "src/logging/log.h"
+#include "src/objects/allocation-site-inl.h"
+#include "src/objects/allocation-site-scopes.h"
+#include "src/objects/api-callbacks.h"
+#include "src/objects/arguments-inl.h"
+#include "src/objects/bigint.h"
+#include "src/objects/cell-inl.h"
+#include "src/objects/code-inl.h"
+#include "src/objects/compilation-cache-inl.h"
+#include "src/objects/debug-objects-inl.h"
+#include "src/objects/elements.h"
+#include "src/objects/embedder-data-array-inl.h"
+#include "src/objects/field-index-inl.h"
+#include "src/objects/field-index.h"
+#include "src/objects/field-type.h"
+#include "src/objects/foreign.h"
+#include "src/objects/frame-array-inl.h"
+#include "src/objects/free-space-inl.h"
+#include "src/objects/function-kind.h"
+#include "src/objects/hash-table-inl.h"
+#include "src/objects/js-array-inl.h"
+#include "src/objects/keys.h"
+#include "src/objects/lookup-inl.h"
+#include "src/objects/map-updater.h"
+#include "src/objects/objects-body-descriptors-inl.h"
+#include "src/utils/identity-map.h"
+#ifdef V8_INTL_SUPPORT
+#include "src/objects/js-break-iterator.h"
+#include "src/objects/js-collator.h"
+#endif // V8_INTL_SUPPORT
+#include "src/objects/js-collection-inl.h"
+#ifdef V8_INTL_SUPPORT
+#include "src/objects/js-date-time-format.h"
+#endif // V8_INTL_SUPPORT
+#include "src/objects/js-generator-inl.h"
+#ifdef V8_INTL_SUPPORT
+#include "src/objects/js-list-format.h"
+#include "src/objects/js-locale.h"
+#include "src/objects/js-number-format.h"
+#include "src/objects/js-plural-rules.h"
+#endif // V8_INTL_SUPPORT
+#include "src/objects/js-regexp-inl.h"
+#include "src/objects/js-regexp-string-iterator.h"
+#ifdef V8_INTL_SUPPORT
+#include "src/objects/js-relative-time-format.h"
+#include "src/objects/js-segment-iterator.h"
+#include "src/objects/js-segmenter.h"
+#endif // V8_INTL_SUPPORT
+#include "src/codegen/source-position-table.h"
+#include "src/objects/js-weak-refs-inl.h"
+#include "src/objects/literal-objects-inl.h"
+#include "src/objects/map-inl.h"
+#include "src/objects/map.h"
+#include "src/objects/microtask-inl.h"
+#include "src/objects/module-inl.h"
+#include "src/objects/promise-inl.h"
+#include "src/objects/property-descriptor.h"
+#include "src/objects/prototype.h"
+#include "src/objects/slots-atomic-inl.h"
+#include "src/objects/stack-frame-info-inl.h"
+#include "src/objects/string-comparator.h"
+#include "src/objects/struct-inl.h"
+#include "src/objects/template-objects-inl.h"
+#include "src/objects/transitions-inl.h"
+#include "src/parsing/preparse-data.h"
+#include "src/regexp/jsregexp.h"
+#include "src/strings/string-builder-inl.h"
+#include "src/strings/string-search.h"
+#include "src/strings/string-stream.h"
+#include "src/strings/unicode-decoder.h"
+#include "src/strings/unicode-inl.h"
+#include "src/utils/ostreams.h"
+#include "src/utils/utils-inl.h"
+#include "src/wasm/wasm-engine.h"
+#include "src/wasm/wasm-objects.h"
+#include "src/zone/zone.h"
+
+namespace v8 {
+namespace internal {
+
+ShouldThrow GetShouldThrow(Isolate* isolate, Maybe<ShouldThrow> should_throw) {
+ if (should_throw.IsJust()) return should_throw.FromJust();
+
+ LanguageMode mode = isolate->context().scope_info().language_mode();
+ if (mode == LanguageMode::kStrict) return kThrowOnError;
+
+ for (StackFrameIterator it(isolate); !it.done(); it.Advance()) {
+ if (!(it.frame()->is_optimized() || it.frame()->is_interpreted())) {
+ continue;
+ }
+ // Get the language mode from the closure.
+ JavaScriptFrame* js_frame = static_cast<JavaScriptFrame*>(it.frame());
+ std::vector<SharedFunctionInfo> functions;
+ js_frame->GetFunctions(&functions);
+ LanguageMode closure_language_mode = functions.back().language_mode();
+ if (closure_language_mode > mode) {
+ mode = closure_language_mode;
+ }
+ break;
+ }
+
+ return is_sloppy(mode) ? kDontThrow : kThrowOnError;
+}
+
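+// Behavior sketch of the fallback above, when no explicit ShouldThrow is
+// given:
+//   strict-mode context or frame  -> kThrowOnError (failed writes throw)
+//   sloppy mode throughout        -> kDontThrow    (failed writes are silent)
+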
+bool ComparisonResultToBool(Operation op, ComparisonResult result) {
+ switch (op) {
+ case Operation::kLessThan:
+ return result == ComparisonResult::kLessThan;
+ case Operation::kLessThanOrEqual:
+ return result == ComparisonResult::kLessThan ||
+ result == ComparisonResult::kEqual;
+ case Operation::kGreaterThan:
+ return result == ComparisonResult::kGreaterThan;
+ case Operation::kGreaterThanOrEqual:
+ return result == ComparisonResult::kGreaterThan ||
+ result == ComparisonResult::kEqual;
+ default:
+ break;
+ }
+ UNREACHABLE();
+}
+
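+// The mapping above, spelled out (kUndefined, i.e. a NaN operand, satisfies
+// no operation):
+//
+//   op \ result          kLessThan  kEqual  kGreaterThan  kUndefined
+//   kLessThan            true       false   false         false
+//   kLessThanOrEqual     true       true    false         false
+//   kGreaterThan         false      false   true          false
+//   kGreaterThanOrEqual  false      true    true          false
+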
+std::ostream& operator<<(std::ostream& os, InstanceType instance_type) {
+ switch (instance_type) {
+#define WRITE_TYPE(TYPE) \
+ case TYPE: \
+ return os << #TYPE;
+ INSTANCE_TYPE_LIST(WRITE_TYPE)
+#undef WRITE_TYPE
+ }
+ UNREACHABLE();
+}
+
+Handle<FieldType> Object::OptimalType(Isolate* isolate,
+ Representation representation) {
+ if (representation.IsNone()) return FieldType::None(isolate);
+ if (FLAG_track_field_types) {
+ if (representation.IsHeapObject() && IsHeapObject()) {
+ // We can track only JavaScript objects with stable maps.
+ Handle<Map> map(HeapObject::cast(*this).map(), isolate);
+ if (map->is_stable() && map->IsJSReceiverMap()) {
+ return FieldType::Class(map, isolate);
+ }
+ }
+ }
+ return FieldType::Any(isolate);
+}
+
+Handle<Object> Object::NewStorageFor(Isolate* isolate, Handle<Object> object,
+ Representation representation) {
+ if (!representation.IsDouble()) return object;
+ auto result = isolate->factory()->NewMutableHeapNumberWithHoleNaN();
+ if (object->IsUninitialized(isolate)) {
+ result->set_value_as_bits(kHoleNanInt64);
+ } else if (object->IsMutableHeapNumber()) {
+ // Ensure that all bits of the double value are preserved.
+ result->set_value_as_bits(MutableHeapNumber::cast(*object).value_as_bits());
+ } else {
+ result->set_value(object->Number());
+ }
+ return result;
+}
+
+Handle<Object> Object::WrapForRead(Isolate* isolate, Handle<Object> object,
+ Representation representation) {
+ DCHECK(!object->IsUninitialized(isolate));
+ if (!representation.IsDouble()) {
+ DCHECK(object->FitsRepresentation(representation));
+ return object;
+ }
+ return isolate->factory()->NewHeapNumber(
+ MutableHeapNumber::cast(*object).value());
+}
+
+MaybeHandle<JSReceiver> Object::ToObjectImpl(Isolate* isolate,
+ Handle<Object> object,
+ const char* method_name) {
+ DCHECK(!object->IsJSReceiver()); // Use ToObject() for fast path.
+ Handle<Context> native_context = isolate->native_context();
+ Handle<JSFunction> constructor;
+ if (object->IsSmi()) {
+ constructor = handle(native_context->number_function(), isolate);
+ } else {
+ int constructor_function_index =
+ Handle<HeapObject>::cast(object)->map().GetConstructorFunctionIndex();
+ if (constructor_function_index == Map::kNoConstructorFunctionIndex) {
+ if (method_name != nullptr) {
+ THROW_NEW_ERROR(
+ isolate,
+ NewTypeError(
+ MessageTemplate::kCalledOnNullOrUndefined,
+ isolate->factory()->NewStringFromAsciiChecked(method_name)),
+ JSReceiver);
+ }
+ THROW_NEW_ERROR(isolate,
+ NewTypeError(MessageTemplate::kUndefinedOrNullToObject),
+ JSReceiver);
+ }
+ constructor = handle(
+ JSFunction::cast(native_context->get(constructor_function_index)),
+ isolate);
+ }
+ Handle<JSObject> result = isolate->factory()->NewJSObject(constructor);
+ Handle<JSValue>::cast(result)->set_value(*object);
+ return result;
+}
+
+// ES6 section 9.2.1.2, OrdinaryCallBindThis for sloppy callee.
+// static
+MaybeHandle<JSReceiver> Object::ConvertReceiver(Isolate* isolate,
+ Handle<Object> object) {
+ if (object->IsJSReceiver()) return Handle<JSReceiver>::cast(object);
+ if (object->IsNullOrUndefined(isolate)) {
+ return isolate->global_proxy();
+ }
+ return Object::ToObject(isolate, object);
+}
+
+// static
+MaybeHandle<Object> Object::ConvertToNumberOrNumeric(Isolate* isolate,
+ Handle<Object> input,
+ Conversion mode) {
+ while (true) {
+ if (input->IsNumber()) {
+ return input;
+ }
+ if (input->IsString()) {
+ return String::ToNumber(isolate, Handle<String>::cast(input));
+ }
+ if (input->IsOddball()) {
+ return Oddball::ToNumber(isolate, Handle<Oddball>::cast(input));
+ }
+ if (input->IsSymbol()) {
+ THROW_NEW_ERROR(isolate, NewTypeError(MessageTemplate::kSymbolToNumber),
+ Object);
+ }
+ if (input->IsBigInt()) {
+ if (mode == Conversion::kToNumeric) return input;
+ DCHECK_EQ(mode, Conversion::kToNumber);
+ THROW_NEW_ERROR(isolate, NewTypeError(MessageTemplate::kBigIntToNumber),
+ Object);
+ }
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, input,
+ JSReceiver::ToPrimitive(Handle<JSReceiver>::cast(input),
+ ToPrimitiveHint::kNumber),
+ Object);
+ }
+}
+
+// static
+MaybeHandle<Object> Object::ConvertToInteger(Isolate* isolate,
+ Handle<Object> input) {
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, input,
+ ConvertToNumberOrNumeric(isolate, input, Conversion::kToNumber), Object);
+ if (input->IsSmi()) return input;
+ return isolate->factory()->NewNumber(DoubleToInteger(input->Number()));
+}
+
+// static
+MaybeHandle<Object> Object::ConvertToInt32(Isolate* isolate,
+ Handle<Object> input) {
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, input,
+ ConvertToNumberOrNumeric(isolate, input, Conversion::kToNumber), Object);
+ if (input->IsSmi()) return input;
+ return isolate->factory()->NewNumberFromInt(DoubleToInt32(input->Number()));
+}
+
+// static
+MaybeHandle<Object> Object::ConvertToUint32(Isolate* isolate,
+ Handle<Object> input) {
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, input,
+ ConvertToNumberOrNumeric(isolate, input, Conversion::kToNumber), Object);
+ if (input->IsSmi()) return handle(Smi::cast(*input).ToUint32Smi(), isolate);
+ return isolate->factory()->NewNumberFromUint(DoubleToUint32(input->Number()));
+}
+
+// static
+MaybeHandle<Name> Object::ConvertToName(Isolate* isolate,
+ Handle<Object> input) {
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, input, Object::ToPrimitive(input, ToPrimitiveHint::kString),
+ Name);
+ if (input->IsName()) return Handle<Name>::cast(input);
+ return ToString(isolate, input);
+}
+
+// ES6 7.1.14
+// static
+MaybeHandle<Object> Object::ConvertToPropertyKey(Isolate* isolate,
+ Handle<Object> value) {
+ // 1. Let key be ToPrimitive(argument, hint String).
+ MaybeHandle<Object> maybe_key =
+ Object::ToPrimitive(value, ToPrimitiveHint::kString);
+ // 2. ReturnIfAbrupt(key).
+ Handle<Object> key;
+ if (!maybe_key.ToHandle(&key)) return key;
+ // 3. If Type(key) is Symbol, then return key.
+ if (key->IsSymbol()) return key;
+ // 4. Return ToString(key).
+ // Extending the spec'ed behavior, we'd be happy to return an element index.
+ if (key->IsSmi()) return key;
+ if (key->IsHeapNumber()) {
+ uint32_t uint_value;
+ if (value->ToArrayLength(&uint_value) &&
+ uint_value <= static_cast<uint32_t>(Smi::kMaxValue)) {
+ return handle(Smi::FromInt(static_cast<int>(uint_value)), isolate);
+ }
+ }
+ return Object::ToString(isolate, key);
+}
+
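+// Fast path above, illustrated: a primitive key that is a whole number in
+// Smi range comes back as an element index rather than a string, e.g. a
+// HeapNumber holding 7 becomes Smi(7), while 2^31 (above Smi::kMaxValue)
+// falls through to ToString.
+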
+// static
+MaybeHandle<String> Object::ConvertToString(Isolate* isolate,
+ Handle<Object> input) {
+ while (true) {
+ if (input->IsOddball()) {
+ return handle(Handle<Oddball>::cast(input)->to_string(), isolate);
+ }
+ if (input->IsNumber()) {
+ return isolate->factory()->NumberToString(input);
+ }
+ if (input->IsSymbol()) {
+ THROW_NEW_ERROR(isolate, NewTypeError(MessageTemplate::kSymbolToString),
+ String);
+ }
+ if (input->IsBigInt()) {
+ return BigInt::ToString(isolate, Handle<BigInt>::cast(input));
+ }
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, input,
+ JSReceiver::ToPrimitive(Handle<JSReceiver>::cast(input),
+ ToPrimitiveHint::kString),
+ String);
+ // Object::ToString checks for String first on its fast path, which is why
+ // this helper performs the IsString() check at the end of the loop.
+ if (input->IsString()) {
+ return Handle<String>::cast(input);
+ }
+ }
+}
+
+namespace {
+
+bool IsErrorObject(Isolate* isolate, Handle<Object> object) {
+ if (!object->IsJSReceiver()) return false;
+ Handle<Symbol> symbol = isolate->factory()->stack_trace_symbol();
+ return JSReceiver::HasOwnProperty(Handle<JSReceiver>::cast(object), symbol)
+ .FromMaybe(false);
+}
+
+Handle<String> AsStringOrEmpty(Isolate* isolate, Handle<Object> object) {
+ return object->IsString() ? Handle<String>::cast(object)
+ : isolate->factory()->empty_string();
+}
+
+Handle<String> NoSideEffectsErrorToString(Isolate* isolate,
+ Handle<Object> input) {
+ Handle<JSReceiver> receiver = Handle<JSReceiver>::cast(input);
+
+ Handle<Name> name_key = isolate->factory()->name_string();
+ Handle<Object> name = JSReceiver::GetDataProperty(receiver, name_key);
+ Handle<String> name_str = AsStringOrEmpty(isolate, name);
+
+ Handle<Name> msg_key = isolate->factory()->message_string();
+ Handle<Object> msg = JSReceiver::GetDataProperty(receiver, msg_key);
+ Handle<String> msg_str = AsStringOrEmpty(isolate, msg);
+
+ if (name_str->length() == 0) return msg_str;
+ if (msg_str->length() == 0) return name_str;
+
+ IncrementalStringBuilder builder(isolate);
+ builder.AppendString(name_str);
+ builder.AppendCString(": ");
+ builder.AppendString(msg_str);
+
+ return builder.Finish().ToHandleChecked();
+}
+
+} // namespace
+
+// static
+Handle<String> Object::NoSideEffectsToString(Isolate* isolate,
+ Handle<Object> input) {
+ DisallowJavascriptExecution no_js(isolate);
+
+ if (input->IsString() || input->IsNumber() || input->IsOddball()) {
+ return Object::ToString(isolate, input).ToHandleChecked();
+ } else if (input->IsBigInt()) {
+ MaybeHandle<String> maybe_string =
+ BigInt::ToString(isolate, Handle<BigInt>::cast(input), 10, kDontThrow);
+ Handle<String> result;
+ if (maybe_string.ToHandle(&result)) return result;
+ // BigInt-to-String conversion can fail on 32-bit platforms where
+ // String::kMaxLength is too small to fit this BigInt.
+ return isolate->factory()->NewStringFromStaticChars(
+ "<a very large BigInt>");
+ } else if (input->IsFunction()) {
+ // -- F u n c t i o n
+ Handle<String> fun_str;
+ if (input->IsJSBoundFunction()) {
+ fun_str = JSBoundFunction::ToString(Handle<JSBoundFunction>::cast(input));
+ } else {
+ DCHECK(input->IsJSFunction());
+ fun_str = JSFunction::ToString(Handle<JSFunction>::cast(input));
+ }
+
+ if (fun_str->length() > 128) {
+ IncrementalStringBuilder builder(isolate);
+ builder.AppendString(isolate->factory()->NewSubString(fun_str, 0, 111));
+ builder.AppendCString("...<omitted>...");
+ builder.AppendString(isolate->factory()->NewSubString(
+ fun_str, fun_str->length() - 2, fun_str->length()));
+
+ return builder.Finish().ToHandleChecked();
+ }
+ return fun_str;
+ } else if (input->IsSymbol()) {
+ // -- S y m b o l
+ Handle<Symbol> symbol = Handle<Symbol>::cast(input);
+
+ if (symbol->is_private_name()) {
+ return Handle<String>(String::cast(symbol->name()), isolate);
+ }
+
+ IncrementalStringBuilder builder(isolate);
+ builder.AppendCString("Symbol(");
+ if (symbol->name().IsString()) {
+ builder.AppendString(handle(String::cast(symbol->name()), isolate));
+ }
+ builder.AppendCharacter(')');
+
+ return builder.Finish().ToHandleChecked();
+ } else if (input->IsJSReceiver()) {
+ // -- J S R e c e i v e r
+ Handle<JSReceiver> receiver = Handle<JSReceiver>::cast(input);
+ Handle<Object> to_string = JSReceiver::GetDataProperty(
+ receiver, isolate->factory()->toString_string());
+
+ if (IsErrorObject(isolate, input) ||
+ *to_string == *isolate->error_to_string()) {
+ // When internally formatting error objects, use a side-effects-free
+ // version of Error.prototype.toString independent of the actually
+ // installed toString method.
+ return NoSideEffectsErrorToString(isolate, input);
+ } else if (*to_string == *isolate->object_to_string()) {
+ Handle<Object> ctor = JSReceiver::GetDataProperty(
+ receiver, isolate->factory()->constructor_string());
+ if (ctor->IsFunction()) {
+ Handle<String> ctor_name;
+ if (ctor->IsJSBoundFunction()) {
+ ctor_name = JSBoundFunction::GetName(
+ isolate, Handle<JSBoundFunction>::cast(ctor))
+ .ToHandleChecked();
+ } else if (ctor->IsJSFunction()) {
+ Handle<Object> ctor_name_obj =
+ JSFunction::GetName(isolate, Handle<JSFunction>::cast(ctor));
+ ctor_name = AsStringOrEmpty(isolate, ctor_name_obj);
+ }
+
+ if (ctor_name->length() != 0) {
+ IncrementalStringBuilder builder(isolate);
+ builder.AppendCString("#<");
+ builder.AppendString(ctor_name);
+ builder.AppendCString(">");
+
+ return builder.Finish().ToHandleChecked();
+ }
+ }
+ }
+ }
+
+ // At this point, input is either none of the above or a JSReceiver.
+
+ Handle<JSReceiver> receiver;
+ if (input->IsJSReceiver()) {
+ receiver = Handle<JSReceiver>::cast(input);
+ } else {
+ // This is the only case where Object::ToObject throws.
+ DCHECK(!input->IsSmi());
+ int constructor_function_index =
+ Handle<HeapObject>::cast(input)->map().GetConstructorFunctionIndex();
+ if (constructor_function_index == Map::kNoConstructorFunctionIndex) {
+ return isolate->factory()->NewStringFromAsciiChecked("[object Unknown]");
+ }
+
+ receiver = Object::ToObjectImpl(isolate, input).ToHandleChecked();
+ }
+
+ Handle<String> builtin_tag = handle(receiver->class_name(), isolate);
+ Handle<Object> tag_obj = JSReceiver::GetDataProperty(
+ receiver, isolate->factory()->to_string_tag_symbol());
+ Handle<String> tag =
+ tag_obj->IsString() ? Handle<String>::cast(tag_obj) : builtin_tag;
+
+ IncrementalStringBuilder builder(isolate);
+ builder.AppendCString("[object ");
+ builder.AppendString(tag);
+ builder.AppendCString("]");
+
+ return builder.Finish().ToHandleChecked();
+}
+
+// static
+MaybeHandle<Object> Object::ConvertToLength(Isolate* isolate,
+ Handle<Object> input) {
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, input, ToNumber(isolate, input), Object);
+ if (input->IsSmi()) {
+ int value = std::max(Smi::ToInt(*input), 0);
+ return handle(Smi::FromInt(value), isolate);
+ }
+ double len = DoubleToInteger(input->Number());
+ if (len <= 0.0) {
+ return handle(Smi::kZero, isolate);
+ } else if (len >= kMaxSafeInteger) {
+ len = kMaxSafeInteger;
+ }
+ return isolate->factory()->NewNumber(len);
+}
+
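+// ToLength clamping performed above, by example:
+//   NaN, -5, -0     -> 0
+//   3.7             -> 3
+//   2^53 and above  -> 2^53 - 1  (kMaxSafeInteger)
+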
+// static
+MaybeHandle<Object> Object::ConvertToIndex(Isolate* isolate,
+ Handle<Object> input,
+ MessageTemplate error_index) {
+ if (input->IsUndefined(isolate)) return handle(Smi::kZero, isolate);
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, input, ToNumber(isolate, input), Object);
+ if (input->IsSmi() && Smi::ToInt(*input) >= 0) return input;
+ double len = DoubleToInteger(input->Number()) + 0.0;
+ auto js_len = isolate->factory()->NewNumber(len);
+ if (len < 0.0 || len > kMaxSafeInteger) {
+ THROW_NEW_ERROR(isolate, NewRangeError(error_index, js_len), Object);
+ }
+ return js_len;
+}
+
+bool Object::BooleanValue(Isolate* isolate) {
+ if (IsSmi()) return Smi::ToInt(*this) != 0;
+ DCHECK(IsHeapObject());
+ if (IsBoolean()) return IsTrue(isolate);
+ if (IsNullOrUndefined(isolate)) return false;
+ if (IsUndetectable()) return false; // Undetectable object is false.
+ if (IsString()) return String::cast(*this).length() != 0;
+ if (IsHeapNumber()) return DoubleToBoolean(HeapNumber::cast(*this).value());
+ if (IsBigInt()) return BigInt::cast(*this).ToBoolean();
+ return true;
+}
+
+Object Object::ToBoolean(Isolate* isolate) {
+ if (IsBoolean()) return *this;
+ return isolate->heap()->ToBoolean(BooleanValue(isolate));
+}
+
+namespace {
+
+// TODO(bmeurer): Maybe we should introduce a marker interface Number,
+// where we could put all of these methods at some point?
+ComparisonResult StrictNumberCompare(double x, double y) {
+ if (std::isnan(x) || std::isnan(y)) {
+ return ComparisonResult::kUndefined;
+ } else if (x < y) {
+ return ComparisonResult::kLessThan;
+ } else if (x > y) {
+ return ComparisonResult::kGreaterThan;
+ } else {
+ return ComparisonResult::kEqual;
+ }
+}
+
+// See Number case of ES6#sec-strict-equality-comparison
+// Returns false if x or y is NaN, treats -0.0 as equal to 0.0.
+bool StrictNumberEquals(double x, double y) {
+ // Must check explicitly for NaNs on Windows, but -0 works fine.
+ if (std::isnan(x) || std::isnan(y)) return false;
+ return x == y;
+}
+
+bool StrictNumberEquals(const Object x, const Object y) {
+ return StrictNumberEquals(x.Number(), y.Number());
+}
+
+bool StrictNumberEquals(Handle<Object> x, Handle<Object> y) {
+ return StrictNumberEquals(*x, *y);
+}
+
+ComparisonResult Reverse(ComparisonResult result) {
+ if (result == ComparisonResult::kLessThan) {
+ return ComparisonResult::kGreaterThan;
+ }
+ if (result == ComparisonResult::kGreaterThan) {
+ return ComparisonResult::kLessThan;
+ }
+ return result;
+}
+
+} // anonymous namespace
+
+// static
+Maybe<ComparisonResult> Object::Compare(Isolate* isolate, Handle<Object> x,
+ Handle<Object> y) {
+ // ES6 section 7.2.11 Abstract Relational Comparison step 3 and 4.
+ if (!Object::ToPrimitive(x, ToPrimitiveHint::kNumber).ToHandle(&x) ||
+ !Object::ToPrimitive(y, ToPrimitiveHint::kNumber).ToHandle(&y)) {
+ return Nothing<ComparisonResult>();
+ }
+ if (x->IsString() && y->IsString()) {
+ // ES6 section 7.2.11 Abstract Relational Comparison step 5.
+ return Just(String::Compare(isolate, Handle<String>::cast(x),
+ Handle<String>::cast(y)));
+ }
+ if (x->IsBigInt() && y->IsString()) {
+ return Just(BigInt::CompareToString(isolate, Handle<BigInt>::cast(x),
+ Handle<String>::cast(y)));
+ }
+ if (x->IsString() && y->IsBigInt()) {
+ return Just(Reverse(BigInt::CompareToString(
+ isolate, Handle<BigInt>::cast(y), Handle<String>::cast(x))));
+ }
+ // ES6 section 7.2.11 Abstract Relational Comparison step 6.
+ if (!Object::ToNumeric(isolate, x).ToHandle(&x) ||
+ !Object::ToNumeric(isolate, y).ToHandle(&y)) {
+ return Nothing<ComparisonResult>();
+ }
+
+ bool x_is_number = x->IsNumber();
+ bool y_is_number = y->IsNumber();
+ if (x_is_number && y_is_number) {
+ return Just(StrictNumberCompare(x->Number(), y->Number()));
+ } else if (!x_is_number && !y_is_number) {
+ return Just(BigInt::CompareToBigInt(Handle<BigInt>::cast(x),
+ Handle<BigInt>::cast(y)));
+ } else if (x_is_number) {
+ return Just(Reverse(BigInt::CompareToNumber(Handle<BigInt>::cast(y), x)));
+ } else {
+ return Just(BigInt::CompareToNumber(Handle<BigInt>::cast(x), y));
+ }
+}
+
+// static
+Maybe<bool> Object::Equals(Isolate* isolate, Handle<Object> x,
+ Handle<Object> y) {
+ // This is the generic version of Abstract Equality Comparison. Must be in
+ // sync with CodeStubAssembler::Equal.
+ while (true) {
+ if (x->IsNumber()) {
+ if (y->IsNumber()) {
+ return Just(StrictNumberEquals(x, y));
+ } else if (y->IsBoolean()) {
+ return Just(
+ StrictNumberEquals(*x, Handle<Oddball>::cast(y)->to_number()));
+ } else if (y->IsString()) {
+ return Just(StrictNumberEquals(
+ x, String::ToNumber(isolate, Handle<String>::cast(y))));
+ } else if (y->IsBigInt()) {
+ return Just(BigInt::EqualToNumber(Handle<BigInt>::cast(y), x));
+ } else if (y->IsJSReceiver()) {
+ if (!JSReceiver::ToPrimitive(Handle<JSReceiver>::cast(y))
+ .ToHandle(&y)) {
+ return Nothing<bool>();
+ }
+ } else {
+ return Just(false);
+ }
+ } else if (x->IsString()) {
+ if (y->IsString()) {
+ return Just(String::Equals(isolate, Handle<String>::cast(x),
+ Handle<String>::cast(y)));
+ } else if (y->IsNumber()) {
+ x = String::ToNumber(isolate, Handle<String>::cast(x));
+ return Just(StrictNumberEquals(x, y));
+ } else if (y->IsBoolean()) {
+ x = String::ToNumber(isolate, Handle<String>::cast(x));
+ return Just(
+ StrictNumberEquals(*x, Handle<Oddball>::cast(y)->to_number()));
+ } else if (y->IsBigInt()) {
+ return Just(BigInt::EqualToString(isolate, Handle<BigInt>::cast(y),
+ Handle<String>::cast(x)));
+ } else if (y->IsJSReceiver()) {
+ if (!JSReceiver::ToPrimitive(Handle<JSReceiver>::cast(y))
+ .ToHandle(&y)) {
+ return Nothing<bool>();
+ }
+ } else {
+ return Just(false);
+ }
+ } else if (x->IsBoolean()) {
+ if (y->IsOddball()) {
+ return Just(x.is_identical_to(y));
+ } else if (y->IsNumber()) {
+ return Just(
+ StrictNumberEquals(Handle<Oddball>::cast(x)->to_number(), *y));
+ } else if (y->IsString()) {
+ y = String::ToNumber(isolate, Handle<String>::cast(y));
+ return Just(
+ StrictNumberEquals(Handle<Oddball>::cast(x)->to_number(), *y));
+ } else if (y->IsBigInt()) {
+ x = Oddball::ToNumber(isolate, Handle<Oddball>::cast(x));
+ return Just(BigInt::EqualToNumber(Handle<BigInt>::cast(y), x));
+ } else if (y->IsJSReceiver()) {
+ if (!JSReceiver::ToPrimitive(Handle<JSReceiver>::cast(y))
+ .ToHandle(&y)) {
+ return Nothing<bool>();
+ }
+ x = Oddball::ToNumber(isolate, Handle<Oddball>::cast(x));
+ } else {
+ return Just(false);
+ }
+ } else if (x->IsSymbol()) {
+ if (y->IsSymbol()) {
+ return Just(x.is_identical_to(y));
+ } else if (y->IsJSReceiver()) {
+ if (!JSReceiver::ToPrimitive(Handle<JSReceiver>::cast(y))
+ .ToHandle(&y)) {
+ return Nothing<bool>();
+ }
+ } else {
+ return Just(false);
+ }
+ } else if (x->IsBigInt()) {
+ if (y->IsBigInt()) {
+ return Just(BigInt::EqualToBigInt(BigInt::cast(*x), BigInt::cast(*y)));
+ }
+ return Equals(isolate, y, x);
+ } else if (x->IsJSReceiver()) {
+ if (y->IsJSReceiver()) {
+ return Just(x.is_identical_to(y));
+ } else if (y->IsUndetectable()) {
+ return Just(x->IsUndetectable());
+ } else if (y->IsBoolean()) {
+ y = Oddball::ToNumber(isolate, Handle<Oddball>::cast(y));
+ } else if (!JSReceiver::ToPrimitive(Handle<JSReceiver>::cast(x))
+ .ToHandle(&x)) {
+ return Nothing<bool>();
+ }
+ } else {
+ return Just(x->IsUndetectable() && y->IsUndetectable());
+ }
+ }
+}
+
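+// Reference outcomes of the loop above (JS abstract equality):
+//   1 == "1"              -> true   (string side goes through ToNumber)
+//   true == 1             -> true   (boolean side goes through to_number)
+//   null == undefined     -> true   (both are undetectable oddballs)
+//   Symbol() == Symbol()  -> false  (symbols compare by identity only)
+//   receiver == primitive -> retried after ToPrimitive on the receiver
+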
+bool Object::StrictEquals(Object that) {
+ if (this->IsNumber()) {
+ if (!that.IsNumber()) return false;
+ return StrictNumberEquals(*this, that);
+ } else if (this->IsString()) {
+ if (!that.IsString()) return false;
+ return String::cast(*this).Equals(String::cast(that));
+ } else if (this->IsBigInt()) {
+ if (!that.IsBigInt()) return false;
+ return BigInt::EqualToBigInt(BigInt::cast(*this), BigInt::cast(that));
+ }
+ return *this == that;
+}
+
+// static
+Handle<String> Object::TypeOf(Isolate* isolate, Handle<Object> object) {
+ if (object->IsNumber()) return isolate->factory()->number_string();
+ if (object->IsOddball())
+ return handle(Oddball::cast(*object).type_of(), isolate);
+ if (object->IsUndetectable()) {
+ return isolate->factory()->undefined_string();
+ }
+ if (object->IsString()) return isolate->factory()->string_string();
+ if (object->IsSymbol()) return isolate->factory()->symbol_string();
+ if (object->IsBigInt()) return isolate->factory()->bigint_string();
+ if (object->IsCallable()) return isolate->factory()->function_string();
+ return isolate->factory()->object_string();
+}
+
+// static
+MaybeHandle<Object> Object::Add(Isolate* isolate, Handle<Object> lhs,
+ Handle<Object> rhs) {
+ if (lhs->IsNumber() && rhs->IsNumber()) {
+ return isolate->factory()->NewNumber(lhs->Number() + rhs->Number());
+ } else if (lhs->IsString() && rhs->IsString()) {
+ return isolate->factory()->NewConsString(Handle<String>::cast(lhs),
+ Handle<String>::cast(rhs));
+ }
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, lhs, Object::ToPrimitive(lhs), Object);
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, rhs, Object::ToPrimitive(rhs), Object);
+ if (lhs->IsString() || rhs->IsString()) {
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, rhs, Object::ToString(isolate, rhs),
+ Object);
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, lhs, Object::ToString(isolate, lhs),
+ Object);
+ return isolate->factory()->NewConsString(Handle<String>::cast(lhs),
+ Handle<String>::cast(rhs));
+ }
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, rhs, Object::ToNumber(isolate, rhs),
+ Object);
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, lhs, Object::ToNumber(isolate, lhs),
+ Object);
+ return isolate->factory()->NewNumber(lhs->Number() + rhs->Number());
+}
+
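+// JS '+' as implemented above, by example:
+//   1 + 2     -> 3                   (both sides already Number)
+//   "1" + 2   -> "12"                (a String side forces concatenation)
+//   1 + {}    -> "1[object Object]"  (ToPrimitive turns {} into a string)
+//   true + 1  -> 2                   (ToNumber on both sides)
+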
+// static
+MaybeHandle<Object> Object::OrdinaryHasInstance(Isolate* isolate,
+ Handle<Object> callable,
+ Handle<Object> object) {
+ // The {callable} must have a [[Call]] internal method.
+ if (!callable->IsCallable()) return isolate->factory()->false_value();
+
+ // Check if {callable} is a bound function, and if so retrieve its
+ // [[BoundTargetFunction]] and use that instead of {callable}.
+ if (callable->IsJSBoundFunction()) {
+ Handle<Object> bound_callable(
+ Handle<JSBoundFunction>::cast(callable)->bound_target_function(),
+ isolate);
+ return Object::InstanceOf(isolate, object, bound_callable);
+ }
+
+ // If {object} is not a receiver, return false.
+ if (!object->IsJSReceiver()) return isolate->factory()->false_value();
+
+ // Get the "prototype" of {callable}; raise an error if it's not a receiver.
+ Handle<Object> prototype;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, prototype,
+ Object::GetProperty(isolate, callable,
+ isolate->factory()->prototype_string()),
+ Object);
+ if (!prototype->IsJSReceiver()) {
+ THROW_NEW_ERROR(
+ isolate,
+ NewTypeError(MessageTemplate::kInstanceofNonobjectProto, prototype),
+ Object);
+ }
+
+ // Return whether or not {prototype} is in the prototype chain of {object}.
+ Maybe<bool> result = JSReceiver::HasInPrototypeChain(
+ isolate, Handle<JSReceiver>::cast(object), prototype);
+ if (result.IsNothing()) return MaybeHandle<Object>();
+ return isolate->factory()->ToBoolean(result.FromJust());
+}
+
+// static
+MaybeHandle<Object> Object::InstanceOf(Isolate* isolate, Handle<Object> object,
+ Handle<Object> callable) {
+ // The {callable} must be a receiver.
+ if (!callable->IsJSReceiver()) {
+ THROW_NEW_ERROR(isolate,
+ NewTypeError(MessageTemplate::kNonObjectInInstanceOfCheck),
+ Object);
+ }
+
+ // Lookup the @@hasInstance method on {callable}.
+ Handle<Object> inst_of_handler;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, inst_of_handler,
+ Object::GetMethod(Handle<JSReceiver>::cast(callable),
+ isolate->factory()->has_instance_symbol()),
+ Object);
+ if (!inst_of_handler->IsUndefined(isolate)) {
+ // Call the {inst_of_handler} on the {callable}.
+ Handle<Object> result;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, result,
+ Execution::Call(isolate, inst_of_handler, callable, 1, &object),
+ Object);
+ return isolate->factory()->ToBoolean(result->BooleanValue(isolate));
+ }
+
+ // The {callable} must have a [[Call]] internal method.
+ if (!callable->IsCallable()) {
+ THROW_NEW_ERROR(
+ isolate, NewTypeError(MessageTemplate::kNonCallableInInstanceOfCheck),
+ Object);
+ }
+
+ // Fall back to OrdinaryHasInstance with {callable} and {object}.
+ Handle<Object> result;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, result, Object::OrdinaryHasInstance(isolate, callable, object),
+ Object);
+ return result;
+}
+
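+// Dispatch order implemented above: instanceof first looks up @@hasInstance
+// on the callable and, if present, returns the ToBoolean'ed result of
+// calling it with the object; only when the handler is undefined does it
+// fall back to OrdinaryHasInstance, i.e. the prototype-chain walk.
+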
+// static
+MaybeHandle<Object> Object::GetMethod(Handle<JSReceiver> receiver,
+ Handle<Name> name) {
+ Handle<Object> func;
+ Isolate* isolate = receiver->GetIsolate();
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, func, JSReceiver::GetProperty(isolate, receiver, name), Object);
+ if (func->IsNullOrUndefined(isolate)) {
+ return isolate->factory()->undefined_value();
+ }
+ if (!func->IsCallable()) {
+ THROW_NEW_ERROR(isolate,
+ NewTypeError(MessageTemplate::kPropertyNotFunction, func,
+ name, receiver),
+ Object);
+ }
+ return func;
+}
+
+namespace {
+
+MaybeHandle<FixedArray> CreateListFromArrayLikeFastPath(
+ Isolate* isolate, Handle<Object> object, ElementTypes element_types) {
+ if (element_types == ElementTypes::kAll) {
+ if (object->IsJSArray()) {
+ Handle<JSArray> array = Handle<JSArray>::cast(object);
+ uint32_t length;
+ if (!array->HasArrayPrototype(isolate) ||
+ !array->length().ToUint32(&length) || !array->HasFastElements() ||
+ !JSObject::PrototypeHasNoElements(isolate, *array)) {
+ return MaybeHandle<FixedArray>();
+ }
+ return array->GetElementsAccessor()->CreateListFromArrayLike(
+ isolate, array, length);
+ } else if (object->IsJSTypedArray()) {
+ Handle<JSTypedArray> array = Handle<JSTypedArray>::cast(object);
+ size_t length = array->length();
+ if (array->WasDetached() ||
+ length > static_cast<size_t>(FixedArray::kMaxLength)) {
+ return MaybeHandle<FixedArray>();
+ }
+ return array->GetElementsAccessor()->CreateListFromArrayLike(
+ isolate, array, static_cast<uint32_t>(length));
+ }
+ }
+ return MaybeHandle<FixedArray>();
+}
+
+} // namespace
+
+// static
+MaybeHandle<FixedArray> Object::CreateListFromArrayLike(
+ Isolate* isolate, Handle<Object> object, ElementTypes element_types) {
+ // Fast-path for JSArray and JSTypedArray.
+ MaybeHandle<FixedArray> fast_result =
+ CreateListFromArrayLikeFastPath(isolate, object, element_types);
+ if (!fast_result.is_null()) return fast_result;
+ // 1. ReturnIfAbrupt(object).
+ // 2. (default elementTypes -- not applicable.)
+ // 3. If Type(obj) is not Object, throw a TypeError exception.
+ if (!object->IsJSReceiver()) {
+ THROW_NEW_ERROR(isolate,
+ NewTypeError(MessageTemplate::kCalledOnNonObject,
+ isolate->factory()->NewStringFromAsciiChecked(
+ "CreateListFromArrayLike")),
+ FixedArray);
+ }
+
+ // 4. Let len be ? ToLength(? Get(obj, "length")).
+ Handle<JSReceiver> receiver = Handle<JSReceiver>::cast(object);
+ Handle<Object> raw_length_number;
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, raw_length_number,
+ Object::GetLengthFromArrayLike(isolate, receiver),
+ FixedArray);
+ uint32_t len;
+ if (!raw_length_number->ToUint32(&len) ||
+ len > static_cast<uint32_t>(FixedArray::kMaxLength)) {
+ THROW_NEW_ERROR(isolate,
+ NewRangeError(MessageTemplate::kInvalidArrayLength),
+ FixedArray);
+ }
+ // 5. Let list be an empty List.
+ Handle<FixedArray> list = isolate->factory()->NewFixedArray(len);
+ // 6. Let index be 0.
+ // 7. Repeat while index < len:
+ for (uint32_t index = 0; index < len; ++index) {
+ // 7a. Let indexName be ToString(index).
+ // 7b. Let next be ? Get(obj, indexName).
+ Handle<Object> next;
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, next,
+ JSReceiver::GetElement(isolate, receiver, index),
+ FixedArray);
+ switch (element_types) {
+ case ElementTypes::kAll:
+ // Nothing to do.
+ break;
+ case ElementTypes::kStringAndSymbol: {
+ // 7c. If Type(next) is not an element of elementTypes, throw a
+ // TypeError exception.
+ if (!next->IsName()) {
+ THROW_NEW_ERROR(isolate,
+ NewTypeError(MessageTemplate::kNotPropertyName, next),
+ FixedArray);
+ }
+ // 7d. Append next as the last element of list.
+ // Internalize on the fly so we can use pointer identity later.
+ next = isolate->factory()->InternalizeName(Handle<Name>::cast(next));
+ break;
+ }
+ }
+ list->set(index, *next);
+ // 7e. Set index to index + 1. (See loop header.)
+ }
+ // 8. Return list.
+ return list;
+}
+
+// static
+MaybeHandle<Object> Object::GetLengthFromArrayLike(Isolate* isolate,
+ Handle<JSReceiver> object) {
+ Handle<Object> val;
+ Handle<Name> key = isolate->factory()->length_string();
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, val, JSReceiver::GetProperty(isolate, object, key), Object);
+ return Object::ToLength(isolate, val);
+}
+
+// static
+MaybeHandle<Object> Object::GetProperty(LookupIterator* it,
+ OnNonExistent on_non_existent) {
+ for (; it->IsFound(); it->Next()) {
+ switch (it->state()) {
+ case LookupIterator::NOT_FOUND:
+ case LookupIterator::TRANSITION:
+ UNREACHABLE();
+ case LookupIterator::JSPROXY: {
+ bool was_found;
+ Handle<Object> receiver = it->GetReceiver();
+ // In case of a global IC, the receiver is the global object. Replace it
+ // with the global proxy.
+ if (receiver->IsJSGlobalObject()) {
+ receiver = handle(JSGlobalObject::cast(*receiver).global_proxy(),
+ it->isolate());
+ }
+ MaybeHandle<Object> result =
+ JSProxy::GetProperty(it->isolate(), it->GetHolder<JSProxy>(),
+ it->GetName(), receiver, &was_found);
+ if (!was_found) it->NotFound();
+ return result;
+ }
+ case LookupIterator::INTERCEPTOR: {
+ bool done;
+ Handle<Object> result;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ it->isolate(), result,
+ JSObject::GetPropertyWithInterceptor(it, &done), Object);
+ if (done) return result;
+ break;
+ }
+ case LookupIterator::ACCESS_CHECK:
+ if (it->HasAccess()) break;
+ return JSObject::GetPropertyWithFailedAccessCheck(it);
+ case LookupIterator::ACCESSOR:
+ return GetPropertyWithAccessor(it);
+ case LookupIterator::INTEGER_INDEXED_EXOTIC:
+ return it->isolate()->factory()->undefined_value();
+ case LookupIterator::DATA:
+ return it->GetDataValue();
+ }
+ }
+
+ if (on_non_existent == OnNonExistent::kThrowReferenceError) {
+ THROW_NEW_ERROR(it->isolate(),
+ NewReferenceError(MessageTemplate::kNotDefined, it->name()),
+ Object);
+ }
+ return it->isolate()->factory()->undefined_value();
+}
+
+// static
+MaybeHandle<Object> JSProxy::GetProperty(Isolate* isolate,
+ Handle<JSProxy> proxy,
+ Handle<Name> name,
+ Handle<Object> receiver,
+ bool* was_found) {
+ *was_found = true;
+
+ DCHECK(!name->IsPrivate());
+ STACK_CHECK(isolate, MaybeHandle<Object>());
+ Handle<Name> trap_name = isolate->factory()->get_string();
+ // 1. Assert: IsPropertyKey(P) is true.
+ // 2. Let handler be the value of the [[ProxyHandler]] internal slot of O.
+ Handle<Object> handler(proxy->handler(), isolate);
+ // 3. If handler is null, throw a TypeError exception.
+ // 4. Assert: Type(handler) is Object.
+ if (proxy->IsRevoked()) {
+ THROW_NEW_ERROR(isolate,
+ NewTypeError(MessageTemplate::kProxyRevoked, trap_name),
+ Object);
+ }
+ // 5. Let target be the value of the [[ProxyTarget]] internal slot of O.
+ Handle<JSReceiver> target(JSReceiver::cast(proxy->target()), isolate);
+ // 6. Let trap be ? GetMethod(handler, "get").
+ Handle<Object> trap;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, trap,
+ Object::GetMethod(Handle<JSReceiver>::cast(handler), trap_name), Object);
+ // 7. If trap is undefined, then
+ if (trap->IsUndefined(isolate)) {
+ // 7.a Return target.[[Get]](P, Receiver).
+ LookupIterator it =
+ LookupIterator::PropertyOrElement(isolate, receiver, name, target);
+ MaybeHandle<Object> result = Object::GetProperty(&it);
+ *was_found = it.IsFound();
+ return result;
+ }
+ // 8. Let trapResult be ? Call(trap, handler, «target, P, Receiver»).
+ Handle<Object> trap_result;
+ Handle<Object> args[] = {target, name, receiver};
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, trap_result,
+ Execution::Call(isolate, trap, handler, arraysize(args), args), Object);
+
+ MaybeHandle<Object> result =
+ JSProxy::CheckGetSetTrapResult(isolate, name, target, trap_result, kGet);
+ if (result.is_null()) {
+ return result;
+ }
+
+ // 11. Return trapResult.
+ return trap_result;
+}
+
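+// Invariant enforced below in CheckGetSetTrapResult, sketched: if the proxy
+// target owns P as a non-configurable, non-writable data property, a "get"
+// trap must report exactly the target's value for P; if P is a
+// non-configurable accessor without a getter, the trap must report
+// undefined. Any mismatch throws a TypeError.
+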
+// static
+MaybeHandle<Object> JSProxy::CheckGetSetTrapResult(Isolate* isolate,
+ Handle<Name> name,
+ Handle<JSReceiver> target,
+ Handle<Object> trap_result,
+ AccessKind access_kind) {
+ // 9. Let targetDesc be ? target.[[GetOwnProperty]](P).
+ PropertyDescriptor target_desc;
+ Maybe<bool> target_found =
+ JSReceiver::GetOwnPropertyDescriptor(isolate, target, name, &target_desc);
+ MAYBE_RETURN_NULL(target_found);
+ // 10. If targetDesc is not undefined, then
+ if (target_found.FromJust()) {
+ // 10.a. If IsDataDescriptor(targetDesc) and targetDesc.[[Configurable]] is
+ // false and targetDesc.[[Writable]] is false, then
+ // 10.a.i. If SameValue(trapResult, targetDesc.[[Value]]) is false,
+ // throw a TypeError exception.
+ bool inconsistent = PropertyDescriptor::IsDataDescriptor(&target_desc) &&
+ !target_desc.configurable() &&
+ !target_desc.writable() &&
+ !trap_result->SameValue(*target_desc.value());
+ if (inconsistent) {
+ if (access_kind == kGet) {
+ THROW_NEW_ERROR(
+ isolate,
+ NewTypeError(MessageTemplate::kProxyGetNonConfigurableData, name,
+ target_desc.value(), trap_result),
+ Object);
+ } else {
+ isolate->Throw(*isolate->factory()->NewTypeError(
+ MessageTemplate::kProxySetFrozenData, name));
+ return MaybeHandle<Object>();
+ }
+ }
+ // 10.b. If IsAccessorDescriptor(targetDesc) and targetDesc.[[Configurable]]
+ // is false and targetDesc.[[Get]] is undefined, then
+ // 10.b.i. If trapResult is not undefined, throw a TypeError exception.
+ if (access_kind == kGet) {
+ inconsistent = PropertyDescriptor::IsAccessorDescriptor(&target_desc) &&
+ !target_desc.configurable() &&
+ target_desc.get()->IsUndefined(isolate) &&
+ !trap_result->IsUndefined(isolate);
+ } else {
+ inconsistent = PropertyDescriptor::IsAccessorDescriptor(&target_desc) &&
+ !target_desc.configurable() &&
+ target_desc.set()->IsUndefined(isolate);
+ }
+ if (inconsistent) {
+ if (access_kind == kGet) {
+ THROW_NEW_ERROR(
+ isolate,
+ NewTypeError(MessageTemplate::kProxyGetNonConfigurableAccessor,
+ name, trap_result),
+ Object);
+ } else {
+ isolate->Throw(*isolate->factory()->NewTypeError(
+ MessageTemplate::kProxySetFrozenAccessor, name));
+ return MaybeHandle<Object>();
+ }
+ }
+ }
+ return isolate->factory()->undefined_value();
+}
+
+bool Object::ToInt32(int32_t* value) {
+ if (IsSmi()) {
+ *value = Smi::ToInt(*this);
+ return true;
+ }
+ if (IsHeapNumber()) {
+ double num = HeapNumber::cast(*this).value();
+ // Check range before conversion to avoid undefined behavior.
+ if (num >= kMinInt && num <= kMaxInt && FastI2D(FastD2I(num)) == num) {
+ *value = FastD2I(num);
+ return true;
+ }
+ }
+ return false;
+}
+
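+// ToInt32 above succeeds only for values that round-trip through int32:
+//   Smi(7)            -> 7
+//   HeapNumber(1.0)   -> 1      (FastI2D(FastD2I(1.0)) == 1.0)
+//   HeapNumber(1.5)   -> fails  (not integral)
+//   HeapNumber(2^31)  -> fails  (outside [kMinInt, kMaxInt])
+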
+// static constexpr object declarations need a definition to make the
+// compiler happy.
+constexpr Object Smi::kZero;
+V8_EXPORT_PRIVATE constexpr Object SharedFunctionInfo::kNoSharedNameSentinel;
+
+Handle<SharedFunctionInfo> FunctionTemplateInfo::GetOrCreateSharedFunctionInfo(
+ Isolate* isolate, Handle<FunctionTemplateInfo> info,
+ MaybeHandle<Name> maybe_name) {
+ Object current_info = info->shared_function_info();
+ if (current_info.IsSharedFunctionInfo()) {
+ return handle(SharedFunctionInfo::cast(current_info), isolate);
+ }
+ Handle<Name> name;
+ Handle<String> name_string;
+ if (maybe_name.ToHandle(&name) && name->IsString()) {
+ name_string = Handle<String>::cast(name);
+ } else if (info->class_name().IsString()) {
+ name_string = handle(String::cast(info->class_name()), isolate);
+ } else {
+ name_string = isolate->factory()->empty_string();
+ }
+ FunctionKind function_kind;
+ if (info->remove_prototype()) {
+ function_kind = kConciseMethod;
+ } else {
+ function_kind = kNormalFunction;
+ }
+ Handle<SharedFunctionInfo> result =
+ isolate->factory()->NewSharedFunctionInfoForApiFunction(name_string, info,
+ function_kind);
+
+ result->set_length(info->length());
+ result->DontAdaptArguments();
+ DCHECK(result->IsApiFunction());
+
+ info->set_shared_function_info(*result);
+ return result;
+}
+
+bool FunctionTemplateInfo::IsTemplateFor(Map map) {
+ // There is a constraint on the object; check.
+ if (!map.IsJSObjectMap()) return false;
+ // Fetch the constructor function of the object.
+ Object cons_obj = map.GetConstructor();
+ Object type;
+ if (cons_obj.IsJSFunction()) {
+ JSFunction fun = JSFunction::cast(cons_obj);
+ type = fun.shared().function_data();
+ } else if (cons_obj.IsFunctionTemplateInfo()) {
+ type = FunctionTemplateInfo::cast(cons_obj);
+ } else {
+ return false;
+ }
+ // Iterate through the chain of inheriting function templates to
+ // see if the required one occurs.
+ while (type.IsFunctionTemplateInfo()) {
+ if (type == *this) return true;
+ type = FunctionTemplateInfo::cast(type).GetParentTemplate();
+ }
+ // Didn't find the required type in the inheritance chain.
+ return false;
+}
+
+// static
+FunctionTemplateRareData FunctionTemplateInfo::AllocateFunctionTemplateRareData(
+ Isolate* isolate, Handle<FunctionTemplateInfo> function_template_info) {
+ DCHECK(function_template_info->rare_data().IsUndefined(isolate));
+ Handle<Struct> struct_obj = isolate->factory()->NewStruct(
+ FUNCTION_TEMPLATE_RARE_DATA_TYPE, AllocationType::kOld);
+ Handle<FunctionTemplateRareData> rare_data =
+ i::Handle<FunctionTemplateRareData>::cast(struct_obj);
+ function_template_info->set_rare_data(*rare_data);
+ return *rare_data;
+}
+
+// static
+Handle<TemplateList> TemplateList::New(Isolate* isolate, int size) {
+ Handle<FixedArray> list =
+ isolate->factory()->NewFixedArray(kLengthIndex + size);
+ list->set(kLengthIndex, Smi::kZero);
+ return Handle<TemplateList>::cast(list);
+}
+
+// static
+Handle<TemplateList> TemplateList::Add(Isolate* isolate,
+ Handle<TemplateList> list,
+ Handle<i::Object> value) {
+ STATIC_ASSERT(kFirstElementIndex == 1);
+ int index = list->length() + 1;
+ Handle<i::FixedArray> fixed_array = Handle<FixedArray>::cast(list);
+ fixed_array = FixedArray::SetAndGrow(isolate, fixed_array, index, value);
+ fixed_array->set(kLengthIndex, Smi::FromInt(index));
+ return Handle<TemplateList>::cast(fixed_array);
+}
+
+// ES6 9.5.1
+// static
+MaybeHandle<HeapObject> JSProxy::GetPrototype(Handle<JSProxy> proxy) {
+ Isolate* isolate = proxy->GetIsolate();
+ Handle<String> trap_name = isolate->factory()->getPrototypeOf_string();
+
+ STACK_CHECK(isolate, MaybeHandle<HeapObject>());
+
+ // 1. Let handler be the value of the [[ProxyHandler]] internal slot.
+ // 2. If handler is null, throw a TypeError exception.
+ // 3. Assert: Type(handler) is Object.
+ // 4. Let target be the value of the [[ProxyTarget]] internal slot.
+ if (proxy->IsRevoked()) {
+ THROW_NEW_ERROR(isolate,
+ NewTypeError(MessageTemplate::kProxyRevoked, trap_name),
+ HeapObject);
+ }
+ Handle<JSReceiver> target(JSReceiver::cast(proxy->target()), isolate);
+ Handle<JSReceiver> handler(JSReceiver::cast(proxy->handler()), isolate);
+
+ // 5. Let trap be ? GetMethod(handler, "getPrototypeOf").
+ Handle<Object> trap;
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, trap,
+ Object::GetMethod(handler, trap_name), HeapObject);
+ // 6. If trap is undefined, then return target.[[GetPrototypeOf]]().
+ if (trap->IsUndefined(isolate)) {
+ return JSReceiver::GetPrototype(isolate, target);
+ }
+ // 7. Let handlerProto be ? Call(trap, handler, «target»).
+ Handle<Object> argv[] = {target};
+ Handle<Object> handler_proto;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, handler_proto,
+ Execution::Call(isolate, trap, handler, arraysize(argv), argv),
+ HeapObject);
+ // 8. If Type(handlerProto) is neither Object nor Null, throw a TypeError.
+ if (!(handler_proto->IsJSReceiver() || handler_proto->IsNull(isolate))) {
+ THROW_NEW_ERROR(isolate,
+ NewTypeError(MessageTemplate::kProxyGetPrototypeOfInvalid),
+ HeapObject);
+ }
+ // 9. Let extensibleTarget be ? IsExtensible(target).
+ Maybe<bool> is_extensible = JSReceiver::IsExtensible(target);
+ MAYBE_RETURN(is_extensible, MaybeHandle<HeapObject>());
+ // 10. If extensibleTarget is true, return handlerProto.
+ if (is_extensible.FromJust()) return Handle<HeapObject>::cast(handler_proto);
+ // 11. Let targetProto be ? target.[[GetPrototypeOf]]().
+ Handle<HeapObject> target_proto;
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, target_proto,
+ JSReceiver::GetPrototype(isolate, target),
+ HeapObject);
+ // 12. If SameValue(handlerProto, targetProto) is false, throw a TypeError.
+ if (!handler_proto->SameValue(*target_proto)) {
+ THROW_NEW_ERROR(
+ isolate,
+ NewTypeError(MessageTemplate::kProxyGetPrototypeOfNonExtensible),
+ HeapObject);
+ }
+ // 13. Return handlerProto.
+ return Handle<HeapObject>::cast(handler_proto);
+}
+
+MaybeHandle<Object> Object::GetPropertyWithAccessor(LookupIterator* it) {
+ Isolate* isolate = it->isolate();
+ Handle<Object> structure = it->GetAccessors();
+ Handle<Object> receiver = it->GetReceiver();
+ // In case of a global IC, the receiver is the global object. Replace it
+ // with the global proxy.
+ if (receiver->IsJSGlobalObject()) {
+ receiver = handle(JSGlobalObject::cast(*receiver).global_proxy(), isolate);
+ }
+
+ // We should never get here to initialize a const with the hole value since a
+ // const declaration would conflict with the getter.
+ DCHECK(!structure->IsForeign());
+
+ // API style callbacks.
+ Handle<JSObject> holder = it->GetHolder<JSObject>();
+ if (structure->IsAccessorInfo()) {
+ Handle<Name> name = it->GetName();
+ Handle<AccessorInfo> info = Handle<AccessorInfo>::cast(structure);
+ if (!info->IsCompatibleReceiver(*receiver)) {
+ THROW_NEW_ERROR(isolate,
+ NewTypeError(MessageTemplate::kIncompatibleMethodReceiver,
+ name, receiver),
+ Object);
+ }
+
+ if (!info->has_getter()) return isolate->factory()->undefined_value();
+
+ if (info->is_sloppy() && !receiver->IsJSReceiver()) {
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, receiver,
+ Object::ConvertReceiver(isolate, receiver),
+ Object);
+ }
+
+ PropertyCallbackArguments args(isolate, info->data(), *receiver, *holder,
+ Just(kDontThrow));
+ Handle<Object> result = args.CallAccessorGetter(info, name);
+ RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
+ if (result.is_null()) return isolate->factory()->undefined_value();
+ Handle<Object> reboxed_result = handle(*result, isolate);
+ if (info->replace_on_access() && receiver->IsJSReceiver()) {
+ RETURN_ON_EXCEPTION(isolate,
+ Accessors::ReplaceAccessorWithDataProperty(
+ receiver, holder, name, result),
+ Object);
+ }
+ return reboxed_result;
+ }
+
+ // AccessorPair with 'cached' private property.
+ if (it->TryLookupCachedProperty()) {
+ return Object::GetProperty(it);
+ }
+
+ // Regular accessor.
+ Handle<Object> getter(AccessorPair::cast(*structure).getter(), isolate);
+ if (getter->IsFunctionTemplateInfo()) {
+ SaveAndSwitchContext save(isolate, *holder->GetCreationContext());
+ return Builtins::InvokeApiFunction(
+ isolate, false, Handle<FunctionTemplateInfo>::cast(getter), receiver, 0,
+ nullptr, isolate->factory()->undefined_value());
+ } else if (getter->IsCallable()) {
+ // TODO(rossberg): nicer would be to cast to some JSCallable here...
+ return Object::GetPropertyWithDefinedGetter(
+ receiver, Handle<JSReceiver>::cast(getter));
+ }
+ // Getter is not a function.
+ return isolate->factory()->undefined_value();
+}
+
+// static
+Address AccessorInfo::redirect(Address address, AccessorComponent component) {
+ ApiFunction fun(address);
+ DCHECK_EQ(ACCESSOR_GETTER, component);
+ ExternalReference::Type type = ExternalReference::DIRECT_GETTER_CALL;
+ return ExternalReference::Create(&fun, type).address();
+}
+
+Address AccessorInfo::redirected_getter() const {
+ Address accessor = v8::ToCData<Address>(getter());
+ if (accessor == kNullAddress) return kNullAddress;
+ return redirect(accessor, ACCESSOR_GETTER);
+}
+
+Address CallHandlerInfo::redirected_callback() const {
+ Address address = v8::ToCData<Address>(callback());
+ ApiFunction fun(address);
+ ExternalReference::Type type = ExternalReference::DIRECT_API_CALL;
+ return ExternalReference::Create(&fun, type).address();
+}
+
+bool AccessorInfo::IsCompatibleReceiverMap(Handle<AccessorInfo> info,
+ Handle<Map> map) {
+ if (!info->HasExpectedReceiverType()) return true;
+ if (!map->IsJSObjectMap()) return false;
+ return FunctionTemplateInfo::cast(info->expected_receiver_type())
+ .IsTemplateFor(*map);
+}
+
+Maybe<bool> Object::SetPropertyWithAccessor(
+ LookupIterator* it, Handle<Object> value,
+ Maybe<ShouldThrow> maybe_should_throw) {
+ Isolate* isolate = it->isolate();
+ Handle<Object> structure = it->GetAccessors();
+ Handle<Object> receiver = it->GetReceiver();
+ // In case of global IC, the receiver is the global object. Replace it
+ // with the global proxy.
+ if (receiver->IsJSGlobalObject()) {
+ receiver = handle(JSGlobalObject::cast(*receiver).global_proxy(), isolate);
+ }
+
+ // We should never get here to initialize a const with the hole value since a
+ // const declaration would conflict with the setter.
+ DCHECK(!structure->IsForeign());
+
+ // API style callbacks.
+ Handle<JSObject> holder = it->GetHolder<JSObject>();
+ if (structure->IsAccessorInfo()) {
+ Handle<Name> name = it->GetName();
+ Handle<AccessorInfo> info = Handle<AccessorInfo>::cast(structure);
+ if (!info->IsCompatibleReceiver(*receiver)) {
+ isolate->Throw(*isolate->factory()->NewTypeError(
+ MessageTemplate::kIncompatibleMethodReceiver, name, receiver));
+ return Nothing<bool>();
+ }
+
+ if (!info->has_setter()) {
+ // TODO(verwaest): We should not get here anymore once all AccessorInfos
+ // are marked as special_data_property. They cannot both be writable and
+ // not have a setter.
+ return Just(true);
+ }
+
+ if (info->is_sloppy() && !receiver->IsJSReceiver()) {
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, receiver, Object::ConvertReceiver(isolate, receiver),
+ Nothing<bool>());
+ }
+
+ // The actual type of setter callback is either
+ // v8::AccessorNameSetterCallback or
+ // i::Accessors::AccessorNameBooleanSetterCallback, depending on whether the
+ // AccessorInfo was created by the API or internally (see accessors.cc).
+ // Here we handle both cases using GenericNamedPropertySetterCallback and
+ // its Call method.
+ PropertyCallbackArguments args(isolate, info->data(), *receiver, *holder,
+ maybe_should_throw);
+ Handle<Object> result = args.CallAccessorSetter(info, name, value);
+ // In the case of AccessorNameSetterCallback, we know that the result value
+ // cannot have been set, so the result of Call will be null. In the case of
+ // AccessorNameBooleanSetterCallback, the result will either be null
+ // (signalling an exception) or a boolean Oddball.
+ RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate, Nothing<bool>());
+ if (result.is_null()) return Just(true);
+ DCHECK(result->BooleanValue(isolate) ||
+ GetShouldThrow(isolate, maybe_should_throw) == kDontThrow);
+ return Just(result->BooleanValue(isolate));
+ }
+
+ // Regular accessor.
+ Handle<Object> setter(AccessorPair::cast(*structure).setter(), isolate);
+ if (setter->IsFunctionTemplateInfo()) {
+ SaveAndSwitchContext save(isolate, *holder->GetCreationContext());
+ Handle<Object> argv[] = {value};
+ RETURN_ON_EXCEPTION_VALUE(
+ isolate,
+ Builtins::InvokeApiFunction(isolate, false,
+ Handle<FunctionTemplateInfo>::cast(setter),
+ receiver, arraysize(argv), argv,
+ isolate->factory()->undefined_value()),
+ Nothing<bool>());
+ return Just(true);
+ } else if (setter->IsCallable()) {
+ // TODO(rossberg): nicer would be to cast to some JSCallable here...
+ return SetPropertyWithDefinedSetter(
+ receiver, Handle<JSReceiver>::cast(setter), value, maybe_should_throw);
+ }
+
+ RETURN_FAILURE(isolate, GetShouldThrow(isolate, maybe_should_throw),
+ NewTypeError(MessageTemplate::kNoSetterInCallback,
+ it->GetName(), it->GetHolder<JSObject>()));
+}
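+
+// Informal example of the kNoSetterInCallback failure path above
+// (illustrative only):
+//   "use strict";
+//   const o = {};
+//   Object.defineProperty(o, "x", { get() { return 1; } });
+//   o.x = 2;  // TypeError in strict mode: the property has only a getter;
+//             // in sloppy mode the store is silently ignored.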
+
+MaybeHandle<Object> Object::GetPropertyWithDefinedGetter(
+ Handle<Object> receiver, Handle<JSReceiver> getter) {
+ Isolate* isolate = getter->GetIsolate();
+
+ // Platforms with simulators like arm/arm64 expose a funny issue. If the
+ // simulator has a separate JS stack pointer from the C++ stack pointer, it
+ // can miss C++ stack overflows in the stack guard at the start of JavaScript
+ // functions. It would be very expensive to check the C++ stack pointer at
+ // that location. The best solution seems to be to break the impasse by
+ // adding checks at possible recursion points. What's more, we don't put
+ // this stack check behind the USE_SIMULATOR define in order to keep
+ // behavior the same between hardware and simulators.
+ StackLimitCheck check(isolate);
+ if (check.JsHasOverflowed()) {
+ isolate->StackOverflow();
+ return MaybeHandle<Object>();
+ }
+
+ return Execution::Call(isolate, getter, receiver, 0, nullptr);
+}
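+
+// The stack check above is what turns unbounded getter recursion into a
+// catchable RangeError instead of a native stack overflow (illustrative
+// only):
+//   const o = { get x() { return o.x; } };
+//   o.x;  // RangeError: Maximum call stack size exceeded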
+
+Maybe<bool> Object::SetPropertyWithDefinedSetter(
+ Handle<Object> receiver, Handle<JSReceiver> setter, Handle<Object> value,
+ Maybe<ShouldThrow> should_throw) {
+ Isolate* isolate = setter->GetIsolate();
+
+ Handle<Object> argv[] = {value};
+ RETURN_ON_EXCEPTION_VALUE(
+ isolate,
+ Execution::Call(isolate, setter, receiver, arraysize(argv), argv),
+ Nothing<bool>());
+ return Just(true);
+}
+
+Map Object::GetPrototypeChainRootMap(Isolate* isolate) const {
+ DisallowHeapAllocation no_alloc;
+ if (IsSmi()) {
+ Context native_context = isolate->context().native_context();
+ return native_context.number_function().initial_map();
+ }
+
+ const HeapObject heap_object = HeapObject::cast(*this);
+ return heap_object.map().GetPrototypeChainRootMap(isolate);
+}
+
+Smi Object::GetOrCreateHash(Isolate* isolate) {
+ DisallowHeapAllocation no_gc;
+ Object hash = Object::GetSimpleHash(*this);
+ if (hash.IsSmi()) return Smi::cast(hash);
+
+ DCHECK(IsJSReceiver());
+ return JSReceiver::cast(*this).GetOrCreateIdentityHash(isolate);
+}
+
+bool Object::SameValue(Object other) {
+ if (other == *this) return true;
+
+ if (IsNumber() && other.IsNumber()) {
+ return SameNumberValue(Number(), other.Number());
+ }
+ if (IsString() && other.IsString()) {
+ return String::cast(*this).Equals(String::cast(other));
+ }
+ if (IsBigInt() && other.IsBigInt()) {
+ return BigInt::EqualToBigInt(BigInt::cast(*this), BigInt::cast(other));
+ }
+ return false;
+}
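+
+// SameValue is the comparison exposed as Object.is; unlike ===, it treats
+// NaN as equal to NaN and distinguishes the two zeros (illustrative only):
+//   Object.is(NaN, NaN);  // true
+//   Object.is(+0, -0);    // false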
+
+bool Object::SameValueZero(Object other) {
+ if (other == *this) return true;
+
+ if (IsNumber() && other.IsNumber()) {
+ double this_value = Number();
+ double other_value = other.Number();
+ // +0 == -0 is true
+ return this_value == other_value ||
+ (std::isnan(this_value) && std::isnan(other_value));
+ }
+ if (IsString() && other.IsString()) {
+ return String::cast(*this).Equals(String::cast(other));
+ }
+ if (IsBigInt() && other.IsBigInt()) {
+ return BigInt::EqualToBigInt(BigInt::cast(*this), BigInt::cast(other));
+ }
+ return false;
+}
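+
+// SameValueZero differs from SameValue only in collapsing the two zeros;
+// it is the comparison used by Array.prototype.includes and by Map/Set
+// keys (illustrative only):
+//   [NaN].includes(NaN);    // true
+//   new Set([+0]).has(-0);  // true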
+
+MaybeHandle<Object> Object::ArraySpeciesConstructor(
+ Isolate* isolate, Handle<Object> original_array) {
+ Handle<Object> default_species = isolate->array_function();
+ if (original_array->IsJSArray() &&
+ Handle<JSArray>::cast(original_array)->HasArrayPrototype(isolate) &&
+ isolate->IsArraySpeciesLookupChainIntact()) {
+ return default_species;
+ }
+ Handle<Object> constructor = isolate->factory()->undefined_value();
+ Maybe<bool> is_array = Object::IsArray(original_array);
+ MAYBE_RETURN_NULL(is_array);
+ if (is_array.FromJust()) {
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, constructor,
+ Object::GetProperty(isolate, original_array,
+ isolate->factory()->constructor_string()),
+ Object);
+ if (constructor->IsConstructor()) {
+ Handle<Context> constructor_context;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, constructor_context,
+ JSReceiver::GetFunctionRealm(Handle<JSReceiver>::cast(constructor)),
+ Object);
+ if (*constructor_context != *isolate->native_context() &&
+ *constructor == constructor_context->array_function()) {
+ constructor = isolate->factory()->undefined_value();
+ }
+ }
+ if (constructor->IsJSReceiver()) {
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, constructor,
+ JSReceiver::GetProperty(isolate,
+ Handle<JSReceiver>::cast(constructor),
+ isolate->factory()->species_symbol()),
+ Object);
+ if (constructor->IsNull(isolate)) {
+ constructor = isolate->factory()->undefined_value();
+ }
+ }
+ }
+ if (constructor->IsUndefined(isolate)) {
+ return default_species;
+ } else {
+ if (!constructor->IsConstructor()) {
+ THROW_NEW_ERROR(isolate,
+ NewTypeError(MessageTemplate::kSpeciesNotConstructor),
+ Object);
+ }
+ return constructor;
+ }
+}
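+
+// Informal sketch of what the species lookup above makes observable
+// (illustrative only):
+//   class MyArray extends Array {}
+//   new MyArray(1, 2).map(x => x);  // an instance of MyArray, because
+//                                   // MyArray[Symbol.species] === MyArray
+// The fast path at the top skips all of this when the array, its
+// prototype chain, and Symbol.species are untouched.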
+
+// ES6 section 7.3.20 SpeciesConstructor ( O, defaultConstructor )
+V8_WARN_UNUSED_RESULT MaybeHandle<Object> Object::SpeciesConstructor(
+ Isolate* isolate, Handle<JSReceiver> recv,
+ Handle<JSFunction> default_ctor) {
+ Handle<Object> ctor_obj;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, ctor_obj,
+ JSObject::GetProperty(isolate, recv,
+ isolate->factory()->constructor_string()),
+ Object);
+
+ if (ctor_obj->IsUndefined(isolate)) return default_ctor;
+
+ if (!ctor_obj->IsJSReceiver()) {
+ THROW_NEW_ERROR(isolate,
+ NewTypeError(MessageTemplate::kConstructorNotReceiver),
+ Object);
+ }
+
+ Handle<JSReceiver> ctor = Handle<JSReceiver>::cast(ctor_obj);
+
+ Handle<Object> species;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, species,
+ JSObject::GetProperty(isolate, ctor,
+ isolate->factory()->species_symbol()),
+ Object);
+
+ if (species->IsNullOrUndefined(isolate)) {
+ return default_ctor;
+ }
+
+ if (species->IsConstructor()) return species;
+
+ THROW_NEW_ERROR(
+ isolate, NewTypeError(MessageTemplate::kSpeciesNotConstructor), Object);
+}
+
+bool Object::IterationHasObservableEffects() {
+ // Check that this object is an array.
+ if (!IsJSArray()) return true;
+ JSArray array = JSArray::cast(*this);
+ Isolate* isolate = array.GetIsolate();
+
+#ifdef V8_ENABLE_FORCE_SLOW_PATH
+ if (isolate->force_slow_path()) return true;
+#endif
+
+ // Check that we have the original ArrayPrototype.
+ if (!array.map().prototype().IsJSObject()) return true;
+ JSObject array_proto = JSObject::cast(array.map().prototype());
+ if (!isolate->is_initial_array_prototype(array_proto)) return true;
+
+ // Check that the ArrayPrototype hasn't been modified in a way that would
+ // affect iteration.
+ if (!isolate->IsArrayIteratorLookupChainIntact()) return true;
+
+ // For FastPacked kinds, iteration will have the same effect as simply
+ // accessing each property in order.
+ ElementsKind array_kind = array.GetElementsKind();
+ if (IsFastPackedElementsKind(array_kind)) return false;
+
+ // For FastHoley kinds, an element access on a hole would cause a lookup on
+ // the prototype. This could have different results if the prototype has been
+ // changed.
+ if (IsHoleyElementsKind(array_kind) &&
+ isolate->IsNoElementsProtectorIntact()) {
+ return false;
+ }
+ return true;
+}
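+
+// Example of the distinction drawn above (illustrative only): iterating
+// [1, 2, 3] (packed) is unobservable and can be reduced to plain indexed
+// loads, whereas iterating [1, , 3] (holey) may read the hole from
+// Array.prototype, which user code can patch:
+//   Object.defineProperty(Array.prototype, 1, { get() { return 42; } });
+//   [...[1, , 3]];  // [1, 42, 3]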
+
+void Object::ShortPrint(FILE* out) const {
+ OFStream os(out);
+ os << Brief(*this);
+}
+
+void Object::ShortPrint(StringStream* accumulator) const {
+ std::ostringstream os;
+ os << Brief(*this);
+ accumulator->Add(os.str().c_str());
+}
+
+void Object::ShortPrint(std::ostream& os) const { os << Brief(*this); }
+
+std::ostream& operator<<(std::ostream& os, const Object& obj) {
+ obj.ShortPrint(os);
+ return os;
+}
+
+std::ostream& operator<<(std::ostream& os, const Brief& v) {
+ MaybeObject maybe_object(v.value);
+ Smi smi;
+ HeapObject heap_object;
+ if (maybe_object->ToSmi(&smi)) {
+ smi.SmiPrint(os);
+ } else if (maybe_object->IsCleared()) {
+ os << "[cleared]";
+ } else if (maybe_object->GetHeapObjectIfWeak(&heap_object)) {
+ os << "[weak] ";
+ heap_object.HeapObjectShortPrint(os);
+ } else if (maybe_object->GetHeapObjectIfStrong(&heap_object)) {
+ heap_object.HeapObjectShortPrint(os);
+ } else {
+ UNREACHABLE();
+ }
+ return os;
+}
+
+void Smi::SmiPrint(std::ostream& os) const { // NOLINT
+ os << value();
+}
+
+void HeapObject::HeapObjectShortPrint(std::ostream& os) { // NOLINT
+ os << AsHex::Address(this->ptr()) << " ";
+
+ if (IsString()) {
+ HeapStringAllocator allocator;
+ StringStream accumulator(&allocator);
+ String::cast(*this).StringShortPrint(&accumulator);
+ os << accumulator.ToCString().get();
+ return;
+ }
+ if (IsJSObject()) {
+ HeapStringAllocator allocator;
+ StringStream accumulator(&allocator);
+ JSObject::cast(*this).JSObjectShortPrint(&accumulator);
+ os << accumulator.ToCString().get();
+ return;
+ }
+ switch (map().instance_type()) {
+ case MAP_TYPE: {
+ os << "<Map";
+ Map mapInstance = Map::cast(*this);
+ if (mapInstance.IsJSObjectMap()) {
+ os << "(" << ElementsKindToString(mapInstance.elements_kind()) << ")";
+ } else if (mapInstance.instance_size() != kVariableSizeSentinel) {
+ os << "[" << mapInstance.instance_size() << "]";
+ }
+ os << ">";
+ } break;
+ case AWAIT_CONTEXT_TYPE: {
+ os << "<AwaitContext generator= ";
+ HeapStringAllocator allocator;
+ StringStream accumulator(&allocator);
+ Context::cast(*this).extension().ShortPrint(&accumulator);
+ os << accumulator.ToCString().get();
+ os << '>';
+ break;
+ }
+ case BLOCK_CONTEXT_TYPE:
+ os << "<BlockContext[" << Context::cast(*this).length() << "]>";
+ break;
+ case CATCH_CONTEXT_TYPE:
+ os << "<CatchContext[" << Context::cast(*this).length() << "]>";
+ break;
+ case DEBUG_EVALUATE_CONTEXT_TYPE:
+ os << "<DebugEvaluateContext[" << Context::cast(*this).length() << "]>";
+ break;
+ case EVAL_CONTEXT_TYPE:
+ os << "<EvalContext[" << Context::cast(*this).length() << "]>";
+ break;
+ case FUNCTION_CONTEXT_TYPE:
+ os << "<FunctionContext[" << Context::cast(*this).length() << "]>";
+ break;
+ case MODULE_CONTEXT_TYPE:
+ os << "<ModuleContext[" << Context::cast(*this).length() << "]>";
+ break;
+ case NATIVE_CONTEXT_TYPE:
+ os << "<NativeContext[" << Context::cast(*this).length() << "]>";
+ break;
+ case SCRIPT_CONTEXT_TYPE:
+ os << "<ScriptContext[" << Context::cast(*this).length() << "]>";
+ break;
+ case WITH_CONTEXT_TYPE:
+ os << "<WithContext[" << Context::cast(*this).length() << "]>";
+ break;
+ case SCRIPT_CONTEXT_TABLE_TYPE:
+ os << "<ScriptContextTable[" << FixedArray::cast(*this).length() << "]>";
+ break;
+ case HASH_TABLE_TYPE:
+ os << "<HashTable[" << FixedArray::cast(*this).length() << "]>";
+ break;
+ case ORDERED_HASH_MAP_TYPE:
+ os << "<OrderedHashMap[" << FixedArray::cast(*this).length() << "]>";
+ break;
+ case ORDERED_HASH_SET_TYPE:
+ os << "<OrderedHashSet[" << FixedArray::cast(*this).length() << "]>";
+ break;
+ case ORDERED_NAME_DICTIONARY_TYPE:
+ os << "<OrderedNameDictionary[" << FixedArray::cast(*this).length()
+ << "]>";
+ break;
+ case NAME_DICTIONARY_TYPE:
+ os << "<NameDictionary[" << FixedArray::cast(*this).length() << "]>";
+ break;
+ case GLOBAL_DICTIONARY_TYPE:
+ os << "<GlobalDictionary[" << FixedArray::cast(*this).length() << "]>";
+ break;
+ case NUMBER_DICTIONARY_TYPE:
+ os << "<NumberDictionary[" << FixedArray::cast(*this).length() << "]>";
+ break;
+ case SIMPLE_NUMBER_DICTIONARY_TYPE:
+ os << "<SimpleNumberDictionary[" << FixedArray::cast(*this).length()
+ << "]>";
+ break;
+ case STRING_TABLE_TYPE:
+ os << "<StringTable[" << FixedArray::cast(*this).length() << "]>";
+ break;
+ case FIXED_ARRAY_TYPE:
+ os << "<FixedArray[" << FixedArray::cast(*this).length() << "]>";
+ break;
+ case OBJECT_BOILERPLATE_DESCRIPTION_TYPE:
+ os << "<ObjectBoilerplateDescription[" << FixedArray::cast(*this).length()
+ << "]>";
+ break;
+ case FIXED_DOUBLE_ARRAY_TYPE:
+ os << "<FixedDoubleArray[" << FixedDoubleArray::cast(*this).length()
+ << "]>";
+ break;
+ case BYTE_ARRAY_TYPE:
+ os << "<ByteArray[" << ByteArray::cast(*this).length() << "]>";
+ break;
+ case BYTECODE_ARRAY_TYPE:
+ os << "<BytecodeArray[" << BytecodeArray::cast(*this).length() << "]>";
+ break;
+ case DESCRIPTOR_ARRAY_TYPE:
+ os << "<DescriptorArray["
+ << DescriptorArray::cast(*this).number_of_descriptors() << "]>";
+ break;
+ case TRANSITION_ARRAY_TYPE:
+ os << "<TransitionArray[" << TransitionArray::cast(*this).length()
+ << "]>";
+ break;
+ case PROPERTY_ARRAY_TYPE:
+ os << "<PropertyArray[" << PropertyArray::cast(*this).length() << "]>";
+ break;
+ case FEEDBACK_CELL_TYPE: {
+ {
+ ReadOnlyRoots roots = GetReadOnlyRoots();
+ os << "<FeedbackCell[";
+ if (map() == roots.no_closures_cell_map()) {
+ os << "no closures";
+ } else if (map() == roots.one_closure_cell_map()) {
+ os << "one closure";
+ } else if (map() == roots.many_closures_cell_map()) {
+ os << "many closures";
+ } else {
+ os << "!!!INVALID MAP!!!";
+ }
+ os << "]>";
+ }
+ break;
+ }
+ case CLOSURE_FEEDBACK_CELL_ARRAY_TYPE:
+ os << "<ClosureFeedbackCellArray["
+ << ClosureFeedbackCellArray::cast(*this).length() << "]>";
+ break;
+ case FEEDBACK_VECTOR_TYPE:
+ os << "<FeedbackVector[" << FeedbackVector::cast(*this).length() << "]>";
+ break;
+ case FREE_SPACE_TYPE:
+ os << "<FreeSpace[" << FreeSpace::cast(*this).size() << "]>";
+ break;
+
+ case PREPARSE_DATA_TYPE: {
+ PreparseData data = PreparseData::cast(*this);
+ os << "<PreparseData[data=" << data.data_length()
+ << " children=" << data.children_length() << "]>";
+ break;
+ }
+
+ case UNCOMPILED_DATA_WITHOUT_PREPARSE_DATA_TYPE: {
+ UncompiledDataWithoutPreparseData data =
+ UncompiledDataWithoutPreparseData::cast(*this);
+ os << "<UncompiledDataWithoutPreparseData (" << data.start_position()
+ << ", " << data.end_position() << ")]>";
+ break;
+ }
+
+ case UNCOMPILED_DATA_WITH_PREPARSE_DATA_TYPE: {
+ UncompiledDataWithPreparseData data =
+ UncompiledDataWithPreparseData::cast(*this);
+ os << "<UncompiledDataWithPreparseData (" << data.start_position() << ", "
+ << data.end_position() << ") preparsed=" << Brief(data.preparse_data())
+ << ">";
+ break;
+ }
+
+ case SHARED_FUNCTION_INFO_TYPE: {
+ SharedFunctionInfo shared = SharedFunctionInfo::cast(*this);
+ std::unique_ptr<char[]> debug_name = shared.DebugName().ToCString();
+ if (debug_name[0] != 0) {
+ os << "<SharedFunctionInfo " << debug_name.get() << ">";
+ } else {
+ os << "<SharedFunctionInfo>";
+ }
+ break;
+ }
+ case JS_MESSAGE_OBJECT_TYPE:
+ os << "<JSMessageObject>";
+ break;
+#define MAKE_STRUCT_CASE(TYPE, Name, name) \
+ case TYPE: \
+ os << "<" #Name; \
+ Name::cast(*this).BriefPrintDetails(os); \
+ os << ">"; \
+ break;
+ STRUCT_LIST(MAKE_STRUCT_CASE)
+#undef MAKE_STRUCT_CASE
+ case ALLOCATION_SITE_TYPE: {
+ os << "<AllocationSite";
+ AllocationSite::cast(*this).BriefPrintDetails(os);
+ os << ">";
+ break;
+ }
+ case SCOPE_INFO_TYPE: {
+ ScopeInfo scope = ScopeInfo::cast(*this);
+ os << "<ScopeInfo";
+ if (scope.length()) os << " " << scope.scope_type() << " ";
+ os << "[" << scope.length() << "]>";
+ break;
+ }
+ case CODE_TYPE: {
+ Code code = Code::cast(*this);
+ os << "<Code " << Code::Kind2String(code.kind());
+ if (code.is_builtin()) {
+ os << " " << Builtins::name(code.builtin_index());
+ }
+ os << ">";
+ break;
+ }
+ case ODDBALL_TYPE: {
+ if (IsUndefined()) {
+ os << "<undefined>";
+ } else if (IsTheHole()) {
+ os << "<the_hole>";
+ } else if (IsNull()) {
+ os << "<null>";
+ } else if (IsTrue()) {
+ os << "<true>";
+ } else if (IsFalse()) {
+ os << "<false>";
+ } else {
+ os << "<Odd Oddball: ";
+ os << Oddball::cast(*this).to_string().ToCString().get();
+ os << ">";
+ }
+ break;
+ }
+ case SYMBOL_TYPE: {
+ Symbol symbol = Symbol::cast(*this);
+ symbol.SymbolShortPrint(os);
+ break;
+ }
+ case HEAP_NUMBER_TYPE: {
+ os << "<HeapNumber ";
+ HeapNumber::cast(*this).HeapNumberPrint(os);
+ os << ">";
+ break;
+ }
+ case MUTABLE_HEAP_NUMBER_TYPE: {
+ os << "<MutableHeapNumber ";
+ MutableHeapNumber::cast(*this).MutableHeapNumberPrint(os);
+ os << '>';
+ break;
+ }
+ case BIGINT_TYPE: {
+ os << "<BigInt ";
+ BigInt::cast(*this).BigIntShortPrint(os);
+ os << ">";
+ break;
+ }
+ case JS_PROXY_TYPE:
+ os << "<JSProxy>";
+ break;
+ case FOREIGN_TYPE:
+ os << "<Foreign>";
+ break;
+ case CELL_TYPE: {
+ os << "<Cell value= ";
+ HeapStringAllocator allocator;
+ StringStream accumulator(&allocator);
+ Cell::cast(*this).value().ShortPrint(&accumulator);
+ os << accumulator.ToCString().get();
+ os << '>';
+ break;
+ }
+ case PROPERTY_CELL_TYPE: {
+ PropertyCell cell = PropertyCell::cast(*this);
+ os << "<PropertyCell name=";
+ cell.name().ShortPrint(os);
+ os << " value=";
+ HeapStringAllocator allocator;
+ StringStream accumulator(&allocator);
+ cell.value().ShortPrint(&accumulator);
+ os << accumulator.ToCString().get();
+ os << '>';
+ break;
+ }
+ case CALL_HANDLER_INFO_TYPE: {
+ CallHandlerInfo info = CallHandlerInfo::cast(*this);
+ os << "<CallHandlerInfo ";
+ os << "callback= " << Brief(info.callback());
+ os << ", js_callback= " << Brief(info.js_callback());
+ os << ", data= " << Brief(info.data());
+ if (info.IsSideEffectFreeCallHandlerInfo()) {
+ os << ", side_effect_free= true>";
+ } else {
+ os << ", side_effect_free= false>";
+ }
+ break;
+ }
+ default:
+ os << "<Other heap object (" << map().instance_type() << ")>";
+ break;
+ }
+}
+
+void Struct::BriefPrintDetails(std::ostream& os) {}
+
+void Tuple2::BriefPrintDetails(std::ostream& os) {
+ os << " " << Brief(value1()) << ", " << Brief(value2());
+}
+
+void Tuple3::BriefPrintDetails(std::ostream& os) {
+ os << " " << Brief(value1()) << ", " << Brief(value2()) << ", "
+ << Brief(value3());
+}
+
+void ClassPositions::BriefPrintDetails(std::ostream& os) {
+ os << " " << start() << ", " << end();
+}
+
+void ArrayBoilerplateDescription::BriefPrintDetails(std::ostream& os) {
+ os << " " << elements_kind() << ", " << Brief(constant_elements());
+}
+
+void CallableTask::BriefPrintDetails(std::ostream& os) {
+ os << " callable=" << Brief(callable());
+}
+
+void HeapObject::Iterate(ObjectVisitor* v) { IterateFast<ObjectVisitor>(v); }
+
+void HeapObject::IterateBody(ObjectVisitor* v) {
+ Map m = map();
+ IterateBodyFast<ObjectVisitor>(m, SizeFromMap(m), v);
+}
+
+void HeapObject::IterateBody(Map map, int object_size, ObjectVisitor* v) {
+ IterateBodyFast<ObjectVisitor>(map, object_size, v);
+}
+
+struct CallIsValidSlot {
+ template <typename BodyDescriptor>
+ static bool apply(Map map, HeapObject obj, int offset, int) {
+ return BodyDescriptor::IsValidSlot(map, obj, offset);
+ }
+};
+
+bool HeapObject::IsValidSlot(Map map, int offset) {
+ DCHECK_NE(0, offset);
+ return BodyDescriptorApply<CallIsValidSlot, bool>(map.instance_type(), map,
+ *this, offset, 0);
+}
+
+int HeapObject::SizeFromMap(Map map) const {
+ int instance_size = map.instance_size();
+ if (instance_size != kVariableSizeSentinel) return instance_size;
+ // Only inline the most frequent cases.
+ InstanceType instance_type = map.instance_type();
+ if (IsInRange(instance_type, FIRST_FIXED_ARRAY_TYPE, LAST_FIXED_ARRAY_TYPE)) {
+ return FixedArray::SizeFor(
+ FixedArray::unchecked_cast(*this).synchronized_length());
+ }
+ if (IsInRange(instance_type, FIRST_CONTEXT_TYPE, LAST_CONTEXT_TYPE)) {
+ // Native context has fixed size.
+ DCHECK_NE(instance_type, NATIVE_CONTEXT_TYPE);
+ return Context::SizeFor(Context::unchecked_cast(*this).length());
+ }
+ if (instance_type == ONE_BYTE_STRING_TYPE ||
+ instance_type == ONE_BYTE_INTERNALIZED_STRING_TYPE) {
+ // Strings may get concurrently truncated, hence we have to read their
+ // length with a synchronized access.
+ return SeqOneByteString::SizeFor(
+ SeqOneByteString::unchecked_cast(*this).synchronized_length());
+ }
+ if (instance_type == BYTE_ARRAY_TYPE) {
+ return ByteArray::SizeFor(
+ ByteArray::unchecked_cast(*this).synchronized_length());
+ }
+ if (instance_type == BYTECODE_ARRAY_TYPE) {
+ return BytecodeArray::SizeFor(
+ BytecodeArray::unchecked_cast(*this).synchronized_length());
+ }
+ if (instance_type == FREE_SPACE_TYPE) {
+ return FreeSpace::unchecked_cast(*this).relaxed_read_size();
+ }
+ if (instance_type == STRING_TYPE ||
+ instance_type == INTERNALIZED_STRING_TYPE) {
+ // Strings may get concurrently truncated, hence we have to read their
+ // length with a synchronized access.
+ return SeqTwoByteString::SizeFor(
+ SeqTwoByteString::unchecked_cast(*this).synchronized_length());
+ }
+ if (instance_type == FIXED_DOUBLE_ARRAY_TYPE) {
+ return FixedDoubleArray::SizeFor(
+ FixedDoubleArray::unchecked_cast(*this).synchronized_length());
+ }
+ if (instance_type == FEEDBACK_METADATA_TYPE) {
+ return FeedbackMetadata::SizeFor(
+ FeedbackMetadata::unchecked_cast(*this).synchronized_slot_count());
+ }
+ if (instance_type == DESCRIPTOR_ARRAY_TYPE) {
+ return DescriptorArray::SizeFor(
+ DescriptorArray::unchecked_cast(*this).number_of_all_descriptors());
+ }
+ if (IsInRange(instance_type, FIRST_WEAK_FIXED_ARRAY_TYPE,
+ LAST_WEAK_FIXED_ARRAY_TYPE)) {
+ return WeakFixedArray::SizeFor(
+ WeakFixedArray::unchecked_cast(*this).synchronized_length());
+ }
+ if (instance_type == WEAK_ARRAY_LIST_TYPE) {
+ return WeakArrayList::SizeForCapacity(
+ WeakArrayList::unchecked_cast(*this).synchronized_capacity());
+ }
+ if (instance_type == SMALL_ORDERED_HASH_SET_TYPE) {
+ return SmallOrderedHashSet::SizeFor(
+ SmallOrderedHashSet::unchecked_cast(*this).Capacity());
+ }
+ if (instance_type == SMALL_ORDERED_HASH_MAP_TYPE) {
+ return SmallOrderedHashMap::SizeFor(
+ SmallOrderedHashMap::unchecked_cast(*this).Capacity());
+ }
+ if (instance_type == SMALL_ORDERED_NAME_DICTIONARY_TYPE) {
+ return SmallOrderedNameDictionary::SizeFor(
+ SmallOrderedNameDictionary::unchecked_cast(*this).Capacity());
+ }
+ if (instance_type == PROPERTY_ARRAY_TYPE) {
+ return PropertyArray::SizeFor(
+ PropertyArray::cast(*this).synchronized_length());
+ }
+ if (instance_type == FEEDBACK_VECTOR_TYPE) {
+ return FeedbackVector::SizeFor(
+ FeedbackVector::unchecked_cast(*this).length());
+ }
+ if (instance_type == BIGINT_TYPE) {
+ return BigInt::SizeFor(BigInt::unchecked_cast(*this).length());
+ }
+ if (instance_type == PREPARSE_DATA_TYPE) {
+ PreparseData data = PreparseData::unchecked_cast(*this);
+ return PreparseData::SizeFor(data.data_length(), data.children_length());
+ }
+ if (instance_type == CODE_TYPE) {
+ return Code::unchecked_cast(*this).CodeSize();
+ }
+ DCHECK_EQ(instance_type, EMBEDDER_DATA_ARRAY_TYPE);
+ return EmbedderDataArray::SizeFor(
+ EmbedderDataArray::unchecked_cast(*this).length());
+}
+
+bool HeapObject::NeedsRehashing() const {
+ switch (map().instance_type()) {
+ case DESCRIPTOR_ARRAY_TYPE:
+ return DescriptorArray::cast(*this).number_of_descriptors() > 1;
+ case TRANSITION_ARRAY_TYPE:
+ return TransitionArray::cast(*this).number_of_entries() > 1;
+ case ORDERED_HASH_MAP_TYPE:
+ return OrderedHashMap::cast(*this).NumberOfElements() > 0;
+ case ORDERED_HASH_SET_TYPE:
+ return OrderedHashSet::cast(*this).NumberOfElements() > 0;
+ case NAME_DICTIONARY_TYPE:
+ case GLOBAL_DICTIONARY_TYPE:
+ case NUMBER_DICTIONARY_TYPE:
+ case SIMPLE_NUMBER_DICTIONARY_TYPE:
+ case STRING_TABLE_TYPE:
+ case HASH_TABLE_TYPE:
+ case SMALL_ORDERED_HASH_MAP_TYPE:
+ case SMALL_ORDERED_HASH_SET_TYPE:
+ case SMALL_ORDERED_NAME_DICTIONARY_TYPE:
+ return true;
+ default:
+ return false;
+ }
+}
+
+bool HeapObject::CanBeRehashed() const {
+ DCHECK(NeedsRehashing());
+ switch (map().instance_type()) {
+ case ORDERED_HASH_MAP_TYPE:
+ case ORDERED_HASH_SET_TYPE:
+ case ORDERED_NAME_DICTIONARY_TYPE:
+ // TODO(yangguo): actually support rehashing OrderedHash{Map,Set}.
+ return false;
+ case NAME_DICTIONARY_TYPE:
+ case GLOBAL_DICTIONARY_TYPE:
+ case NUMBER_DICTIONARY_TYPE:
+ case SIMPLE_NUMBER_DICTIONARY_TYPE:
+ case STRING_TABLE_TYPE:
+ return true;
+ case DESCRIPTOR_ARRAY_TYPE:
+ return true;
+ case TRANSITION_ARRAY_TYPE:
+ return true;
+ case SMALL_ORDERED_HASH_MAP_TYPE:
+ return SmallOrderedHashMap::cast(*this).NumberOfElements() == 0;
+ case SMALL_ORDERED_HASH_SET_TYPE:
+ return SmallOrderedHashSet::cast(*this).NumberOfElements() == 0;
+ case SMALL_ORDERED_NAME_DICTIONARY_TYPE:
+ return SmallOrderedNameDictionary::cast(*this).NumberOfElements() == 0;
+ default:
+ return false;
+ }
+}
+
+void HeapObject::RehashBasedOnMap(ReadOnlyRoots roots) {
+ switch (map().instance_type()) {
+ case HASH_TABLE_TYPE:
+ UNREACHABLE();
+ case NAME_DICTIONARY_TYPE:
+ NameDictionary::cast(*this).Rehash(roots);
+ break;
+ case GLOBAL_DICTIONARY_TYPE:
+ GlobalDictionary::cast(*this).Rehash(roots);
+ break;
+ case NUMBER_DICTIONARY_TYPE:
+ NumberDictionary::cast(*this).Rehash(roots);
+ break;
+ case SIMPLE_NUMBER_DICTIONARY_TYPE:
+ SimpleNumberDictionary::cast(*this).Rehash(roots);
+ break;
+ case STRING_TABLE_TYPE:
+ StringTable::cast(*this).Rehash(roots);
+ break;
+ case DESCRIPTOR_ARRAY_TYPE:
+ DCHECK_LE(1, DescriptorArray::cast(*this).number_of_descriptors());
+ DescriptorArray::cast(*this).Sort();
+ break;
+ case TRANSITION_ARRAY_TYPE:
+ TransitionArray::cast(*this).Sort();
+ break;
+ case SMALL_ORDERED_HASH_MAP_TYPE:
+ DCHECK_EQ(0, SmallOrderedHashMap::cast(*this).NumberOfElements());
+ break;
+ case SMALL_ORDERED_HASH_SET_TYPE:
+ DCHECK_EQ(0, SmallOrderedHashSet::cast(*this).NumberOfElements());
+ break;
+ case SMALL_ORDERED_NAME_DICTIONARY_TYPE:
+ DCHECK_EQ(0, SmallOrderedNameDictionary::cast(*this).NumberOfElements());
+ break;
+ case ONE_BYTE_INTERNALIZED_STRING_TYPE:
+ case INTERNALIZED_STRING_TYPE:
+ // Rare case, rehash read-only space strings before they are sealed.
+ DCHECK(ReadOnlyHeap::Contains(*this));
+ String::cast(*this).Hash();
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
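+
+// NeedsRehashing/CanBeRehashed/RehashBasedOnMap above exist mainly for
+// snapshot deserialization: when a snapshot is loaded under a hash seed
+// different from the one it was built with, every hash-based container
+// must either be rehashed here or have been empty to begin with.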
+
+bool HeapObject::IsExternal(Isolate* isolate) const {
+ return map().FindRootMap(isolate) == isolate->heap()->external_map();
+}
+
+void DescriptorArray::GeneralizeAllFields() {
+ int length = number_of_descriptors();
+ for (int i = 0; i < length; i++) {
+ PropertyDetails details = GetDetails(i);
+ details = details.CopyWithRepresentation(Representation::Tagged());
+ if (details.location() == kField) {
+ DCHECK_EQ(kData, details.kind());
+ details = details.CopyWithConstness(PropertyConstness::kMutable);
+ SetValue(i, FieldType::Any());
+ }
+ set(ToDetailsIndex(i), MaybeObject::FromObject(details.AsSmi()));
+ }
+}
+
+MaybeHandle<Object> Object::SetProperty(Isolate* isolate, Handle<Object> object,
+ Handle<Name> name, Handle<Object> value,
+ StoreOrigin store_origin,
+ Maybe<ShouldThrow> should_throw) {
+ LookupIterator it(isolate, object, name);
+ MAYBE_RETURN_NULL(SetProperty(&it, value, store_origin, should_throw));
+ return value;
+}
+
+Maybe<bool> Object::SetPropertyInternal(LookupIterator* it,
+ Handle<Object> value,
+ Maybe<ShouldThrow> should_throw,
+ StoreOrigin store_origin, bool* found) {
+ it->UpdateProtector();
+ DCHECK(it->IsFound());
+
+ // Make sure that the top context does not change when doing callbacks or
+ // interceptor calls.
+ AssertNoContextChange ncc(it->isolate());
+
+ do {
+ switch (it->state()) {
+ case LookupIterator::NOT_FOUND:
+ UNREACHABLE();
+
+ case LookupIterator::ACCESS_CHECK:
+ if (it->HasAccess()) break;
+ // Check whether it makes sense to reuse the lookup iterator. Here it
+ // might still call into setters up the prototype chain.
+ return JSObject::SetPropertyWithFailedAccessCheck(it, value,
+ should_throw);
+
+ case LookupIterator::JSPROXY: {
+ Handle<Object> receiver = it->GetReceiver();
+ // In case of global IC, the receiver is the global object. Replace it
+ // with the global proxy.
+ if (receiver->IsJSGlobalObject()) {
+ receiver = handle(JSGlobalObject::cast(*receiver).global_proxy(),
+ it->isolate());
+ }
+ return JSProxy::SetProperty(it->GetHolder<JSProxy>(), it->GetName(),
+ value, receiver, should_throw);
+ }
+
+ case LookupIterator::INTERCEPTOR: {
+ if (it->HolderIsReceiverOrHiddenPrototype()) {
+ Maybe<bool> result =
+ JSObject::SetPropertyWithInterceptor(it, should_throw, value);
+ if (result.IsNothing() || result.FromJust()) return result;
+ } else {
+ Maybe<PropertyAttributes> maybe_attributes =
+ JSObject::GetPropertyAttributesWithInterceptor(it);
+ if (maybe_attributes.IsNothing()) return Nothing<bool>();
+ if ((maybe_attributes.FromJust() & READ_ONLY) != 0) {
+ return WriteToReadOnlyProperty(it, value, should_throw);
+ }
+ if (maybe_attributes.FromJust() == ABSENT) break;
+ *found = false;
+ return Nothing<bool>();
+ }
+ break;
+ }
+
+ case LookupIterator::ACCESSOR: {
+ if (it->IsReadOnly()) {
+ return WriteToReadOnlyProperty(it, value, should_throw);
+ }
+ Handle<Object> accessors = it->GetAccessors();
+ if (accessors->IsAccessorInfo() &&
+ !it->HolderIsReceiverOrHiddenPrototype() &&
+ AccessorInfo::cast(*accessors).is_special_data_property()) {
+ *found = false;
+ return Nothing<bool>();
+ }
+ return SetPropertyWithAccessor(it, value, should_throw);
+ }
+ case LookupIterator::INTEGER_INDEXED_EXOTIC: {
+ // IntegerIndexedElementSet converts value to a Number/BigInt prior to
+ // the bounds check. The bounds check has already happened here, but
+ // perform the possibly effectful ToNumber (or ToBigInt) operation
+ // anyway.
+ auto holder = it->GetHolder<JSTypedArray>();
+ Handle<Object> throwaway_value;
+ if (holder->type() == kExternalBigInt64Array ||
+ holder->type() == kExternalBigUint64Array) {
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ it->isolate(), throwaway_value,
+ BigInt::FromObject(it->isolate(), value), Nothing<bool>());
+ } else {
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ it->isolate(), throwaway_value,
+ Object::ToNumber(it->isolate(), value), Nothing<bool>());
+ }
+
+ // FIXME: Throw a TypeError if the holder is detached here
+ // (IntegerIndexedElementSpec step 5).
+
+ // TODO(verwaest): Per spec, we should return false here (steps 6-9
+ // in IntegerIndexedElementSpec), resulting in an exception being thrown
+ // on OOB accesses in strict code. Historically, v8 has not made this
+ // change due to uncertainty about web compat. (v8:4901)
+ return Just(true);
+ }
+
+ case LookupIterator::DATA:
+ if (it->IsReadOnly()) {
+ return WriteToReadOnlyProperty(it, value, should_throw);
+ }
+ if (it->HolderIsReceiverOrHiddenPrototype()) {
+ return SetDataProperty(it, value);
+ }
+ V8_FALLTHROUGH;
+ case LookupIterator::TRANSITION:
+ *found = false;
+ return Nothing<bool>();
+ }
+ it->Next();
+ } while (it->IsFound());
+
+ *found = false;
+ return Nothing<bool>();
+}
+
+Maybe<bool> Object::SetProperty(LookupIterator* it, Handle<Object> value,
+ StoreOrigin store_origin,
+ Maybe<ShouldThrow> should_throw) {
+ if (it->IsFound()) {
+ bool found = true;
+ Maybe<bool> result =
+ SetPropertyInternal(it, value, should_throw, store_origin, &found);
+ if (found) return result;
+ }
+
+ // If the receiver is the JSGlobalObject, the store was contextual. In case
+ // the property did not exist yet on the global object itself, we have to
+ // throw a reference error in strict mode. In sloppy mode, we continue.
+ if (it->GetReceiver()->IsJSGlobalObject() &&
+ (GetShouldThrow(it->isolate(), should_throw) ==
+ ShouldThrow::kThrowOnError)) {
+ it->isolate()->Throw(*it->isolate()->factory()->NewReferenceError(
+ MessageTemplate::kNotDefined, it->GetName()));
+ return Nothing<bool>();
+ }
+
+ return AddDataProperty(it, value, NONE, should_throw, store_origin);
+}
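+
+// The JSGlobalObject special case above is what makes an unresolved
+// contextual store differ by language mode (illustrative only):
+//   undeclared = 1;  // sloppy: creates a global property
+//   "use strict";
+//   undeclared = 1;  // strict: ReferenceError: undeclared is not defined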
+
+Maybe<bool> Object::SetSuperProperty(LookupIterator* it, Handle<Object> value,
+ StoreOrigin store_origin,
+ Maybe<ShouldThrow> should_throw) {
+ Isolate* isolate = it->isolate();
+
+ if (it->IsFound()) {
+ bool found = true;
+ Maybe<bool> result =
+ SetPropertyInternal(it, value, should_throw, store_origin, &found);
+ if (found) return result;
+ }
+
+ it->UpdateProtector();
+
+ // The property either doesn't exist on the holder or exists there as a data
+ // property.
+
+ if (!it->GetReceiver()->IsJSReceiver()) {
+ return WriteToReadOnlyProperty(it, value, should_throw);
+ }
+ Handle<JSReceiver> receiver = Handle<JSReceiver>::cast(it->GetReceiver());
+
+ LookupIterator::Configuration c = LookupIterator::OWN;
+ LookupIterator own_lookup =
+ it->IsElement() ? LookupIterator(isolate, receiver, it->index(), c)
+ : LookupIterator(isolate, receiver, it->name(), c);
+
+ for (; own_lookup.IsFound(); own_lookup.Next()) {
+ switch (own_lookup.state()) {
+ case LookupIterator::ACCESS_CHECK:
+ if (!own_lookup.HasAccess()) {
+ return JSObject::SetPropertyWithFailedAccessCheck(&own_lookup, value,
+ should_throw);
+ }
+ break;
+
+ case LookupIterator::ACCESSOR:
+ if (own_lookup.GetAccessors()->IsAccessorInfo()) {
+ if (own_lookup.IsReadOnly()) {
+ return WriteToReadOnlyProperty(&own_lookup, value, should_throw);
+ }
+ return Object::SetPropertyWithAccessor(&own_lookup, value,
+ should_throw);
+ }
+ V8_FALLTHROUGH;
+ case LookupIterator::INTEGER_INDEXED_EXOTIC:
+ return RedefineIncompatibleProperty(isolate, it->GetName(), value,
+ should_throw);
+
+ case LookupIterator::DATA: {
+ if (own_lookup.IsReadOnly()) {
+ return WriteToReadOnlyProperty(&own_lookup, value, should_throw);
+ }
+ return SetDataProperty(&own_lookup, value);
+ }
+
+ case LookupIterator::INTERCEPTOR:
+ case LookupIterator::JSPROXY: {
+ PropertyDescriptor desc;
+ Maybe<bool> owned =
+ JSReceiver::GetOwnPropertyDescriptor(&own_lookup, &desc);
+ MAYBE_RETURN(owned, Nothing<bool>());
+ if (!owned.FromJust()) {
+ return JSReceiver::CreateDataProperty(&own_lookup, value,
+ should_throw);
+ }
+ if (PropertyDescriptor::IsAccessorDescriptor(&desc) ||
+ !desc.writable()) {
+ return RedefineIncompatibleProperty(isolate, it->GetName(), value,
+ should_throw);
+ }
+
+ PropertyDescriptor value_desc;
+ value_desc.set_value(value);
+ return JSReceiver::DefineOwnProperty(isolate, receiver, it->GetName(),
+ &value_desc, should_throw);
+ }
+
+ case LookupIterator::NOT_FOUND:
+ case LookupIterator::TRANSITION:
+ UNREACHABLE();
+ }
+ }
+
+ return AddDataProperty(&own_lookup, value, NONE, should_throw, store_origin);
+}
+
+Maybe<bool> Object::CannotCreateProperty(Isolate* isolate,
+ Handle<Object> receiver,
+ Handle<Object> name,
+ Handle<Object> value,
+ Maybe<ShouldThrow> should_throw) {
+ RETURN_FAILURE(
+ isolate, GetShouldThrow(isolate, should_throw),
+ NewTypeError(MessageTemplate::kStrictCannotCreateProperty, name,
+ Object::TypeOf(isolate, receiver), receiver));
+}
+
+Maybe<bool> Object::WriteToReadOnlyProperty(
+ LookupIterator* it, Handle<Object> value,
+ Maybe<ShouldThrow> maybe_should_throw) {
+ ShouldThrow should_throw = GetShouldThrow(it->isolate(), maybe_should_throw);
+ if (it->IsFound() && !it->HolderIsReceiver()) {
+ // "Override mistake" attempted, record a use count to track this per
+ // v8:8175
+ v8::Isolate::UseCounterFeature feature =
+ should_throw == kThrowOnError
+ ? v8::Isolate::kAttemptOverrideReadOnlyOnPrototypeStrict
+ : v8::Isolate::kAttemptOverrideReadOnlyOnPrototypeSloppy;
+ it->isolate()->CountUsage(feature);
+ }
+ return WriteToReadOnlyProperty(it->isolate(), it->GetReceiver(),
+ it->GetName(), value, should_throw);
+}
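+
+// The "override mistake" counted above (illustrative only): an ordinary
+// assignment cannot shadow a read-only data property found on the
+// prototype chain:
+//   Object.defineProperty(Object.prototype, "x",
+//                         { value: 1, writable: false });
+//   const o = {};
+//   o.x = 2;  // sloppy: silently ignored; strict: TypeError
+//   o.x;      // still 1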
+
+Maybe<bool> Object::WriteToReadOnlyProperty(Isolate* isolate,
+ Handle<Object> receiver,
+ Handle<Object> name,
+ Handle<Object> value,
+ ShouldThrow should_throw) {
+ RETURN_FAILURE(isolate, should_throw,
+ NewTypeError(MessageTemplate::kStrictReadOnlyProperty, name,
+ Object::TypeOf(isolate, receiver), receiver));
+}
+
+Maybe<bool> Object::RedefineIncompatibleProperty(
+ Isolate* isolate, Handle<Object> name, Handle<Object> value,
+ Maybe<ShouldThrow> should_throw) {
+ RETURN_FAILURE(isolate, GetShouldThrow(isolate, should_throw),
+ NewTypeError(MessageTemplate::kRedefineDisallowed, name));
+}
+
+Maybe<bool> Object::SetDataProperty(LookupIterator* it, Handle<Object> value) {
+ DCHECK_IMPLIES(it->GetReceiver()->IsJSProxy(),
+ it->GetName()->IsPrivateName());
+ DCHECK_IMPLIES(!it->IsElement() && it->GetName()->IsPrivateName(),
+ it->state() == LookupIterator::DATA);
+ Handle<JSReceiver> receiver = Handle<JSReceiver>::cast(it->GetReceiver());
+
+ // Store on the holder which may be hidden behind the receiver.
+ DCHECK(it->HolderIsReceiverOrHiddenPrototype());
+
+ Handle<Object> to_assign = value;
+ // Convert the incoming value to a number for storing into typed arrays.
+ if (it->IsElement() && receiver->IsJSObject() &&
+ JSObject::cast(*receiver).HasTypedArrayElements()) {
+ ElementsKind elements_kind = JSObject::cast(*receiver).GetElementsKind();
+ if (elements_kind == BIGINT64_ELEMENTS ||
+ elements_kind == BIGUINT64_ELEMENTS) {
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(it->isolate(), to_assign,
+ BigInt::FromObject(it->isolate(), value),
+ Nothing<bool>());
+ // We have to recheck the length. However, it can only change if the
+ // underlying buffer was detached, so just check that.
+ if (Handle<JSArrayBufferView>::cast(receiver)->WasDetached()) {
+ return Just(true);
+ // TODO(neis): According to the spec, this should throw a TypeError.
+ }
+ } else if (!value->IsNumber() && !value->IsUndefined(it->isolate())) {
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(it->isolate(), to_assign,
+ Object::ToNumber(it->isolate(), value),
+ Nothing<bool>());
+ // We have to recheck the length. However, it can only change if the
+ // underlying buffer was detached, so just check that.
+ if (Handle<JSArrayBufferView>::cast(receiver)->WasDetached()) {
+ return Just(true);
+ // TODO(neis): According to the spec, this should throw a TypeError.
+ }
+ }
+ }
+
+ // Possibly migrate to the most up-to-date map that will be able to store
+ // |value| under it->name().
+ it->PrepareForDataProperty(to_assign);
+
+ // Write the property value.
+ it->WriteDataValue(to_assign, false);
+
+#ifdef VERIFY_HEAP
+ if (FLAG_verify_heap) {
+ receiver->HeapObjectVerify(it->isolate());
+ }
+#endif
+ return Just(true);
+}
+
+Maybe<bool> Object::AddDataProperty(LookupIterator* it, Handle<Object> value,
+ PropertyAttributes attributes,
+ Maybe<ShouldThrow> should_throw,
+ StoreOrigin store_origin) {
+ if (!it->GetReceiver()->IsJSReceiver()) {
+ return CannotCreateProperty(it->isolate(), it->GetReceiver(), it->GetName(),
+ value, should_throw);
+ }
+
+ // Private symbols should be installed on JSProxy using
+ // JSProxy::SetPrivateSymbol.
+ if (it->GetReceiver()->IsJSProxy() && it->GetName()->IsPrivate() &&
+ !it->GetName()->IsPrivateName()) {
+ RETURN_FAILURE(it->isolate(), GetShouldThrow(it->isolate(), should_throw),
+ NewTypeError(MessageTemplate::kProxyPrivate));
+ }
+
+ DCHECK_NE(LookupIterator::INTEGER_INDEXED_EXOTIC, it->state());
+
+ Handle<JSReceiver> receiver = it->GetStoreTarget<JSReceiver>();
+ DCHECK_IMPLIES(receiver->IsJSProxy(), it->GetName()->IsPrivateName());
+ DCHECK_IMPLIES(receiver->IsJSProxy(),
+ it->state() == LookupIterator::NOT_FOUND);
+
+ // GetStoreTarget above already unwrapped a JSGlobalProxy to its
+ // JSGlobalObject prototype, so still seeing a JSGlobalProxy here means
+ // its prototype is Null, i.e. the proxy is detached: silently drop the
+ // store.
+ if (receiver->IsJSGlobalProxy()) return Just(true);
+
+ Isolate* isolate = it->isolate();
+
+ if (it->ExtendingNonExtensible(receiver)) {
+ RETURN_FAILURE(
+ isolate, GetShouldThrow(it->isolate(), should_throw),
+ NewTypeError(MessageTemplate::kObjectNotExtensible, it->GetName()));
+ }
+
+ if (it->IsElement()) {
+ if (receiver->IsJSArray()) {
+ Handle<JSArray> array = Handle<JSArray>::cast(receiver);
+ if (JSArray::WouldChangeReadOnlyLength(array, it->index())) {
+ RETURN_FAILURE(isolate, GetShouldThrow(it->isolate(), should_throw),
+ NewTypeError(MessageTemplate::kStrictReadOnlyProperty,
+ isolate->factory()->length_string(),
+ Object::TypeOf(isolate, array), array));
+ }
+ }
+
+ Handle<JSObject> receiver_obj = Handle<JSObject>::cast(receiver);
+ JSObject::AddDataElement(receiver_obj, it->index(), value, attributes);
+ JSObject::ValidateElements(*receiver_obj);
+ return Just(true);
+ } else {
+ it->UpdateProtector();
+ // Migrate to the most up-to-date map that will be able to store |value|
+ // under it->name() with |attributes|.
+ it->PrepareTransitionToDataProperty(receiver, value, attributes,
+ store_origin);
+ DCHECK_EQ(LookupIterator::TRANSITION, it->state());
+ it->ApplyTransitionToDataProperty(receiver);
+
+ // Write the property value.
+ it->WriteDataValue(value, true);
+
+#ifdef VERIFY_HEAP
+ if (FLAG_verify_heap) {
+ receiver->HeapObjectVerify(isolate);
+ }
+#endif
+ }
+
+ return Just(true);
+}
+
+template <class T>
+static int AppendUniqueCallbacks(Isolate* isolate,
+ Handle<TemplateList> callbacks,
+ Handle<typename T::Array> array,
+ int valid_descriptors) {
+ int nof_callbacks = callbacks->length();
+
+ // Fill in new callback descriptors. Process the callbacks from
+ // back to front so that the last callback with a given name takes
+ // precedence over previously added callbacks with that name.
+ for (int i = nof_callbacks - 1; i >= 0; i--) {
+ Handle<AccessorInfo> entry(AccessorInfo::cast(callbacks->get(i)), isolate);
+ Handle<Name> key(Name::cast(entry->name()), isolate);
+ DCHECK(key->IsUniqueName());
+ // Check if a descriptor with this name already exists before writing.
+ if (!T::Contains(key, entry, valid_descriptors, array)) {
+ T::Insert(key, entry, valid_descriptors, array);
+ valid_descriptors++;
+ }
+ }
+
+ return valid_descriptors;
+}
+
+struct FixedArrayAppender {
+ using Array = FixedArray;
+ static bool Contains(Handle<Name> key, Handle<AccessorInfo> entry,
+ int valid_descriptors, Handle<FixedArray> array) {
+ for (int i = 0; i < valid_descriptors; i++) {
+ if (*key == AccessorInfo::cast(array->get(i)).name()) return true;
+ }
+ return false;
+ }
+ static void Insert(Handle<Name> key, Handle<AccessorInfo> entry,
+ int valid_descriptors, Handle<FixedArray> array) {
+ DisallowHeapAllocation no_gc;
+ array->set(valid_descriptors, *entry);
+ }
+};
+
+int AccessorInfo::AppendUnique(Isolate* isolate, Handle<Object> descriptors,
+ Handle<FixedArray> array,
+ int valid_descriptors) {
+ Handle<TemplateList> callbacks = Handle<TemplateList>::cast(descriptors);
+ DCHECK_GE(array->length(), callbacks->length() + valid_descriptors);
+ return AppendUniqueCallbacks<FixedArrayAppender>(isolate, callbacks, array,
+ valid_descriptors);
+}
+
+void JSProxy::Revoke(Handle<JSProxy> proxy) {
+ Isolate* isolate = proxy->GetIsolate();
+ // ES#sec-proxy-revocation-functions
+ if (!proxy->IsRevoked()) {
+ // 5. Set p.[[ProxyTarget]] to null.
+ proxy->set_target(ReadOnlyRoots(isolate).null_value());
+ // 6. Set p.[[ProxyHandler]] to null.
+ proxy->set_handler(ReadOnlyRoots(isolate).null_value());
+ }
+ DCHECK(proxy->IsRevoked());
+}
+
+// static
+Maybe<bool> JSProxy::IsArray(Handle<JSProxy> proxy) {
+ Isolate* isolate = proxy->GetIsolate();
+ Handle<JSReceiver> object = Handle<JSReceiver>::cast(proxy);
+ for (int i = 0; i < JSProxy::kMaxIterationLimit; i++) {
+ Handle<JSProxy> proxy = Handle<JSProxy>::cast(object);
+ if (proxy->IsRevoked()) {
+ isolate->Throw(*isolate->factory()->NewTypeError(
+ MessageTemplate::kProxyRevoked,
+ isolate->factory()->NewStringFromAsciiChecked("IsArray")));
+ return Nothing<bool>();
+ }
+ object = handle(JSReceiver::cast(proxy->target()), isolate);
+ if (object->IsJSArray()) return Just(true);
+ if (!object->IsJSProxy()) return Just(false);
+ }
+
+ // Too deep recursion, throw a RangeError.
+ isolate->StackOverflow();
+ return Nothing<bool>();
+}
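+
+// This implements the proxy-aware part of Array.isArray (illustrative
+// only):
+//   Array.isArray(new Proxy([], {}));  // true, by unwrapping the target
+// The iteration limit above guards against pathological proxy-of-proxy
+// chains; a revoked proxy in the chain throws a TypeError instead.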
+
+Maybe<bool> JSProxy::HasProperty(Isolate* isolate, Handle<JSProxy> proxy,
+ Handle<Name> name) {
+ DCHECK(!name->IsPrivate());
+ STACK_CHECK(isolate, Nothing<bool>());
+ // 1. (Assert)
+ // 2. Let handler be the value of the [[ProxyHandler]] internal slot of O.
+ Handle<Object> handler(proxy->handler(), isolate);
+ // 3. If handler is null, throw a TypeError exception.
+ // 4. Assert: Type(handler) is Object.
+ if (proxy->IsRevoked()) {
+ isolate->Throw(*isolate->factory()->NewTypeError(
+ MessageTemplate::kProxyRevoked, isolate->factory()->has_string()));
+ return Nothing<bool>();
+ }
+ // 5. Let target be the value of the [[ProxyTarget]] internal slot of O.
+ Handle<JSReceiver> target(JSReceiver::cast(proxy->target()), isolate);
+ // 6. Let trap be ? GetMethod(handler, "has").
+ Handle<Object> trap;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, trap,
+ Object::GetMethod(Handle<JSReceiver>::cast(handler),
+ isolate->factory()->has_string()),
+ Nothing<bool>());
+ // 7. If trap is undefined, then
+ if (trap->IsUndefined(isolate)) {
+ // 7a. Return target.[[HasProperty]](P).
+ return JSReceiver::HasProperty(target, name);
+ }
+ // 8. Let booleanTrapResult be ToBoolean(? Call(trap, handler, «target, P»)).
+ Handle<Object> trap_result_obj;
+ Handle<Object> args[] = {target, name};
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, trap_result_obj,
+ Execution::Call(isolate, trap, handler, arraysize(args), args),
+ Nothing<bool>());
+ bool boolean_trap_result = trap_result_obj->BooleanValue(isolate);
+ // 9. If booleanTrapResult is false, then:
+ if (!boolean_trap_result) {
+ MAYBE_RETURN(JSProxy::CheckHasTrap(isolate, name, target), Nothing<bool>());
+ }
+ // 10. Return booleanTrapResult.
+ return Just(boolean_trap_result);
+}
+
+Maybe<bool> JSProxy::CheckHasTrap(Isolate* isolate, Handle<Name> name,
+ Handle<JSReceiver> target) {
+ // 9a. Let targetDesc be ? target.[[GetOwnProperty]](P).
+ PropertyDescriptor target_desc;
+ Maybe<bool> target_found =
+ JSReceiver::GetOwnPropertyDescriptor(isolate, target, name, &target_desc);
+ MAYBE_RETURN(target_found, Nothing<bool>());
+ // 9b. If targetDesc is not undefined, then:
+ if (target_found.FromJust()) {
+ // 9b i. If targetDesc.[[Configurable]] is false, throw a TypeError
+ // exception.
+ if (!target_desc.configurable()) {
+ isolate->Throw(*isolate->factory()->NewTypeError(
+ MessageTemplate::kProxyHasNonConfigurable, name));
+ return Nothing<bool>();
+ }
+ // 9b ii. Let extensibleTarget be ? IsExtensible(target).
+ Maybe<bool> extensible_target = JSReceiver::IsExtensible(target);
+ MAYBE_RETURN(extensible_target, Nothing<bool>());
+ // 9b iii. If extensibleTarget is false, throw a TypeError exception.
+ if (!extensible_target.FromJust()) {
+ isolate->Throw(*isolate->factory()->NewTypeError(
+ MessageTemplate::kProxyHasNonExtensible, name));
+ return Nothing<bool>();
+ }
+ }
+ return Just(true);
+}
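+
+// Invariant enforced above (illustrative only): a "has" trap may not hide
+// a non-configurable own property of the target:
+//   const target = {};
+//   Object.defineProperty(target, "x", { value: 1, configurable: false });
+//   const p = new Proxy(target, { has: () => false });
+//   "x" in p;  // TypeError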
+
+Maybe<bool> JSProxy::SetProperty(Handle<JSProxy> proxy, Handle<Name> name,
+ Handle<Object> value, Handle<Object> receiver,
+ Maybe<ShouldThrow> should_throw) {
+ DCHECK(!name->IsPrivate());
+ Isolate* isolate = proxy->GetIsolate();
+ STACK_CHECK(isolate, Nothing<bool>());
+ Factory* factory = isolate->factory();
+ Handle<String> trap_name = factory->set_string();
+
+ if (proxy->IsRevoked()) {
+ isolate->Throw(
+ *factory->NewTypeError(MessageTemplate::kProxyRevoked, trap_name));
+ return Nothing<bool>();
+ }
+ Handle<JSReceiver> target(JSReceiver::cast(proxy->target()), isolate);
+ Handle<JSReceiver> handler(JSReceiver::cast(proxy->handler()), isolate);
+
+ Handle<Object> trap;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, trap, Object::GetMethod(handler, trap_name), Nothing<bool>());
+ if (trap->IsUndefined(isolate)) {
+ LookupIterator it =
+ LookupIterator::PropertyOrElement(isolate, receiver, name, target);
+
+ return Object::SetSuperProperty(&it, value, StoreOrigin::kMaybeKeyed,
+ should_throw);
+ }
+
+ Handle<Object> trap_result;
+ Handle<Object> args[] = {target, name, value, receiver};
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, trap_result,
+ Execution::Call(isolate, trap, handler, arraysize(args), args),
+ Nothing<bool>());
+ if (!trap_result->BooleanValue(isolate)) {
+ RETURN_FAILURE(isolate, GetShouldThrow(isolate, should_throw),
+ NewTypeError(MessageTemplate::kProxyTrapReturnedFalsishFor,
+ trap_name, name));
+ }
+
+ MaybeHandle<Object> result =
+ JSProxy::CheckGetSetTrapResult(isolate, name, target, value, kSet);
+
+ if (result.is_null()) {
+ return Nothing<bool>();
+ }
+ return Just(true);
+}
+
+Maybe<bool> JSProxy::DeletePropertyOrElement(Handle<JSProxy> proxy,
+ Handle<Name> name,
+ LanguageMode language_mode) {
+ DCHECK(!name->IsPrivate());
+ ShouldThrow should_throw =
+ is_sloppy(language_mode) ? kDontThrow : kThrowOnError;
+ Isolate* isolate = proxy->GetIsolate();
+ STACK_CHECK(isolate, Nothing<bool>());
+ Factory* factory = isolate->factory();
+ Handle<String> trap_name = factory->deleteProperty_string();
+
+ if (proxy->IsRevoked()) {
+ isolate->Throw(
+ *factory->NewTypeError(MessageTemplate::kProxyRevoked, trap_name));
+ return Nothing<bool>();
+ }
+ Handle<JSReceiver> target(JSReceiver::cast(proxy->target()), isolate);
+ Handle<JSReceiver> handler(JSReceiver::cast(proxy->handler()), isolate);
+
+ Handle<Object> trap;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, trap, Object::GetMethod(handler, trap_name), Nothing<bool>());
+ if (trap->IsUndefined(isolate)) {
+ return JSReceiver::DeletePropertyOrElement(target, name, language_mode);
+ }
+
+ Handle<Object> trap_result;
+ Handle<Object> args[] = {target, name};
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, trap_result,
+ Execution::Call(isolate, trap, handler, arraysize(args), args),
+ Nothing<bool>());
+ if (!trap_result->BooleanValue(isolate)) {
+ RETURN_FAILURE(isolate, should_throw,
+ NewTypeError(MessageTemplate::kProxyTrapReturnedFalsishFor,
+ trap_name, name));
+ }
+
+ // Enforce the invariant.
+ PropertyDescriptor target_desc;
+ Maybe<bool> owned =
+ JSReceiver::GetOwnPropertyDescriptor(isolate, target, name, &target_desc);
+ MAYBE_RETURN(owned, Nothing<bool>());
+ if (owned.FromJust()) {
+ if (!target_desc.configurable()) {
+ isolate->Throw(*factory->NewTypeError(
+ MessageTemplate::kProxyDeletePropertyNonConfigurable, name));
+ return Nothing<bool>();
+ }
+ // 13. Let extensibleTarget be ? IsExtensible(target).
+ // 14. If extensibleTarget is false, throw a TypeError exception.
+ Maybe<bool> extensible = JSReceiver::IsExtensible(target);
+ MAYBE_RETURN(extensible, Nothing<bool>());
+ if (!extensible.FromJust()) {
+ isolate->Throw(*factory->NewTypeError(
+ MessageTemplate::kProxyDeletePropertyNonExtensible, name));
+ return Nothing<bool>();
+ }
+ }
+
+ return Just(true);
+}
+
+// static
+MaybeHandle<JSProxy> JSProxy::New(Isolate* isolate, Handle<Object> target,
+ Handle<Object> handler) {
+ if (!target->IsJSReceiver()) {
+ THROW_NEW_ERROR(isolate, NewTypeError(MessageTemplate::kProxyNonObject),
+ JSProxy);
+ }
+ if (target->IsJSProxy() && JSProxy::cast(*target).IsRevoked()) {
+ THROW_NEW_ERROR(isolate,
+ NewTypeError(MessageTemplate::kProxyHandlerOrTargetRevoked),
+ JSProxy);
+ }
+ if (!handler->IsJSReceiver()) {
+ THROW_NEW_ERROR(isolate, NewTypeError(MessageTemplate::kProxyNonObject),
+ JSProxy);
+ }
+ if (handler->IsJSProxy() && JSProxy::cast(*handler).IsRevoked()) {
+ THROW_NEW_ERROR(isolate,
+ NewTypeError(MessageTemplate::kProxyHandlerOrTargetRevoked),
+ JSProxy);
+ }
+ return isolate->factory()->NewJSProxy(Handle<JSReceiver>::cast(target),
+ Handle<JSReceiver>::cast(handler));
+}
+
+// static
+MaybeHandle<NativeContext> JSProxy::GetFunctionRealm(Handle<JSProxy> proxy) {
+ DCHECK(proxy->map().is_constructor());
+ if (proxy->IsRevoked()) {
+ THROW_NEW_ERROR(proxy->GetIsolate(),
+ NewTypeError(MessageTemplate::kProxyRevoked),
+ NativeContext);
+ }
+ Handle<JSReceiver> target(JSReceiver::cast(proxy->target()),
+ proxy->GetIsolate());
+ return JSReceiver::GetFunctionRealm(target);
+}
+
+Maybe<PropertyAttributes> JSProxy::GetPropertyAttributes(LookupIterator* it) {
+ PropertyDescriptor desc;
+ Maybe<bool> found = JSProxy::GetOwnPropertyDescriptor(
+ it->isolate(), it->GetHolder<JSProxy>(), it->GetName(), &desc);
+ MAYBE_RETURN(found, Nothing<PropertyAttributes>());
+ if (!found.FromJust()) return Just(ABSENT);
+ return Just(desc.ToAttributes());
+}
+
+// TODO(jkummerow): Consider unification with FastAsArrayLength() in
+// accessors.cc.
+bool PropertyKeyToArrayLength(Handle<Object> value, uint32_t* length) {
+ DCHECK(value->IsNumber() || value->IsName());
+ if (value->ToArrayLength(length)) return true;
+ if (value->IsString()) return String::cast(*value).AsArrayIndex(length);
+ return false;
+}
+
+bool PropertyKeyToArrayIndex(Handle<Object> index_obj, uint32_t* output) {
+ return PropertyKeyToArrayLength(index_obj, output) && *output != kMaxUInt32;
+}
+
+// ES6 9.4.2.1
+// static
+Maybe<bool> JSArray::DefineOwnProperty(Isolate* isolate, Handle<JSArray> o,
+ Handle<Object> name,
+ PropertyDescriptor* desc,
+ Maybe<ShouldThrow> should_throw) {
+ // 1. Assert: IsPropertyKey(P) is true. ("P" is |name|.)
+ // 2. If P is "length", then:
+ // TODO(jkummerow): Check if we need slow string comparison.
+ if (*name == ReadOnlyRoots(isolate).length_string()) {
+ // 2a. Return ArraySetLength(A, Desc).
+ return ArraySetLength(isolate, o, desc, should_throw);
+ }
+ // 3. Else if P is an array index, then:
+ uint32_t index = 0;
+ if (PropertyKeyToArrayIndex(name, &index)) {
+ // 3a. Let oldLenDesc be OrdinaryGetOwnProperty(A, "length").
+ PropertyDescriptor old_len_desc;
+ Maybe<bool> success = GetOwnPropertyDescriptor(
+ isolate, o, isolate->factory()->length_string(), &old_len_desc);
+ // 3b. (Assert)
+ DCHECK(success.FromJust());
+ USE(success);
+ // 3c. Let oldLen be oldLenDesc.[[Value]].
+ uint32_t old_len = 0;
+ CHECK(old_len_desc.value()->ToArrayLength(&old_len));
+ // 3d. Let index be ToUint32(P).
+ // (Already done above.)
+ // 3e. (Assert)
+ // 3f. If index >= oldLen and oldLenDesc.[[Writable]] is false,
+ // return false.
+ if (index >= old_len && old_len_desc.has_writable() &&
+ !old_len_desc.writable()) {
+ RETURN_FAILURE(isolate, GetShouldThrow(isolate, should_throw),
+ NewTypeError(MessageTemplate::kDefineDisallowed, name));
+ }
+ // 3g. Let succeeded be OrdinaryDefineOwnProperty(A, P, Desc).
+ Maybe<bool> succeeded =
+ OrdinaryDefineOwnProperty(isolate, o, name, desc, should_throw);
+ // 3h. Assert: succeeded is not an abrupt completion.
+ // In our case, if should_throw == kThrowOnError, it can be!
+ // 3i. If succeeded is false, return false.
+ if (succeeded.IsNothing() || !succeeded.FromJust()) return succeeded;
+ // 3j. If index >= oldLen, then:
+ if (index >= old_len) {
+ // 3j i. Set oldLenDesc.[[Value]] to index + 1.
+ old_len_desc.set_value(isolate->factory()->NewNumberFromUint(index + 1));
+ // 3j ii. Let succeeded be
+ // OrdinaryDefineOwnProperty(A, "length", oldLenDesc).
+ succeeded = OrdinaryDefineOwnProperty(isolate, o,
+ isolate->factory()->length_string(),
+ &old_len_desc, should_throw);
+ // 3j iii. Assert: succeeded is true.
+ DCHECK(succeeded.FromJust());
+ USE(succeeded);
+ }
+ // 3k. Return true.
+ return Just(true);
+ }
+
+ // 4. Return OrdinaryDefineOwnProperty(A, P, Desc).
+ return OrdinaryDefineOwnProperty(isolate, o, name, desc, should_throw);
+}
+
+// Part of ES6 9.4.2.4 ArraySetLength.
+// static
+bool JSArray::AnythingToArrayLength(Isolate* isolate,
+ Handle<Object> length_object,
+ uint32_t* output) {
+ // Fast path: check numbers and strings that can be converted directly
+ // and unobservably.
+ if (length_object->ToArrayLength(output)) return true;
+ if (length_object->IsString() &&
+ Handle<String>::cast(length_object)->AsArrayIndex(output)) {
+ return true;
+ }
+ // Slow path: follow steps in ES6 9.4.2.4 "ArraySetLength".
+ // 3. Let newLen be ToUint32(Desc.[[Value]]).
+ Handle<Object> uint32_v;
+ if (!Object::ToUint32(isolate, length_object).ToHandle(&uint32_v)) {
+ // 4. ReturnIfAbrupt(newLen).
+ return false;
+ }
+ // 5. Let numberLen be ToNumber(Desc.[[Value]]).
+ Handle<Object> number_v;
+ if (!Object::ToNumber(isolate, length_object).ToHandle(&number_v)) {
+ // 6. ReturnIfAbrupt(newLen).
+ return false;
+ }
+ // 7. If newLen != numberLen, throw a RangeError exception.
+ if (uint32_v->Number() != number_v->Number()) {
+ Handle<Object> exception =
+ isolate->factory()->NewRangeError(MessageTemplate::kInvalidArrayLength);
+ isolate->Throw(*exception);
+ return false;
+ }
+ CHECK(uint32_v->ToArrayLength(output));
+ return true;
+}
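+
+// Examples of the slow path above (illustrative, at the JS level):
+//   a.length = 2.5;  // RangeError: ToUint32(2.5) == 2 != ToNumber(2.5)
+//   a.length = -1;   // RangeError: ToUint32(-1) == 4294967295 != -1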
+
+// ES6 9.4.2.4
+// static
+Maybe<bool> JSArray::ArraySetLength(Isolate* isolate, Handle<JSArray> a,
+ PropertyDescriptor* desc,
+ Maybe<ShouldThrow> should_throw) {
+ // 1. If the [[Value]] field of Desc is absent, then
+ if (!desc->has_value()) {
+ // 1a. Return OrdinaryDefineOwnProperty(A, "length", Desc).
+ return OrdinaryDefineOwnProperty(
+ isolate, a, isolate->factory()->length_string(), desc, should_throw);
+ }
+ // 2. Let newLenDesc be a copy of Desc.
+ // (Actual copying is not necessary.)
+ PropertyDescriptor* new_len_desc = desc;
+ // 3. - 7. Convert Desc.[[Value]] to newLen.
+ uint32_t new_len = 0;
+ if (!AnythingToArrayLength(isolate, desc->value(), &new_len)) {
+ DCHECK(isolate->has_pending_exception());
+ return Nothing<bool>();
+ }
+ // 8. Set newLenDesc.[[Value]] to newLen.
+ // (Done below, if needed.)
+ // 9. Let oldLenDesc be OrdinaryGetOwnProperty(A, "length").
+ PropertyDescriptor old_len_desc;
+ Maybe<bool> success = GetOwnPropertyDescriptor(
+ isolate, a, isolate->factory()->length_string(), &old_len_desc);
+ // 10. (Assert)
+ DCHECK(success.FromJust());
+ USE(success);
+ // 11. Let oldLen be oldLenDesc.[[Value]].
+ uint32_t old_len = 0;
+ CHECK(old_len_desc.value()->ToArrayLength(&old_len));
+ // 12. If newLen >= oldLen, then
+ if (new_len >= old_len) {
+ // 8. Set newLenDesc.[[Value]] to newLen.
+ // 12a. Return OrdinaryDefineOwnProperty(A, "length", newLenDesc).
+ new_len_desc->set_value(isolate->factory()->NewNumberFromUint(new_len));
+ return OrdinaryDefineOwnProperty(isolate, a,
+ isolate->factory()->length_string(),
+ new_len_desc, should_throw);
+ }
+ // 13. If oldLenDesc.[[Writable]] is false, return false.
+ if (!old_len_desc.writable()) {
+ RETURN_FAILURE(isolate, GetShouldThrow(isolate, should_throw),
+ NewTypeError(MessageTemplate::kRedefineDisallowed,
+ isolate->factory()->length_string()));
+ }
+ // 14. If newLenDesc.[[Writable]] is absent or has the value true,
+ // let newWritable be true.
+ bool new_writable = false;
+ if (!new_len_desc->has_writable() || new_len_desc->writable()) {
+ new_writable = true;
+ } else {
+ // 15. Else,
+ // 15a. Need to defer setting the [[Writable]] attribute to false in case
+ // any elements cannot be deleted.
+ // 15b. Let newWritable be false. (It's initialized as "false" anyway.)
+ // 15c. Set newLenDesc.[[Writable]] to true.
+ // (Not needed.)
+ }
+ // Most of steps 16 through 19 are implemented by JSArray::SetLength.
+ JSArray::SetLength(a, new_len);
+ // Steps 19d-ii, 20.
+ if (!new_writable) {
+ PropertyDescriptor readonly;
+ readonly.set_writable(false);
+ Maybe<bool> success = OrdinaryDefineOwnProperty(
+ isolate, a, isolate->factory()->length_string(), &readonly,
+ should_throw);
+ DCHECK(success.FromJust());
+ USE(success);
+ }
+ uint32_t actual_new_len = 0;
+ CHECK(a->length().ToArrayLength(&actual_new_len));
+ // Steps 19d-v, 21. Return false if there were non-deletable elements.
+ bool result = actual_new_len == new_len;
+ if (!result) {
+ RETURN_FAILURE(
+ isolate, GetShouldThrow(isolate, should_throw),
+ NewTypeError(MessageTemplate::kStrictDeleteProperty,
+ isolate->factory()->NewNumberFromUint(actual_new_len - 1),
+ a));
+ }
+ return Just(result);
+}
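+
+// Behavioral sketch of steps 19d-v and 21 (illustrative): shrinking the
+// length deletes elements from the end; if an element cannot be deleted,
+// the length stops just above it and the operation fails:
+//   const a = [0, 1, 2];
+//   Object.defineProperty(a, '1', {configurable: false});
+//   a.length = 0;  // a.length ends up as 2; TypeError in strict mode.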
+
+// ES6 9.5.6
+// static
+Maybe<bool> JSProxy::DefineOwnProperty(Isolate* isolate, Handle<JSProxy> proxy,
+ Handle<Object> key,
+ PropertyDescriptor* desc,
+ Maybe<ShouldThrow> should_throw) {
+ STACK_CHECK(isolate, Nothing<bool>());
+ if (key->IsSymbol() && Handle<Symbol>::cast(key)->IsPrivate()) {
+ DCHECK(!Handle<Symbol>::cast(key)->IsPrivateName());
+ return JSProxy::SetPrivateSymbol(isolate, proxy, Handle<Symbol>::cast(key),
+ desc, should_throw);
+ }
+ Handle<String> trap_name = isolate->factory()->defineProperty_string();
+ // 1. Assert: IsPropertyKey(P) is true.
+ DCHECK(key->IsName() || key->IsNumber());
+ // 2. Let handler be the value of the [[ProxyHandler]] internal slot of O.
+ Handle<Object> handler(proxy->handler(), isolate);
+ // 3. If handler is null, throw a TypeError exception.
+ // 4. Assert: Type(handler) is Object.
+ if (proxy->IsRevoked()) {
+ isolate->Throw(*isolate->factory()->NewTypeError(
+ MessageTemplate::kProxyRevoked, trap_name));
+ return Nothing<bool>();
+ }
+ // 5. Let target be the value of the [[ProxyTarget]] internal slot of O.
+ Handle<JSReceiver> target(JSReceiver::cast(proxy->target()), isolate);
+ // 6. Let trap be ? GetMethod(handler, "defineProperty").
+ Handle<Object> trap;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, trap,
+ Object::GetMethod(Handle<JSReceiver>::cast(handler), trap_name),
+ Nothing<bool>());
+ // 7. If trap is undefined, then:
+ if (trap->IsUndefined(isolate)) {
+ // 7a. Return target.[[DefineOwnProperty]](P, Desc).
+ return JSReceiver::DefineOwnProperty(isolate, target, key, desc,
+ should_throw);
+ }
+ // 8. Let descObj be FromPropertyDescriptor(Desc).
+ Handle<Object> desc_obj = desc->ToObject(isolate);
+ // 9. Let booleanTrapResult be
+ // ToBoolean(? Call(trap, handler, «target, P, descObj»)).
+ Handle<Name> property_name =
+ key->IsName()
+ ? Handle<Name>::cast(key)
+ : Handle<Name>::cast(isolate->factory()->NumberToString(key));
+ // Do not leak private property names.
+ DCHECK(!property_name->IsPrivate());
+ Handle<Object> trap_result_obj;
+ Handle<Object> args[] = {target, property_name, desc_obj};
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, trap_result_obj,
+ Execution::Call(isolate, trap, handler, arraysize(args), args),
+ Nothing<bool>());
+ // 10. If booleanTrapResult is false, return false.
+ if (!trap_result_obj->BooleanValue(isolate)) {
+ RETURN_FAILURE(isolate, GetShouldThrow(isolate, should_throw),
+ NewTypeError(MessageTemplate::kProxyTrapReturnedFalsishFor,
+ trap_name, property_name));
+ }
+ // 11. Let targetDesc be ? target.[[GetOwnProperty]](P).
+ PropertyDescriptor target_desc;
+ Maybe<bool> target_found =
+ JSReceiver::GetOwnPropertyDescriptor(isolate, target, key, &target_desc);
+ MAYBE_RETURN(target_found, Nothing<bool>());
+ // 12. Let extensibleTarget be ? IsExtensible(target).
+ Maybe<bool> maybe_extensible = JSReceiver::IsExtensible(target);
+ MAYBE_RETURN(maybe_extensible, Nothing<bool>());
+ bool extensible_target = maybe_extensible.FromJust();
+ // 13. If Desc has a [[Configurable]] field and if Desc.[[Configurable]]
+ // is false, then:
+ // 13a. Let settingConfigFalse be true.
+ // 14. Else let settingConfigFalse be false.
+ bool setting_config_false = desc->has_configurable() && !desc->configurable();
+ // 15. If targetDesc is undefined, then
+ if (!target_found.FromJust()) {
+ // 15a. If extensibleTarget is false, throw a TypeError exception.
+ if (!extensible_target) {
+ isolate->Throw(*isolate->factory()->NewTypeError(
+ MessageTemplate::kProxyDefinePropertyNonExtensible, property_name));
+ return Nothing<bool>();
+ }
+ // 15b. If settingConfigFalse is true, throw a TypeError exception.
+ if (setting_config_false) {
+ isolate->Throw(*isolate->factory()->NewTypeError(
+ MessageTemplate::kProxyDefinePropertyNonConfigurable, property_name));
+ return Nothing<bool>();
+ }
+ } else {
+ // 16. Else targetDesc is not undefined,
+ // 16a. If IsCompatiblePropertyDescriptor(extensibleTarget, Desc,
+ // targetDesc) is false, throw a TypeError exception.
+ Maybe<bool> valid = IsCompatiblePropertyDescriptor(
+ isolate, extensible_target, desc, &target_desc, property_name,
+ Just(kDontThrow));
+ MAYBE_RETURN(valid, Nothing<bool>());
+ if (!valid.FromJust()) {
+ isolate->Throw(*isolate->factory()->NewTypeError(
+ MessageTemplate::kProxyDefinePropertyIncompatible, property_name));
+ return Nothing<bool>();
+ }
+ // 16b. If settingConfigFalse is true and targetDesc.[[Configurable]] is
+ // true, throw a TypeError exception.
+ if (setting_config_false && target_desc.configurable()) {
+ isolate->Throw(*isolate->factory()->NewTypeError(
+ MessageTemplate::kProxyDefinePropertyNonConfigurable, property_name));
+ return Nothing<bool>();
+ }
+ // 16c. If IsDataDescriptor(targetDesc) is true,
+ // targetDesc.[[Configurable]] is
+ // false, and targetDesc.[[Writable]] is true, then
+ if (PropertyDescriptor::IsDataDescriptor(&target_desc) &&
+ !target_desc.configurable() && target_desc.writable()) {
+ // 16c i. If Desc has a [[Writable]] field and Desc.[[Writable]] is false,
+ // throw a TypeError exception.
+ if (desc->has_writable() && !desc->writable()) {
+ isolate->Throw(*isolate->factory()->NewTypeError(
+ MessageTemplate::kProxyDefinePropertyNonConfigurableWritable,
+ property_name));
+ return Nothing<bool>();
+ }
+ }
+ }
+ // 17. Return true.
+ return Just(true);
+}
+
+// static
+Maybe<bool> JSProxy::SetPrivateSymbol(Isolate* isolate, Handle<JSProxy> proxy,
+ Handle<Symbol> private_name,
+ PropertyDescriptor* desc,
+ Maybe<ShouldThrow> should_throw) {
+ DCHECK(!private_name->IsPrivateName());
+ // Despite the generic name, this can only add private data properties.
+ if (!PropertyDescriptor::IsDataDescriptor(desc) ||
+ desc->ToAttributes() != DONT_ENUM) {
+ RETURN_FAILURE(isolate, GetShouldThrow(isolate, should_throw),
+ NewTypeError(MessageTemplate::kProxyPrivate));
+ }
+ DCHECK(proxy->map().is_dictionary_map());
+ Handle<Object> value =
+ desc->has_value()
+ ? desc->value()
+ : Handle<Object>::cast(isolate->factory()->undefined_value());
+
+ LookupIterator it(proxy, private_name, proxy);
+
+ if (it.IsFound()) {
+ DCHECK_EQ(LookupIterator::DATA, it.state());
+ DCHECK_EQ(DONT_ENUM, it.property_attributes());
+ it.WriteDataValue(value, false);
+ return Just(true);
+ }
+
+ Handle<NameDictionary> dict(proxy->property_dictionary(), isolate);
+ PropertyDetails details(kData, DONT_ENUM, PropertyCellType::kNoCell);
+ Handle<NameDictionary> result =
+ NameDictionary::Add(isolate, dict, private_name, value, details);
+ if (!dict.is_identical_to(result)) proxy->SetProperties(*result);
+ return Just(true);
+}
+
+// ES6 9.5.5
+// static
+Maybe<bool> JSProxy::GetOwnPropertyDescriptor(Isolate* isolate,
+ Handle<JSProxy> proxy,
+ Handle<Name> name,
+ PropertyDescriptor* desc) {
+ DCHECK(!name->IsPrivate());
+ STACK_CHECK(isolate, Nothing<bool>());
+
+ Handle<String> trap_name =
+ isolate->factory()->getOwnPropertyDescriptor_string();
+ // 1. (Assert)
+ // 2. Let handler be the value of the [[ProxyHandler]] internal slot of O.
+ Handle<Object> handler(proxy->handler(), isolate);
+ // 3. If handler is null, throw a TypeError exception.
+ // 4. Assert: Type(handler) is Object.
+ if (proxy->IsRevoked()) {
+ isolate->Throw(*isolate->factory()->NewTypeError(
+ MessageTemplate::kProxyRevoked, trap_name));
+ return Nothing<bool>();
+ }
+ // 5. Let target be the value of the [[ProxyTarget]] internal slot of O.
+ Handle<JSReceiver> target(JSReceiver::cast(proxy->target()), isolate);
+ // 6. Let trap be ? GetMethod(handler, "getOwnPropertyDescriptor").
+ Handle<Object> trap;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, trap,
+ Object::GetMethod(Handle<JSReceiver>::cast(handler), trap_name),
+ Nothing<bool>());
+ // 7. If trap is undefined, then
+ if (trap->IsUndefined(isolate)) {
+ // 7a. Return target.[[GetOwnProperty]](P).
+ return JSReceiver::GetOwnPropertyDescriptor(isolate, target, name, desc);
+ }
+ // 8. Let trapResultObj be ? Call(trap, handler, «target, P»).
+ Handle<Object> trap_result_obj;
+ Handle<Object> args[] = {target, name};
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, trap_result_obj,
+ Execution::Call(isolate, trap, handler, arraysize(args), args),
+ Nothing<bool>());
+ // 9. If Type(trapResultObj) is neither Object nor Undefined, throw a
+ // TypeError exception.
+ if (!trap_result_obj->IsJSReceiver() &&
+ !trap_result_obj->IsUndefined(isolate)) {
+ isolate->Throw(*isolate->factory()->NewTypeError(
+ MessageTemplate::kProxyGetOwnPropertyDescriptorInvalid, name));
+ return Nothing<bool>();
+ }
+ // 10. Let targetDesc be ? target.[[GetOwnProperty]](P).
+ PropertyDescriptor target_desc;
+ Maybe<bool> found =
+ JSReceiver::GetOwnPropertyDescriptor(isolate, target, name, &target_desc);
+ MAYBE_RETURN(found, Nothing<bool>());
+ // 11. If trapResultObj is undefined, then
+ if (trap_result_obj->IsUndefined(isolate)) {
+ // 11a. If targetDesc is undefined, return undefined.
+ if (!found.FromJust()) return Just(false);
+ // 11b. If targetDesc.[[Configurable]] is false, throw a TypeError
+ // exception.
+ if (!target_desc.configurable()) {
+ isolate->Throw(*isolate->factory()->NewTypeError(
+ MessageTemplate::kProxyGetOwnPropertyDescriptorUndefined, name));
+ return Nothing<bool>();
+ }
+ // 11c. Let extensibleTarget be ? IsExtensible(target).
+ Maybe<bool> extensible_target = JSReceiver::IsExtensible(target);
+ MAYBE_RETURN(extensible_target, Nothing<bool>());
+ // 11d. (Assert)
+ // 11e. If extensibleTarget is false, throw a TypeError exception.
+ if (!extensible_target.FromJust()) {
+ isolate->Throw(*isolate->factory()->NewTypeError(
+ MessageTemplate::kProxyGetOwnPropertyDescriptorNonExtensible, name));
+ return Nothing<bool>();
+ }
+ // 11f. Return undefined.
+ return Just(false);
+ }
+ // 12. Let extensibleTarget be ? IsExtensible(target).
+ Maybe<bool> extensible_target = JSReceiver::IsExtensible(target);
+ MAYBE_RETURN(extensible_target, Nothing<bool>());
+ // 13. Let resultDesc be ? ToPropertyDescriptor(trapResultObj).
+ if (!PropertyDescriptor::ToPropertyDescriptor(isolate, trap_result_obj,
+ desc)) {
+ DCHECK(isolate->has_pending_exception());
+ return Nothing<bool>();
+ }
+ // 14. Call CompletePropertyDescriptor(resultDesc).
+ PropertyDescriptor::CompletePropertyDescriptor(isolate, desc);
+ // 15. Let valid be IsCompatiblePropertyDescriptor (extensibleTarget,
+ // resultDesc, targetDesc).
+ Maybe<bool> valid = IsCompatiblePropertyDescriptor(
+ isolate, extensible_target.FromJust(), desc, &target_desc, name,
+ Just(kDontThrow));
+ MAYBE_RETURN(valid, Nothing<bool>());
+ // 16. If valid is false, throw a TypeError exception.
+ if (!valid.FromJust()) {
+ isolate->Throw(*isolate->factory()->NewTypeError(
+ MessageTemplate::kProxyGetOwnPropertyDescriptorIncompatible, name));
+ return Nothing<bool>();
+ }
+ // 17. If resultDesc.[[Configurable]] is false, then
+ if (!desc->configurable()) {
+ // 17a. If targetDesc is undefined or targetDesc.[[Configurable]] is true:
+ if (target_desc.is_empty() || target_desc.configurable()) {
+ // 17a i. Throw a TypeError exception.
+ isolate->Throw(*isolate->factory()->NewTypeError(
+ MessageTemplate::kProxyGetOwnPropertyDescriptorNonConfigurable,
+ name));
+ return Nothing<bool>();
+ }
+ // 17b. If resultDesc has a [[Writable]] field and resultDesc.[[Writable]]
+ // is false, then
+ if (desc->has_writable() && !desc->writable()) {
+ // 17b i. If targetDesc.[[Writable]] is true, throw a TypeError exception.
+ if (target_desc.writable()) {
+ isolate->Throw(*isolate->factory()->NewTypeError(
+ MessageTemplate::
+ kProxyGetOwnPropertyDescriptorNonConfigurableWritable,
+ name));
+ return Nothing<bool>();
+ }
+ }
+ }
+ // 18. Return resultDesc.
+ return Just(true);
+}
+
+Maybe<bool> JSProxy::PreventExtensions(Handle<JSProxy> proxy,
+ ShouldThrow should_throw) {
+ Isolate* isolate = proxy->GetIsolate();
+ STACK_CHECK(isolate, Nothing<bool>());
+ Factory* factory = isolate->factory();
+ Handle<String> trap_name = factory->preventExtensions_string();
+
+ if (proxy->IsRevoked()) {
+ isolate->Throw(
+ *factory->NewTypeError(MessageTemplate::kProxyRevoked, trap_name));
+ return Nothing<bool>();
+ }
+ Handle<JSReceiver> target(JSReceiver::cast(proxy->target()), isolate);
+ Handle<JSReceiver> handler(JSReceiver::cast(proxy->handler()), isolate);
+
+ Handle<Object> trap;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, trap, Object::GetMethod(handler, trap_name), Nothing<bool>());
+ if (trap->IsUndefined(isolate)) {
+ return JSReceiver::PreventExtensions(target, should_throw);
+ }
+
+ Handle<Object> trap_result;
+ Handle<Object> args[] = {target};
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, trap_result,
+ Execution::Call(isolate, trap, handler, arraysize(args), args),
+ Nothing<bool>());
+ if (!trap_result->BooleanValue(isolate)) {
+ RETURN_FAILURE(
+ isolate, should_throw,
+ NewTypeError(MessageTemplate::kProxyTrapReturnedFalsish, trap_name));
+ }
+
+ // Enforce the invariant.
+ Maybe<bool> target_result = JSReceiver::IsExtensible(target);
+ MAYBE_RETURN(target_result, Nothing<bool>());
+ if (target_result.FromJust()) {
+ isolate->Throw(*factory->NewTypeError(
+ MessageTemplate::kProxyPreventExtensionsExtensible));
+ return Nothing<bool>();
+ }
+ return Just(true);
+}
+
+Maybe<bool> JSProxy::IsExtensible(Handle<JSProxy> proxy) {
+ Isolate* isolate = proxy->GetIsolate();
+ STACK_CHECK(isolate, Nothing<bool>());
+ Factory* factory = isolate->factory();
+ Handle<String> trap_name = factory->isExtensible_string();
+
+ if (proxy->IsRevoked()) {
+ isolate->Throw(
+ *factory->NewTypeError(MessageTemplate::kProxyRevoked, trap_name));
+ return Nothing<bool>();
+ }
+ Handle<JSReceiver> target(JSReceiver::cast(proxy->target()), isolate);
+ Handle<JSReceiver> handler(JSReceiver::cast(proxy->handler()), isolate);
+
+ Handle<Object> trap;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, trap, Object::GetMethod(handler, trap_name), Nothing<bool>());
+ if (trap->IsUndefined(isolate)) {
+ return JSReceiver::IsExtensible(target);
+ }
+
+ Handle<Object> trap_result;
+ Handle<Object> args[] = {target};
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, trap_result,
+ Execution::Call(isolate, trap, handler, arraysize(args), args),
+ Nothing<bool>());
+
+ // Enforce the invariant.
+ Maybe<bool> target_result = JSReceiver::IsExtensible(target);
+ MAYBE_RETURN(target_result, Nothing<bool>());
+ if (target_result.FromJust() != trap_result->BooleanValue(isolate)) {
+ isolate->Throw(
+ *factory->NewTypeError(MessageTemplate::kProxyIsExtensibleInconsistent,
+ factory->ToBoolean(target_result.FromJust())));
+ return Nothing<bool>();
+ }
+ return target_result;
+}
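+
+// The invariant enforced above, expressed at the JS level (illustrative):
+//   const p = new Proxy({}, { isExtensible() { return false; } });
+//   Object.isExtensible(p);  // TypeError: the trap result must match
+//                            // the target's actual extensibility (true).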
+
+Handle<DescriptorArray> DescriptorArray::CopyUpTo(Isolate* isolate,
+ Handle<DescriptorArray> desc,
+ int enumeration_index,
+ int slack) {
+ return DescriptorArray::CopyUpToAddAttributes(isolate, desc,
+ enumeration_index, NONE, slack);
+}
+
+Handle<DescriptorArray> DescriptorArray::CopyUpToAddAttributes(
+ Isolate* isolate, Handle<DescriptorArray> desc, int enumeration_index,
+ PropertyAttributes attributes, int slack) {
+ if (enumeration_index + slack == 0) {
+ return isolate->factory()->empty_descriptor_array();
+ }
+
+ int size = enumeration_index;
+
+ Handle<DescriptorArray> descriptors =
+ DescriptorArray::Allocate(isolate, size, slack);
+
+ if (attributes != NONE) {
+ for (int i = 0; i < size; ++i) {
+ MaybeObject value_or_field_type = desc->GetValue(i);
+ Name key = desc->GetKey(i);
+ PropertyDetails details = desc->GetDetails(i);
+ // Bulk attribute changes never affect private properties.
+ if (!key.IsPrivate()) {
+ int mask = DONT_DELETE | DONT_ENUM;
+ // READ_ONLY is an invalid attribute for JS setters/getters.
+ HeapObject heap_object;
+ if (details.kind() != kAccessor ||
+ !(value_or_field_type->GetHeapObjectIfStrong(&heap_object) &&
+ heap_object.IsAccessorPair())) {
+ mask |= READ_ONLY;
+ }
+ details = details.CopyAddAttributes(
+ static_cast<PropertyAttributes>(attributes & mask));
+ }
+ descriptors->Set(i, key, value_or_field_type, details);
+ }
+ } else {
+ for (int i = 0; i < size; ++i) {
+ descriptors->CopyFrom(i, *desc);
+ }
+ }
+
+ if (desc->number_of_descriptors() != enumeration_index) descriptors->Sort();
+
+ return descriptors;
+}
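+
+// The READ_ONLY exclusion above matters for freeze-style bulk updates
+// (illustrative): requesting DONT_DELETE | READ_ONLY makes data properties
+// non-deletable and read-only, while accessor pairs only become
+// non-deletable, because [[Writable]] does not apply to getters/setters.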
+
+// Create a new descriptor array with only enumerable, configurable, writable
+// data properties, but identical field locations.
+Handle<DescriptorArray> DescriptorArray::CopyForFastObjectClone(
+ Isolate* isolate, Handle<DescriptorArray> src, int enumeration_index,
+ int slack) {
+ if (enumeration_index + slack == 0) {
+ return isolate->factory()->empty_descriptor_array();
+ }
+
+ int size = enumeration_index;
+ Handle<DescriptorArray> descriptors =
+ DescriptorArray::Allocate(isolate, size, slack);
+
+ for (int i = 0; i < size; ++i) {
+ Name key = src->GetKey(i);
+ PropertyDetails details = src->GetDetails(i);
+
+ DCHECK(!key.IsPrivateName());
+ DCHECK(details.IsEnumerable());
+ DCHECK_EQ(details.kind(), kData);
+
+ // Ensure the ObjectClone property details are NONE, and that none of the
+ // source details contained DONT_ENUM.
+ PropertyDetails new_details(kData, NONE, details.location(),
+ details.constness(), details.representation(),
+ details.field_index());
+ // Do not propagate the field type of normal object fields from the
+ // original descriptors since FieldType changes don't create new maps.
+ MaybeObject type = src->GetValue(i);
+ if (details.location() == PropertyLocation::kField) {
+ type = MaybeObject::FromObject(FieldType::Any());
+ // TODO(bmeurer,ishell): Igor suggested to use some kind of dynamic
+ // checks in the fast-path for CloneObjectIC instead to avoid the
+ // need to generalize the descriptors here. That will also enable
+ // us to skip the defensive copying of the target map whenever a
+ // CloneObjectIC misses.
+ if (FLAG_modify_field_representation_inplace &&
+ (new_details.representation().IsSmi() ||
+ new_details.representation().IsHeapObject())) {
+ new_details =
+ new_details.CopyWithRepresentation(Representation::Tagged());
+ }
+ }
+ descriptors->Set(i, key, type, new_details);
+ }
+
+ descriptors->Sort();
+
+ return descriptors;
+}
+
+bool DescriptorArray::IsEqualUpTo(DescriptorArray desc, int nof_descriptors) {
+ for (int i = 0; i < nof_descriptors; i++) {
+ if (GetKey(i) != desc.GetKey(i) || GetValue(i) != desc.GetValue(i)) {
+ return false;
+ }
+ PropertyDetails details = GetDetails(i);
+ PropertyDetails other_details = desc.GetDetails(i);
+ if (details.kind() != other_details.kind() ||
+ details.location() != other_details.location() ||
+ !details.representation().Equals(other_details.representation())) {
+ return false;
+ }
+ }
+ return true;
+}
+
+Handle<FixedArray> FixedArray::SetAndGrow(Isolate* isolate,
+ Handle<FixedArray> array, int index,
+ Handle<Object> value,
+ AllocationType allocation) {
+ if (index < array->length()) {
+ array->set(index, *value);
+ return array;
+ }
+ int capacity = array->length();
+ do {
+ capacity = JSObject::NewElementsCapacity(capacity);
+ } while (capacity <= index);
+ Handle<FixedArray> new_array =
+ isolate->factory()->NewUninitializedFixedArray(capacity, allocation);
+ array->CopyTo(0, *new_array, 0, array->length());
+ new_array->FillWithHoles(array->length(), new_array->length());
+ new_array->set(index, *value);
+ return new_array;
+}
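+
+// Usage sketch (illustrative; |value| is any handle): writes beyond the
+// current capacity return a grown copy, so callers must use the result:
+//   Handle<FixedArray> array = isolate->factory()->NewFixedArray(4);
+//   array = FixedArray::SetAndGrow(isolate, array, 10, value);
+//   DCHECK(array->get(10) == *value);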
+
+Handle<FixedArray> FixedArray::ShrinkOrEmpty(Isolate* isolate,
+ Handle<FixedArray> array,
+ int new_length) {
+ if (new_length == 0) {
+ return array->GetReadOnlyRoots().empty_fixed_array_handle();
+ } else {
+ array->Shrink(isolate, new_length);
+ return array;
+ }
+}
+
+void FixedArray::Shrink(Isolate* isolate, int new_length) {
+ DCHECK(0 < new_length && new_length <= length());
+ if (new_length < length()) {
+ isolate->heap()->RightTrimFixedArray(*this, length() - new_length);
+ }
+}
+
+void FixedArray::CopyTo(int pos, FixedArray dest, int dest_pos, int len) const {
+ DisallowHeapAllocation no_gc;
+ // Return early if len == 0 so that we don't try to read the write barrier
+ // mode off a canonical read-only empty fixed array.
+ if (len == 0) return;
+ WriteBarrierMode mode = dest.GetWriteBarrierMode(no_gc);
+ for (int index = 0; index < len; index++) {
+ dest.set(dest_pos + index, get(pos + index), mode);
+ }
+}
+
+// static
+Handle<ArrayList> ArrayList::Add(Isolate* isolate, Handle<ArrayList> array,
+ Handle<Object> obj) {
+ int length = array->Length();
+ array = EnsureSpace(isolate, array, length + 1);
+ // Check that GC didn't remove elements from the array.
+ DCHECK_EQ(array->Length(), length);
+ array->Set(length, *obj);
+ array->SetLength(length + 1);
+ return array;
+}
+
+// static
+Handle<ArrayList> ArrayList::Add(Isolate* isolate, Handle<ArrayList> array,
+ Handle<Object> obj1, Handle<Object> obj2) {
+ int length = array->Length();
+ array = EnsureSpace(isolate, array, length + 2);
+ // Check that GC didn't remove elements from the array.
+ DCHECK_EQ(array->Length(), length);
+ array->Set(length, *obj1);
+ array->Set(length + 1, *obj2);
+ array->SetLength(length + 2);
+ return array;
+}
+
+// static
+Handle<ArrayList> ArrayList::New(Isolate* isolate, int size) {
+ Handle<FixedArray> fixed_array =
+ isolate->factory()->NewFixedArray(size + kFirstIndex);
+ fixed_array->set_map_no_write_barrier(
+ ReadOnlyRoots(isolate).array_list_map());
+ Handle<ArrayList> result = Handle<ArrayList>::cast(fixed_array);
+ result->SetLength(0);
+ return result;
+}
+
+Handle<FixedArray> ArrayList::Elements(Isolate* isolate,
+ Handle<ArrayList> array) {
+ int length = array->Length();
+ Handle<FixedArray> result = isolate->factory()->NewFixedArray(length);
+ // Do not copy the first entry, i.e., the length.
+ array->CopyTo(kFirstIndex, *result, 0, length);
+ return result;
+}
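+
+// Typical usage (illustrative sketch; |obj1| and |obj2| are any handles):
+//   Handle<ArrayList> list = ArrayList::New(isolate, 2);
+//   list = ArrayList::Add(isolate, list, obj1);
+//   list = ArrayList::Add(isolate, list, obj1, obj2);
+//   Handle<FixedArray> flat = ArrayList::Elements(isolate, list);  // 3 items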
+
+namespace {
+
+Handle<FixedArray> EnsureSpaceInFixedArray(Isolate* isolate,
+ Handle<FixedArray> array,
+ int length) {
+ int capacity = array->length();
+ if (capacity < length) {
+ int new_capacity = length;
+ new_capacity = new_capacity + Max(new_capacity / 2, 2);
+ int grow_by = new_capacity - capacity;
+ array = isolate->factory()->CopyFixedArrayAndGrow(array, grow_by);
+ }
+ return array;
+}
+
+} // namespace
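+
+// The growth policy above is roughly 1.5x (illustrative): growing a
+// capacity-8 array to hold length 10 yields
+//   new_capacity = 10 + Max(10 / 2, 2) = 15.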
+
+// static
+Handle<ArrayList> ArrayList::EnsureSpace(Isolate* isolate,
+ Handle<ArrayList> array, int length) {
+ const bool empty = (array->length() == 0);
+ Handle<FixedArray> ret =
+ EnsureSpaceInFixedArray(isolate, array, kFirstIndex + length);
+ if (empty) {
+ ret->set_map_no_write_barrier(array->GetReadOnlyRoots().array_list_map());
+
+ Handle<ArrayList>::cast(ret)->SetLength(0);
+ }
+ return Handle<ArrayList>::cast(ret);
+}
+
+// static
+Handle<WeakArrayList> WeakArrayList::AddToEnd(Isolate* isolate,
+ Handle<WeakArrayList> array,
+ const MaybeObjectHandle& value) {
+ int length = array->length();
+ array = EnsureSpace(isolate, array, length + 1);
+ // Reload length; GC might have removed elements from the array.
+ length = array->length();
+ array->Set(length, *value);
+ array->set_length(length + 1);
+ return array;
+}
+
+bool WeakArrayList::IsFull() { return length() == capacity(); }
+
+// static
+Handle<WeakArrayList> WeakArrayList::EnsureSpace(Isolate* isolate,
+ Handle<WeakArrayList> array,
+ int length,
+ AllocationType allocation) {
+ int capacity = array->capacity();
+ if (capacity < length) {
+ int new_capacity = length;
+ new_capacity = new_capacity + Max(new_capacity / 2, 2);
+ int grow_by = new_capacity - capacity;
+ array = isolate->factory()->CopyWeakArrayListAndGrow(array, grow_by,
+ allocation);
+ }
+ return array;
+}
+
+int WeakArrayList::CountLiveWeakReferences() const {
+ int live_weak_references = 0;
+ for (int i = 0; i < length(); i++) {
+ if (Get(i)->IsWeak()) {
+ ++live_weak_references;
+ }
+ }
+ return live_weak_references;
+}
+
+bool WeakArrayList::RemoveOne(const MaybeObjectHandle& value) {
+ if (length() == 0) return false;
+ // Optimize for the most recently added element to be removed again.
+ MaybeObject cleared_weak_ref =
+ HeapObjectReference::ClearedValue(GetIsolate());
+ int last_index = length() - 1;
+ for (int i = last_index; i >= 0; --i) {
+ if (Get(i) == *value) {
+ // Move the last element into this slot (or no-op, if this is the
+ // last slot).
+ Set(i, Get(last_index));
+ Set(last_index, cleared_weak_ref);
+ set_length(last_index);
+ return true;
+ }
+ }
+ return false;
+}
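+
+// Behavioral sketch (illustrative): RemoveOne swaps the last element into
+// the vacated slot, so removal is O(1) for the most recently added element
+// and does not preserve element order:
+//   [a, b, c] --RemoveOne(b)--> [a, c]   // c moved into b's old slot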
+
+// static
+Handle<WeakArrayList> PrototypeUsers::Add(Isolate* isolate,
+ Handle<WeakArrayList> array,
+ Handle<Map> value,
+ int* assigned_index) {
+ int length = array->length();
+ if (length == 0) {
+ // Uninitialized WeakArrayList; need to initialize empty_slot_index.
+ array = WeakArrayList::EnsureSpace(isolate, array, kFirstIndex + 1);
+ set_empty_slot_index(*array, kNoEmptySlotsMarker);
+ array->Set(kFirstIndex, HeapObjectReference::Weak(*value));
+ array->set_length(kFirstIndex + 1);
+ if (assigned_index != nullptr) *assigned_index = kFirstIndex;
+ return array;
+ }
+
+ // If the array has unfilled space at the end, use it.
+ if (!array->IsFull()) {
+ array->Set(length, HeapObjectReference::Weak(*value));
+ array->set_length(length + 1);
+ if (assigned_index != nullptr) *assigned_index = length;
+ return array;
+ }
+
+ // If there are empty slots, use one of them.
+ int empty_slot = Smi::ToInt(empty_slot_index(*array));
+ if (empty_slot != kNoEmptySlotsMarker) {
+ DCHECK_GE(empty_slot, kFirstIndex);
+ CHECK_LT(empty_slot, array->length());
+ int next_empty_slot = array->Get(empty_slot).ToSmi().value();
+
+ array->Set(empty_slot, HeapObjectReference::Weak(*value));
+ if (assigned_index != nullptr) *assigned_index = empty_slot;
+
+ set_empty_slot_index(*array, next_empty_slot);
+ return array;
+ } else {
+ DCHECK_EQ(empty_slot, kNoEmptySlotsMarker);
+ }
+
+ // Array full and no empty slots. Grow the array.
+ array = WeakArrayList::EnsureSpace(isolate, array, length + 1);
+ array->Set(length, HeapObjectReference::Weak(*value));
+ array->set_length(length + 1);
+ if (assigned_index != nullptr) *assigned_index = length;
+ return array;
+}
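+
+// Empty slots form a singly-linked free list threaded through the array
+// (illustrative sketch): each empty slot stores, as a Smi, the index of
+// the next empty slot, the list head lives in empty_slot_index(), and the
+// tail stores kNoEmptySlotsMarker. E.g. with slots 2 and 4 empty:
+//   empty_slot_index == 2, Get(2) == Smi(4), Get(4) == Smi(kNoEmptySlotsMarker)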
+
+WeakArrayList PrototypeUsers::Compact(Handle<WeakArrayList> array, Heap* heap,
+ CompactionCallback callback,
+ AllocationType allocation) {
+ if (array->length() == 0) {
+ return *array;
+ }
+ int new_length = kFirstIndex + array->CountLiveWeakReferences();
+ if (new_length == array->length()) {
+ return *array;
+ }
+
+ Handle<WeakArrayList> new_array = WeakArrayList::EnsureSpace(
+ heap->isolate(),
+ handle(ReadOnlyRoots(heap).empty_weak_array_list(), heap->isolate()),
+ new_length, allocation);
+ // Allocation might have caused GC and turned some of the elements into
+ // cleared weak heap objects. Count the number of live objects again.
+ int copy_to = kFirstIndex;
+ for (int i = kFirstIndex; i < array->length(); i++) {
+ MaybeObject element = array->Get(i);
+ HeapObject value;
+ if (element->GetHeapObjectIfWeak(&value)) {
+ callback(value, i, copy_to);
+ new_array->Set(copy_to++, element);
+ } else {
+ DCHECK(element->IsCleared() || element->IsSmi());
+ }
+ }
+ new_array->set_length(copy_to);
+ set_empty_slot_index(*new_array, kNoEmptySlotsMarker);
+ return *new_array;
+}
+
+Handle<RegExpMatchInfo> RegExpMatchInfo::ReserveCaptures(
+ Isolate* isolate, Handle<RegExpMatchInfo> match_info, int capture_count) {
+ DCHECK_GE(match_info->length(), kLastMatchOverhead);
+ const int required_length = kFirstCaptureIndex + capture_count;
+ return Handle<RegExpMatchInfo>::cast(
+ EnsureSpaceInFixedArray(isolate, match_info, required_length));
+}
+
+// static
+Handle<FrameArray> FrameArray::AppendJSFrame(Handle<FrameArray> in,
+ Handle<Object> receiver,
+ Handle<JSFunction> function,
+ Handle<AbstractCode> code,
+ int offset, int flags,
+ Handle<FixedArray> parameters) {
+ const int frame_count = in->FrameCount();
+ const int new_length = LengthFor(frame_count + 1);
+ Handle<FrameArray> array =
+ EnsureSpace(function->GetIsolate(), in, new_length);
+ array->SetReceiver(frame_count, *receiver);
+ array->SetFunction(frame_count, *function);
+ array->SetCode(frame_count, *code);
+ array->SetOffset(frame_count, Smi::FromInt(offset));
+ array->SetFlags(frame_count, Smi::FromInt(flags));
+ array->SetParameters(frame_count, *parameters);
+ array->set(kFrameCountIndex, Smi::FromInt(frame_count + 1));
+ return array;
+}
+
+// static
+Handle<FrameArray> FrameArray::AppendWasmFrame(
+ Handle<FrameArray> in, Handle<WasmInstanceObject> wasm_instance,
+ int wasm_function_index, wasm::WasmCode* code, int offset, int flags) {
+ Isolate* isolate = wasm_instance->GetIsolate();
+ const int frame_count = in->FrameCount();
+ const int new_length = LengthFor(frame_count + 1);
+ Handle<FrameArray> array = EnsureSpace(isolate, in, new_length);
+ // The {code} will be {nullptr} for interpreted wasm frames.
+ Handle<Object> code_ref = isolate->factory()->undefined_value();
+ if (code) {
+ auto native_module = wasm_instance->module_object().shared_native_module();
+ code_ref = Managed<wasm::GlobalWasmCodeRef>::Allocate(
+ isolate, 0, code, std::move(native_module));
+ }
+ array->SetWasmInstance(frame_count, *wasm_instance);
+ array->SetWasmFunctionIndex(frame_count, Smi::FromInt(wasm_function_index));
+ array->SetWasmCodeObject(frame_count, *code_ref);
+ array->SetOffset(frame_count, Smi::FromInt(offset));
+ array->SetFlags(frame_count, Smi::FromInt(flags));
+ array->set(kFrameCountIndex, Smi::FromInt(frame_count + 1));
+ return array;
+}
+
+void FrameArray::ShrinkToFit(Isolate* isolate) {
+ Shrink(isolate, LengthFor(FrameCount()));
+}
+
+// static
+Handle<FrameArray> FrameArray::EnsureSpace(Isolate* isolate,
+ Handle<FrameArray> array,
+ int length) {
+ return Handle<FrameArray>::cast(
+ EnsureSpaceInFixedArray(isolate, array, length));
+}
+
+Handle<DescriptorArray> DescriptorArray::Allocate(Isolate* isolate,
+ int nof_descriptors,
+ int slack,
+ AllocationType allocation) {
+ return nof_descriptors + slack == 0
+ ? isolate->factory()->empty_descriptor_array()
+ : isolate->factory()->NewDescriptorArray(nof_descriptors, slack,
+ allocation);
+}
+
+void DescriptorArray::Initialize(EnumCache enum_cache,
+ HeapObject undefined_value,
+ int nof_descriptors, int slack) {
+ DCHECK_GE(nof_descriptors, 0);
+ DCHECK_GE(slack, 0);
+ DCHECK_LE(nof_descriptors + slack, kMaxNumberOfDescriptors);
+ set_number_of_all_descriptors(nof_descriptors + slack);
+ set_number_of_descriptors(nof_descriptors);
+ set_raw_number_of_marked_descriptors(0);
+ set_filler16bits(0);
+ set_enum_cache(enum_cache);
+ MemsetTagged(GetDescriptorSlot(0), undefined_value,
+ number_of_all_descriptors() * kEntrySize);
+}
+
+void DescriptorArray::ClearEnumCache() {
+ set_enum_cache(GetReadOnlyRoots().empty_enum_cache());
+}
+
+void DescriptorArray::Replace(int index, Descriptor* descriptor) {
+ descriptor->SetSortedKeyIndex(GetSortedKeyIndex(index));
+ Set(index, descriptor);
+}
+
+// static
+void DescriptorArray::InitializeOrChangeEnumCache(
+ Handle<DescriptorArray> descriptors, Isolate* isolate,
+ Handle<FixedArray> keys, Handle<FixedArray> indices) {
+ EnumCache enum_cache = descriptors->enum_cache();
+ if (enum_cache == ReadOnlyRoots(isolate).empty_enum_cache()) {
+ enum_cache = *isolate->factory()->NewEnumCache(keys, indices);
+ descriptors->set_enum_cache(enum_cache);
+ } else {
+ enum_cache.set_keys(*keys);
+ enum_cache.set_indices(*indices);
+ }
+}
+
+void DescriptorArray::CopyFrom(int index, DescriptorArray src) {
+ PropertyDetails details = src.GetDetails(index);
+ Set(index, src.GetKey(index), src.GetValue(index), details);
+}
+
+void DescriptorArray::Sort() {
+ // In-place heap sort.
+ int len = number_of_descriptors();
+ // Reset sorting since the descriptor array might contain invalid pointers.
+ for (int i = 0; i < len; ++i) SetSortedKey(i, i);
+ // Bottom-up max-heap construction.
+ // Index of the last node with children
+ const int max_parent_index = (len / 2) - 1;
+ for (int i = max_parent_index; i >= 0; --i) {
+ int parent_index = i;
+ const uint32_t parent_hash = GetSortedKey(i).Hash();
+ while (parent_index <= max_parent_index) {
+ int child_index = 2 * parent_index + 1;
+ uint32_t child_hash = GetSortedKey(child_index).Hash();
+ if (child_index + 1 < len) {
+ uint32_t right_child_hash = GetSortedKey(child_index + 1).Hash();
+ if (right_child_hash > child_hash) {
+ child_index++;
+ child_hash = right_child_hash;
+ }
+ }
+ if (child_hash <= parent_hash) break;
+ SwapSortedKeys(parent_index, child_index);
+ // Now element at child_index could be < its children.
+ parent_index = child_index; // parent_hash remains correct.
+ }
+ }
+
+ // Extract elements and create sorted array.
+ for (int i = len - 1; i > 0; --i) {
+ // Put max element at the back of the array.
+ SwapSortedKeys(0, i);
+ // Shift down the new top element.
+ int parent_index = 0;
+ const uint32_t parent_hash = GetSortedKey(parent_index).Hash();
+ const int max_parent_index = (i / 2) - 1;
+ while (parent_index <= max_parent_index) {
+ int child_index = parent_index * 2 + 1;
+ uint32_t child_hash = GetSortedKey(child_index).Hash();
+ if (child_index + 1 < i) {
+ uint32_t right_child_hash = GetSortedKey(child_index + 1).Hash();
+ if (right_child_hash > child_hash) {
+ child_index++;
+ child_hash = right_child_hash;
+ }
+ }
+ if (child_hash <= parent_hash) break;
+ SwapSortedKeys(parent_index, child_index);
+ parent_index = child_index;
+ }
+ }
+ DCHECK(IsSortedNoDuplicates());
+}
+
+int16_t DescriptorArray::UpdateNumberOfMarkedDescriptors(
+ unsigned mark_compact_epoch, int16_t new_marked) {
+ STATIC_ASSERT(kMaxNumberOfDescriptors <=
+ NumberOfMarkedDescriptors::kMaxNumberOfMarkedDescriptors);
+ int16_t old_raw_marked = raw_number_of_marked_descriptors();
+ int16_t old_marked =
+ NumberOfMarkedDescriptors::decode(mark_compact_epoch, old_raw_marked);
+ int16_t new_raw_marked =
+ NumberOfMarkedDescriptors::encode(mark_compact_epoch, new_marked);
+ while (old_marked < new_marked) {
+ int16_t actual_raw_marked = CompareAndSwapRawNumberOfMarkedDescriptors(
+ old_raw_marked, new_raw_marked);
+ if (actual_raw_marked == old_raw_marked) {
+ break;
+ }
+ old_raw_marked = actual_raw_marked;
+ old_marked =
+ NumberOfMarkedDescriptors::decode(mark_compact_epoch, old_raw_marked);
+ }
+ return old_marked;
+}
+
+Handle<AccessorPair> AccessorPair::Copy(Isolate* isolate,
+ Handle<AccessorPair> pair) {
+ Handle<AccessorPair> copy = isolate->factory()->NewAccessorPair();
+ copy->set_getter(pair->getter());
+ copy->set_setter(pair->setter());
+ return copy;
+}
+
+Handle<Object> AccessorPair::GetComponent(Isolate* isolate,
+ Handle<AccessorPair> accessor_pair,
+ AccessorComponent component) {
+ Object accessor = accessor_pair->get(component);
+ if (accessor.IsFunctionTemplateInfo()) {
+ return ApiNatives::InstantiateFunction(
+ handle(FunctionTemplateInfo::cast(accessor), isolate))
+ .ToHandleChecked();
+ }
+ if (accessor.IsNull(isolate)) {
+ return isolate->factory()->undefined_value();
+ }
+ return handle(accessor, isolate);
+}
+
+#ifdef DEBUG
+bool DescriptorArray::IsEqualTo(DescriptorArray other) {
+ if (number_of_all_descriptors() != other.number_of_all_descriptors()) {
+ return false;
+ }
+ for (int i = 0; i < number_of_all_descriptors(); ++i) {
+ if (get(i) != other.get(i)) return false;
+ }
+ return true;
+}
+#endif
+
+// static
+MaybeHandle<String> Name::ToFunctionName(Isolate* isolate, Handle<Name> name) {
+ if (name->IsString()) return Handle<String>::cast(name);
+ // ES6 section 9.2.11 SetFunctionName, step 4.
+ Handle<Object> description(Handle<Symbol>::cast(name)->name(), isolate);
+ if (description->IsUndefined(isolate)) {
+ return isolate->factory()->empty_string();
+ }
+ IncrementalStringBuilder builder(isolate);
+ builder.AppendCharacter('[');
+ builder.AppendString(Handle<String>::cast(description));
+ builder.AppendCharacter(']');
+ return builder.Finish();
+}
+
+// static
+MaybeHandle<String> Name::ToFunctionName(Isolate* isolate, Handle<Name> name,
+ Handle<String> prefix) {
+ Handle<String> name_string;
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, name_string,
+ ToFunctionName(isolate, name), String);
+ IncrementalStringBuilder builder(isolate);
+ builder.AppendString(prefix);
+ builder.AppendCharacter(' ');
+ builder.AppendString(name_string);
+ return builder.Finish();
+}
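+
+// Examples (illustrative):
+//   ToFunctionName(isolate, "f")                   -> "f"
+//   ToFunctionName(isolate, Symbol("tag"))         -> "[tag]"
+//   ToFunctionName(isolate, Symbol())              -> ""
+//   ToFunctionName(isolate, Symbol("tag"), "get")  -> "get [tag]"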
+
+void Relocatable::PostGarbageCollectionProcessing(Isolate* isolate) {
+ Relocatable* current = isolate->relocatable_top();
+ while (current != nullptr) {
+ current->PostGarbageCollection();
+ current = current->prev_;
+ }
+}
+
+// Reserve space for statics needing saving and restoring.
+int Relocatable::ArchiveSpacePerThread() {
+ return sizeof(Relocatable*); // NOLINT
+}
+
+// Archive statics that are thread-local.
+char* Relocatable::ArchiveState(Isolate* isolate, char* to) {
+ *reinterpret_cast<Relocatable**>(to) = isolate->relocatable_top();
+ isolate->set_relocatable_top(nullptr);
+ return to + ArchiveSpacePerThread();
+}
+
+// Restore statics that are thread-local.
+char* Relocatable::RestoreState(Isolate* isolate, char* from) {
+ isolate->set_relocatable_top(*reinterpret_cast<Relocatable**>(from));
+ return from + ArchiveSpacePerThread();
+}
+
+char* Relocatable::Iterate(RootVisitor* v, char* thread_storage) {
+ Relocatable* top = *reinterpret_cast<Relocatable**>(thread_storage);
+ Iterate(v, top);
+ return thread_storage + ArchiveSpacePerThread();
+}
+
+void Relocatable::Iterate(Isolate* isolate, RootVisitor* v) {
+ Iterate(v, isolate->relocatable_top());
+}
+
+void Relocatable::Iterate(RootVisitor* v, Relocatable* top) {
+ Relocatable* current = top;
+ while (current != nullptr) {
+ current->IterateInstance(v);
+ current = current->prev_;
+ }
+}
+
+namespace {
+
+template <typename sinkchar>
+void WriteFixedArrayToFlat(FixedArray fixed_array, int length, String separator,
+ sinkchar* sink, int sink_length) {
+ DisallowHeapAllocation no_allocation;
+ CHECK_GT(length, 0);
+ CHECK_LE(length, fixed_array.length());
+#ifdef DEBUG
+ sinkchar* sink_end = sink + sink_length;
+#endif
+
+ const int separator_length = separator.length();
+ const bool use_one_byte_separator_fast_path =
+ separator_length == 1 && sizeof(sinkchar) == 1 &&
+ StringShape(separator).IsSequentialOneByte();
+ uint8_t separator_one_char;
+ if (use_one_byte_separator_fast_path) {
+ CHECK(StringShape(separator).IsSequentialOneByte());
+ CHECK_EQ(separator.length(), 1);
+ separator_one_char =
+ SeqOneByteString::cast(separator).GetChars(no_allocation)[0];
+ }
+
+ uint32_t num_separators = 0;
+ for (int i = 0; i < length; i++) {
+ Object element = fixed_array.get(i);
+ const bool element_is_separator_sequence = element.IsSmi();
+
+ // If element is a Smi, it represents the number of separators to write.
+ if (V8_UNLIKELY(element_is_separator_sequence)) {
+ CHECK(element.ToUint32(&num_separators));
+ // Verify that Smis (number of separators) only occur when necessary:
+ // 1) at the beginning
+ // 2) at the end
+ // 3) when the number of separators > 1
+ // - It is assumed that consecutive Strings will have one separator,
+ // so there is no need for a Smi.
+ DCHECK(i == 0 || i == length - 1 || num_separators > 1);
+ }
+
+ // Write separator(s) if necessary.
+ if (num_separators > 0 && separator_length > 0) {
+ // TODO(pwong): Consider doubling strategy employed by runtime-strings.cc
+ // WriteRepeatToFlat().
+ // Fast path for single character, single byte separators.
+ if (use_one_byte_separator_fast_path) {
+ DCHECK_LE(sink + num_separators, sink_end);
+ memset(sink, separator_one_char, num_separators);
+ DCHECK_EQ(separator_length, 1);
+ sink += num_separators;
+ } else {
+ for (uint32_t j = 0; j < num_separators; j++) {
+ DCHECK_LE(sink + separator_length, sink_end);
+ String::WriteToFlat(separator, sink, 0, separator_length);
+ sink += separator_length;
+ }
+ }
+ }
+
+ if (V8_UNLIKELY(element_is_separator_sequence)) {
+ num_separators = 0;
+ } else {
+ DCHECK(element.IsString());
+ String string = String::cast(element);
+ const int string_length = string.length();
+
+ DCHECK(string_length == 0 || sink < sink_end);
+ String::WriteToFlat(string, sink, 0, string_length);
+ sink += string_length;
+
+ // Next string element, needs at least one separator preceding it.
+ num_separators = 1;
+ }
+ }
+
+ // Verify we have written to the end of the sink.
+ DCHECK_EQ(sink, sink_end);
+}
+
+} // namespace
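+
+// Worked example (illustrative): joining ["a", <hole>, "b"] with "-" is
+// passed to WriteFixedArrayToFlat as the fixed array
+//   ["a", Smi(2), "b"]
+// where the Smi encodes two consecutive separators, producing "a--b".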
+
+// static
+Address JSArray::ArrayJoinConcatToSequentialString(Isolate* isolate,
+ Address raw_fixed_array,
+ intptr_t length,
+ Address raw_separator,
+ Address raw_dest) {
+ DisallowHeapAllocation no_allocation;
+ DisallowJavascriptExecution no_js(isolate);
+ FixedArray fixed_array = FixedArray::cast(Object(raw_fixed_array));
+ String separator = String::cast(Object(raw_separator));
+ String dest = String::cast(Object(raw_dest));
+ DCHECK(fixed_array.IsFixedArray());
+ DCHECK(StringShape(dest).IsSequentialOneByte() ||
+ StringShape(dest).IsSequentialTwoByte());
+
+ if (StringShape(dest).IsSequentialOneByte()) {
+ WriteFixedArrayToFlat(fixed_array, static_cast<int>(length), separator,
+ SeqOneByteString::cast(dest).GetChars(no_allocation),
+ dest.length());
+ } else {
+ DCHECK(StringShape(dest).IsSequentialTwoByte());
+ WriteFixedArrayToFlat(fixed_array, static_cast<int>(length), separator,
+ SeqTwoByteString::cast(dest).GetChars(no_allocation),
+ dest.length());
+ }
+ return dest.ptr();
+}
+
+uint32_t StringHasher::MakeArrayIndexHash(uint32_t value, int length) {
+ // For array indexes, mix the length into the hash, as an array index
+ // could be zero.
+ DCHECK_GT(length, 0);
+ DCHECK_LE(length, String::kMaxArrayIndexSize);
+ DCHECK(TenToThe(String::kMaxCachedArrayIndexLength) <
+ (1 << String::kArrayIndexValueBits));
+
+ value <<= String::ArrayIndexValueBits::kShift;
+ value |= length << String::ArrayIndexLengthBits::kShift;
+
+ DCHECK_EQ(value & String::kIsNotArrayIndexMask, 0);
+ DCHECK_EQ(length <= String::kMaxCachedArrayIndexLength,
+ Name::ContainsCachedArrayIndex(value));
+ return value;
+}
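+
+// Resulting bit layout (illustrative sketch): the index value sits in
+// ArrayIndexValueBits and the decimal length in ArrayIndexLengthBits, so
+// e.g. the string "42" packs value 42 next to length 2, leaving
+// kIsNotArrayIndexMask clear so the hash is recognizable as an index.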
+
+Handle<Object> CacheInitialJSArrayMaps(Handle<Context> native_context,
+ Handle<Map> initial_map) {
+ // Replace all of the cached initial array maps in the native context with
+ // the appropriate transitioned elements kind maps.
+ Handle<Map> current_map = initial_map;
+ ElementsKind kind = current_map->elements_kind();
+ DCHECK_EQ(GetInitialFastElementsKind(), kind);
+ native_context->set(Context::ArrayMapIndex(kind), *current_map);
+ for (int i = GetSequenceIndexFromFastElementsKind(kind) + 1;
+ i < kFastElementsKindCount; ++i) {
+ Handle<Map> new_map;
+ ElementsKind next_kind = GetFastElementsKindFromSequenceIndex(i);
+ Map maybe_elements_transition = current_map->ElementsTransitionMap();
+ if (!maybe_elements_transition.is_null()) {
+ new_map = handle(maybe_elements_transition, native_context->GetIsolate());
+ } else {
+ new_map =
+ Map::CopyAsElementsKind(native_context->GetIsolate(), current_map,
+ next_kind, INSERT_TRANSITION);
+ }
+ DCHECK_EQ(next_kind, new_map->elements_kind());
+ native_context->set(Context::ArrayMapIndex(next_kind), *new_map);
+ current_map = new_map;
+ }
+ return initial_map;
+}
+
+STATIC_ASSERT_FIELD_OFFSETS_EQUAL(HeapNumber::kValueOffset,
+ Oddball::kToNumberRawOffset);
+
+void Oddball::Initialize(Isolate* isolate, Handle<Oddball> oddball,
+ const char* to_string, Handle<Object> to_number,
+ const char* type_of, byte kind) {
+ Handle<String> internalized_to_string =
+ isolate->factory()->InternalizeUtf8String(to_string);
+ Handle<String> internalized_type_of =
+ isolate->factory()->InternalizeUtf8String(type_of);
+ if (to_number->IsHeapNumber()) {
+ oddball->set_to_number_raw_as_bits(
+ Handle<HeapNumber>::cast(to_number)->value_as_bits());
+ } else {
+ oddball->set_to_number_raw(to_number->Number());
+ }
+ oddball->set_to_number(*to_number);
+ oddball->set_to_string(*internalized_to_string);
+ oddball->set_type_of(*internalized_type_of);
+ oddball->set_kind(kind);
+}
+
+// static
+int Script::GetEvalPosition(Isolate* isolate, Handle<Script> script) {
+ DCHECK(script->compilation_type() == Script::COMPILATION_TYPE_EVAL);
+ int position = script->eval_from_position();
+ if (position < 0) {
+ // Due to laziness, the position may not have been translated from its code
+ // offset yet; untranslated positions are encoded as negative integers. In
+ // that case, translate and store the position.
+ if (!script->has_eval_from_shared()) {
+ position = 0;
+ } else {
+ Handle<SharedFunctionInfo> shared =
+ handle(script->eval_from_shared(), isolate);
+ SharedFunctionInfo::EnsureSourcePositionsAvailable(isolate, shared);
+ position = shared->abstract_code().SourcePosition(-position);
+ }
+ DCHECK_GE(position, 0);
+ script->set_eval_from_position(position);
+ }
+ return position;
+}
+
+void Script::InitLineEnds(Handle<Script> script) {
+ Isolate* isolate = script->GetIsolate();
+ if (!script->line_ends().IsUndefined(isolate)) return;
+ DCHECK(script->type() != Script::TYPE_WASM ||
+ script->source_mapping_url().IsString());
+
+ Object src_obj = script->source();
+ if (!src_obj.IsString()) {
+ DCHECK(src_obj.IsUndefined(isolate));
+ script->set_line_ends(ReadOnlyRoots(isolate).empty_fixed_array());
+ } else {
+ DCHECK(src_obj.IsString());
+ Handle<String> src(String::cast(src_obj), isolate);
+ Handle<FixedArray> array = String::CalculateLineEnds(isolate, src, true);
+ script->set_line_ends(*array);
+ }
+
+ DCHECK(script->line_ends().IsFixedArray());
+}
+
+bool Script::GetPositionInfo(Handle<Script> script, int position,
+ PositionInfo* info, OffsetFlag offset_flag) {
+ // For wasm, we do not create an artificial line_ends array, but do the
+ // translation directly.
+ if (script->type() != Script::TYPE_WASM) InitLineEnds(script);
+ return script->GetPositionInfo(position, info, offset_flag);
+}
+
+bool Script::IsUserJavaScript() { return type() == Script::TYPE_NORMAL; }
+
+bool Script::ContainsAsmModule() {
+ DisallowHeapAllocation no_gc;
+ SharedFunctionInfo::ScriptIterator iter(this->GetIsolate(), *this);
+ for (SharedFunctionInfo info = iter.Next(); !info.is_null();
+ info = iter.Next()) {
+ if (info.HasAsmWasmData()) return true;
+ }
+ return false;
+}
+
+namespace {
+bool GetPositionInfoSlow(const Script script, int position,
+ Script::PositionInfo* info) {
+ if (!script.source().IsString()) return false;
+ if (position < 0) position = 0;
+
+ String source_string = String::cast(script.source());
+ int line = 0;
+ int line_start = 0;
+ int len = source_string.length();
+ for (int pos = 0; pos <= len; ++pos) {
+ if (pos == len || source_string.Get(pos) == '\n') {
+ if (position <= pos) {
+ info->line = line;
+ info->column = position - line_start;
+ info->line_start = line_start;
+ info->line_end = pos;
+ return true;
+ }
+ line++;
+ line_start = pos + 1;
+ }
+ }
+ return false;
+}
+} // namespace
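+
+// Worked example (illustrative): for source "ab\ncd" and position 3
+// (the 'c'), GetPositionInfoSlow yields
+//   line == 1, column == 0, line_start == 3, line_end == 5.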
+
+#define SMI_VALUE(x) (Smi::ToInt(x))
+bool Script::GetPositionInfo(int position, PositionInfo* info,
+ OffsetFlag offset_flag) const {
+ DisallowHeapAllocation no_allocation;
+
+ // For wasm, we do not rely on the line_ends array, but do the translation
+ // directly.
+ if (type() == Script::TYPE_WASM) {
+ DCHECK_LE(0, position);
+ return WasmModuleObject::cast(wasm_module_object())
+ .GetPositionInfo(static_cast<uint32_t>(position), info);
+ }
+
+ if (line_ends().IsUndefined()) {
+ // Slow mode: we do not have line_ends. We have to iterate through source.
+ if (!GetPositionInfoSlow(*this, position, info)) return false;
+ } else {
+ DCHECK(line_ends().IsFixedArray());
+ FixedArray ends = FixedArray::cast(line_ends());
+
+ const int ends_len = ends.length();
+ if (ends_len == 0) return false;
+
+ // Return early on invalid positions. Negative positions behave as if 0 was
+ // passed, and positions beyond the end of the script return as failure.
+ if (position < 0) {
+ position = 0;
+ } else if (position > SMI_VALUE(ends.get(ends_len - 1))) {
+ return false;
+ }
+
+ // Determine line number by doing a binary search on the line ends array.
+ if (SMI_VALUE(ends.get(0)) >= position) {
+ info->line = 0;
+ info->line_start = 0;
+ info->column = position;
+ } else {
+ int left = 0;
+ int right = ends_len - 1;
+
+ while (right > 0) {
+ DCHECK_LE(left, right);
+ const int mid = (left + right) / 2;
+ if (position > SMI_VALUE(ends.get(mid))) {
+ left = mid + 1;
+ } else if (position <= SMI_VALUE(ends.get(mid - 1))) {
+ right = mid - 1;
+ } else {
+ info->line = mid;
+ break;
+ }
+ }
+ DCHECK(SMI_VALUE(ends.get(info->line)) >= position &&
+ SMI_VALUE(ends.get(info->line - 1)) < position);
+ info->line_start = SMI_VALUE(ends.get(info->line - 1)) + 1;
+ info->column = position - info->line_start;
+ }
+
+ // Line end is position of the linebreak character.
+ info->line_end = SMI_VALUE(ends.get(info->line));
+ if (info->line_end > 0) {
+ DCHECK(source().IsString());
+ String src = String::cast(source());
+ if (src.length() >= info->line_end &&
+ src.Get(info->line_end - 1) == '\r') {
+ info->line_end--;
+ }
+ }
+ }
+
+ // Add offsets if requested.
+ if (offset_flag == WITH_OFFSET) {
+ if (info->line == 0) {
+ info->column += column_offset();
+ }
+ info->line += line_offset();
+ }
+
+ return true;
+}
+#undef SMI_VALUE
+
+int Script::GetColumnNumber(Handle<Script> script, int code_pos) {
+ PositionInfo info;
+ GetPositionInfo(script, code_pos, &info, WITH_OFFSET);
+ return info.column;
+}
+
+int Script::GetColumnNumber(int code_pos) const {
+ PositionInfo info;
+ GetPositionInfo(code_pos, &info, WITH_OFFSET);
+ return info.column;
+}
+
+int Script::GetLineNumber(Handle<Script> script, int code_pos) {
+ PositionInfo info;
+ GetPositionInfo(script, code_pos, &info, WITH_OFFSET);
+ return info.line;
+}
+
+int Script::GetLineNumber(int code_pos) const {
+ PositionInfo info;
+ GetPositionInfo(code_pos, &info, WITH_OFFSET);
+ return info.line;
+}
+
+Object Script::GetNameOrSourceURL() {
+ // Keep in sync with ScriptNameOrSourceURL in messages.js.
+ if (!source_url().IsUndefined()) return source_url();
+ return name();
+}
+
+MaybeHandle<SharedFunctionInfo> Script::FindSharedFunctionInfo(
+ Isolate* isolate, const FunctionLiteral* fun) {
+ CHECK_NE(fun->function_literal_id(), kFunctionLiteralIdInvalid);
+ // If this check fails, the problem is most probably the function id
+ // renumbering done by AstFunctionLiteralIdReindexer; in particular, that
+ // AstTraversalVisitor doesn't recurse properly in the construct which
+ // triggers the mismatch.
+ CHECK_LT(fun->function_literal_id(), shared_function_infos().length());
+ MaybeObject shared = shared_function_infos().Get(fun->function_literal_id());
+ HeapObject heap_object;
+ if (!shared->GetHeapObject(&heap_object) ||
+ heap_object.IsUndefined(isolate)) {
+ return MaybeHandle<SharedFunctionInfo>();
+ }
+ return handle(SharedFunctionInfo::cast(heap_object), isolate);
+}
+
+std::unique_ptr<v8::tracing::TracedValue> Script::ToTracedValue() {
+ auto value = v8::tracing::TracedValue::Create();
+ if (name().IsString()) {
+ value->SetString("name", String::cast(name()).ToCString());
+ }
+ value->SetInteger("lineOffset", line_offset());
+ value->SetInteger("columnOffset", column_offset());
+ if (source_mapping_url().IsString()) {
+ value->SetString("sourceMappingURL",
+ String::cast(source_mapping_url()).ToCString());
+ }
+ if (source().IsString()) {
+ value->SetString("source", String::cast(source()).ToCString());
+ }
+ return value;
+}
+
+// static
+const char* Script::kTraceScope = "v8::internal::Script";
+
+uint64_t Script::TraceID() const { return id(); }
+
+std::unique_ptr<v8::tracing::TracedValue> Script::TraceIDRef() const {
+ auto value = v8::tracing::TracedValue::Create();
+ std::ostringstream ost;
+ ost << "0x" << std::hex << TraceID();
+ value->SetString("id_ref", ost.str());
+ value->SetString("scope", kTraceScope);
+ return value;
+}
+
+Script::Iterator::Iterator(Isolate* isolate)
+ : iterator_(isolate->heap()->script_list()) {}
+
+Script Script::Iterator::Next() {
+ Object o = iterator_.Next();
+ if (o != Object()) {
+ return Script::cast(o);
+ }
+ return Script();
+}
+
+uint32_t SharedFunctionInfo::Hash() {
+ // Hash SharedFunctionInfo based on its start position and script id. Note: we
+ // don't use the function's literal id since getting that is slow for compiled
+ // functions.
+ int start_pos = StartPosition();
+ int script_id = script().IsScript() ? Script::cast(script()).id() : 0;
+ return static_cast<uint32_t>(base::hash_combine(start_pos, script_id));
+}
+
+std::unique_ptr<v8::tracing::TracedValue> SharedFunctionInfo::ToTracedValue(
+ FunctionLiteral* literal) {
+ auto value = v8::tracing::TracedValue::Create();
+ if (HasSharedName()) {
+ value->SetString("name", Name().ToCString());
+ }
+ if (HasInferredName()) {
+ value->SetString("inferredName", inferred_name().ToCString());
+ }
+ if (is_toplevel()) {
+ value->SetBoolean("isToplevel", true);
+ }
+ value->SetInteger("formalParameterCount", internal_formal_parameter_count());
+ value->SetString("languageMode", LanguageMode2String(language_mode()));
+ value->SetString("kind", FunctionKind2String(kind()));
+ if (script().IsScript()) {
+ value->SetValue("script", Script::cast(script()).TraceIDRef());
+ value->BeginDictionary("sourcePosition");
+ Script::PositionInfo info;
+ // We get the start position from the {literal} here, because the
+ // SharedFunctionInfo itself might not have a way to get to the
+ // start position early on (currently that's the case when it's
+ // marked for eager compilation).
+ if (Script::cast(script()).GetPositionInfo(literal->start_position(), &info,
+ Script::WITH_OFFSET)) {
+ value->SetInteger("line", info.line + 1);
+ value->SetInteger("column", info.column + 1);
+ }
+ value->EndDictionary();
+ }
+ return value;
+}
+
+// static
+const char* SharedFunctionInfo::kTraceScope =
+ "v8::internal::SharedFunctionInfo";
+
+uint64_t SharedFunctionInfo::TraceID() const {
+ // TODO(bmeurer): We use a combination of Script ID and function literal
+ // ID (within the Script) to uniquely identify SharedFunctionInfos. This
+ // can add significant overhead, and we should probably find a better way
+ // to uniquely identify SharedFunctionInfos over time.
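+ // The packed id places the 1-based script id in the upper 32 bits and the
+ // function's index within the script in the lower 32 bits; e.g. script id 5
+ // at index 3 yields ((5 + 1) << 32) | 3 = 0x0000000600000003.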
+ Script script = Script::cast(this->script());
+ WeakFixedArray script_functions = script.shared_function_infos();
+ for (int i = 0; i < script_functions.length(); ++i) {
+ HeapObject script_function;
+ if (script_functions.Get(i).GetHeapObjectIfWeak(&script_function) &&
+ script_function.address() == address()) {
+ return (static_cast<uint64_t>(script.id() + 1) << 32) |
+ (static_cast<uint64_t>(i));
+ }
+ }
+ UNREACHABLE();
+}
+
+std::unique_ptr<v8::tracing::TracedValue> SharedFunctionInfo::TraceIDRef()
+ const {
+ auto value = v8::tracing::TracedValue::Create();
+ std::ostringstream ost;
+ ost << "0x" << std::hex << TraceID();
+ value->SetString("id_ref", ost.str());
+ value->SetString("scope", kTraceScope);
+ return value;
+}
+
+Code SharedFunctionInfo::GetCode() const {
+ // ======
+ // NOTE: This chain of checks MUST be kept in sync with the equivalent CSA
+ // GetSharedFunctionInfoCode method in code-stub-assembler.cc.
+ // ======
+
+ Isolate* isolate = GetIsolate();
+ Object data = function_data();
+ if (data.IsSmi()) {
+ // Holding a Smi means we are a builtin.
+ DCHECK(HasBuiltinId());
+ return isolate->builtins()->builtin(builtin_id());
+ } else if (data.IsBytecodeArray()) {
+ // Having a bytecode array means we are a compiled, interpreted function.
+ DCHECK(HasBytecodeArray());
+ return isolate->builtins()->builtin(Builtins::kInterpreterEntryTrampoline);
+ } else if (data.IsAsmWasmData()) {
+ // Having AsmWasmData means we are an asm.js/wasm function.
+ DCHECK(HasAsmWasmData());
+ return isolate->builtins()->builtin(Builtins::kInstantiateAsmJs);
+ } else if (data.IsUncompiledData()) {
+ // Having uncompiled data (with or without scope) means we need to compile.
+ DCHECK(HasUncompiledData());
+ return isolate->builtins()->builtin(Builtins::kCompileLazy);
+ } else if (data.IsFunctionTemplateInfo()) {
+ // Having a function template info means we are an API function.
+ DCHECK(IsApiFunction());
+ return isolate->builtins()->builtin(Builtins::kHandleApiCall);
+ } else if (data.IsWasmExportedFunctionData()) {
+ // Having a WasmExportedFunctionData means the wrapper code is stored there.
+ DCHECK(HasWasmExportedFunctionData());
+ return wasm_exported_function_data().wrapper_code();
+ } else if (data.IsInterpreterData()) {
+ Code code = InterpreterTrampoline();
+ DCHECK(code.IsCode());
+ DCHECK(code.is_interpreter_trampoline_builtin());
+ return code;
+ } else if (data.IsWasmJSFunctionData()) {
+ return wasm_js_function_data().wrapper_code();
+ } else if (data.IsWasmCapiFunctionData()) {
+ return wasm_capi_function_data().wrapper_code();
+ }
+ UNREACHABLE();
+}
+
+WasmExportedFunctionData SharedFunctionInfo::wasm_exported_function_data()
+ const {
+ DCHECK(HasWasmExportedFunctionData());
+ return WasmExportedFunctionData::cast(function_data());
+}
+
+WasmJSFunctionData SharedFunctionInfo::wasm_js_function_data() const {
+ DCHECK(HasWasmJSFunctionData());
+ return WasmJSFunctionData::cast(function_data());
+}
+
+WasmCapiFunctionData SharedFunctionInfo::wasm_capi_function_data() const {
+ DCHECK(HasWasmCapiFunctionData());
+ return WasmCapiFunctionData::cast(function_data());
+}
+
+SharedFunctionInfo::ScriptIterator::ScriptIterator(Isolate* isolate,
+ Script script)
+ : ScriptIterator(isolate, handle(script.shared_function_infos(), isolate)) {
+}
+
+SharedFunctionInfo::ScriptIterator::ScriptIterator(
+ Isolate* isolate, Handle<WeakFixedArray> shared_function_infos)
+ : isolate_(isolate),
+ shared_function_infos_(shared_function_infos),
+ index_(0) {}
+
+SharedFunctionInfo SharedFunctionInfo::ScriptIterator::Next() {
+ while (index_ < shared_function_infos_->length()) {
+ MaybeObject raw = shared_function_infos_->Get(index_++);
+ HeapObject heap_object;
+ if (!raw->GetHeapObject(&heap_object) ||
+ heap_object.IsUndefined(isolate_)) {
+ continue;
+ }
+ return SharedFunctionInfo::cast(heap_object);
+ }
+ return SharedFunctionInfo();
+}
+
+void SharedFunctionInfo::ScriptIterator::Reset(Script script) {
+ shared_function_infos_ = handle(script.shared_function_infos(), isolate_);
+ index_ = 0;
+}
+
+SharedFunctionInfo::GlobalIterator::GlobalIterator(Isolate* isolate)
+ : script_iterator_(isolate),
+ noscript_sfi_iterator_(isolate->heap()->noscript_shared_function_infos()),
+ sfi_iterator_(isolate, script_iterator_.Next()) {}
+
+SharedFunctionInfo SharedFunctionInfo::GlobalIterator::Next() {
+ HeapObject next = noscript_sfi_iterator_.Next();
+ if (!next.is_null()) return SharedFunctionInfo::cast(next);
+ for (;;) {
+ next = sfi_iterator_.Next();
+ if (!next.is_null()) return SharedFunctionInfo::cast(next);
+ Script next_script = script_iterator_.Next();
+ if (next_script.is_null()) return SharedFunctionInfo();
+ sfi_iterator_.Reset(next_script);
+ }
+}
+
+void SharedFunctionInfo::SetScript(Handle<SharedFunctionInfo> shared,
+ Handle<Object> script_object,
+ int function_literal_id,
+ bool reset_preparsed_scope_data) {
+ if (shared->script() == *script_object) return;
+ Isolate* isolate = shared->GetIsolate();
+
+ if (reset_preparsed_scope_data &&
+ shared->HasUncompiledDataWithPreparseData()) {
+ shared->ClearPreparseData();
+ }
+
+ // Add shared function info to new script's list. If a collection occurs,
+ // the shared function info may be temporarily in two lists.
+ // This is okay because the gc-time processing of these lists can tolerate
+ // duplicates.
+ if (script_object->IsScript()) {
+ DCHECK(!shared->script().IsScript());
+ Handle<Script> script = Handle<Script>::cast(script_object);
+ Handle<WeakFixedArray> list =
+ handle(script->shared_function_infos(), isolate);
+#ifdef DEBUG
+ DCHECK_LT(function_literal_id, list->length());
+ MaybeObject maybe_object = list->Get(function_literal_id);
+ HeapObject heap_object;
+ if (maybe_object->GetHeapObjectIfWeak(&heap_object)) {
+ DCHECK_EQ(heap_object, *shared);
+ }
+#endif
+ list->Set(function_literal_id, HeapObjectReference::Weak(*shared));
+
+ // Remove shared function info from root array.
+ WeakArrayList noscript_list =
+ isolate->heap()->noscript_shared_function_infos();
+ CHECK(noscript_list.RemoveOne(MaybeObjectHandle::Weak(shared)));
+ } else {
+ DCHECK(shared->script().IsScript());
+ Handle<WeakArrayList> list =
+ isolate->factory()->noscript_shared_function_infos();
+
+#ifdef DEBUG
+ if (FLAG_enable_slow_asserts) {
+ WeakArrayList::Iterator iterator(*list);
+ for (HeapObject next = iterator.Next(); !next.is_null();
+ next = iterator.Next()) {
+ DCHECK_NE(next, *shared);
+ }
+ }
+#endif // DEBUG
+
+ list =
+ WeakArrayList::AddToEnd(isolate, list, MaybeObjectHandle::Weak(shared));
+
+ isolate->heap()->SetRootNoScriptSharedFunctionInfos(*list);
+
+ // Remove shared function info from old script's list.
+ Script old_script = Script::cast(shared->script());
+
+ // Due to liveedit, it might happen that the old_script doesn't know
+ // about the SharedFunctionInfo, so we have to guard against that.
+ Handle<WeakFixedArray> infos(old_script.shared_function_infos(), isolate);
+ if (function_literal_id < infos->length()) {
+ MaybeObject raw =
+ old_script.shared_function_infos().Get(function_literal_id);
+ HeapObject heap_object;
+ if (raw->GetHeapObjectIfWeak(&heap_object) && heap_object == *shared) {
+ old_script.shared_function_infos().Set(
+ function_literal_id, HeapObjectReference::Strong(
+ ReadOnlyRoots(isolate).undefined_value()));
+ }
+ }
+ }
+
+ // Finally set new script.
+ shared->set_script(*script_object);
+}
+
+bool SharedFunctionInfo::HasBreakInfo() const {
+ if (!HasDebugInfo()) return false;
+ DebugInfo info = GetDebugInfo();
+ bool has_break_info = info.HasBreakInfo();
+ return has_break_info;
+}
+
+bool SharedFunctionInfo::BreakAtEntry() const {
+ if (!HasDebugInfo()) return false;
+ DebugInfo info = GetDebugInfo();
+ bool break_at_entry = info.BreakAtEntry();
+ return break_at_entry;
+}
+
+bool SharedFunctionInfo::HasCoverageInfo() const {
+ if (!HasDebugInfo()) return false;
+ DebugInfo info = GetDebugInfo();
+ bool has_coverage_info = info.HasCoverageInfo();
+ return has_coverage_info;
+}
+
+CoverageInfo SharedFunctionInfo::GetCoverageInfo() const {
+ DCHECK(HasCoverageInfo());
+ return CoverageInfo::cast(GetDebugInfo().coverage_info());
+}
+
+String SharedFunctionInfo::DebugName() {
+ DisallowHeapAllocation no_gc;
+ String function_name = Name();
+ if (function_name.length() > 0) return function_name;
+ return inferred_name();
+}
+
+bool SharedFunctionInfo::PassesFilter(const char* raw_filter) {
+ Vector<const char> filter = CStrVector(raw_filter);
+ std::unique_ptr<char[]> cstrname(DebugName().ToCString());
+ return v8::internal::PassesFilter(CStrVector(cstrname.get()), filter);
+}
+
+bool SharedFunctionInfo::HasSourceCode() const {
+ Isolate* isolate = GetIsolate();
+ return !script().IsUndefined(isolate) &&
+ !Script::cast(script()).source().IsUndefined(isolate);
+}
+
+void SharedFunctionInfo::DiscardCompiledMetadata(
+ Isolate* isolate,
+ std::function<void(HeapObject object, ObjectSlot slot, HeapObject target)>
+ gc_notify_updated_slot) {
+ DisallowHeapAllocation no_gc;
+ if (is_compiled()) {
+ HeapObject outer_scope_info;
+ if (scope_info().HasOuterScopeInfo()) {
+ outer_scope_info = scope_info().OuterScopeInfo();
+ } else {
+ outer_scope_info = ReadOnlyRoots(isolate).the_hole_value();
+ }
+
+ // Raw setter to avoid validity checks, since we're performing the unusual
+ // task of decompiling.
+ set_raw_outer_scope_info_or_feedback_metadata(outer_scope_info);
+ gc_notify_updated_slot(
+ *this,
+ RawField(SharedFunctionInfo::kOuterScopeInfoOrFeedbackMetadataOffset),
+ outer_scope_info);
+ } else {
+ DCHECK(outer_scope_info().IsScopeInfo() || outer_scope_info().IsTheHole());
+ }
+
+ // TODO(rmcilroy): Possibly discard ScopeInfo here as well.
+}
+
+// static
+void SharedFunctionInfo::DiscardCompiled(
+ Isolate* isolate, Handle<SharedFunctionInfo> shared_info) {
+ DCHECK(shared_info->CanDiscardCompiled());
+
+ Handle<String> inferred_name_val =
+ handle(shared_info->inferred_name(), isolate);
+ int start_position = shared_info->StartPosition();
+ int end_position = shared_info->EndPosition();
+ int function_literal_id = shared_info->FunctionLiteralId(isolate);
+
+ shared_info->DiscardCompiledMetadata(isolate);
+
+ // Replace compiled data with a new UncompiledData object.
+ if (shared_info->HasUncompiledDataWithPreparseData()) {
+ // If this is uncompiled data with pre-parsed scope data, we can just
+ // clear out the scope data and keep the uncompiled data.
+ shared_info->ClearPreparseData();
+ } else {
+ // Create a new UncompiledData, without pre-parsed scope, and update the
+ // function data to point to it. Use the raw function data setter to avoid
+ // validity checks, since we're performing the unusual task of decompiling.
+ Handle<UncompiledData> data =
+ isolate->factory()->NewUncompiledDataWithoutPreparseData(
+ inferred_name_val, start_position, end_position,
+ function_literal_id);
+ shared_info->set_function_data(*data);
+ }
+}
+
+// static
+Handle<Object> SharedFunctionInfo::GetSourceCode(
+ Handle<SharedFunctionInfo> shared) {
+ Isolate* isolate = shared->GetIsolate();
+ if (!shared->HasSourceCode()) return isolate->factory()->undefined_value();
+ Handle<String> source(String::cast(Script::cast(shared->script()).source()),
+ isolate);
+ return isolate->factory()->NewSubString(source, shared->StartPosition(),
+ shared->EndPosition());
+}
+
+// static
+Handle<Object> SharedFunctionInfo::GetSourceCodeHarmony(
+ Handle<SharedFunctionInfo> shared) {
+ Isolate* isolate = shared->GetIsolate();
+ if (!shared->HasSourceCode()) return isolate->factory()->undefined_value();
+ Handle<String> script_source(
+ String::cast(Script::cast(shared->script()).source()), isolate);
+ int start_pos = shared->function_token_position();
+ DCHECK_NE(start_pos, kNoSourcePosition);
+ Handle<String> source = isolate->factory()->NewSubString(
+ script_source, start_pos, shared->EndPosition());
+ if (!shared->is_wrapped()) return source;
+
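+ // Reassemble the wrapper around the body. E.g. (illustrative names) a
+ // wrapped function named "add" with wrapped_arguments ["a", "b"] and body
+ // "return a + b;" comes out as "function add(a, b) {\nreturn a + b;\n}".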
+ DCHECK(!shared->name_should_print_as_anonymous());
+ IncrementalStringBuilder builder(isolate);
+ builder.AppendCString("function ");
+ builder.AppendString(Handle<String>(shared->Name(), isolate));
+ builder.AppendCString("(");
+ Handle<FixedArray> args(Script::cast(shared->script()).wrapped_arguments(),
+ isolate);
+ int argc = args->length();
+ for (int i = 0; i < argc; i++) {
+ if (i > 0) builder.AppendCString(", ");
+ builder.AppendString(Handle<String>(String::cast(args->get(i)), isolate));
+ }
+ builder.AppendCString(") {\n");
+ builder.AppendString(source);
+ builder.AppendCString("\n}");
+ return builder.Finish().ToHandleChecked();
+}
+
+namespace {
+void TraceInlining(SharedFunctionInfo shared, const char* msg) {
+ if (FLAG_trace_turbo_inlining) {
+ StdoutStream os;
+ os << Brief(shared) << ": IsInlineable? " << msg << "\n";
+ }
+}
+} // namespace
+
+bool SharedFunctionInfo::IsInlineable() {
+ if (!script().IsScript()) {
+ TraceInlining(*this, "false (no Script associated with it)");
+ return false;
+ }
+
+ if (GetIsolate()->is_precise_binary_code_coverage() &&
+ !has_reported_binary_coverage()) {
+ // We may miss invocations if this function is inlined.
+ TraceInlining(*this, "false (requires reported binary coverage)");
+ return false;
+ }
+
+ if (optimization_disabled()) {
+ TraceInlining(*this, "false (optimization disabled)");
+ return false;
+ }
+
+ // Built-in functions are handled by the JSCallReducer.
+ if (HasBuiltinId()) {
+ TraceInlining(*this, "false (is a builtin)");
+ return false;
+ }
+
+ if (!IsUserJavaScript()) {
+ TraceInlining(*this, "false (is not user code)");
+ return false;
+ }
+
+ // If there is no bytecode array, it is either not compiled or it is compiled
+ // with WebAssembly for the asm.js pipeline. In either case we don't want to
+ // inline.
+ if (!HasBytecodeArray()) {
+ TraceInlining(*this, "false (has no BytecodeArray)");
+ return false;
+ }
+
+ if (GetBytecodeArray().length() > FLAG_max_inlined_bytecode_size) {
+ TraceInlining(*this, "false (length > FLAG_max_inlined_bytecode_size)");
+ return false;
+ }
+
+ if (HasBreakInfo()) {
+ TraceInlining(*this, "false (may contain break points)");
+ return false;
+ }
+
+ TraceInlining(*this, "true");
+ return true;
+}
+
+int SharedFunctionInfo::SourceSize() { return EndPosition() - StartPosition(); }
+
+int SharedFunctionInfo::FindIndexInScript(Isolate* isolate) const {
+ DisallowHeapAllocation no_gc;
+
+ Object script_obj = script();
+ if (!script_obj.IsScript()) return kFunctionLiteralIdInvalid;
+
+ WeakFixedArray shared_info_list =
+ Script::cast(script_obj).shared_function_infos();
+ SharedFunctionInfo::ScriptIterator iterator(
+ isolate,
+ Handle<WeakFixedArray>(reinterpret_cast<Address*>(&shared_info_list)));
+
+ for (SharedFunctionInfo shared = iterator.Next(); !shared.is_null();
+ shared = iterator.Next()) {
+ if (shared == *this) {
+ return iterator.CurrentIndex();
+ }
+ }
+
+ return kFunctionLiteralIdInvalid;
+}
+
+// Output the source code without any allocation in the heap.
+std::ostream& operator<<(std::ostream& os, const SourceCodeOf& v) {
+ const SharedFunctionInfo s = v.value;
+ // For some native functions there is no source.
+ if (!s.HasSourceCode()) return os << "<No Source>";
+
+ // Get the source for the script which this function came from.
+ // Don't use String::cast because we don't want more assertion errors while
+ // we are already creating a stack dump.
+ String script_source =
+ String::unchecked_cast(Script::cast(s.script()).source());
+
+ if (!script_source.LooksValid()) return os << "<Invalid Source>";
+
+ if (!s.is_toplevel()) {
+ os << "function ";
+ String name = s.Name();
+ if (name.length() > 0) {
+ name.PrintUC16(os);
+ }
+ }
+
+ int len = s.EndPosition() - s.StartPosition();
+ if (len <= v.max_length || v.max_length < 0) {
+ script_source.PrintUC16(os, s.StartPosition(), s.EndPosition());
+ return os;
+ } else {
+ script_source.PrintUC16(os, s.StartPosition(),
+ s.StartPosition() + v.max_length);
+ return os << "...\n";
+ }
+}
+
+void SharedFunctionInfo::DisableOptimization(BailoutReason reason) {
+ DCHECK_NE(reason, BailoutReason::kNoReason);
+
+ set_flags(DisabledOptimizationReasonBits::update(flags(), reason));
+ // Code should be the lazy compilation stub or else interpreted.
+ DCHECK(abstract_code().kind() == AbstractCode::INTERPRETED_FUNCTION ||
+ abstract_code().kind() == AbstractCode::BUILTIN);
+ PROFILE(GetIsolate(), CodeDisableOptEvent(abstract_code(), *this));
+ if (FLAG_trace_opt) {
+ PrintF("[disabled optimization for ");
+ ShortPrint();
+ PrintF(", reason: %s]\n", GetBailoutReason(reason));
+ }
+}
+
+void SharedFunctionInfo::InitFromFunctionLiteral(
+ Handle<SharedFunctionInfo> shared_info, FunctionLiteral* lit,
+ bool is_toplevel) {
+ Isolate* isolate = shared_info->GetIsolate();
+ bool needs_position_info = true;
+
+ // When adding fields here, make sure DeclarationScope::AnalyzePartially is
+ // updated accordingly.
+ shared_info->set_internal_formal_parameter_count(lit->parameter_count());
+ shared_info->SetFunctionTokenPosition(lit->function_token_position(),
+ lit->start_position());
+ if (shared_info->scope_info().HasPositionInfo()) {
+ shared_info->scope_info().SetPositionInfo(lit->start_position(),
+ lit->end_position());
+ needs_position_info = false;
+ }
+ shared_info->set_is_declaration(lit->is_declaration());
+ shared_info->set_is_named_expression(lit->is_named_expression());
+ shared_info->set_is_anonymous_expression(lit->is_anonymous_expression());
+ shared_info->set_allows_lazy_compilation(lit->AllowsLazyCompilation());
+ shared_info->set_language_mode(lit->language_mode());
+ shared_info->set_is_wrapped(lit->is_wrapped());
+ // shared_info->set_kind(lit->kind());
+ // FunctionKind must have already been set.
+ DCHECK(lit->kind() == shared_info->kind());
+ shared_info->set_needs_home_object(lit->scope()->NeedsHomeObject());
+ DCHECK_IMPLIES(lit->requires_instance_members_initializer(),
+ IsClassConstructor(lit->kind()));
+ shared_info->set_requires_instance_members_initializer(
+ lit->requires_instance_members_initializer());
+
+ shared_info->set_is_toplevel(is_toplevel);
+ DCHECK(shared_info->outer_scope_info().IsTheHole());
+ if (!is_toplevel) {
+ Scope* outer_scope = lit->scope()->GetOuterScopeWithContext();
+ if (outer_scope) {
+ shared_info->set_outer_scope_info(*outer_scope->scope_info());
+ }
+ }
+
+ shared_info->set_length(lit->function_length());
+
+ // For lazily parsed functions, the following flags will be inaccurate since
+ // don't have the information yet. They're set later in
+ // SetSharedFunctionFlagsFromLiteral (compiler.cc), when the function is
+ // really parsed and compiled.
+ if (lit->ShouldEagerCompile()) {
+ shared_info->set_has_duplicate_parameters(lit->has_duplicate_parameters());
+ shared_info->UpdateAndFinalizeExpectedNofPropertiesFromEstimate(lit);
+ shared_info->set_is_safe_to_skip_arguments_adaptor(
+ lit->SafeToSkipArgumentsAdaptor());
+ DCHECK_NULL(lit->produced_preparse_data());
+ // If we're about to eager compile, we'll have the function literal
+ // available, so there's no need to wastefully allocate an UncompiledData.
+ // TODO(leszeks): This should be explicitly passed as a parameter, rather
+ // than relying on a property of the literal.
+ needs_position_info = false;
+ } else {
+ shared_info->set_is_safe_to_skip_arguments_adaptor(false);
+ ProducedPreparseData* scope_data = lit->produced_preparse_data();
+ if (scope_data != nullptr) {
+ Handle<PreparseData> preparse_data =
+ scope_data->Serialize(shared_info->GetIsolate());
+ Handle<UncompiledData> data =
+ isolate->factory()->NewUncompiledDataWithPreparseData(
+ lit->inferred_name(), lit->start_position(), lit->end_position(),
+ lit->function_literal_id(), preparse_data);
+ shared_info->set_uncompiled_data(*data);
+ needs_position_info = false;
+ }
+ shared_info->UpdateExpectedNofPropertiesFromEstimate(lit);
+ }
+ if (needs_position_info) {
+ Handle<UncompiledData> data =
+ isolate->factory()->NewUncompiledDataWithoutPreparseData(
+ lit->inferred_name(), lit->start_position(), lit->end_position(),
+ lit->function_literal_id());
+ shared_info->set_uncompiled_data(*data);
+ }
+}
+
+uint16_t SharedFunctionInfo::get_property_estimate_from_literal(
+ FunctionLiteral* literal) {
+ int estimate = literal->expected_property_count();
+
+ // If this is a class constructor, we may have already parsed fields.
+ if (is_class_constructor()) {
+ estimate += expected_nof_properties();
+ }
+ return estimate;
+}
+
+void SharedFunctionInfo::UpdateExpectedNofPropertiesFromEstimate(
+ FunctionLiteral* literal) {
+ set_expected_nof_properties(get_property_estimate_from_literal(literal));
+}
+
+void SharedFunctionInfo::UpdateAndFinalizeExpectedNofPropertiesFromEstimate(
+ FunctionLiteral* literal) {
+ DCHECK(literal->ShouldEagerCompile());
+ if (are_properties_final()) {
+ return;
+ }
+ int estimate = get_property_estimate_from_literal(literal);
+
+ // If no properties are added in the constructor, they are more likely
+ // to be added later.
+ if (estimate == 0) estimate = 2;
+
+ // Limit the actual estimate to fit in an 8-bit field; we will never allocate
+ // more than this in any case.
+ STATIC_ASSERT(JSObject::kMaxInObjectProperties <= kMaxUInt8);
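+ // E.g. an estimate of 300 would be clamped to kMaxUInt8 (255).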
+ estimate = std::min(estimate, kMaxUInt8);
+
+ set_expected_nof_properties(estimate);
+ set_are_properties_final(true);
+}
+
+void SharedFunctionInfo::SetFunctionTokenPosition(int function_token_position,
+ int start_position) {
+ int offset;
+ if (function_token_position == kNoSourcePosition) {
+ offset = 0;
+ } else {
+ offset = start_position - function_token_position;
+ }
+
+ if (offset > kMaximumFunctionTokenOffset) {
+ offset = kFunctionTokenOutOfRange;
+ }
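+ // Example: start_position 112 with function_token_position 100 stores an
+ // offset of 12; kNoSourcePosition stores 0; anything above
+ // kMaximumFunctionTokenOffset collapses to kFunctionTokenOutOfRange.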
+ set_raw_function_token_offset(offset);
+}
+
+int SharedFunctionInfo::StartPosition() const {
+ Object maybe_scope_info = name_or_scope_info();
+ if (maybe_scope_info.IsScopeInfo()) {
+ ScopeInfo info = ScopeInfo::cast(maybe_scope_info);
+ if (info.HasPositionInfo()) {
+ return info.StartPosition();
+ }
+ } else if (HasUncompiledData()) {
+ // Works with or without scope.
+ return uncompiled_data().start_position();
+ } else if (IsApiFunction() || HasBuiltinId()) {
+ DCHECK_IMPLIES(HasBuiltinId(), builtin_id() != Builtins::kCompileLazy);
+ return 0;
+ }
+ return kNoSourcePosition;
+}
+
+int SharedFunctionInfo::EndPosition() const {
+ Object maybe_scope_info = name_or_scope_info();
+ if (maybe_scope_info.IsScopeInfo()) {
+ ScopeInfo info = ScopeInfo::cast(maybe_scope_info);
+ if (info.HasPositionInfo()) {
+ return info.EndPosition();
+ }
+ } else if (HasUncompiledData()) {
+ // Works with or without scope.
+ return uncompiled_data().end_position();
+ } else if (IsApiFunction() || HasBuiltinId()) {
+ DCHECK_IMPLIES(HasBuiltinId(), builtin_id() != Builtins::kCompileLazy);
+ return 0;
+ }
+ return kNoSourcePosition;
+}
+
+int SharedFunctionInfo::FunctionLiteralId(Isolate* isolate) const {
+ // Fast path for the common case when the SFI is uncompiled and so the
+ // function literal id is already in the uncompiled data.
+ if (HasUncompiledData() && uncompiled_data().has_function_literal_id()) {
+ int id = uncompiled_data().function_literal_id();
+ // Make sure the id is what we should have found with the slow path.
+ DCHECK_EQ(id, FindIndexInScript(isolate));
+ return id;
+ }
+
+ // Otherwise, search for the function in the SFI's script's function list,
+ // and return its index in that list.
+ return FindIndexInScript(isolate);
+}
+
+void SharedFunctionInfo::SetPosition(int start_position, int end_position) {
+ Object maybe_scope_info = name_or_scope_info();
+ if (maybe_scope_info.IsScopeInfo()) {
+ ScopeInfo info = ScopeInfo::cast(maybe_scope_info);
+ if (info.HasPositionInfo()) {
+ info.SetPositionInfo(start_position, end_position);
+ }
+ } else if (HasUncompiledData()) {
+ if (HasUncompiledDataWithPreparseData()) {
+ // Clear out preparsed scope data, since the position setter invalidates
+ // any scope data.
+ ClearPreparseData();
+ }
+ uncompiled_data().set_start_position(start_position);
+ uncompiled_data().set_end_position(end_position);
+ } else {
+ UNREACHABLE();
+ }
+}
+
+bool SharedFunctionInfo::AreSourcePositionsAvailable() const {
+ if (FLAG_enable_lazy_source_positions) {
+ return !HasBytecodeArray() || GetBytecodeArray().HasSourcePositionTable();
+ }
+ return true;
+}
+
+// static
+void SharedFunctionInfo::EnsureSourcePositionsAvailable(
+ Isolate* isolate, Handle<SharedFunctionInfo> shared_info) {
+ if (FLAG_enable_lazy_source_positions && shared_info->HasBytecodeArray() &&
+ !shared_info->GetBytecodeArray().HasSourcePositionTable()) {
+ Compiler::CollectSourcePositions(isolate, shared_info);
+ }
+}
+
+bool BytecodeArray::IsBytecodeEqual(const BytecodeArray other) const {
+ if (length() != other.length()) return false;
+
+ for (int i = 0; i < length(); ++i) {
+ if (get(i) != other.get(i)) return false;
+ }
+
+ return true;
+}
+
+// static
+void JSArray::Initialize(Handle<JSArray> array, int capacity, int length) {
+ DCHECK_GE(capacity, 0);
+ array->GetIsolate()->factory()->NewJSArrayStorage(
+ array, length, capacity, INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
+}
+
+void JSArray::SetLength(Handle<JSArray> array, uint32_t new_length) {
+ // We should never end up here with a pixel or external array.
+ DCHECK(array->AllowsSetLength());
+ if (array->SetLengthWouldNormalize(new_length)) {
+ JSObject::NormalizeElements(array);
+ }
+ array->GetElementsAccessor()->SetLength(array, new_length);
+}
+
+// ES6: 9.5.2 [[SetPrototypeOf]] (V)
+// static
+Maybe<bool> JSProxy::SetPrototype(Handle<JSProxy> proxy, Handle<Object> value,
+ bool from_javascript,
+ ShouldThrow should_throw) {
+ Isolate* isolate = proxy->GetIsolate();
+ STACK_CHECK(isolate, Nothing<bool>());
+ Handle<Name> trap_name = isolate->factory()->setPrototypeOf_string();
+ // 1. Assert: Either Type(V) is Object or Type(V) is Null.
+ DCHECK(value->IsJSReceiver() || value->IsNull(isolate));
+ // 2. Let handler be the value of the [[ProxyHandler]] internal slot of O.
+ Handle<Object> handler(proxy->handler(), isolate);
+ // 3. If handler is null, throw a TypeError exception.
+ // 4. Assert: Type(handler) is Object.
+ if (proxy->IsRevoked()) {
+ isolate->Throw(*isolate->factory()->NewTypeError(
+ MessageTemplate::kProxyRevoked, trap_name));
+ return Nothing<bool>();
+ }
+ // 5. Let target be the value of the [[ProxyTarget]] internal slot.
+ Handle<JSReceiver> target(JSReceiver::cast(proxy->target()), isolate);
+ // 6. Let trap be ? GetMethod(handler, "setPrototypeOf").
+ Handle<Object> trap;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, trap,
+ Object::GetMethod(Handle<JSReceiver>::cast(handler), trap_name),
+ Nothing<bool>());
+ // 7. If trap is undefined, then return target.[[SetPrototypeOf]]().
+ if (trap->IsUndefined(isolate)) {
+ return JSReceiver::SetPrototype(target, value, from_javascript,
+ should_throw);
+ }
+ // 8. Let booleanTrapResult be ToBoolean(? Call(trap, handler, «target, V»)).
+ Handle<Object> argv[] = {target, value};
+ Handle<Object> trap_result;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, trap_result,
+ Execution::Call(isolate, trap, handler, arraysize(argv), argv),
+ Nothing<bool>());
+ bool bool_trap_result = trap_result->BooleanValue(isolate);
+ // 9. If booleanTrapResult is false, return false.
+ if (!bool_trap_result) {
+ RETURN_FAILURE(
+ isolate, should_throw,
+ NewTypeError(MessageTemplate::kProxyTrapReturnedFalsish, trap_name));
+ }
+ // 10. Let extensibleTarget be ? IsExtensible(target).
+ Maybe<bool> is_extensible = JSReceiver::IsExtensible(target);
+ if (is_extensible.IsNothing()) return Nothing<bool>();
+ // 11. If extensibleTarget is true, return true.
+ if (is_extensible.FromJust()) {
+ if (bool_trap_result) return Just(true);
+ RETURN_FAILURE(
+ isolate, should_throw,
+ NewTypeError(MessageTemplate::kProxyTrapReturnedFalsish, trap_name));
+ }
+ // 12. Let targetProto be ? target.[[GetPrototypeOf]]().
+ Handle<Object> target_proto;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, target_proto,
+ JSReceiver::GetPrototype(isolate, target),
+ Nothing<bool>());
+ // 13. If SameValue(V, targetProto) is false, throw a TypeError exception.
+ if (bool_trap_result && !value->SameValue(*target_proto)) {
+ isolate->Throw(*isolate->factory()->NewTypeError(
+ MessageTemplate::kProxySetPrototypeOfNonExtensible));
+ return Nothing<bool>();
+ }
+ // 14. Return true.
+ return Just(true);
+}
+
+bool JSArray::SetLengthWouldNormalize(uint32_t new_length) {
+ if (!HasFastElements()) return false;
+ uint32_t capacity = static_cast<uint32_t>(elements().length());
+ uint32_t new_capacity;
+ return JSArray::SetLengthWouldNormalize(GetHeap(), new_length) &&
+ ShouldConvertToSlowElements(*this, capacity, new_length - 1,
+ &new_capacity);
+}
+
+const double AllocationSite::kPretenureRatio = 0.85;
+
+void AllocationSite::ResetPretenureDecision() {
+ set_pretenure_decision(kUndecided);
+ set_memento_found_count(0);
+ set_memento_create_count(0);
+}
+
+AllocationType AllocationSite::GetAllocationType() const {
+ PretenureDecision mode = pretenure_decision();
+ // Zombie objects "decide" to be untenured.
+ return mode == kTenure ? AllocationType::kOld : AllocationType::kYoung;
+}
+
+bool AllocationSite::IsNested() {
+ DCHECK(FLAG_trace_track_allocation_sites);
+ Object current = boilerplate().GetHeap()->allocation_sites_list();
+ while (current.IsAllocationSite()) {
+ AllocationSite current_site = AllocationSite::cast(current);
+ if (current_site.nested_site() == *this) {
+ return true;
+ }
+ current = current_site.weak_next();
+ }
+ return false;
+}
+
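+// Only transitions away from a Smi elements kind are tracked: e.g.
+// PACKED_SMI_ELEMENTS -> PACKED_DOUBLE_ELEMENTS qualifies, while
+// PACKED_DOUBLE_ELEMENTS -> PACKED_ELEMENTS does not, since the source kind
+// is no longer a Smi kind.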
+bool AllocationSite::ShouldTrack(ElementsKind from, ElementsKind to) {
+ return IsSmiElementsKind(from) &&
+ IsMoreGeneralElementsKindTransition(from, to);
+}
+
+const char* AllocationSite::PretenureDecisionName(PretenureDecision decision) {
+ switch (decision) {
+ case kUndecided:
+ return "undecided";
+ case kDontTenure:
+ return "don't tenure";
+ case kMaybeTenure:
+ return "maybe tenure";
+ case kTenure:
+ return "tenure";
+ case kZombie:
+ return "zombie";
+ default:
+ UNREACHABLE();
+ }
+ return nullptr;
+}
+
+bool JSArray::HasReadOnlyLength(Handle<JSArray> array) {
+ Map map = array->map();
+ // Fast path: "length" is the first fast property of arrays. Since it's not
+ // configurable, it's guaranteed to be the first in the descriptor array.
+ if (!map.is_dictionary_map()) {
+ DCHECK(map.instance_descriptors().GetKey(0) ==
+ array->GetReadOnlyRoots().length_string());
+ return map.instance_descriptors().GetDetails(0).IsReadOnly();
+ }
+
+ Isolate* isolate = array->GetIsolate();
+ LookupIterator it(array, isolate->factory()->length_string(), array,
+ LookupIterator::OWN_SKIP_INTERCEPTOR);
+ CHECK_EQ(LookupIterator::ACCESSOR, it.state());
+ return it.IsReadOnly();
+}
+
+bool JSArray::WouldChangeReadOnlyLength(Handle<JSArray> array, uint32_t index) {
+ uint32_t length = 0;
+ CHECK(array->length().ToArrayLength(&length));
+ if (length <= index) return HasReadOnlyLength(array);
+ return false;
+}
+
+// Certain compilers request function template instantiation when they
+// see the definition of the other template functions in the
+// class. This requires us to keep the template functions together, so
+// even though this function belongs in objects-debug.cc, we keep it
+// here to satisfy those compilers.
+#ifdef OBJECT_PRINT
+template <typename Derived, typename Shape>
+void Dictionary<Derived, Shape>::Print(std::ostream& os) {
+ DisallowHeapAllocation no_gc;
+ ReadOnlyRoots roots = this->GetReadOnlyRoots();
+ Derived dictionary = Derived::cast(*this);
+ int capacity = dictionary.Capacity();
+ for (int i = 0; i < capacity; i++) {
+ Object k = dictionary.KeyAt(i);
+ if (!dictionary.ToKey(roots, i, &k)) continue;
+ os << "\n ";
+ if (k.IsString()) {
+ String::cast(k).StringPrint(os);
+ } else {
+ os << Brief(k);
+ }
+ os << ": " << Brief(dictionary.ValueAt(i)) << " ";
+ dictionary.DetailsAt(i).PrintAsSlowTo(os);
+ }
+}
+template <typename Derived, typename Shape>
+void Dictionary<Derived, Shape>::Print() {
+ StdoutStream os;
+ Print(os);
+ os << std::endl;
+}
+#endif
+
+int FixedArrayBase::GetMaxLengthForNewSpaceAllocation(ElementsKind kind) {
+ return ((kMaxRegularHeapObjectSize - FixedArrayBase::kHeaderSize) >>
+ ElementsKindToShiftSize(kind));
+}
+
+bool FixedArrayBase::IsCowArray() const {
+ return map() == GetReadOnlyRoots().fixed_cow_array_map();
+}
+
+const char* Symbol::PrivateSymbolToName() const {
+ ReadOnlyRoots roots = GetReadOnlyRoots();
+#define SYMBOL_CHECK_AND_PRINT(_, name) \
+ if (*this == roots.name()) return #name;
+ PRIVATE_SYMBOL_LIST_GENERATOR(SYMBOL_CHECK_AND_PRINT, /* not used */)
+#undef SYMBOL_CHECK_AND_PRINT
+ return "UNKNOWN";
+}
+
+void Symbol::SymbolShortPrint(std::ostream& os) {
+ os << "<Symbol:";
+ if (!name().IsUndefined()) {
+ os << " ";
+ HeapStringAllocator allocator;
+ StringStream accumulator(&allocator);
+ String::cast(name()).StringShortPrint(&accumulator, false);
+ os << accumulator.ToCString().get();
+ } else {
+ os << " (" << PrivateSymbolToName() << ")";
+ }
+ os << ">";
+}
+
+// StringSharedKeys are used as keys in the eval cache.
+class StringSharedKey : public HashTableKey {
+ public:
+ // This tuple unambiguously identifies calls to eval() or
+ // CreateDynamicFunction() (such as through the Function() constructor).
+ // * source is the string passed into eval(). For dynamic functions, this is
+ // the effective source for the function, some of which is implicitly
+ // generated.
+ // * shared is the shared function info for the function containing the call
+ // to eval(). For dynamic functions, shared is the native context closure.
+ // * When positive, position is the position in the source where eval is
+ // called. When negative, position is the negation of the position in the
+ // dynamic function's effective source where the ')' ends the parameters.
+ StringSharedKey(Handle<String> source, Handle<SharedFunctionInfo> shared,
+ LanguageMode language_mode, int position)
+ : HashTableKey(CompilationCacheShape::StringSharedHash(
+ *source, *shared, language_mode, position)),
+ source_(source),
+ shared_(shared),
+ language_mode_(language_mode),
+ position_(position) {}
+
+ bool IsMatch(Object other) override {
+ DisallowHeapAllocation no_allocation;
+ if (!other.IsFixedArray()) {
+ DCHECK(other.IsNumber());
+ uint32_t other_hash = static_cast<uint32_t>(other.Number());
+ return Hash() == other_hash;
+ }
+ FixedArray other_array = FixedArray::cast(other);
+ SharedFunctionInfo shared = SharedFunctionInfo::cast(other_array.get(0));
+ if (shared != *shared_) return false;
+ int language_unchecked = Smi::ToInt(other_array.get(2));
+ DCHECK(is_valid_language_mode(language_unchecked));
+ LanguageMode language_mode = static_cast<LanguageMode>(language_unchecked);
+ if (language_mode != language_mode_) return false;
+ int position = Smi::ToInt(other_array.get(3));
+ if (position != position_) return false;
+ String source = String::cast(other_array.get(1));
+ return source.Equals(*source_);
+ }
+
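+ // The cache entry built below is a copy-on-write FixedArray whose slots
+ // line up with the reads in IsMatch above: 0 = shared, 1 = source,
+ // 2 = language mode (as a Smi), 3 = position (as a Smi).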
+ Handle<Object> AsHandle(Isolate* isolate) {
+ Handle<FixedArray> array = isolate->factory()->NewFixedArray(4);
+ array->set(0, *shared_);
+ array->set(1, *source_);
+ array->set(2, Smi::FromEnum(language_mode_));
+ array->set(3, Smi::FromInt(position_));
+ array->set_map(ReadOnlyRoots(isolate).fixed_cow_array_map());
+ return array;
+ }
+
+ private:
+ Handle<String> source_;
+ Handle<SharedFunctionInfo> shared_;
+ LanguageMode language_mode_;
+ int position_;
+};
+
+v8::Promise::PromiseState JSPromise::status() const {
+ int value = flags() & kStatusMask;
+ DCHECK(value == 0 || value == 1 || value == 2);
+ return static_cast<v8::Promise::PromiseState>(value);
+}
+
+void JSPromise::set_status(Promise::PromiseState status) {
+ int value = flags() & ~kStatusMask;
+ set_flags(value | status);
+}
+
+// static
+const char* JSPromise::Status(v8::Promise::PromiseState status) {
+ switch (status) {
+ case v8::Promise::kFulfilled:
+ return "resolved";
+ case v8::Promise::kPending:
+ return "pending";
+ case v8::Promise::kRejected:
+ return "rejected";
+ }
+ UNREACHABLE();
+}
+
+int JSPromise::async_task_id() const {
+ return AsyncTaskIdField::decode(flags());
+}
+
+void JSPromise::set_async_task_id(int id) {
+ set_flags(AsyncTaskIdField::update(flags(), id));
+}
+
+// static
+Handle<Object> JSPromise::Fulfill(Handle<JSPromise> promise,
+ Handle<Object> value) {
+ Isolate* const isolate = promise->GetIsolate();
+
+ // 1. Assert: The value of promise.[[PromiseState]] is "pending".
+ CHECK_EQ(Promise::kPending, promise->status());
+
+ // 2. Let reactions be promise.[[PromiseFulfillReactions]].
+ Handle<Object> reactions(promise->reactions(), isolate);
+
+ // 3. Set promise.[[PromiseResult]] to value.
+ // 4. Set promise.[[PromiseFulfillReactions]] to undefined.
+ // 5. Set promise.[[PromiseRejectReactions]] to undefined.
+ promise->set_reactions_or_result(*value);
+
+ // 6. Set promise.[[PromiseState]] to "fulfilled".
+ promise->set_status(Promise::kFulfilled);
+
+ // 7. Return TriggerPromiseReactions(reactions, value).
+ return TriggerPromiseReactions(isolate, reactions, value,
+ PromiseReaction::kFulfill);
+}
+
+// static
+Handle<Object> JSPromise::Reject(Handle<JSPromise> promise,
+ Handle<Object> reason, bool debug_event) {
+ Isolate* const isolate = promise->GetIsolate();
+
+ if (debug_event) isolate->debug()->OnPromiseReject(promise, reason);
+ isolate->RunPromiseHook(PromiseHookType::kResolve, promise,
+ isolate->factory()->undefined_value());
+
+ // 1. Assert: The value of promise.[[PromiseState]] is "pending".
+ CHECK_EQ(Promise::kPending, promise->status());
+
+ // 2. Let reactions be promise.[[PromiseRejectReactions]].
+ Handle<Object> reactions(promise->reactions(), isolate);
+
+ // 3. Set promise.[[PromiseResult]] to reason.
+ // 4. Set promise.[[PromiseFulfillReactions]] to undefined.
+ // 5. Set promise.[[PromiseRejectReactions]] to undefined.
+ promise->set_reactions_or_result(*reason);
+
+ // 6. Set promise.[[PromiseState]] to "rejected".
+ promise->set_status(Promise::kRejected);
+
+ // 7. If promise.[[PromiseIsHandled]] is false, perform
+ // HostPromiseRejectionTracker(promise, "reject").
+ if (!promise->has_handler()) {
+ isolate->ReportPromiseReject(promise, reason, kPromiseRejectWithNoHandler);
+ }
+
+ // 8. Return TriggerPromiseReactions(reactions, reason).
+ return TriggerPromiseReactions(isolate, reactions, reason,
+ PromiseReaction::kReject);
+}
+
+// static
+MaybeHandle<Object> JSPromise::Resolve(Handle<JSPromise> promise,
+ Handle<Object> resolution) {
+ Isolate* const isolate = promise->GetIsolate();
+
+ isolate->RunPromiseHook(PromiseHookType::kResolve, promise,
+ isolate->factory()->undefined_value());
+
+ // 6. If SameValue(resolution, promise) is true, then
+ if (promise.is_identical_to(resolution)) {
+ // a. Let selfResolutionError be a newly created TypeError object.
+ Handle<Object> self_resolution_error = isolate->factory()->NewTypeError(
+ MessageTemplate::kPromiseCyclic, resolution);
+ // b. Return RejectPromise(promise, selfResolutionError).
+ return Reject(promise, self_resolution_error);
+ }
+
+ // 7. If Type(resolution) is not Object, then
+ if (!resolution->IsJSReceiver()) {
+ // a. Return FulfillPromise(promise, resolution).
+ return Fulfill(promise, resolution);
+ }
+
+ // 8. Let then be Get(resolution, "then").
+ MaybeHandle<Object> then;
+ if (isolate->IsPromiseThenLookupChainIntact(
+ Handle<JSReceiver>::cast(resolution))) {
+ // We can skip the "then" lookup on {resolution} if its [[Prototype]]
+ // is the (initial) Promise.prototype and the Promise#then protector
+ // is intact, as that guards the lookup path for the "then" property
+ // on JSPromise instances which have the (initial) %PromisePrototype%.
+ then = isolate->promise_then();
+ } else {
+ then =
+ JSReceiver::GetProperty(isolate, Handle<JSReceiver>::cast(resolution),
+ isolate->factory()->then_string());
+ }
+
+ // 9. If then is an abrupt completion, then
+ Handle<Object> then_action;
+ if (!then.ToHandle(&then_action)) {
+ // a. Return RejectPromise(promise, then.[[Value]]).
+ Handle<Object> reason(isolate->pending_exception(), isolate);
+ isolate->clear_pending_exception();
+ return Reject(promise, reason, false);
+ }
+
+ // 10. Let thenAction be then.[[Value]].
+ // 11. If IsCallable(thenAction) is false, then
+ if (!then_action->IsCallable()) {
+ // a. Return FulfillPromise(promise, resolution).
+ return Fulfill(promise, resolution);
+ }
+
+ // 12. Perform EnqueueJob("PromiseJobs", PromiseResolveThenableJob,
+ // «promise, resolution, thenAction»).
+ Handle<PromiseResolveThenableJobTask> task =
+ isolate->factory()->NewPromiseResolveThenableJobTask(
+ promise, Handle<JSReceiver>::cast(then_action),
+ Handle<JSReceiver>::cast(resolution), isolate->native_context());
+ if (isolate->debug()->is_active() && resolution->IsJSPromise()) {
+ // Mark the dependency of the new {promise} on the {resolution}.
+ Object::SetProperty(isolate, resolution,
+ isolate->factory()->promise_handled_by_symbol(),
+ promise)
+ .Check();
+ }
+ MicrotaskQueue* microtask_queue =
+ isolate->native_context()->microtask_queue();
+ if (microtask_queue) microtask_queue->EnqueueMicrotask(*task);
+
+ // 13. Return undefined.
+ return isolate->factory()->undefined_value();
+}
+
+// static
+Handle<Object> JSPromise::TriggerPromiseReactions(Isolate* isolate,
+ Handle<Object> reactions,
+ Handle<Object> argument,
+ PromiseReaction::Type type) {
+ CHECK(reactions->IsSmi() || reactions->IsPromiseReaction());
+
+ // We need to reverse the {reactions} here, since we record them
+ // on the JSPromise in the reverse order.
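+ // Example: reactions recorded as r3 -> r2 -> r1 -> Smi::kZero (most recent
+ // first) come out below as r1 -> r2 -> r3, so the reaction jobs are
+ // enqueued in registration order.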
+ {
+ DisallowHeapAllocation no_gc;
+ Object current = *reactions;
+ Object reversed = Smi::kZero;
+ while (!current.IsSmi()) {
+ Object next = PromiseReaction::cast(current).next();
+ PromiseReaction::cast(current).set_next(reversed);
+ reversed = current;
+ current = next;
+ }
+ reactions = handle(reversed, isolate);
+ }
+
+ // Morph the {reactions} into PromiseReactionJobTasks
+ // and push them onto the microtask queue.
+ while (!reactions->IsSmi()) {
+ Handle<HeapObject> task = Handle<HeapObject>::cast(reactions);
+ Handle<PromiseReaction> reaction = Handle<PromiseReaction>::cast(task);
+ reactions = handle(reaction->next(), isolate);
+
+ Handle<NativeContext> handler_context;
+
+ Handle<HeapObject> primary_handler;
+ Handle<HeapObject> secondary_handler;
+ if (type == PromiseReaction::kFulfill) {
+ primary_handler = handle(reaction->fulfill_handler(), isolate);
+ secondary_handler = handle(reaction->reject_handler(), isolate);
+ } else {
+ primary_handler = handle(reaction->reject_handler(), isolate);
+ secondary_handler = handle(reaction->fulfill_handler(), isolate);
+ }
+
+ if (primary_handler->IsJSReceiver()) {
+ JSReceiver::GetContextForMicrotask(
+ Handle<JSReceiver>::cast(primary_handler))
+ .ToHandle(&handler_context);
+ }
+ if (handler_context.is_null() && secondary_handler->IsJSReceiver()) {
+ JSReceiver::GetContextForMicrotask(
+ Handle<JSReceiver>::cast(secondary_handler))
+ .ToHandle(&handler_context);
+ }
+ if (handler_context.is_null()) handler_context = isolate->native_context();
+
+ STATIC_ASSERT(
+ static_cast<int>(PromiseReaction::kSize) ==
+ static_cast<int>(
+ PromiseReactionJobTask::kSizeOfAllPromiseReactionJobTasks));
+ if (type == PromiseReaction::kFulfill) {
+ task->synchronized_set_map(
+ ReadOnlyRoots(isolate).promise_fulfill_reaction_job_task_map());
+ Handle<PromiseFulfillReactionJobTask>::cast(task)->set_argument(
+ *argument);
+ Handle<PromiseFulfillReactionJobTask>::cast(task)->set_context(
+ *handler_context);
+ STATIC_ASSERT(
+ static_cast<int>(PromiseReaction::kFulfillHandlerOffset) ==
+ static_cast<int>(PromiseFulfillReactionJobTask::kHandlerOffset));
+ STATIC_ASSERT(
+ static_cast<int>(PromiseReaction::kPromiseOrCapabilityOffset) ==
+ static_cast<int>(
+ PromiseFulfillReactionJobTask::kPromiseOrCapabilityOffset));
+ } else {
+ DisallowHeapAllocation no_gc;
+ task->synchronized_set_map(
+ ReadOnlyRoots(isolate).promise_reject_reaction_job_task_map());
+ Handle<PromiseRejectReactionJobTask>::cast(task)->set_argument(*argument);
+ Handle<PromiseRejectReactionJobTask>::cast(task)->set_context(
+ *handler_context);
+ Handle<PromiseRejectReactionJobTask>::cast(task)->set_handler(
+ *primary_handler);
+ STATIC_ASSERT(
+ static_cast<int>(PromiseReaction::kPromiseOrCapabilityOffset) ==
+ static_cast<int>(
+ PromiseRejectReactionJobTask::kPromiseOrCapabilityOffset));
+ }
+
+ MicrotaskQueue* microtask_queue = handler_context->microtask_queue();
+ if (microtask_queue) {
+ microtask_queue->EnqueueMicrotask(
+ *Handle<PromiseReactionJobTask>::cast(task));
+ }
+ }
+
+ return isolate->factory()->undefined_value();
+}
+
+namespace {
+
+constexpr JSRegExp::Flag kCharFlagValues[] = {
+ JSRegExp::kGlobal, // g
+ JSRegExp::kInvalid, // h
+ JSRegExp::kIgnoreCase, // i
+ JSRegExp::kInvalid, // j
+ JSRegExp::kInvalid, // k
+ JSRegExp::kInvalid, // l
+ JSRegExp::kMultiline, // m
+ JSRegExp::kInvalid, // n
+ JSRegExp::kInvalid, // o
+ JSRegExp::kInvalid, // p
+ JSRegExp::kInvalid, // q
+ JSRegExp::kInvalid, // r
+ JSRegExp::kDotAll, // s
+ JSRegExp::kInvalid, // t
+ JSRegExp::kUnicode, // u
+ JSRegExp::kInvalid, // v
+ JSRegExp::kInvalid, // w
+ JSRegExp::kInvalid, // x
+ JSRegExp::kSticky, // y
+};
+
+constexpr JSRegExp::Flag CharToFlag(uc16 flag_char) {
+ return (flag_char < 'g' || flag_char > 'y')
+ ? JSRegExp::kInvalid
+ : kCharFlagValues[flag_char - 'g'];
+}
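+// Example: CharToFlag('i') yields kIgnoreCase via the table above, while any
+// character outside ['g', 'y'] (e.g. 'z') yields kInvalid.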
+
+JSRegExp::Flags RegExpFlagsFromString(Isolate* isolate, Handle<String> flags,
+ bool* success) {
+ STATIC_ASSERT(CharToFlag('g') == JSRegExp::kGlobal);
+ STATIC_ASSERT(CharToFlag('i') == JSRegExp::kIgnoreCase);
+ STATIC_ASSERT(CharToFlag('m') == JSRegExp::kMultiline);
+ STATIC_ASSERT(CharToFlag('s') == JSRegExp::kDotAll);
+ STATIC_ASSERT(CharToFlag('u') == JSRegExp::kUnicode);
+ STATIC_ASSERT(CharToFlag('y') == JSRegExp::kSticky);
+
+ int length = flags->length();
+ if (length == 0) {
+ *success = true;
+ return JSRegExp::kNone;
+ }
+ // A longer flags string cannot be valid.
+ if (length > JSRegExp::FlagCount()) return JSRegExp::Flags(0);
+ // Initialize {value} to {kInvalid} to allow 2-in-1 duplicate/invalid check.
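+ // Example: "gig" is rejected when the second 'g' finds its bit already set,
+ // and "gz" is rejected because 'z' maps to kInvalid, whose bit is pre-set
+ // here.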
+ JSRegExp::Flags value = JSRegExp::kInvalid;
+ if (flags->IsSeqOneByteString()) {
+ DisallowHeapAllocation no_gc;
+ SeqOneByteString seq_flags = SeqOneByteString::cast(*flags);
+ for (int i = 0; i < length; i++) {
+ JSRegExp::Flag flag = CharToFlag(seq_flags.Get(i));
+ // Duplicate or invalid flag.
+ if (value & flag) return JSRegExp::Flags(0);
+ value |= flag;
+ }
+ } else {
+ flags = String::Flatten(isolate, flags);
+ DisallowHeapAllocation no_gc;
+ String::FlatContent flags_content = flags->GetFlatContent(no_gc);
+ for (int i = 0; i < length; i++) {
+ JSRegExp::Flag flag = CharToFlag(flags_content.Get(i));
+ // Duplicate or invalid flag.
+ if (value & flag) return JSRegExp::Flags(0);
+ value |= flag;
+ }
+ }
+ *success = true;
+ // Drop the initially set {kInvalid} bit.
+ value ^= JSRegExp::kInvalid;
+ return value;
+}
+
+} // namespace
+
+// static
+MaybeHandle<JSRegExp> JSRegExp::New(Isolate* isolate, Handle<String> pattern,
+ Flags flags) {
+ Handle<JSFunction> constructor = isolate->regexp_function();
+ Handle<JSRegExp> regexp =
+ Handle<JSRegExp>::cast(isolate->factory()->NewJSObject(constructor));
+
+ return JSRegExp::Initialize(regexp, pattern, flags);
+}
+
+// static
+Handle<JSRegExp> JSRegExp::Copy(Handle<JSRegExp> regexp) {
+ Isolate* const isolate = regexp->GetIsolate();
+ return Handle<JSRegExp>::cast(isolate->factory()->CopyJSObject(regexp));
+}
+
+namespace {
+
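+// Counts the extra characters needed to escape the source for display.
+// E.g. "a/b" followed by a literal newline needs 2 extra characters: one for
+// the '/' escape and one because '\n' becomes "\\n"; an already-escaped
+// "\\/" needs none.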
+template <typename Char>
+int CountRequiredEscapes(Handle<String> source) {
+ DisallowHeapAllocation no_gc;
+ int escapes = 0;
+ Vector<const Char> src = source->GetCharVector<Char>(no_gc);
+ for (int i = 0; i < src.length(); i++) {
+ const Char c = src[i];
+ if (c == '\\') {
+ // Escape. Skip the next character.
+ i++;
+ } else if (c == '/') {
+ // An unescaped forward slash needs an escape.
+ escapes++;
+ } else if (c == '\n') {
+ escapes++;
+ } else if (c == '\r') {
+ escapes++;
+ } else if (static_cast<int>(c) == 0x2028) {
+ escapes += std::strlen("\\u2028") - 1;
+ } else if (static_cast<int>(c) == 0x2029) {
+ escapes += std::strlen("\\u2029") - 1;
+ } else {
+ DCHECK(!unibrow::IsLineTerminator(static_cast<unibrow::uchar>(c)));
+ }
+ }
+ return escapes;
+}
+
+template <typename Char>
+void WriteStringToCharVector(Vector<Char> v, int* d, const char* string) {
+ int s = 0;
+ while (string[s] != '\0') v[(*d)++] = string[s++];
+}
+
+template <typename Char, typename StringType>
+Handle<StringType> WriteEscapedRegExpSource(Handle<String> source,
+ Handle<StringType> result) {
+ DisallowHeapAllocation no_gc;
+ Vector<const Char> src = source->GetCharVector<Char>(no_gc);
+ Vector<Char> dst(result->GetChars(no_gc), result->length());
+ int s = 0;
+ int d = 0;
+ // TODO(v8:1982): Fully implement
+ // https://tc39.github.io/ecma262/#sec-escaperegexppattern
+ while (s < src.length()) {
+ if (src[s] == '\\') {
+ // Escape. Copy this and next character.
+ dst[d++] = src[s++];
+ if (s == src.length()) break;
+ } else if (src[s] == '/') {
+ // An unescaped forward slash needs an escape.
+ dst[d++] = '\\';
+ } else if (src[s] == '\n') {
+ WriteStringToCharVector(dst, &d, "\\n");
+ s++;
+ continue;
+ } else if (src[s] == '\r') {
+ WriteStringToCharVector(dst, &d, "\\r");
+ s++;
+ continue;
+ } else if (static_cast<int>(src[s]) == 0x2028) {
+ WriteStringToCharVector(dst, &d, "\\u2028");
+ s++;
+ continue;
+ } else if (static_cast<int>(src[s]) == 0x2029) {
+ WriteStringToCharVector(dst, &d, "\\u2029");
+ s++;
+ continue;
+ }
+ dst[d++] = src[s++];
+ }
+ DCHECK_EQ(result->length(), d);
+ return result;
+}
+
+MaybeHandle<String> EscapeRegExpSource(Isolate* isolate,
+ Handle<String> source) {
+ DCHECK(source->IsFlat());
+ if (source->length() == 0) return isolate->factory()->query_colon_string();
+ bool one_byte = String::IsOneByteRepresentationUnderneath(*source);
+ int escapes = one_byte ? CountRequiredEscapes<uint8_t>(source)
+ : CountRequiredEscapes<uc16>(source);
+ if (escapes == 0) return source;
+ int length = source->length() + escapes;
+ if (one_byte) {
+ Handle<SeqOneByteString> result;
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, result,
+ isolate->factory()->NewRawOneByteString(length),
+ String);
+ return WriteEscapedRegExpSource<uint8_t>(source, result);
+ } else {
+ Handle<SeqTwoByteString> result;
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, result,
+ isolate->factory()->NewRawTwoByteString(length),
+ String);
+ return WriteEscapedRegExpSource<uc16>(source, result);
+ }
+}
+
+} // namespace
+
+// static
+MaybeHandle<JSRegExp> JSRegExp::Initialize(Handle<JSRegExp> regexp,
+ Handle<String> source,
+ Handle<String> flags_string) {
+ Isolate* isolate = regexp->GetIsolate();
+ bool success = false;
+ Flags flags = RegExpFlagsFromString(isolate, flags_string, &success);
+ if (!success) {
+ THROW_NEW_ERROR(
+ isolate,
+ NewSyntaxError(MessageTemplate::kInvalidRegExpFlags, flags_string),
+ JSRegExp);
+ }
+ return Initialize(regexp, source, flags);
+}
+
+// static
+MaybeHandle<JSRegExp> JSRegExp::Initialize(Handle<JSRegExp> regexp,
+ Handle<String> source, Flags flags) {
+ Isolate* isolate = regexp->GetIsolate();
+ Factory* factory = isolate->factory();
+ // If source is the empty string we set it to "(?:)" instead as
+ // suggested by ECMA-262, 5th, section 15.10.4.1.
+ if (source->length() == 0) source = factory->query_colon_string();
+
+ source = String::Flatten(isolate, source);
+
+ Handle<String> escaped_source;
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, escaped_source,
+ EscapeRegExpSource(isolate, source), JSRegExp);
+
+ RETURN_ON_EXCEPTION(
+ isolate, RegExpImpl::Compile(isolate, regexp, source, flags), JSRegExp);
+
+ regexp->set_source(*escaped_source);
+ regexp->set_flags(Smi::FromInt(flags));
+
+ Map map = regexp->map();
+ Object constructor = map.GetConstructor();
+ if (constructor.IsJSFunction() &&
+ JSFunction::cast(constructor).initial_map() == map) {
+ // If we still have the original map, set in-object properties directly.
+ regexp->InObjectPropertyAtPut(JSRegExp::kLastIndexFieldIndex, Smi::kZero,
+ SKIP_WRITE_BARRIER);
+ } else {
+ // Map has changed, so use generic, but slower, method.
+ RETURN_ON_EXCEPTION(
+ isolate,
+ Object::SetProperty(isolate, regexp, factory->lastIndex_string(),
+ Handle<Smi>(Smi::zero(), isolate)),
+ JSRegExp);
+ }
+
+ return regexp;
+}
+
+// RegExpKey carries the source and flags of a regular expression as key.
+class RegExpKey : public HashTableKey {
+ public:
+ RegExpKey(Handle<String> string, JSRegExp::Flags flags)
+ : HashTableKey(
+ CompilationCacheShape::RegExpHash(*string, Smi::FromInt(flags))),
+ string_(string),
+ flags_(Smi::FromInt(flags)) {}
+
+ // Rather than storing the key in the hash table, a pointer to the
+ // stored value is stored where the key should be. IsMatch then
+ // compares the search key to the found object, rather than comparing
+ // a key to a key.
+ bool IsMatch(Object obj) override {
+ FixedArray val = FixedArray::cast(obj);
+ return string_->Equals(String::cast(val.get(JSRegExp::kSourceIndex))) &&
+ (flags_ == val.get(JSRegExp::kFlagsIndex));
+ }
+
+ Handle<String> string_;
+ Smi flags_;
+};
+
+// InternalizedStringKey carries a string/internalized-string object as key.
+class InternalizedStringKey final : public StringTableKey {
+ public:
+ explicit InternalizedStringKey(Handle<String> string)
+ : StringTableKey(0, string->length()), string_(string) {
+ DCHECK(!string->IsInternalizedString());
+ DCHECK(string->IsFlat());
+ // Make sure hash_field is computed.
+ string->Hash();
+ set_hash_field(string->hash_field());
+ }
+
+ bool IsMatch(String string) override { return string_->SlowEquals(string); }
+
+ Handle<String> AsHandle(Isolate* isolate) override {
+ // Internalize the string if possible.
+ MaybeHandle<Map> maybe_map =
+ isolate->factory()->InternalizedStringMapForString(string_);
+ Handle<Map> map;
+ if (maybe_map.ToHandle(&map)) {
+ string_->set_map_no_write_barrier(*map);
+ DCHECK(string_->IsInternalizedString());
+ return string_;
+ }
+ if (FLAG_thin_strings) {
+ // External strings get special treatment, to avoid copying their
+ // contents.
+ if (string_->IsExternalOneByteString()) {
+ return isolate->factory()
+ ->InternalizeExternalString<ExternalOneByteString>(string_);
+ } else if (string_->IsExternalTwoByteString()) {
+ return isolate->factory()
+ ->InternalizeExternalString<ExternalTwoByteString>(string_);
+ }
+ }
+ // Otherwise allocate a new internalized string.
+ return isolate->factory()->NewInternalizedStringImpl(
+ string_, string_->length(), string_->hash_field());
+ }
+
+ private:
+ Handle<String> string_;
+};
+
+template <typename Derived, typename Shape>
+void HashTable<Derived, Shape>::IteratePrefix(ObjectVisitor* v) {
+ BodyDescriptorBase::IteratePointers(*this, 0, kElementsStartOffset, v);
+}
+
+template <typename Derived, typename Shape>
+void HashTable<Derived, Shape>::IterateElements(ObjectVisitor* v) {
+ BodyDescriptorBase::IteratePointers(*this, kElementsStartOffset,
+ SizeFor(length()), v);
+}
+
+template <typename Derived, typename Shape>
+Handle<Derived> HashTable<Derived, Shape>::New(
+ Isolate* isolate, int at_least_space_for, AllocationType allocation,
+ MinimumCapacity capacity_option) {
+ DCHECK_LE(0, at_least_space_for);
+ DCHECK_IMPLIES(capacity_option == USE_CUSTOM_MINIMUM_CAPACITY,
+ base::bits::IsPowerOfTwo(at_least_space_for));
+
+ int capacity = (capacity_option == USE_CUSTOM_MINIMUM_CAPACITY)
+ ? at_least_space_for
+ : ComputeCapacity(at_least_space_for);
+ if (capacity > HashTable::kMaxCapacity) {
+ isolate->heap()->FatalProcessOutOfMemory("invalid table size");
+ }
+ return NewInternal(isolate, capacity, allocation);
+}
+
+template <typename Derived, typename Shape>
+Handle<Derived> HashTable<Derived, Shape>::NewInternal(
+ Isolate* isolate, int capacity, AllocationType allocation) {
+ Factory* factory = isolate->factory();
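+  // EntryToIndex maps an entry number to its index in the backing store
+  // (kElementsStartIndex + entry * kEntrySize), so EntryToIndex(capacity)
+  // yields the FixedArray length needed for the whole table.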
+ int length = EntryToIndex(capacity);
+ RootIndex map_root_index = Shape::GetMapRootIndex();
+ Handle<FixedArray> array =
+ factory->NewFixedArrayWithMap(map_root_index, length, allocation);
+ Handle<Derived> table = Handle<Derived>::cast(array);
+
+ table->SetNumberOfElements(0);
+ table->SetNumberOfDeletedElements(0);
+ table->SetCapacity(capacity);
+ return table;
+}
+
+template <typename Derived, typename Shape>
+void HashTable<Derived, Shape>::Rehash(ReadOnlyRoots roots, Derived new_table) {
+ DisallowHeapAllocation no_gc;
+ WriteBarrierMode mode = new_table.GetWriteBarrierMode(no_gc);
+
+ DCHECK_LT(NumberOfElements(), new_table.Capacity());
+
+ // Copy prefix to new array.
+ for (int i = kPrefixStartIndex; i < kElementsStartIndex; i++) {
+ new_table.set(i, get(i), mode);
+ }
+
+ // Rehash the elements.
+ int capacity = this->Capacity();
+ for (int i = 0; i < capacity; i++) {
+ uint32_t from_index = EntryToIndex(i);
+ Object k = this->get(from_index);
+ if (!Shape::IsLive(roots, k)) continue;
+ uint32_t hash = Shape::HashForObject(roots, k);
+ uint32_t insertion_index = EntryToIndex(new_table.FindInsertionEntry(hash));
+ new_table.set_key(insertion_index, get(from_index), mode);
+ for (int j = 1; j < Shape::kEntrySize; j++) {
+ new_table.set(insertion_index + j, get(from_index + j), mode);
+ }
+ }
+ new_table.SetNumberOfElements(NumberOfElements());
+ new_table.SetNumberOfDeletedElements(0);
+}
+
+template <typename Derived, typename Shape>
+uint32_t HashTable<Derived, Shape>::EntryForProbe(ReadOnlyRoots roots, Object k,
+ int probe,
+ uint32_t expected) {
+ uint32_t hash = Shape::HashForObject(roots, k);
+ uint32_t capacity = this->Capacity();
+ uint32_t entry = FirstProbe(hash, capacity);
+ for (int i = 1; i < probe; i++) {
+ if (entry == expected) return expected;
+ entry = NextProbe(entry, i, capacity);
+ }
+ return entry;
+}
+
+template <typename Derived, typename Shape>
+void HashTable<Derived, Shape>::Swap(uint32_t entry1, uint32_t entry2,
+ WriteBarrierMode mode) {
+ int index1 = EntryToIndex(entry1);
+ int index2 = EntryToIndex(entry2);
+ Object temp[Shape::kEntrySize];
+ Derived* self = static_cast<Derived*>(this);
+ for (int j = 0; j < Shape::kEntrySize; j++) {
+ temp[j] = get(index1 + j);
+ }
+ self->set_key(index1, get(index2), mode);
+ for (int j = 1; j < Shape::kEntrySize; j++) {
+ set(index1 + j, get(index2 + j), mode);
+ }
+ self->set_key(index2, temp[0], mode);
+ for (int j = 1; j < Shape::kEntrySize; j++) {
+ set(index2 + j, temp[j], mode);
+ }
+}
+
+template <typename Derived, typename Shape>
+void HashTable<Derived, Shape>::Rehash(ReadOnlyRoots roots) {
+ DisallowHeapAllocation no_gc;
+ WriteBarrierMode mode = GetWriteBarrierMode(no_gc);
+ uint32_t capacity = Capacity();
+ bool done = false;
+ for (int probe = 1; !done; probe++) {
+ // All elements at entries given by one of the first _probe_ probes
+ // are placed correctly. Other elements might need to be moved.
+ done = true;
+ for (uint32_t current = 0; current < capacity; current++) {
+ Object current_key = KeyAt(current);
+ if (!Shape::IsLive(roots, current_key)) continue;
+ uint32_t target = EntryForProbe(roots, current_key, probe, current);
+ if (current == target) continue;
+ Object target_key = KeyAt(target);
+ if (!Shape::IsLive(roots, target_key) ||
+ EntryForProbe(roots, target_key, probe, target) != target) {
+ // Put the current element into the correct position.
+ Swap(current, target, mode);
+ // The other element will be processed on the next iteration.
+ current--;
+ } else {
+ // The place for the current element is occupied. Leave the element
+ // for the next probe.
+ done = false;
+ }
+ }
+ }
+ // Wipe deleted entries.
+ Object the_hole = roots.the_hole_value();
+ HeapObject undefined = roots.undefined_value();
+ Derived* self = static_cast<Derived*>(this);
+ for (uint32_t current = 0; current < capacity; current++) {
+ if (KeyAt(current) == the_hole) {
+ self->set_key(EntryToIndex(current) + kEntryKeyIndex, undefined,
+ SKIP_WRITE_BARRIER);
+ }
+ }
+ SetNumberOfDeletedElements(0);
+}
+
+template <typename Derived, typename Shape>
+Handle<Derived> HashTable<Derived, Shape>::EnsureCapacity(
+ Isolate* isolate, Handle<Derived> table, int n, AllocationType allocation) {
+ if (table->HasSufficientCapacityToAdd(n)) return table;
+
+ int capacity = table->Capacity();
+ int new_nof = table->NumberOfElements() + n;
+
+ const int kMinCapacityForPretenure = 256;
+ bool should_pretenure = allocation == AllocationType::kOld ||
+ ((capacity > kMinCapacityForPretenure) &&
+ !Heap::InYoungGeneration(*table));
+ Handle<Derived> new_table = HashTable::New(
+ isolate, new_nof,
+ should_pretenure ? AllocationType::kOld : AllocationType::kYoung);
+
+ table->Rehash(ReadOnlyRoots(isolate), *new_table);
+ return new_table;
+}
+
+template bool
+HashTable<NameDictionary, NameDictionaryShape>::HasSufficientCapacityToAdd(int);
+
+template <typename Derived, typename Shape>
+bool HashTable<Derived, Shape>::HasSufficientCapacityToAdd(
+ int number_of_additional_elements) {
+ int capacity = Capacity();
+ int nof = NumberOfElements() + number_of_additional_elements;
+ int nod = NumberOfDeletedElements();
+  // Return true if the remaining free space is at least half the new number
+  // of elements (nof) and at most 50% of the free slots are deleted
+  // elements.
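+  // Worked example (illustrative numbers): capacity = 16 and nof = 7 after
+  // the addition, so 9 slots are free and up to 4 of them may be deleted
+  // entries; needed_free = 7 >> 1 = 3 and 7 + 3 <= 16, so there is room.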
+ if ((nof < capacity) && ((nod <= (capacity - nof) >> 1))) {
+ int needed_free = nof >> 1;
+ if (nof + needed_free <= capacity) return true;
+ }
+ return false;
+}
+
+template <typename Derived, typename Shape>
+Handle<Derived> HashTable<Derived, Shape>::Shrink(Isolate* isolate,
+ Handle<Derived> table,
+ int additionalCapacity) {
+ int capacity = table->Capacity();
+ int nof = table->NumberOfElements();
+
+ // Shrink to fit the number of elements if only a quarter of the
+ // capacity is filled with elements.
+ if (nof > (capacity >> 2)) return table;
+ // Allocate a new dictionary with room for at least the current number of
+ // elements + {additionalCapacity}. The allocation method will make sure that
+ // there is extra room in the dictionary for additions. Don't go lower than
+ // room for {kMinShrinkCapacity} elements.
+ int at_least_room_for = nof + additionalCapacity;
+ int new_capacity = ComputeCapacity(at_least_room_for);
+ if (new_capacity < Derived::kMinShrinkCapacity) return table;
+ if (new_capacity == capacity) return table;
+
+ const int kMinCapacityForPretenure = 256;
+ bool pretenure = (at_least_room_for > kMinCapacityForPretenure) &&
+ !Heap::InYoungGeneration(*table);
+ Handle<Derived> new_table =
+ HashTable::New(isolate, new_capacity,
+ pretenure ? AllocationType::kOld : AllocationType::kYoung,
+ USE_CUSTOM_MINIMUM_CAPACITY);
+
+ table->Rehash(ReadOnlyRoots(isolate), *new_table);
+ return new_table;
+}
+
+template <typename Derived, typename Shape>
+uint32_t HashTable<Derived, Shape>::FindInsertionEntry(uint32_t hash) {
+ uint32_t capacity = Capacity();
+ uint32_t entry = FirstProbe(hash, capacity);
+ uint32_t count = 1;
+ // EnsureCapacity will guarantee the hash table is never full.
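+  // NextProbe advances by an increasing step (triangular-number probing);
+  // with a power-of-two capacity this sequence visits every slot, so the
+  // loop below terminates as long as the table is never full.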
+ ReadOnlyRoots roots = GetReadOnlyRoots();
+ while (true) {
+ if (!Shape::IsLive(roots, KeyAt(entry))) break;
+ entry = NextProbe(entry, count++, capacity);
+ }
+ return entry;
+}
+
+void StringTable::EnsureCapacityForDeserialization(Isolate* isolate,
+ int expected) {
+ Handle<StringTable> table = isolate->factory()->string_table();
+ table = StringTable::EnsureCapacity(isolate, table, expected);
+ isolate->heap()->SetRootStringTable(*table);
+}
+
+// static
+Handle<String> StringTable::LookupString(Isolate* isolate,
+ Handle<String> string) {
+ string = String::Flatten(isolate, string);
+ if (string->IsInternalizedString()) return string;
+
+ InternalizedStringKey key(string);
+ Handle<String> result = LookupKey(isolate, &key);
+
+ if (FLAG_thin_strings) {
+ if (!string->IsInternalizedString()) {
+ string->MakeThin(isolate, *result);
+ }
+ } else { // !FLAG_thin_strings
+ if (string->IsConsString()) {
+ Handle<ConsString> cons = Handle<ConsString>::cast(string);
+ cons->set_first(isolate, *result);
+ cons->set_second(isolate, ReadOnlyRoots(isolate).empty_string());
+ } else if (string->IsSlicedString()) {
+ STATIC_ASSERT(static_cast<int>(ConsString::kSize) ==
+ static_cast<int>(SlicedString::kSize));
+ DisallowHeapAllocation no_gc;
+ bool one_byte = result->IsOneByteRepresentation();
+ Handle<Map> map = one_byte
+ ? isolate->factory()->cons_one_byte_string_map()
+ : isolate->factory()->cons_string_map();
+ string->set_map(*map);
+ Handle<ConsString> cons = Handle<ConsString>::cast(string);
+ cons->set_first(isolate, *result);
+ cons->set_second(isolate, ReadOnlyRoots(isolate).empty_string());
+ }
+ }
+ return result;
+}
+
+// static
+template <typename StringTableKey>
+Handle<String> StringTable::LookupKey(Isolate* isolate, StringTableKey* key) {
+ Handle<StringTable> table = isolate->factory()->string_table();
+ int entry = table->FindEntry(isolate, key);
+
+ // String already in table.
+ if (entry != kNotFound) {
+ return handle(String::cast(table->KeyAt(entry)), isolate);
+ }
+
+ table = StringTable::CautiousShrink(isolate, table);
+ // Adding new string. Grow table if needed.
+ table = StringTable::EnsureCapacity(isolate, table, 1);
+ isolate->heap()->SetRootStringTable(*table);
+
+ return AddKeyNoResize(isolate, key);
+}
+
+template Handle<String> StringTable::LookupKey(Isolate* isolate,
+ OneByteStringKey* key);
+template Handle<String> StringTable::LookupKey(Isolate* isolate,
+ TwoByteStringKey* key);
+template Handle<String> StringTable::LookupKey(Isolate* isolate,
+ SeqOneByteSubStringKey* key);
+template Handle<String> StringTable::LookupKey(Isolate* isolate,
+ SeqTwoByteSubStringKey* key);
+
+Handle<String> StringTable::AddKeyNoResize(Isolate* isolate,
+ StringTableKey* key) {
+ Handle<StringTable> table = isolate->factory()->string_table();
+ DCHECK(table->HasSufficientCapacityToAdd(1));
+ // Create string object.
+ Handle<String> string = key->AsHandle(isolate);
+  // There must be no attempts to internalize strings that could throw an
+  // InvalidStringLength error.
+ CHECK(!string.is_null());
+ DCHECK(string->HasHashCode());
+ DCHECK_EQ(table->FindEntry(isolate, key), kNotFound);
+
+ // Add the new string and return it along with the string table.
+ int entry = table->FindInsertionEntry(key->hash());
+ table->set(EntryToIndex(entry), *string);
+ table->ElementAdded();
+
+ return Handle<String>::cast(string);
+}
+
+Handle<StringTable> StringTable::CautiousShrink(Isolate* isolate,
+ Handle<StringTable> table) {
+  // Only shrink if the table is nearly empty, to avoid a performance
+  // penalty.
+ int capacity = table->Capacity();
+ int nof = table->NumberOfElements();
+ if (capacity <= StringTable::kMinCapacity) return table;
+ if (nof > (capacity / kMaxEmptyFactor)) return table;
+  // Request room for the current number of elements plus a quarter more as
+  // slack.
+ int slack_capacity = nof >> 2;
+ return Shrink(isolate, table, slack_capacity);
+}
+
+namespace {
+
+template <typename Char>
+Address LookupString(Isolate* isolate, String string, String source,
+ size_t start) {
+ DisallowHeapAllocation no_gc;
+ StringTable table = isolate->heap()->string_table();
+ uint64_t seed = HashSeed(isolate);
+
+ int length = string.length();
+
+ std::unique_ptr<Char[]> buffer;
+ const Char* chars;
+
+ if (source.IsConsString()) {
+ DCHECK(!source.IsFlat());
+ buffer.reset(new Char[length]);
+ String::WriteToFlat(source, buffer.get(), 0, length);
+ chars = buffer.get();
+ } else {
+ chars = source.GetChars<Char>(no_gc) + start;
+ }
+ // TODO(verwaest): Internalize to one-byte when possible.
+ SequentialStringKey<Char> key(Vector<const Char>(chars, length), seed);
+
+ // String could be an array index.
+ uint32_t hash_field = key.hash_field();
+
+ if (Name::ContainsCachedArrayIndex(hash_field)) {
+ return Smi::FromInt(String::ArrayIndexValueBits::decode(hash_field)).ptr();
+ }
+
+ if ((hash_field & Name::kIsNotArrayIndexMask) == 0) {
+    // It is an array index, but it is not cached.
+ return Smi::FromInt(ResultSentinel::kUnsupported).ptr();
+ }
+
+ int entry = table.FindEntry(ReadOnlyRoots(isolate), &key, key.hash());
+ if (entry == kNotFound) {
+ // A string that's not an array index, and not in the string table,
+ // cannot have been used as a property name before.
+ return Smi::FromInt(ResultSentinel::kNotFound).ptr();
+ }
+
+ String internalized = String::cast(table.KeyAt(entry));
+ if (FLAG_thin_strings) {
+ string.MakeThin(isolate, internalized);
+ }
+ return internalized.ptr();
+}
+
+} // namespace
+
+// static
+Address StringTable::LookupStringIfExists_NoAllocate(Isolate* isolate,
+ Address raw_string) {
+ String string = String::cast(Object(raw_string));
+ DCHECK(!string.IsInternalizedString());
+
+ // Valid array indices are >= 0, so they cannot be mixed up with any of
+ // the result sentinels, which are negative.
+ STATIC_ASSERT(
+ !String::ArrayIndexValueBits::is_valid(ResultSentinel::kUnsupported));
+ STATIC_ASSERT(
+ !String::ArrayIndexValueBits::is_valid(ResultSentinel::kNotFound));
+
+ size_t start = 0;
+ String source = string;
+ if (source.IsSlicedString()) {
+ SlicedString sliced = SlicedString::cast(source);
+ start = sliced.offset();
+ source = sliced.parent();
+ } else if (source.IsConsString() && source.IsFlat()) {
+ source = ConsString::cast(source).first();
+ }
+ if (source.IsThinString()) {
+ source = ThinString::cast(source).actual();
+ if (string.length() == source.length()) {
+ return source.ptr();
+ }
+ }
+ if (source.IsOneByteRepresentation()) {
+ return i::LookupString<uint8_t>(isolate, string, source, start);
+ }
+ return i::LookupString<uint16_t>(isolate, string, source, start);
+}
+
+Handle<StringSet> StringSet::New(Isolate* isolate) {
+ return HashTable::New(isolate, 0);
+}
+
+Handle<StringSet> StringSet::Add(Isolate* isolate, Handle<StringSet> stringset,
+ Handle<String> name) {
+ if (!stringset->Has(isolate, name)) {
+ stringset = EnsureCapacity(isolate, stringset, 1);
+ uint32_t hash = ShapeT::Hash(isolate, *name);
+ int entry = stringset->FindInsertionEntry(hash);
+ stringset->set(EntryToIndex(entry), *name);
+ stringset->ElementAdded();
+ }
+ return stringset;
+}
+
+bool StringSet::Has(Isolate* isolate, Handle<String> name) {
+ return FindEntry(isolate, *name) != kNotFound;
+}
+
+Handle<ObjectHashSet> ObjectHashSet::Add(Isolate* isolate,
+ Handle<ObjectHashSet> set,
+ Handle<Object> key) {
+ int32_t hash = key->GetOrCreateHash(isolate).value();
+ if (!set->Has(isolate, key, hash)) {
+ set = EnsureCapacity(isolate, set, 1);
+ int entry = set->FindInsertionEntry(hash);
+ set->set(EntryToIndex(entry), *key);
+ set->ElementAdded();
+ }
+ return set;
+}
+
+namespace {
+
+const int kLiteralEntryLength = 2;
+const int kLiteralInitialLength = 2;
+const int kLiteralContextOffset = 0;
+const int kLiteralLiteralsOffset = 1;
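+
+// The literals map stored in a cache entry is a flat WeakFixedArray of
+// (native context, feedback cell) pairs, hence kLiteralEntryLength == 2 and
+// the two offsets above.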
+
+int SearchLiteralsMapEntry(CompilationCacheTable cache, int cache_entry,
+ Context native_context) {
+ DisallowHeapAllocation no_gc;
+ DCHECK(native_context.IsNativeContext());
+ Object obj = cache.get(cache_entry);
+
+ // Check that there's no confusion between FixedArray and WeakFixedArray (the
+ // object used to be a FixedArray here).
+ DCHECK(!obj.IsFixedArray());
+ if (obj.IsWeakFixedArray()) {
+ WeakFixedArray literals_map = WeakFixedArray::cast(obj);
+ int length = literals_map.length();
+ for (int i = 0; i < length; i += kLiteralEntryLength) {
+ DCHECK(literals_map.Get(i + kLiteralContextOffset)->IsWeakOrCleared());
+ if (literals_map.Get(i + kLiteralContextOffset) ==
+ HeapObjectReference::Weak(native_context)) {
+ return i;
+ }
+ }
+ }
+ return -1;
+}
+
+void AddToFeedbackCellsMap(Handle<CompilationCacheTable> cache, int cache_entry,
+ Handle<Context> native_context,
+ Handle<FeedbackCell> feedback_cell) {
+ Isolate* isolate = native_context->GetIsolate();
+ DCHECK(native_context->IsNativeContext());
+ STATIC_ASSERT(kLiteralEntryLength == 2);
+ Handle<WeakFixedArray> new_literals_map;
+ int entry;
+
+ Object obj = cache->get(cache_entry);
+
+ // Check that there's no confusion between FixedArray and WeakFixedArray (the
+ // object used to be a FixedArray here).
+ DCHECK(!obj.IsFixedArray());
+ if (!obj.IsWeakFixedArray() || WeakFixedArray::cast(obj).length() == 0) {
+ new_literals_map = isolate->factory()->NewWeakFixedArray(
+ kLiteralInitialLength, AllocationType::kOld);
+ entry = 0;
+ } else {
+ Handle<WeakFixedArray> old_literals_map(WeakFixedArray::cast(obj), isolate);
+ entry = SearchLiteralsMapEntry(*cache, cache_entry, *native_context);
+ if (entry >= 0) {
+      // Just update the feedback cell of the existing entry.
+ old_literals_map->Set(entry + kLiteralLiteralsOffset,
+ HeapObjectReference::Weak(*feedback_cell));
+ return;
+ }
+
+ // Can we reuse an entry?
+ DCHECK_LT(entry, 0);
+ int length = old_literals_map->length();
+ for (int i = 0; i < length; i += kLiteralEntryLength) {
+ if (old_literals_map->Get(i + kLiteralContextOffset)->IsCleared()) {
+ new_literals_map = old_literals_map;
+ entry = i;
+ break;
+ }
+ }
+
+ if (entry < 0) {
+      // Copy the old literals map and append one new entry.
+ new_literals_map = isolate->factory()->CopyWeakFixedArrayAndGrow(
+ old_literals_map, kLiteralEntryLength, AllocationType::kOld);
+ entry = old_literals_map->length();
+ }
+ }
+
+ new_literals_map->Set(entry + kLiteralContextOffset,
+ HeapObjectReference::Weak(*native_context));
+ new_literals_map->Set(entry + kLiteralLiteralsOffset,
+ HeapObjectReference::Weak(*feedback_cell));
+
+#ifdef DEBUG
+ for (int i = 0; i < new_literals_map->length(); i += kLiteralEntryLength) {
+ MaybeObject object = new_literals_map->Get(i + kLiteralContextOffset);
+ DCHECK(object->IsCleared() ||
+ object->GetHeapObjectAssumeWeak().IsNativeContext());
+ object = new_literals_map->Get(i + kLiteralLiteralsOffset);
+ DCHECK(object->IsCleared() ||
+ object->GetHeapObjectAssumeWeak().IsFeedbackCell());
+ }
+#endif
+
+ Object old_literals_map = cache->get(cache_entry);
+ if (old_literals_map != *new_literals_map) {
+ cache->set(cache_entry, *new_literals_map);
+ }
+}
+
+FeedbackCell SearchLiteralsMap(CompilationCacheTable cache, int cache_entry,
+ Context native_context) {
+ FeedbackCell result;
+ int entry = SearchLiteralsMapEntry(cache, cache_entry, native_context);
+ if (entry >= 0) {
+ WeakFixedArray literals_map = WeakFixedArray::cast(cache.get(cache_entry));
+ DCHECK_LE(entry + kLiteralEntryLength, literals_map.length());
+ MaybeObject object = literals_map.Get(entry + kLiteralLiteralsOffset);
+
+ if (!object->IsCleared()) {
+ result = FeedbackCell::cast(object->GetHeapObjectAssumeWeak());
+ }
+ }
+ DCHECK(result.is_null() || result.IsFeedbackCell());
+ return result;
+}
+
+} // namespace
+
+MaybeHandle<SharedFunctionInfo> CompilationCacheTable::LookupScript(
+ Handle<CompilationCacheTable> table, Handle<String> src,
+ Handle<Context> native_context, LanguageMode language_mode) {
+ // We use the empty function SFI as part of the key. Although the
+ // empty_function is native context dependent, the SFI is de-duped on
+ // snapshot builds by the PartialSnapshotCache, and so this does not prevent
+ // reuse of scripts in the compilation cache across native contexts.
+ Handle<SharedFunctionInfo> shared(native_context->empty_function().shared(),
+ native_context->GetIsolate());
+ Isolate* isolate = native_context->GetIsolate();
+ src = String::Flatten(isolate, src);
+ StringSharedKey key(src, shared, language_mode, kNoSourcePosition);
+ int entry = table->FindEntry(isolate, &key);
+ if (entry == kNotFound) return MaybeHandle<SharedFunctionInfo>();
+ int index = EntryToIndex(entry);
+ if (!table->get(index).IsFixedArray()) {
+ return MaybeHandle<SharedFunctionInfo>();
+ }
+ Object obj = table->get(index + 1);
+ if (obj.IsSharedFunctionInfo()) {
+ return handle(SharedFunctionInfo::cast(obj), native_context->GetIsolate());
+ }
+ return MaybeHandle<SharedFunctionInfo>();
+}
+
+InfoCellPair CompilationCacheTable::LookupEval(
+ Handle<CompilationCacheTable> table, Handle<String> src,
+ Handle<SharedFunctionInfo> outer_info, Handle<Context> native_context,
+ LanguageMode language_mode, int position) {
+ InfoCellPair empty_result;
+ Isolate* isolate = native_context->GetIsolate();
+ src = String::Flatten(isolate, src);
+ StringSharedKey key(src, outer_info, language_mode, position);
+ int entry = table->FindEntry(isolate, &key);
+ if (entry == kNotFound) return empty_result;
+ int index = EntryToIndex(entry);
+ if (!table->get(index).IsFixedArray()) return empty_result;
+ Object obj = table->get(EntryToIndex(entry) + 1);
+ if (obj.IsSharedFunctionInfo()) {
+ FeedbackCell feedback_cell =
+ SearchLiteralsMap(*table, EntryToIndex(entry) + 2, *native_context);
+ return InfoCellPair(SharedFunctionInfo::cast(obj), feedback_cell);
+ }
+ return empty_result;
+}
+
+Handle<Object> CompilationCacheTable::LookupRegExp(Handle<String> src,
+ JSRegExp::Flags flags) {
+ Isolate* isolate = GetIsolate();
+ DisallowHeapAllocation no_allocation;
+ RegExpKey key(src, flags);
+ int entry = FindEntry(isolate, &key);
+ if (entry == kNotFound) return isolate->factory()->undefined_value();
+ return Handle<Object>(get(EntryToIndex(entry) + 1), isolate);
+}
+
+Handle<CompilationCacheTable> CompilationCacheTable::PutScript(
+ Handle<CompilationCacheTable> cache, Handle<String> src,
+ Handle<Context> native_context, LanguageMode language_mode,
+ Handle<SharedFunctionInfo> value) {
+ Isolate* isolate = native_context->GetIsolate();
+ // We use the empty function SFI as part of the key. Although the
+ // empty_function is native context dependent, the SFI is de-duped on
+ // snapshot builds by the PartialSnapshotCache, and so this does not prevent
+ // reuse of scripts in the compilation cache across native contexts.
+ Handle<SharedFunctionInfo> shared(native_context->empty_function().shared(),
+ isolate);
+ src = String::Flatten(isolate, src);
+ StringSharedKey key(src, shared, language_mode, kNoSourcePosition);
+ Handle<Object> k = key.AsHandle(isolate);
+ cache = EnsureCapacity(isolate, cache, 1);
+ int entry = cache->FindInsertionEntry(key.Hash());
+ cache->set(EntryToIndex(entry), *k);
+ cache->set(EntryToIndex(entry) + 1, *value);
+ cache->ElementAdded();
+ return cache;
+}
+
+Handle<CompilationCacheTable> CompilationCacheTable::PutEval(
+ Handle<CompilationCacheTable> cache, Handle<String> src,
+ Handle<SharedFunctionInfo> outer_info, Handle<SharedFunctionInfo> value,
+ Handle<Context> native_context, Handle<FeedbackCell> feedback_cell,
+ int position) {
+ Isolate* isolate = native_context->GetIsolate();
+ src = String::Flatten(isolate, src);
+ StringSharedKey key(src, outer_info, value->language_mode(), position);
+ {
+ Handle<Object> k = key.AsHandle(isolate);
+ int entry = cache->FindEntry(isolate, &key);
+ if (entry != kNotFound) {
+ cache->set(EntryToIndex(entry), *k);
+ cache->set(EntryToIndex(entry) + 1, *value);
+ // AddToFeedbackCellsMap may allocate a new sub-array to live in the
+      // entry, but it won't change the cache array. Therefore EntryToIndex
+      // and entry remain correct.
+ AddToFeedbackCellsMap(cache, EntryToIndex(entry) + 2, native_context,
+ feedback_cell);
+ // Add hash again even on cache hit to avoid unnecessary cache delay in
+ // case of hash collisions.
+ }
+ }
+
+ cache = EnsureCapacity(isolate, cache, 1);
+ int entry = cache->FindInsertionEntry(key.Hash());
+ Handle<Object> k =
+ isolate->factory()->NewNumber(static_cast<double>(key.Hash()));
+ cache->set(EntryToIndex(entry), *k);
+ cache->set(EntryToIndex(entry) + 1, Smi::FromInt(kHashGenerations));
+ cache->ElementAdded();
+ return cache;
+}
+
+Handle<CompilationCacheTable> CompilationCacheTable::PutRegExp(
+ Isolate* isolate, Handle<CompilationCacheTable> cache, Handle<String> src,
+ JSRegExp::Flags flags, Handle<FixedArray> value) {
+ RegExpKey key(src, flags);
+ cache = EnsureCapacity(isolate, cache, 1);
+ int entry = cache->FindInsertionEntry(key.Hash());
+ // We store the value in the key slot, and compare the search key
+  // to the stored value with a custom IsMatch function during lookups.
+ cache->set(EntryToIndex(entry), *value);
+ cache->set(EntryToIndex(entry) + 1, *value);
+ cache->ElementAdded();
+ return cache;
+}
+
+void CompilationCacheTable::Age() {
+ DisallowHeapAllocation no_allocation;
+ Object the_hole_value = GetReadOnlyRoots().the_hole_value();
+ for (int entry = 0, size = Capacity(); entry < size; entry++) {
+ int entry_index = EntryToIndex(entry);
+ int value_index = entry_index + 1;
+
+ if (get(entry_index).IsNumber()) {
+ Smi count = Smi::cast(get(value_index));
+ count = Smi::FromInt(count.value() - 1);
+ if (count.value() == 0) {
+ NoWriteBarrierSet(*this, entry_index, the_hole_value);
+ NoWriteBarrierSet(*this, value_index, the_hole_value);
+ ElementRemoved();
+ } else {
+ NoWriteBarrierSet(*this, value_index, count);
+ }
+ } else if (get(entry_index).IsFixedArray()) {
+ SharedFunctionInfo info = SharedFunctionInfo::cast(get(value_index));
+ if (info.IsInterpreted() && info.GetBytecodeArray().IsOld()) {
+ for (int i = 0; i < kEntrySize; i++) {
+ NoWriteBarrierSet(*this, entry_index + i, the_hole_value);
+ }
+ ElementRemoved();
+ }
+ }
+ }
+}
+
+void CompilationCacheTable::Remove(Object value) {
+ DisallowHeapAllocation no_allocation;
+ Object the_hole_value = GetReadOnlyRoots().the_hole_value();
+ for (int entry = 0, size = Capacity(); entry < size; entry++) {
+ int entry_index = EntryToIndex(entry);
+ int value_index = entry_index + 1;
+ if (get(value_index) == value) {
+ for (int i = 0; i < kEntrySize; i++) {
+ NoWriteBarrierSet(*this, entry_index + i, the_hole_value);
+ }
+ ElementRemoved();
+ }
+ }
+}
+
+template <typename Derived, typename Shape>
+Handle<Derived> BaseNameDictionary<Derived, Shape>::New(
+ Isolate* isolate, int at_least_space_for, AllocationType allocation,
+ MinimumCapacity capacity_option) {
+ DCHECK_LE(0, at_least_space_for);
+ Handle<Derived> dict = Dictionary<Derived, Shape>::New(
+ isolate, at_least_space_for, allocation, capacity_option);
+ dict->SetHash(PropertyArray::kNoHashSentinel);
+ dict->SetNextEnumerationIndex(PropertyDetails::kInitialIndex);
+ return dict;
+}
+
+template <typename Derived, typename Shape>
+Handle<Derived> BaseNameDictionary<Derived, Shape>::EnsureCapacity(
+ Isolate* isolate, Handle<Derived> dictionary, int n) {
+ // Check whether there are enough enumeration indices to add n elements.
+ if (!PropertyDetails::IsValidIndex(dictionary->NextEnumerationIndex() + n)) {
+ // If not, we generate new indices for the properties.
+ int length = dictionary->NumberOfElements();
+
+ Handle<FixedArray> iteration_order = IterationIndices(isolate, dictionary);
+ DCHECK_EQ(length, iteration_order->length());
+
+ // Iterate over the dictionary using the enumeration order and update
+ // the dictionary with new enumeration indices.
+ for (int i = 0; i < length; i++) {
+ int index = Smi::ToInt(iteration_order->get(i));
+ DCHECK(dictionary->IsKey(dictionary->GetReadOnlyRoots(),
+ dictionary->KeyAt(index)));
+
+ int enum_index = PropertyDetails::kInitialIndex + i;
+
+ PropertyDetails details = dictionary->DetailsAt(index);
+ PropertyDetails new_details = details.set_index(enum_index);
+ dictionary->DetailsAtPut(isolate, index, new_details);
+ }
+
+ // Set the next enumeration index.
+ dictionary->SetNextEnumerationIndex(PropertyDetails::kInitialIndex +
+ length);
+ }
+ return HashTable<Derived, Shape>::EnsureCapacity(isolate, dictionary, n);
+}
+
+template <typename Derived, typename Shape>
+Handle<Derived> Dictionary<Derived, Shape>::DeleteEntry(
+ Isolate* isolate, Handle<Derived> dictionary, int entry) {
+ DCHECK(Shape::kEntrySize != 3 ||
+ dictionary->DetailsAt(entry).IsConfigurable());
+ dictionary->ClearEntry(isolate, entry);
+ dictionary->ElementRemoved();
+ return Shrink(isolate, dictionary);
+}
+
+template <typename Derived, typename Shape>
+Handle<Derived> Dictionary<Derived, Shape>::AtPut(Isolate* isolate,
+ Handle<Derived> dictionary,
+ Key key, Handle<Object> value,
+ PropertyDetails details) {
+ int entry = dictionary->FindEntry(isolate, key);
+
+  // If the entry is absent, add a new entry.
+ if (entry == Dictionary::kNotFound) {
+ return Derived::Add(isolate, dictionary, key, value, details);
+ }
+
+ // We don't need to copy over the enumeration index.
+ dictionary->ValueAtPut(entry, *value);
+ if (Shape::kEntrySize == 3) dictionary->DetailsAtPut(isolate, entry, details);
+ return dictionary;
+}
+
+template <typename Derived, typename Shape>
+Handle<Derived>
+BaseNameDictionary<Derived, Shape>::AddNoUpdateNextEnumerationIndex(
+ Isolate* isolate, Handle<Derived> dictionary, Key key, Handle<Object> value,
+ PropertyDetails details, int* entry_out) {
+  // Insert the element at an empty or deleted entry.
+ return Dictionary<Derived, Shape>::Add(isolate, dictionary, key, value,
+ details, entry_out);
+}
+
+template <typename Derived, typename Shape>
+Handle<Derived> BaseNameDictionary<Derived, Shape>::Add(
+ Isolate* isolate, Handle<Derived> dictionary, Key key, Handle<Object> value,
+ PropertyDetails details, int* entry_out) {
+  // Insert the element at an empty or deleted entry.
+ DCHECK_EQ(0, details.dictionary_index());
+  // Assign an enumeration index to the property and update the next
+  // enumeration index.
+ int index = dictionary->NextEnumerationIndex();
+ details = details.set_index(index);
+ dictionary = AddNoUpdateNextEnumerationIndex(isolate, dictionary, key, value,
+ details, entry_out);
+  // Update the enumeration index here in order to avoid potential
+  // modification of the canonical empty dictionary, which lives in
+  // read-only space.
+ dictionary->SetNextEnumerationIndex(index + 1);
+ return dictionary;
+}
+
+template <typename Derived, typename Shape>
+Handle<Derived> Dictionary<Derived, Shape>::Add(Isolate* isolate,
+ Handle<Derived> dictionary,
+ Key key, Handle<Object> value,
+ PropertyDetails details,
+ int* entry_out) {
+ uint32_t hash = Shape::Hash(isolate, key);
+  // Validate that the key is absent.
+ SLOW_DCHECK((dictionary->FindEntry(isolate, key) == Dictionary::kNotFound));
+ // Check whether the dictionary should be extended.
+ dictionary = Derived::EnsureCapacity(isolate, dictionary, 1);
+
+ // Compute the key object.
+ Handle<Object> k = Shape::AsHandle(isolate, key);
+
+ uint32_t entry = dictionary->FindInsertionEntry(hash);
+ dictionary->SetEntry(isolate, entry, *k, *value, details);
+ DCHECK(dictionary->KeyAt(entry).IsNumber() ||
+ Shape::Unwrap(dictionary->KeyAt(entry)).IsUniqueName());
+ dictionary->ElementAdded();
+ if (entry_out) *entry_out = entry;
+ return dictionary;
+}
+
+// static
+Handle<SimpleNumberDictionary> SimpleNumberDictionary::Set(
+ Isolate* isolate, Handle<SimpleNumberDictionary> dictionary, uint32_t key,
+ Handle<Object> value) {
+ return AtPut(isolate, dictionary, key, value, PropertyDetails::Empty());
+}
+
+void NumberDictionary::UpdateMaxNumberKey(uint32_t key,
+ Handle<JSObject> dictionary_holder) {
+ DisallowHeapAllocation no_allocation;
+  // If the dictionary requires slow elements, an element has already
+  // been added at a high index.
+ if (requires_slow_elements()) return;
+ // Check if this index is high enough that we should require slow
+ // elements.
+ if (key > kRequiresSlowElementsLimit) {
+ if (!dictionary_holder.is_null()) {
+ dictionary_holder->RequireSlowElements(*this);
+ }
+ set_requires_slow_elements();
+ return;
+ }
+ // Update max key value.
+ Object max_index_object = get(kMaxNumberKeyIndex);
+ if (!max_index_object.IsSmi() || max_number_key() < key) {
+ FixedArray::set(kMaxNumberKeyIndex,
+ Smi::FromInt(key << kRequiresSlowElementsTagSize));
+ }
+}
+
+Handle<NumberDictionary> NumberDictionary::Set(
+ Isolate* isolate, Handle<NumberDictionary> dictionary, uint32_t key,
+ Handle<Object> value, Handle<JSObject> dictionary_holder,
+ PropertyDetails details) {
+ dictionary->UpdateMaxNumberKey(key, dictionary_holder);
+ return AtPut(isolate, dictionary, key, value, details);
+}
+
+void NumberDictionary::CopyValuesTo(FixedArray elements) {
+ ReadOnlyRoots roots = GetReadOnlyRoots();
+ int pos = 0;
+ int capacity = this->Capacity();
+ DisallowHeapAllocation no_gc;
+ WriteBarrierMode mode = elements.GetWriteBarrierMode(no_gc);
+ for (int i = 0; i < capacity; i++) {
+ Object k;
+ if (this->ToKey(roots, i, &k)) {
+ elements.set(pos++, this->ValueAt(i), mode);
+ }
+ }
+ DCHECK_EQ(pos, elements.length());
+}
+
+template <typename Derived, typename Shape>
+int Dictionary<Derived, Shape>::NumberOfEnumerableProperties() {
+ ReadOnlyRoots roots = this->GetReadOnlyRoots();
+ int capacity = this->Capacity();
+ int result = 0;
+ for (int i = 0; i < capacity; i++) {
+ Object k;
+ if (!this->ToKey(roots, i, &k)) continue;
+ if (k.FilterKey(ENUMERABLE_STRINGS)) continue;
+ PropertyDetails details = this->DetailsAt(i);
+ PropertyAttributes attr = details.attributes();
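+    // The PropertyFilter bits mirror the PropertyAttributes bits, so
+    // (attr & ONLY_ENUMERABLE) == 0 tests that DONT_ENUM is not set.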
+ if ((attr & ONLY_ENUMERABLE) == 0) result++;
+ }
+ return result;
+}
+
+template <typename Dictionary>
+struct EnumIndexComparator {
+ explicit EnumIndexComparator(Dictionary dict) : dict(dict) {}
+ bool operator()(Tagged_t a, Tagged_t b) {
+ PropertyDetails da(dict.DetailsAt(Smi(static_cast<Address>(a)).value()));
+ PropertyDetails db(dict.DetailsAt(Smi(static_cast<Address>(b)).value()));
+ return da.dictionary_index() < db.dictionary_index();
+ }
+ Dictionary dict;
+};
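+
+// The arrays sorted with this comparator hold Smi-encoded entry indices as
+// raw Tagged_t values; entries are ordered by their enumeration index, not
+// by their hash-table position.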
+
+template <typename Derived, typename Shape>
+void BaseNameDictionary<Derived, Shape>::CopyEnumKeysTo(
+ Isolate* isolate, Handle<Derived> dictionary, Handle<FixedArray> storage,
+ KeyCollectionMode mode, KeyAccumulator* accumulator) {
+ DCHECK_IMPLIES(mode != KeyCollectionMode::kOwnOnly, accumulator != nullptr);
+ int length = storage->length();
+ int capacity = dictionary->Capacity();
+ int properties = 0;
+ ReadOnlyRoots roots(isolate);
+ for (int i = 0; i < capacity; i++) {
+ Object key;
+ if (!dictionary->ToKey(roots, i, &key)) continue;
+ bool is_shadowing_key = false;
+ if (key.IsSymbol()) continue;
+ PropertyDetails details = dictionary->DetailsAt(i);
+ if (details.IsDontEnum()) {
+ if (mode == KeyCollectionMode::kIncludePrototypes) {
+ is_shadowing_key = true;
+ } else {
+ continue;
+ }
+ }
+ if (is_shadowing_key) {
+ accumulator->AddShadowingKey(key);
+ continue;
+ } else {
+ storage->set(properties, Smi::FromInt(i));
+ }
+ properties++;
+ if (mode == KeyCollectionMode::kOwnOnly && properties == length) break;
+ }
+
+ CHECK_EQ(length, properties);
+ DisallowHeapAllocation no_gc;
+ Derived raw_dictionary = *dictionary;
+ FixedArray raw_storage = *storage;
+ EnumIndexComparator<Derived> cmp(raw_dictionary);
+ // Use AtomicSlot wrapper to ensure that std::sort uses atomic load and
+ // store operations that are safe for concurrent marking.
+ AtomicSlot start(storage->GetFirstElementAddress());
+ std::sort(start, start + length, cmp);
+ for (int i = 0; i < length; i++) {
+ int index = Smi::ToInt(raw_storage.get(i));
+ raw_storage.set(i, raw_dictionary.NameAt(index));
+ }
+}
+
+template <typename Derived, typename Shape>
+Handle<FixedArray> BaseNameDictionary<Derived, Shape>::IterationIndices(
+ Isolate* isolate, Handle<Derived> dictionary) {
+ int capacity = dictionary->Capacity();
+ int length = dictionary->NumberOfElements();
+ Handle<FixedArray> array = isolate->factory()->NewFixedArray(length);
+ ReadOnlyRoots roots(isolate);
+ int array_size = 0;
+ {
+ DisallowHeapAllocation no_gc;
+ Derived raw_dictionary = *dictionary;
+ for (int i = 0; i < capacity; i++) {
+ Object k;
+ if (!raw_dictionary.ToKey(roots, i, &k)) continue;
+ array->set(array_size++, Smi::FromInt(i));
+ }
+
+ DCHECK_EQ(array_size, length);
+
+ EnumIndexComparator<Derived> cmp(raw_dictionary);
+ // Use AtomicSlot wrapper to ensure that std::sort uses atomic load and
+ // store operations that are safe for concurrent marking.
+ AtomicSlot start(array->GetFirstElementAddress());
+ std::sort(start, start + array_size, cmp);
+ }
+ return FixedArray::ShrinkOrEmpty(isolate, array, array_size);
+}
+
+template <typename Derived, typename Shape>
+void BaseNameDictionary<Derived, Shape>::CollectKeysTo(
+ Handle<Derived> dictionary, KeyAccumulator* keys) {
+ Isolate* isolate = keys->isolate();
+ ReadOnlyRoots roots(isolate);
+ int capacity = dictionary->Capacity();
+ Handle<FixedArray> array =
+ isolate->factory()->NewFixedArray(dictionary->NumberOfElements());
+ int array_size = 0;
+ PropertyFilter filter = keys->filter();
+ {
+ DisallowHeapAllocation no_gc;
+ Derived raw_dictionary = *dictionary;
+ for (int i = 0; i < capacity; i++) {
+ Object k;
+ if (!raw_dictionary.ToKey(roots, i, &k)) continue;
+ if (k.FilterKey(filter)) continue;
+ PropertyDetails details = raw_dictionary.DetailsAt(i);
+ if ((details.attributes() & filter) != 0) {
+ keys->AddShadowingKey(k);
+ continue;
+ }
+ if (filter & ONLY_ALL_CAN_READ) {
+ if (details.kind() != kAccessor) continue;
+ Object accessors = raw_dictionary.ValueAt(i);
+ if (!accessors.IsAccessorInfo()) continue;
+ if (!AccessorInfo::cast(accessors).all_can_read()) continue;
+ }
+ array->set(array_size++, Smi::FromInt(i));
+ }
+
+ EnumIndexComparator<Derived> cmp(raw_dictionary);
+ // Use AtomicSlot wrapper to ensure that std::sort uses atomic load and
+ // store operations that are safe for concurrent marking.
+ AtomicSlot start(array->GetFirstElementAddress());
+ std::sort(start, start + array_size, cmp);
+ }
+
+ bool has_seen_symbol = false;
+ for (int i = 0; i < array_size; i++) {
+ int index = Smi::ToInt(array->get(i));
+ Object key = dictionary->NameAt(index);
+ if (key.IsSymbol()) {
+ has_seen_symbol = true;
+ continue;
+ }
+ keys->AddKey(key, DO_NOT_CONVERT);
+ }
+ if (has_seen_symbol) {
+ for (int i = 0; i < array_size; i++) {
+ int index = Smi::ToInt(array->get(i));
+ Object key = dictionary->NameAt(index);
+ if (!key.IsSymbol()) continue;
+ keys->AddKey(key, DO_NOT_CONVERT);
+ }
+ }
+}
+
+// Backwards lookup (slow).
+template <typename Derived, typename Shape>
+Object Dictionary<Derived, Shape>::SlowReverseLookup(Object value) {
+ Derived dictionary = Derived::cast(*this);
+ ReadOnlyRoots roots = dictionary.GetReadOnlyRoots();
+ int capacity = dictionary.Capacity();
+ for (int i = 0; i < capacity; i++) {
+ Object k;
+ if (!dictionary.ToKey(roots, i, &k)) continue;
+ Object e = dictionary.ValueAt(i);
+ if (e == value) return k;
+ }
+ return roots.undefined_value();
+}
+
+template <typename Derived, typename Shape>
+void ObjectHashTableBase<Derived, Shape>::FillEntriesWithHoles(
+ Handle<Derived> table) {
+ int length = table->length();
+ for (int i = Derived::EntryToIndex(0); i < length; i++) {
+ table->set_the_hole(i);
+ }
+}
+
+template <typename Derived, typename Shape>
+Object ObjectHashTableBase<Derived, Shape>::Lookup(ReadOnlyRoots roots,
+ Handle<Object> key,
+ int32_t hash) {
+ DisallowHeapAllocation no_gc;
+ DCHECK(this->IsKey(roots, *key));
+
+ int entry = this->FindEntry(roots, key, hash);
+ if (entry == kNotFound) return roots.the_hole_value();
+ return this->get(Derived::EntryToIndex(entry) + 1);
+}
+
+template <typename Derived, typename Shape>
+Object ObjectHashTableBase<Derived, Shape>::Lookup(Handle<Object> key) {
+ DisallowHeapAllocation no_gc;
+
+ ReadOnlyRoots roots = this->GetReadOnlyRoots();
+ DCHECK(this->IsKey(roots, *key));
+
+ // If the object does not have an identity hash, it was never used as a key.
+ Object hash = key->GetHash();
+ if (hash.IsUndefined(roots)) {
+ return roots.the_hole_value();
+ }
+ return Lookup(roots, key, Smi::ToInt(hash));
+}
+
+template <typename Derived, typename Shape>
+Object ObjectHashTableBase<Derived, Shape>::Lookup(Handle<Object> key,
+ int32_t hash) {
+ return Lookup(this->GetReadOnlyRoots(), key, hash);
+}
+
+template <typename Derived, typename Shape>
+Object ObjectHashTableBase<Derived, Shape>::ValueAt(int entry) {
+ return this->get(EntryToValueIndex(entry));
+}
+
+template <typename Derived, typename Shape>
+Handle<Derived> ObjectHashTableBase<Derived, Shape>::Put(Handle<Derived> table,
+ Handle<Object> key,
+ Handle<Object> value) {
+ Isolate* isolate = Heap::FromWritableHeapObject(*table)->isolate();
+ DCHECK(table->IsKey(ReadOnlyRoots(isolate), *key));
+ DCHECK(!value->IsTheHole(ReadOnlyRoots(isolate)));
+
+ // Make sure the key object has an identity hash code.
+ int32_t hash = key->GetOrCreateHash(isolate).value();
+
+ return ObjectHashTableBase<Derived, Shape>::Put(isolate, table, key, value,
+ hash);
+}
+
+template <typename Derived, typename Shape>
+Handle<Derived> ObjectHashTableBase<Derived, Shape>::Put(Isolate* isolate,
+ Handle<Derived> table,
+ Handle<Object> key,
+ Handle<Object> value,
+ int32_t hash) {
+ ReadOnlyRoots roots(isolate);
+ DCHECK(table->IsKey(roots, *key));
+ DCHECK(!value->IsTheHole(roots));
+
+ int entry = table->FindEntry(roots, key, hash);
+
+ // Key is already in table, just overwrite value.
+ if (entry != kNotFound) {
+ table->set(Derived::EntryToValueIndex(entry), *value);
+ return table;
+ }
+
+ // Rehash if more than 33% of the entries are deleted entries.
+  // TODO(jochen): Consider shrinking the fixed array in place.
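+  // ((nod << 1) > nof is equivalent to nod / (nod + nof) > 1/3, i.e. more
+  // than a third of the used slots are tombstones.)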
+ if ((table->NumberOfDeletedElements() << 1) > table->NumberOfElements()) {
+ table->Rehash(roots);
+ }
+ // If we're out of luck, we didn't get a GC recently, and so rehashing
+ // isn't enough to avoid a crash.
+ if (!table->HasSufficientCapacityToAdd(1)) {
+ int nof = table->NumberOfElements() + 1;
+ int capacity = ObjectHashTable::ComputeCapacity(nof * 2);
+ if (capacity > ObjectHashTable::kMaxCapacity) {
+ for (size_t i = 0; i < 2; ++i) {
+ isolate->heap()->CollectAllGarbage(
+ Heap::kNoGCFlags, GarbageCollectionReason::kFullHashtable);
+ }
+ table->Rehash(roots);
+ }
+ }
+
+ // Check whether the hash table should be extended.
+ table = Derived::EnsureCapacity(isolate, table, 1);
+ table->AddEntry(table->FindInsertionEntry(hash), *key, *value);
+ return table;
+}
+
+template <typename Derived, typename Shape>
+Handle<Derived> ObjectHashTableBase<Derived, Shape>::Remove(
+ Isolate* isolate, Handle<Derived> table, Handle<Object> key,
+ bool* was_present) {
+ DCHECK(table->IsKey(table->GetReadOnlyRoots(), *key));
+
+ Object hash = key->GetHash();
+ if (hash.IsUndefined()) {
+ *was_present = false;
+ return table;
+ }
+
+ return Remove(isolate, table, key, was_present, Smi::ToInt(hash));
+}
+
+template <typename Derived, typename Shape>
+Handle<Derived> ObjectHashTableBase<Derived, Shape>::Remove(
+ Isolate* isolate, Handle<Derived> table, Handle<Object> key,
+ bool* was_present, int32_t hash) {
+ ReadOnlyRoots roots = table->GetReadOnlyRoots();
+ DCHECK(table->IsKey(roots, *key));
+
+ int entry = table->FindEntry(roots, key, hash);
+ if (entry == kNotFound) {
+ *was_present = false;
+ return table;
+ }
+
+ *was_present = true;
+ table->RemoveEntry(entry);
+ return Derived::Shrink(isolate, table);
+}
+
+template <typename Derived, typename Shape>
+void ObjectHashTableBase<Derived, Shape>::AddEntry(int entry, Object key,
+ Object value) {
+ Derived* self = static_cast<Derived*>(this);
+ self->set_key(Derived::EntryToIndex(entry), key);
+ self->set(Derived::EntryToValueIndex(entry), value);
+ self->ElementAdded();
+}
+
+template <typename Derived, typename Shape>
+void ObjectHashTableBase<Derived, Shape>::RemoveEntry(int entry) {
+ this->set_the_hole(Derived::EntryToIndex(entry));
+ this->set_the_hole(Derived::EntryToValueIndex(entry));
+ this->ElementRemoved();
+}
+
+void JSSet::Initialize(Handle<JSSet> set, Isolate* isolate) {
+ Handle<OrderedHashSet> table = isolate->factory()->NewOrderedHashSet();
+ set->set_table(*table);
+}
+
+void JSSet::Clear(Isolate* isolate, Handle<JSSet> set) {
+ Handle<OrderedHashSet> table(OrderedHashSet::cast(set->table()), isolate);
+ table = OrderedHashSet::Clear(isolate, table);
+ set->set_table(*table);
+}
+
+void JSMap::Initialize(Handle<JSMap> map, Isolate* isolate) {
+ Handle<OrderedHashMap> table = isolate->factory()->NewOrderedHashMap();
+ map->set_table(*table);
+}
+
+void JSMap::Clear(Isolate* isolate, Handle<JSMap> map) {
+ Handle<OrderedHashMap> table(OrderedHashMap::cast(map->table()), isolate);
+ table = OrderedHashMap::Clear(isolate, table);
+ map->set_table(*table);
+}
+
+void JSWeakCollection::Initialize(Handle<JSWeakCollection> weak_collection,
+ Isolate* isolate) {
+ Handle<EphemeronHashTable> table = EphemeronHashTable::New(isolate, 0);
+ weak_collection->set_table(*table);
+}
+
+void JSWeakCollection::Set(Handle<JSWeakCollection> weak_collection,
+ Handle<Object> key, Handle<Object> value,
+ int32_t hash) {
+ DCHECK(key->IsJSReceiver() || key->IsSymbol());
+ Handle<EphemeronHashTable> table(
+ EphemeronHashTable::cast(weak_collection->table()),
+ weak_collection->GetIsolate());
+ DCHECK(table->IsKey(weak_collection->GetReadOnlyRoots(), *key));
+ Handle<EphemeronHashTable> new_table = EphemeronHashTable::Put(
+ weak_collection->GetIsolate(), table, key, value, hash);
+ weak_collection->set_table(*new_table);
+ if (*table != *new_table) {
+ // Zap the old table since we didn't record slots for its elements.
+ EphemeronHashTable::FillEntriesWithHoles(table);
+ }
+}
+
+bool JSWeakCollection::Delete(Handle<JSWeakCollection> weak_collection,
+ Handle<Object> key, int32_t hash) {
+ DCHECK(key->IsJSReceiver() || key->IsSymbol());
+ Handle<EphemeronHashTable> table(
+ EphemeronHashTable::cast(weak_collection->table()),
+ weak_collection->GetIsolate());
+ DCHECK(table->IsKey(weak_collection->GetReadOnlyRoots(), *key));
+ bool was_present = false;
+ Handle<EphemeronHashTable> new_table = EphemeronHashTable::Remove(
+ weak_collection->GetIsolate(), table, key, &was_present, hash);
+ weak_collection->set_table(*new_table);
+ if (*table != *new_table) {
+ // Zap the old table since we didn't record slots for its elements.
+ EphemeronHashTable::FillEntriesWithHoles(table);
+ }
+ return was_present;
+}
+
+Handle<JSArray> JSWeakCollection::GetEntries(Handle<JSWeakCollection> holder,
+ int max_entries) {
+ Isolate* isolate = holder->GetIsolate();
+ Handle<EphemeronHashTable> table(EphemeronHashTable::cast(holder->table()),
+ isolate);
+ if (max_entries == 0 || max_entries > table->NumberOfElements()) {
+ max_entries = table->NumberOfElements();
+ }
+ int values_per_entry = holder->IsJSWeakMap() ? 2 : 1;
+ Handle<FixedArray> entries =
+ isolate->factory()->NewFixedArray(max_entries * values_per_entry);
+  // Recompute max_entries because GC could have removed elements from the
+  // table.
+ if (max_entries > table->NumberOfElements()) {
+ max_entries = table->NumberOfElements();
+ }
+
+ {
+ DisallowHeapAllocation no_gc;
+ ReadOnlyRoots roots = ReadOnlyRoots(isolate);
+ int count = 0;
+ for (int i = 0;
+ count / values_per_entry < max_entries && i < table->Capacity(); i++) {
+ Object key;
+ if (table->ToKey(roots, i, &key)) {
+ entries->set(count++, key);
+ if (values_per_entry > 1) {
+ Object value = table->Lookup(handle(key, isolate));
+ entries->set(count++, value);
+ }
+ }
+ }
+ DCHECK_EQ(max_entries * values_per_entry, count);
+ }
+ return isolate->factory()->NewJSArrayWithElements(entries);
+}
+
+Handle<PropertyCell> PropertyCell::InvalidateEntry(
+ Isolate* isolate, Handle<GlobalDictionary> dictionary, int entry) {
+  // Swap the dictionary entry with a fresh copy of the cell; the old cell
+  // is invalidated below.
+ Handle<PropertyCell> cell(dictionary->CellAt(entry), isolate);
+ Handle<Name> name(cell->name(), isolate);
+ Handle<PropertyCell> new_cell = isolate->factory()->NewPropertyCell(name);
+ new_cell->set_value(cell->value());
+ dictionary->ValueAtPut(entry, *new_cell);
+ bool is_the_hole = cell->value().IsTheHole(isolate);
+ // Cell is officially mutable henceforth.
+ PropertyDetails details = cell->property_details();
+ details = details.set_cell_type(is_the_hole ? PropertyCellType::kUninitialized
+ : PropertyCellType::kMutable);
+ new_cell->set_property_details(details);
+ // Old cell is ready for invalidation.
+ if (is_the_hole) {
+ cell->set_value(ReadOnlyRoots(isolate).undefined_value());
+ } else {
+ cell->set_value(ReadOnlyRoots(isolate).the_hole_value());
+ }
+ details = details.set_cell_type(PropertyCellType::kInvalidated);
+ cell->set_property_details(details);
+ cell->dependent_code().DeoptimizeDependentCodeGroup(
+ isolate, DependentCode::kPropertyCellChangedGroup);
+ return new_cell;
+}
+
+PropertyCellConstantType PropertyCell::GetConstantType() {
+ if (value().IsSmi()) return PropertyCellConstantType::kSmi;
+ return PropertyCellConstantType::kStableMap;
+}
+
+static bool RemainsConstantType(Handle<PropertyCell> cell,
+ Handle<Object> value) {
+ // TODO(dcarney): double->smi and smi->double transition from kConstant
+ if (cell->value().IsSmi() && value->IsSmi()) {
+ return true;
+ } else if (cell->value().IsHeapObject() && value->IsHeapObject()) {
+ return HeapObject::cast(cell->value()).map() ==
+ HeapObject::cast(*value).map() &&
+ HeapObject::cast(*value).map().is_stable();
+ }
+ return false;
+}
+
+PropertyCellType PropertyCell::UpdatedType(Isolate* isolate,
+ Handle<PropertyCell> cell,
+ Handle<Object> value,
+ PropertyDetails details) {
+ PropertyCellType type = details.cell_type();
+ DCHECK(!value->IsTheHole(isolate));
+ if (cell->value().IsTheHole(isolate)) {
+ switch (type) {
+ // Only allow a cell to transition once into constant state.
+ case PropertyCellType::kUninitialized:
+ if (value->IsUndefined(isolate)) return PropertyCellType::kUndefined;
+ return PropertyCellType::kConstant;
+ case PropertyCellType::kInvalidated:
+ return PropertyCellType::kMutable;
+ default:
+ UNREACHABLE();
+ }
+ }
+ switch (type) {
+ case PropertyCellType::kUndefined:
+ return PropertyCellType::kConstant;
+ case PropertyCellType::kConstant:
+ if (*value == cell->value()) return PropertyCellType::kConstant;
+ V8_FALLTHROUGH;
+ case PropertyCellType::kConstantType:
+ if (RemainsConstantType(cell, value)) {
+ return PropertyCellType::kConstantType;
+ }
+ V8_FALLTHROUGH;
+ case PropertyCellType::kMutable:
+ return PropertyCellType::kMutable;
+ }
+ UNREACHABLE();
+}
+
+Handle<PropertyCell> PropertyCell::PrepareForValue(
+ Isolate* isolate, Handle<GlobalDictionary> dictionary, int entry,
+ Handle<Object> value, PropertyDetails details) {
+ DCHECK(!value->IsTheHole(isolate));
+ Handle<PropertyCell> cell(dictionary->CellAt(entry), isolate);
+ const PropertyDetails original_details = cell->property_details();
+  // Data accesses could be cached in ICs or optimized code.
+ bool invalidate =
+ (original_details.kind() == kData && details.kind() == kAccessor) ||
+ (!original_details.IsReadOnly() && details.IsReadOnly());
+ int index;
+ PropertyCellType old_type = original_details.cell_type();
+ // Preserve the enumeration index unless the property was deleted or never
+ // initialized.
+ if (cell->value().IsTheHole(isolate)) {
+ index = dictionary->NextEnumerationIndex();
+ dictionary->SetNextEnumerationIndex(index + 1);
+ } else {
+ index = original_details.dictionary_index();
+ }
+ DCHECK_LT(0, index);
+ details = details.set_index(index);
+
+ PropertyCellType new_type =
+ UpdatedType(isolate, cell, value, original_details);
+ if (invalidate) {
+ cell = PropertyCell::InvalidateEntry(isolate, dictionary, entry);
+ }
+
+ // Install new property details.
+ details = details.set_cell_type(new_type);
+ cell->set_property_details(details);
+
+ if (new_type == PropertyCellType::kConstant ||
+ new_type == PropertyCellType::kConstantType) {
+    // Store the value now to ensure that the cell contains the constant or
+    // type information. Otherwise a subsequent store operation would turn
+    // the cell mutable.
+ cell->set_value(*value);
+ }
+
+ // Deopt when transitioning from a constant type.
+ if (!invalidate && (old_type != new_type ||
+ original_details.IsReadOnly() != details.IsReadOnly())) {
+ cell->dependent_code().DeoptimizeDependentCodeGroup(
+ isolate, DependentCode::kPropertyCellChangedGroup);
+ }
+ return cell;
+}
+
+// static
+void PropertyCell::SetValueWithInvalidation(Isolate* isolate,
+ Handle<PropertyCell> cell,
+ Handle<Object> new_value) {
+ if (cell->value() != *new_value) {
+ cell->set_value(*new_value);
+ cell->dependent_code().DeoptimizeDependentCodeGroup(
+ isolate, DependentCode::kPropertyCellChangedGroup);
+ }
+}
+
+int JSGeneratorObject::source_position() const {
+ CHECK(is_suspended());
+ DCHECK(function().shared().HasBytecodeArray());
+ DCHECK(function().shared().GetBytecodeArray().HasSourcePositionTable());
+
+ int code_offset = Smi::ToInt(input_or_debug_pos());
+
+ // The stored bytecode offset is relative to a different base than what
+ // is used in the source position table, hence the subtraction.
+ code_offset -= BytecodeArray::kHeaderSize - kHeapObjectTag;
+ AbstractCode code =
+ AbstractCode::cast(function().shared().GetBytecodeArray());
+ return code.SourcePosition(code_offset);
+}
+
+// static
+AccessCheckInfo AccessCheckInfo::Get(Isolate* isolate,
+ Handle<JSObject> receiver) {
+ DisallowHeapAllocation no_gc;
+ DCHECK(receiver->map().is_access_check_needed());
+ Object maybe_constructor = receiver->map().GetConstructor();
+ if (maybe_constructor.IsFunctionTemplateInfo()) {
+ Object data_obj =
+ FunctionTemplateInfo::cast(maybe_constructor).GetAccessCheckInfo();
+ if (data_obj.IsUndefined(isolate)) return AccessCheckInfo();
+ return AccessCheckInfo::cast(data_obj);
+ }
+ // Might happen for a detached context.
+ if (!maybe_constructor.IsJSFunction()) return AccessCheckInfo();
+ JSFunction constructor = JSFunction::cast(maybe_constructor);
+ // Might happen for the debug context.
+ if (!constructor.shared().IsApiFunction()) return AccessCheckInfo();
+
+ Object data_obj =
+ constructor.shared().get_api_func_data().GetAccessCheckInfo();
+ if (data_obj.IsUndefined(isolate)) return AccessCheckInfo();
+
+ return AccessCheckInfo::cast(data_obj);
+}
+
+MaybeHandle<Name> FunctionTemplateInfo::TryGetCachedPropertyName(
+ Isolate* isolate, Handle<Object> getter) {
+ if (getter->IsFunctionTemplateInfo()) {
+ Handle<FunctionTemplateInfo> fti =
+ Handle<FunctionTemplateInfo>::cast(getter);
+ // Check if the accessor uses a cached property.
+ if (!fti->cached_property_name().IsTheHole(isolate)) {
+ return handle(Name::cast(fti->cached_property_name()), isolate);
+ }
+ }
+ return MaybeHandle<Name>();
+}
+
+Address Smi::LexicographicCompare(Isolate* isolate, Smi x, Smi y) {
+ DisallowHeapAllocation no_allocation;
+ DisallowJavascriptExecution no_js(isolate);
+
+ int x_value = Smi::ToInt(x);
+ int y_value = Smi::ToInt(y);
+
+  // If the integers are equal, so are the string representations.
+ if (x_value == y_value) return Smi::FromInt(0).ptr();
+
+ // If one of the integers is zero the normal integer order is the
+ // same as the lexicographic order of the string representations.
+ if (x_value == 0 || y_value == 0) {
+ return Smi::FromInt(x_value < y_value ? -1 : 1).ptr();
+ }
+
+  // If only one of the integers is negative, the negative number is
+  // smaller because the char code of '-' is less than the char code
+  // of any digit. Otherwise, we make both values positive.
+
+  // Use unsigned values; otherwise the logic is incorrect for -MIN_INT on
+  // architectures using 32-bit Smis.
+ uint32_t x_scaled = x_value;
+ uint32_t y_scaled = y_value;
+ if (x_value < 0) {
+ if (y_value >= 0) {
+ return Smi::FromInt(-1).ptr();
+ } else {
+ y_scaled = base::NegateWithWraparound(y_value);
+ }
+ x_scaled = base::NegateWithWraparound(x_value);
+ } else if (y_value < 0) {
+ return Smi::FromInt(1).ptr();
+ }
+
+ // clang-format off
+ static const uint32_t kPowersOf10[] = {
+ 1, 10, 100, 1000,
+ 10 * 1000, 100 * 1000, 1000 * 1000, 10 * 1000 * 1000,
+ 100 * 1000 * 1000, 1000 * 1000 * 1000};
+ // clang-format on
+
+  // If the integers have the same number of decimal digits, they can be
+  // compared directly, as the numeric order is the same as the
+  // lexicographic order. If one integer has fewer digits, it is scaled
+  // by some power of 10 to have the same number of digits as the longer
+  // integer. If the scaled integers are equal, it means the shorter
+  // integer comes first in the lexicographic order.
+
+ // From http://graphics.stanford.edu/~seander/bithacks.html#IntegerLog10
+ int x_log2 = 31 - base::bits::CountLeadingZeros(x_scaled);
+ int x_log10 = ((x_log2 + 1) * 1233) >> 12;
+ x_log10 -= x_scaled < kPowersOf10[x_log10];
+
+ int y_log2 = 31 - base::bits::CountLeadingZeros(y_scaled);
+ int y_log10 = ((y_log2 + 1) * 1233) >> 12;
+ y_log10 -= y_scaled < kPowersOf10[y_log10];
+
+ int tie = 0;
+
+ if (x_log10 < y_log10) {
+ // X has fewer digits. We would like to simply scale up X but that
+  // might overflow, e.g. when comparing 9 with 1_000_000_000, 9 would
+ // be scaled up to 9_000_000_000. So we scale up by the next
+ // smallest power and scale down Y to drop one digit. It is OK to
+ // drop one digit from the longer integer since the final digit is
+ // past the length of the shorter integer.
+ x_scaled *= kPowersOf10[y_log10 - x_log10 - 1];
+ y_scaled /= 10;
+ tie = -1;
+ } else if (y_log10 < x_log10) {
+ y_scaled *= kPowersOf10[x_log10 - y_log10 - 1];
+ x_scaled /= 10;
+ tie = 1;
+ }
+
+ if (x_scaled < y_scaled) return Smi::FromInt(-1).ptr();
+ if (x_scaled > y_scaled) return Smi::FromInt(1).ptr();
+ return Smi::FromInt(tie).ptr();
+}
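+
+// Worked examples of the scaling above (illustrative only): comparing 9
+// with 100, x_log10 == 0 and y_log10 == 2, so x is scaled up by
+// 10^(2-0-1) to 90 and y is scaled down to 10; 90 > 10 yields 1,
+// matching "9" > "100". Comparing 12 with 123 gives 12 vs. 12 after
+// scaling, so the tie value -1 is returned, matching the fact that "12"
+// is a proper prefix of "123".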
+
+// Force instantiation of template instance classes.
+// Please note this list is compiler-dependent.
+// Keep this at the end of this file.
+
+template class HashTable<StringTable, StringTableShape>;
+
+template class EXPORT_TEMPLATE_DEFINE(
+ V8_EXPORT_PRIVATE) HashTable<CompilationCacheTable, CompilationCacheShape>;
+
+template class EXPORT_TEMPLATE_DEFINE(
+ V8_EXPORT_PRIVATE) HashTable<ObjectHashTable, ObjectHashTableShape>;
+
+template class EXPORT_TEMPLATE_DEFINE(
+ V8_EXPORT_PRIVATE) HashTable<ObjectHashSet, ObjectHashSetShape>;
+
+template class EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
+ ObjectHashTableBase<ObjectHashTable, ObjectHashTableShape>;
+
+template class EXPORT_TEMPLATE_DEFINE(
+ V8_EXPORT_PRIVATE) HashTable<EphemeronHashTable, EphemeronHashTableShape>;
+
+template class EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
+ ObjectHashTableBase<EphemeronHashTable, EphemeronHashTableShape>;
+
+template class EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
+ BaseNameDictionary<NameDictionary, NameDictionaryShape>;
+
+template class EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
+ BaseNameDictionary<GlobalDictionary, GlobalDictionaryShape>;
+
+template class EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
+ Dictionary<NameDictionary, NameDictionaryShape>;
+
+template class EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
+ Dictionary<GlobalDictionary, GlobalDictionaryShape>;
+
+template class EXPORT_TEMPLATE_DEFINE(
+ V8_EXPORT_PRIVATE) HashTable<NumberDictionary, NumberDictionaryShape>;
+
+template class EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
+ Dictionary<NumberDictionary, NumberDictionaryShape>;
+
+template class EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
+ HashTable<SimpleNumberDictionary, SimpleNumberDictionaryShape>;
+
+template class EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
+ Dictionary<SimpleNumberDictionary, SimpleNumberDictionaryShape>;
+
+template Handle<NameDictionary>
+HashTable<NameDictionary, NameDictionaryShape>::New(Isolate*, int,
+ AllocationType,
+ MinimumCapacity);
+
+template V8_EXPORT_PRIVATE Handle<NameDictionary>
+HashTable<NameDictionary, NameDictionaryShape>::Shrink(Isolate* isolate,
+ Handle<NameDictionary>,
+ int additionalCapacity);
+
+void JSFinalizationGroup::Cleanup(
+ Handle<JSFinalizationGroup> finalization_group, Isolate* isolate) {
+ // It's possible that the cleared_cells list is empty, since
+ // FinalizationGroup.unregister() removed all its elements before this task
+ // ran. In that case, don't call the cleanup function.
+ if (!finalization_group->cleared_cells().IsUndefined(isolate)) {
+ // Construct the iterator.
+ Handle<JSFinalizationGroupCleanupIterator> iterator;
+ {
+ Handle<Map> cleanup_iterator_map(
+ isolate->native_context()
+ ->js_finalization_group_cleanup_iterator_map(),
+ isolate);
+ iterator = Handle<JSFinalizationGroupCleanupIterator>::cast(
+ isolate->factory()->NewJSObjectFromMap(
+ cleanup_iterator_map, AllocationType::kYoung,
+ Handle<AllocationSite>::null()));
+ iterator->set_finalization_group(*finalization_group);
+ }
+ Handle<Object> cleanup(finalization_group->cleanup(), isolate);
+
+ v8::TryCatch try_catch(reinterpret_cast<v8::Isolate*>(isolate));
+ v8::Local<v8::Value> result;
+ MaybeHandle<Object> exception;
+ Handle<Object> args[] = {iterator};
+ bool has_pending_exception = !ToLocal<Value>(
+ Execution::TryCall(
+ isolate, cleanup,
+ handle(ReadOnlyRoots(isolate).undefined_value(), isolate), 1, args,
+ Execution::MessageHandling::kReport, &exception),
+ &result);
+ // TODO(marja): (spec): What if there's an exception?
+ USE(has_pending_exception);
+
+ // TODO(marja): (spec): Should the iterator be invalidated after the
+ // function returns?
+ }
+}
+
+MaybeHandle<FixedArray> JSReceiver::GetPrivateEntries(
+ Isolate* isolate, Handle<JSReceiver> receiver) {
+ PropertyFilter key_filter = static_cast<PropertyFilter>(PRIVATE_NAMES_ONLY);
+
+ Handle<FixedArray> keys;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, keys,
+ KeyAccumulator::GetKeys(receiver, KeyCollectionMode::kOwnOnly, key_filter,
+ GetKeysConversion::kConvertToString),
+ MaybeHandle<FixedArray>());
+
+ Handle<FixedArray> entries =
+ isolate->factory()->NewFixedArray(keys->length() * 2);
+ int length = 0;
+
+ for (int i = 0; i < keys->length(); ++i) {
+ Handle<Object> obj_key = handle(keys->get(i), isolate);
+ Handle<Symbol> key(Symbol::cast(*obj_key), isolate);
+ CHECK(key->is_private_name());
+ Handle<Object> value;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, value, Object::GetProperty(isolate, receiver, key),
+ MaybeHandle<FixedArray>());
+
+ entries->set(length++, *key);
+ entries->set(length++, *value);
+ }
+ DCHECK_EQ(length, entries->length());
+ return FixedArray::ShrinkOrEmpty(isolate, entries, length);
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/objects/objects.h b/deps/v8/src/objects/objects.h
new file mode 100644
index 0000000000..857f3ed0f6
--- /dev/null
+++ b/deps/v8/src/objects/objects.h
@@ -0,0 +1,836 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_OBJECTS_H_
+#define V8_OBJECTS_OBJECTS_H_
+
+#include <iosfwd>
+#include <memory>
+
+#include "include/v8-internal.h"
+#include "include/v8.h"
+#include "include/v8config.h"
+#include "src/base/bits.h"
+#include "src/base/build_config.h"
+#include "src/base/flags.h"
+#include "src/base/logging.h"
+#include "src/codegen/constants-arch.h"
+#include "src/common/assert-scope.h"
+#include "src/common/checks.h"
+#include "src/execution/message-template.h"
+#include "src/flags/flags.h"
+#include "src/objects/elements-kind.h"
+#include "src/objects/field-index.h"
+#include "src/objects/object-list-macros.h"
+#include "src/objects/objects-definitions.h"
+#include "src/objects/property-details.h"
+#include "src/objects/tagged-impl.h"
+#include "src/utils/utils.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+//
+// Most object types in the V8 JavaScript heap are described in this file.
+//
+// Inheritance hierarchy:
+// - Object
+// - Smi (immediate small integer)
+// - HeapObject (superclass for everything allocated in the heap)
+// - JSReceiver (suitable for property access)
+// - JSObject
+// - JSArray
+// - JSArrayBuffer
+// - JSArrayBufferView
+// - JSTypedArray
+// - JSDataView
+// - JSBoundFunction
+// - JSCollection
+// - JSSet
+// - JSMap
+// - JSStringIterator
+// - JSSetIterator
+// - JSMapIterator
+// - JSWeakCollection
+// - JSWeakMap
+// - JSWeakSet
+// - JSRegExp
+// - JSFunction
+// - JSGeneratorObject
+// - JSGlobalObject
+// - JSGlobalProxy
+// - JSValue
+// - JSDate
+// - JSMessageObject
+// - JSModuleNamespace
+// - JSV8BreakIterator // If V8_INTL_SUPPORT enabled.
+// - JSCollator // If V8_INTL_SUPPORT enabled.
+// - JSDateTimeFormat // If V8_INTL_SUPPORT enabled.
+// - JSListFormat // If V8_INTL_SUPPORT enabled.
+// - JSLocale // If V8_INTL_SUPPORT enabled.
+// - JSNumberFormat // If V8_INTL_SUPPORT enabled.
+// - JSPluralRules // If V8_INTL_SUPPORT enabled.
+// - JSRelativeTimeFormat // If V8_INTL_SUPPORT enabled.
+// - JSSegmentIterator // If V8_INTL_SUPPORT enabled.
+// - JSSegmenter // If V8_INTL_SUPPORT enabled.
+// - WasmExceptionObject
+// - WasmGlobalObject
+// - WasmInstanceObject
+// - WasmMemoryObject
+// - WasmModuleObject
+// - WasmTableObject
+// - JSProxy
+// - FixedArrayBase
+// - ByteArray
+// - BytecodeArray
+// - FixedArray
+// - FrameArray
+// - HashTable
+// - Dictionary
+// - StringTable
+// - StringSet
+// - CompilationCacheTable
+// - MapCache
+// - OrderedHashTable
+// - OrderedHashSet
+// - OrderedHashMap
+// - FeedbackMetadata
+// - TemplateList
+// - TransitionArray
+// - ScopeInfo
+// - ModuleInfo
+// - ScriptContextTable
+// - ClosureFeedbackCellArray
+// - FixedDoubleArray
+// - Name
+// - String
+// - SeqString
+// - SeqOneByteString
+// - SeqTwoByteString
+// - SlicedString
+// - ConsString
+// - ThinString
+// - ExternalString
+// - ExternalOneByteString
+// - ExternalTwoByteString
+// - InternalizedString
+// - SeqInternalizedString
+// - SeqOneByteInternalizedString
+// - SeqTwoByteInternalizedString
+// - ConsInternalizedString
+// - ExternalInternalizedString
+// - ExternalOneByteInternalizedString
+// - ExternalTwoByteInternalizedString
+// - Symbol
+// - Context
+// - NativeContext
+// - HeapNumber
+// - BigInt
+// - Cell
+// - DescriptorArray
+// - PropertyCell
+// - PropertyArray
+// - Code
+// - AbstractCode, a wrapper around Code or BytecodeArray
+// - Map
+// - Oddball
+// - Foreign
+// - SmallOrderedHashTable
+// - SmallOrderedHashMap
+// - SmallOrderedHashSet
+// - SharedFunctionInfo
+// - Struct
+// - AccessorInfo
+// - AsmWasmData
+// - PromiseReaction
+// - PromiseCapability
+// - AccessorPair
+// - AccessCheckInfo
+// - InterceptorInfo
+// - CallHandlerInfo
+// - EnumCache
+// - TemplateInfo
+// - FunctionTemplateInfo
+// - ObjectTemplateInfo
+// - Script
+// - DebugInfo
+// - BreakPoint
+// - BreakPointInfo
+// - StackFrameInfo
+// - StackTraceFrame
+// - SourcePositionTableWithFrameCache
+// - CodeCache
+// - PrototypeInfo
+// - Microtask
+// - CallbackTask
+// - CallableTask
+// - PromiseReactionJobTask
+// - PromiseFulfillReactionJobTask
+// - PromiseRejectReactionJobTask
+// - PromiseResolveThenableJobTask
+// - Module
+// - ModuleInfoEntry
+// - FeedbackCell
+// - FeedbackVector
+// - PreparseData
+// - UncompiledData
+// - UncompiledDataWithoutPreparseData
+// - UncompiledDataWithPreparseData
+//
+// Formats of Object::ptr_:
+// Smi: [31 bit signed int] 0
+// HeapObject: [32 bit direct pointer] (4 byte aligned) | 01
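+//
+// For example (sketch): the Smi 42 is stored as 42 << 1 == 84
+// (0b1010100, low bit 0), while a heap object at the aligned address
+// 0x12340 is stored as 0x12340 | 0x1, so its low two bits read 01.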
+
+namespace v8 {
+namespace internal {
+
+struct InliningPosition;
+class PropertyDescriptorObject;
+
+// UNSAFE_SKIP_WRITE_BARRIER skips the write barrier.
+// SKIP_WRITE_BARRIER skips the write barrier and asserts that this is safe in
+// the MemoryOptimizer.
+// UPDATE_WEAK_WRITE_BARRIER skips the marking part of the write barrier and
+// only performs the generational part.
+// UPDATE_EPHEMERON_KEY_WRITE_BARRIER is the barrier used when writing the key
+// of an ephemeron hash table entry.
+// UPDATE_WRITE_BARRIER performs the full barrier, both marking and
+// generational.
+enum WriteBarrierMode {
+ SKIP_WRITE_BARRIER,
+ UNSAFE_SKIP_WRITE_BARRIER,
+ UPDATE_WEAK_WRITE_BARRIER,
+ UPDATE_EPHEMERON_KEY_WRITE_BARRIER,
+ UPDATE_WRITE_BARRIER
+};
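+
+// A typical call site passes the mode as the trailing argument of a field
+// setter; e.g. (sketch, assuming a FixedArray-style setter):
+//   array.set(index, value, SKIP_WRITE_BARRIER);  // only if {value} needs
+//                                                 // no barrier, e.g. a Smi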
+
+// PropertyNormalizationMode is used to specify whether to keep
+// inobject properties when normalizing properties of a JSObject.
+enum PropertyNormalizationMode {
+ CLEAR_INOBJECT_PROPERTIES,
+ KEEP_INOBJECT_PROPERTIES
+};
+
+// Indicates whether transitions can be added to a source map or not.
+enum TransitionFlag { INSERT_TRANSITION, OMIT_TRANSITION };
+
+// Indicates whether the transition is simple: the target map of the transition
+// either extends the current map with a new property, or it modifies the
+// property that was added last to the current map.
+enum SimpleTransitionFlag {
+ SIMPLE_PROPERTY_TRANSITION,
+ PROPERTY_TRANSITION,
+ SPECIAL_TRANSITION
+};
+
+// Indicates whether we are only interested in the descriptors of a particular
+// map, or in all descriptors in the descriptor array.
+enum DescriptorFlag { ALL_DESCRIPTORS, OWN_DESCRIPTORS };
+
+// Instance size sentinel for objects of variable size.
+const int kVariableSizeSentinel = 0;
+
+// We may store the unsigned bit field as a signed Smi value and do not
+// use the sign bit.
+const int kStubMajorKeyBits = 8;
+const int kStubMinorKeyBits = kSmiValueSize - kStubMajorKeyBits - 1;
+
+// Result of an abstract relational comparison of x and y, implemented according
+// to ES6 section 7.2.11 Abstract Relational Comparison.
+enum class ComparisonResult {
+ kLessThan, // x < y
+ kEqual, // x = y
+ kGreaterThan, // x > y
+ kUndefined // at least one of x or y was undefined or NaN
+};
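+
+// For instance, with op == Operation::kLessThan only kLessThan maps to
+// true; the kUndefined -> false mapping is how comparisons involving NaN
+// end up false for <, <=, > and >=.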
+
+// (Returns false whenever {result} is kUndefined.)
+bool ComparisonResultToBool(Operation op, ComparisonResult result);
+
+enum class OnNonExistent { kThrowReferenceError, kReturnUndefined };
+
+// The element types selection for CreateListFromArrayLike.
+enum class ElementTypes { kAll, kStringAndSymbol };
+
+// TODO(mythria): Move this to a better place.
+ShouldThrow GetShouldThrow(Isolate* isolate, Maybe<ShouldThrow> should_throw);
+
+// Object is the abstract superclass for all classes in the
+// object hierarchy.
+// Object does not use any virtual functions to avoid the
+// allocation of the C++ vtable.
+// There must only be a single data member in Object: the Address ptr,
+// containing the tagged heap pointer that this Object instance refers to.
+// For a design overview, see https://goo.gl/Ph4CGz.
+class Object : public TaggedImpl<HeapObjectReferenceType::STRONG, Address> {
+ public:
+ constexpr Object() : TaggedImpl(kNullAddress) {}
+ explicit constexpr Object(Address ptr) : TaggedImpl(ptr) {}
+
+#define IS_TYPE_FUNCTION_DECL(Type) V8_INLINE bool Is##Type() const;
+ OBJECT_TYPE_LIST(IS_TYPE_FUNCTION_DECL)
+ HEAP_OBJECT_TYPE_LIST(IS_TYPE_FUNCTION_DECL)
+#undef IS_TYPE_FUNCTION_DECL
+
+// Oddball checks are faster when they are raw pointer comparisons, so the
+// isolate/read-only roots overloads should be preferred where possible.
+#define IS_TYPE_FUNCTION_DECL(Type, Value) \
+ V8_INLINE bool Is##Type(Isolate* isolate) const; \
+ V8_INLINE bool Is##Type(ReadOnlyRoots roots) const; \
+ V8_INLINE bool Is##Type() const;
+ ODDBALL_LIST(IS_TYPE_FUNCTION_DECL)
+#undef IS_TYPE_FUNCTION_DECL
+
+ V8_INLINE bool IsNullOrUndefined(Isolate* isolate) const;
+ V8_INLINE bool IsNullOrUndefined(ReadOnlyRoots roots) const;
+ V8_INLINE bool IsNullOrUndefined() const;
+
+ V8_INLINE bool IsZero() const;
+ V8_INLINE bool IsNoSharedNameSentinel() const;
+
+ enum class Conversion { kToNumber, kToNumeric };
+
+#define DECL_STRUCT_PREDICATE(NAME, Name, name) V8_INLINE bool Is##Name() const;
+ STRUCT_LIST(DECL_STRUCT_PREDICATE)
+#undef DECL_STRUCT_PREDICATE
+
+ // ES6, #sec-isarray. NOT to be confused with %_IsArray.
+ V8_INLINE
+ V8_WARN_UNUSED_RESULT static Maybe<bool> IsArray(Handle<Object> object);
+
+ V8_INLINE bool IsHashTableBase() const;
+ V8_INLINE bool IsSmallOrderedHashTable() const;
+
+ // Extract the number.
+ inline double Number() const;
+ V8_INLINE bool IsNaN() const;
+ V8_INLINE bool IsMinusZero() const;
+ V8_EXPORT_PRIVATE bool ToInt32(int32_t* value);
+ inline bool ToUint32(uint32_t* value) const;
+
+ inline Representation OptimalRepresentation();
+
+ inline ElementsKind OptimalElementsKind();
+
+ inline bool FitsRepresentation(Representation representation);
+
+ inline bool FilterKey(PropertyFilter filter);
+
+ Handle<FieldType> OptimalType(Isolate* isolate,
+ Representation representation);
+
+ V8_EXPORT_PRIVATE static Handle<Object> NewStorageFor(
+ Isolate* isolate, Handle<Object> object, Representation representation);
+
+ static Handle<Object> WrapForRead(Isolate* isolate, Handle<Object> object,
+ Representation representation);
+
+  // Returns true if the object is of the correct type to be used as an
+  // implementation of a JSObject's elements.
+ inline bool HasValidElements();
+
+ // ECMA-262 9.2.
+ V8_EXPORT_PRIVATE bool BooleanValue(Isolate* isolate);
+ Object ToBoolean(Isolate* isolate);
+
+ // ES6 section 7.2.11 Abstract Relational Comparison
+ V8_EXPORT_PRIVATE V8_WARN_UNUSED_RESULT static Maybe<ComparisonResult>
+ Compare(Isolate* isolate, Handle<Object> x, Handle<Object> y);
+
+ // ES6 section 7.2.12 Abstract Equality Comparison
+ V8_EXPORT_PRIVATE V8_WARN_UNUSED_RESULT static Maybe<bool> Equals(
+ Isolate* isolate, Handle<Object> x, Handle<Object> y);
+
+ // ES6 section 7.2.13 Strict Equality Comparison
+ V8_EXPORT_PRIVATE bool StrictEquals(Object that);
+
+ // ES6 section 7.1.13 ToObject
+  // Convert to a JSObject if needed.
+  // native_context is used when creating a wrapper object.
+ //
+ // Passing a non-null method_name allows us to give a more informative
+ // error message for those cases where ToObject is being called on
+ // the receiver of a built-in method.
+ V8_WARN_UNUSED_RESULT static inline MaybeHandle<JSReceiver> ToObject(
+ Isolate* isolate, Handle<Object> object,
+ const char* method_name = nullptr);
+ V8_WARN_UNUSED_RESULT static MaybeHandle<JSReceiver> ToObjectImpl(
+ Isolate* isolate, Handle<Object> object,
+ const char* method_name = nullptr);
+
+ // ES6 section 9.2.1.2, OrdinaryCallBindThis for sloppy callee.
+ V8_WARN_UNUSED_RESULT static MaybeHandle<JSReceiver> ConvertReceiver(
+ Isolate* isolate, Handle<Object> object);
+
+ // ES6 section 7.1.14 ToPropertyKey
+ V8_WARN_UNUSED_RESULT static inline MaybeHandle<Name> ToName(
+ Isolate* isolate, Handle<Object> input);
+
+ // ES6 section 7.1.1 ToPrimitive
+ V8_WARN_UNUSED_RESULT static inline MaybeHandle<Object> ToPrimitive(
+ Handle<Object> input, ToPrimitiveHint hint = ToPrimitiveHint::kDefault);
+
+ // ES6 section 7.1.3 ToNumber
+ V8_WARN_UNUSED_RESULT static inline MaybeHandle<Object> ToNumber(
+ Isolate* isolate, Handle<Object> input);
+
+ V8_WARN_UNUSED_RESULT static inline MaybeHandle<Object> ToNumeric(
+ Isolate* isolate, Handle<Object> input);
+
+ // ES6 section 7.1.4 ToInteger
+ V8_WARN_UNUSED_RESULT static inline MaybeHandle<Object> ToInteger(
+ Isolate* isolate, Handle<Object> input);
+
+ // ES6 section 7.1.5 ToInt32
+ V8_WARN_UNUSED_RESULT static inline MaybeHandle<Object> ToInt32(
+ Isolate* isolate, Handle<Object> input);
+
+ // ES6 section 7.1.6 ToUint32
+  V8_WARN_UNUSED_RESULT static inline MaybeHandle<Object> ToUint32(
+ Isolate* isolate, Handle<Object> input);
+
+ // ES6 section 7.1.12 ToString
+ V8_WARN_UNUSED_RESULT static inline MaybeHandle<String> ToString(
+ Isolate* isolate, Handle<Object> input);
+
+ V8_EXPORT_PRIVATE static Handle<String> NoSideEffectsToString(
+ Isolate* isolate, Handle<Object> input);
+
+ // ES6 section 7.1.14 ToPropertyKey
+ V8_WARN_UNUSED_RESULT static inline MaybeHandle<Object> ToPropertyKey(
+ Isolate* isolate, Handle<Object> value);
+
+ // ES6 section 7.1.15 ToLength
+ V8_WARN_UNUSED_RESULT static inline MaybeHandle<Object> ToLength(
+ Isolate* isolate, Handle<Object> input);
+
+ // ES6 section 7.1.17 ToIndex
+ V8_WARN_UNUSED_RESULT static inline MaybeHandle<Object> ToIndex(
+ Isolate* isolate, Handle<Object> input, MessageTemplate error_index);
+
+ // ES6 section 7.3.9 GetMethod
+ V8_WARN_UNUSED_RESULT static MaybeHandle<Object> GetMethod(
+ Handle<JSReceiver> receiver, Handle<Name> name);
+
+ // ES6 section 7.3.17 CreateListFromArrayLike
+ V8_WARN_UNUSED_RESULT static MaybeHandle<FixedArray> CreateListFromArrayLike(
+ Isolate* isolate, Handle<Object> object, ElementTypes element_types);
+
+ // Get length property and apply ToLength.
+ V8_WARN_UNUSED_RESULT static MaybeHandle<Object> GetLengthFromArrayLike(
+ Isolate* isolate, Handle<JSReceiver> object);
+
+ // ES6 section 12.5.6 The typeof Operator
+ static Handle<String> TypeOf(Isolate* isolate, Handle<Object> object);
+
+ // ES6 section 12.7 Additive Operators
+ V8_WARN_UNUSED_RESULT static MaybeHandle<Object> Add(Isolate* isolate,
+ Handle<Object> lhs,
+ Handle<Object> rhs);
+
+ // ES6 section 12.9 Relational Operators
+ V8_WARN_UNUSED_RESULT static inline Maybe<bool> GreaterThan(Isolate* isolate,
+ Handle<Object> x,
+ Handle<Object> y);
+ V8_WARN_UNUSED_RESULT static inline Maybe<bool> GreaterThanOrEqual(
+ Isolate* isolate, Handle<Object> x, Handle<Object> y);
+ V8_WARN_UNUSED_RESULT static inline Maybe<bool> LessThan(Isolate* isolate,
+ Handle<Object> x,
+ Handle<Object> y);
+ V8_WARN_UNUSED_RESULT static inline Maybe<bool> LessThanOrEqual(
+ Isolate* isolate, Handle<Object> x, Handle<Object> y);
+
+ // ES6 section 7.3.19 OrdinaryHasInstance (C, O).
+ V8_WARN_UNUSED_RESULT static MaybeHandle<Object> OrdinaryHasInstance(
+ Isolate* isolate, Handle<Object> callable, Handle<Object> object);
+
+ // ES6 section 12.10.4 Runtime Semantics: InstanceofOperator(O, C)
+ V8_WARN_UNUSED_RESULT static MaybeHandle<Object> InstanceOf(
+ Isolate* isolate, Handle<Object> object, Handle<Object> callable);
+
+ V8_EXPORT_PRIVATE V8_WARN_UNUSED_RESULT static MaybeHandle<Object>
+ GetProperty(LookupIterator* it,
+ OnNonExistent on_non_existent = OnNonExistent::kReturnUndefined);
+
+ // ES6 [[Set]] (when passed kDontThrow)
+ // Invariants for this and related functions (unless stated otherwise):
+ // 1) When the result is Nothing, an exception is pending.
+ // 2) When passed kThrowOnError, the result is never Just(false).
+ // In some cases, an exception is thrown regardless of the ShouldThrow
+ // argument. These cases are either in accordance with the spec or not
+  // covered by it (e.g., concerning API callbacks).
+ V8_EXPORT_PRIVATE V8_WARN_UNUSED_RESULT static Maybe<bool> SetProperty(
+ LookupIterator* it, Handle<Object> value, StoreOrigin store_origin,
+ Maybe<ShouldThrow> should_throw = Nothing<ShouldThrow>());
+ V8_EXPORT_PRIVATE V8_WARN_UNUSED_RESULT static MaybeHandle<Object>
+ SetProperty(Isolate* isolate, Handle<Object> object, Handle<Name> name,
+ Handle<Object> value,
+ StoreOrigin store_origin = StoreOrigin::kMaybeKeyed,
+ Maybe<ShouldThrow> should_throw = Nothing<ShouldThrow>());
+ V8_WARN_UNUSED_RESULT static inline MaybeHandle<Object> SetPropertyOrElement(
+ Isolate* isolate, Handle<Object> object, Handle<Name> name,
+ Handle<Object> value,
+ Maybe<ShouldThrow> should_throw = Nothing<ShouldThrow>(),
+ StoreOrigin store_origin = StoreOrigin::kMaybeKeyed);
+
+ V8_WARN_UNUSED_RESULT static Maybe<bool> SetSuperProperty(
+ LookupIterator* it, Handle<Object> value, StoreOrigin store_origin,
+ Maybe<ShouldThrow> should_throw = Nothing<ShouldThrow>());
+
+ V8_WARN_UNUSED_RESULT static Maybe<bool> CannotCreateProperty(
+ Isolate* isolate, Handle<Object> receiver, Handle<Object> name,
+ Handle<Object> value, Maybe<ShouldThrow> should_throw);
+ V8_WARN_UNUSED_RESULT static Maybe<bool> WriteToReadOnlyProperty(
+ LookupIterator* it, Handle<Object> value,
+ Maybe<ShouldThrow> should_throw);
+ V8_WARN_UNUSED_RESULT static Maybe<bool> WriteToReadOnlyProperty(
+ Isolate* isolate, Handle<Object> receiver, Handle<Object> name,
+ Handle<Object> value, ShouldThrow should_throw);
+ V8_WARN_UNUSED_RESULT static Maybe<bool> RedefineIncompatibleProperty(
+ Isolate* isolate, Handle<Object> name, Handle<Object> value,
+ Maybe<ShouldThrow> should_throw);
+ V8_WARN_UNUSED_RESULT static Maybe<bool> SetDataProperty(
+ LookupIterator* it, Handle<Object> value);
+ V8_WARN_UNUSED_RESULT static Maybe<bool> AddDataProperty(
+ LookupIterator* it, Handle<Object> value, PropertyAttributes attributes,
+ Maybe<ShouldThrow> should_throw, StoreOrigin store_origin);
+ V8_WARN_UNUSED_RESULT static inline MaybeHandle<Object> GetPropertyOrElement(
+ Isolate* isolate, Handle<Object> object, Handle<Name> name);
+ V8_WARN_UNUSED_RESULT static inline MaybeHandle<Object> GetPropertyOrElement(
+ Handle<Object> receiver, Handle<Name> name, Handle<JSReceiver> holder);
+ V8_WARN_UNUSED_RESULT static inline MaybeHandle<Object> GetProperty(
+ Isolate* isolate, Handle<Object> object, Handle<Name> name);
+
+ V8_WARN_UNUSED_RESULT static MaybeHandle<Object> GetPropertyWithAccessor(
+ LookupIterator* it);
+ V8_WARN_UNUSED_RESULT static Maybe<bool> SetPropertyWithAccessor(
+ LookupIterator* it, Handle<Object> value,
+ Maybe<ShouldThrow> should_throw);
+
+ V8_WARN_UNUSED_RESULT static MaybeHandle<Object> GetPropertyWithDefinedGetter(
+ Handle<Object> receiver, Handle<JSReceiver> getter);
+ V8_WARN_UNUSED_RESULT static Maybe<bool> SetPropertyWithDefinedSetter(
+ Handle<Object> receiver, Handle<JSReceiver> setter, Handle<Object> value,
+ Maybe<ShouldThrow> should_throw);
+
+ V8_WARN_UNUSED_RESULT static inline MaybeHandle<Object> GetElement(
+ Isolate* isolate, Handle<Object> object, uint32_t index);
+
+ V8_WARN_UNUSED_RESULT static inline MaybeHandle<Object> SetElement(
+ Isolate* isolate, Handle<Object> object, uint32_t index,
+ Handle<Object> value, ShouldThrow should_throw);
+
+ // Returns the permanent hash code associated with this object. May return
+ // undefined if not yet created.
+ inline Object GetHash();
+
+ // Returns the permanent hash code associated with this object depending on
+ // the actual object type. May create and store a hash code if needed and none
+ // exists.
+ V8_EXPORT_PRIVATE Smi GetOrCreateHash(Isolate* isolate);
+
+ // Checks whether this object has the same value as the given one. This
+ // function is implemented according to ES5, section 9.12 and can be used
+ // to implement the Object.is function.
+ V8_EXPORT_PRIVATE bool SameValue(Object other);
+
+ // A part of SameValue which handles Number vs. Number case.
+ // Treats NaN == NaN and +0 != -0.
+ inline static bool SameNumberValue(double number1, double number2);
+
+ // Checks whether this object has the same value as the given one.
+ // +0 and -0 are treated equal. Everything else is the same as SameValue.
+ // This function is implemented according to ES6, section 7.2.4 and is used
+ // by ES6 Map and Set.
+ bool SameValueZero(Object other);
+
+ // ES6 section 9.4.2.3 ArraySpeciesCreate (part of it)
+ V8_WARN_UNUSED_RESULT static MaybeHandle<Object> ArraySpeciesConstructor(
+ Isolate* isolate, Handle<Object> original_array);
+
+ // ES6 section 7.3.20 SpeciesConstructor ( O, defaultConstructor )
+ V8_WARN_UNUSED_RESULT static MaybeHandle<Object> SpeciesConstructor(
+ Isolate* isolate, Handle<JSReceiver> recv,
+ Handle<JSFunction> default_ctor);
+
+ // Tries to convert an object to an array length. Returns true and sets the
+ // output parameter if it succeeds.
+ inline bool ToArrayLength(uint32_t* index) const;
+
+ // Tries to convert an object to an array index. Returns true and sets the
+ // output parameter if it succeeds. Equivalent to ToArrayLength, but does not
+ // allow kMaxUInt32.
+ V8_WARN_UNUSED_RESULT inline bool ToArrayIndex(uint32_t* index) const;
+
+ // Returns true if the result of iterating over the object is the same
+ // (including observable effects) as simply accessing the properties between 0
+ // and length.
+ bool IterationHasObservableEffects();
+
+ EXPORT_DECL_VERIFIER(Object)
+
+#ifdef VERIFY_HEAP
+ // Verify a pointer is a valid object pointer.
+ static void VerifyPointer(Isolate* isolate, Object p);
+#endif
+
+ inline void VerifyApiCallResultType();
+
+ // Prints this object without details.
+ V8_EXPORT_PRIVATE void ShortPrint(FILE* out = stdout) const;
+
+ // Prints this object without details to a message accumulator.
+ V8_EXPORT_PRIVATE void ShortPrint(StringStream* accumulator) const;
+
+ V8_EXPORT_PRIVATE void ShortPrint(std::ostream& os) const; // NOLINT
+
+ inline static Object cast(Object object) { return object; }
+ inline static Object unchecked_cast(Object object) { return object; }
+
+ // Layout description.
+ static const int kHeaderSize = 0; // Object does not take up any space.
+
+#ifdef OBJECT_PRINT
+ // For our gdb macros, we should perhaps change these in the future.
+ V8_EXPORT_PRIVATE void Print() const;
+
+ // Prints this object with details.
+ V8_EXPORT_PRIVATE void Print(std::ostream& os) const; // NOLINT
+#else
+ void Print() const { ShortPrint(); }
+ void Print(std::ostream& os) const { ShortPrint(os); } // NOLINT
+#endif
+
+ // For use with std::unordered_set.
+ struct Hasher {
+ size_t operator()(const Object o) const {
+ return std::hash<v8::internal::Address>{}(o.ptr());
+ }
+ };
+
+ // For use with std::map.
+ struct Comparer {
+ bool operator()(const Object a, const Object b) const {
+ return a.ptr() < b.ptr();
+ }
+ };
+
+ template <class T, typename std::enable_if<std::is_arithmetic<T>::value,
+ int>::type = 0>
+ inline T ReadField(size_t offset) const {
+ // Pointer compression causes types larger than kTaggedSize to be unaligned.
+#ifdef V8_COMPRESS_POINTERS
+ constexpr bool v8_pointer_compression_unaligned = sizeof(T) > kTaggedSize;
+#else
+ constexpr bool v8_pointer_compression_unaligned = false;
+#endif
+ if (std::is_same<T, double>::value || v8_pointer_compression_unaligned) {
+ // Bug(v8:8875) Double fields may be unaligned.
+ return ReadUnalignedValue<T>(field_address(offset));
+ } else {
+ return Memory<T>(field_address(offset));
+ }
+ }
+
+ template <class T, typename std::enable_if<std::is_arithmetic<T>::value,
+ int>::type = 0>
+ inline void WriteField(size_t offset, T value) const {
+ // Pointer compression causes types larger than kTaggedSize to be unaligned.
+#ifdef V8_COMPRESS_POINTERS
+ constexpr bool v8_pointer_compression_unaligned = sizeof(T) > kTaggedSize;
+#else
+ constexpr bool v8_pointer_compression_unaligned = false;
+#endif
+ if (std::is_same<T, double>::value || v8_pointer_compression_unaligned) {
+ // Bug(v8:8875) Double fields may be unaligned.
+ WriteUnalignedValue<T>(field_address(offset), value);
+ } else {
+ Memory<T>(field_address(offset)) = value;
+ }
+ }
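+
+  // Example (sketch; kValueOffset is a hypothetical field offset): an
+  // 8-byte field may sit on a 4-byte boundary under pointer compression,
+  // so both of these take the unaligned path:
+  //   double d = ReadField<double>(kValueOffset);
+  //   WriteField<double>(kValueOffset, d + 1.0);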
+
+ protected:
+ inline Address field_address(size_t offset) const {
+ return ptr() + offset - kHeapObjectTag;
+ }
+
+ private:
+ friend class CompressedObjectSlot;
+ friend class FullObjectSlot;
+ friend class LookupIterator;
+ friend class StringStream;
+
+ // Return the map of the root of object's prototype chain.
+ Map GetPrototypeChainRootMap(Isolate* isolate) const;
+
+ // Returns a non-SMI for JSReceivers, but returns the hash code for
+ // simple objects. This avoids a double lookup in the cases where
+ // we know we will add the hash to the JSReceiver if it does not
+ // already exist.
+ //
+ // Despite its size, this needs to be inlined for performance
+ // reasons.
+ static inline Object GetSimpleHash(Object object);
+
+ // Helper for SetProperty and SetSuperProperty.
+ // Return value is only meaningful if [found] is set to true on return.
+ V8_WARN_UNUSED_RESULT static Maybe<bool> SetPropertyInternal(
+ LookupIterator* it, Handle<Object> value, Maybe<ShouldThrow> should_throw,
+ StoreOrigin store_origin, bool* found);
+
+ V8_WARN_UNUSED_RESULT static MaybeHandle<Name> ConvertToName(
+ Isolate* isolate, Handle<Object> input);
+ V8_WARN_UNUSED_RESULT static MaybeHandle<Object> ConvertToPropertyKey(
+ Isolate* isolate, Handle<Object> value);
+ V8_EXPORT_PRIVATE V8_WARN_UNUSED_RESULT static MaybeHandle<String>
+ ConvertToString(Isolate* isolate, Handle<Object> input);
+ V8_WARN_UNUSED_RESULT static MaybeHandle<Object> ConvertToNumberOrNumeric(
+ Isolate* isolate, Handle<Object> input, Conversion mode);
+ V8_WARN_UNUSED_RESULT static MaybeHandle<Object> ConvertToInteger(
+ Isolate* isolate, Handle<Object> input);
+ V8_WARN_UNUSED_RESULT static MaybeHandle<Object> ConvertToInt32(
+ Isolate* isolate, Handle<Object> input);
+ V8_WARN_UNUSED_RESULT static MaybeHandle<Object> ConvertToUint32(
+ Isolate* isolate, Handle<Object> input);
+ V8_WARN_UNUSED_RESULT static MaybeHandle<Object> ConvertToLength(
+ Isolate* isolate, Handle<Object> input);
+ V8_WARN_UNUSED_RESULT static MaybeHandle<Object> ConvertToIndex(
+ Isolate* isolate, Handle<Object> input, MessageTemplate error_index);
+};
+
+V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os, const Object& obj);
+
+struct Brief {
+ template <typename TObject>
+ explicit Brief(TObject v) : value{v.ptr()} {}
+ // {value} is a tagged heap object reference (weak or strong), equivalent to
+ // a MaybeObject's payload. It has a plain Address type to keep #includes
+ // lightweight.
+ const Address value;
+};
+
+V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os, const Brief& v);
+
+// Objects should never have the weak tag; this variant is for overzealous
+// checking.
+V8_INLINE static bool HasWeakHeapObjectTag(const Object value) {
+ return HAS_WEAK_HEAP_OBJECT_TAG(value.ptr());
+}
+
+// Heap objects typically have a map pointer in their first word. However,
+// during GC other data (e.g. mark bits, forwarding addresses) is sometimes
+// encoded in the first word. The class MapWord is an abstraction of the
+// value in a heap object's first word.
+class MapWord {
+ public:
+ // Normal state: the map word contains a map pointer.
+
+ // Create a map word from a map pointer.
+ static inline MapWord FromMap(const Map map);
+
+ // View this map word as a map pointer.
+ inline Map ToMap() const;
+
+ // Scavenge collection: the map word of live objects in the from space
+ // contains a forwarding address (a heap object pointer in the to space).
+
+ // True if this map word is a forwarding address for a scavenge
+ // collection. Only valid during a scavenge collection (specifically,
+ // when all map words are heap object pointers, i.e. not during a full GC).
+ inline bool IsForwardingAddress() const;
+
+ // Create a map word from a forwarding address.
+ static inline MapWord FromForwardingAddress(HeapObject object);
+
+ // View this map word as a forwarding address.
+ inline HeapObject ToForwardingAddress();
+
+ static inline MapWord FromRawValue(uintptr_t value) { return MapWord(value); }
+
+ inline uintptr_t ToRawValue() { return value_; }
+
+ private:
+ // HeapObject calls the private constructor and directly reads the value.
+ friend class HeapObject;
+
+ explicit MapWord(Address value) : value_(value) {}
+
+ Address value_;
+};
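+
+// Typical GC-time use of the forwarding state (a sketch of the pattern
+// the scavenger relies on):
+//   MapWord map_word = object.map_word();
+//   if (map_word.IsForwardingAddress()) {
+//     object = map_word.ToForwardingAddress();  // {object} was evacuated
+//   }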
+
+template <int start_offset, int end_offset, int size>
+class FixedBodyDescriptor;
+
+template <int start_offset>
+class FlexibleBodyDescriptor;
+
+template <int start_offset>
+class FlexibleWeakBodyDescriptor;
+
+template <class ParentBodyDescriptor, class ChildBodyDescriptor>
+class SubclassBodyDescriptor;
+
+enum EnsureElementsMode {
+ DONT_ALLOW_DOUBLE_ELEMENTS,
+ ALLOW_COPIED_DOUBLE_ELEMENTS,
+ ALLOW_CONVERTED_DOUBLE_ELEMENTS
+};
+
+// Indicator for one component of an AccessorPair.
+enum AccessorComponent { ACCESSOR_GETTER, ACCESSOR_SETTER };
+
+enum class GetKeysConversion {
+ kKeepNumbers = static_cast<int>(v8::KeyConversionMode::kKeepNumbers),
+ kConvertToString = static_cast<int>(v8::KeyConversionMode::kConvertToString)
+};
+
+enum class KeyCollectionMode {
+ kOwnOnly = static_cast<int>(v8::KeyCollectionMode::kOwnOnly),
+ kIncludePrototypes =
+ static_cast<int>(v8::KeyCollectionMode::kIncludePrototypes)
+};
+
+// Utility superclass for stack-allocated objects that must be updated
+// on GC. It provides two ways for the GC to update instances: either by
+// iterating over them or by being notified after GC.
+class Relocatable {
+ public:
+ explicit inline Relocatable(Isolate* isolate);
+ inline virtual ~Relocatable();
+ virtual void IterateInstance(RootVisitor* v) {}
+ virtual void PostGarbageCollection() {}
+
+ static void PostGarbageCollectionProcessing(Isolate* isolate);
+ static int ArchiveSpacePerThread();
+ static char* ArchiveState(Isolate* isolate, char* to);
+ static char* RestoreState(Isolate* isolate, char* from);
+ static void Iterate(Isolate* isolate, RootVisitor* v);
+ static void Iterate(RootVisitor* v, Relocatable* top);
+ static char* Iterate(RootVisitor* v, char* t);
+
+ private:
+ Isolate* isolate_;
+ Relocatable* prev_;
+};
+
+// BooleanBit is a helper class for setting and getting a bit in an integer.
+class BooleanBit : public AllStatic {
+ public:
+ static inline bool get(int value, int bit_position) {
+ return (value & (1 << bit_position)) != 0;
+ }
+
+ static inline int set(int value, int bit_position, bool v) {
+ if (v) {
+ value |= (1 << bit_position);
+ } else {
+ value &= ~(1 << bit_position);
+ }
+ return value;
+ }
+};
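+
+// Example (sketch): packing two independent flags into one integer.
+//   int flags = 0;
+//   flags = BooleanBit::set(flags, 0, true);   // flags == 0b01
+//   flags = BooleanBit::set(flags, 1, true);   // flags == 0b11
+//   flags = BooleanBit::set(flags, 0, false);  // flags == 0b10
+//   DCHECK(BooleanBit::get(flags, 1));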
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_OBJECTS_H_
diff --git a/deps/v8/src/objects/oddball-inl.h b/deps/v8/src/objects/oddball-inl.h
index fbd5a1b2c6..e0d77b9043 100644
--- a/deps/v8/src/objects/oddball-inl.h
+++ b/deps/v8/src/objects/oddball-inl.h
@@ -7,9 +7,9 @@
#include "src/objects/oddball.h"
-#include "src/handles.h"
+#include "src/handles/handles.h"
#include "src/heap/heap-write-barrier-inl.h"
-#include "src/objects-inl.h"
+#include "src/objects/objects-inl.h"
#include "src/objects/string-inl.h"
// Has to be the last include (doesn't have include guards):
@@ -18,32 +18,19 @@
namespace v8 {
namespace internal {
-OBJECT_CONSTRUCTORS_IMPL(Oddball, HeapObject)
-
-CAST_ACCESSOR(Oddball)
-
-double Oddball::to_number_raw() const {
- return READ_DOUBLE_FIELD(*this, kToNumberRawOffset);
-}
-
-void Oddball::set_to_number_raw(double value) {
- WRITE_DOUBLE_FIELD(*this, kToNumberRawOffset, value);
-}
+TQ_OBJECT_CONSTRUCTORS_IMPL(Oddball)
void Oddball::set_to_number_raw_as_bits(uint64_t bits) {
- WRITE_UINT64_FIELD(*this, kToNumberRawOffset, bits);
+ // Bug(v8:8875): HeapNumber's double may be unaligned.
+ WriteUnalignedValue<uint64_t>(field_address(kToNumberRawOffset), bits);
}
-ACCESSORS(Oddball, to_string, String, kToStringOffset)
-ACCESSORS(Oddball, to_number, Object, kToNumberOffset)
-ACCESSORS(Oddball, type_of, String, kTypeOfOffset)
-
byte Oddball::kind() const {
- return Smi::ToInt(READ_FIELD(*this, kKindOffset));
+ return Smi::ToInt(TorqueGeneratedOddball::kind());
}
void Oddball::set_kind(byte value) {
- WRITE_FIELD(*this, kKindOffset, Smi::FromInt(value));
+ TorqueGeneratedOddball::set_kind(Smi::FromInt(value));
}
// static
@@ -53,7 +40,7 @@ Handle<Object> Oddball::ToNumber(Isolate* isolate, Handle<Oddball> input) {
bool HeapObject::IsBoolean() const {
return IsOddball() &&
- ((Oddball::cast(*this)->kind() & Oddball::kNotBooleanMask) == 0);
+ ((Oddball::cast(*this).kind() & Oddball::kNotBooleanMask) == 0);
}
} // namespace internal
diff --git a/deps/v8/src/objects/oddball.h b/deps/v8/src/objects/oddball.h
index f608a76a2f..025f9379ba 100644
--- a/deps/v8/src/objects/oddball.h
+++ b/deps/v8/src/objects/oddball.h
@@ -6,7 +6,7 @@
#define V8_OBJECTS_ODDBALL_H_
#include "src/objects/heap-object.h"
-#include "torque-generated/class-definitions-from-dsl.h"
+#include "torque-generated/class-definitions-tq.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -15,45 +15,26 @@ namespace v8 {
namespace internal {
// The Oddball describes objects null, undefined, true, and false.
-class Oddball : public HeapObject {
+class Oddball : public TorqueGeneratedOddball<Oddball, HeapObject> {
public:
// [to_number_raw]: Cached raw to_number computed at startup.
- inline double to_number_raw() const;
- inline void set_to_number_raw(double value);
inline void set_to_number_raw_as_bits(uint64_t bits);
- // [to_string]: Cached to_string computed at startup.
- DECL_ACCESSORS(to_string, String)
-
- // [to_number]: Cached to_number computed at startup.
- DECL_ACCESSORS(to_number, Object)
-
- // [typeof]: Cached type_of computed at startup.
- DECL_ACCESSORS(type_of, String)
-
inline byte kind() const;
inline void set_kind(byte kind);
+ // Oddball has a custom verifier.
+ void OddballVerify(Isolate* isolate);
+
// ES6 section 7.1.3 ToNumber for Boolean, Null, Undefined.
V8_WARN_UNUSED_RESULT static inline Handle<Object> ToNumber(
Isolate* isolate, Handle<Oddball> input);
- DECL_CAST(Oddball)
-
- // Dispatched behavior.
- DECL_VERIFIER(Oddball)
-
// Initialize the fields.
static void Initialize(Isolate* isolate, Handle<Oddball> oddball,
const char* to_string, Handle<Object> to_number,
const char* type_of, byte kind);
- DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize,
- TORQUE_GENERATED_ODDBALL_FIELDS)
- // TODO(v8:8989): [torque] Support marker constants.
- static const int kTaggedFieldsStartOffset = kToStringOffset;
- static const int kTaggedFieldsEndOffset = kKindOffset;
-
static const byte kFalse = 0;
static const byte kTrue = 1;
static const byte kNotBooleanMask = static_cast<byte>(~1);
@@ -68,14 +49,16 @@ class Oddball : public HeapObject {
static const byte kStaleRegister = 10;
static const byte kSelfReferenceMarker = 10;
- using BodyDescriptor = FixedBodyDescriptor<kTaggedFieldsStartOffset,
- kTaggedFieldsEndOffset, kSize>;
+ static_assert(kStartOfWeakFieldsOffset == kEndOfWeakFieldsOffset,
+ "Ensure BodyDescriptor does not need to handle weak fields.");
+ using BodyDescriptor = FixedBodyDescriptor<kStartOfStrongFieldsOffset,
+ kEndOfStrongFieldsOffset, kSize>;
STATIC_ASSERT(kKindOffset == Internals::kOddballKindOffset);
STATIC_ASSERT(kNull == Internals::kNullOddballKind);
STATIC_ASSERT(kUndefined == Internals::kUndefinedOddballKind);
- OBJECT_CONSTRUCTORS(Oddball, HeapObject);
+ TQ_OBJECT_CONSTRUCTORS(Oddball)
};
} // namespace internal
diff --git a/deps/v8/src/objects/ordered-hash-table-inl.h b/deps/v8/src/objects/ordered-hash-table-inl.h
index 1ab26ca8ab..0eaa7567e2 100644
--- a/deps/v8/src/objects/ordered-hash-table-inl.h
+++ b/deps/v8/src/objects/ordered-hash-table-inl.h
@@ -8,10 +8,10 @@
#include "src/objects/ordered-hash-table.h"
#include "src/heap/heap.h"
-#include "src/objects-inl.h"
#include "src/objects/compressed-slots.h"
#include "src/objects/fixed-array-inl.h"
#include "src/objects/js-collection-iterator.h"
+#include "src/objects/objects-inl.h"
#include "src/objects/slots.h"
// Has to be the last include (doesn't have include guards):
@@ -185,18 +185,18 @@ template <class Derived, class TableType>
Object OrderedHashTableIterator<Derived, TableType>::CurrentKey() {
TableType table = TableType::cast(this->table());
int index = Smi::ToInt(this->index());
- Object key = table->KeyAt(index);
- DCHECK(!key->IsTheHole());
+ Object key = table.KeyAt(index);
+ DCHECK(!key.IsTheHole());
return key;
}
inline void SmallOrderedNameDictionary::SetHash(int hash) {
DCHECK(PropertyArray::HashField::is_valid(hash));
- WRITE_INT_FIELD(*this, PrefixOffset(), hash);
+ WriteField<int>(PrefixOffset(), hash);
}
inline int SmallOrderedNameDictionary::Hash() {
- int hash = READ_INT_FIELD(*this, PrefixOffset());
+ int hash = ReadField<int>(PrefixOffset());
DCHECK(PropertyArray::HashField::is_valid(hash));
return hash;
}
diff --git a/deps/v8/src/objects/ordered-hash-table.cc b/deps/v8/src/objects/ordered-hash-table.cc
index 0b52160805..3d628cc406 100644
--- a/deps/v8/src/objects/ordered-hash-table.cc
+++ b/deps/v8/src/objects/ordered-hash-table.cc
@@ -4,10 +4,10 @@
#include "src/objects/ordered-hash-table.h"
+#include "src/execution/isolate.h"
#include "src/heap/heap-inl.h"
-#include "src/isolate.h"
-#include "src/objects-inl.h"
#include "src/objects/js-collection-inl.h"
+#include "src/objects/objects-inl.h"
#include "src/objects/ordered-hash-table-inl.h"
namespace v8 {
@@ -86,10 +86,10 @@ Handle<Derived> OrderedHashTable<Derived, entrysize>::Clear(
template <class Derived, int entrysize>
bool OrderedHashTable<Derived, entrysize>::HasKey(Isolate* isolate,
Derived table, Object key) {
- DCHECK_IMPLIES(entrysize == 1, table->IsOrderedHashSet());
- DCHECK_IMPLIES(entrysize == 2, table->IsOrderedHashMap());
+ DCHECK_IMPLIES(entrysize == 1, table.IsOrderedHashSet());
+ DCHECK_IMPLIES(entrysize == 2, table.IsOrderedHashMap());
DisallowHeapAllocation no_gc;
- int entry = table->FindEntry(isolate, key);
+ int entry = table.FindEntry(isolate, key);
return entry != kNotFound;
}
@@ -99,21 +99,21 @@ int OrderedHashTable<Derived, entrysize>::FindEntry(Isolate* isolate,
int entry;
// This special-cases Smi, so that we avoid the HandleScope
// creation below.
- if (key->IsSmi()) {
+ if (key.IsSmi()) {
uint32_t hash = ComputeUnseededHash(Smi::ToInt(key));
entry = HashToEntry(hash & Smi::kMaxValue);
} else {
HandleScope scope(isolate);
- Object hash = key->GetHash();
+ Object hash = key.GetHash();
// If the object does not have an identity hash, it was never used as a key
- if (hash->IsUndefined(isolate)) return kNotFound;
+ if (hash.IsUndefined(isolate)) return kNotFound;
entry = HashToEntry(Smi::ToInt(hash));
}
// Walk the chain in the bucket to find the key.
while (entry != kNotFound) {
Object candidate_key = KeyAt(entry);
- if (candidate_key->SameValueZero(key)) break;
+ if (candidate_key.SameValueZero(key)) break;
entry = NextChainEntry(entry);
}
@@ -123,13 +123,13 @@ int OrderedHashTable<Derived, entrysize>::FindEntry(Isolate* isolate,
Handle<OrderedHashSet> OrderedHashSet::Add(Isolate* isolate,
Handle<OrderedHashSet> table,
Handle<Object> key) {
- int hash = key->GetOrCreateHash(isolate)->value();
+ int hash = key->GetOrCreateHash(isolate).value();
int entry = table->HashToEntry(hash);
// Walk the chain of the bucket and try finding the key.
while (entry != kNotFound) {
Object candidate_key = table->KeyAt(entry);
// Do not add if we have the key already
- if (candidate_key->SameValueZero(*key)) return table;
+ if (candidate_key.SameValueZero(*key)) return table;
entry = table->NextChainEntry(entry);
}
@@ -164,12 +164,12 @@ Handle<FixedArray> OrderedHashSet::ConvertToKeysArray(
Object key = table->get(index);
if (convert == GetKeysConversion::kConvertToString) {
uint32_t index_value;
- if (key->ToArrayIndex(&index_value)) {
+ if (key.ToArrayIndex(&index_value)) {
// Avoid trashing the Number2String cache if indices get very large.
bool use_cache = i < kMaxStringTableEntries;
key = *isolate->factory()->Uint32ToString(index_value, use_cache);
} else {
- CHECK(key->IsName());
+ CHECK(key.IsName());
}
}
result->set(i, key);
@@ -203,12 +203,12 @@ Handle<Derived> OrderedHashTable<Derived, entrysize>::Rehash(
DisallowHeapAllocation no_gc;
for (int old_entry = 0; old_entry < (nof + nod); ++old_entry) {
Object key = table->KeyAt(old_entry);
- if (key->IsTheHole(isolate)) {
+ if (key.IsTheHole(isolate)) {
table->SetRemovedIndexAt(removed_holes_index++, old_entry);
continue;
}
- Object hash = key->GetHash();
+ Object hash = key.GetHash();
int bucket = Smi::ToInt(hash) & (new_buckets - 1);
Object chain_entry = new_table->get(HashTableStartIndex() + bucket);
new_table->set(HashTableStartIndex() + bucket, Smi::FromInt(new_entry));
@@ -257,20 +257,20 @@ template <class Derived, int entrysize>
bool OrderedHashTable<Derived, entrysize>::Delete(Isolate* isolate,
Derived table, Object key) {
DisallowHeapAllocation no_gc;
- int entry = table->FindEntry(isolate, key);
+ int entry = table.FindEntry(isolate, key);
if (entry == kNotFound) return false;
- int nof = table->NumberOfElements();
- int nod = table->NumberOfDeletedElements();
- int index = table->EntryToIndex(entry);
+ int nof = table.NumberOfElements();
+ int nod = table.NumberOfDeletedElements();
+ int index = table.EntryToIndex(entry);
Object hole = ReadOnlyRoots(isolate).the_hole_value();
for (int i = 0; i < entrysize; ++i) {
- table->set(index + i, hole);
+ table.set(index + i, hole);
}
- table->SetNumberOfElements(nof - 1);
- table->SetNumberOfDeletedElements(nod + 1);
+ table.SetNumberOfElements(nof - 1);
+ table.SetNumberOfDeletedElements(nod + 1);
return true;
}
@@ -278,11 +278,11 @@ bool OrderedHashTable<Derived, entrysize>::Delete(Isolate* isolate,
Address OrderedHashMap::GetHash(Isolate* isolate, Address raw_key) {
DisallowHeapAllocation no_gc;
Object key(raw_key);
- Object hash = key->GetHash();
+ Object hash = key.GetHash();
// If the object does not have an identity hash, it was never used as a key
- if (hash->IsUndefined(isolate)) return Smi::FromInt(-1).ptr();
- DCHECK(hash->IsSmi());
- DCHECK_GE(Smi::cast(hash)->value(), 0);
+ if (hash.IsUndefined(isolate)) return Smi::FromInt(-1).ptr();
+ DCHECK(hash.IsSmi());
+ DCHECK_GE(Smi::cast(hash).value(), 0);
return hash.ptr();
}
@@ -290,7 +290,7 @@ Handle<OrderedHashMap> OrderedHashMap::Add(Isolate* isolate,
Handle<OrderedHashMap> table,
Handle<Object> key,
Handle<Object> value) {
- int hash = key->GetOrCreateHash(isolate)->value();
+ int hash = key->GetOrCreateHash(isolate).value();
int entry = table->HashToEntry(hash);
// Walk the chain of the bucket and try finding the key.
{
@@ -299,7 +299,7 @@ Handle<OrderedHashMap> OrderedHashMap::Add(Isolate* isolate,
while (entry != kNotFound) {
Object candidate_key = table->KeyAt(entry);
// Do not add if we have the key already
- if (candidate_key->SameValueZero(raw_key)) return table;
+ if (candidate_key.SameValueZero(raw_key)) return table;
entry = table->NextChainEntry(entry);
}
}
@@ -326,14 +326,14 @@ V8_EXPORT_PRIVATE int OrderedHashTable<OrderedNameDictionary, 3>::FindEntry(
Isolate* isolate, Object key) {
DisallowHeapAllocation no_gc;
- DCHECK(key->IsUniqueName());
+ DCHECK(key.IsUniqueName());
Name raw_key = Name::cast(key);
- int entry = HashToEntry(raw_key->Hash());
+ int entry = HashToEntry(raw_key.Hash());
while (entry != kNotFound) {
Object candidate_key = KeyAt(entry);
- DCHECK(candidate_key->IsTheHole() ||
- Name::cast(candidate_key)->IsUniqueName());
+ DCHECK(candidate_key.IsTheHole() ||
+ Name::cast(candidate_key).IsUniqueName());
if (candidate_key == raw_key) return entry;
// TODO(gsathya): This is loading the bucket count from the hash
@@ -377,7 +377,7 @@ Handle<OrderedNameDictionary> OrderedNameDictionary::Add(
void OrderedNameDictionary::SetEntry(Isolate* isolate, int entry, Object key,
Object value, PropertyDetails details) {
DisallowHeapAllocation gc;
- DCHECK_IMPLIES(!key->IsName(), key->IsTheHole(isolate));
+ DCHECK_IMPLIES(!key.IsName(), key.IsTheHole(isolate));
DisallowHeapAllocation no_gc;
int index = EntryToIndex(entry);
this->set(index, key);
@@ -554,7 +554,7 @@ MaybeHandle<SmallOrderedHashSet> SmallOrderedHashSet::Add(
}
}
- int hash = key->GetOrCreateHash(isolate)->value();
+ int hash = key->GetOrCreateHash(isolate).value();
int nof = table->NumberOfElements();
// Read the existing bucket values.
@@ -597,7 +597,7 @@ MaybeHandle<SmallOrderedHashMap> SmallOrderedHashMap::Add(
}
}
- int hash = key->GetOrCreateHash(isolate)->value();
+ int hash = key->GetOrCreateHash(isolate).value();
int nof = table->NumberOfElements();
// Read the existing bucket values.
@@ -633,10 +633,10 @@ int V8_EXPORT_PRIVATE
SmallOrderedHashTable<SmallOrderedNameDictionary>::FindEntry(Isolate* isolate,
Object key) {
DisallowHeapAllocation no_gc;
- DCHECK(key->IsUniqueName());
+ DCHECK(key.IsUniqueName());
Name raw_key = Name::cast(key);
- int entry = HashToFirstEntry(raw_key->Hash());
+ int entry = HashToFirstEntry(raw_key.Hash());
// Walk the chain in the bucket to find the key.
while (entry != kNotFound) {
@@ -692,7 +692,7 @@ MaybeHandle<SmallOrderedNameDictionary> SmallOrderedNameDictionary::Add(
void SmallOrderedNameDictionary::SetEntry(Isolate* isolate, int entry,
Object key, Object value,
PropertyDetails details) {
- DCHECK_IMPLIES(!key->IsName(), key->IsTheHole(isolate));
+ DCHECK_IMPLIES(!key.IsName(), key.IsTheHole(isolate));
SetDataEntry(entry, SmallOrderedNameDictionary::kValueIndex, value);
SetDataEntry(entry, SmallOrderedNameDictionary::kKeyIndex, key);
@@ -713,19 +713,19 @@ template <class Derived>
bool SmallOrderedHashTable<Derived>::Delete(Isolate* isolate, Derived table,
Object key) {
DisallowHeapAllocation no_gc;
- int entry = table->FindEntry(isolate, key);
+ int entry = table.FindEntry(isolate, key);
if (entry == kNotFound) return false;
- int nof = table->NumberOfElements();
- int nod = table->NumberOfDeletedElements();
+ int nof = table.NumberOfElements();
+ int nod = table.NumberOfDeletedElements();
Object hole = ReadOnlyRoots(isolate).the_hole_value();
for (int j = 0; j < Derived::kEntrySize; j++) {
- table->SetDataEntry(entry, j, hole);
+ table.SetDataEntry(entry, j, hole);
}
- table->SetNumberOfElements(nof - 1);
- table->SetNumberOfDeletedElements(nod + 1);
+ table.SetNumberOfElements(nof - 1);
+ table.SetNumberOfDeletedElements(nod + 1);
return true;
}
@@ -765,9 +765,9 @@ Handle<Derived> SmallOrderedHashTable<Derived>::Rehash(Isolate* isolate,
DisallowHeapAllocation no_gc;
for (int old_entry = 0; old_entry < (nof + nod); ++old_entry) {
Object key = table->KeyAt(old_entry);
- if (key->IsTheHole(isolate)) continue;
+ if (key.IsTheHole(isolate)) continue;
- int hash = Smi::ToInt(key->GetHash());
+ int hash = Smi::ToInt(key.GetHash());
int bucket = new_table->HashToBucket(hash);
int chain = new_table->GetFirstEntry(bucket);
@@ -848,15 +848,15 @@ MaybeHandle<Derived> SmallOrderedHashTable<Derived>::Grow(
template <class Derived>
int SmallOrderedHashTable<Derived>::FindEntry(Isolate* isolate, Object key) {
DisallowHeapAllocation no_gc;
- Object hash = key->GetHash();
+ Object hash = key.GetHash();
- if (hash->IsUndefined(isolate)) return kNotFound;
+ if (hash.IsUndefined(isolate)) return kNotFound;
int entry = HashToFirstEntry(Smi::ToInt(hash));
// Walk the chain in the bucket to find the key.
while (entry != kNotFound) {
Object candidate_key = KeyAt(entry);
- if (candidate_key->SameValueZero(key)) return entry;
+ if (candidate_key.SameValueZero(key)) return entry;
entry = GetNextEntry(entry);
}
return kNotFound;
@@ -930,7 +930,6 @@ OrderedHashTableHandler<SmallOrderedNameDictionary,
OrderedNameDictionary>::Allocate(Isolate* isolate,
int capacity);
-#if !defined(V8_OS_WIN)
template <class SmallTable, class LargeTable>
bool OrderedHashTableHandler<SmallTable, LargeTable>::Delete(
Handle<HeapObject> table, Handle<Object> key) {
@@ -943,9 +942,7 @@ bool OrderedHashTableHandler<SmallTable, LargeTable>::Delete(
// down to a smaller hash table.
return LargeTable::Delete(Handle<LargeTable>::cast(table), key);
}
-#endif
-#if !defined(V8_OS_WIN)
template <class SmallTable, class LargeTable>
bool OrderedHashTableHandler<SmallTable, LargeTable>::HasKey(
Isolate* isolate, Handle<HeapObject> table, Handle<Object> key) {
@@ -956,7 +953,6 @@ bool OrderedHashTableHandler<SmallTable, LargeTable>::HasKey(
DCHECK(LargeTable::Is(table));
return LargeTable::HasKey(isolate, LargeTable::cast(*table), *key);
}
-#endif
template bool
OrderedHashTableHandler<SmallOrderedHashSet, OrderedHashSet>::HasKey(
@@ -1096,114 +1092,112 @@ void OrderedNameDictionaryHandler::SetEntry(Isolate* isolate, HeapObject table,
int entry, Object key, Object value,
PropertyDetails details) {
DisallowHeapAllocation no_gc;
- if (table->IsSmallOrderedNameDictionary()) {
- return SmallOrderedNameDictionary::cast(table)->SetEntry(
- isolate, entry, key, value, details);
+ if (table.IsSmallOrderedNameDictionary()) {
+ return SmallOrderedNameDictionary::cast(table).SetEntry(isolate, entry, key,
+ value, details);
}
- DCHECK(table->IsOrderedNameDictionary());
- return OrderedNameDictionary::cast(table)->SetEntry(isolate, entry, key,
- value, details);
+ DCHECK(table.IsOrderedNameDictionary());
+ return OrderedNameDictionary::cast(table).SetEntry(isolate, entry, key, value,
+ details);
}
int OrderedNameDictionaryHandler::FindEntry(Isolate* isolate, HeapObject table,
Name key) {
DisallowHeapAllocation no_gc;
- if (table->IsSmallOrderedNameDictionary()) {
- int entry =
- SmallOrderedNameDictionary::cast(table)->FindEntry(isolate, key);
+ if (table.IsSmallOrderedNameDictionary()) {
+ int entry = SmallOrderedNameDictionary::cast(table).FindEntry(isolate, key);
return entry == SmallOrderedNameDictionary::kNotFound
? OrderedNameDictionaryHandler::kNotFound
: entry;
}
- DCHECK(table->IsOrderedNameDictionary());
- int entry = OrderedNameDictionary::cast(table)->FindEntry(isolate, key);
+ DCHECK(table.IsOrderedNameDictionary());
+ int entry = OrderedNameDictionary::cast(table).FindEntry(isolate, key);
return entry == OrderedNameDictionary::kNotFound
? OrderedNameDictionaryHandler::kNotFound
: entry;
}
Object OrderedNameDictionaryHandler::ValueAt(HeapObject table, int entry) {
- if (table->IsSmallOrderedNameDictionary()) {
- return SmallOrderedNameDictionary::cast(table)->ValueAt(entry);
+ if (table.IsSmallOrderedNameDictionary()) {
+ return SmallOrderedNameDictionary::cast(table).ValueAt(entry);
}
- DCHECK(table->IsOrderedNameDictionary());
- return OrderedNameDictionary::cast(table)->ValueAt(entry);
+ DCHECK(table.IsOrderedNameDictionary());
+ return OrderedNameDictionary::cast(table).ValueAt(entry);
}
void OrderedNameDictionaryHandler::ValueAtPut(HeapObject table, int entry,
Object value) {
- if (table->IsSmallOrderedNameDictionary()) {
- return SmallOrderedNameDictionary::cast(table)->ValueAtPut(entry, value);
+ if (table.IsSmallOrderedNameDictionary()) {
+ return SmallOrderedNameDictionary::cast(table).ValueAtPut(entry, value);
}
- DCHECK(table->IsOrderedNameDictionary());
- OrderedNameDictionary::cast(table)->ValueAtPut(entry, value);
+ DCHECK(table.IsOrderedNameDictionary());
+ OrderedNameDictionary::cast(table).ValueAtPut(entry, value);
}
PropertyDetails OrderedNameDictionaryHandler::DetailsAt(HeapObject table,
int entry) {
- if (table->IsSmallOrderedNameDictionary()) {
- return SmallOrderedNameDictionary::cast(table)->DetailsAt(entry);
+ if (table.IsSmallOrderedNameDictionary()) {
+ return SmallOrderedNameDictionary::cast(table).DetailsAt(entry);
}
- DCHECK(table->IsOrderedNameDictionary());
- return OrderedNameDictionary::cast(table)->DetailsAt(entry);
+ DCHECK(table.IsOrderedNameDictionary());
+ return OrderedNameDictionary::cast(table).DetailsAt(entry);
}
void OrderedNameDictionaryHandler::DetailsAtPut(HeapObject table, int entry,
PropertyDetails details) {
- if (table->IsSmallOrderedNameDictionary()) {
- return SmallOrderedNameDictionary::cast(table)->DetailsAtPut(entry,
- details);
+ if (table.IsSmallOrderedNameDictionary()) {
+ return SmallOrderedNameDictionary::cast(table).DetailsAtPut(entry, details);
}
- DCHECK(table->IsOrderedNameDictionary());
- OrderedNameDictionary::cast(table)->DetailsAtPut(entry, details);
+ DCHECK(table.IsOrderedNameDictionary());
+ OrderedNameDictionary::cast(table).DetailsAtPut(entry, details);
}
int OrderedNameDictionaryHandler::Hash(HeapObject table) {
- if (table->IsSmallOrderedNameDictionary()) {
- return SmallOrderedNameDictionary::cast(table)->Hash();
+ if (table.IsSmallOrderedNameDictionary()) {
+ return SmallOrderedNameDictionary::cast(table).Hash();
}
- DCHECK(table->IsOrderedNameDictionary());
- return OrderedNameDictionary::cast(table)->Hash();
+ DCHECK(table.IsOrderedNameDictionary());
+ return OrderedNameDictionary::cast(table).Hash();
}
void OrderedNameDictionaryHandler::SetHash(HeapObject table, int hash) {
- if (table->IsSmallOrderedNameDictionary()) {
- return SmallOrderedNameDictionary::cast(table)->SetHash(hash);
+ if (table.IsSmallOrderedNameDictionary()) {
+ return SmallOrderedNameDictionary::cast(table).SetHash(hash);
}
- DCHECK(table->IsOrderedNameDictionary());
- OrderedNameDictionary::cast(table)->SetHash(hash);
+ DCHECK(table.IsOrderedNameDictionary());
+ OrderedNameDictionary::cast(table).SetHash(hash);
}
Name OrderedNameDictionaryHandler::KeyAt(HeapObject table, int entry) {
- if (table->IsSmallOrderedNameDictionary()) {
- return Name::cast(SmallOrderedNameDictionary::cast(table)->KeyAt(entry));
+ if (table.IsSmallOrderedNameDictionary()) {
+ return Name::cast(SmallOrderedNameDictionary::cast(table).KeyAt(entry));
}
- return Name::cast(OrderedNameDictionary::cast(table)->KeyAt(entry));
+ return Name::cast(OrderedNameDictionary::cast(table).KeyAt(entry));
}
int OrderedNameDictionaryHandler::NumberOfElements(HeapObject table) {
- if (table->IsSmallOrderedNameDictionary()) {
- return SmallOrderedNameDictionary::cast(table)->NumberOfElements();
+ if (table.IsSmallOrderedNameDictionary()) {
+ return SmallOrderedNameDictionary::cast(table).NumberOfElements();
}
- return OrderedNameDictionary::cast(table)->NumberOfElements();
+ return OrderedNameDictionary::cast(table).NumberOfElements();
}
int OrderedNameDictionaryHandler::Capacity(HeapObject table) {
- if (table->IsSmallOrderedNameDictionary()) {
- return SmallOrderedNameDictionary::cast(table)->Capacity();
+ if (table.IsSmallOrderedNameDictionary()) {
+ return SmallOrderedNameDictionary::cast(table).Capacity();
}
- return OrderedNameDictionary::cast(table)->Capacity();
+ return OrderedNameDictionary::cast(table).Capacity();
}
Handle<HeapObject> OrderedNameDictionaryHandler::Shrink(
@@ -1237,21 +1231,21 @@ template <class Derived, class TableType>
void OrderedHashTableIterator<Derived, TableType>::Transition() {
DisallowHeapAllocation no_allocation;
TableType table = TableType::cast(this->table());
- if (!table->IsObsolete()) return;
+ if (!table.IsObsolete()) return;
int index = Smi::ToInt(this->index());
- while (table->IsObsolete()) {
- TableType next_table = table->NextTable();
+ while (table.IsObsolete()) {
+ TableType next_table = table.NextTable();
if (index > 0) {
- int nod = table->NumberOfDeletedElements();
+ int nod = table.NumberOfDeletedElements();
if (nod == TableType::kClearedTableSentinel) {
index = 0;
} else {
int old_index = index;
for (int i = 0; i < nod; ++i) {
- int removed_index = table->RemovedIndexAt(i);
+ int removed_index = table.RemovedIndexAt(i);
if (removed_index >= old_index) break;
--index;
}
@@ -1274,9 +1268,9 @@ bool OrderedHashTableIterator<Derived, TableType>::HasMore() {
TableType table = TableType::cast(this->table());
int index = Smi::ToInt(this->index());
- int used_capacity = table->UsedCapacity();
+ int used_capacity = table.UsedCapacity();
- while (index < used_capacity && table->KeyAt(index)->IsTheHole(ro_roots)) {
+ while (index < used_capacity && table.KeyAt(index).IsTheHole(ro_roots)) {
index++;
}
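
The Transition() logic above is what lets V8's OrderedHashTable iterators survive rehashes: an obsolete table keeps a record of which entries were removed, and a live iterator follows the NextTable() chain, decrementing its saved index once for every removed slot that preceded it. A minimal standalone sketch of that index fix-up, under the assumption that the removed indices arrive sorted ascending (as RemovedIndexAt() yields them in the loop above):

#include <vector>

// Map an iterator position in an obsolete table to the equivalent position
// in its successor, given the entry indices removed during the rehash.
int AdjustIndexAcrossRehash(int index, const std::vector<int>& removed) {
  int adjusted = index;
  for (int removed_index : removed) {
    if (removed_index >= index) break;  // Removals at/after us don't shift us.
    --adjusted;                         // One fewer live entry before us.
  }
  return adjusted;
}

This mirrors the inner loop of Transition(); the kClearedTableSentinel case (whole table cleared, index reset to 0) is handled separately there.
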
diff --git a/deps/v8/src/objects/ordered-hash-table.h b/deps/v8/src/objects/ordered-hash-table.h
index 6afbb6b662..a83109ed90 100644
--- a/deps/v8/src/objects/ordered-hash-table.h
+++ b/deps/v8/src/objects/ordered-hash-table.h
@@ -6,11 +6,11 @@
#define V8_OBJECTS_ORDERED_HASH_TABLE_H_
#include "src/base/export-template.h"
-#include "src/globals.h"
+#include "src/common/globals.h"
#include "src/objects/fixed-array.h"
#include "src/objects/js-objects.h"
#include "src/objects/smi.h"
-#include "src/roots.h"
+#include "src/roots/roots.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -126,7 +126,7 @@ class OrderedHashTable : public FixedArray {
return get(EntryToIndex(entry));
}
- bool IsObsolete() { return !get(NextTableIndex())->IsSmi(); }
+ bool IsObsolete() { return !get(NextTableIndex()).IsSmi(); }
// The next newer table. This is only valid if the table is obsolete.
Derived NextTable() { return Derived::cast(get(NextTableIndex())); }
@@ -540,13 +540,13 @@ class SmallOrderedHashTable : public HeapObject {
byte getByte(Offset offset, ByteIndex index) const {
DCHECK(offset < DataTableStartOffset() ||
offset >= GetBucketsStartOffset());
- return READ_BYTE_FIELD(*this, offset + (index * kOneByteSize));
+ return ReadField<byte>(offset + (index * kOneByteSize));
}
void setByte(Offset offset, ByteIndex index, byte value) {
DCHECK(offset < DataTableStartOffset() ||
offset >= GetBucketsStartOffset());
- WRITE_BYTE_FIELD(*this, offset + (index * kOneByteSize), value);
+ WriteField<byte>(offset + (index * kOneByteSize), value);
}
Offset GetDataEntryOffset(int entry, int relative_index) const {
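
The getByte/setByte hunk above swaps the old READ_BYTE_FIELD/WRITE_BYTE_FIELD macros for templated ReadField<byte>/WriteField<byte> helpers. A rough sketch of the shape of such typed field access over a raw base address (illustration only; the real helpers also deal with tagged pointers, alignment, and write barriers):

#include <cstdint>

using Address = uintptr_t;
using byte = uint8_t;

// Toy heap object: fields live at fixed byte offsets from a base address.
struct RawObject {
  Address base;

  template <typename T>
  T ReadField(int offset) const {
    return *reinterpret_cast<const T*>(base + offset);
  }

  template <typename T>
  void WriteField(int offset, T value) {
    *reinterpret_cast<T*>(base + offset) = value;
  }
};
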
diff --git a/deps/v8/src/objects/promise.h b/deps/v8/src/objects/promise.h
index 0b15546d03..f7c60413d1 100644
--- a/deps/v8/src/objects/promise.h
+++ b/deps/v8/src/objects/promise.h
@@ -39,7 +39,7 @@ class PromiseReactionJobTask : public Microtask {
// Dispatched behavior.
DECL_CAST(PromiseReactionJobTask)
DECL_VERIFIER(PromiseReactionJobTask)
-
+ static const int kSizeOfAllPromiseReactionJobTasks = kHeaderSize;
OBJECT_CONSTRUCTORS(PromiseReactionJobTask, Microtask);
};
@@ -51,6 +51,11 @@ class PromiseFulfillReactionJobTask : public PromiseReactionJobTask {
DECL_PRINTER(PromiseFulfillReactionJobTask)
DECL_VERIFIER(PromiseFulfillReactionJobTask)
+ DEFINE_FIELD_OFFSET_CONSTANTS(
+ PromiseReactionJobTask::kHeaderSize,
+ TORQUE_GENERATED_PROMISE_FULFILL_REACTION_JOB_TASK_FIELDS)
+ STATIC_ASSERT(kSize == kSizeOfAllPromiseReactionJobTasks);
+
OBJECT_CONSTRUCTORS(PromiseFulfillReactionJobTask, PromiseReactionJobTask);
};
@@ -62,6 +67,11 @@ class PromiseRejectReactionJobTask : public PromiseReactionJobTask {
DECL_PRINTER(PromiseRejectReactionJobTask)
DECL_VERIFIER(PromiseRejectReactionJobTask)
+ DEFINE_FIELD_OFFSET_CONSTANTS(
+ PromiseReactionJobTask::kHeaderSize,
+ TORQUE_GENERATED_PROMISE_REJECT_REACTION_JOB_TASK_FIELDS)
+ STATIC_ASSERT(kSize == kSizeOfAllPromiseReactionJobTasks);
+
OBJECT_CONSTRUCTORS(PromiseRejectReactionJobTask, PromiseReactionJobTask);
};
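
The new DEFINE_FIELD_OFFSET_CONSTANTS blocks pull Torque-generated field offsets into both reaction-job-task subclasses, and the STATIC_ASSERTs pin each subclass's kSize to the base class's kSizeOfAllPromiseReactionJobTasks, keeping the two task kinds interchangeable in size. A miniature of the pattern (byte counts are hypothetical):

// The base class fixes the size every subclass must have; each subclass
// statically asserts that its generated layout didn't grow past it.
struct ReactionJobTaskBase {
  static const int kSizeOfAllTasks = 32;  // Hypothetical byte count.
};

struct FulfillTask : ReactionJobTaskBase {
  static const int kSize = 32;  // Would come from generated field offsets.
  static_assert(kSize == kSizeOfAllTasks, "all task kinds share one size");
};
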
diff --git a/deps/v8/src/objects/property-array-inl.h b/deps/v8/src/objects/property-array-inl.h
index fa3f4ccde2..f23e63e50d 100644
--- a/deps/v8/src/objects/property-array-inl.h
+++ b/deps/v8/src/objects/property-array-inl.h
@@ -8,8 +8,8 @@
#include "src/objects/property-array.h"
#include "src/heap/heap-write-barrier-inl.h"
-#include "src/objects-inl.h"
#include "src/objects/heap-object-inl.h"
+#include "src/objects/objects-inl.h"
#include "src/objects/smi-inl.h"
// Has to be the last include (doesn't have include guards):
@@ -77,6 +77,17 @@ void PropertyArray::SetHash(int hash) {
WRITE_FIELD(*this, kLengthAndHashOffset, Smi::FromInt(value));
}
+void PropertyArray::CopyElements(Isolate* isolate, int dst_index,
+ PropertyArray src, int src_index, int len,
+ WriteBarrierMode mode) {
+ if (len == 0) return;
+ DisallowHeapAllocation no_gc;
+
+ ObjectSlot dst_slot(data_start() + dst_index);
+ ObjectSlot src_slot(src.data_start() + src_index);
+ isolate->heap()->CopyRange(*this, dst_slot, src_slot, len, mode);
+}
+
} // namespace internal
} // namespace v8
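
The new PropertyArray::CopyElements above delegates a contiguous slot-range copy to Heap::CopyRange under a DisallowHeapAllocation scope, so no GC can move either array mid-copy, and the WriteBarrierMode tells the heap whether the destination needs barrier work. Reduced to its core (a plain slot copy with the barrier abstracted away; not V8's actual CopyRange):

#include <cstddef>

// Copy `len` pointer-sized slots from src to dst. In V8, Heap::CopyRange
// additionally records generational/marking barrier entries per `mode`.
void CopySlotRange(void** dst, void** src, size_t len) {
  if (len == 0) return;  // Mirrors the early-out in CopyElements above.
  for (size_t i = 0; i < len; ++i) dst[i] = src[i];
}
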
diff --git a/deps/v8/src/objects/property-array.h b/deps/v8/src/objects/property-array.h
index 1112de4ae6..0c8b40ece2 100644
--- a/deps/v8/src/objects/property-array.h
+++ b/deps/v8/src/objects/property-array.h
@@ -6,7 +6,7 @@
#define V8_OBJECTS_PROPERTY_ARRAY_H_
#include "src/objects/heap-object.h"
-#include "torque-generated/class-definitions-from-dsl.h"
+#include "torque-generated/field-offsets-tq.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -35,6 +35,10 @@ class PropertyArray : public HeapObject {
// Setter with explicit barrier mode.
inline void set(int index, Object value, WriteBarrierMode mode);
+ // Signature must be in sync with FixedArray::CopyElements().
+ inline void CopyElements(Isolate* isolate, int dst_index, PropertyArray src,
+ int src_index, int len, WriteBarrierMode mode);
+
// Gives access to raw memory which stores the array's data.
inline ObjectSlot data_start();
diff --git a/deps/v8/src/objects/property-cell.h b/deps/v8/src/objects/property-cell.h
index 7bdcfb8e49..75a5132728 100644
--- a/deps/v8/src/objects/property-cell.h
+++ b/deps/v8/src/objects/property-cell.h
@@ -6,7 +6,7 @@
#define V8_OBJECTS_PROPERTY_CELL_H_
#include "src/objects/heap-object.h"
-#include "torque-generated/class-definitions-from-dsl.h"
+#include "torque-generated/field-offsets-tq.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
diff --git a/deps/v8/src/objects/property-descriptor-object-inl.h b/deps/v8/src/objects/property-descriptor-object-inl.h
index 66ca48164f..7754de5964 100644
--- a/deps/v8/src/objects/property-descriptor-object-inl.h
+++ b/deps/v8/src/objects/property-descriptor-object-inl.h
@@ -5,7 +5,7 @@
#ifndef V8_OBJECTS_PROPERTY_DESCRIPTOR_OBJECT_INL_H_
#define V8_OBJECTS_PROPERTY_DESCRIPTOR_OBJECT_INL_H_
-#include "src/objects-inl.h"
+#include "src/objects/objects-inl.h"
#include "src/objects/property-descriptor-object.h"
// Has to be the last include (doesn't have include guards):
diff --git a/deps/v8/src/objects/property-descriptor-object.h b/deps/v8/src/objects/property-descriptor-object.h
index 7c90369be7..f4930c4a31 100644
--- a/deps/v8/src/objects/property-descriptor-object.h
+++ b/deps/v8/src/objects/property-descriptor-object.h
@@ -5,8 +5,8 @@
#ifndef V8_OBJECTS_PROPERTY_DESCRIPTOR_OBJECT_H_
#define V8_OBJECTS_PROPERTY_DESCRIPTOR_OBJECT_H_
-#include "src/objects.h"
#include "src/objects/fixed-array.h"
+#include "src/objects/objects.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
diff --git a/deps/v8/src/objects/property-descriptor.cc b/deps/v8/src/objects/property-descriptor.cc
new file mode 100644
index 0000000000..b3b05deceb
--- /dev/null
+++ b/deps/v8/src/objects/property-descriptor.cc
@@ -0,0 +1,370 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/objects/property-descriptor.h"
+
+#include "src/execution/isolate-inl.h"
+#include "src/heap/factory.h"
+#include "src/heap/heap-inl.h" // For ToBoolean. TODO(jkummerow): Drop.
+#include "src/init/bootstrapper.h"
+#include "src/objects/lookup.h"
+#include "src/objects/objects-inl.h"
+#include "src/objects/property-descriptor-object-inl.h"
+
+namespace v8 {
+namespace internal {
+
+namespace {
+
+// Helper function for ToPropertyDescriptor. Comments describe steps for
+// "enumerable", other properties are handled the same way.
+// Returns false if an exception was thrown.
+bool GetPropertyIfPresent(Handle<JSReceiver> receiver, Handle<String> name,
+ Handle<Object>* value) {
+ LookupIterator it(receiver, name, receiver);
+ // 4. Let hasEnumerable be HasProperty(Obj, "enumerable").
+ Maybe<bool> has_property = JSReceiver::HasProperty(&it);
+ // 5. ReturnIfAbrupt(hasEnumerable).
+ if (has_property.IsNothing()) return false;
+ // 6. If hasEnumerable is true, then
+ if (has_property.FromJust() == true) {
+ // 6a. Let enum be ToBoolean(Get(Obj, "enumerable")).
+ // 6b. ReturnIfAbrupt(enum).
+ if (!Object::GetProperty(&it).ToHandle(value)) return false;
+ }
+ return true;
+}
+
+// Helper function for ToPropertyDescriptor. Handles the case of "simple"
+// objects: nothing on the prototype chain, just own fast data properties.
+// Must not have observable side effects, because the slow path will restart
+// the entire conversion!
+bool ToPropertyDescriptorFastPath(Isolate* isolate, Handle<JSReceiver> obj,
+ PropertyDescriptor* desc) {
+ if (!obj->IsJSObject()) return false;
+ Map map = Handle<JSObject>::cast(obj)->map();
+ if (map.instance_type() != JS_OBJECT_TYPE) return false;
+ if (map.is_access_check_needed()) return false;
+ if (map.prototype() != *isolate->initial_object_prototype()) return false;
+ // During bootstrapping, the object_function_prototype_map hasn't been
+ // set up yet.
+ if (isolate->bootstrapper()->IsActive()) return false;
+ if (JSObject::cast(map.prototype()).map() !=
+ isolate->native_context()->object_function_prototype_map()) {
+ return false;
+ }
+ // TODO(jkummerow): support dictionary properties?
+ if (map.is_dictionary_map()) return false;
+ Handle<DescriptorArray> descs =
+ Handle<DescriptorArray>(map.instance_descriptors(), isolate);
+ for (int i = 0; i < map.NumberOfOwnDescriptors(); i++) {
+ PropertyDetails details = descs->GetDetails(i);
+ Name key = descs->GetKey(i);
+ Handle<Object> value;
+ if (details.location() == kField) {
+ if (details.kind() == kData) {
+ value = JSObject::FastPropertyAt(Handle<JSObject>::cast(obj),
+ details.representation(),
+ FieldIndex::ForDescriptor(map, i));
+ } else {
+ DCHECK_EQ(kAccessor, details.kind());
+ // Bail out to slow path.
+ return false;
+ }
+
+ } else {
+ DCHECK_EQ(kDescriptor, details.location());
+ if (details.kind() == kData) {
+ value = handle(descs->GetStrongValue(i), isolate);
+ } else {
+ DCHECK_EQ(kAccessor, details.kind());
+ // Bail out to slow path.
+ return false;
+ }
+ }
+ ReadOnlyRoots roots(isolate);
+ if (key == roots.enumerable_string()) {
+ desc->set_enumerable(value->BooleanValue(isolate));
+ } else if (key == roots.configurable_string()) {
+ desc->set_configurable(value->BooleanValue(isolate));
+ } else if (key == roots.value_string()) {
+ desc->set_value(value);
+ } else if (key == roots.writable_string()) {
+ desc->set_writable(value->BooleanValue(isolate));
+ } else if (key == roots.get_string()) {
+ // Bail out to slow path to throw an exception if necessary.
+ if (!value->IsCallable()) return false;
+ desc->set_get(value);
+ } else if (key == roots.set_string()) {
+ // Bail out to slow path to throw an exception if necessary.
+ if (!value->IsCallable()) return false;
+ desc->set_set(value);
+ }
+ }
+ if ((desc->has_get() || desc->has_set()) &&
+ (desc->has_value() || desc->has_writable())) {
+ // Bail out to slow path to throw an exception.
+ return false;
+ }
+ return true;
+}
+
+void CreateDataProperty(Handle<JSObject> object, Handle<String> name,
+ Handle<Object> value) {
+ LookupIterator it(object, name, object, LookupIterator::OWN_SKIP_INTERCEPTOR);
+ Maybe<bool> result = JSObject::CreateDataProperty(&it, value);
+ CHECK(result.IsJust() && result.FromJust());
+}
+
+} // namespace
+
+// ES6 6.2.4.4 "FromPropertyDescriptor"
+Handle<Object> PropertyDescriptor::ToObject(Isolate* isolate) {
+ DCHECK(!(PropertyDescriptor::IsAccessorDescriptor(this) &&
+ PropertyDescriptor::IsDataDescriptor(this)));
+ Factory* factory = isolate->factory();
+ if (IsRegularAccessorProperty()) {
+ // Fast case for regular accessor properties.
+ Handle<JSObject> result = factory->NewJSObjectFromMap(
+ isolate->accessor_property_descriptor_map());
+ result->InObjectPropertyAtPut(JSAccessorPropertyDescriptor::kGetIndex,
+ *get());
+ result->InObjectPropertyAtPut(JSAccessorPropertyDescriptor::kSetIndex,
+ *set());
+ result->InObjectPropertyAtPut(
+ JSAccessorPropertyDescriptor::kEnumerableIndex,
+ isolate->heap()->ToBoolean(enumerable()));
+ result->InObjectPropertyAtPut(
+ JSAccessorPropertyDescriptor::kConfigurableIndex,
+ isolate->heap()->ToBoolean(configurable()));
+ return result;
+ }
+ if (IsRegularDataProperty()) {
+ // Fast case for regular data properties.
+ Handle<JSObject> result =
+ factory->NewJSObjectFromMap(isolate->data_property_descriptor_map());
+ result->InObjectPropertyAtPut(JSDataPropertyDescriptor::kValueIndex,
+ *value());
+ result->InObjectPropertyAtPut(JSDataPropertyDescriptor::kWritableIndex,
+ isolate->heap()->ToBoolean(writable()));
+ result->InObjectPropertyAtPut(JSDataPropertyDescriptor::kEnumerableIndex,
+ isolate->heap()->ToBoolean(enumerable()));
+ result->InObjectPropertyAtPut(JSDataPropertyDescriptor::kConfigurableIndex,
+ isolate->heap()->ToBoolean(configurable()));
+ return result;
+ }
+ Handle<JSObject> result = factory->NewJSObject(isolate->object_function());
+ if (has_value()) {
+ CreateDataProperty(result, factory->value_string(), value());
+ }
+ if (has_writable()) {
+ CreateDataProperty(result, factory->writable_string(),
+ factory->ToBoolean(writable()));
+ }
+ if (has_get()) {
+ CreateDataProperty(result, factory->get_string(), get());
+ }
+ if (has_set()) {
+ CreateDataProperty(result, factory->set_string(), set());
+ }
+ if (has_enumerable()) {
+ CreateDataProperty(result, factory->enumerable_string(),
+ factory->ToBoolean(enumerable()));
+ }
+ if (has_configurable()) {
+ CreateDataProperty(result, factory->configurable_string(),
+ factory->ToBoolean(configurable()));
+ }
+ return result;
+}
+
+// ES6 6.2.4.5
+// Returns false in case of exception.
+// static
+bool PropertyDescriptor::ToPropertyDescriptor(Isolate* isolate,
+ Handle<Object> obj,
+ PropertyDescriptor* desc) {
+ // 1. ReturnIfAbrupt(Obj).
+ // 2. If Type(Obj) is not Object, throw a TypeError exception.
+ if (!obj->IsJSReceiver()) {
+ isolate->Throw(*isolate->factory()->NewTypeError(
+ MessageTemplate::kPropertyDescObject, obj));
+ return false;
+ }
+ // 3. Let desc be a new Property Descriptor that initially has no fields.
+ DCHECK(desc->is_empty());
+
+ Handle<JSReceiver> receiver = Handle<JSReceiver>::cast(obj);
+ if (ToPropertyDescriptorFastPath(isolate, receiver, desc)) {
+ return true;
+ }
+
+ // enumerable?
+ Handle<Object> enumerable;
+ // 4 through 6b.
+ if (!GetPropertyIfPresent(receiver, isolate->factory()->enumerable_string(),
+ &enumerable)) {
+ return false;
+ }
+ // 6c. Set the [[Enumerable]] field of desc to enum.
+ if (!enumerable.is_null()) {
+ desc->set_enumerable(enumerable->BooleanValue(isolate));
+ }
+
+ // configurable?
+ Handle<Object> configurable;
+ // 7 through 9b.
+ if (!GetPropertyIfPresent(receiver, isolate->factory()->configurable_string(),
+ &configurable)) {
+ return false;
+ }
+ // 9c. Set the [[Configurable]] field of desc to conf.
+ if (!configurable.is_null()) {
+ desc->set_configurable(configurable->BooleanValue(isolate));
+ }
+
+ // value?
+ Handle<Object> value;
+ // 10 through 12b.
+ if (!GetPropertyIfPresent(receiver, isolate->factory()->value_string(),
+ &value)) {
+ return false;
+ }
+ // 12c. Set the [[Value]] field of desc to value.
+ if (!value.is_null()) desc->set_value(value);
+
+ // writable?
+ Handle<Object> writable;
+ // 13 through 15b.
+ if (!GetPropertyIfPresent(receiver, isolate->factory()->writable_string(),
+ &writable)) {
+ return false;
+ }
+ // 15c. Set the [[Writable]] field of desc to writable.
+ if (!writable.is_null()) desc->set_writable(writable->BooleanValue(isolate));
+
+ // getter?
+ Handle<Object> getter;
+ // 16 through 18b.
+ if (!GetPropertyIfPresent(receiver, isolate->factory()->get_string(),
+ &getter)) {
+ return false;
+ }
+ if (!getter.is_null()) {
+ // 18c. If IsCallable(getter) is false and getter is not undefined,
+ // throw a TypeError exception.
+ if (!getter->IsCallable() && !getter->IsUndefined(isolate)) {
+ isolate->Throw(*isolate->factory()->NewTypeError(
+ MessageTemplate::kObjectGetterCallable, getter));
+ return false;
+ }
+ // 18d. Set the [[Get]] field of desc to getter.
+ desc->set_get(getter);
+ }
+ // setter?
+ Handle<Object> setter;
+ // 19 through 21b.
+ if (!GetPropertyIfPresent(receiver, isolate->factory()->set_string(),
+ &setter)) {
+ return false;
+ }
+ if (!setter.is_null()) {
+ // 21c. If IsCallable(setter) is false and setter is not undefined,
+ // throw a TypeError exception.
+ if (!setter->IsCallable() && !setter->IsUndefined(isolate)) {
+ isolate->Throw(*isolate->factory()->NewTypeError(
+ MessageTemplate::kObjectSetterCallable, setter));
+ return false;
+ }
+ // 21d. Set the [[Set]] field of desc to setter.
+ desc->set_set(setter);
+ }
+
+ // 22. If either desc.[[Get]] or desc.[[Set]] is present, then
+ // 22a. If either desc.[[Value]] or desc.[[Writable]] is present,
+ // throw a TypeError exception.
+ if ((desc->has_get() || desc->has_set()) &&
+ (desc->has_value() || desc->has_writable())) {
+ isolate->Throw(*isolate->factory()->NewTypeError(
+ MessageTemplate::kValueAndAccessor, obj));
+ return false;
+ }
+
+ // 23. Return desc.
+ return true;
+}
+
+// ES6 6.2.4.6
+// static
+void PropertyDescriptor::CompletePropertyDescriptor(Isolate* isolate,
+ PropertyDescriptor* desc) {
+ // 1. ReturnIfAbrupt(Desc).
+ // 2. Assert: Desc is a Property Descriptor.
+ // 3. Let like be Record{
+ // [[Value]]: undefined, [[Writable]]: false,
+ // [[Get]]: undefined, [[Set]]: undefined,
+ // [[Enumerable]]: false, [[Configurable]]: false}.
+ // 4. If either IsGenericDescriptor(Desc) or IsDataDescriptor(Desc) is true,
+ // then:
+ if (!IsAccessorDescriptor(desc)) {
+ // 4a. If Desc does not have a [[Value]] field, set Desc.[[Value]] to
+ // like.[[Value]].
+ if (!desc->has_value()) {
+ desc->set_value(isolate->factory()->undefined_value());
+ }
+ // 4b. If Desc does not have a [[Writable]] field, set Desc.[[Writable]]
+ // to like.[[Writable]].
+ if (!desc->has_writable()) desc->set_writable(false);
+ } else {
+ // 5. Else,
+ // 5a. If Desc does not have a [[Get]] field, set Desc.[[Get]] to
+ // like.[[Get]].
+ if (!desc->has_get()) {
+ desc->set_get(isolate->factory()->undefined_value());
+ }
+ // 5b. If Desc does not have a [[Set]] field, set Desc.[[Set]] to
+ // like.[[Set]].
+ if (!desc->has_set()) {
+ desc->set_set(isolate->factory()->undefined_value());
+ }
+ }
+ // 6. If Desc does not have an [[Enumerable]] field, set
+ // Desc.[[Enumerable]] to like.[[Enumerable]].
+ if (!desc->has_enumerable()) desc->set_enumerable(false);
+ // 7. If Desc does not have a [[Configurable]] field, set
+ // Desc.[[Configurable]] to like.[[Configurable]].
+ if (!desc->has_configurable()) desc->set_configurable(false);
+ // 8. Return Desc.
+}
+
+Handle<PropertyDescriptorObject> PropertyDescriptor::ToPropertyDescriptorObject(
+ Isolate* isolate) {
+ Handle<PropertyDescriptorObject> obj = Handle<PropertyDescriptorObject>::cast(
+ isolate->factory()->NewFixedArray(PropertyDescriptorObject::kLength));
+
+ int flags =
+ PropertyDescriptorObject::IsEnumerableBit::encode(enumerable_) |
+ PropertyDescriptorObject::HasEnumerableBit::encode(has_enumerable_) |
+ PropertyDescriptorObject::IsConfigurableBit::encode(configurable_) |
+ PropertyDescriptorObject::HasConfigurableBit::encode(has_configurable_) |
+ PropertyDescriptorObject::IsWritableBit::encode(writable_) |
+ PropertyDescriptorObject::HasWritableBit::encode(has_writable_) |
+ PropertyDescriptorObject::HasValueBit::encode(has_value()) |
+ PropertyDescriptorObject::HasGetBit::encode(has_get()) |
+ PropertyDescriptorObject::HasSetBit::encode(has_set());
+
+ obj->set(PropertyDescriptorObject::kFlagsIndex, Smi::FromInt(flags));
+
+ obj->set(PropertyDescriptorObject::kValueIndex,
+ has_value() ? *value_ : ReadOnlyRoots(isolate).the_hole_value());
+ obj->set(PropertyDescriptorObject::kGetIndex,
+ has_get() ? *get_ : ReadOnlyRoots(isolate).the_hole_value());
+ obj->set(PropertyDescriptorObject::kSetIndex,
+ has_set() ? *set_ : ReadOnlyRoots(isolate).the_hole_value());
+
+ return obj;
+}
+
+} // namespace internal
+} // namespace v8
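
A quick way to see what CompletePropertyDescriptor accomplishes: after it runs, every field of the descriptor is populated, with absent fields taking the spec's `like`-record defaults (undefined for values and accessors, false for the flags). A miniature with a made-up MiniDesc stand-in:

#include <optional>

// Stand-in for PropertyDescriptor: absent fields are disengaged optionals.
struct MiniDesc {
  std::optional<bool> writable, enumerable, configurable;
  bool has_accessor = false;  // Stands in for has_get() || has_set().
};

// Miniature of ES6 CompletePropertyDescriptor (6.2.4.6).
void Complete(MiniDesc* d) {
  // Data/generic descriptors get [[Writable]]: false when absent.
  if (!d->has_accessor && !d->writable.has_value()) d->writable = false;
  if (!d->enumerable.has_value()) d->enumerable = false;
  if (!d->configurable.has_value()) d->configurable = false;
}
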
diff --git a/deps/v8/src/objects/property-descriptor.h b/deps/v8/src/objects/property-descriptor.h
new file mode 100644
index 0000000000..22fb1d6ff8
--- /dev/null
+++ b/deps/v8/src/objects/property-descriptor.h
@@ -0,0 +1,134 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_PROPERTY_DESCRIPTOR_H_
+#define V8_OBJECTS_PROPERTY_DESCRIPTOR_H_
+
+#include "src/handles/handles.h"
+#include "src/objects/property-details.h"
+
+namespace v8 {
+namespace internal {
+
+class Isolate;
+class Object;
+class PropertyDescriptorObject;
+
+class PropertyDescriptor {
+ public:
+ PropertyDescriptor()
+ : enumerable_(false),
+ has_enumerable_(false),
+ configurable_(false),
+ has_configurable_(false),
+ writable_(false),
+ has_writable_(false) {}
+
+ // ES6 6.2.4.1
+ static bool IsAccessorDescriptor(PropertyDescriptor* desc) {
+ return desc->has_get() || desc->has_set();
+ }
+
+ // ES6 6.2.4.2
+ static bool IsDataDescriptor(PropertyDescriptor* desc) {
+ return desc->has_value() || desc->has_writable();
+ }
+
+ // ES6 6.2.4.3
+ static bool IsGenericDescriptor(PropertyDescriptor* desc) {
+ return !IsAccessorDescriptor(desc) && !IsDataDescriptor(desc);
+ }
+
+ // ES6 6.2.4.4
+ Handle<Object> ToObject(Isolate* isolate);
+
+ Handle<PropertyDescriptorObject> ToPropertyDescriptorObject(Isolate* isolate);
+
+ // ES6 6.2.4.5
+ static bool ToPropertyDescriptor(Isolate* isolate, Handle<Object> obj,
+ PropertyDescriptor* desc);
+
+ // ES6 6.2.4.6
+ static void CompletePropertyDescriptor(Isolate* isolate,
+ PropertyDescriptor* desc);
+
+ bool is_empty() const {
+ return !has_enumerable() && !has_configurable() && !has_writable() &&
+ !has_value() && !has_get() && !has_set();
+ }
+
+ bool IsRegularAccessorProperty() const {
+ return has_configurable() && has_enumerable() && !has_value() &&
+ !has_writable() && has_get() && has_set();
+ }
+
+ bool IsRegularDataProperty() const {
+ return has_configurable() && has_enumerable() && has_value() &&
+ has_writable() && !has_get() && !has_set();
+ }
+
+ bool enumerable() const { return enumerable_; }
+ void set_enumerable(bool enumerable) {
+ enumerable_ = enumerable;
+ has_enumerable_ = true;
+ }
+ bool has_enumerable() const { return has_enumerable_; }
+
+ bool configurable() const { return configurable_; }
+ void set_configurable(bool configurable) {
+ configurable_ = configurable;
+ has_configurable_ = true;
+ }
+ bool has_configurable() const { return has_configurable_; }
+
+ Handle<Object> value() const { return value_; }
+ void set_value(Handle<Object> value) { value_ = value; }
+ bool has_value() const { return !value_.is_null(); }
+
+ bool writable() const { return writable_; }
+ void set_writable(bool writable) {
+ writable_ = writable;
+ has_writable_ = true;
+ }
+ bool has_writable() const { return has_writable_; }
+
+ Handle<Object> get() const { return get_; }
+ void set_get(Handle<Object> get) { get_ = get; }
+ bool has_get() const { return !get_.is_null(); }
+
+ Handle<Object> set() const { return set_; }
+ void set_set(Handle<Object> set) { set_ = set; }
+ bool has_set() const { return !set_.is_null(); }
+
+ Handle<Object> name() const { return name_; }
+ void set_name(Handle<Object> name) { name_ = name; }
+
+ PropertyAttributes ToAttributes() {
+ return static_cast<PropertyAttributes>(
+ (has_enumerable() && !enumerable() ? DONT_ENUM : NONE) |
+ (has_configurable() && !configurable() ? DONT_DELETE : NONE) |
+ (has_writable() && !writable() ? READ_ONLY : NONE));
+ }
+
+ private:
+ bool enumerable_ : 1;
+ bool has_enumerable_ : 1;
+ bool configurable_ : 1;
+ bool has_configurable_ : 1;
+ bool writable_ : 1;
+ bool has_writable_ : 1;
+ Handle<Object> value_;
+ Handle<Object> get_;
+ Handle<Object> set_;
+ Handle<Object> name_;
+
+ // Some compilers (Xcode 5.1, ARM GCC 4.9) insist on having a copy
+ // constructor for std::vector<PropertyDescriptor>, so we can't
+ // DISALLOW_COPY_AND_ASSIGN(PropertyDescriptor); here.
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_OBJECTS_PROPERTY_DESCRIPTOR_H_
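
ToAttributes() above is the bridge from the tri-state descriptor fields to the engine's attribute bits: a restriction bit is set only when the field is both present and restrictive, so absent fields stay permissive. A runnable check of that bit math (mirroring the expression above, not calling into V8):

#include <cassert>

enum Attr { NONE = 0, READ_ONLY = 1, DONT_ENUM = 2, DONT_DELETE = 4 };

int ToAttrs(bool has_w, bool w, bool has_e, bool e, bool has_c, bool c) {
  return (has_w && !w ? READ_ONLY : NONE) |
         (has_e && !e ? DONT_ENUM : NONE) |
         (has_c && !c ? DONT_DELETE : NONE);
}

int main() {
  // {writable: false, enumerable: true, configurable: true} -> READ_ONLY.
  assert(ToAttrs(true, false, true, true, true, true) == READ_ONLY);
  // An empty descriptor maps to NONE: absent fields never set a bit.
  assert(ToAttrs(false, false, false, false, false, false) == NONE);
  return 0;
}
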
diff --git a/deps/v8/src/objects/property-details.h b/deps/v8/src/objects/property-details.h
new file mode 100644
index 0000000000..7836575edf
--- /dev/null
+++ b/deps/v8/src/objects/property-details.h
@@ -0,0 +1,409 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_PROPERTY_DETAILS_H_
+#define V8_OBJECTS_PROPERTY_DETAILS_H_
+
+#include "include/v8.h"
+#include "src/utils/allocation.h"
+// TODO(bmeurer): Remove once FLAG_modify_field_representation_inplace is gone.
+#include "src/flags/flags.h"
+#include "src/utils/utils.h"
+
+namespace v8 {
+namespace internal {
+
+// ES6 6.1.7.1
+enum PropertyAttributes {
+ NONE = ::v8::None,
+ READ_ONLY = ::v8::ReadOnly,
+ DONT_ENUM = ::v8::DontEnum,
+ DONT_DELETE = ::v8::DontDelete,
+
+ ALL_ATTRIBUTES_MASK = READ_ONLY | DONT_ENUM | DONT_DELETE,
+
+ SEALED = DONT_DELETE,
+ FROZEN = SEALED | READ_ONLY,
+
+ ABSENT = 64, // Used in runtime to indicate a property is absent.
+ // ABSENT can never be stored in or returned from a descriptor's attributes
+ // bitfield. It is only used as a return value meaning the attributes of
+ // a non-existent property.
+};
+
+enum PropertyFilter {
+ ALL_PROPERTIES = 0,
+ ONLY_WRITABLE = 1,
+ ONLY_ENUMERABLE = 2,
+ ONLY_CONFIGURABLE = 4,
+ SKIP_STRINGS = 8,
+ SKIP_SYMBOLS = 16,
+ ONLY_ALL_CAN_READ = 32,
+ PRIVATE_NAMES_ONLY = 64,
+ ENUMERABLE_STRINGS = ONLY_ENUMERABLE | SKIP_SYMBOLS,
+};
+// Enable fast comparisons of PropertyAttributes against PropertyFilters.
+STATIC_ASSERT(ALL_PROPERTIES == static_cast<PropertyFilter>(NONE));
+STATIC_ASSERT(ONLY_WRITABLE == static_cast<PropertyFilter>(READ_ONLY));
+STATIC_ASSERT(ONLY_ENUMERABLE == static_cast<PropertyFilter>(DONT_ENUM));
+STATIC_ASSERT(ONLY_CONFIGURABLE == static_cast<PropertyFilter>(DONT_DELETE));
+STATIC_ASSERT(((SKIP_STRINGS | SKIP_SYMBOLS | ONLY_ALL_CAN_READ) &
+ ALL_ATTRIBUTES_MASK) == 0);
+STATIC_ASSERT(ALL_PROPERTIES ==
+ static_cast<PropertyFilter>(v8::PropertyFilter::ALL_PROPERTIES));
+STATIC_ASSERT(ONLY_WRITABLE ==
+ static_cast<PropertyFilter>(v8::PropertyFilter::ONLY_WRITABLE));
+STATIC_ASSERT(ONLY_ENUMERABLE ==
+ static_cast<PropertyFilter>(v8::PropertyFilter::ONLY_ENUMERABLE));
+STATIC_ASSERT(ONLY_CONFIGURABLE == static_cast<PropertyFilter>(
+ v8::PropertyFilter::ONLY_CONFIGURABLE));
+STATIC_ASSERT(SKIP_STRINGS ==
+ static_cast<PropertyFilter>(v8::PropertyFilter::SKIP_STRINGS));
+STATIC_ASSERT(SKIP_SYMBOLS ==
+ static_cast<PropertyFilter>(v8::PropertyFilter::SKIP_SYMBOLS));
+
+class Smi;
+class TypeInfo;
+
+// Order of kinds is significant.
+// Must fit in the BitField PropertyDetails::KindField.
+enum PropertyKind { kData = 0, kAccessor = 1 };
+
+// Order of modes is significant.
+// Must fit in the BitField PropertyDetails::LocationField.
+enum PropertyLocation { kField = 0, kDescriptor = 1 };
+
+// Order of modes is significant.
+// Must fit in the BitField PropertyDetails::ConstnessField.
+enum class PropertyConstness { kMutable = 0, kConst = 1 };
+
+class Representation {
+ public:
+ enum Kind { kNone, kSmi, kDouble, kHeapObject, kTagged, kNumRepresentations };
+
+ Representation() : kind_(kNone) {}
+
+ static Representation None() { return Representation(kNone); }
+ static Representation Tagged() { return Representation(kTagged); }
+ static Representation Smi() { return Representation(kSmi); }
+ static Representation Double() { return Representation(kDouble); }
+ static Representation HeapObject() { return Representation(kHeapObject); }
+
+ static Representation FromKind(Kind kind) { return Representation(kind); }
+
+ bool Equals(const Representation& other) const {
+ return kind_ == other.kind_;
+ }
+
+ bool IsCompatibleForLoad(const Representation& other) const {
+ return IsDouble() == other.IsDouble();
+ }
+
+ bool IsCompatibleForStore(const Representation& other) const {
+ return Equals(other);
+ }
+
+ bool CanBeInPlaceChangedTo(const Representation& other) const {
+ // If it's just a representation generalization case (i.e. property kind and
+ // attributes stay unchanged), it's fine to transition from None to anything
+ // but double without any modification to the object, because the default
+ // uninitialized value for representation None can be overwritten by both
+ // smi and tagged values. Doubles, however, would require a box allocation.
+ if (IsNone()) return !other.IsDouble();
+ if (!FLAG_modify_field_representation_inplace) return false;
+ return (IsSmi() || IsHeapObject()) && other.IsTagged();
+ }
+
+ bool is_more_general_than(const Representation& other) const {
+ if (IsHeapObject()) return other.IsNone();
+ return kind_ > other.kind_;
+ }
+
+ bool fits_into(const Representation& other) const {
+ return other.is_more_general_than(*this) || other.Equals(*this);
+ }
+
+ Representation generalize(Representation other) {
+ if (other.fits_into(*this)) return *this;
+ if (other.is_more_general_than(*this)) return other;
+ return Representation::Tagged();
+ }
+
+ int size() const {
+ DCHECK(!IsNone());
+ if (IsDouble()) return kDoubleSize;
+ DCHECK(IsTagged() || IsSmi() || IsHeapObject());
+ return kTaggedSize;
+ }
+
+ Kind kind() const { return static_cast<Kind>(kind_); }
+ bool IsNone() const { return kind_ == kNone; }
+ bool IsTagged() const { return kind_ == kTagged; }
+ bool IsSmi() const { return kind_ == kSmi; }
+ bool IsSmiOrTagged() const { return IsSmi() || IsTagged(); }
+ bool IsDouble() const { return kind_ == kDouble; }
+ bool IsHeapObject() const { return kind_ == kHeapObject; }
+
+ const char* Mnemonic() const {
+ switch (kind_) {
+ case kNone:
+ return "v";
+ case kTagged:
+ return "t";
+ case kSmi:
+ return "s";
+ case kDouble:
+ return "d";
+ case kHeapObject:
+ return "h";
+ }
+ UNREACHABLE();
+ }
+
+ private:
+ explicit Representation(Kind k) : kind_(k) {}
+
+ // Make sure kind fits in int8.
+ STATIC_ASSERT(kNumRepresentations <= (1 << kBitsPerByte));
+
+ int8_t kind_;
+};
+
+static const int kDescriptorIndexBitCount = 10;
+static const int kFirstInobjectPropertyOffsetBitCount = 7;
+// The maximum number of descriptors we want in a descriptor array. It should
+// fit in a page, and the following should also hold:
+// kMaxNumberOfDescriptors + kFieldsAdded <= PropertyArray::kMaxLength.
+static const int kMaxNumberOfDescriptors = (1 << kDescriptorIndexBitCount) - 4;
+static const int kInvalidEnumCacheSentinel =
+ (1 << kDescriptorIndexBitCount) - 1;
+
+enum class PropertyCellType {
+ // Meaningful when a property cell does not contain the hole.
+ kUndefined, // The PREMONOMORPHIC state of property cells.
+ kConstant, // Cell has been assigned only once.
+ kConstantType, // Cell has been assigned only one type.
+ kMutable, // Cell will no longer be tracked as constant.
+
+ // Meaningful when a property cell contains the hole.
+ kUninitialized = kUndefined, // Cell has never been initialized.
+ kInvalidated = kConstant, // Cell has been deleted, invalidated or never
+ // existed.
+
+ // For dictionaries not holding cells.
+ kNoCell = kMutable,
+};
+
+enum class PropertyCellConstantType {
+ kSmi,
+ kStableMap,
+};
+
+// PropertyDetails captures type and attributes for a property.
+// They are used both in property dictionaries and instance descriptors.
+class PropertyDetails {
+ public:
+ // Property details for dictionary mode properties/elements.
+ PropertyDetails(PropertyKind kind, PropertyAttributes attributes,
+ PropertyCellType cell_type, int dictionary_index = 0) {
+ value_ = KindField::encode(kind) | LocationField::encode(kField) |
+ AttributesField::encode(attributes) |
+ DictionaryStorageField::encode(dictionary_index) |
+ PropertyCellTypeField::encode(cell_type);
+ }
+
+ // Property details for fast mode properties.
+ PropertyDetails(PropertyKind kind, PropertyAttributes attributes,
+ PropertyLocation location, PropertyConstness constness,
+ Representation representation, int field_index = 0) {
+ value_ = KindField::encode(kind) | AttributesField::encode(attributes) |
+ LocationField::encode(location) |
+ ConstnessField::encode(constness) |
+ RepresentationField::encode(EncodeRepresentation(representation)) |
+ FieldIndexField::encode(field_index);
+ }
+
+ static PropertyDetails Empty(
+ PropertyCellType cell_type = PropertyCellType::kNoCell) {
+ return PropertyDetails(kData, NONE, cell_type);
+ }
+
+ int pointer() const { return DescriptorPointer::decode(value_); }
+
+ PropertyDetails set_pointer(int i) const {
+ return PropertyDetails(value_, i);
+ }
+
+ PropertyDetails set_cell_type(PropertyCellType type) const {
+ PropertyDetails details = *this;
+ details.value_ = PropertyCellTypeField::update(details.value_, type);
+ return details;
+ }
+
+ PropertyDetails set_index(int index) const {
+ PropertyDetails details = *this;
+ details.value_ = DictionaryStorageField::update(details.value_, index);
+ return details;
+ }
+
+ PropertyDetails CopyWithRepresentation(Representation representation) const {
+ return PropertyDetails(value_, representation);
+ }
+ PropertyDetails CopyWithConstness(PropertyConstness constness) const {
+ return PropertyDetails(value_, constness);
+ }
+ PropertyDetails CopyAddAttributes(PropertyAttributes new_attributes) const {
+ new_attributes =
+ static_cast<PropertyAttributes>(attributes() | new_attributes);
+ return PropertyDetails(value_, new_attributes);
+ }
+
+ // Conversion for storing details as Object.
+ explicit inline PropertyDetails(Smi smi);
+ inline Smi AsSmi() const;
+
+ static uint8_t EncodeRepresentation(Representation representation) {
+ return representation.kind();
+ }
+
+ static Representation DecodeRepresentation(uint32_t bits) {
+ return Representation::FromKind(static_cast<Representation::Kind>(bits));
+ }
+
+ PropertyKind kind() const { return KindField::decode(value_); }
+ PropertyLocation location() const { return LocationField::decode(value_); }
+ PropertyConstness constness() const { return ConstnessField::decode(value_); }
+
+ PropertyAttributes attributes() const {
+ return AttributesField::decode(value_);
+ }
+
+ bool HasKindAndAttributes(PropertyKind kind, PropertyAttributes attributes) {
+ return (value_ & (KindField::kMask | AttributesField::kMask)) ==
+ (KindField::encode(kind) | AttributesField::encode(attributes));
+ }
+
+ int dictionary_index() const {
+ return DictionaryStorageField::decode(value_);
+ }
+
+ Representation representation() const {
+ return DecodeRepresentation(RepresentationField::decode(value_));
+ }
+
+ int field_index() const { return FieldIndexField::decode(value_); }
+
+ inline int field_width_in_words() const;
+
+ static bool IsValidIndex(int index) {
+ return DictionaryStorageField::is_valid(index);
+ }
+
+ bool IsReadOnly() const { return (attributes() & READ_ONLY) != 0; }
+ bool IsConfigurable() const { return (attributes() & DONT_DELETE) == 0; }
+ bool IsDontEnum() const { return (attributes() & DONT_ENUM) != 0; }
+ bool IsEnumerable() const { return !IsDontEnum(); }
+ PropertyCellType cell_type() const {
+ return PropertyCellTypeField::decode(value_);
+ }
+
+ // Bit fields in value_ (type, shift, size). Must be public so the
+ // constants can be embedded in generated code.
+ class KindField : public BitField<PropertyKind, 0, 1> {};
+ class LocationField : public BitField<PropertyLocation, KindField::kNext, 1> {
+ };
+ class ConstnessField
+ : public BitField<PropertyConstness, LocationField::kNext, 1> {};
+ class AttributesField
+ : public BitField<PropertyAttributes, ConstnessField::kNext, 3> {};
+ static const int kAttributesReadOnlyMask =
+ (READ_ONLY << AttributesField::kShift);
+ static const int kAttributesDontDeleteMask =
+ (DONT_DELETE << AttributesField::kShift);
+ static const int kAttributesDontEnumMask =
+ (DONT_ENUM << AttributesField::kShift);
+
+ // Bit fields for normalized objects.
+ class PropertyCellTypeField
+ : public BitField<PropertyCellType, AttributesField::kNext, 2> {};
+ class DictionaryStorageField
+ : public BitField<uint32_t, PropertyCellTypeField::kNext, 23> {};
+
+ // Bit fields for fast objects.
+ class RepresentationField
+ : public BitField<uint32_t, AttributesField::kNext, 3> {};
+ class DescriptorPointer
+ : public BitField<uint32_t, RepresentationField::kNext,
+ kDescriptorIndexBitCount> {}; // NOLINT
+ class FieldIndexField : public BitField<uint32_t, DescriptorPointer::kNext,
+ kDescriptorIndexBitCount> {
+ }; // NOLINT
+
+ // All bits for both fast and slow objects must fit in a smi.
+ STATIC_ASSERT(DictionaryStorageField::kNext <= 31);
+ STATIC_ASSERT(FieldIndexField::kNext <= 31);
+
+ static const int kInitialIndex = 1;
+
+#ifdef OBJECT_PRINT
+ // Used by our gdb macros; we should perhaps change these in the future.
+ void Print(bool dictionary_mode);
+#endif
+
+ enum PrintMode {
+ kPrintAttributes = 1 << 0,
+ kPrintFieldIndex = 1 << 1,
+ kPrintRepresentation = 1 << 2,
+ kPrintPointer = 1 << 3,
+
+ kForProperties = kPrintFieldIndex,
+ kForTransitions = kPrintAttributes,
+ kPrintFull = -1,
+ };
+ void PrintAsSlowTo(std::ostream& out);
+ void PrintAsFastTo(std::ostream& out, PrintMode mode = kPrintFull);
+
+ private:
+ PropertyDetails(int value, int pointer) {
+ value_ = DescriptorPointer::update(value, pointer);
+ }
+ PropertyDetails(int value, Representation representation) {
+ value_ = RepresentationField::update(value,
+ EncodeRepresentation(representation));
+ }
+ PropertyDetails(int value, PropertyConstness constness) {
+ value_ = ConstnessField::update(value, constness);
+ }
+ PropertyDetails(int value, PropertyAttributes attributes) {
+ value_ = AttributesField::update(value, attributes);
+ }
+
+ uint32_t value_;
+};
+
+// kField location is more general than kDescriptor, kDescriptor generalizes
+// only to itself.
+inline bool IsGeneralizableTo(PropertyLocation a, PropertyLocation b) {
+ return b == kField || a == kDescriptor;
+}
+
+// PropertyConstness::kMutable constness is more general than
+// VariableMode::kConst, VariableMode::kConst generalizes only to itself.
+inline bool IsGeneralizableTo(PropertyConstness a, PropertyConstness b) {
+ return b == PropertyConstness::kMutable || a == PropertyConstness::kConst;
+}
+
+inline PropertyConstness GeneralizeConstness(PropertyConstness a,
+ PropertyConstness b) {
+ return a == PropertyConstness::kMutable ? PropertyConstness::kMutable : b;
+}
+
+V8_EXPORT_PRIVATE std::ostream& operator<<(
+ std::ostream& os, const PropertyAttributes& attributes);
+V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
+ PropertyConstness constness);
+} // namespace internal
+} // namespace v8
+
+#endif // V8_OBJECTS_PROPERTY_DETAILS_H_
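
PropertyDetails earns its one-Smi footprint from the chained BitField classes at the bottom of the header: each field claims a shifted slice of value_, and kNext lets the next field start where the previous one ended. A self-contained miniature of that mechanism (layout simplified to two fields; the real template comes via src/utils/utils.h, included above):

#include <cassert>
#include <cstdint>

// Miniature BitField: a typed slice of a 32-bit word.
template <typename T, int kShift, int kBits>
struct BitField {
  static constexpr uint32_t kMask = ((1u << kBits) - 1) << kShift;
  static constexpr int kNext = kShift + kBits;
  static uint32_t encode(T value) {
    return static_cast<uint32_t>(value) << kShift;
  }
  static T decode(uint32_t word) {
    return static_cast<T>((word & kMask) >> kShift);
  }
};

enum MiniKind { kMiniData = 0, kMiniAccessor = 1 };
using KindBits = BitField<MiniKind, 0, 1>;
using AttrBits = BitField<uint32_t, KindBits::kNext, 3>;

int main() {
  uint32_t v = KindBits::encode(kMiniAccessor) | AttrBits::encode(5);
  assert(KindBits::decode(v) == kMiniAccessor);
  assert(AttrBits::decode(v) == 5);
  return 0;
}
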
diff --git a/deps/v8/src/objects/property.cc b/deps/v8/src/objects/property.cc
new file mode 100644
index 0000000000..c226c28a76
--- /dev/null
+++ b/deps/v8/src/objects/property.cc
@@ -0,0 +1,146 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/objects/property.h"
+
+#include "src/handles/handles-inl.h"
+#include "src/objects/field-type.h"
+#include "src/objects/name-inl.h"
+#include "src/objects/objects-inl.h"
+#include "src/objects/smi.h"
+#include "src/utils/ostreams.h"
+
+namespace v8 {
+namespace internal {
+
+std::ostream& operator<<(std::ostream& os,
+ const PropertyAttributes& attributes) {
+ os << "[";
+ os << (((attributes & READ_ONLY) == 0) ? "W" : "_"); // writable
+ os << (((attributes & DONT_ENUM) == 0) ? "E" : "_"); // enumerable
+ os << (((attributes & DONT_DELETE) == 0) ? "C" : "_"); // configurable
+ os << "]";
+ return os;
+}
+
+std::ostream& operator<<(std::ostream& os, PropertyConstness constness) {
+ switch (constness) {
+ case PropertyConstness::kMutable:
+ return os << "mutable";
+ case PropertyConstness::kConst:
+ return os << "const";
+ }
+ UNREACHABLE();
+}
+
+Descriptor::Descriptor() : details_(Smi::zero()) {}
+
+Descriptor::Descriptor(Handle<Name> key, const MaybeObjectHandle& value,
+ PropertyKind kind, PropertyAttributes attributes,
+ PropertyLocation location, PropertyConstness constness,
+ Representation representation, int field_index)
+ : key_(key),
+ value_(value),
+ details_(kind, attributes, location, constness, representation,
+ field_index) {
+ DCHECK(key->IsUniqueName());
+ DCHECK_IMPLIES(key->IsPrivate(), !details_.IsEnumerable());
+}
+
+Descriptor::Descriptor(Handle<Name> key, const MaybeObjectHandle& value,
+ PropertyDetails details)
+ : key_(key), value_(value), details_(details) {
+ DCHECK(key->IsUniqueName());
+ DCHECK_IMPLIES(key->IsPrivate(), !details_.IsEnumerable());
+}
+
+Descriptor Descriptor::DataField(Isolate* isolate, Handle<Name> key,
+ int field_index, PropertyAttributes attributes,
+ Representation representation) {
+ return DataField(key, field_index, attributes, PropertyConstness::kMutable,
+ representation, MaybeObjectHandle(FieldType::Any(isolate)));
+}
+
+Descriptor Descriptor::DataField(Handle<Name> key, int field_index,
+ PropertyAttributes attributes,
+ PropertyConstness constness,
+ Representation representation,
+ const MaybeObjectHandle& wrapped_field_type) {
+ DCHECK(wrapped_field_type->IsSmi() || wrapped_field_type->IsWeak());
+ PropertyDetails details(kData, attributes, kField, constness, representation,
+ field_index);
+ return Descriptor(key, wrapped_field_type, details);
+}
+
+Descriptor Descriptor::DataConstant(Handle<Name> key, Handle<Object> value,
+ PropertyAttributes attributes) {
+ return Descriptor(key, MaybeObjectHandle(value), kData, attributes,
+ kDescriptor, PropertyConstness::kConst,
+ value->OptimalRepresentation(), 0);
+}
+
+Descriptor Descriptor::DataConstant(Isolate* isolate, Handle<Name> key,
+ int field_index, Handle<Object> value,
+ PropertyAttributes attributes) {
+ MaybeObjectHandle any_type(FieldType::Any(), isolate);
+ return DataField(key, field_index, attributes, PropertyConstness::kConst,
+ Representation::Tagged(), any_type);
+}
+
+Descriptor Descriptor::AccessorConstant(Handle<Name> key,
+ Handle<Object> foreign,
+ PropertyAttributes attributes) {
+ return Descriptor(key, MaybeObjectHandle(foreign), kAccessor, attributes,
+ kDescriptor, PropertyConstness::kConst,
+ Representation::Tagged(), 0);
+}
+
+// Outputs PropertyDetails as dictionary-mode details.
+void PropertyDetails::PrintAsSlowTo(std::ostream& os) {
+ os << "(";
+ if (constness() == PropertyConstness::kConst) os << "const ";
+ os << (kind() == kData ? "data" : "accessor");
+ os << ", dict_index: " << dictionary_index();
+ os << ", attrs: " << attributes() << ")";
+}
+
+// Outputs PropertyDetails as descriptor-array details.
+void PropertyDetails::PrintAsFastTo(std::ostream& os, PrintMode mode) {
+ os << "(";
+ if (constness() == PropertyConstness::kConst) os << "const ";
+ os << (kind() == kData ? "data" : "accessor");
+ if (location() == kField) {
+ os << " field";
+ if (mode & kPrintFieldIndex) {
+ os << " " << field_index();
+ }
+ if (mode & kPrintRepresentation) {
+ os << ":" << representation().Mnemonic();
+ }
+ } else {
+ os << " descriptor";
+ }
+ if (mode & kPrintPointer) {
+ os << ", p: " << pointer();
+ }
+ if (mode & kPrintAttributes) {
+ os << ", attrs: " << attributes();
+ }
+ os << ")";
+}
+
+#ifdef OBJECT_PRINT
+void PropertyDetails::Print(bool dictionary_mode) {
+ StdoutStream os;
+ if (dictionary_mode) {
+ PrintAsSlowTo(os);
+ } else {
+ PrintAsFastTo(os, PrintMode::kPrintFull);
+ }
+ os << "\n" << std::flush;
+}
+#endif
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/objects/property.h b/deps/v8/src/objects/property.h
new file mode 100644
index 0000000000..100b39e1f9
--- /dev/null
+++ b/deps/v8/src/objects/property.h
@@ -0,0 +1,75 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_PROPERTY_H_
+#define V8_OBJECTS_PROPERTY_H_
+
+#include <iosfwd>
+
+#include "src/common/globals.h"
+#include "src/handles/handles.h"
+#include "src/handles/maybe-handles.h"
+#include "src/objects/name.h"
+#include "src/objects/objects.h"
+#include "src/objects/property-details.h"
+
+namespace v8 {
+namespace internal {
+
+// Abstraction for elements in instance-descriptor arrays.
+//
+// Each descriptor has a key, property attributes, property type,
+// property index (in the actual instance-descriptor array) and
+// optionally a piece of data.
+class V8_EXPORT_PRIVATE Descriptor final {
+ public:
+ Descriptor();
+
+ Handle<Name> GetKey() const { return key_; }
+ MaybeObjectHandle GetValue() const { return value_; }
+ PropertyDetails GetDetails() const { return details_; }
+
+ void SetSortedKeyIndex(int index) { details_ = details_.set_pointer(index); }
+
+ static Descriptor DataField(Isolate* isolate, Handle<Name> key,
+ int field_index, PropertyAttributes attributes,
+ Representation representation);
+
+ static Descriptor DataField(Handle<Name> key, int field_index,
+ PropertyAttributes attributes,
+ PropertyConstness constness,
+ Representation representation,
+ const MaybeObjectHandle& wrapped_field_type);
+
+ static Descriptor DataConstant(Handle<Name> key, Handle<Object> value,
+ PropertyAttributes attributes);
+
+ static Descriptor DataConstant(Isolate* isolate, Handle<Name> key,
+ int field_index, Handle<Object> value,
+ PropertyAttributes attributes);
+
+ static Descriptor AccessorConstant(Handle<Name> key, Handle<Object> foreign,
+ PropertyAttributes attributes);
+
+ private:
+ Handle<Name> key_;
+ MaybeObjectHandle value_;
+ PropertyDetails details_;
+
+ protected:
+ Descriptor(Handle<Name> key, const MaybeObjectHandle& value,
+ PropertyDetails details);
+
+ Descriptor(Handle<Name> key, const MaybeObjectHandle& value,
+ PropertyKind kind, PropertyAttributes attributes,
+ PropertyLocation location, PropertyConstness constness,
+ Representation representation, int field_index);
+
+ friend class MapUpdater;
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_OBJECTS_PROPERTY_H_
diff --git a/deps/v8/src/objects/prototype-info-inl.h b/deps/v8/src/objects/prototype-info-inl.h
index 56104ba4c1..b83bb1346a 100644
--- a/deps/v8/src/objects/prototype-info-inl.h
+++ b/deps/v8/src/objects/prototype-info-inl.h
@@ -8,10 +8,10 @@
#include "src/objects/prototype-info.h"
#include "src/heap/heap-write-barrier-inl.h"
-#include "src/objects-inl.h"
#include "src/objects/fixed-array-inl.h"
#include "src/objects/map-inl.h"
#include "src/objects/maybe-object.h"
+#include "src/objects/objects-inl.h"
#include "src/objects/struct-inl.h"
// Has to be the last include (doesn't have include guards):
@@ -48,19 +48,19 @@ BOOL_ACCESSORS(PrototypeInfo, bit_field, should_be_fast_map, kShouldBeFastBit)
void PrototypeUsers::MarkSlotEmpty(WeakArrayList array, int index) {
DCHECK_GT(index, 0);
- DCHECK_LT(index, array->length());
+ DCHECK_LT(index, array.length());
// Chain the empty slots into a linked list (each empty slot contains the
// index of the next empty slot).
- array->Set(index, MaybeObject::FromObject(empty_slot_index(array)));
+ array.Set(index, MaybeObject::FromObject(empty_slot_index(array)));
set_empty_slot_index(array, index);
}
Smi PrototypeUsers::empty_slot_index(WeakArrayList array) {
- return array->Get(kEmptySlotIndex).ToSmi();
+ return array.Get(kEmptySlotIndex).ToSmi();
}
void PrototypeUsers::set_empty_slot_index(WeakArrayList array, int index) {
- array->Set(kEmptySlotIndex, MaybeObject::FromObject(Smi::FromInt(index)));
+ array.Set(kEmptySlotIndex, MaybeObject::FromObject(Smi::FromInt(index)));
}
} // namespace internal
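
MarkSlotEmpty above implements an intrusive free list inside the WeakArrayList itself: the head index lives at kEmptySlotIndex, and each emptied slot stores the index of the next empty slot, so slot reuse is O(1) with no side allocation. The same scheme in miniature over a plain int array:

#include <cassert>
#include <vector>

constexpr int kEmptySlotIndex = 0;  // Slot 0 holds the free-list head.
constexpr int kNoEmptySlot = -1;    // Stand-in for the initial sentinel.

void MarkSlotEmpty(std::vector<int>& array, int index) {
  array[index] = array[kEmptySlotIndex];  // Chain to the previous head.
  array[kEmptySlotIndex] = index;         // This slot is the new head.
}

int main() {
  std::vector<int> array = {kNoEmptySlot, 10, 20, 30};
  MarkSlotEmpty(array, 2);
  MarkSlotEmpty(array, 1);
  assert(array[kEmptySlotIndex] == 1);  // Most recently freed slot first.
  assert(array[1] == 2);                // ...chaining to the earlier one.
  assert(array[2] == kNoEmptySlot);     // ...which ends the list.
  return 0;
}
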
diff --git a/deps/v8/src/objects/prototype-info.h b/deps/v8/src/objects/prototype-info.h
index 4e6ba68cde..94d86d2e19 100644
--- a/deps/v8/src/objects/prototype-info.h
+++ b/deps/v8/src/objects/prototype-info.h
@@ -5,8 +5,8 @@
#ifndef V8_OBJECTS_PROTOTYPE_INFO_H_
#define V8_OBJECTS_PROTOTYPE_INFO_H_
-#include "src/objects.h"
#include "src/objects/fixed-array.h"
+#include "src/objects/objects.h"
#include "src/objects/struct.h"
// Has to be the last include (doesn't have include guards):
diff --git a/deps/v8/src/objects/prototype-inl.h b/deps/v8/src/objects/prototype-inl.h
new file mode 100644
index 0000000000..5f7c3e23c5
--- /dev/null
+++ b/deps/v8/src/objects/prototype-inl.h
@@ -0,0 +1,144 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_PROTOTYPE_INL_H_
+#define V8_OBJECTS_PROTOTYPE_INL_H_
+
+#include "src/objects/prototype.h"
+
+#include "src/handles/handles-inl.h"
+#include "src/objects/js-proxy.h"
+#include "src/objects/map-inl.h"
+
+namespace v8 {
+namespace internal {
+
+PrototypeIterator::PrototypeIterator(Isolate* isolate,
+ Handle<JSReceiver> receiver,
+ WhereToStart where_to_start,
+ WhereToEnd where_to_end)
+ : isolate_(isolate),
+ handle_(receiver),
+ where_to_end_(where_to_end),
+ is_at_end_(false),
+ seen_proxies_(0) {
+ CHECK(!handle_.is_null());
+ if (where_to_start == kStartAtPrototype) Advance();
+}
+
+PrototypeIterator::PrototypeIterator(Isolate* isolate, JSReceiver receiver,
+ WhereToStart where_to_start,
+ WhereToEnd where_to_end)
+ : isolate_(isolate),
+ object_(receiver),
+ where_to_end_(where_to_end),
+ is_at_end_(false),
+ seen_proxies_(0) {
+ if (where_to_start == kStartAtPrototype) Advance();
+}
+
+PrototypeIterator::PrototypeIterator(Isolate* isolate, Map receiver_map,
+ WhereToEnd where_to_end)
+ : isolate_(isolate),
+ object_(receiver_map.GetPrototypeChainRootMap(isolate_).prototype()),
+ where_to_end_(where_to_end),
+ is_at_end_(object_.IsNull(isolate_)),
+ seen_proxies_(0) {
+ if (!is_at_end_ && where_to_end_ == END_AT_NON_HIDDEN) {
+ DCHECK(object_.IsJSReceiver());
+ Map map = JSReceiver::cast(object_).map();
+ is_at_end_ = !map.has_hidden_prototype();
+ }
+}
+
+PrototypeIterator::PrototypeIterator(Isolate* isolate, Handle<Map> receiver_map,
+ WhereToEnd where_to_end)
+ : isolate_(isolate),
+ handle_(receiver_map->GetPrototypeChainRootMap(isolate_).prototype(),
+ isolate_),
+ where_to_end_(where_to_end),
+ is_at_end_(handle_->IsNull(isolate_)),
+ seen_proxies_(0) {
+ if (!is_at_end_ && where_to_end_ == END_AT_NON_HIDDEN) {
+ DCHECK(handle_->IsJSReceiver());
+ Map map = JSReceiver::cast(*handle_).map();
+ is_at_end_ = !map.has_hidden_prototype();
+ }
+}
+
+bool PrototypeIterator::HasAccess() const {
+ // We can only perform access check in the handlified version of the
+ // PrototypeIterator.
+ DCHECK(!handle_.is_null());
+ if (handle_->IsAccessCheckNeeded()) {
+ return isolate_->MayAccess(handle(isolate_->context(), isolate_),
+ Handle<JSObject>::cast(handle_));
+ }
+ return true;
+}
+
+void PrototypeIterator::Advance() {
+ if (handle_.is_null() && object_.IsJSProxy()) {
+ is_at_end_ = true;
+ object_ = ReadOnlyRoots(isolate_).null_value();
+ return;
+ } else if (!handle_.is_null() && handle_->IsJSProxy()) {
+ is_at_end_ = true;
+ handle_ = isolate_->factory()->null_value();
+ return;
+ }
+ AdvanceIgnoringProxies();
+}
+
+void PrototypeIterator::AdvanceIgnoringProxies() {
+ Object object = handle_.is_null() ? object_ : *handle_;
+ Map map = HeapObject::cast(object).map();
+
+ HeapObject prototype = map.prototype();
+ is_at_end_ = where_to_end_ == END_AT_NON_HIDDEN ? !map.has_hidden_prototype()
+ : prototype.IsNull(isolate_);
+
+ if (handle_.is_null()) {
+ object_ = prototype;
+ } else {
+ handle_ = handle(prototype, isolate_);
+ }
+}
+
+V8_WARN_UNUSED_RESULT bool PrototypeIterator::AdvanceFollowingProxies() {
+ DCHECK(!(handle_.is_null() && object_.IsJSProxy()));
+ if (!HasAccess()) {
+ // Abort the lookup if we do not have access to the current object.
+ handle_ = isolate_->factory()->null_value();
+ is_at_end_ = true;
+ return true;
+ }
+ return AdvanceFollowingProxiesIgnoringAccessChecks();
+}
+
+V8_WARN_UNUSED_RESULT bool
+PrototypeIterator::AdvanceFollowingProxiesIgnoringAccessChecks() {
+ if (handle_.is_null() || !handle_->IsJSProxy()) {
+ AdvanceIgnoringProxies();
+ return true;
+ }
+
+ // Due to possible __proto__ recursion, limit the number of Proxies
+ // we visit to an arbitrarily chosen large number.
+ seen_proxies_++;
+ if (seen_proxies_ > JSProxy::kMaxIterationLimit) {
+ isolate_->StackOverflow();
+ return false;
+ }
+ MaybeHandle<HeapObject> proto =
+ JSProxy::GetPrototype(Handle<JSProxy>::cast(handle_));
+ if (!proto.ToHandle(&handle_)) return false;
+ is_at_end_ = where_to_end_ == END_AT_NON_HIDDEN || handle_->IsNull(isolate_);
+ return true;
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_OBJECTS_PROTOTYPE_INL_H_
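AdvanceFollowingProxiesIgnoringAccessChecks above bounds the walk: every JSProxy hop bumps seen_proxies_, and once it exceeds JSProxy::kMaxIterationLimit the iterator reports a stack overflow rather than spinning on a cyclic or adversarial __proto__ chain. A minimal standalone sketch of that bounded-walk guard, with a stand-in Node type and limit constant in place of the V8 internals:

    #include <cstdio>

    struct Node {
      Node* proto;    // next link in the chain; proxy links may form cycles
      bool is_proxy;  // stands in for IsJSProxy()
    };

    // Stand-in for JSProxy::kMaxIterationLimit; the exact value is arbitrary.
    constexpr int kMaxIterationLimit = 100 * 1024;

    // Returns false when the walk aborts, mirroring the StackOverflow bail-out.
    // Only proxy hops are counted: ordinary prototype chains are acyclic, so
    // proxies are the only way the walk can become unbounded.
    bool WalkChain(const Node* start) {
      int seen_proxies = 0;
      for (const Node* cur = start; cur != nullptr; cur = cur->proto) {
        if (cur->is_proxy && ++seen_proxies > kMaxIterationLimit) {
          std::puts("aborting: proxy iteration limit reached");
          return false;
        }
      }
      return true;
    }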
diff --git a/deps/v8/src/objects/prototype.h b/deps/v8/src/objects/prototype.h
new file mode 100644
index 0000000000..cd003837ca
--- /dev/null
+++ b/deps/v8/src/objects/prototype.h
@@ -0,0 +1,89 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_PROTOTYPE_H_
+#define V8_OBJECTS_PROTOTYPE_H_
+
+#include "src/execution/isolate.h"
+#include "src/objects/objects.h"
+
+namespace v8 {
+namespace internal {
+
+/**
+ * A class to uniformly access the prototype of any Object and walk its
+ * prototype chain.
+ *
+ * The PrototypeIterator can either start at the prototype (default), or
+ * include the receiver itself. If a PrototypeIterator is constructed for a
+ * Map, it will always start at the prototype.
+ *
+ * The PrototypeIterator can run to the null_value(), to the first
+ * non-hidden prototype, or to a given object.
+ */
+
+class PrototypeIterator {
+ public:
+ enum WhereToEnd { END_AT_NULL, END_AT_NON_HIDDEN };
+
+ inline PrototypeIterator(Isolate* isolate, Handle<JSReceiver> receiver,
+ WhereToStart where_to_start = kStartAtPrototype,
+ WhereToEnd where_to_end = END_AT_NULL);
+
+ inline PrototypeIterator(Isolate* isolate, JSReceiver receiver,
+ WhereToStart where_to_start = kStartAtPrototype,
+ WhereToEnd where_to_end = END_AT_NULL);
+
+ inline explicit PrototypeIterator(Isolate* isolate, Map receiver_map,
+ WhereToEnd where_to_end = END_AT_NULL);
+
+ inline explicit PrototypeIterator(Isolate* isolate, Handle<Map> receiver_map,
+ WhereToEnd where_to_end = END_AT_NULL);
+
+ ~PrototypeIterator() = default;
+
+ inline bool HasAccess() const;
+
+ template <typename T = HeapObject>
+ T GetCurrent() const {
+ DCHECK(handle_.is_null());
+ return T::cast(object_);
+ }
+
+ template <typename T = HeapObject>
+ static Handle<T> GetCurrent(const PrototypeIterator& iterator) {
+ DCHECK(!iterator.handle_.is_null());
+ DCHECK_EQ(iterator.object_, Object());
+ return Handle<T>::cast(iterator.handle_);
+ }
+
+ inline void Advance();
+
+ inline void AdvanceIgnoringProxies();
+
+ // Returns false iff a call to JSProxy::GetPrototype throws.
+ V8_WARN_UNUSED_RESULT inline bool AdvanceFollowingProxies();
+
+ V8_WARN_UNUSED_RESULT inline bool
+ AdvanceFollowingProxiesIgnoringAccessChecks();
+
+ bool IsAtEnd() const { return is_at_end_; }
+ Isolate* isolate() const { return isolate_; }
+
+ private:
+ Isolate* isolate_;
+ Object object_;
+ Handle<HeapObject> handle_;
+ WhereToEnd where_to_end_;
+ bool is_at_end_;
+ int seen_proxies_;
+
+ DISALLOW_COPY_AND_ASSIGN(PrototypeIterator);
+};
+
+} // namespace internal
+
+} // namespace v8
+
+#endif // V8_OBJECTS_PROTOTYPE_H_
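For orientation, the handlified iterator is typically driven in a loop like the one below. This is a sketch against the V8-internal API declared above (it assumes an Isolate* isolate and a Handle<JSReceiver> receiver already in scope, and only builds inside the V8 tree):

    // Walk the prototype chain with the default bounds: start at the
    // prototype, end at null. Advance() stops at the first JSProxy.
    for (PrototypeIterator it(isolate, receiver); !it.IsAtEnd(); it.Advance()) {
      Handle<HeapObject> proto = PrototypeIterator::GetCurrent(it);
      // ... inspect *proto ...
    }

Callers that need to see through proxies use AdvanceFollowingProxies() instead and must check its bool result, since a proxy's getPrototypeOf trap can throw.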
diff --git a/deps/v8/src/objects/regexp-match-info.h b/deps/v8/src/objects/regexp-match-info.h
index 32828e9591..859dc4a09a 100644
--- a/deps/v8/src/objects/regexp-match-info.h
+++ b/deps/v8/src/objects/regexp-match-info.h
@@ -6,8 +6,8 @@
#define V8_OBJECTS_REGEXP_MATCH_INFO_H_
#include "src/base/compiler-specific.h"
-#include "src/objects.h"
#include "src/objects/fixed-array.h"
+#include "src/objects/objects.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
diff --git a/deps/v8/src/objects/scope-info.cc b/deps/v8/src/objects/scope-info.cc
index f3b3a15ab7..af45e86af3 100644
--- a/deps/v8/src/objects/scope-info.cc
+++ b/deps/v8/src/objects/scope-info.cc
@@ -8,10 +8,10 @@
#include "src/ast/scopes.h"
#include "src/ast/variables.h"
-#include "src/bootstrapper.h"
+#include "src/init/bootstrapper.h"
-#include "src/objects-inl.h"
#include "src/objects/module-inl.h"
+#include "src/objects/objects-inl.h"
namespace v8 {
namespace internal {
@@ -26,27 +26,27 @@ enum ModuleVariableEntryOffset {
#ifdef DEBUG
bool ScopeInfo::Equals(ScopeInfo other) const {
- if (length() != other->length()) return false;
+ if (length() != other.length()) return false;
for (int index = 0; index < length(); ++index) {
Object entry = get(index);
- Object other_entry = other->get(index);
- if (entry->IsSmi()) {
+ Object other_entry = other.get(index);
+ if (entry.IsSmi()) {
if (entry != other_entry) return false;
} else {
- if (HeapObject::cast(entry)->map()->instance_type() !=
- HeapObject::cast(other_entry)->map()->instance_type()) {
+ if (HeapObject::cast(entry).map().instance_type() !=
+ HeapObject::cast(other_entry).map().instance_type()) {
return false;
}
- if (entry->IsString()) {
- if (!String::cast(entry)->Equals(String::cast(other_entry))) {
+ if (entry.IsString()) {
+ if (!String::cast(entry).Equals(String::cast(other_entry))) {
return false;
}
- } else if (entry->IsScopeInfo()) {
- if (!ScopeInfo::cast(entry)->Equals(ScopeInfo::cast(other_entry))) {
+ } else if (entry.IsScopeInfo()) {
+ if (!ScopeInfo::cast(entry).Equals(ScopeInfo::cast(other_entry))) {
return false;
}
- } else if (entry->IsModuleInfo()) {
- if (!ModuleInfo::cast(entry)->Equals(ModuleInfo::cast(other_entry))) {
+ } else if (entry.IsModuleInfo()) {
+ if (!ModuleInfo::cast(entry).Equals(ModuleInfo::cast(other_entry))) {
return false;
}
} else {
@@ -134,6 +134,9 @@ Handle<ScopeInfo> ScopeInfo::Create(Isolate* isolate, Zone* zone, Scope* scope,
function_name_info = NONE;
}
+ const bool has_brand = scope->is_class_scope()
+ ? scope->AsClassScope()->brand() != nullptr
+ : false;
const bool has_function_name = function_name_info != NONE;
const bool has_position_info = NeedsPositionInfo(scope->scope_type());
const bool has_receiver = receiver_info == STACK || receiver_info == CONTEXT;
@@ -158,7 +161,7 @@ Handle<ScopeInfo> ScopeInfo::Create(Isolate* isolate, Zone* zone, Scope* scope,
{
DisallowHeapAllocation no_gc;
ScopeInfo scope_info = *scope_info_handle;
- WriteBarrierMode mode = scope_info->GetWriteBarrierMode(no_gc);
+ WriteBarrierMode mode = scope_info.GetWriteBarrierMode(no_gc);
bool has_simple_parameters = false;
bool is_asm_module = false;
@@ -181,6 +184,7 @@ Handle<ScopeInfo> ScopeInfo::Create(Isolate* isolate, Zone* zone, Scope* scope,
LanguageModeField::encode(scope->language_mode()) |
DeclarationScopeField::encode(scope->is_declaration_scope()) |
ReceiverVariableField::encode(receiver_info) |
+ HasClassBrandField::encode(has_brand) |
HasNewTargetField::encode(has_new_target) |
FunctionVariableField::encode(function_name_info) |
HasInferredFunctionNameField::encode(has_inferred_function_name) |
@@ -191,16 +195,16 @@ Handle<ScopeInfo> ScopeInfo::Create(Isolate* isolate, Zone* zone, Scope* scope,
IsDebugEvaluateScopeField::encode(scope->is_debug_evaluate_scope()) |
ForceContextAllocationField::encode(
scope->ForceContextForLanguageMode());
- scope_info->SetFlags(flags);
+ scope_info.SetFlags(flags);
- scope_info->SetParameterCount(parameter_count);
- scope_info->SetContextLocalCount(context_local_count);
+ scope_info.SetParameterCount(parameter_count);
+ scope_info.SetContextLocalCount(context_local_count);
// Add context locals' names and info, module variables' names and info.
// Context locals are added using their index.
int context_local_base = index;
int context_local_info_base = context_local_base + context_local_count;
- int module_var_entry = scope_info->ModuleVariablesIndex();
+ int module_var_entry = scope_info.ModuleVariablesIndex();
for (Variable* var : *scope->locals()) {
switch (var->location()) {
@@ -215,23 +219,23 @@ Handle<ScopeInfo> ScopeInfo::Create(Isolate* isolate, Zone* zone, Scope* scope,
InitFlagField::encode(var->initialization_flag()) |
MaybeAssignedFlagField::encode(var->maybe_assigned()) |
ParameterNumberField::encode(ParameterNumberField::kMax);
- scope_info->set(context_local_base + local_index, *var->name(), mode);
- scope_info->set(context_local_info_base + local_index,
- Smi::FromInt(info));
+ scope_info.set(context_local_base + local_index, *var->name(), mode);
+ scope_info.set(context_local_info_base + local_index,
+ Smi::FromInt(info));
break;
}
case VariableLocation::MODULE: {
- scope_info->set(module_var_entry + kModuleVariableNameOffset,
- *var->name(), mode);
- scope_info->set(module_var_entry + kModuleVariableIndexOffset,
- Smi::FromInt(var->index()));
+ scope_info.set(module_var_entry + kModuleVariableNameOffset,
+ *var->name(), mode);
+ scope_info.set(module_var_entry + kModuleVariableIndexOffset,
+ Smi::FromInt(var->index()));
uint32_t properties =
VariableModeField::encode(var->mode()) |
InitFlagField::encode(var->initialization_flag()) |
MaybeAssignedFlagField::encode(var->maybe_assigned()) |
ParameterNumberField::encode(ParameterNumberField::kMax);
- scope_info->set(module_var_entry + kModuleVariablePropertiesOffset,
- Smi::FromInt(properties));
+ scope_info.set(module_var_entry + kModuleVariablePropertiesOffset,
+ Smi::FromInt(properties));
module_var_entry += kModuleVariableEntryLength;
break;
}
@@ -253,9 +257,9 @@ Handle<ScopeInfo> ScopeInfo::Create(Isolate* isolate, Zone* zone, Scope* scope,
if (parameter->location() != VariableLocation::CONTEXT) continue;
int index = parameter->index() - Context::MIN_CONTEXT_SLOTS;
int info_index = context_local_info_base + index;
- int info = Smi::ToInt(scope_info->get(info_index));
+ int info = Smi::ToInt(scope_info.get(info_index));
info = ParameterNumberField::update(info, i);
- scope_info->set(info_index, Smi::FromInt(info));
+ scope_info.set(info_index, Smi::FromInt(info));
}
// TODO(verwaest): Remove this unnecessary entry.
@@ -268,9 +272,9 @@ Handle<ScopeInfo> ScopeInfo::Create(Isolate* isolate, Zone* zone, Scope* scope,
InitFlagField::encode(var->initialization_flag()) |
MaybeAssignedFlagField::encode(var->maybe_assigned()) |
ParameterNumberField::encode(ParameterNumberField::kMax);
- scope_info->set(context_local_base + local_index, *var->name(), mode);
- scope_info->set(context_local_info_base + local_index,
- Smi::FromInt(info));
+ scope_info.set(context_local_base + local_index, *var->name(), mode);
+ scope_info.set(context_local_info_base + local_index,
+ Smi::FromInt(info));
}
}
}
@@ -278,16 +282,16 @@ Handle<ScopeInfo> ScopeInfo::Create(Isolate* isolate, Zone* zone, Scope* scope,
index += 2 * context_local_count;
// If the receiver is allocated, add its index.
- DCHECK_EQ(index, scope_info->ReceiverInfoIndex());
+ DCHECK_EQ(index, scope_info.ReceiverInfoIndex());
if (has_receiver) {
int var_index = scope->AsDeclarationScope()->receiver()->index();
- scope_info->set(index++, Smi::FromInt(var_index));
+ scope_info.set(index++, Smi::FromInt(var_index));
// ?? DCHECK(receiver_info != CONTEXT || var_index ==
// scope_info->ContextLength() - 1);
}
// If present, add the function variable name and its index.
- DCHECK_EQ(index, scope_info->FunctionNameInfoIndex());
+ DCHECK_EQ(index, scope_info.FunctionNameInfoIndex());
if (has_function_name) {
Variable* var = scope->AsDeclarationScope()->function_var();
int var_index = -1;
@@ -296,28 +300,28 @@ Handle<ScopeInfo> ScopeInfo::Create(Isolate* isolate, Zone* zone, Scope* scope,
var_index = var->index();
name = *var->name();
}
- scope_info->set(index++, name, mode);
- scope_info->set(index++, Smi::FromInt(var_index));
+ scope_info.set(index++, name, mode);
+ scope_info.set(index++, Smi::FromInt(var_index));
DCHECK(function_name_info != CONTEXT ||
- var_index == scope_info->ContextLength() - 1);
+ var_index == scope_info.ContextLength() - 1);
}
- DCHECK_EQ(index, scope_info->InferredFunctionNameIndex());
+ DCHECK_EQ(index, scope_info.InferredFunctionNameIndex());
if (has_inferred_function_name) {
// The inferred function name is taken from the SFI.
index++;
}
- DCHECK_EQ(index, scope_info->PositionInfoIndex());
+ DCHECK_EQ(index, scope_info.PositionInfoIndex());
if (has_position_info) {
- scope_info->set(index++, Smi::FromInt(scope->start_position()));
- scope_info->set(index++, Smi::FromInt(scope->end_position()));
+ scope_info.set(index++, Smi::FromInt(scope->start_position()));
+ scope_info.set(index++, Smi::FromInt(scope->end_position()));
}
// If present, add the outer scope info.
- DCHECK(index == scope_info->OuterScopeInfoIndex());
+ DCHECK(index == scope_info.OuterScopeInfoIndex());
if (has_outer_scope_info) {
- scope_info->set(index++, *outer_scope.ToHandleChecked(), mode);
+ scope_info.set(index++, *outer_scope.ToHandleChecked(), mode);
}
}
@@ -354,9 +358,9 @@ Handle<ScopeInfo> ScopeInfo::CreateForWithScope(
ScopeTypeField::encode(WITH_SCOPE) | CallsSloppyEvalField::encode(false) |
LanguageModeField::encode(LanguageMode::kSloppy) |
DeclarationScopeField::encode(false) |
- ReceiverVariableField::encode(NONE) | HasNewTargetField::encode(false) |
- FunctionVariableField::encode(NONE) | IsAsmModuleField::encode(false) |
- HasSimpleParametersField::encode(true) |
+ ReceiverVariableField::encode(NONE) | HasClassBrandField::encode(false) |
+ HasNewTargetField::encode(false) | FunctionVariableField::encode(NONE) |
+ IsAsmModuleField::encode(false) | HasSimpleParametersField::encode(true) |
FunctionKindField::encode(kNormalFunction) |
HasOuterScopeInfoField::encode(has_outer_scope_info) |
IsDebugEvaluateScopeField::encode(false);
@@ -416,7 +420,7 @@ Handle<ScopeInfo> ScopeInfo::CreateForBootstrapping(Isolate* isolate,
LanguageModeField::encode(LanguageMode::kSloppy) |
DeclarationScopeField::encode(true) |
ReceiverVariableField::encode(is_empty_function ? UNUSED : CONTEXT) |
- HasNewTargetField::encode(false) |
+ HasClassBrandField::encode(false) | HasNewTargetField::encode(false) |
FunctionVariableField::encode(is_empty_function ? UNUSED : NONE) |
HasInferredFunctionNameField::encode(has_inferred_function_name) |
IsAsmModuleField::encode(false) | HasSimpleParametersField::encode(true) |
@@ -536,6 +540,10 @@ bool ScopeInfo::HasAllocatedReceiver() const {
return allocation == STACK || allocation == CONTEXT;
}
+bool ScopeInfo::HasClassBrand() const {
+ return HasClassBrandField::decode(Flags());
+}
+
bool ScopeInfo::HasNewTarget() const {
return HasNewTargetField::decode(Flags());
}
@@ -567,7 +575,7 @@ bool ScopeInfo::HasSharedFunctionName() const {
void ScopeInfo::SetFunctionName(Object name) {
DCHECK(HasFunctionName());
- DCHECK(name->IsString() || name == SharedFunctionInfo::kNoSharedNameSentinel);
+ DCHECK(name.IsString() || name == SharedFunctionInfo::kNoSharedNameSentinel);
set(FunctionNameInfoIndex(), name);
}
@@ -609,12 +617,12 @@ Object ScopeInfo::InferredFunctionName() const {
String ScopeInfo::FunctionDebugName() const {
Object name = FunctionName();
- if (name->IsString() && String::cast(name)->length() > 0) {
+ if (name.IsString() && String::cast(name).length() > 0) {
return String::cast(name);
}
if (HasInferredFunctionName()) {
name = InferredFunctionName();
- if (name->IsString()) return String::cast(name);
+ if (name.IsString()) return String::cast(name);
}
return GetReadOnlyRoots().empty_string();
}
@@ -698,15 +706,15 @@ bool ScopeInfo::VariableIsSynthetic(String name) {
// variable is a compiler-introduced temporary. However, to avoid conflict
// with user declarations, the current temporaries like .generator_object and
// .result start with a dot, so we can use that as a flag. It's a hack!
- return name->length() == 0 || name->Get(0) == '.' ||
- name->Equals(name->GetReadOnlyRoots().this_string());
+ return name.length() == 0 || name.Get(0) == '.' ||
+ name.Equals(name.GetReadOnlyRoots().this_string());
}
int ScopeInfo::ModuleIndex(String name, VariableMode* mode,
InitializationFlag* init_flag,
MaybeAssignedFlag* maybe_assigned_flag) {
DisallowHeapAllocation no_gc;
- DCHECK(name->IsInternalizedString());
+ DCHECK(name.IsInternalizedString());
DCHECK_EQ(scope_type(), MODULE_SCOPE);
DCHECK_NOT_NULL(mode);
DCHECK_NOT_NULL(init_flag);
@@ -716,7 +724,7 @@ int ScopeInfo::ModuleIndex(String name, VariableMode* mode,
int entry = ModuleVariablesIndex();
for (int i = 0; i < module_vars_count; ++i) {
String var_name = String::cast(get(entry + kModuleVariableNameOffset));
- if (name->Equals(var_name)) {
+ if (name.Equals(var_name)) {
int index;
ModuleVariable(i, nullptr, &index, mode, init_flag, maybe_assigned_flag);
return index;
@@ -733,24 +741,24 @@ int ScopeInfo::ContextSlotIndex(ScopeInfo scope_info, String name,
InitializationFlag* init_flag,
MaybeAssignedFlag* maybe_assigned_flag) {
DisallowHeapAllocation no_gc;
- DCHECK(name->IsInternalizedString());
+ DCHECK(name.IsInternalizedString());
DCHECK_NOT_NULL(mode);
DCHECK_NOT_NULL(init_flag);
DCHECK_NOT_NULL(maybe_assigned_flag);
- if (scope_info->length() == 0) return -1;
+ if (scope_info.length() == 0) return -1;
- int start = scope_info->ContextLocalNamesIndex();
- int end = start + scope_info->ContextLocalCount();
+ int start = scope_info.ContextLocalNamesIndex();
+ int end = start + scope_info.ContextLocalCount();
for (int i = start; i < end; ++i) {
- if (name != scope_info->get(i)) continue;
+ if (name != scope_info.get(i)) continue;
int var = i - start;
- *mode = scope_info->ContextLocalMode(var);
- *init_flag = scope_info->ContextLocalInitFlag(var);
- *maybe_assigned_flag = scope_info->ContextLocalMaybeAssignedFlag(var);
+ *mode = scope_info.ContextLocalMode(var);
+ *init_flag = scope_info.ContextLocalInitFlag(var);
+ *maybe_assigned_flag = scope_info.ContextLocalMaybeAssignedFlag(var);
int result = Context::MIN_CONTEXT_SLOTS + var;
- DCHECK_LT(result, scope_info->ContextLength());
+ DCHECK_LT(result, scope_info.ContextLength());
return result;
}
@@ -765,7 +773,7 @@ int ScopeInfo::ReceiverContextSlotIndex() const {
}
int ScopeInfo::FunctionContextSlotIndex(String name) const {
- DCHECK(name->IsInternalizedString());
+ DCHECK(name.IsInternalizedString());
if (length() > 0) {
if (FunctionVariableField::decode(Flags()) == CONTEXT &&
FunctionName() == name) {
@@ -946,22 +954,22 @@ Handle<ModuleInfo> ModuleInfo::New(Isolate* isolate, Zone* zone,
}
int ModuleInfo::RegularExportCount() const {
- DCHECK_EQ(regular_exports()->length() % kRegularExportLength, 0);
- return regular_exports()->length() / kRegularExportLength;
+ DCHECK_EQ(regular_exports().length() % kRegularExportLength, 0);
+ return regular_exports().length() / kRegularExportLength;
}
String ModuleInfo::RegularExportLocalName(int i) const {
- return String::cast(regular_exports()->get(i * kRegularExportLength +
- kRegularExportLocalNameOffset));
+ return String::cast(regular_exports().get(i * kRegularExportLength +
+ kRegularExportLocalNameOffset));
}
int ModuleInfo::RegularExportCellIndex(int i) const {
- return Smi::ToInt(regular_exports()->get(i * kRegularExportLength +
- kRegularExportCellIndexOffset));
+ return Smi::ToInt(regular_exports().get(i * kRegularExportLength +
+ kRegularExportCellIndexOffset));
}
FixedArray ModuleInfo::RegularExportExportNames(int i) const {
- return FixedArray::cast(regular_exports()->get(
+ return FixedArray::cast(regular_exports().get(
i * kRegularExportLength + kRegularExportExportNamesOffset));
}
diff --git a/deps/v8/src/objects/scope-info.h b/deps/v8/src/objects/scope-info.h
index b5fb1d1a7c..8d43357631 100644
--- a/deps/v8/src/objects/scope-info.h
+++ b/deps/v8/src/objects/scope-info.h
@@ -5,11 +5,11 @@
#ifndef V8_OBJECTS_SCOPE_INFO_H_
#define V8_OBJECTS_SCOPE_INFO_H_
-#include "src/function-kind.h"
-#include "src/globals.h"
-#include "src/objects.h"
+#include "src/common/globals.h"
#include "src/objects/fixed-array.h"
-#include "src/utils.h"
+#include "src/objects/function-kind.h"
+#include "src/objects/objects.h"
+#include "src/utils/utils.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -69,6 +69,9 @@ class ScopeInfo : public FixedArray {
// or context-allocated?
bool HasAllocatedReceiver() const;
+ // Does this scope have a class brand (for private methods)?
+ bool HasClassBrand() const;
+
// Does this scope declare a "new.target" binding?
bool HasNewTarget() const;
@@ -228,8 +231,10 @@ class ScopeInfo : public FixedArray {
class ReceiverVariableField
: public BitField<VariableAllocationInfo, DeclarationScopeField::kNext,
2> {};
- class HasNewTargetField
+ class HasClassBrandField
: public BitField<bool, ReceiverVariableField::kNext, 1> {};
+ class HasNewTargetField
+ : public BitField<bool, HasClassBrandField::kNext, 1> {};
class FunctionVariableField
: public BitField<VariableAllocationInfo, HasNewTargetField::kNext, 2> {};
// TODO(cbruni): Combine with function variable field when only storing the
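Inserting HasClassBrandField mid-chain is cheap because every field anchors at its predecessor's kNext, so all later fields shift up one bit automatically and no call site changes. A self-contained sketch of that chaining pattern, simplified from V8's BitField template (field names here are illustrative):

    #include <cstdint>
    #include <cstdio>

    template <typename T, int kShift, int kBits>
    struct BitField {
      static constexpr int kNext = kShift + kBits;
      static constexpr uint32_t kMask = ((1u << kBits) - 1) << kShift;
      static constexpr uint32_t encode(T value) {
        return static_cast<uint32_t>(value) << kShift;
      }
      static constexpr T decode(uint32_t flags) {
        return static_cast<T>((flags & kMask) >> kShift);
      }
    };

    // Chained like the ScopeInfo fields above: adding a field in the middle
    // renumbers everything after it through kNext.
    using ReceiverField = BitField<int, 0, 2>;
    using HasClassBrand = BitField<bool, ReceiverField::kNext, 1>;
    using HasNewTarget  = BitField<bool, HasClassBrand::kNext, 1>;

    int main() {
      uint32_t flags = ReceiverField::encode(2) | HasClassBrand::encode(true);
      std::printf("brand=%d new.target=%d\n", HasClassBrand::decode(flags),
                  HasNewTarget::decode(flags));  // brand=1 new.target=0
    }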
diff --git a/deps/v8/src/objects/script-inl.h b/deps/v8/src/objects/script-inl.h
index 33c794e4a5..07450c73ec 100644
--- a/deps/v8/src/objects/script-inl.h
+++ b/deps/v8/src/objects/script-inl.h
@@ -47,11 +47,11 @@ ACCESSORS_CHECKED(Script, wasm_module_object, Object,
this->type() == TYPE_WASM)
bool Script::is_wrapped() const {
- return eval_from_shared_or_wrapped_arguments()->IsFixedArray();
+ return eval_from_shared_or_wrapped_arguments().IsFixedArray();
}
bool Script::has_eval_from_shared() const {
- return eval_from_shared_or_wrapped_arguments()->IsSharedFunctionInfo();
+ return eval_from_shared_or_wrapped_arguments().IsSharedFunctionInfo();
}
void Script::set_eval_from_shared(SharedFunctionInfo shared,
@@ -104,13 +104,13 @@ void Script::set_origin_options(ScriptOriginOptions origin_options) {
bool Script::HasValidSource() {
Object src = this->source();
- if (!src->IsString()) return true;
+ if (!src.IsString()) return true;
String src_str = String::cast(src);
if (!StringShape(src_str).IsExternal()) return true;
- if (src_str->IsOneByteRepresentation()) {
- return ExternalOneByteString::cast(src)->resource() != nullptr;
- } else if (src_str->IsTwoByteRepresentation()) {
- return ExternalTwoByteString::cast(src)->resource() != nullptr;
+ if (src_str.IsOneByteRepresentation()) {
+ return ExternalOneByteString::cast(src).resource() != nullptr;
+ } else if (src_str.IsTwoByteRepresentation()) {
+ return ExternalTwoByteString::cast(src).resource() != nullptr;
}
return true;
}
diff --git a/deps/v8/src/objects/script.h b/deps/v8/src/objects/script.h
index 315ab038a8..2d9e4bca78 100644
--- a/deps/v8/src/objects/script.h
+++ b/deps/v8/src/objects/script.h
@@ -5,8 +5,8 @@
#ifndef V8_OBJECTS_SCRIPT_H_
#define V8_OBJECTS_SCRIPT_H_
-#include "src/objects.h"
#include "src/objects/fixed-array.h"
+#include "src/objects/objects.h"
#include "src/objects/struct.h"
// Has to be the last include (doesn't have include guards):
diff --git a/deps/v8/src/objects/shared-function-info-inl.h b/deps/v8/src/objects/shared-function-info-inl.h
index 1187db2d94..f5413ce1de 100644
--- a/deps/v8/src/objects/shared-function-info-inl.h
+++ b/deps/v8/src/objects/shared-function-info-inl.h
@@ -7,10 +7,10 @@
#include "src/objects/shared-function-info.h"
-#include "src/feedback-vector-inl.h"
-#include "src/handles-inl.h"
+#include "src/handles/handles-inl.h"
#include "src/heap/heap-write-barrier-inl.h"
#include "src/objects/debug-objects-inl.h"
+#include "src/objects/feedback-vector-inl.h"
#include "src/objects/scope-info.h"
#include "src/objects/templates.h"
#include "src/wasm/wasm-objects-inl.h"
@@ -47,14 +47,14 @@ byte PreparseData::get(int index) const {
DCHECK_LE(0, index);
DCHECK_LT(index, data_length());
int offset = kDataStartOffset + index * kByteSize;
- return READ_BYTE_FIELD(*this, offset);
+ return ReadField<byte>(offset);
}
void PreparseData::set(int index, byte value) {
DCHECK_LE(0, index);
DCHECK_LT(index, data_length());
int offset = kDataStartOffset + index * kByteSize;
- WRITE_BYTE_FIELD(*this, offset, value);
+ WriteField<byte>(offset, value);
}
void PreparseData::copy_in(int index, const byte* buffer, int length) {
@@ -128,6 +128,9 @@ ACCESSORS(SharedFunctionInfo, name_or_scope_info, Object,
ACCESSORS(SharedFunctionInfo, script_or_debug_info, Object,
kScriptOrDebugInfoOffset)
+#if V8_SFI_HAS_UNIQUE_ID
+INT_ACCESSORS(SharedFunctionInfo, unique_id, kUniqueIdOffset)
+#endif
UINT16_ACCESSORS(SharedFunctionInfo, length, kLengthOffset)
UINT16_ACCESSORS(SharedFunctionInfo, internal_formal_parameter_count,
kFormalParameterCountOffset)
@@ -139,8 +142,8 @@ RELAXED_INT32_ACCESSORS(SharedFunctionInfo, flags, kFlagsOffset)
bool SharedFunctionInfo::HasSharedName() const {
Object value = name_or_scope_info();
- if (value->IsScopeInfo()) {
- return ScopeInfo::cast(value)->HasSharedFunctionName();
+ if (value.IsScopeInfo()) {
+ return ScopeInfo::cast(value).HasSharedFunctionName();
}
return value != kNoSharedNameSentinel;
}
@@ -148,9 +151,9 @@ bool SharedFunctionInfo::HasSharedName() const {
String SharedFunctionInfo::Name() const {
if (!HasSharedName()) return GetReadOnlyRoots().empty_string();
Object value = name_or_scope_info();
- if (value->IsScopeInfo()) {
- if (ScopeInfo::cast(value)->HasFunctionName()) {
- return String::cast(ScopeInfo::cast(value)->FunctionName());
+ if (value.IsScopeInfo()) {
+ if (ScopeInfo::cast(value).HasFunctionName()) {
+ return String::cast(ScopeInfo::cast(value).FunctionName());
}
return GetReadOnlyRoots().empty_string();
}
@@ -159,10 +162,10 @@ String SharedFunctionInfo::Name() const {
void SharedFunctionInfo::SetName(String name) {
Object maybe_scope_info = name_or_scope_info();
- if (maybe_scope_info->IsScopeInfo()) {
- ScopeInfo::cast(maybe_scope_info)->SetFunctionName(name);
+ if (maybe_scope_info.IsScopeInfo()) {
+ ScopeInfo::cast(maybe_scope_info).SetFunctionName(name);
} else {
- DCHECK(maybe_scope_info->IsString() ||
+ DCHECK(maybe_scope_info.IsString() ||
maybe_scope_info == kNoSharedNameSentinel);
set_name_or_scope_info(name);
}
@@ -335,7 +338,7 @@ bool SharedFunctionInfo::IsInterpreted() const { return HasBytecodeArray(); }
ScopeInfo SharedFunctionInfo::scope_info() const {
Object maybe_scope_info = name_or_scope_info();
- if (maybe_scope_info->IsScopeInfo()) {
+ if (maybe_scope_info.IsScopeInfo()) {
return ScopeInfo::cast(maybe_scope_info);
}
return ScopeInfo::Empty(GetIsolate());
@@ -345,14 +348,14 @@ void SharedFunctionInfo::set_scope_info(ScopeInfo scope_info,
WriteBarrierMode mode) {
// Move the existing name onto the ScopeInfo.
Object name = name_or_scope_info();
- if (name->IsScopeInfo()) {
- name = ScopeInfo::cast(name)->FunctionName();
+ if (name.IsScopeInfo()) {
+ name = ScopeInfo::cast(name).FunctionName();
}
- DCHECK(name->IsString() || name == kNoSharedNameSentinel);
+ DCHECK(name.IsString() || name == kNoSharedNameSentinel);
// Only set the function name for function scopes.
- scope_info->SetFunctionName(name);
- if (HasInferredName() && inferred_name()->length() != 0) {
- scope_info->SetInferredFunctionName(inferred_name());
+ scope_info.SetFunctionName(name);
+ if (HasInferredName() && inferred_name().length() != 0) {
+ scope_info.SetInferredFunctionName(inferred_name());
}
WRITE_FIELD(*this, kNameOrScopeInfoOffset, scope_info);
CONDITIONAL_WRITE_BARRIER(*this, kNameOrScopeInfoOffset, scope_info, mode);
@@ -370,31 +373,31 @@ HeapObject SharedFunctionInfo::outer_scope_info() const {
bool SharedFunctionInfo::HasOuterScopeInfo() const {
ScopeInfo outer_info;
if (!is_compiled()) {
- if (!outer_scope_info()->IsScopeInfo()) return false;
+ if (!outer_scope_info().IsScopeInfo()) return false;
outer_info = ScopeInfo::cast(outer_scope_info());
} else {
- if (!scope_info()->HasOuterScopeInfo()) return false;
- outer_info = scope_info()->OuterScopeInfo();
+ if (!scope_info().HasOuterScopeInfo()) return false;
+ outer_info = scope_info().OuterScopeInfo();
}
- return outer_info->length() > 0;
+ return outer_info.length() > 0;
}
ScopeInfo SharedFunctionInfo::GetOuterScopeInfo() const {
DCHECK(HasOuterScopeInfo());
if (!is_compiled()) return ScopeInfo::cast(outer_scope_info());
- return scope_info()->OuterScopeInfo();
+ return scope_info().OuterScopeInfo();
}
void SharedFunctionInfo::set_outer_scope_info(HeapObject value,
WriteBarrierMode mode) {
DCHECK(!is_compiled());
- DCHECK(raw_outer_scope_info_or_feedback_metadata()->IsTheHole());
- DCHECK(value->IsScopeInfo() || value->IsTheHole());
+ DCHECK(raw_outer_scope_info_or_feedback_metadata().IsTheHole());
+ DCHECK(value.IsScopeInfo() || value.IsTheHole());
set_raw_outer_scope_info_or_feedback_metadata(value, mode);
}
bool SharedFunctionInfo::HasFeedbackMetadata() const {
- return raw_outer_scope_info_or_feedback_metadata()->IsFeedbackMetadata();
+ return raw_outer_scope_info_or_feedback_metadata().IsFeedbackMetadata();
}
FeedbackMetadata SharedFunctionInfo::feedback_metadata() const {
@@ -405,14 +408,14 @@ FeedbackMetadata SharedFunctionInfo::feedback_metadata() const {
void SharedFunctionInfo::set_feedback_metadata(FeedbackMetadata value,
WriteBarrierMode mode) {
DCHECK(!HasFeedbackMetadata());
- DCHECK(value->IsFeedbackMetadata());
+ DCHECK(value.IsFeedbackMetadata());
set_raw_outer_scope_info_or_feedback_metadata(value, mode);
}
bool SharedFunctionInfo::is_compiled() const {
Object data = function_data();
return data != Smi::FromEnum(Builtins::kCompileLazy) &&
- !data->IsUncompiledData();
+ !data.IsUncompiledData();
}
IsCompiledScope SharedFunctionInfo::is_compiled_scope() const {
@@ -421,19 +424,19 @@ IsCompiledScope SharedFunctionInfo::is_compiled_scope() const {
IsCompiledScope::IsCompiledScope(const SharedFunctionInfo shared,
Isolate* isolate)
- : retain_bytecode_(shared->HasBytecodeArray()
- ? handle(shared->GetBytecodeArray(), isolate)
+ : retain_bytecode_(shared.HasBytecodeArray()
+ ? handle(shared.GetBytecodeArray(), isolate)
: MaybeHandle<BytecodeArray>()),
- is_compiled_(shared->is_compiled()) {
+ is_compiled_(shared.is_compiled()) {
DCHECK_IMPLIES(!retain_bytecode_.is_null(), is_compiled());
}
bool SharedFunctionInfo::has_simple_parameters() {
- return scope_info()->HasSimpleParameters();
+ return scope_info().HasSimpleParameters();
}
bool SharedFunctionInfo::IsApiFunction() const {
- return function_data()->IsFunctionTemplateInfo();
+ return function_data().IsFunctionTemplateInfo();
}
FunctionTemplateInfo SharedFunctionInfo::get_api_func_data() {
@@ -442,40 +445,40 @@ FunctionTemplateInfo SharedFunctionInfo::get_api_func_data() {
}
bool SharedFunctionInfo::HasBytecodeArray() const {
- return function_data()->IsBytecodeArray() ||
- function_data()->IsInterpreterData();
+ return function_data().IsBytecodeArray() ||
+ function_data().IsInterpreterData();
}
BytecodeArray SharedFunctionInfo::GetBytecodeArray() const {
DCHECK(HasBytecodeArray());
- if (HasDebugInfo() && GetDebugInfo()->HasInstrumentedBytecodeArray()) {
- return GetDebugInfo()->OriginalBytecodeArray();
- } else if (function_data()->IsBytecodeArray()) {
+ if (HasDebugInfo() && GetDebugInfo().HasInstrumentedBytecodeArray()) {
+ return GetDebugInfo().OriginalBytecodeArray();
+ } else if (function_data().IsBytecodeArray()) {
return BytecodeArray::cast(function_data());
} else {
- DCHECK(function_data()->IsInterpreterData());
- return InterpreterData::cast(function_data())->bytecode_array();
+ DCHECK(function_data().IsInterpreterData());
+ return InterpreterData::cast(function_data()).bytecode_array();
}
}
BytecodeArray SharedFunctionInfo::GetDebugBytecodeArray() const {
DCHECK(HasBytecodeArray());
- DCHECK(HasDebugInfo() && GetDebugInfo()->HasInstrumentedBytecodeArray());
- if (function_data()->IsBytecodeArray()) {
+ DCHECK(HasDebugInfo() && GetDebugInfo().HasInstrumentedBytecodeArray());
+ if (function_data().IsBytecodeArray()) {
return BytecodeArray::cast(function_data());
} else {
- DCHECK(function_data()->IsInterpreterData());
- return InterpreterData::cast(function_data())->bytecode_array();
+ DCHECK(function_data().IsInterpreterData());
+ return InterpreterData::cast(function_data()).bytecode_array();
}
}
void SharedFunctionInfo::SetDebugBytecodeArray(BytecodeArray bytecode) {
DCHECK(HasBytecodeArray());
- if (function_data()->IsBytecodeArray()) {
+ if (function_data().IsBytecodeArray()) {
set_function_data(bytecode);
} else {
- DCHECK(function_data()->IsInterpreterData());
- interpreter_data()->set_bytecode_array(bytecode);
+ DCHECK(function_data().IsInterpreterData());
+ interpreter_data().set_bytecode_array(bytecode);
}
}
@@ -497,22 +500,22 @@ bool SharedFunctionInfo::ShouldFlushBytecode(BytecodeFlushMode mode) {
// check if it is old. Note, this is done this way since this function can be
// called by the concurrent marker.
Object data = function_data();
- if (!data->IsBytecodeArray()) return false;
+ if (!data.IsBytecodeArray()) return false;
if (mode == BytecodeFlushMode::kStressFlushBytecode) return true;
BytecodeArray bytecode = BytecodeArray::cast(data);
- return bytecode->IsOld();
+ return bytecode.IsOld();
}
Code SharedFunctionInfo::InterpreterTrampoline() const {
DCHECK(HasInterpreterData());
- return interpreter_data()->interpreter_trampoline();
+ return interpreter_data().interpreter_trampoline();
}
bool SharedFunctionInfo::HasInterpreterData() const {
- return function_data()->IsInterpreterData();
+ return function_data().IsInterpreterData();
}
InterpreterData SharedFunctionInfo::interpreter_data() const {
@@ -527,7 +530,7 @@ void SharedFunctionInfo::set_interpreter_data(
}
bool SharedFunctionInfo::HasAsmWasmData() const {
- return function_data()->IsAsmWasmData();
+ return function_data().IsAsmWasmData();
}
AsmWasmData SharedFunctionInfo::asm_wasm_data() const {
@@ -542,7 +545,7 @@ void SharedFunctionInfo::set_asm_wasm_data(AsmWasmData data) {
}
bool SharedFunctionInfo::HasBuiltinId() const {
- return function_data()->IsSmi();
+ return function_data().IsSmi();
}
int SharedFunctionInfo::builtin_id() const {
@@ -558,7 +561,7 @@ void SharedFunctionInfo::set_builtin_id(int builtin_id) {
}
bool SharedFunctionInfo::HasUncompiledData() const {
- return function_data()->IsUncompiledData();
+ return function_data().IsUncompiledData();
}
UncompiledData SharedFunctionInfo::uncompiled_data() const {
@@ -568,12 +571,12 @@ UncompiledData SharedFunctionInfo::uncompiled_data() const {
void SharedFunctionInfo::set_uncompiled_data(UncompiledData uncompiled_data) {
DCHECK(function_data() == Smi::FromEnum(Builtins::kCompileLazy));
- DCHECK(uncompiled_data->IsUncompiledData());
+ DCHECK(uncompiled_data.IsUncompiledData());
set_function_data(uncompiled_data);
}
bool SharedFunctionInfo::HasUncompiledDataWithPreparseData() const {
- return function_data()->IsUncompiledDataWithPreparseData();
+ return function_data().IsUncompiledDataWithPreparseData();
}
UncompiledDataWithPreparseData
@@ -585,13 +588,12 @@ SharedFunctionInfo::uncompiled_data_with_preparse_data() const {
void SharedFunctionInfo::set_uncompiled_data_with_preparse_data(
UncompiledDataWithPreparseData uncompiled_data_with_preparse_data) {
DCHECK(function_data() == Smi::FromEnum(Builtins::kCompileLazy));
- DCHECK(
- uncompiled_data_with_preparse_data->IsUncompiledDataWithPreparseData());
+ DCHECK(uncompiled_data_with_preparse_data.IsUncompiledDataWithPreparseData());
set_function_data(uncompiled_data_with_preparse_data);
}
bool SharedFunctionInfo::HasUncompiledDataWithoutPreparseData() const {
- return function_data()->IsUncompiledDataWithoutPreparseData();
+ return function_data().IsUncompiledDataWithoutPreparseData();
}
void SharedFunctionInfo::ClearPreparseData() {
@@ -610,12 +612,12 @@ void SharedFunctionInfo::ClearPreparseData() {
UncompiledDataWithPreparseData::kSize);
STATIC_ASSERT(UncompiledDataWithoutPreparseData::kSize ==
UncompiledData::kSize);
- data->synchronized_set_map(
+ data.synchronized_set_map(
GetReadOnlyRoots().uncompiled_data_without_preparse_data_map());
// Fill the remaining space with filler.
heap->CreateFillerObjectAt(
- data->address() + UncompiledDataWithoutPreparseData::kSize,
+ data.address() + UncompiledDataWithoutPreparseData::kSize,
UncompiledDataWithPreparseData::kSize -
UncompiledDataWithoutPreparseData::kSize,
ClearRecordedSlots::kNo);
@@ -624,23 +626,19 @@ void SharedFunctionInfo::ClearPreparseData() {
DCHECK(HasUncompiledDataWithoutPreparseData());
}
-OBJECT_CONSTRUCTORS_IMPL(SharedFunctionInfoWithID, SharedFunctionInfo)
-CAST_ACCESSOR(SharedFunctionInfoWithID)
-INT_ACCESSORS(SharedFunctionInfoWithID, unique_id, kUniqueIdOffset)
-
// static
void UncompiledData::Initialize(
UncompiledData data, String inferred_name, int start_position,
int end_position, int function_literal_id,
std::function<void(HeapObject object, ObjectSlot slot, HeapObject target)>
gc_notify_updated_slot) {
- data->set_inferred_name(inferred_name);
+ data.set_inferred_name(inferred_name);
gc_notify_updated_slot(
- data, data->RawField(UncompiledData::kInferredNameOffset), inferred_name);
- data->set_start_position(start_position);
- data->set_end_position(end_position);
- data->set_function_literal_id(function_literal_id);
- data->clear_padding();
+ data, data.RawField(UncompiledData::kInferredNameOffset), inferred_name);
+ data.set_start_position(start_position);
+ data.set_end_position(end_position);
+ data.set_function_literal_id(function_literal_id);
+ data.clear_padding();
}
void UncompiledDataWithPreparseData::Initialize(
@@ -651,9 +649,9 @@ void UncompiledDataWithPreparseData::Initialize(
gc_notify_updated_slot) {
UncompiledData::Initialize(data, inferred_name, start_position, end_position,
function_literal_id, gc_notify_updated_slot);
- data->set_preparse_data(scope_data);
+ data.set_preparse_data(scope_data);
gc_notify_updated_slot(
- data, data->RawField(UncompiledDataWithPreparseData::kPreparseDataOffset),
+ data, data.RawField(UncompiledDataWithPreparseData::kPreparseDataOffset),
scope_data);
}
@@ -662,28 +660,36 @@ bool UncompiledData::has_function_literal_id() {
}
bool SharedFunctionInfo::HasWasmExportedFunctionData() const {
- return function_data()->IsWasmExportedFunctionData();
+ return function_data().IsWasmExportedFunctionData();
+}
+
+bool SharedFunctionInfo::HasWasmJSFunctionData() const {
+ return function_data().IsWasmJSFunctionData();
+}
+
+bool SharedFunctionInfo::HasWasmCapiFunctionData() const {
+ return function_data().IsWasmCapiFunctionData();
}
Object SharedFunctionInfo::script() const {
Object maybe_script = script_or_debug_info();
- if (maybe_script->IsDebugInfo()) {
- return DebugInfo::cast(maybe_script)->script();
+ if (maybe_script.IsDebugInfo()) {
+ return DebugInfo::cast(maybe_script).script();
}
return maybe_script;
}
void SharedFunctionInfo::set_script(Object script) {
Object maybe_debug_info = script_or_debug_info();
- if (maybe_debug_info->IsDebugInfo()) {
- DebugInfo::cast(maybe_debug_info)->set_script(script);
+ if (maybe_debug_info.IsDebugInfo()) {
+ DebugInfo::cast(maybe_debug_info).set_script(script);
} else {
set_script_or_debug_info(script);
}
}
bool SharedFunctionInfo::HasDebugInfo() const {
- return script_or_debug_info()->IsDebugInfo();
+ return script_or_debug_info().IsDebugInfo();
}
DebugInfo SharedFunctionInfo::GetDebugInfo() const {
@@ -693,37 +699,37 @@ DebugInfo SharedFunctionInfo::GetDebugInfo() const {
void SharedFunctionInfo::SetDebugInfo(DebugInfo debug_info) {
DCHECK(!HasDebugInfo());
- DCHECK_EQ(debug_info->script(), script_or_debug_info());
+ DCHECK_EQ(debug_info.script(), script_or_debug_info());
set_script_or_debug_info(debug_info);
}
bool SharedFunctionInfo::HasInferredName() {
Object scope_info = name_or_scope_info();
- if (scope_info->IsScopeInfo()) {
- return ScopeInfo::cast(scope_info)->HasInferredFunctionName();
+ if (scope_info.IsScopeInfo()) {
+ return ScopeInfo::cast(scope_info).HasInferredFunctionName();
}
return HasUncompiledData();
}
String SharedFunctionInfo::inferred_name() {
Object maybe_scope_info = name_or_scope_info();
- if (maybe_scope_info->IsScopeInfo()) {
+ if (maybe_scope_info.IsScopeInfo()) {
ScopeInfo scope_info = ScopeInfo::cast(maybe_scope_info);
- if (scope_info->HasInferredFunctionName()) {
- Object name = scope_info->InferredFunctionName();
- if (name->IsString()) return String::cast(name);
+ if (scope_info.HasInferredFunctionName()) {
+ Object name = scope_info.InferredFunctionName();
+ if (name.IsString()) return String::cast(name);
}
} else if (HasUncompiledData()) {
- return uncompiled_data()->inferred_name();
+ return uncompiled_data().inferred_name();
}
return GetReadOnlyRoots().empty_string();
}
bool SharedFunctionInfo::IsUserJavaScript() {
Object script_obj = script();
- if (script_obj->IsUndefined()) return false;
+ if (script_obj.IsUndefined()) return false;
Script script = Script::cast(script_obj);
- return script->IsUserJavaScript();
+ return script.IsUserJavaScript();
}
bool SharedFunctionInfo::IsSubjectToDebugging() {
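The PreparseData::get/set hunk near the top of this file trades the READ_BYTE_FIELD/WRITE_BYTE_FIELD macros for the typed ReadField<T>/WriteField<T> member templates. The underlying idea is plain typed access at a byte offset into the object's payload; a standalone sketch of the pattern (hypothetical Blob type, not V8's actual HeapObject plumbing):

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    using byte = uint8_t;

    struct Blob {
      byte storage[16] = {};

      template <typename T>
      T ReadField(int offset) const {
        T value;
        std::memcpy(&value, storage + offset, sizeof(T));  // alignment-safe
        return value;
      }

      template <typename T>
      void WriteField(int offset, T value) {
        std::memcpy(storage + offset, &value, sizeof(T));
      }
    };

    int main() {
      Blob b;
      b.WriteField<byte>(3, 0x2a);
      std::printf("%d\n", b.ReadField<byte>(3));  // 42
    }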
diff --git a/deps/v8/src/objects/shared-function-info.h b/deps/v8/src/objects/shared-function-info.h
index 5b79098fc0..a3b84ee46e 100644
--- a/deps/v8/src/objects/shared-function-info.h
+++ b/deps/v8/src/objects/shared-function-info.h
@@ -5,16 +5,16 @@
#ifndef V8_OBJECTS_SHARED_FUNCTION_INFO_H_
#define V8_OBJECTS_SHARED_FUNCTION_INFO_H_
-#include "src/bailout-reason.h"
-#include "src/function-kind.h"
-#include "src/objects.h"
+#include "src/codegen/bailout-reason.h"
#include "src/objects/compressed-slots.h"
+#include "src/objects/function-kind.h"
+#include "src/objects/objects.h"
#include "src/objects/script.h"
#include "src/objects/slots.h"
#include "src/objects/smi.h"
#include "src/objects/struct.h"
#include "testing/gtest/include/gtest/gtest_prod.h"
-#include "torque-generated/class-definitions-from-dsl.h"
+#include "torque-generated/field-offsets-tq.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -32,7 +32,9 @@ class BytecodeArray;
class CoverageInfo;
class DebugInfo;
class IsCompiledScope;
+class WasmCapiFunctionData;
class WasmExportedFunctionData;
+class WasmJSFunctionData;
// Data collected by the pre-parser storing information about scopes and inner
// functions.
@@ -118,9 +120,9 @@ class UncompiledData : public HeapObject {
// Layout description.
#define UNCOMPILED_DATA_FIELDS(V) \
- V(kStartOfPointerFieldsOffset, 0) \
+ V(kStartOfStrongFieldsOffset, 0) \
V(kInferredNameOffset, kTaggedSize) \
- V(kEndOfTaggedFieldsOffset, 0) \
+ V(kEndOfStrongFieldsOffset, 0) \
/* Raw data fields. */ \
V(kStartPositionOffset, kInt32Size) \
V(kEndPositionOffset, kInt32Size) \
@@ -132,8 +134,8 @@ class UncompiledData : public HeapObject {
DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize, UNCOMPILED_DATA_FIELDS)
#undef UNCOMPILED_DATA_FIELDS
- using BodyDescriptor = FixedBodyDescriptor<kStartOfPointerFieldsOffset,
- kEndOfTaggedFieldsOffset, kSize>;
+ using BodyDescriptor = FixedBodyDescriptor<kStartOfStrongFieldsOffset,
+ kEndOfStrongFieldsOffset, kSize>;
// Clear uninitialized padding space.
inline void clear_padding();
@@ -179,9 +181,9 @@ class UncompiledDataWithPreparseData : public UncompiledData {
// Layout description.
#define UNCOMPILED_DATA_WITH_PREPARSE_DATA_FIELDS(V) \
- V(kStartOfPointerFieldsOffset, 0) \
+ V(kStartOfStrongFieldsOffset, 0) \
V(kPreparseDataOffset, kTaggedSize) \
- V(kEndOfTaggedFieldsOffset, 0) \
+ V(kEndOfStrongFieldsOffset, 0) \
/* Total size. */ \
V(kSize, 0)
@@ -194,7 +196,7 @@ class UncompiledDataWithPreparseData : public UncompiledData {
using BodyDescriptor = SubclassBodyDescriptor<
UncompiledData::BodyDescriptor,
- FixedBodyDescriptor<kStartOfPointerFieldsOffset, kEndOfTaggedFieldsOffset,
+ FixedBodyDescriptor<kStartOfStrongFieldsOffset, kEndOfStrongFieldsOffset,
kSize>>;
OBJECT_CONSTRUCTORS(UncompiledDataWithPreparseData, UncompiledData);
@@ -314,6 +316,12 @@ class SharedFunctionInfo : public HeapObject {
// function. The value is only reliable when the function has been compiled.
DECL_UINT16_ACCESSORS(expected_nof_properties)
+#if V8_SFI_HAS_UNIQUE_ID
+ // [unique_id] - For --trace-maps purposes, an identifier that's persistent
+ // even if the GC moves this SharedFunctionInfo.
+ DECL_INT_ACCESSORS(unique_id)
+#endif
+
// [function data]: This field holds some additional data for the function.
// Currently it has one of:
// - a FunctionTemplateInfo for API functions [IsApiFunction()].
@@ -361,6 +369,10 @@ class SharedFunctionInfo : public HeapObject {
inline bool HasUncompiledDataWithoutPreparseData() const;
inline bool HasWasmExportedFunctionData() const;
WasmExportedFunctionData wasm_exported_function_data() const;
+ inline bool HasWasmJSFunctionData() const;
+ WasmJSFunctionData wasm_js_function_data() const;
+ inline bool HasWasmCapiFunctionData() const;
+ WasmCapiFunctionData wasm_capi_function_data() const;
// Clear out pre-parsed scope data from UncompiledDataWithPreparseData,
// turning it into UncompiledDataWithoutPreparseData.
@@ -581,6 +593,8 @@ class SharedFunctionInfo : public HeapObject {
static void EnsureSourcePositionsAvailable(
Isolate* isolate, Handle<SharedFunctionInfo> shared_info);
+ bool AreSourcePositionsAvailable() const;
+
// Hash based on function literal id and script id.
V8_EXPORT_PRIVATE uint32_t Hash();
@@ -601,7 +615,8 @@ class SharedFunctionInfo : public HeapObject {
#endif
// Returns the SharedFunctionInfo in a format tracing can support.
- std::unique_ptr<v8::tracing::TracedValue> ToTracedValue();
+ std::unique_ptr<v8::tracing::TracedValue> ToTracedValue(
+ FunctionLiteral* literal);
// The tracing scope for SharedFunctionInfo objects.
static const char* kTraceScope;
@@ -702,14 +717,6 @@ class SharedFunctionInfo : public HeapObject {
// This is needed to set up the [[HomeObject]] on the function instance.
inline bool needs_home_object() const;
- V8_INLINE bool IsSharedFunctionInfoWithID() const {
-#if V8_SFI_HAS_UNIQUE_ID
- return true;
-#else
- return false;
-#endif
- }
-
private:
// [name_or_scope_info]: Function name string, kNoSharedNameSentinel or
// ScopeInfo.
@@ -744,23 +751,6 @@ class SharedFunctionInfo : public HeapObject {
OBJECT_CONSTRUCTORS(SharedFunctionInfo, HeapObject);
};
-class SharedFunctionInfoWithID : public SharedFunctionInfo {
- public:
- // [unique_id] - For --trace-maps purposes, an identifier that's persistent
- // even if the GC moves this SharedFunctionInfo.
- DECL_INT_ACCESSORS(unique_id)
-
- DECL_CAST(SharedFunctionInfoWithID)
-
- DEFINE_FIELD_OFFSET_CONSTANTS(
- SharedFunctionInfo::kSize,
- TORQUE_GENERATED_SHARED_FUNCTION_INFO_WITH_ID_FIELDS)
-
- static const int kAlignedSize = POINTER_SIZE_ALIGN(kSize);
-
- OBJECT_CONSTRUCTORS(SharedFunctionInfoWithID, SharedFunctionInfo);
-};
-
// Printing support.
struct SourceCodeOf {
explicit SourceCodeOf(SharedFunctionInfo v, int max = -1)
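The net effect of dropping SharedFunctionInfoWithID is that unique_id becomes an ordinary, conditionally compiled field on SharedFunctionInfo itself, guarded by V8_SFI_HAS_UNIQUE_ID, instead of living on a subclass callers had to test for via IsSharedFunctionInfoWithID(). Reduced to a plain struct, the shape of the change is (illustrative only):

    struct SfiSketch {
    #if V8_SFI_HAS_UNIQUE_ID
      int unique_id;  // persistent identity for --trace-maps, survives GC moves
    #endif
      // ... the unconditional SharedFunctionInfo fields ...
    };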
diff --git a/deps/v8/src/objects/slots-atomic-inl.h b/deps/v8/src/objects/slots-atomic-inl.h
index 57da18dd66..220013b7b5 100644
--- a/deps/v8/src/objects/slots-atomic-inl.h
+++ b/deps/v8/src/objects/slots-atomic-inl.h
@@ -80,6 +80,7 @@ class AtomicSlot : public SlotBase<AtomicSlot, Tagged_t> {
AtomicSlot() : SlotBase(kNullAddress) {}
explicit AtomicSlot(Address address) : SlotBase(address) {}
explicit AtomicSlot(ObjectSlot slot) : SlotBase(slot.address()) {}
+ explicit AtomicSlot(MaybeObjectSlot slot) : SlotBase(slot.address()) {}
Reference operator*() const {
return Reference(reinterpret_cast<Tagged_t*>(address()));
diff --git a/deps/v8/src/objects/slots-inl.h b/deps/v8/src/objects/slots-inl.h
index e0a42fbd91..b240729114 100644
--- a/deps/v8/src/objects/slots-inl.h
+++ b/deps/v8/src/objects/slots-inl.h
@@ -8,11 +8,12 @@
#include "src/objects/slots.h"
#include "src/base/atomic-utils.h"
-#include "src/memcopy.h"
-#include "src/objects.h"
-#include "src/objects/heap-object-inl.h"
+#include "src/common/ptr-compr-inl.h"
+#include "src/objects/compressed-slots.h"
+#include "src/objects/heap-object.h"
#include "src/objects/maybe-object.h"
-#include "src/ptr-compr-inl.h"
+#include "src/objects/objects.h"
+#include "src/utils/memcopy.h"
namespace v8 {
namespace internal {
@@ -30,7 +31,7 @@ bool FullObjectSlot::contains_value(Address raw_value) const {
const Object FullObjectSlot::operator*() const { return Object(*location()); }
-void FullObjectSlot::store(Object value) const { *location() = value->ptr(); }
+void FullObjectSlot::store(Object value) const { *location() = value.ptr(); }
Object FullObjectSlot::Acquire_Load() const {
return Object(base::AsAtomicPointer::Acquire_Load(location()));
@@ -41,16 +42,16 @@ Object FullObjectSlot::Relaxed_Load() const {
}
void FullObjectSlot::Relaxed_Store(Object value) const {
- base::AsAtomicPointer::Relaxed_Store(location(), value->ptr());
+ base::AsAtomicPointer::Relaxed_Store(location(), value.ptr());
}
void FullObjectSlot::Release_Store(Object value) const {
- base::AsAtomicPointer::Release_Store(location(), value->ptr());
+ base::AsAtomicPointer::Release_Store(location(), value.ptr());
}
Object FullObjectSlot::Release_CompareAndSwap(Object old, Object target) const {
Address result = base::AsAtomicPointer::Release_CompareAndSwap(
- location(), old->ptr(), target->ptr());
+ location(), old.ptr(), target.ptr());
return Object(result);
}
@@ -98,7 +99,7 @@ HeapObject FullHeapObjectSlot::ToHeapObject() const {
}
void FullHeapObjectSlot::StoreHeapObject(HeapObject value) const {
- *location() = value->ptr();
+ *location() = value.ptr();
}
//
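The value->ptr() to value.ptr() rewrites here, like the arrow-to-dot changes throughout this commit, follow from Object now being a value type wrapping one tagged word rather than something pointer-like; storing to a slot is just copying that word. A freestanding sketch of the idea (hypothetical Address/Object/FullObjectSlot definitions, not the real V8 ones):

    #include <cstdint>
    #include <cstdio>

    using Address = uintptr_t;

    // Value-type object: a single tagged word, passed and copied by value.
    class Object {
     public:
      explicit Object(Address ptr) : ptr_(ptr) {}
      Address ptr() const { return ptr_; }  // hence value.ptr(), not value->ptr()
     private:
      Address ptr_;
    };

    class FullObjectSlot {
     public:
      explicit FullObjectSlot(Address* location) : location_(location) {}
      void store(Object value) const { *location_ = value.ptr(); }
      Object load() const { return Object(*location_); }
     private:
      Address* location_;
    };

    int main() {
      Address cell = 0;
      FullObjectSlot slot(&cell);
      slot.store(Object(0x1234));
      std::printf("%#lx\n", static_cast<unsigned long>(slot.load().ptr()));
    }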
diff --git a/deps/v8/src/objects/slots.h b/deps/v8/src/objects/slots.h
index 18c8152f5b..fa8b558939 100644
--- a/deps/v8/src/objects/slots.h
+++ b/deps/v8/src/objects/slots.h
@@ -5,8 +5,8 @@
#ifndef V8_OBJECTS_SLOTS_H_
#define V8_OBJECTS_SLOTS_H_
-#include "src/globals.h"
-#include "src/v8memory.h"
+#include "src/common/globals.h"
+#include "src/common/v8memory.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/objects/smi-inl.h b/deps/v8/src/objects/smi-inl.h
index 38d644fbea..b355a5b1bd 100644
--- a/deps/v8/src/objects/smi-inl.h
+++ b/deps/v8/src/objects/smi-inl.h
@@ -13,9 +13,7 @@
namespace v8 {
namespace internal {
-CAST_ACCESSOR(Smi)
-
-int Smi::ToInt(const Object object) { return Smi::cast(object)->value(); }
+// TODO(ishell): remove this file
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/objects/smi.h b/deps/v8/src/objects/smi.h
index 938fc5504d..0f93f37458 100644
--- a/deps/v8/src/objects/smi.h
+++ b/deps/v8/src/objects/smi.h
@@ -5,7 +5,7 @@
#ifndef V8_OBJECTS_SMI_H_
#define V8_OBJECTS_SMI_H_
-#include "src/globals.h"
+#include "src/common/globals.h"
#include "src/objects/heap-object.h"
// Has to be the last include (doesn't have include guards):
@@ -41,7 +41,9 @@ class Smi : public Object {
}
// Convert a Smi object to an int.
- static inline int ToInt(const Object object);
+ static inline int ToInt(const Object object) {
+ return Smi::cast(object).value();
+ }
// Convert a value to a Smi object.
static inline constexpr Smi FromInt(int value) {
@@ -107,6 +109,8 @@ class Smi : public Object {
static constexpr int kMaxValue = kSmiMaxValue;
};
+CAST_ACCESSOR(Smi)
+
} // namespace internal
} // namespace v8
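With Smi::ToInt now a plain inline in the header (and CAST_ACCESSOR(Smi) moved here with it), smi-inl.h has nothing left to do. The conversion itself is the usual small-integer tagging: the value lives in the tagged word, recovered by an arithmetic shift. A self-contained sketch of the encoding, using a one-bit tag as on 32-bit V8 (constants illustrative):

    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    using Address = intptr_t;

    constexpr int kSmiTagSize = 1;  // Smis keep the low bit 0; heap pointers set it

    Address FromInt(int value) {
      // Shift in unsigned space to avoid UB on negative values.
      return static_cast<Address>(static_cast<uintptr_t>(value) << kSmiTagSize);
    }

    int ToInt(Address tagged) {
      assert((tagged & 1) == 0 && "not a Smi");
      return static_cast<int>(tagged >> kSmiTagSize);  // arithmetic shift back
    }

    int main() {
      std::printf("%d\n", ToInt(FromInt(-7)));  // -7
    }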
diff --git a/deps/v8/src/objects/stack-frame-info-inl.h b/deps/v8/src/objects/stack-frame-info-inl.h
index 1007c78b18..8069e6e5c9 100644
--- a/deps/v8/src/objects/stack-frame-info-inl.h
+++ b/deps/v8/src/objects/stack-frame-info-inl.h
@@ -8,8 +8,8 @@
#include "src/objects/stack-frame-info.h"
#include "src/heap/heap-write-barrier-inl.h"
-#include "src/objects-inl.h"
#include "src/objects/frame-array-inl.h"
+#include "src/objects/objects-inl.h"
#include "src/objects/struct-inl.h"
// Has to be the last include (doesn't have include guards):
@@ -27,14 +27,20 @@ CAST_ACCESSOR(StackFrameInfo)
SMI_ACCESSORS(StackFrameInfo, line_number, kLineNumberOffset)
SMI_ACCESSORS(StackFrameInfo, column_number, kColumnNumberOffset)
SMI_ACCESSORS(StackFrameInfo, script_id, kScriptIdOffset)
+SMI_ACCESSORS(StackFrameInfo, promise_all_index, kPromiseAllIndexOffset)
ACCESSORS(StackFrameInfo, script_name, Object, kScriptNameOffset)
ACCESSORS(StackFrameInfo, script_name_or_source_url, Object,
kScriptNameOrSourceUrlOffset)
ACCESSORS(StackFrameInfo, function_name, Object, kFunctionNameOffset)
+ACCESSORS(StackFrameInfo, wasm_module_name, Object, kWasmModuleNameOffset)
SMI_ACCESSORS(StackFrameInfo, flag, kFlagOffset)
BOOL_ACCESSORS(StackFrameInfo, flag, is_eval, kIsEvalBit)
BOOL_ACCESSORS(StackFrameInfo, flag, is_constructor, kIsConstructorBit)
BOOL_ACCESSORS(StackFrameInfo, flag, is_wasm, kIsWasmBit)
+BOOL_ACCESSORS(StackFrameInfo, flag, is_user_java_script, kIsUserJavaScriptBit)
+BOOL_ACCESSORS(StackFrameInfo, flag, is_toplevel, kIsToplevelBit)
+BOOL_ACCESSORS(StackFrameInfo, flag, is_async, kIsAsyncBit)
+BOOL_ACCESSORS(StackFrameInfo, flag, is_promise_all, kIsPromiseAllBit)
OBJECT_CONSTRUCTORS_IMPL(StackTraceFrame, Struct)
NEVER_READ_ONLY_SPACE_IMPL(StackTraceFrame)
diff --git a/deps/v8/src/objects/stack-frame-info.cc b/deps/v8/src/objects/stack-frame-info.cc
index 45ab671ee5..f427d7eae2 100644
--- a/deps/v8/src/objects/stack-frame-info.cc
+++ b/deps/v8/src/objects/stack-frame-info.cc
@@ -10,59 +10,77 @@ namespace v8 {
namespace internal {
int StackTraceFrame::GetLineNumber(Handle<StackTraceFrame> frame) {
- if (frame->frame_info()->IsUndefined()) InitializeFrameInfo(frame);
int line = GetFrameInfo(frame)->line_number();
return line != StackFrameBase::kNone ? line : Message::kNoLineNumberInfo;
}
int StackTraceFrame::GetColumnNumber(Handle<StackTraceFrame> frame) {
- if (frame->frame_info()->IsUndefined()) InitializeFrameInfo(frame);
int column = GetFrameInfo(frame)->column_number();
return column != StackFrameBase::kNone ? column : Message::kNoColumnInfo;
}
int StackTraceFrame::GetScriptId(Handle<StackTraceFrame> frame) {
- if (frame->frame_info()->IsUndefined()) InitializeFrameInfo(frame);
int id = GetFrameInfo(frame)->script_id();
return id != StackFrameBase::kNone ? id : Message::kNoScriptIdInfo;
}
+int StackTraceFrame::GetPromiseAllIndex(Handle<StackTraceFrame> frame) {
+ return GetFrameInfo(frame)->promise_all_index();
+}
+
Handle<Object> StackTraceFrame::GetFileName(Handle<StackTraceFrame> frame) {
- if (frame->frame_info()->IsUndefined()) InitializeFrameInfo(frame);
auto name = GetFrameInfo(frame)->script_name();
return handle(name, frame->GetIsolate());
}
Handle<Object> StackTraceFrame::GetScriptNameOrSourceUrl(
Handle<StackTraceFrame> frame) {
- if (frame->frame_info()->IsUndefined()) InitializeFrameInfo(frame);
auto name = GetFrameInfo(frame)->script_name_or_source_url();
return handle(name, frame->GetIsolate());
}
Handle<Object> StackTraceFrame::GetFunctionName(Handle<StackTraceFrame> frame) {
- if (frame->frame_info()->IsUndefined()) InitializeFrameInfo(frame);
auto name = GetFrameInfo(frame)->function_name();
return handle(name, frame->GetIsolate());
}
+Handle<Object> StackTraceFrame::GetWasmModuleName(
+ Handle<StackTraceFrame> frame) {
+ auto module = GetFrameInfo(frame)->wasm_module_name();
+ return handle(module, frame->GetIsolate());
+}
+
bool StackTraceFrame::IsEval(Handle<StackTraceFrame> frame) {
- if (frame->frame_info()->IsUndefined()) InitializeFrameInfo(frame);
return GetFrameInfo(frame)->is_eval();
}
bool StackTraceFrame::IsConstructor(Handle<StackTraceFrame> frame) {
- if (frame->frame_info()->IsUndefined()) InitializeFrameInfo(frame);
return GetFrameInfo(frame)->is_constructor();
}
bool StackTraceFrame::IsWasm(Handle<StackTraceFrame> frame) {
- if (frame->frame_info()->IsUndefined()) InitializeFrameInfo(frame);
return GetFrameInfo(frame)->is_wasm();
}
+bool StackTraceFrame::IsUserJavaScript(Handle<StackTraceFrame> frame) {
+ return GetFrameInfo(frame)->is_user_java_script();
+}
+
+bool StackTraceFrame::IsToplevel(Handle<StackTraceFrame> frame) {
+ return GetFrameInfo(frame)->is_toplevel();
+}
+
+bool StackTraceFrame::IsAsync(Handle<StackTraceFrame> frame) {
+ return GetFrameInfo(frame)->is_async();
+}
+
+bool StackTraceFrame::IsPromiseAll(Handle<StackTraceFrame> frame) {
+ return GetFrameInfo(frame)->is_promise_all();
+}
+
Handle<StackFrameInfo> StackTraceFrame::GetFrameInfo(
Handle<StackTraceFrame> frame) {
+ if (frame->frame_info().IsUndefined()) InitializeFrameInfo(frame);
return handle(StackFrameInfo::cast(frame->frame_info()), frame->GetIsolate());
}
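Worth noting in this file: every getter used to repeat `if (frame->frame_info()->IsUndefined()) InitializeFrameInfo(frame);`, and that lazy initialization now happens exactly once inside GetFrameInfo, which all the getters funnel through. A minimal sketch of the consolidated pattern (hypothetical types):

    #include <optional>

    struct Info { int line = 0; };

    class Frame {
     public:
      int line_number() { return GetInfo().line; }  // all getters go through GetInfo
     private:
      Info& GetInfo() {
        if (!info_) info_.emplace(Compute());  // initialize once, on first use
        return *info_;
      }
      Info Compute() { return Info{42}; }
      std::optional<Info> info_;
    };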
diff --git a/deps/v8/src/objects/stack-frame-info.h b/deps/v8/src/objects/stack-frame-info.h
index cf1d4b0e2d..44826f67e6 100644
--- a/deps/v8/src/objects/stack-frame-info.h
+++ b/deps/v8/src/objects/stack-frame-info.h
@@ -21,12 +21,18 @@ class StackFrameInfo : public Struct {
DECL_INT_ACCESSORS(line_number)
DECL_INT_ACCESSORS(column_number)
DECL_INT_ACCESSORS(script_id)
+ DECL_INT_ACCESSORS(promise_all_index)
DECL_ACCESSORS(script_name, Object)
DECL_ACCESSORS(script_name_or_source_url, Object)
DECL_ACCESSORS(function_name, Object)
+ DECL_ACCESSORS(wasm_module_name, Object)
DECL_BOOLEAN_ACCESSORS(is_eval)
DECL_BOOLEAN_ACCESSORS(is_constructor)
DECL_BOOLEAN_ACCESSORS(is_wasm)
+ DECL_BOOLEAN_ACCESSORS(is_user_java_script)
+ DECL_BOOLEAN_ACCESSORS(is_toplevel)
+ DECL_BOOLEAN_ACCESSORS(is_async)
+ DECL_BOOLEAN_ACCESSORS(is_promise_all)
DECL_INT_ACCESSORS(flag)
DECL_CAST(StackFrameInfo)
@@ -43,6 +49,10 @@ class StackFrameInfo : public Struct {
static const int kIsEvalBit = 0;
static const int kIsConstructorBit = 1;
static const int kIsWasmBit = 2;
+ static const int kIsUserJavaScriptBit = 3;
+ static const int kIsToplevelBit = 4;
+ static const int kIsAsyncBit = 5;
+ static const int kIsPromiseAllBit = 6;
OBJECT_CONSTRUCTORS(StackFrameInfo, Struct);
};
@@ -72,14 +82,20 @@ class StackTraceFrame : public Struct {
static int GetLineNumber(Handle<StackTraceFrame> frame);
static int GetColumnNumber(Handle<StackTraceFrame> frame);
static int GetScriptId(Handle<StackTraceFrame> frame);
+ static int GetPromiseAllIndex(Handle<StackTraceFrame> frame);
static Handle<Object> GetFileName(Handle<StackTraceFrame> frame);
static Handle<Object> GetScriptNameOrSourceUrl(Handle<StackTraceFrame> frame);
static Handle<Object> GetFunctionName(Handle<StackTraceFrame> frame);
+ static Handle<Object> GetWasmModuleName(Handle<StackTraceFrame> frame);
static bool IsEval(Handle<StackTraceFrame> frame);
static bool IsConstructor(Handle<StackTraceFrame> frame);
static bool IsWasm(Handle<StackTraceFrame> frame);
+ static bool IsUserJavaScript(Handle<StackTraceFrame> frame);
+ static bool IsToplevel(Handle<StackTraceFrame> frame);
+ static bool IsAsync(Handle<StackTraceFrame> frame);
+ static bool IsPromiseAll(Handle<StackTraceFrame> frame);
private:
OBJECT_CONSTRUCTORS(StackTraceFrame, Struct);
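All of these booleans share a single flag word, addressed by the kIs*Bit positions declared above. A small sketch of that packing, assuming each DECL_BOOLEAN_ACCESSORS entry maps to one bit of the int flag field:

#include <cassert>

class FrameFlags {
 public:
  // Bit positions in the style of kIsEvalBit .. kIsPromiseAllBit.
  static const int kIsEvalBit = 0;
  static const int kIsConstructorBit = 1;
  static const int kIsWasmBit = 2;
  static const int kIsAsyncBit = 5;

  bool get(int bit) const { return (flag_ >> bit) & 1; }
  void set(int bit, bool value) {
    if (value) flag_ |= 1 << bit;
    else       flag_ &= ~(1 << bit);
  }

 private:
  int flag_ = 0;  // one int backs all boolean accessors
};

int main() {
  FrameFlags f;
  f.set(FrameFlags::kIsAsyncBit, true);
  assert(f.get(FrameFlags::kIsAsyncBit));
  assert(!f.get(FrameFlags::kIsWasmBit));
}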
diff --git a/deps/v8/src/objects/string-comparator.cc b/deps/v8/src/objects/string-comparator.cc
index b29f9c3d7b..6f517edb20 100644
--- a/deps/v8/src/objects/string-comparator.cc
+++ b/deps/v8/src/objects/string-comparator.cc
@@ -40,7 +40,7 @@ void StringComparator::State::Advance(int consumed) {
}
bool StringComparator::Equals(String string_1, String string_2) {
- int length = string_1->length();
+ int length = string_1.length();
state_1_.Init(string_1);
state_2_.Init(string_2);
while (true) {
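Only the length pre-check is visible in this hunk; Equals then walks both strings segment by segment, advancing whichever side is consumed first. A rough standalone sketch of that two-cursor comparison over chunked storage (simplified types, not the V8 classes):

#include <algorithm>
#include <cstring>
#include <iostream>
#include <string>
#include <vector>

// Each string is stored as a list of chunks (like cons/sliced pieces).
using Chunked = std::vector<std::string>;

bool Equals(const Chunked& a, const Chunked& b) {
  size_t ia = 0, ib = 0;  // chunk indices
  size_t oa = 0, ob = 0;  // offsets within the current chunks
  while (true) {
    bool a_done = ia == a.size(), b_done = ib == b.size();
    if (a_done || b_done) return a_done && b_done;
    // Compare as far as both current chunks allow, then advance.
    size_t n = std::min(a[ia].size() - oa, b[ib].size() - ob);
    if (std::memcmp(a[ia].data() + oa, b[ib].data() + ob, n) != 0) return false;
    oa += n; ob += n;
    if (oa == a[ia].size()) { ++ia; oa = 0; }
    if (ob == b[ib].size()) { ++ib; ob = 0; }
  }
}

int main() {
  std::cout << Equals({"foo", "bar"}, {"fo", "obar"}) << "\n";  // 1
}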
diff --git a/deps/v8/src/objects/string-comparator.h b/deps/v8/src/objects/string-comparator.h
index 5b4354deeb..8cee98a642 100644
--- a/deps/v8/src/objects/string-comparator.h
+++ b/deps/v8/src/objects/string-comparator.h
@@ -6,9 +6,9 @@
#define V8_OBJECTS_STRING_COMPARATOR_H_
#include "src/base/logging.h"
-#include "src/globals.h"
+#include "src/common/globals.h"
#include "src/objects/string.h"
-#include "src/utils.h"
+#include "src/utils/utils.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/objects/string-inl.h b/deps/v8/src/objects/string-inl.h
index f3a4f5908b..0d8f83ca86 100644
--- a/deps/v8/src/objects/string-inl.h
+++ b/deps/v8/src/objects/string-inl.h
@@ -7,14 +7,14 @@
#include "src/objects/string.h"
-#include "src/conversions-inl.h"
-#include "src/handles-inl.h"
-#include "src/hash-seed-inl.h"
+#include "src/handles/handles-inl.h"
#include "src/heap/factory.h"
+#include "src/numbers/conversions-inl.h"
+#include "src/numbers/hash-seed-inl.h"
#include "src/objects/name-inl.h"
#include "src/objects/smi-inl.h"
#include "src/objects/string-table-inl.h"
-#include "src/string-hasher-inl.h"
+#include "src/strings/string-hasher-inl.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -58,13 +58,12 @@ CAST_ACCESSOR(SlicedString)
CAST_ACCESSOR(String)
CAST_ACCESSOR(ThinString)
-StringShape::StringShape(const String str)
- : type_(str->map()->instance_type()) {
+StringShape::StringShape(const String str) : type_(str.map().instance_type()) {
set_valid();
DCHECK_EQ(type_ & kIsNotStringMask, kStringTag);
}
-StringShape::StringShape(Map map) : type_(map->instance_type()) {
+StringShape::StringShape(Map map) : type_(map.instance_type()) {
set_valid();
DCHECK_EQ(type_ & kIsNotStringMask, kStringTag);
}
@@ -149,18 +148,18 @@ STATIC_ASSERT((kExternalStringTag | kTwoByteStringTag) ==
STATIC_ASSERT(v8::String::TWO_BYTE_ENCODING == kTwoByteStringTag);
bool String::IsOneByteRepresentation() const {
- uint32_t type = map()->instance_type();
+ uint32_t type = map().instance_type();
return (type & kStringEncodingMask) == kOneByteStringTag;
}
bool String::IsTwoByteRepresentation() const {
- uint32_t type = map()->instance_type();
+ uint32_t type = map().instance_type();
return (type & kStringEncodingMask) == kTwoByteStringTag;
}
bool String::IsOneByteRepresentationUnderneath(String string) {
while (true) {
- uint32_t type = string.map()->instance_type();
+ uint32_t type = string.map().instance_type();
STATIC_ASSERT(kIsIndirectStringTag != 0);
STATIC_ASSERT((kIsIndirectStringMask & kStringEncodingMask) == 0);
DCHECK(string.IsFlat());
@@ -195,30 +194,51 @@ Char FlatStringReader::Get(int index) {
}
template <typename Char>
-class SequentialStringKey : public StringTableKey {
+class SequentialStringKey final : public StringTableKey {
public:
- explicit SequentialStringKey(Vector<const Char> string, uint64_t seed)
- : StringTableKey(StringHasher::HashSequentialString<Char>(
- string.start(), string.length(), seed)),
- string_(string) {}
-
- Vector<const Char> string_;
-};
-
-class OneByteStringKey : public SequentialStringKey<uint8_t> {
- public:
- OneByteStringKey(Vector<const uint8_t> str, uint64_t seed)
- : SequentialStringKey<uint8_t>(str, seed) {}
+ SequentialStringKey(const Vector<const Char>& chars, uint64_t seed,
+ bool convert = false)
+ : SequentialStringKey(StringHasher::HashSequentialString<Char>(
+ chars.begin(), chars.length(), seed),
+ chars, convert) {}
+
+ SequentialStringKey(int hash, const Vector<const Char>& chars,
+ bool convert = false)
+ : StringTableKey(hash, chars.length()),
+ chars_(chars),
+ convert_(convert) {}
+
+ bool IsMatch(String s) override {
+ DisallowHeapAllocation no_gc;
+ if (s.IsOneByteRepresentation()) {
+ const uint8_t* chars = s.GetChars<uint8_t>(no_gc);
+ return CompareChars(chars, chars_.begin(), chars_.length()) == 0;
+ }
+ const uint16_t* chars = s.GetChars<uint16_t>(no_gc);
+ return CompareChars(chars, chars_.begin(), chars_.length()) == 0;
+ }
- bool IsMatch(Object string) override {
- return String::cast(string)->IsOneByteEqualTo(string_);
+ Handle<String> AsHandle(Isolate* isolate) override {
+ if (sizeof(Char) == 1) {
+ return isolate->factory()->NewOneByteInternalizedString(
+ Vector<const uint8_t>::cast(chars_), hash_field());
+ }
+ return isolate->factory()->NewTwoByteInternalizedString(
+ Vector<const uint16_t>::cast(chars_), hash_field());
}
- Handle<String> AsHandle(Isolate* isolate) override;
+ private:
+ Vector<const Char> chars_;
+ bool convert_;
};
-class SeqOneByteSubStringKey : public StringTableKey {
+using OneByteStringKey = SequentialStringKey<uint8_t>;
+using TwoByteStringKey = SequentialStringKey<uint16_t>;
+
+template <typename SeqString>
+class SeqSubStringKey final : public StringTableKey {
public:
+ using Char = typename SeqString::Char;
// VS 2017 on official builds gives this spurious warning:
// warning C4789: buffer 'key' of size 16 bytes will be overrun; 4 bytes will
// be written starting at offset 16
@@ -227,68 +247,69 @@ class SeqOneByteSubStringKey : public StringTableKey {
#pragma warning(push)
#pragma warning(disable : 4789)
#endif
- SeqOneByteSubStringKey(Isolate* isolate, Handle<SeqOneByteString> string,
- int from, int length)
- : StringTableKey(0), string_(string), from_(from), length_(length) {
+ SeqSubStringKey(Isolate* isolate, Handle<SeqString> string, int from, int len,
+ bool convert = false)
+ : StringTableKey(0, len),
+ string_(string),
+ from_(from),
+ convert_(convert) {
// We have to set the hash later.
DisallowHeapAllocation no_gc;
uint32_t hash = StringHasher::HashSequentialString(
- string->GetChars(no_gc) + from, length, HashSeed(isolate));
+ string->GetChars(no_gc) + from, len, HashSeed(isolate));
set_hash_field(hash);
- DCHECK_LE(0, length_);
- DCHECK_LE(from_ + length_, string_->length());
- DCHECK(string_->IsSeqOneByteString());
+ DCHECK_LE(0, length());
+ DCHECK_LE(from_ + length(), string_->length());
+ DCHECK_EQ(string_->IsSeqOneByteString(), sizeof(Char) == 1);
+ DCHECK_EQ(string_->IsSeqTwoByteString(), sizeof(Char) == 2);
}
#if defined(V8_CC_MSVC)
#pragma warning(pop)
#endif
- bool IsMatch(Object string) override;
- Handle<String> AsHandle(Isolate* isolate) override;
-
- private:
- Handle<SeqOneByteString> string_;
- int from_;
- int length_;
-};
-
-class TwoByteStringKey : public SequentialStringKey<uc16> {
- public:
- explicit TwoByteStringKey(Vector<const uc16> str, uint64_t seed)
- : SequentialStringKey<uc16>(str, seed) {}
-
- bool IsMatch(Object string) override {
- return String::cast(string)->IsTwoByteEqualTo(string_);
- }
-
- Handle<String> AsHandle(Isolate* isolate) override;
-};
-
-// Utf8StringKey carries a vector of chars as key.
-class Utf8StringKey : public StringTableKey {
- public:
- explicit Utf8StringKey(Vector<const char> string, uint64_t seed)
- : StringTableKey(StringHasher::ComputeUtf8Hash(string, seed, &chars_)),
- string_(string) {}
-
- bool IsMatch(Object string) override {
- return String::cast(string)->IsUtf8EqualTo(string_);
+ bool IsMatch(String string) override {
+ DisallowHeapAllocation no_gc;
+ if (string.IsOneByteRepresentation()) {
+ const uint8_t* data = string.GetChars<uint8_t>(no_gc);
+ return CompareChars(string_->GetChars(no_gc) + from_, data, length()) ==
+ 0;
+ }
+ const uint16_t* data = string.GetChars<uint16_t>(no_gc);
+ return CompareChars(string_->GetChars(no_gc) + from_, data, length()) == 0;
}
Handle<String> AsHandle(Isolate* isolate) override {
- return isolate->factory()->NewInternalizedStringFromUtf8(string_, chars_,
- HashField());
+ if (sizeof(Char) == 1 || (sizeof(Char) == 2 && convert_)) {
+ Handle<SeqOneByteString> result =
+ isolate->factory()->AllocateRawOneByteInternalizedString(
+ length(), hash_field());
+ DisallowHeapAllocation no_gc;
+ CopyChars(result->GetChars(no_gc), string_->GetChars(no_gc) + from_,
+ length());
+ return result;
+ }
+ Handle<SeqTwoByteString> result =
+ isolate->factory()->AllocateRawTwoByteInternalizedString(length(),
+ hash_field());
+ DisallowHeapAllocation no_gc;
+ CopyChars(result->GetChars(no_gc), string_->GetChars(no_gc) + from_,
+ length());
+ return result;
}
private:
- Vector<const char> string_;
- int chars_; // Caches the number of characters when computing the hash code.
+ Handle<typename CharTraits<Char>::String> string_;
+ int from_;
+ bool convert_;
};
+using SeqOneByteSubStringKey = SeqSubStringKey<SeqOneByteString>;
+using SeqTwoByteSubStringKey = SeqSubStringKey<SeqTwoByteString>;
+
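The new key classes carry a precomputed hash and length, so the table can reject candidates cheaply before ever calling IsMatch. A condensed sketch of that lookup-key pattern against a plain container (Fnv1a and Lookup are illustrative helpers, not V8 code):

#include <cstdint>
#include <iostream>
#include <string>
#include <unordered_set>

uint32_t Fnv1a(const std::string& s) {
  uint32_t h = 2166136261u;
  for (unsigned char c : s) { h ^= c; h *= 16777619u; }
  return h;
}

struct Key {
  uint32_t hash;       // precomputed once, like StringTableKey's hash
  std::string chars;
  bool IsMatch(const std::string& s) const {
    // Cheap rejection first (length), full compare last.
    return s.size() == chars.size() && s == chars;
  }
};

const std::string* Lookup(const std::unordered_set<std::string>& table,
                          const Key& key) {
  for (const auto& s : table) {            // stand-in for probing a bucket
    if (Fnv1a(s) != key.hash) continue;    // hash mismatch: skip IsMatch
    if (key.IsMatch(s)) return &s;
  }
  return nullptr;
}

int main() {
  std::unordered_set<std::string> table = {"foo", "bar"};
  Key k{Fnv1a("foo"), "foo"};
  std::cout << (Lookup(table, k) != nullptr) << "\n";  // 1
}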
bool String::Equals(String other) {
if (other == *this) return true;
- if (this->IsInternalizedString() && other->IsInternalizedString()) {
+ if (this->IsInternalizedString() && other.IsInternalizedString()) {
return false;
}
return SlowEquals(other);
@@ -302,6 +323,13 @@ bool String::Equals(Isolate* isolate, Handle<String> one, Handle<String> two) {
return SlowEquals(isolate, one, two);
}
+template <typename Char>
+const Char* String::GetChars(const DisallowHeapAllocation& no_gc) {
+ return StringShape(*this).IsExternal()
+ ? CharTraits<Char>::ExternalString::cast(*this).GetChars()
+ : CharTraits<Char>::String::cast(*this).GetChars(no_gc);
+}
+
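GetChars picks the concrete string class from the character type through CharTraits. A toy version of that traits dispatch, with made-up storage structs standing in for the Seq/External string types:

#include <cstdint>
#include <iostream>

struct OneByteStorage { const uint8_t* data; };
struct TwoByteStorage { const uint16_t* data; };

// Map a char type to the storage type that holds it, as CharTraits does.
template <typename Char> struct CharTraits;
template <> struct CharTraits<uint8_t>  { using Storage = OneByteStorage; };
template <> struct CharTraits<uint16_t> { using Storage = TwoByteStorage; };

template <typename Char>
const Char* GetChars(const typename CharTraits<Char>::Storage& s) {
  return s.data;
}

int main() {
  const uint8_t bytes[] = {'h', 'i'};
  OneByteStorage s{bytes};
  std::cout << static_cast<char>(GetChars<uint8_t>(s)[0]) << "\n";  // h
}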
Handle<String> String::Flatten(Isolate* isolate, Handle<String> string,
AllocationType allocation) {
if (string->IsConsString()) {
@@ -323,24 +351,22 @@ uint16_t String::Get(int index) {
DCHECK(index >= 0 && index < length());
switch (StringShape(*this).full_representation_tag()) {
case kSeqStringTag | kOneByteStringTag:
- return SeqOneByteString::cast(*this)->SeqOneByteStringGet(index);
+ return SeqOneByteString::cast(*this).Get(index);
case kSeqStringTag | kTwoByteStringTag:
- return SeqTwoByteString::cast(*this)->SeqTwoByteStringGet(index);
+ return SeqTwoByteString::cast(*this).Get(index);
case kConsStringTag | kOneByteStringTag:
case kConsStringTag | kTwoByteStringTag:
- return ConsString::cast(*this)->ConsStringGet(index);
+ return ConsString::cast(*this).Get(index);
case kExternalStringTag | kOneByteStringTag:
- return ExternalOneByteString::cast(*this)->ExternalOneByteStringGet(
- index);
+ return ExternalOneByteString::cast(*this).Get(index);
case kExternalStringTag | kTwoByteStringTag:
- return ExternalTwoByteString::cast(*this)->ExternalTwoByteStringGet(
- index);
+ return ExternalTwoByteString::cast(*this).Get(index);
case kSlicedStringTag | kOneByteStringTag:
case kSlicedStringTag | kTwoByteStringTag:
- return SlicedString::cast(*this)->SlicedStringGet(index);
+ return SlicedString::cast(*this).Get(index);
case kThinStringTag | kOneByteStringTag:
case kThinStringTag | kTwoByteStringTag:
- return ThinString::cast(*this)->ThinStringGet(index);
+ return ThinString::cast(*this).Get(index);
default:
break;
}
@@ -353,13 +379,13 @@ void String::Set(int index, uint16_t value) {
DCHECK(StringShape(*this).IsSequential());
return this->IsOneByteRepresentation()
- ? SeqOneByteString::cast(*this)->SeqOneByteStringSet(index, value)
- : SeqTwoByteString::cast(*this)->SeqTwoByteStringSet(index, value);
+ ? SeqOneByteString::cast(*this).SeqOneByteStringSet(index, value)
+ : SeqTwoByteString::cast(*this).SeqTwoByteStringSet(index, value);
}
bool String::IsFlat() {
if (!StringShape(*this).IsCons()) return true;
- return ConsString::cast(*this)->second()->length() == 0;
+ return ConsString::cast(*this).second().length() == 0;
}
String String::GetUnderlying() {
@@ -380,40 +406,40 @@ ConsString String::VisitFlat(Visitor* visitor, String string,
const int offset) {
DisallowHeapAllocation no_gc;
int slice_offset = offset;
- const int length = string->length();
+ const int length = string.length();
DCHECK(offset <= length);
while (true) {
- int32_t type = string->map()->instance_type();
+ int32_t type = string.map().instance_type();
switch (type & (kStringRepresentationMask | kStringEncodingMask)) {
case kSeqStringTag | kOneByteStringTag:
visitor->VisitOneByteString(
- SeqOneByteString::cast(string)->GetChars(no_gc) + slice_offset,
+ SeqOneByteString::cast(string).GetChars(no_gc) + slice_offset,
length - offset);
return ConsString();
case kSeqStringTag | kTwoByteStringTag:
visitor->VisitTwoByteString(
- SeqTwoByteString::cast(string)->GetChars(no_gc) + slice_offset,
+ SeqTwoByteString::cast(string).GetChars(no_gc) + slice_offset,
length - offset);
return ConsString();
case kExternalStringTag | kOneByteStringTag:
visitor->VisitOneByteString(
- ExternalOneByteString::cast(string)->GetChars() + slice_offset,
+ ExternalOneByteString::cast(string).GetChars() + slice_offset,
length - offset);
return ConsString();
case kExternalStringTag | kTwoByteStringTag:
visitor->VisitTwoByteString(
- ExternalTwoByteString::cast(string)->GetChars() + slice_offset,
+ ExternalTwoByteString::cast(string).GetChars() + slice_offset,
length - offset);
return ConsString();
case kSlicedStringTag | kOneByteStringTag:
case kSlicedStringTag | kTwoByteStringTag: {
SlicedString slicedString = SlicedString::cast(string);
- slice_offset += slicedString->offset();
- string = slicedString->parent();
+ slice_offset += slicedString.offset();
+ string = slicedString.parent();
continue;
}
@@ -423,7 +449,7 @@ ConsString String::VisitFlat(Visitor* visitor, String string,
case kThinStringTag | kOneByteStringTag:
case kThinStringTag | kTwoByteStringTag:
- string = ThinString::cast(string)->actual();
+ string = ThinString::cast(string).actual();
continue;
default:
@@ -455,15 +481,14 @@ uint32_t String::ToValidIndex(Object number) {
return index;
}
-uint16_t SeqOneByteString::SeqOneByteStringGet(int index) {
+uint8_t SeqOneByteString::Get(int index) {
DCHECK(index >= 0 && index < length());
- return READ_BYTE_FIELD(*this, kHeaderSize + index * kCharSize);
+ return ReadField<byte>(kHeaderSize + index * kCharSize);
}
void SeqOneByteString::SeqOneByteStringSet(int index, uint16_t value) {
DCHECK(index >= 0 && index < length() && value <= kMaxOneByteCharCode);
- WRITE_BYTE_FIELD(*this, kHeaderSize + index * kCharSize,
- static_cast<byte>(value));
+ WriteField<byte>(kHeaderSize + index * kCharSize, static_cast<byte>(value));
}
Address SeqOneByteString::GetCharsAddress() {
@@ -484,14 +509,14 @@ uc16* SeqTwoByteString::GetChars(const DisallowHeapAllocation& no_gc) {
return reinterpret_cast<uc16*>(FIELD_ADDR(*this, kHeaderSize));
}
-uint16_t SeqTwoByteString::SeqTwoByteStringGet(int index) {
+uint16_t SeqTwoByteString::Get(int index) {
DCHECK(index >= 0 && index < length());
- return READ_UINT16_FIELD(*this, kHeaderSize + index * kShortSize);
+ return ReadField<uint16_t>(kHeaderSize + index * kShortSize);
}
void SeqTwoByteString::SeqTwoByteStringSet(int index, uint16_t value) {
DCHECK(index >= 0 && index < length());
- WRITE_UINT16_FIELD(*this, kHeaderSize + index * kShortSize, value);
+ WriteField<uint16_t>(kHeaderSize + index * kShortSize, value);
}
int SeqTwoByteString::SeqTwoByteStringSize(InstanceType instance_type) {
@@ -508,7 +533,7 @@ String SlicedString::parent() {
void SlicedString::set_parent(Isolate* isolate, String parent,
WriteBarrierMode mode) {
- DCHECK(parent->IsSeqString() || parent->IsExternalString());
+ DCHECK(parent.IsSeqString() || parent.IsExternalString());
WRITE_FIELD(*this, kParentOffset, parent);
CONDITIONAL_WRITE_BARRIER(*this, kParentOffset, parent, mode);
}
@@ -548,53 +573,52 @@ HeapObject ThinString::unchecked_actual() const {
}
bool ExternalString::is_uncached() const {
- InstanceType type = map()->instance_type();
+ InstanceType type = map().instance_type();
return (type & kUncachedExternalStringMask) == kUncachedExternalStringTag;
}
Address ExternalString::resource_as_address() {
- return READ_UINTPTR_FIELD(*this, kResourceOffset);
+ return ReadField<Address>(kResourceOffset);
}
void ExternalString::set_address_as_resource(Address address) {
- WRITE_UINTPTR_FIELD(*this, kResourceOffset, address);
+ WriteField<Address>(kResourceOffset, address);
if (IsExternalOneByteString()) {
- ExternalOneByteString::cast(*this)->update_data_cache();
+ ExternalOneByteString::cast(*this).update_data_cache();
} else {
- ExternalTwoByteString::cast(*this)->update_data_cache();
+ ExternalTwoByteString::cast(*this).update_data_cache();
}
}
uint32_t ExternalString::resource_as_uint32() {
- return static_cast<uint32_t>(READ_UINTPTR_FIELD(*this, kResourceOffset));
+ return static_cast<uint32_t>(ReadField<Address>(kResourceOffset));
}
void ExternalString::set_uint32_as_resource(uint32_t value) {
- WRITE_UINTPTR_FIELD(*this, kResourceOffset, value);
+ WriteField<Address>(kResourceOffset, value);
if (is_uncached()) return;
- WRITE_UINTPTR_FIELD(*this, kResourceDataOffset, kNullAddress);
+ WriteField<Address>(kResourceDataOffset, kNullAddress);
}
void ExternalString::DisposeResource() {
v8::String::ExternalStringResourceBase* resource =
reinterpret_cast<v8::String::ExternalStringResourceBase*>(
- READ_UINTPTR_FIELD(*this, ExternalString::kResourceOffset));
+ ReadField<Address>(ExternalString::kResourceOffset));
// Dispose of the C++ object if it has not already been disposed.
if (resource != nullptr) {
resource->Dispose();
- WRITE_UINTPTR_FIELD(*this, ExternalString::kResourceOffset, kNullAddress);
+ WriteField<Address>(ExternalString::kResourceOffset, kNullAddress);
}
}
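DisposeResource clears the resource slot after disposing, so a second call is a no-op. A sketch of that dispose-once discipline under a simplified ownership model:

#include <iostream>

struct Resource {
  void Dispose() { std::cout << "disposed\n"; delete this; }
};

class Holder {
 public:
  ~Holder() { DisposeResource(); }
  void DisposeResource() {
    // Dispose of the object if it has not already been disposed,
    // then clear the slot so repeated calls are harmless.
    if (resource_ != nullptr) {
      resource_->Dispose();
      resource_ = nullptr;
    }
  }
  Resource* resource_ = new Resource();
};

int main() {
  Holder h;
  h.DisposeResource();
  h.DisposeResource();  // safe: slot already cleared
}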
const ExternalOneByteString::Resource* ExternalOneByteString::resource() {
- return reinterpret_cast<Resource*>(
- READ_UINTPTR_FIELD(*this, kResourceOffset));
+ return reinterpret_cast<Resource*>(ReadField<Address>(kResourceOffset));
}
void ExternalOneByteString::update_data_cache() {
if (is_uncached()) return;
- WRITE_UINTPTR_FIELD(*this, kResourceDataOffset,
+ WriteField<Address>(kResourceDataOffset,
reinterpret_cast<Address>(resource()->data()));
}
@@ -609,8 +633,7 @@ void ExternalOneByteString::SetResource(
void ExternalOneByteString::set_resource(
const ExternalOneByteString::Resource* resource) {
- WRITE_UINTPTR_FIELD(*this, kResourceOffset,
- reinterpret_cast<Address>(resource));
+ WriteField<Address>(kResourceOffset, reinterpret_cast<Address>(resource));
if (resource != nullptr) update_data_cache();
}
@@ -618,19 +641,18 @@ const uint8_t* ExternalOneByteString::GetChars() {
return reinterpret_cast<const uint8_t*>(resource()->data());
}
-uint16_t ExternalOneByteString::ExternalOneByteStringGet(int index) {
+uint8_t ExternalOneByteString::Get(int index) {
DCHECK(index >= 0 && index < length());
return GetChars()[index];
}
const ExternalTwoByteString::Resource* ExternalTwoByteString::resource() {
- return reinterpret_cast<Resource*>(
- READ_UINTPTR_FIELD(*this, kResourceOffset));
+ return reinterpret_cast<Resource*>(ReadField<Address>(kResourceOffset));
}
void ExternalTwoByteString::update_data_cache() {
if (is_uncached()) return;
- WRITE_UINTPTR_FIELD(*this, kResourceDataOffset,
+ WriteField<Address>(kResourceDataOffset,
reinterpret_cast<Address>(resource()->data()));
}
@@ -645,14 +667,13 @@ void ExternalTwoByteString::SetResource(
void ExternalTwoByteString::set_resource(
const ExternalTwoByteString::Resource* resource) {
- WRITE_UINTPTR_FIELD(*this, kResourceOffset,
- reinterpret_cast<Address>(resource));
+ WriteField<Address>(kResourceOffset, reinterpret_cast<Address>(resource));
if (resource != nullptr) update_data_cache();
}
const uint16_t* ExternalTwoByteString::GetChars() { return resource()->data(); }
-uint16_t ExternalTwoByteString::ExternalTwoByteStringGet(int index) {
+uint16_t ExternalTwoByteString::Get(int index) {
DCHECK(index >= 0 && index < length());
return GetChars()[index];
}
@@ -745,7 +766,7 @@ SubStringRange::SubStringRange(String string,
int length)
: string_(string),
first_(first),
- length_(length == -1 ? string->length() : length),
+ length_(length == -1 ? string.length() : length),
no_gc_(no_gc) {}
class SubStringRange::iterator final {
@@ -775,7 +796,7 @@ class SubStringRange::iterator final {
friend class String;
friend class SubStringRange;
iterator(String from, int offset, const DisallowHeapAllocation& no_gc)
- : content_(from->GetFlatContent(no_gc)), offset_(offset) {}
+ : content_(from.GetFlatContent(no_gc)), offset_(offset) {}
String::FlatContent content_;
int offset_;
};
diff --git a/deps/v8/src/objects/string-table-inl.h b/deps/v8/src/objects/string-table-inl.h
index 199f0a0f6b..1b7b7d140c 100644
--- a/deps/v8/src/objects/string-table-inl.h
+++ b/deps/v8/src/objects/string-table-inl.h
@@ -28,24 +28,39 @@ StringSet::StringSet(Address ptr) : HashTable<StringSet, StringSetShape>(ptr) {
}
bool StringSetShape::IsMatch(String key, Object value) {
- DCHECK(value->IsString());
- return key->Equals(String::cast(value));
+ DCHECK(value.IsString());
+ return key.Equals(String::cast(value));
}
uint32_t StringSetShape::Hash(Isolate* isolate, String key) {
- return key->Hash();
+ return key.Hash();
}
uint32_t StringSetShape::HashForObject(ReadOnlyRoots roots, Object object) {
- return String::cast(object)->Hash();
+ return String::cast(object).Hash();
}
-StringTableKey::StringTableKey(uint32_t hash_field)
- : HashTableKey(hash_field >> Name::kHashShift), hash_field_(hash_field) {}
+bool StringTableShape::IsMatch(Key key, Object value) {
+ String string = String::cast(value);
+ if (string.hash_field() != key->hash_field()) return false;
+ if (string.length() != key->length()) return false;
+ return key->IsMatch(string);
+}
+
+StringTableKey::StringTableKey(uint32_t hash_field, int length)
+ : hash_field_(hash_field), length_(length) {}
void StringTableKey::set_hash_field(uint32_t hash_field) {
hash_field_ = hash_field;
- set_hash(hash_field >> Name::kHashShift);
+}
+
+uint32_t StringTableKey::hash() const {
+ return hash_field_ >> Name::kHashShift;
+}
+
+// static
+uint32_t StringTableShape::Hash(Isolate* isolate, Key key) {
+ return key->hash();
}
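The key now stores the full hash field and derives the table hash by shifting off the low flag bits, the same split Name uses. A sketch of that packing, assuming (as in Name) the hash proper lives above kHashShift and the low bits carry metadata:

#include <cassert>
#include <cstdint>

// Low bits of the field are flags ("hash computed?", "is array index?", ...);
// the hash proper sits above kHashShift. The shift width is an assumption.
constexpr int kHashShift = 2;

constexpr uint32_t MakeHashField(uint32_t hash, uint32_t flags) {
  return (hash << kHashShift) | flags;
}
constexpr uint32_t HashFromField(uint32_t field) {
  return field >> kHashShift;
}

int main() {
  uint32_t field = MakeHashField(0x1234, 0x1);
  assert(HashFromField(field) == 0x1234);
}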
Handle<Object> StringTableShape::AsHandle(Isolate* isolate,
@@ -54,7 +69,7 @@ Handle<Object> StringTableShape::AsHandle(Isolate* isolate,
}
uint32_t StringTableShape::HashForObject(ReadOnlyRoots roots, Object object) {
- return String::cast(object)->Hash();
+ return String::cast(object).Hash();
}
RootIndex StringTableShape::GetMapRootIndex() {
diff --git a/deps/v8/src/objects/string-table.h b/deps/v8/src/objects/string-table.h
index e71a3a1341..6279137b1f 100644
--- a/deps/v8/src/objects/string-table.h
+++ b/deps/v8/src/objects/string-table.h
@@ -6,7 +6,7 @@
#define V8_OBJECTS_STRING_TABLE_H_
#include "src/objects/hash-table.h"
-#include "src/roots.h"
+#include "src/roots/roots.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -14,30 +14,34 @@
namespace v8 {
namespace internal {
-class StringTableKey : public HashTableKey {
+class StringTableKey {
public:
- explicit inline StringTableKey(uint32_t hash_field);
+ virtual ~StringTableKey() {}
+ inline StringTableKey(uint32_t hash_field, int length);
virtual Handle<String> AsHandle(Isolate* isolate) = 0;
- uint32_t HashField() const {
+ uint32_t hash_field() const {
DCHECK_NE(0, hash_field_);
return hash_field_;
}
+ virtual bool IsMatch(String string) = 0;
+ inline uint32_t hash() const;
+ int length() const { return length_; }
+
protected:
inline void set_hash_field(uint32_t hash_field);
private:
uint32_t hash_field_ = 0;
+ int length_;
};
class StringTableShape : public BaseShape<StringTableKey*> {
public:
- static inline bool IsMatch(Key key, Object value) {
- return key->IsMatch(value);
- }
+ static inline bool IsMatch(Key key, Object value);
- static inline uint32_t Hash(Isolate* isolate, Key key) { return key->Hash(); }
+ static inline uint32_t Hash(Isolate* isolate, Key key);
static inline uint32_t HashForObject(ReadOnlyRoots roots, Object object);
@@ -61,20 +65,15 @@ class StringTable : public HashTable<StringTable, StringTableShape> {
// added. The return value is the string found.
V8_EXPORT_PRIVATE static Handle<String> LookupString(Isolate* isolate,
Handle<String> key);
+ template <typename StringTableKey>
static Handle<String> LookupKey(Isolate* isolate, StringTableKey* key);
static Handle<String> AddKeyNoResize(Isolate* isolate, StringTableKey* key);
- static String ForwardStringIfExists(Isolate* isolate, StringTableKey* key,
- String string);
// Shrink the StringTable if it's very empty (kMaxEmptyFactor) to avoid the
// performance overhead of re-allocating the StringTable over and over again.
static Handle<StringTable> CautiousShrink(Isolate* isolate,
Handle<StringTable> table);
- // Looks up a string that is equal to the given string and returns
- // string handle if it is found, or an empty handle otherwise.
- V8_WARN_UNUSED_RESULT static MaybeHandle<String> LookupTwoCharsStringIfExists(
- Isolate* isolate, uint16_t c1, uint16_t c2);
// {raw_string} must be a tagged String pointer.
// Returns a tagged pointer: either an internalized string, or a Smi
// sentinel.
@@ -90,7 +89,7 @@ class StringTable : public HashTable<StringTable, StringTableShape> {
static const int kMinShrinkCapacity = kMinCapacity;
private:
- template <bool seq_one_byte>
+ template <typename char_type>
friend class JsonParser;
OBJECT_CONSTRUCTORS(StringTable, HashTable<StringTable, StringTableShape>);
diff --git a/deps/v8/src/objects/string.cc b/deps/v8/src/objects/string.cc
index 22157a3500..cc513f88cb 100644
--- a/deps/v8/src/objects/string.cc
+++ b/deps/v8/src/objects/string.cc
@@ -4,34 +4,35 @@
#include "src/objects/string.h"
-#include "src/char-predicates.h"
-#include "src/conversions.h"
-#include "src/handles-inl.h"
+#include "src/handles/handles-inl.h"
#include "src/heap/heap-inl.h" // For LooksValid implementation.
+#include "src/heap/read-only-heap.h"
+#include "src/numbers/conversions.h"
#include "src/objects/map.h"
#include "src/objects/oddball.h"
#include "src/objects/string-comparator.h"
#include "src/objects/string-inl.h"
-#include "src/ostreams.h"
-#include "src/string-builder-inl.h"
-#include "src/string-hasher.h"
-#include "src/string-search.h"
-#include "src/string-stream.h"
-#include "src/unicode-inl.h"
+#include "src/strings/char-predicates.h"
+#include "src/strings/string-builder-inl.h"
+#include "src/strings/string-hasher.h"
+#include "src/strings/string-search.h"
+#include "src/strings/string-stream.h"
+#include "src/strings/unicode-inl.h"
+#include "src/utils/ostreams.h"
namespace v8 {
namespace internal {
Handle<String> String::SlowFlatten(Isolate* isolate, Handle<ConsString> cons,
AllocationType allocation) {
- DCHECK_NE(cons->second()->length(), 0);
+ DCHECK_NE(cons->second().length(), 0);
// TurboFan can create cons strings with empty first parts.
- while (cons->first()->length() == 0) {
+ while (cons->first().length() == 0) {
// We do not want to call this function recursively. Therefore we call
// String::Flatten only in those cases where String::SlowFlatten is not
// called again.
- if (cons->second()->IsConsString() && !cons->second()->IsFlat()) {
+ if (cons->second().IsConsString() && !cons->second().IsFlat()) {
cons = handle(ConsString::cast(cons->second()), isolate);
} else {
return String::Flatten(isolate, handle(cons->second(), isolate));
@@ -66,6 +67,66 @@ Handle<String> String::SlowFlatten(Isolate* isolate, Handle<ConsString> cons,
return result;
}
+namespace {
+
+template <class StringClass>
+void MigrateExternalStringResource(Isolate* isolate, String from, String to) {
+ StringClass cast_from = StringClass::cast(from);
+ StringClass cast_to = StringClass::cast(to);
+ const typename StringClass::Resource* to_resource = cast_to.resource();
+ if (to_resource == nullptr) {
+ // |to| is a just-created internalized copy of |from|. Migrate the resource.
+ cast_to.SetResource(isolate, cast_from.resource());
+ // Zap |from|'s resource pointer to reflect the fact that |from| has
+ // relinquished ownership of its resource.
+ isolate->heap()->UpdateExternalString(
+ from, ExternalString::cast(from).ExternalPayloadSize(), 0);
+ cast_from.SetResource(isolate, nullptr);
+ } else if (to_resource != cast_from.resource()) {
+ // |to| already existed and has its own resource. Finalize |from|.
+ isolate->heap()->FinalizeExternalString(from);
+ }
+}
+
+} // namespace
+
+void String::MakeThin(Isolate* isolate, String internalized) {
+ DisallowHeapAllocation no_gc;
+ DCHECK_NE(*this, internalized);
+ DCHECK(internalized.IsInternalizedString());
+
+ if (this->IsExternalString()) {
+ if (internalized.IsExternalOneByteString()) {
+ MigrateExternalStringResource<ExternalOneByteString>(isolate, *this,
+ internalized);
+ } else if (internalized.IsExternalTwoByteString()) {
+ MigrateExternalStringResource<ExternalTwoByteString>(isolate, *this,
+ internalized);
+ } else {
+ // If the external string is duped into an existing non-external
+ // internalized string, free its resource (it's about to be rewritten
+ // into a ThinString below).
+ isolate->heap()->FinalizeExternalString(*this);
+ }
+ }
+
+ int old_size = this->Size();
+ isolate->heap()->NotifyObjectLayoutChange(*this, old_size, no_gc);
+ bool one_byte = internalized.IsOneByteRepresentation();
+ Handle<Map> map = one_byte ? isolate->factory()->thin_one_byte_string_map()
+ : isolate->factory()->thin_string_map();
+ DCHECK_GE(old_size, ThinString::kSize);
+ this->synchronized_set_map(*map);
+ ThinString thin = ThinString::cast(*this);
+ thin.set_actual(internalized);
+ Address thin_end = thin.address() + ThinString::kSize;
+ int size_delta = old_size - ThinString::kSize;
+ if (size_delta != 0) {
+ Heap* heap = isolate->heap();
+ heap->CreateFillerObjectAt(thin_end, size_delta, ClearRecordedSlots::kNo);
+ }
+}
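MakeThin rewrites the string in place and stamps a filler object over the abandoned tail so a linear heap walk still sees back-to-back valid objects. A highly simplified sketch of that shrink-and-fill idea over a flat byte arena (toy layout, no real GC semantics):

#include <cstddef>
#include <cstring>
#include <iostream>
#include <vector>

// A toy linear heap: objects are [size byte][payload...], end to end.
// Shrinking an object means stamping a filler over its abandoned tail,
// so a sequential walk still finds back-to-back valid objects.
void ShrinkInPlace(std::vector<unsigned char>& heap, size_t offset,
                   size_t new_size) {
  size_t old_size = heap[offset];
  heap[offset] = static_cast<unsigned char>(new_size);
  size_t delta = old_size - new_size;
  if (delta != 0) {
    size_t filler = offset + new_size;
    heap[filler] = static_cast<unsigned char>(delta);  // filler "object"
    std::memset(&heap[filler + 1], 0, delta - 1);
  }
}

int main() {
  std::vector<unsigned char> heap(16, 0xAB);
  heap[0] = 12;               // one 12-byte object at offset 0
  ShrinkInPlace(heap, 0, 4);  // shrink it; a filler covers bytes 4..11
  std::cout << int(heap[0]) << " " << int(heap[4]) << "\n";  // 4 8
}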
+
bool String::MakeExternal(v8::String::ExternalStringResource* resource) {
DisallowHeapAllocation no_allocation;
// Externalizing twice leaks the external resource, so it's
@@ -77,8 +138,8 @@ bool String::MakeExternal(v8::String::ExternalStringResource* resource) {
// Assert that the resource and the string are equivalent.
DCHECK(static_cast<size_t>(this->length()) == resource->length());
ScopedVector<uc16> smart_chars(this->length());
- String::WriteToFlat(*this, smart_chars.start(), 0, this->length());
- DCHECK_EQ(0, memcmp(smart_chars.start(), resource->data(),
+ String::WriteToFlat(*this, smart_chars.begin(), 0, this->length());
+ DCHECK_EQ(0, memcmp(smart_chars.begin(), resource->data(),
resource->length() * sizeof(smart_chars[0])));
}
#endif // DEBUG
@@ -103,7 +164,7 @@ bool String::MakeExternal(v8::String::ExternalStringResource* resource) {
// strings in generated code, we need to bailout to runtime.
Map new_map;
ReadOnlyRoots roots(heap);
- if (size < ExternalString::kSize) {
+ if (size < ExternalString::kSizeOfAllExternalStrings) {
if (is_internalized) {
new_map = roots.uncached_external_internalized_string_map();
} else {
@@ -127,9 +188,9 @@ bool String::MakeExternal(v8::String::ExternalStringResource* resource) {
this->synchronized_set_map(new_map);
ExternalTwoByteString self = ExternalTwoByteString::cast(*this);
- self->SetResource(isolate, resource);
+ self.SetResource(isolate, resource);
heap->RegisterExternalString(*this);
- if (is_internalized) self->Hash(); // Force regeneration of the hash value.
+ if (is_internalized) self.Hash(); // Force regeneration of the hash value.
return true;
}
@@ -145,12 +206,12 @@ bool String::MakeExternal(v8::String::ExternalOneByteStringResource* resource) {
DCHECK(static_cast<size_t>(this->length()) == resource->length());
if (this->IsTwoByteRepresentation()) {
ScopedVector<uint16_t> smart_chars(this->length());
- String::WriteToFlat(*this, smart_chars.start(), 0, this->length());
- DCHECK(String::IsOneByte(smart_chars.start(), this->length()));
+ String::WriteToFlat(*this, smart_chars.begin(), 0, this->length());
+ DCHECK(String::IsOneByte(smart_chars.begin(), this->length()));
}
ScopedVector<char> smart_chars(this->length());
- String::WriteToFlat(*this, smart_chars.start(), 0, this->length());
- DCHECK_EQ(0, memcmp(smart_chars.start(), resource->data(),
+ String::WriteToFlat(*this, smart_chars.begin(), 0, this->length());
+ DCHECK_EQ(0, memcmp(smart_chars.begin(), resource->data(),
resource->length() * sizeof(smart_chars[0])));
}
#endif // DEBUG
@@ -177,7 +238,7 @@ bool String::MakeExternal(v8::String::ExternalOneByteStringResource* resource) {
// strings in generated code, we need to bailout to runtime.
Map new_map;
ReadOnlyRoots roots(heap);
- if (size < ExternalString::kSize) {
+ if (size < ExternalString::kSizeOfAllExternalStrings) {
new_map = is_internalized
? roots.uncached_external_one_byte_internalized_string_map()
: roots.uncached_external_one_byte_string_map();
@@ -200,15 +261,15 @@ bool String::MakeExternal(v8::String::ExternalOneByteStringResource* resource) {
this->synchronized_set_map(new_map);
ExternalOneByteString self = ExternalOneByteString::cast(*this);
- self->SetResource(isolate, resource);
+ self.SetResource(isolate, resource);
heap->RegisterExternalString(*this);
- if (is_internalized) self->Hash(); // Force regeneration of the hash value.
+ if (is_internalized) self.Hash(); // Force regeneration of the hash value.
return true;
}
bool String::SupportsExternalization() {
if (this->IsThinString()) {
- return i::ThinString::cast(*this)->actual()->SupportsExternalization();
+ return i::ThinString::cast(*this).actual().SupportsExternalization();
}
Isolate* isolate;
@@ -295,7 +356,6 @@ void String::StringShortPrint(StringStream* accumulator, bool show_details) {
}
if (show_details) accumulator->Put('>');
}
- return;
}
void String::PrintUC16(std::ostream& os, int start, int end) { // NOLINT
@@ -337,7 +397,7 @@ bool String::LooksValid() {
// basically the same logic as the way we access the heap in the first place.
MemoryChunk* chunk = MemoryChunk::FromHeapObject(*this);
// RO_SPACE objects should always be valid.
- if (chunk->owner()->identity() == RO_SPACE) return true;
+ if (ReadOnlyHeap::Contains(*this)) return true;
if (chunk->heap() == nullptr) return false;
return chunk->heap()->Contains(*this);
}
@@ -435,22 +495,22 @@ String::FlatContent String::GetFlatContent(
int offset = 0;
if (shape.representation_tag() == kConsStringTag) {
ConsString cons = ConsString::cast(string);
- if (cons->second()->length() != 0) {
+ if (cons.second().length() != 0) {
return FlatContent();
}
- string = cons->first();
+ string = cons.first();
shape = StringShape(string);
} else if (shape.representation_tag() == kSlicedStringTag) {
SlicedString slice = SlicedString::cast(string);
- offset = slice->offset();
- string = slice->parent();
+ offset = slice.offset();
+ string = slice.parent();
shape = StringShape(string);
DCHECK(shape.representation_tag() != kConsStringTag &&
shape.representation_tag() != kSlicedStringTag);
}
if (shape.representation_tag() == kThinStringTag) {
ThinString thin = ThinString::cast(string);
- string = thin->actual();
+ string = thin.actual();
shape = StringShape(string);
DCHECK(!shape.IsCons());
DCHECK(!shape.IsSliced());
@@ -458,18 +518,18 @@ String::FlatContent String::GetFlatContent(
if (shape.encoding_tag() == kOneByteStringTag) {
const uint8_t* start;
if (shape.representation_tag() == kSeqStringTag) {
- start = SeqOneByteString::cast(string)->GetChars(no_gc);
+ start = SeqOneByteString::cast(string).GetChars(no_gc);
} else {
- start = ExternalOneByteString::cast(string)->GetChars();
+ start = ExternalOneByteString::cast(string).GetChars();
}
return FlatContent(start + offset, length);
} else {
DCHECK_EQ(shape.encoding_tag(), kTwoByteStringTag);
const uc16* start;
if (shape.representation_tag() == kSeqStringTag) {
- start = SeqTwoByteString::cast(string)->GetChars(no_gc);
+ start = SeqTwoByteString::cast(string).GetChars(no_gc);
} else {
- start = ExternalTwoByteString::cast(string)->GetChars();
+ start = ExternalTwoByteString::cast(string).GetChars();
}
return FlatContent(start + offset, length);
}
@@ -533,38 +593,40 @@ void String::WriteToFlat(String src, sinkchar* sink, int f, int t) {
int from = f;
int to = t;
while (true) {
- DCHECK(0 <= from && from <= to && to <= source->length());
+ DCHECK_LE(0, from);
+ DCHECK_LE(from, to);
+ DCHECK_LE(to, source.length());
switch (StringShape(source).full_representation_tag()) {
case kOneByteStringTag | kExternalStringTag: {
- CopyChars(sink, ExternalOneByteString::cast(source)->GetChars() + from,
+ CopyChars(sink, ExternalOneByteString::cast(source).GetChars() + from,
to - from);
return;
}
case kTwoByteStringTag | kExternalStringTag: {
- const uc16* data = ExternalTwoByteString::cast(source)->GetChars();
+ const uc16* data = ExternalTwoByteString::cast(source).GetChars();
CopyChars(sink, data + from, to - from);
return;
}
case kOneByteStringTag | kSeqStringTag: {
- CopyChars(sink, SeqOneByteString::cast(source)->GetChars(no_gc) + from,
+ CopyChars(sink, SeqOneByteString::cast(source).GetChars(no_gc) + from,
to - from);
return;
}
case kTwoByteStringTag | kSeqStringTag: {
- CopyChars(sink, SeqTwoByteString::cast(source)->GetChars(no_gc) + from,
+ CopyChars(sink, SeqTwoByteString::cast(source).GetChars(no_gc) + from,
to - from);
return;
}
case kOneByteStringTag | kConsStringTag:
case kTwoByteStringTag | kConsStringTag: {
ConsString cons_string = ConsString::cast(source);
- String first = cons_string->first();
- int boundary = first->length();
+ String first = cons_string.first();
+ int boundary = first.length();
if (to - boundary >= boundary - from) {
// Right hand side is longer. Recurse over left.
if (from < boundary) {
WriteToFlat(first, sink, from, boundary);
- if (from == 0 && cons_string->second() == first) {
+ if (from == 0 && cons_string.second() == first) {
CopyChars(sink + boundary, sink, boundary);
return;
}
@@ -574,19 +636,19 @@ void String::WriteToFlat(String src, sinkchar* sink, int f, int t) {
from -= boundary;
}
to -= boundary;
- source = cons_string->second();
+ source = cons_string.second();
} else {
// Left hand side is longer. Recurse over right.
if (to > boundary) {
- String second = cons_string->second();
+ String second = cons_string.second();
// When repeatedly appending to a string, we get a cons string that
// is unbalanced to the left, a list, essentially. We inline the
// common case of sequential one-byte right child.
if (to - boundary == 1) {
- sink[boundary - from] = static_cast<sinkchar>(second->Get(0));
- } else if (second->IsSeqOneByteString()) {
+ sink[boundary - from] = static_cast<sinkchar>(second.Get(0));
+ } else if (second.IsSeqOneByteString()) {
CopyChars(sink + boundary - from,
- SeqOneByteString::cast(second)->GetChars(no_gc),
+ SeqOneByteString::cast(second).GetChars(no_gc),
to - boundary);
} else {
WriteToFlat(second, sink + boundary - from, 0, to - boundary);
@@ -600,13 +662,13 @@ void String::WriteToFlat(String src, sinkchar* sink, int f, int t) {
case kOneByteStringTag | kSlicedStringTag:
case kTwoByteStringTag | kSlicedStringTag: {
SlicedString slice = SlicedString::cast(source);
- unsigned offset = slice->offset();
- WriteToFlat(slice->parent(), sink, from + offset, to + offset);
+ unsigned offset = slice.offset();
+ WriteToFlat(slice.parent(), sink, from + offset, to + offset);
return;
}
case kOneByteStringTag | kThinStringTag:
case kTwoByteStringTag | kThinStringTag:
- source = ThinString::cast(source)->actual();
+ source = ThinString::cast(source).actual();
break;
}
}
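WriteToFlat bounds its stack depth by recursing only into the shorter child of each cons node and iterating on the longer one. A toy rope flattener using the same discipline:

#include <cstring>
#include <iostream>
#include <string>

struct Node {
  std::string leaf;            // payload when both children are null
  Node* left = nullptr;
  Node* right = nullptr;
  int length = 0;
};

// Write n's characters into sink[0..n->length). Recurse into the shorter
// child and loop on the longer one, so stack depth stays logarithmic even
// for ropes that degenerate into a left- or right-leaning list.
void WriteToFlat(Node* n, char* sink) {
  while (n->left != nullptr) {
    Node* l = n->left;
    Node* r = n->right;
    if (r->length >= l->length) {  // right longer: recurse over left
      WriteToFlat(l, sink);
      sink += l->length;
      n = r;
    } else {                       // left longer: recurse over right
      WriteToFlat(r, sink + l->length);
      n = l;
    }
  }
  std::memcpy(sink, n->leaf.data(), n->length);
}

int main() {
  Node a{"foo", nullptr, nullptr, 3}, b{"bar", nullptr, nullptr, 3};
  Node root{"", &a, &b, 6};
  std::string out(6, ' ');
  WriteToFlat(&root, &out[0]);
  std::cout << out << "\n";  // foobar
}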
@@ -667,15 +729,15 @@ bool String::SlowEquals(String other) {
DisallowHeapAllocation no_gc;
// Fast check: negative check with lengths.
int len = length();
- if (len != other->length()) return false;
+ if (len != other.length()) return false;
if (len == 0) return true;
// Fast check: if at least one ThinString is involved, dereference it/them
// and restart.
- if (this->IsThinString() || other->IsThinString()) {
- if (other->IsThinString()) other = ThinString::cast(other)->actual();
+ if (this->IsThinString() || other.IsThinString()) {
+ if (other.IsThinString()) other = ThinString::cast(other).actual();
if (this->IsThinString()) {
- return ThinString::cast(*this)->actual()->Equals(other);
+ return ThinString::cast(*this).actual().Equals(other);
} else {
return this->Equals(other);
}
@@ -683,13 +745,13 @@ bool String::SlowEquals(String other) {
// Fast check: if hash code is computed for both strings
// a fast negative check can be performed.
- if (HasHashCode() && other->HasHashCode()) {
+ if (HasHashCode() && other.HasHashCode()) {
#ifdef ENABLE_SLOW_DCHECKS
if (FLAG_enable_slow_asserts) {
- if (Hash() != other->Hash()) {
+ if (Hash() != other.Hash()) {
bool found_difference = false;
for (int i = 0; i < len; i++) {
- if (Get(i) != other->Get(i)) {
+ if (Get(i) != other.Get(i)) {
found_difference = true;
break;
}
@@ -698,16 +760,16 @@ bool String::SlowEquals(String other) {
}
}
#endif
- if (Hash() != other->Hash()) return false;
+ if (Hash() != other.Hash()) return false;
}
// We know the strings are both non-empty. Compare the first chars
// before we try to flatten the strings.
- if (this->Get(0) != other->Get(0)) return false;
+ if (this->Get(0) != other.Get(0)) return false;
- if (IsSeqOneByteString() && other->IsSeqOneByteString()) {
- const uint8_t* str1 = SeqOneByteString::cast(*this)->GetChars(no_gc);
- const uint8_t* str2 = SeqOneByteString::cast(other)->GetChars(no_gc);
+ if (IsSeqOneByteString() && other.IsSeqOneByteString()) {
+ const uint8_t* str1 = SeqOneByteString::cast(*this).GetChars(no_gc);
+ const uint8_t* str2 = SeqOneByteString::cast(other).GetChars(no_gc);
return CompareRawStringContents(str1, str2, len);
}
@@ -726,9 +788,9 @@ bool String::SlowEquals(Isolate* isolate, Handle<String> one,
// and restart.
if (one->IsThinString() || two->IsThinString()) {
if (one->IsThinString())
- one = handle(ThinString::cast(*one)->actual(), isolate);
+ one = handle(ThinString::cast(*one).actual(), isolate);
if (two->IsThinString())
- two = handle(ThinString::cast(*two)->actual(), isolate);
+ two = handle(ThinString::cast(*two).actual(), isolate);
return String::Equals(isolate, one, two);
}
@@ -764,8 +826,8 @@ bool String::SlowEquals(Isolate* isolate, Handle<String> one,
String::FlatContent flat2 = two->GetFlatContent(no_gc);
if (flat1.IsOneByte() && flat2.IsOneByte()) {
- return CompareRawStringContents(flat1.ToOneByteVector().start(),
- flat2.ToOneByteVector().start(),
+ return CompareRawStringContents(flat1.ToOneByteVector().begin(),
+ flat2.ToOneByteVector().begin(),
one_length);
} else {
for (int i = 0; i < one_length; i++) {
@@ -815,19 +877,19 @@ ComparisonResult String::Compare(Isolate* isolate, Handle<String> x,
Vector<const uint8_t> x_chars = x_content.ToOneByteVector();
if (y_content.IsOneByte()) {
Vector<const uint8_t> y_chars = y_content.ToOneByteVector();
- r = CompareChars(x_chars.start(), y_chars.start(), prefix_length);
+ r = CompareChars(x_chars.begin(), y_chars.begin(), prefix_length);
} else {
Vector<const uc16> y_chars = y_content.ToUC16Vector();
- r = CompareChars(x_chars.start(), y_chars.start(), prefix_length);
+ r = CompareChars(x_chars.begin(), y_chars.begin(), prefix_length);
}
} else {
Vector<const uc16> x_chars = x_content.ToUC16Vector();
if (y_content.IsOneByte()) {
Vector<const uint8_t> y_chars = y_content.ToOneByteVector();
- r = CompareChars(x_chars.start(), y_chars.start(), prefix_length);
+ r = CompareChars(x_chars.begin(), y_chars.begin(), prefix_length);
} else {
Vector<const uc16> y_chars = y_content.ToUC16Vector();
- r = CompareChars(x_chars.start(), y_chars.start(), prefix_length);
+ r = CompareChars(x_chars.begin(), y_chars.begin(), prefix_length);
}
}
if (r < 0) {
@@ -1180,26 +1242,6 @@ Object String::LastIndexOf(Isolate* isolate, Handle<Object> receiver,
return Smi::FromInt(last_index);
}
-bool String::IsUtf8EqualTo(Vector<const char> str, bool allow_prefix_match) {
- int slen = length();
- // Can't check exact length equality, but we can check bounds.
- int str_len = str.length();
- if (!allow_prefix_match &&
- (str_len < slen ||
- str_len > slen * static_cast<int>(unibrow::Utf8::kMaxEncodedSize))) {
- return false;
- }
-
- int i = 0;
- unibrow::Utf8Iterator it = unibrow::Utf8Iterator(str);
- while (i < slen && !it.Done()) {
- if (Get(i++) != *it) return false;
- ++it;
- }
-
- return (allow_prefix_match || i == slen) && it.Done();
-}
-
template <>
bool String::IsEqualTo(Vector<const uint8_t> str) {
return IsOneByteEqualTo(str);
@@ -1210,16 +1252,28 @@ bool String::IsEqualTo(Vector<const uc16> str) {
return IsTwoByteEqualTo(str);
}
+bool String::HasOneBytePrefix(Vector<const char> str) {
+ int slen = str.length();
+ if (slen > length()) return false;
+ DisallowHeapAllocation no_gc;
+ FlatContent content = GetFlatContent(no_gc);
+ if (content.IsOneByte()) {
+ return CompareChars(content.ToOneByteVector().begin(), str.begin(), slen) ==
+ 0;
+ }
+ return CompareChars(content.ToUC16Vector().begin(), str.begin(), slen) == 0;
+}
+
bool String::IsOneByteEqualTo(Vector<const uint8_t> str) {
int slen = length();
if (str.length() != slen) return false;
DisallowHeapAllocation no_gc;
FlatContent content = GetFlatContent(no_gc);
if (content.IsOneByte()) {
- return CompareChars(content.ToOneByteVector().start(), str.start(), slen) ==
+ return CompareChars(content.ToOneByteVector().begin(), str.begin(), slen) ==
0;
}
- return CompareChars(content.ToUC16Vector().start(), str.start(), slen) == 0;
+ return CompareChars(content.ToUC16Vector().begin(), str.begin(), slen) == 0;
}
bool String::IsTwoByteEqualTo(Vector<const uc16> str) {
@@ -1228,20 +1282,67 @@ bool String::IsTwoByteEqualTo(Vector<const uc16> str) {
DisallowHeapAllocation no_gc;
FlatContent content = GetFlatContent(no_gc);
if (content.IsOneByte()) {
- return CompareChars(content.ToOneByteVector().start(), str.start(), slen) ==
+ return CompareChars(content.ToOneByteVector().begin(), str.begin(), slen) ==
0;
}
- return CompareChars(content.ToUC16Vector().start(), str.start(), slen) == 0;
+ return CompareChars(content.ToUC16Vector().begin(), str.begin(), slen) == 0;
+}
+
+namespace {
+
+template <typename Char>
+uint32_t HashString(String string, size_t start, int length, uint64_t seed) {
+ DisallowHeapAllocation no_gc;
+
+ if (length > String::kMaxHashCalcLength) {
+ return StringHasher::GetTrivialHash(length);
+ }
+
+ std::unique_ptr<Char[]> buffer;
+ const Char* chars;
+
+ if (string.IsConsString()) {
+ DCHECK_EQ(0, start);
+ DCHECK(!string.IsFlat());
+ buffer.reset(new Char[length]);
+ String::WriteToFlat(string, buffer.get(), 0, length);
+ chars = buffer.get();
+ } else {
+ chars = string.GetChars<Char>(no_gc) + start;
+ }
+
+ return StringHasher::HashSequentialString<Char>(chars, length, seed);
}
+} // namespace
+
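HashString falls back to a length-based trivial hash for overlong strings and flattens a non-flat cons string into a temporary buffer before hashing. A sketch of that shape (the 16383 cap and the hash mixer are stand-ins, not V8's exact values):

#include <cstdint>
#include <cstring>
#include <iostream>
#include <memory>
#include <string>

constexpr int kMaxHashCalcLength = 16383;  // cap assumed to mirror V8's

uint32_t SequentialHash(const char* chars, int length, uint64_t seed) {
  uint64_t h = seed;
  for (int i = 0; i < length; ++i) {
    h = h * 1099511628211ull + static_cast<unsigned char>(chars[i]);
  }
  return static_cast<uint32_t>(h >> 32) | 1;  // toy stand-in for StringHasher
}

struct TwoPiece {                // stand-in for an unflattened ConsString
  std::string first, second;
  bool flat() const { return second.empty(); }
};

uint32_t HashString(const TwoPiece& s, uint64_t seed) {
  int length = static_cast<int>(s.first.size() + s.second.size());
  if (length > kMaxHashCalcLength) {
    return static_cast<uint32_t>(length);  // "trivial hash" stand-in
  }
  std::unique_ptr<char[]> buffer;
  const char* chars;
  if (!s.flat()) {
    // Not flat: write both halves into a temporary buffer, then hash that.
    buffer.reset(new char[length]);
    std::memcpy(buffer.get(), s.first.data(), s.first.size());
    std::memcpy(buffer.get() + s.first.size(), s.second.data(),
                s.second.size());
    chars = buffer.get();
  } else {
    chars = s.first.data();
  }
  return SequentialHash(chars, length, seed);
}

int main() {
  std::cout << HashString({"foo", "bar"}, 42) << "\n";
}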
uint32_t String::ComputeAndSetHash() {
DisallowHeapAllocation no_gc;
// Should only be called if hash code has not yet been computed.
DCHECK(!HasHashCode());
// Store the hash code in the object.
- uint32_t field =
- IteratingStringHasher::Hash(*this, HashSeed(GetReadOnlyRoots()));
+ uint64_t seed = HashSeed(GetReadOnlyRoots());
+ size_t start = 0;
+ String string = *this;
+ if (string.IsSlicedString()) {
+ SlicedString sliced = SlicedString::cast(string);
+ start = sliced.offset();
+ string = sliced.parent();
+ }
+ if (string.IsConsString() && string.IsFlat()) {
+ string = ConsString::cast(string).first();
+ }
+ if (string.IsThinString()) {
+ string = ThinString::cast(string).actual();
+ if (length() == string.length()) {
+ set_hash_field(string.hash_field());
+ return hash_field() >> kHashShift;
+ }
+ }
+ uint32_t field = string.IsOneByteRepresentation()
+ ? HashString<uint8_t>(string, start, length(), seed)
+ : HashString<uint16_t>(string, start, length(), seed);
set_hash_field(field);
// Check the hash code is there.
@@ -1325,13 +1426,13 @@ void SeqTwoByteString::clear_padding() {
SizeFor(length()) - data_size);
}
-uint16_t ConsString::ConsStringGet(int index) {
+uint16_t ConsString::Get(int index) {
DCHECK(index >= 0 && index < this->length());
// Check for a flattened cons string
- if (second()->length() == 0) {
+ if (second().length() == 0) {
String left = first();
- return left->Get(index);
+ return left.Get(index);
}
String string = String::cast(*this);
@@ -1339,26 +1440,24 @@ uint16_t ConsString::ConsStringGet(int index) {
while (true) {
if (StringShape(string).IsCons()) {
ConsString cons_string = ConsString::cast(string);
- String left = cons_string->first();
- if (left->length() > index) {
+ String left = cons_string.first();
+ if (left.length() > index) {
string = left;
} else {
- index -= left->length();
- string = cons_string->second();
+ index -= left.length();
+ string = cons_string.second();
}
} else {
- return string->Get(index);
+ return string.Get(index);
}
}
UNREACHABLE();
}
-uint16_t ThinString::ThinStringGet(int index) { return actual()->Get(index); }
+uint16_t ThinString::Get(int index) { return actual().Get(index); }
-uint16_t SlicedString::SlicedStringGet(int index) {
- return parent()->Get(offset() + index);
-}
+uint16_t SlicedString::Get(int index) { return parent().Get(offset() + index); }
int ExternalString::ExternalPayloadSize() const {
int length_multiplier = IsTwoByteRepresentation() ? i::kShortSize : kCharSize;
@@ -1375,7 +1474,7 @@ FlatStringReader::FlatStringReader(Isolate* isolate, Vector<const char> input)
str_(nullptr),
is_one_byte_(true),
length_(input.length()),
- start_(input.start()) {}
+ start_(input.begin()) {}
void FlatStringReader::PostGarbageCollection() {
if (str_ == nullptr) return;
@@ -1387,9 +1486,9 @@ void FlatStringReader::PostGarbageCollection() {
DCHECK(content.IsFlat());
is_one_byte_ = content.IsOneByte();
if (is_one_byte_) {
- start_ = content.ToOneByteVector().start();
+ start_ = content.ToOneByteVector().begin();
} else {
- start_ = content.ToUC16Vector().start();
+ start_ = content.ToUC16Vector().begin();
}
}
@@ -1430,13 +1529,13 @@ String ConsStringIterator::Search(int* offset_out) {
int offset = 0;
while (true) {
// Loop until the string containing the target offset is found.
- String string = cons_string->first();
- int length = string->length();
+ String string = cons_string.first();
+ int length = string.length();
int32_t type;
if (consumed < offset + length) {
// Target offset is in the left branch.
// Keep going if we're still in a ConsString.
- type = string->map()->instance_type();
+ type = string.map().instance_type();
if ((type & kStringRepresentationMask) == kConsStringTag) {
cons_string = ConsString::cast(string);
PushLeft(cons_string);
@@ -1449,15 +1548,15 @@ String ConsStringIterator::Search(int* offset_out) {
// Update progress through the string.
offset += length;
// Keep going if we're still in a ConsString.
- string = cons_string->second();
- type = string->map()->instance_type();
+ string = cons_string.second();
+ type = string.map().instance_type();
if ((type & kStringRepresentationMask) == kConsStringTag) {
cons_string = ConsString::cast(string);
PushRight(cons_string);
continue;
}
// Need this to be updated for the current string.
- length = string->length();
+ length = string.length();
// Account for the possibility of an empty right leaf.
// This happens only if we have asked for an offset outside the string.
if (length == 0) {
@@ -1493,12 +1592,12 @@ String ConsStringIterator::NextLeaf(bool* blew_stack) {
}
// Go right.
ConsString cons_string = frames_[OffsetForDepth(depth_ - 1)];
- String string = cons_string->second();
- int32_t type = string->map()->instance_type();
+ String string = cons_string.second();
+ int32_t type = string.map().instance_type();
if ((type & kStringRepresentationMask) != kConsStringTag) {
// Pop stack so next iteration is in correct place.
Pop();
- int length = string->length();
+ int length = string.length();
// Could be a flattened ConsString.
if (length == 0) continue;
consumed_ += length;
@@ -1509,11 +1608,11 @@ String ConsStringIterator::NextLeaf(bool* blew_stack) {
// Need to traverse all the way left.
while (true) {
// Continue left.
- string = cons_string->first();
- type = string->map()->instance_type();
+ string = cons_string.first();
+ type = string.map().instance_type();
if ((type & kStringRepresentationMask) != kConsStringTag) {
AdjustMaximumDepth();
- int length = string->length();
+ int length = string.length();
if (length == 0) break; // Skip empty left-hand sides of ConsStrings.
consumed_ += length;
return string;
diff --git a/deps/v8/src/objects/string.h b/deps/v8/src/objects/string.h
index 7c6616a6f6..74fc8fa763 100644
--- a/deps/v8/src/objects/string.h
+++ b/deps/v8/src/objects/string.h
@@ -10,7 +10,7 @@
#include "src/objects/instance-type.h"
#include "src/objects/name.h"
#include "src/objects/smi.h"
-#include "src/unicode-decoder.h"
+#include "src/strings/unicode-decoder.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -142,10 +142,16 @@ class String : public Name {
friend class IterableSubString;
};
+ void MakeThin(Isolate* isolate, String canonical);
+
template <typename Char>
V8_INLINE Vector<const Char> GetCharVector(
const DisallowHeapAllocation& no_gc);
+ // Get chars from sequential or external strings.
+ template <typename Char>
+ inline const Char* GetChars(const DisallowHeapAllocation& no_gc);
+
// Get and set the length of the string.
inline int length() const;
inline void set_length(int value);
@@ -268,14 +274,16 @@ class String : public Name {
inline bool Equals(String other);
inline static bool Equals(Isolate* isolate, Handle<String> one,
Handle<String> two);
- V8_EXPORT_PRIVATE bool IsUtf8EqualTo(Vector<const char> str,
- bool allow_prefix_match = false);
// Dispatches to Is{One,Two}ByteEqualTo.
template <typename Char>
bool IsEqualTo(Vector<const Char> str);
+ V8_EXPORT_PRIVATE bool HasOneBytePrefix(Vector<const char> str);
V8_EXPORT_PRIVATE bool IsOneByteEqualTo(Vector<const uint8_t> str);
+ V8_EXPORT_PRIVATE bool IsOneByteEqualTo(Vector<const char> str) {
+ return IsOneByteEqualTo(Vector<const uint8_t>::cast(str));
+ }
bool IsTwoByteEqualTo(Vector<const uc16> str);
// Return a UTF8 representation of the string. The string is null
@@ -333,8 +341,6 @@ class String : public Name {
DEFINE_FIELD_OFFSET_CONSTANTS(Name::kHeaderSize,
TORQUE_GENERATED_STRING_FIELDS)
- static const int kHeaderSize = kSize;
-
// Max char codes.
static const int32_t kMaxOneByteCharCode = unibrow::Latin1::kMaxChar;
static const uint32_t kMaxOneByteCharCodeU = unibrow::Latin1::kMaxChar;
@@ -366,35 +372,46 @@ class String : public Name {
EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
static void WriteToFlat(String source, sinkchar* sink, int from, int to);
- // The return value may point to the first aligned word containing the first
- // non-one-byte character, rather than directly to the non-one-byte character.
- // If the return value is >= the passed length, the entire string was
- // one-byte.
- static inline int NonAsciiStart(const char* chars, int length) {
- const char* start = chars;
- const char* limit = chars + length;
-
- if (length >= kIntptrSize) {
- // Check unaligned bytes.
- while (!IsAligned(reinterpret_cast<intptr_t>(chars), sizeof(uintptr_t))) {
- if (static_cast<uint8_t>(*chars) > unibrow::Utf8::kMaxOneByteChar) {
+ static inline bool IsAscii(const char* chars, int length) {
+ return IsAscii(reinterpret_cast<const uint8_t*>(chars), length);
+ }
+
+ static inline bool IsAscii(const uint8_t* chars, int length) {
+ return NonAsciiStart(chars, length) >= length;
+ }
+
+ static inline int NonOneByteStart(const uc16* chars, int length) {
+ DCHECK(IsAligned(reinterpret_cast<Address>(chars), sizeof(uc16)));
+ const uint16_t* start = chars;
+ const uint16_t* limit = chars + length;
+
+ if (static_cast<size_t>(length) >= kUIntptrSize) {
+ // Check unaligned chars.
+ while (!IsAligned(reinterpret_cast<Address>(chars), kUIntptrSize)) {
+ if (*chars > unibrow::Latin1::kMaxChar) {
return static_cast<int>(chars - start);
}
++chars;
}
+
// Check aligned words.
- DCHECK_EQ(unibrow::Utf8::kMaxOneByteChar, 0x7F);
- const uintptr_t non_one_byte_mask = kUintptrAllBitsSet / 0xFF * 0x80;
+ STATIC_ASSERT(unibrow::Latin1::kMaxChar == 0xFF);
+#ifdef V8_TARGET_LITTLE_ENDIAN
+ const uintptr_t non_one_byte_mask = kUintptrAllBitsSet / 0xFFFF * 0xFF00;
+#else
+ const uintptr_t non_one_byte_mask = kUintptrAllBitsSet / 0xFFFF * 0x00FF;
+#endif
while (chars + sizeof(uintptr_t) <= limit) {
if (*reinterpret_cast<const uintptr_t*>(chars) & non_one_byte_mask) {
- return static_cast<int>(chars - start);
+ break;
}
- chars += sizeof(uintptr_t);
+ chars += (sizeof(uintptr_t) / sizeof(uc16));
}
}
- // Check remaining unaligned bytes.
+
+ // Check remaining unaligned chars, or find non-one-byte char in word.
while (chars < limit) {
- if (static_cast<uint8_t>(*chars) > unibrow::Utf8::kMaxOneByteChar) {
+ if (*chars > unibrow::Latin1::kMaxChar) {
return static_cast<int>(chars - start);
}
++chars;
@@ -403,25 +420,6 @@ class String : public Name {
return static_cast<int>(chars - start);
}
- static inline bool IsAscii(const char* chars, int length) {
- return NonAsciiStart(chars, length) >= length;
- }
-
- static inline bool IsAscii(const uint8_t* chars, int length) {
- return NonAsciiStart(reinterpret_cast<const char*>(chars), length) >=
- length;
- }
-
- static inline int NonOneByteStart(const uc16* chars, int length) {
- const uc16* limit = chars + length;
- const uc16* start = chars;
- while (chars < limit) {
- if (*chars > kMaxOneByteCharCodeU) return static_cast<int>(chars - start);
- ++chars;
- }
- return static_cast<int>(chars - start);
- }
-
static inline bool IsOneByte(const uc16* chars, int length) {
return NonOneByteStart(chars, length) >= length;
}
@@ -505,9 +503,10 @@ class InternalizedString : public String {
class SeqOneByteString : public SeqString {
public:
static const bool kHasOneByteEncoding = true;
+ using Char = uint8_t;
// Dispatched behavior.
- inline uint16_t SeqOneByteStringGet(int index);
+ inline uint8_t Get(int index);
inline void SeqOneByteStringSet(int index, uint16_t value);
// Get the address of the characters in this string.
@@ -546,9 +545,10 @@ class SeqOneByteString : public SeqString {
class SeqTwoByteString : public SeqString {
public:
static const bool kHasOneByteEncoding = false;
+ using Char = uint16_t;
// Dispatched behavior.
- inline uint16_t SeqTwoByteStringGet(int index);
+ inline uint16_t Get(int index);
inline void SeqTwoByteStringSet(int index, uint16_t value);
// Get the address of the characters in this string.
@@ -610,7 +610,7 @@ class ConsString : public String {
WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
// Dispatched behavior.
- V8_EXPORT_PRIVATE uint16_t ConsStringGet(int index);
+ V8_EXPORT_PRIVATE uint16_t Get(int index);
DECL_CAST(ConsString)
@@ -642,7 +642,7 @@ class ThinString : public String {
inline void set_actual(String s,
WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
- V8_EXPORT_PRIVATE uint16_t ThinStringGet(int index);
+ V8_EXPORT_PRIVATE uint16_t Get(int index);
DECL_CAST(ThinString)
DECL_VERIFIER(ThinString)
@@ -676,7 +676,7 @@ class SlicedString : public String {
inline void set_offset(int offset);
// Dispatched behavior.
- V8_EXPORT_PRIVATE uint16_t SlicedStringGet(int index);
+ V8_EXPORT_PRIVATE uint16_t Get(int index);
DECL_CAST(SlicedString)
@@ -728,6 +728,7 @@ class ExternalString : public String {
inline void DisposeResource();
STATIC_ASSERT(kResourceOffset == Internals::kStringResourceOffset);
+ static const int kSizeOfAllExternalStrings = kHeaderSize;
OBJECT_CONSTRUCTORS(ExternalString, String);
};
@@ -758,12 +759,18 @@ class ExternalOneByteString : public ExternalString {
inline const uint8_t* GetChars();
// Dispatched behavior.
- inline uint16_t ExternalOneByteStringGet(int index);
+ inline uint8_t Get(int index);
DECL_CAST(ExternalOneByteString)
class BodyDescriptor;
+ DEFINE_FIELD_OFFSET_CONSTANTS(
+ ExternalString::kHeaderSize,
+ TORQUE_GENERATED_EXTERNAL_ONE_BYTE_STRING_FIELDS)
+
+ STATIC_ASSERT(kSize == kSizeOfAllExternalStrings);
+
OBJECT_CONSTRUCTORS(ExternalOneByteString, ExternalString);
};
@@ -793,7 +800,7 @@ class ExternalTwoByteString : public ExternalString {
inline const uint16_t* GetChars();
// Dispatched behavior.
- inline uint16_t ExternalTwoByteStringGet(int index);
+ inline uint16_t Get(int index);
// For regexp code.
inline const uint16_t* ExternalTwoByteStringGetData(unsigned start);
@@ -802,6 +809,12 @@ class ExternalTwoByteString : public ExternalString {
class BodyDescriptor;
+ DEFINE_FIELD_OFFSET_CONSTANTS(
+ ExternalString::kHeaderSize,
+ TORQUE_GENERATED_EXTERNAL_TWO_BYTE_STRING_FIELDS)
+
+ STATIC_ASSERT(kSize == kSizeOfAllExternalStrings);
+
OBJECT_CONSTRUCTORS(ExternalTwoByteString, ExternalString);
};
@@ -895,6 +908,21 @@ class StringCharacterStream {
DISALLOW_COPY_AND_ASSIGN(StringCharacterStream);
};
+template <typename Char>
+struct CharTraits;
+
+template <>
+struct CharTraits<uint8_t> {
+ using String = SeqOneByteString;
+ using ExternalString = ExternalOneByteString;
+};
+
+template <>
+struct CharTraits<uint16_t> {
+ using String = SeqTwoByteString;
+ using ExternalString = ExternalTwoByteString;
+};
+
} // namespace internal
} // namespace v8
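
Annotation: the rewritten NonOneByteStart above checks two-byte strings a machine word at a time: each 16-bit lane's high byte is folded into a mask, and any surviving bit means some character exceeds Latin-1's 0xFF. A standalone sketch of the same trick (the function name and the memcpy-based load are illustrative; the V8 version above uses an aligned reinterpret_cast load and selects the mask per target endianness):

#include <cstdint>
#include <cstring>

// Returns the index of the first char > 0xFF, or `length` if none.
// Simplified: no alignment pre-loop, unlike the V8 version above.
int NonOneByteStartSketch(const uint16_t* chars, int length) {
  const uint16_t* start = chars;
  const uint16_t* limit = chars + length;
  // 0xFF00 repeated across every 16-bit lane of the word.
  const uintptr_t kNonOneByteMask = ~uintptr_t{0} / 0xFFFF * 0xFF00;
  while (chars + sizeof(uintptr_t) / sizeof(uint16_t) <= limit) {
    uintptr_t word;
    std::memcpy(&word, chars, sizeof(word));  // portable unaligned load
    if (word & kNonOneByteMask) break;  // some lane holds a char above 0xFF
    chars += sizeof(uintptr_t) / sizeof(uint16_t);
  }
  // Finish char by char: the tail, or the offending char inside the word.
  while (chars < limit) {
    if (*chars > 0xFF) return static_cast<int>(chars - start);
    ++chars;
  }
  return static_cast<int>(chars - start);
}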
diff --git a/deps/v8/src/objects/struct-inl.h b/deps/v8/src/objects/struct-inl.h
index 9502698058..47d55a876f 100644
--- a/deps/v8/src/objects/struct-inl.h
+++ b/deps/v8/src/objects/struct-inl.h
@@ -8,9 +8,10 @@
#include "src/objects/struct.h"
#include "src/heap/heap-write-barrier-inl.h"
-#include "src/objects-inl.h"
+#include "src/objects/objects-inl.h"
#include "src/objects/oddball.h"
-#include "src/roots-inl.h"
+#include "src/roots/roots-inl.h"
+#include "torque-generated/class-definitions-tq-inl.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -18,20 +19,14 @@
namespace v8 {
namespace internal {
-OBJECT_CONSTRUCTORS_IMPL(Struct, HeapObject)
-// TODO(jkummerow): Fix IsTuple2() and IsTuple3() to be subclassing-aware,
-// or rethink this more generally (see crbug.com/v8/8516).
-Tuple2::Tuple2(Address ptr) : Struct(ptr) {}
-Tuple3::Tuple3(Address ptr) : Tuple2(ptr) {}
+TQ_OBJECT_CONSTRUCTORS_IMPL(Struct)
+TQ_OBJECT_CONSTRUCTORS_IMPL(Tuple2)
+TQ_OBJECT_CONSTRUCTORS_IMPL(Tuple3)
OBJECT_CONSTRUCTORS_IMPL(AccessorPair, Struct)
OBJECT_CONSTRUCTORS_IMPL(ClassPositions, Struct)
CAST_ACCESSOR(AccessorPair)
-CAST_ACCESSOR(Struct)
-CAST_ACCESSOR(Tuple2)
-CAST_ACCESSOR(Tuple3)
-
CAST_ACCESSOR(ClassPositions)
void Struct::InitializeBody(int object_size) {
@@ -41,10 +36,6 @@ void Struct::InitializeBody(int object_size) {
}
}
-ACCESSORS(Tuple2, value1, Object, kValue1Offset)
-ACCESSORS(Tuple2, value2, Object, kValue2Offset)
-ACCESSORS(Tuple3, value3, Object, kValue3Offset)
-
ACCESSORS(AccessorPair, getter, Object, kGetterOffset)
ACCESSORS(AccessorPair, setter, Object, kSetterOffset)
@@ -64,8 +55,8 @@ void AccessorPair::set(AccessorComponent component, Object value) {
}
void AccessorPair::SetComponents(Object getter, Object setter) {
- if (!getter->IsNull()) set_getter(getter);
- if (!setter->IsNull()) set_setter(setter);
+ if (!getter.IsNull()) set_getter(getter);
+ if (!setter.IsNull()) set_setter(setter);
}
bool AccessorPair::Equals(Object getter_value, Object setter_value) {
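
Annotation: a change repeated throughout this diff — getter->IsNull() becoming getter.IsNull(), and the many similar hunks below — follows from V8's object references now being small value types that wrap a tagged Address rather than pointer-like types. A toy illustration (all names hypothetical):

#include <cstdint>

using Address = uintptr_t;

// Pointer-era call sites wrote obj->IsNull(); value-type call sites write
// obj.IsNull(). Copying an ObjectSketch copies a single tagged word.
class ObjectSketch {
 public:
  explicit constexpr ObjectSketch(Address ptr) : ptr_(ptr) {}
  constexpr bool IsNull() const { return ptr_ == kNullSentinel; }
  constexpr Address ptr() const { return ptr_; }

 private:
  static constexpr Address kNullSentinel = 0;  // stand-in for the null root
  Address ptr_;
};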
diff --git a/deps/v8/src/objects/struct.h b/deps/v8/src/objects/struct.h
index cab41665bd..b01a33561b 100644
--- a/deps/v8/src/objects/struct.h
+++ b/deps/v8/src/objects/struct.h
@@ -5,9 +5,9 @@
#ifndef V8_OBJECTS_STRUCT_H_
#define V8_OBJECTS_STRUCT_H_
-#include "src/objects.h"
#include "src/objects/heap-object.h"
-#include "torque-generated/class-definitions-from-dsl.h"
+#include "src/objects/objects.h"
+#include "torque-generated/class-definitions-tq.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -18,47 +18,26 @@ namespace internal {
// An abstract superclass, a marker class really, for simple structure classes.
// It doesn't carry much functionality but allows struct classes to be
// identified in the type system.
-class Struct : public HeapObject {
+class Struct : public TorqueGeneratedStruct<Struct, HeapObject> {
public:
inline void InitializeBody(int object_size);
- DECL_CAST(Struct)
void BriefPrintDetails(std::ostream& os);
- OBJECT_CONSTRUCTORS(Struct, HeapObject);
+ TQ_OBJECT_CONSTRUCTORS(Struct)
};
-class Tuple2 : public Struct {
+class Tuple2 : public TorqueGeneratedTuple2<Tuple2, Struct> {
public:
- DECL_ACCESSORS(value1, Object)
- DECL_ACCESSORS(value2, Object)
-
- DECL_CAST(Tuple2)
-
- // Dispatched behavior.
- DECL_PRINTER(Tuple2)
- DECL_VERIFIER(Tuple2)
void BriefPrintDetails(std::ostream& os);
- DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize,
- TORQUE_GENERATED_TUPLE2_FIELDS)
-
- OBJECT_CONSTRUCTORS(Tuple2, Struct);
+ TQ_OBJECT_CONSTRUCTORS(Tuple2)
};
-class Tuple3 : public Tuple2 {
+class Tuple3 : public TorqueGeneratedTuple3<Tuple3, Tuple2> {
public:
- DECL_ACCESSORS(value3, Object)
-
- DECL_CAST(Tuple3)
-
- // Dispatched behavior.
- DECL_PRINTER(Tuple3)
- DECL_VERIFIER(Tuple3)
void BriefPrintDetails(std::ostream& os);
- DEFINE_FIELD_OFFSET_CONSTANTS(Tuple2::kSize, TORQUE_GENERATED_TUPLE3_FIELDS)
-
- OBJECT_CONSTRUCTORS(Tuple3, Tuple2);
+ TQ_OBJECT_CONSTRUCTORS(Tuple3)
};
// Support for JavaScript accessors: A pair of a getter and a setter. Each
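
Annotation: Tuple2 and Tuple3 above trade hand-written accessor macros and DEFINE_FIELD_OFFSET_CONSTANTS for TorqueGenerated* CRTP bases. Roughly, the generated base carries the field offsets (and, in V8, the accessors and casts) so the hand-written class only adds behavior. A compilable schematic of that shape with stand-in types — the real bases live in torque-generated/class-definitions-tq.h and differ in detail:

#include <cstdint>

// Stand-ins so the sketch compiles outside V8.
using Tagged_t = uintptr_t;
constexpr int kTaggedSize = sizeof(Tagged_t);

struct HeapObjectSketch {
  static constexpr int kHeaderSize = kTaggedSize;  // the map word
};

// Schematic CRTP base in the shape of TorqueGeneratedTuple2<D, P>.
template <class D, class P>
struct TorqueGeneratedTuple2Sketch : P {
  static constexpr int kValue1Offset = P::kHeaderSize;
  static constexpr int kValue2Offset = kValue1Offset + kTaggedSize;
  static constexpr int kHeaderSize = kValue2Offset + kTaggedSize;
  static constexpr int kSize = kHeaderSize;
};

struct Tuple2Sketch
    : TorqueGeneratedTuple2Sketch<Tuple2Sketch, HeapObjectSketch> {};

static_assert(Tuple2Sketch::kSize == 3 * kTaggedSize, "map + two fields");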
diff --git a/deps/v8/src/objects/tagged-impl-inl.h b/deps/v8/src/objects/tagged-impl-inl.h
new file mode 100644
index 0000000000..f735a241a8
--- /dev/null
+++ b/deps/v8/src/objects/tagged-impl-inl.h
@@ -0,0 +1,257 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_TAGGED_IMPL_INL_H_
+#define V8_OBJECTS_TAGGED_IMPL_INL_H_
+
+#include "src/objects/tagged-impl.h"
+
+#ifdef V8_COMPRESS_POINTERS
+#include "src/execution/isolate.h"
+#endif
+#include "src/common/ptr-compr-inl.h"
+#include "src/objects/heap-object.h"
+#include "src/objects/smi.h"
+#include "src/roots/roots-inl.h"
+
+namespace v8 {
+namespace internal {
+
+template <HeapObjectReferenceType kRefType, typename StorageType>
+bool TaggedImpl<kRefType, StorageType>::ToSmi(Smi* value) const {
+ if (HAS_SMI_TAG(ptr_)) {
+ *value = ToSmi();
+ return true;
+ }
+ return false;
+}
+
+template <HeapObjectReferenceType kRefType, typename StorageType>
+Smi TaggedImpl<kRefType, StorageType>::ToSmi() const {
+ DCHECK(HAS_SMI_TAG(ptr_));
+ if (kIsFull) {
+ return Smi(ptr_);
+ }
+ // Implementation for compressed pointers.
+ return Smi(DecompressTaggedSigned(static_cast<Tagged_t>(ptr_)));
+}
+
+//
+// TaggedImpl::GetHeapObject(HeapObject* result) implementation.
+//
+
+template <HeapObjectReferenceType kRefType, typename StorageType>
+bool TaggedImpl<kRefType, StorageType>::GetHeapObject(
+ HeapObject* result) const {
+ CHECK(kIsFull);
+ if (!IsStrongOrWeak()) return false;
+ *result = GetHeapObject();
+ return true;
+}
+
+template <HeapObjectReferenceType kRefType, typename StorageType>
+bool TaggedImpl<kRefType, StorageType>::GetHeapObject(
+ ROOT_PARAM, HeapObject* result) const {
+ if (kIsFull) return GetHeapObject(result);
+ // Implementation for compressed pointers.
+ if (!IsStrongOrWeak()) return false;
+ *result = GetHeapObject(ROOT_VALUE);
+ return true;
+}
+
+//
+// TaggedImpl::GetHeapObject(HeapObject* result,
+// HeapObjectReferenceType* reference_type)
+// implementation.
+//
+
+template <HeapObjectReferenceType kRefType, typename StorageType>
+bool TaggedImpl<kRefType, StorageType>::GetHeapObject(
+ HeapObject* result, HeapObjectReferenceType* reference_type) const {
+ CHECK(kIsFull);
+ if (!IsStrongOrWeak()) return false;
+ *reference_type = IsWeakOrCleared() ? HeapObjectReferenceType::WEAK
+ : HeapObjectReferenceType::STRONG;
+ *result = GetHeapObject();
+ return true;
+}
+
+template <HeapObjectReferenceType kRefType, typename StorageType>
+bool TaggedImpl<kRefType, StorageType>::GetHeapObject(
+ ROOT_PARAM, HeapObject* result,
+ HeapObjectReferenceType* reference_type) const {
+ if (kIsFull) return GetHeapObject(result, reference_type);
+ // Implementation for compressed pointers.
+ if (!IsStrongOrWeak()) return false;
+ *reference_type = IsWeakOrCleared() ? HeapObjectReferenceType::WEAK
+ : HeapObjectReferenceType::STRONG;
+ *result = GetHeapObject(ROOT_VALUE);
+ return true;
+}
+
+//
+// TaggedImpl::GetHeapObjectIfStrong(HeapObject* result) implementation.
+//
+
+template <HeapObjectReferenceType kRefType, typename StorageType>
+bool TaggedImpl<kRefType, StorageType>::GetHeapObjectIfStrong(
+ HeapObject* result) const {
+ CHECK(kIsFull);
+ if (IsStrong()) {
+ *result = HeapObject::cast(Object(ptr_));
+ return true;
+ }
+ return false;
+}
+
+template <HeapObjectReferenceType kRefType, typename StorageType>
+bool TaggedImpl<kRefType, StorageType>::GetHeapObjectIfStrong(
+ ROOT_PARAM, HeapObject* result) const {
+ if (kIsFull) return GetHeapObjectIfStrong(result);
+ // Implementation for compressed pointers.
+ if (IsStrong()) {
+ *result =
+ HeapObject::cast(Object(DecompressTaggedPointer(ROOT_VALUE, ptr_)));
+ return true;
+ }
+ return false;
+}
+
+//
+// TaggedImpl::GetHeapObjectAssumeStrong() implementation.
+//
+
+template <HeapObjectReferenceType kRefType, typename StorageType>
+HeapObject TaggedImpl<kRefType, StorageType>::GetHeapObjectAssumeStrong()
+ const {
+ CHECK(kIsFull);
+ DCHECK(IsStrong());
+ return HeapObject::cast(Object(ptr_));
+}
+
+template <HeapObjectReferenceType kRefType, typename StorageType>
+HeapObject TaggedImpl<kRefType, StorageType>::GetHeapObjectAssumeStrong(
+ ROOT_PARAM) const {
+ if (kIsFull) return GetHeapObjectAssumeStrong();
+ // Implementation for compressed pointers.
+ DCHECK(IsStrong());
+ return HeapObject::cast(Object(DecompressTaggedPointer(ROOT_VALUE, ptr_)));
+}
+
+//
+// TaggedImpl::GetHeapObjectIfWeak(HeapObject* result) implementation
+//
+
+template <HeapObjectReferenceType kRefType, typename StorageType>
+bool TaggedImpl<kRefType, StorageType>::GetHeapObjectIfWeak(
+ HeapObject* result) const {
+ CHECK(kIsFull);
+ if (kCanBeWeak) {
+ if (IsWeak()) {
+ *result = GetHeapObject();
+ return true;
+ }
+ return false;
+ } else {
+ DCHECK(!HAS_WEAK_HEAP_OBJECT_TAG(ptr_));
+ return false;
+ }
+}
+
+template <HeapObjectReferenceType kRefType, typename StorageType>
+bool TaggedImpl<kRefType, StorageType>::GetHeapObjectIfWeak(
+ ROOT_PARAM, HeapObject* result) const {
+ if (kIsFull) return GetHeapObjectIfWeak(result);
+ // Implementation for compressed pointers.
+ if (kCanBeWeak) {
+ if (IsWeak()) {
+ *result = GetHeapObject(ROOT_VALUE);
+ return true;
+ }
+ return false;
+ } else {
+ DCHECK(!HAS_WEAK_HEAP_OBJECT_TAG(ptr_));
+ return false;
+ }
+}
+
+//
+// TaggedImpl::GetHeapObjectAssumeWeak() implementation.
+//
+
+template <HeapObjectReferenceType kRefType, typename StorageType>
+HeapObject TaggedImpl<kRefType, StorageType>::GetHeapObjectAssumeWeak() const {
+ CHECK(kIsFull);
+ DCHECK(IsWeak());
+ return GetHeapObject();
+}
+
+template <HeapObjectReferenceType kRefType, typename StorageType>
+HeapObject TaggedImpl<kRefType, StorageType>::GetHeapObjectAssumeWeak(
+ ROOT_PARAM) const {
+ if (kIsFull) return GetHeapObjectAssumeWeak();
+ // Implementation for compressed pointers.
+ DCHECK(IsWeak());
+ return GetHeapObject(ROOT_VALUE);
+}
+
+//
+// TaggedImpl::GetHeapObject() implementation.
+//
+
+template <HeapObjectReferenceType kRefType, typename StorageType>
+HeapObject TaggedImpl<kRefType, StorageType>::GetHeapObject() const {
+ CHECK(kIsFull);
+ DCHECK(!IsSmi());
+ if (kCanBeWeak) {
+ DCHECK(!IsCleared());
+ return HeapObject::cast(Object(ptr_ & ~kWeakHeapObjectMask));
+ } else {
+ DCHECK(!HAS_WEAK_HEAP_OBJECT_TAG(ptr_));
+ return HeapObject::cast(Object(ptr_));
+ }
+}
+
+template <HeapObjectReferenceType kRefType, typename StorageType>
+HeapObject TaggedImpl<kRefType, StorageType>::GetHeapObject(ROOT_PARAM) const {
+ if (kIsFull) return GetHeapObject();
+ // Implementation for compressed pointers.
+ DCHECK(!IsSmi());
+ if (kCanBeWeak) {
+ DCHECK(!IsCleared());
+ return HeapObject::cast(Object(
+ DecompressTaggedPointer(ROOT_VALUE, ptr_ & ~kWeakHeapObjectMask)));
+ } else {
+ DCHECK(!HAS_WEAK_HEAP_OBJECT_TAG(ptr_));
+ return HeapObject::cast(Object(DecompressTaggedPointer(ROOT_VALUE, ptr_)));
+ }
+}
+
+//
+// TaggedImpl::GetHeapObjectOrSmi() implementation.
+//
+
+template <HeapObjectReferenceType kRefType, typename StorageType>
+Object TaggedImpl<kRefType, StorageType>::GetHeapObjectOrSmi() const {
+ CHECK(kIsFull);
+ if (IsSmi()) {
+ return Object(ptr_);
+ }
+ return GetHeapObject();
+}
+
+template <HeapObjectReferenceType kRefType, typename StorageType>
+Object TaggedImpl<kRefType, StorageType>::GetHeapObjectOrSmi(ROOT_PARAM) const {
+ if (kIsFull) return GetHeapObjectOrSmi();
+ // Implementation for compressed pointers.
+ if (IsSmi()) {
+ return Object(DecompressTaggedSigned(ptr_));
+ }
+ return GetHeapObject(ROOT_VALUE);
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_OBJECTS_TAGGED_IMPL_INL_H_
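
Annotation: every ROOT_PARAM overload in this header opens with `if (kIsFull) return <full variant>;`, so when StorageType is a full Address the compressed branch is dead code the compiler drops, and only genuinely compressed instantiations pay for decompression. A self-contained sketch of that dispatch (ToAddress and the base-plus-offset decompression are simplifications; the real path goes through DecompressTaggedPointer and friends):

#include <cstdint>

using Address = uintptr_t;
using Tagged_t = uint32_t;  // compressed on-heap representation

template <typename StorageType>
class TaggedSketch {
 public:
  // True when StorageType already holds a full pointer.
  static const bool kIsFull = sizeof(StorageType) == sizeof(Address);

  explicit TaggedSketch(StorageType ptr) : ptr_(ptr) {}

  // Simplified: real V8 decompresses against an isolate root and handles
  // Smis and weak bits separately.
  Address ToAddress(Address heap_base) const {
    if (kIsFull) return static_cast<Address>(ptr_);
    // Compressed path: the stored value is a 32-bit offset from the base.
    return heap_base + static_cast<Address>(static_cast<Tagged_t>(ptr_));
  }

 private:
  StorageType ptr_;
};

// The full instantiation ignores heap_base entirely; the compressed one
// needs it, mirroring the ROOT_PARAM-taking overloads above.
// TaggedSketch<Address> full{0x1234};
// TaggedSketch<Tagged_t> compressed{0x5678};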
diff --git a/deps/v8/src/objects/tagged-impl.cc b/deps/v8/src/objects/tagged-impl.cc
new file mode 100644
index 0000000000..f50cec1e67
--- /dev/null
+++ b/deps/v8/src/objects/tagged-impl.cc
@@ -0,0 +1,39 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/objects/tagged-impl.h"
+
+#include <sstream>
+
+#include "src/objects/objects.h"
+#include "src/strings/string-stream.h"
+#include "src/utils/ostreams.h"
+
+namespace v8 {
+namespace internal {
+
+template <HeapObjectReferenceType kRefType, typename StorageType>
+void TaggedImpl<kRefType, StorageType>::ShortPrint(FILE* out) {
+ OFStream os(out);
+ os << Brief(*this);
+}
+
+template <HeapObjectReferenceType kRefType, typename StorageType>
+void TaggedImpl<kRefType, StorageType>::ShortPrint(StringStream* accumulator) {
+ std::ostringstream os;
+ os << Brief(*this);
+ accumulator->Add(os.str().c_str());
+}
+
+template <HeapObjectReferenceType kRefType, typename StorageType>
+void TaggedImpl<kRefType, StorageType>::ShortPrint(std::ostream& os) {
+ os << Brief(*this);
+}
+
+// Explicit instantiation definitions.
+template class TaggedImpl<HeapObjectReferenceType::STRONG, Address>;
+template class TaggedImpl<HeapObjectReferenceType::WEAK, Address>;
+
+} // namespace internal
+} // namespace v8
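
Annotation: keeping the ShortPrint definitions in this .cc file works because the file ends with explicit instantiation definitions for the two Address-based instantiations; other translation units link against those emitted symbols instead of re-instantiating the template. The pattern in miniature (names illustrative):

#include <iostream>

// --- sketch.h ---------------------------------------------------------
template <typename T>
struct Printer {
  void Print();  // declared here, defined out of line in the .cc file
};

// --- sketch.cc --------------------------------------------------------
template <typename T>
void Printer<T>::Print() { std::cout << sizeof(T) << '\n'; }

// Explicit instantiation definitions: emit code for exactly these
// specializations so other translation units can link against them.
template struct Printer<int>;
template struct Printer<double>;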
diff --git a/deps/v8/src/objects/tagged-impl.h b/deps/v8/src/objects/tagged-impl.h
new file mode 100644
index 0000000000..e3d982565f
--- /dev/null
+++ b/deps/v8/src/objects/tagged-impl.h
@@ -0,0 +1,181 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_TAGGED_IMPL_H_
+#define V8_OBJECTS_TAGGED_IMPL_H_
+
+#include "include/v8-internal.h"
+#include "include/v8.h"
+#include "src/common/globals.h"
+
+namespace v8 {
+namespace internal {
+
+// A TaggedImpl is a base class for Object (which is either a Smi or a strong
+// reference to a HeapObject) and MaybeObject (which is either a Smi, a strong
+// reference to a HeapObject, a weak reference to a HeapObject, or a cleared
+// weak reference).
+// This class provides storage and one canonical implementation of various
+// predicates that check Smi and heap object tag values, and also takes into
+// account whether the tagged value is expected to be a weak reference to a
+// HeapObject or a cleared weak reference.
+template <HeapObjectReferenceType kRefType, typename StorageType>
+class TaggedImpl {
+ public:
+ static_assert(std::is_same<StorageType, Address>::value ||
+ std::is_same<StorageType, Tagged_t>::value,
+ "StorageType must be either Address or Tagged_t");
+
+ // True for those TaggedImpl instantiations that represent uncompressed
+ // tagged values and false for TaggedImpl instantiations that represent
+ // compressed tagged values.
+ static const bool kIsFull = sizeof(StorageType) == kSystemPointerSize;
+
+ static const bool kCanBeWeak = kRefType == HeapObjectReferenceType::WEAK;
+
+ constexpr TaggedImpl() : ptr_{} {}
+ explicit constexpr TaggedImpl(StorageType ptr) : ptr_(ptr) {}
+
+ // Make clang on Linux catch what MSVC complains about on Windows:
+ operator bool() const = delete;
+
+ constexpr bool operator==(TaggedImpl other) const {
+ return ptr_ == other.ptr_;
+ }
+ constexpr bool operator!=(TaggedImpl other) const {
+ return ptr_ != other.ptr_;
+ }
+
+  // For use in std::set and std::map.
+ constexpr bool operator<(TaggedImpl other) const {
+ return ptr_ < other.ptr();
+ }
+
+ constexpr StorageType ptr() const { return ptr_; }
+
+ // Returns true if this tagged value is a strong pointer to a HeapObject or
+ // Smi.
+ constexpr inline bool IsObject() const { return !IsWeakOrCleared(); }
+
+ // Returns true if this tagged value is a Smi.
+ constexpr bool IsSmi() const { return HAS_SMI_TAG(ptr_); }
+ inline bool ToSmi(Smi* value) const;
+ inline Smi ToSmi() const;
+
+ // Returns true if this tagged value is a strong pointer to a HeapObject.
+ constexpr inline bool IsHeapObject() const { return IsStrong(); }
+
+ // Returns true if this tagged value is a cleared weak reference.
+ constexpr inline bool IsCleared() const {
+ return kCanBeWeak &&
+ (static_cast<uint32_t>(ptr_) == kClearedWeakHeapObjectLower32);
+ }
+
+ // Returns true if this tagged value is a strong or weak pointer to a
+ // HeapObject.
+ constexpr inline bool IsStrongOrWeak() const {
+ return !IsSmi() && !IsCleared();
+ }
+
+ // Returns true if this tagged value is a strong pointer to a HeapObject.
+ constexpr inline bool IsStrong() const {
+#ifdef V8_CAN_HAVE_DCHECK_IN_CONSTEXPR
+ DCHECK_IMPLIES(!kCanBeWeak, !IsSmi() == HAS_STRONG_HEAP_OBJECT_TAG(ptr_));
+#endif
+ return kCanBeWeak ? HAS_STRONG_HEAP_OBJECT_TAG(ptr_) : !IsSmi();
+ }
+
+ // Returns true if this tagged value is a weak pointer to a HeapObject.
+ constexpr inline bool IsWeak() const {
+ return IsWeakOrCleared() && !IsCleared();
+ }
+
+ // Returns true if this tagged value is a weak pointer to a HeapObject or
+ // cleared weak reference.
+ constexpr inline bool IsWeakOrCleared() const {
+ return kCanBeWeak && HAS_WEAK_HEAP_OBJECT_TAG(ptr_);
+ }
+
+ //
+  // The following set of methods gets a HeapObject out of the tagged value,
+  // which may involve decompression, in which case ROOT_PARAM is required.
+  // If pointer compression is not enabled, the ROOT_PARAM variants behave
+  // exactly the same as the non-ROOT_PARAM ones.
+ //
+
+ // If this tagged value is a strong pointer to a HeapObject, returns true and
+ // sets *result. Otherwise returns false.
+ inline bool GetHeapObjectIfStrong(HeapObject* result) const;
+ inline bool GetHeapObjectIfStrong(ROOT_PARAM, HeapObject* result) const;
+
+ // DCHECKs that this tagged value is a strong pointer to a HeapObject and
+ // returns the HeapObject.
+ inline HeapObject GetHeapObjectAssumeStrong() const;
+ inline HeapObject GetHeapObjectAssumeStrong(ROOT_PARAM) const;
+
+ // If this tagged value is a weak pointer to a HeapObject, returns true and
+ // sets *result. Otherwise returns false.
+ inline bool GetHeapObjectIfWeak(HeapObject* result) const;
+ inline bool GetHeapObjectIfWeak(ROOT_PARAM, HeapObject* result) const;
+
+ // DCHECKs that this tagged value is a weak pointer to a HeapObject and
+ // returns the HeapObject.
+ inline HeapObject GetHeapObjectAssumeWeak() const;
+ inline HeapObject GetHeapObjectAssumeWeak(ROOT_PARAM) const;
+
+ // If this tagged value is a strong or weak pointer to a HeapObject, returns
+ // true and sets *result. Otherwise returns false.
+ inline bool GetHeapObject(HeapObject* result) const;
+ inline bool GetHeapObject(ROOT_PARAM, HeapObject* result) const;
+
+ inline bool GetHeapObject(HeapObject* result,
+ HeapObjectReferenceType* reference_type) const;
+ inline bool GetHeapObject(ROOT_PARAM, HeapObject* result,
+ HeapObjectReferenceType* reference_type) const;
+
+ // DCHECKs that this tagged value is a strong or a weak pointer to a
+ // HeapObject and returns the HeapObject.
+ inline HeapObject GetHeapObject() const;
+ inline HeapObject GetHeapObject(ROOT_PARAM) const;
+
+ // DCHECKs that this tagged value is a strong or a weak pointer to a
+ // HeapObject or a Smi and returns the HeapObject or Smi.
+ inline Object GetHeapObjectOrSmi() const;
+ inline Object GetHeapObjectOrSmi(ROOT_PARAM) const;
+
+ // Cast operation is available only for full non-weak tagged values.
+ template <typename T>
+ T cast() const {
+ CHECK(kIsFull);
+ DCHECK(!HAS_WEAK_HEAP_OBJECT_TAG(ptr_));
+ return T::cast(Object(ptr_));
+ }
+
+ // Prints this object without details.
+ void ShortPrint(FILE* out = stdout);
+
+ // Prints this object without details to a message accumulator.
+ void ShortPrint(StringStream* accumulator);
+
+ void ShortPrint(std::ostream& os);
+
+#ifdef OBJECT_PRINT
+ void Print();
+ void Print(std::ostream& os);
+#else
+ void Print() { ShortPrint(); }
+ void Print(std::ostream& os) { ShortPrint(os); }
+#endif
+
+ private:
+ friend class CompressedObjectSlot;
+ friend class FullObjectSlot;
+
+ StorageType ptr_;
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_OBJECTS_TAGGED_IMPL_H_
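
Annotation: the predicates in this header all reduce to bit tests on the stored word: a Smi has bit 0 clear, a strong HeapObject pointer has low bits 01, a weak reference has low bits 11, and the cleared-weak sentinel carries 3 in its lower 32 bits (kClearedWeakHeapObjectLower32). A compact sketch of the same checks with the constants spelled out (names simplified):

#include <cstdint>

using Address = uintptr_t;

constexpr Address kSmiTagMask = 1;           // bit 0
constexpr Address kHeapObjectTagMask = 3;    // bits 0..1
constexpr Address kHeapObjectTag = 1;        // strong: ...01
constexpr Address kWeakHeapObjectTag = 3;    // weak:   ...11
constexpr uint32_t kClearedWeakLower32 = 3;  // cleared-weak sentinel

constexpr bool IsSmi(Address p) { return (p & kSmiTagMask) == 0; }
constexpr bool IsStrong(Address p) {
  return (p & kHeapObjectTagMask) == kHeapObjectTag;
}
constexpr bool IsWeakOrCleared(Address p) {
  return (p & kHeapObjectTagMask) == kWeakHeapObjectTag;
}
constexpr bool IsCleared(Address p) {
  return static_cast<uint32_t>(p) == kClearedWeakLower32;
}

static_assert(IsSmi(42 << 1), "Smis carry the payload in the upper bits");
static_assert(IsStrong(0x1000 | 1), "strong pointers end in 01");
static_assert(IsWeakOrCleared(0x1000 | 3), "weak pointers end in 11");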
diff --git a/deps/v8/src/objects/tagged-value-inl.h b/deps/v8/src/objects/tagged-value-inl.h
new file mode 100644
index 0000000000..5eb0e20947
--- /dev/null
+++ b/deps/v8/src/objects/tagged-value-inl.h
@@ -0,0 +1,39 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_TAGGED_VALUE_INL_H_
+#define V8_OBJECTS_TAGGED_VALUE_INL_H_
+
+#include "src/objects/tagged-value.h"
+
+#include "include/v8-internal.h"
+#include "src/common/ptr-compr-inl.h"
+#include "src/objects/heap-object-inl.h"
+#include "src/objects/oddball.h"
+#include "src/objects/tagged-impl-inl.h"
+#include "src/roots/roots-inl.h"
+
+namespace v8 {
+namespace internal {
+
+Object StrongTaggedValue::ToObject(WITH_ROOT_PARAM(StrongTaggedValue object)) {
+#ifdef V8_COMPRESS_POINTERS
+ return Object(DecompressTaggedAny(ROOT_VALUE, object.ptr()));
+#else
+ return Object(object.ptr());
+#endif
+}
+
+MaybeObject TaggedValue::ToMaybeObject(WITH_ROOT_PARAM(TaggedValue object)) {
+#ifdef V8_COMPRESS_POINTERS
+ return MaybeObject(DecompressTaggedAny(ROOT_VALUE, object.ptr()));
+#else
+ return MaybeObject(object.ptr());
+#endif
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_OBJECTS_TAGGED_VALUE_INL_H_
diff --git a/deps/v8/src/objects/tagged-value.h b/deps/v8/src/objects/tagged-value.h
new file mode 100644
index 0000000000..bb7609f7c3
--- /dev/null
+++ b/deps/v8/src/objects/tagged-value.h
@@ -0,0 +1,42 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_TAGGED_VALUE_H_
+#define V8_OBJECTS_TAGGED_VALUE_H_
+
+#include "src/objects/objects.h"
+
+#include "include/v8-internal.h"
+#include "src/objects/tagged-impl.h"
+
+namespace v8 {
+namespace internal {
+
+// Almost the same as Object, but this one deals with the in-heap and
+// potentially compressed representation of Objects and provides only limited
+// functionality which doesn't require decompression.
+class StrongTaggedValue
+ : public TaggedImpl<HeapObjectReferenceType::STRONG, Tagged_t> {
+ public:
+ constexpr StrongTaggedValue() : TaggedImpl() {}
+ explicit constexpr StrongTaggedValue(Tagged_t ptr) : TaggedImpl(ptr) {}
+
+ inline static Object ToObject(WITH_ROOT_PARAM(StrongTaggedValue object));
+};
+
+// Almost the same as MaybeObject, but this one deals with the in-heap and
+// potentially compressed representation of Objects and provides only limited
+// functionality which doesn't require decompression.
+class TaggedValue : public TaggedImpl<HeapObjectReferenceType::WEAK, Tagged_t> {
+ public:
+ constexpr TaggedValue() : TaggedImpl() {}
+ explicit constexpr TaggedValue(Tagged_t ptr) : TaggedImpl(ptr) {}
+
+ inline static MaybeObject ToMaybeObject(WITH_ROOT_PARAM(TaggedValue object));
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_OBJECTS_TAGGED_VALUE_H_
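
Annotation: StrongTaggedValue and TaggedValue deliberately store Tagged_t and offer no implicit widening; the only way back to a full Object or MaybeObject is the explicit ToObject/ToMaybeObject conversion, which decompresses against the root when V8_COMPRESS_POINTERS is set. A minimal sketch of why a distinct type helps (base-plus-offset decompression is a simplification):

#include <cstdint>

using Address = uintptr_t;
using Tagged_t = uint32_t;  // in-heap, possibly compressed representation

struct ObjectSketch { Address ptr; };

struct TaggedValueSketch {
  Tagged_t raw;
  // No implicit conversion to ObjectSketch exists: decompression must be
  // spelled out at the call site, with the heap base in hand.
  ObjectSketch ToObject(Address heap_base) const {
    return ObjectSketch{heap_base + raw};
  }
};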
diff --git a/deps/v8/src/objects/template-objects-inl.h b/deps/v8/src/objects/template-objects-inl.h
index cc6c096265..85c1e6c8f4 100644
--- a/deps/v8/src/objects/template-objects-inl.h
+++ b/deps/v8/src/objects/template-objects-inl.h
@@ -15,7 +15,7 @@
namespace v8 {
namespace internal {
-OBJECT_CONSTRUCTORS_IMPL(TemplateObjectDescription, Tuple2)
+OBJECT_CONSTRUCTORS_IMPL(TemplateObjectDescription, Struct)
OBJECT_CONSTRUCTORS_IMPL(CachedTemplateObject, Tuple3)
CAST_ACCESSOR(TemplateObjectDescription)
diff --git a/deps/v8/src/objects/template-objects.cc b/deps/v8/src/objects/template-objects.cc
index 448d54fb9d..2f34a48a2a 100644
--- a/deps/v8/src/objects/template-objects.cc
+++ b/deps/v8/src/objects/template-objects.cc
@@ -5,11 +5,11 @@
#include "src/objects/template-objects.h"
#include "src/base/functional.h"
+#include "src/execution/isolate.h"
#include "src/heap/factory.h"
-#include "src/isolate.h"
-#include "src/objects-inl.h"
+#include "src/objects/objects-inl.h"
+#include "src/objects/property-descriptor.h"
#include "src/objects/template-objects-inl.h"
-#include "src/property-descriptor.h"
namespace v8 {
namespace internal {
@@ -23,20 +23,20 @@ Handle<JSArray> TemplateObjectDescription::GetTemplateObject(
// Check the template weakmap to see if the template object already exists.
Handle<EphemeronHashTable> template_weakmap =
- native_context->template_weakmap()->IsUndefined(isolate)
+ native_context->template_weakmap().IsUndefined(isolate)
? EphemeronHashTable::New(isolate, 0)
: handle(EphemeronHashTable::cast(native_context->template_weakmap()),
isolate);
uint32_t hash = shared_info->Hash();
Object maybe_cached_template = template_weakmap->Lookup(shared_info, hash);
- while (!maybe_cached_template->IsTheHole()) {
+ while (!maybe_cached_template.IsTheHole()) {
CachedTemplateObject cached_template =
CachedTemplateObject::cast(maybe_cached_template);
- if (cached_template->slot_id() == slot_id)
- return handle(cached_template->template_object(), isolate);
+ if (cached_template.slot_id() == slot_id)
+ return handle(cached_template.template_object(), isolate);
- maybe_cached_template = cached_template->next();
+ maybe_cached_template = cached_template.next();
}
// Create the raw object from the {raw_strings}.
diff --git a/deps/v8/src/objects/template-objects.h b/deps/v8/src/objects/template-objects.h
index e99c8530e6..220f9dab1e 100644
--- a/deps/v8/src/objects/template-objects.h
+++ b/deps/v8/src/objects/template-objects.h
@@ -39,22 +39,25 @@ class CachedTemplateObject final : public Tuple3 {
// TemplateObjectDescription is a tuple of raw strings and cooked strings for
// tagged template literals. Used to communicate with the runtime for template
// object creation within the {Runtime_GetTemplateObject} method.
-class TemplateObjectDescription final : public Tuple2 {
+class TemplateObjectDescription final : public Struct {
public:
DECL_ACCESSORS(raw_strings, FixedArray)
DECL_ACCESSORS(cooked_strings, FixedArray)
+ DECL_CAST(TemplateObjectDescription)
+
static Handle<JSArray> GetTemplateObject(
Isolate* isolate, Handle<Context> native_context,
Handle<TemplateObjectDescription> description,
Handle<SharedFunctionInfo> shared_info, int slot_id);
- DECL_CAST(TemplateObjectDescription)
+ DECL_PRINTER(TemplateObjectDescription)
+ DECL_VERIFIER(TemplateObjectDescription)
- static constexpr int kRawStringsOffset = kValue1Offset;
- static constexpr int kCookedStringsOffset = kValue2Offset;
+ DEFINE_FIELD_OFFSET_CONSTANTS(
+ Struct::kHeaderSize, TORQUE_GENERATED_TEMPLATE_OBJECT_DESCRIPTION_FIELDS)
- OBJECT_CONSTRUCTORS(TemplateObjectDescription, Tuple2);
+ OBJECT_CONSTRUCTORS(TemplateObjectDescription, Struct);
};
} // namespace internal
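
Annotation: GetTemplateObject above is a two-level cache: an ephemeron (weak) hash table on the native context keyed by SharedFunctionInfo, whose value is a linked list of CachedTemplateObject entries distinguished by slot id. The lookup half of that structure, sketched with ordinary containers standing in for the heap objects:

#include <memory>
#include <unordered_map>

struct TemplateObjectSketch { /* stand-in for the cached JSArray */ };

// Linked-list node mirroring CachedTemplateObject {slot_id, object, next}.
struct CacheNode {
  int slot_id;
  TemplateObjectSketch object;
  std::unique_ptr<CacheNode> next;
};

// Stand-in for the per-context ephemeron table keyed by SharedFunctionInfo.
using TemplateCache =
    std::unordered_map<const void*, std::unique_ptr<CacheNode>>;

const TemplateObjectSketch* Lookup(const TemplateCache& cache,
                                   const void* shared_info, int slot_id) {
  auto it = cache.find(shared_info);
  if (it == cache.end()) return nullptr;
  // Walk the per-function list, like the while (!IsTheHole()) loop above.
  for (const CacheNode* n = it->second.get(); n; n = n->next.get()) {
    if (n->slot_id == slot_id) return &n->object;
  }
  return nullptr;
}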
diff --git a/deps/v8/src/objects/templates-inl.h b/deps/v8/src/objects/templates-inl.h
index 19739be91a..a1a098ffc0 100644
--- a/deps/v8/src/objects/templates-inl.h
+++ b/deps/v8/src/objects/templates-inl.h
@@ -40,8 +40,6 @@ ACCESSORS(FunctionTemplateInfo, rare_data, HeapObject,
ACCESSORS(FunctionTemplateInfo, cached_property_name, Object,
kCachedPropertyNameOffset)
SMI_ACCESSORS(FunctionTemplateInfo, length, kLengthOffset)
-BOOL_ACCESSORS(FunctionTemplateInfo, flag, hidden_prototype,
- kHiddenPrototypeBit)
BOOL_ACCESSORS(FunctionTemplateInfo, flag, undetectable, kUndetectableBit)
BOOL_ACCESSORS(FunctionTemplateInfo, flag, needs_access_check,
kNeedsAccessCheckBit)
@@ -58,26 +56,26 @@ SMI_ACCESSORS(FunctionTemplateInfo, flag, kFlagOffset)
FunctionTemplateRareData FunctionTemplateInfo::EnsureFunctionTemplateRareData(
Isolate* isolate, Handle<FunctionTemplateInfo> function_template_info) {
HeapObject extra = function_template_info->rare_data();
- if (extra->IsUndefined(isolate)) {
+ if (extra.IsUndefined(isolate)) {
return AllocateFunctionTemplateRareData(isolate, function_template_info);
} else {
return FunctionTemplateRareData::cast(extra);
}
}
-#define RARE_ACCESSORS(Name, CamelName, Type) \
- Type FunctionTemplateInfo::Get##CamelName() { \
- HeapObject extra = rare_data(); \
- HeapObject undefined = GetReadOnlyRoots().undefined_value(); \
- return extra == undefined ? undefined \
- : FunctionTemplateRareData::cast(extra)->Name(); \
- } \
- inline void FunctionTemplateInfo::Set##CamelName( \
- Isolate* isolate, Handle<FunctionTemplateInfo> function_template_info, \
- Handle<Type> Name) { \
- FunctionTemplateRareData rare_data = \
- EnsureFunctionTemplateRareData(isolate, function_template_info); \
- rare_data->set_##Name(*Name); \
+#define RARE_ACCESSORS(Name, CamelName, Type) \
+ Type FunctionTemplateInfo::Get##CamelName() { \
+ HeapObject extra = rare_data(); \
+ HeapObject undefined = GetReadOnlyRoots().undefined_value(); \
+ return extra == undefined ? undefined \
+ : FunctionTemplateRareData::cast(extra).Name(); \
+ } \
+ inline void FunctionTemplateInfo::Set##CamelName( \
+ Isolate* isolate, Handle<FunctionTemplateInfo> function_template_info, \
+ Handle<Type> Name) { \
+ FunctionTemplateRareData rare_data = \
+ EnsureFunctionTemplateRareData(isolate, function_template_info); \
+ rare_data.set_##Name(*Name); \
}
RARE_ACCESSORS(prototype_template, PrototypeTemplate, Object)
@@ -116,33 +114,33 @@ CAST_ACCESSOR(FunctionTemplateRareData)
CAST_ACCESSOR(ObjectTemplateInfo)
bool FunctionTemplateInfo::instantiated() {
- return shared_function_info()->IsSharedFunctionInfo();
+ return shared_function_info().IsSharedFunctionInfo();
}
bool FunctionTemplateInfo::BreakAtEntry() {
Object maybe_shared = shared_function_info();
- if (maybe_shared->IsSharedFunctionInfo()) {
+ if (maybe_shared.IsSharedFunctionInfo()) {
SharedFunctionInfo shared = SharedFunctionInfo::cast(maybe_shared);
- return shared->BreakAtEntry();
+ return shared.BreakAtEntry();
}
return false;
}
FunctionTemplateInfo FunctionTemplateInfo::GetParent(Isolate* isolate) {
Object parent = GetParentTemplate();
- return parent->IsUndefined(isolate) ? FunctionTemplateInfo()
- : FunctionTemplateInfo::cast(parent);
+ return parent.IsUndefined(isolate) ? FunctionTemplateInfo()
+ : FunctionTemplateInfo::cast(parent);
}
ObjectTemplateInfo ObjectTemplateInfo::GetParent(Isolate* isolate) {
Object maybe_ctor = constructor();
- if (maybe_ctor->IsUndefined(isolate)) return ObjectTemplateInfo();
+ if (maybe_ctor.IsUndefined(isolate)) return ObjectTemplateInfo();
FunctionTemplateInfo constructor = FunctionTemplateInfo::cast(maybe_ctor);
while (true) {
- constructor = constructor->GetParent(isolate);
+ constructor = constructor.GetParent(isolate);
if (constructor.is_null()) return ObjectTemplateInfo();
- Object maybe_obj = constructor->GetInstanceTemplate();
- if (!maybe_obj->IsUndefined(isolate)) {
+ Object maybe_obj = constructor.GetInstanceTemplate();
+ if (!maybe_obj.IsUndefined(isolate)) {
return ObjectTemplateInfo::cast(maybe_obj);
}
}
@@ -151,7 +149,7 @@ ObjectTemplateInfo ObjectTemplateInfo::GetParent(Isolate* isolate) {
int ObjectTemplateInfo::embedder_field_count() const {
Object value = data();
- DCHECK(value->IsSmi());
+ DCHECK(value.IsSmi());
return EmbedderFieldCount::decode(Smi::ToInt(value));
}
@@ -163,7 +161,7 @@ void ObjectTemplateInfo::set_embedder_field_count(int count) {
bool ObjectTemplateInfo::immutable_proto() const {
Object value = data();
- DCHECK(value->IsSmi());
+ DCHECK(value.IsSmi());
return IsImmutablePrototype::decode(Smi::ToInt(value));
}
@@ -173,7 +171,7 @@ void ObjectTemplateInfo::set_immutable_proto(bool immutable) {
}
bool FunctionTemplateInfo::IsTemplateFor(JSObject object) {
- return IsTemplateFor(object->map());
+ return IsTemplateFor(object.map());
}
} // namespace internal
diff --git a/deps/v8/src/objects/templates.h b/deps/v8/src/objects/templates.h
index bd55821c7d..66cd038114 100644
--- a/deps/v8/src/objects/templates.h
+++ b/deps/v8/src/objects/templates.h
@@ -139,7 +139,6 @@ class FunctionTemplateInfo : public TemplateInfo {
DECL_ACCESSORS(cached_property_name, Object)
// Begin flag bits ---------------------
- DECL_BOOLEAN_ACCESSORS(hidden_prototype)
DECL_BOOLEAN_ACCESSORS(undetectable)
// If set, object instances created by this function
@@ -169,7 +168,7 @@ class FunctionTemplateInfo : public TemplateInfo {
static const int kInvalidSerialNumber = 0;
- DEFINE_FIELD_OFFSET_CONSTANTS(TemplateInfo::kSize,
+ DEFINE_FIELD_OFFSET_CONSTANTS(TemplateInfo::kHeaderSize,
TORQUE_GENERATED_FUNCTION_TEMPLATE_INFO_FIELDS)
static Handle<SharedFunctionInfo> GetOrCreateSharedFunctionInfo(
@@ -189,13 +188,12 @@ class FunctionTemplateInfo : public TemplateInfo {
Handle<Object> getter);
// Bit position in the flag, from least significant bit position.
- static const int kHiddenPrototypeBit = 0;
- static const int kUndetectableBit = 1;
- static const int kNeedsAccessCheckBit = 2;
- static const int kReadOnlyPrototypeBit = 3;
- static const int kRemovePrototypeBit = 4;
- static const int kDoNotCacheBit = 5;
- static const int kAcceptAnyReceiver = 6;
+ static const int kUndetectableBit = 0;
+ static const int kNeedsAccessCheckBit = 1;
+ static const int kReadOnlyPrototypeBit = 2;
+ static const int kRemovePrototypeBit = 3;
+ static const int kDoNotCacheBit = 4;
+ static const int kAcceptAnyReceiver = 5;
private:
static inline FunctionTemplateRareData EnsureFunctionTemplateRareData(
@@ -221,7 +219,7 @@ class ObjectTemplateInfo : public TemplateInfo {
DECL_VERIFIER(ObjectTemplateInfo)
// Layout description.
- DEFINE_FIELD_OFFSET_CONSTANTS(TemplateInfo::kSize,
+ DEFINE_FIELD_OFFSET_CONSTANTS(TemplateInfo::kHeaderSize,
TORQUE_GENERATED_OBJECT_TEMPLATE_INFO_FIELDS)
// Starting from given object template's constructor walk up the inheritance
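
Annotation: dropping hidden_prototype shifts every remaining FunctionTemplateInfo flag bit down by one; BOOL_ACCESSORS then reads bit N out of the Smi-stored flag word, and ObjectTemplateInfo packs embedder_field_count and immutable_proto into its data Smi the same way. A minimal stand-in for that BitField encode/decode (V8's real BitField template has the same shape):

#include <cstdint>

// Minimal stand-in for V8's BitField<T, shift, size> pattern.
template <typename T, int kShift, int kSize>
struct BitFieldSketch {
  static constexpr uint32_t kMask = ((uint32_t{1} << kSize) - 1) << kShift;
  static constexpr uint32_t encode(T value) {
    return (static_cast<uint32_t>(value) << kShift) & kMask;
  }
  static constexpr T decode(uint32_t word) {
    return static_cast<T>((word & kMask) >> kShift);
  }
};

// Mirrors the renumbered flag bits above (illustrative subset).
using Undetectable     = BitFieldSketch<bool, 0, 1>;
using NeedsAccessCheck = BitFieldSketch<bool, 1, 1>;

static_assert(Undetectable::decode(Undetectable::encode(true)), "round trip");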
diff --git a/deps/v8/src/objects/transitions-inl.h b/deps/v8/src/objects/transitions-inl.h
new file mode 100644
index 0000000000..893de78dc4
--- /dev/null
+++ b/deps/v8/src/objects/transitions-inl.h
@@ -0,0 +1,321 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_TRANSITIONS_INL_H_
+#define V8_OBJECTS_TRANSITIONS_INL_H_
+
+#include "src/objects/transitions.h"
+
+#include "src/ic/handler-configuration-inl.h"
+#include "src/objects/fixed-array-inl.h"
+#include "src/objects/maybe-object-inl.h"
+#include "src/objects/slots.h"
+#include "src/objects/smi.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+TransitionArray TransitionsAccessor::transitions() {
+ DCHECK_EQ(kFullTransitionArray, encoding());
+ return TransitionArray::cast(raw_transitions_->GetHeapObjectAssumeStrong());
+}
+
+OBJECT_CONSTRUCTORS_IMPL(TransitionArray, WeakFixedArray)
+
+CAST_ACCESSOR(TransitionArray)
+
+bool TransitionArray::HasPrototypeTransitions() {
+ return Get(kPrototypeTransitionsIndex) != MaybeObject::FromSmi(Smi::zero());
+}
+
+WeakFixedArray TransitionArray::GetPrototypeTransitions() {
+ DCHECK(HasPrototypeTransitions()); // Callers must check first.
+ Object prototype_transitions =
+ Get(kPrototypeTransitionsIndex)->GetHeapObjectAssumeStrong();
+ return WeakFixedArray::cast(prototype_transitions);
+}
+
+HeapObjectSlot TransitionArray::GetKeySlot(int transition_number) {
+ DCHECK(transition_number < number_of_transitions());
+ return HeapObjectSlot(RawFieldOfElementAt(ToKeyIndex(transition_number)));
+}
+
+void TransitionArray::SetPrototypeTransitions(WeakFixedArray transitions) {
+ DCHECK(transitions.IsWeakFixedArray());
+ WeakFixedArray::Set(kPrototypeTransitionsIndex,
+ HeapObjectReference::Strong(transitions));
+}
+
+int TransitionArray::NumberOfPrototypeTransitions(
+ WeakFixedArray proto_transitions) {
+ if (proto_transitions.length() == 0) return 0;
+ MaybeObject raw =
+ proto_transitions.Get(kProtoTransitionNumberOfEntriesOffset);
+ return raw.ToSmi().value();
+}
+
+Name TransitionArray::GetKey(int transition_number) {
+ DCHECK(transition_number < number_of_transitions());
+ return Name::cast(
+ Get(ToKeyIndex(transition_number))->GetHeapObjectAssumeStrong());
+}
+
+Name TransitionsAccessor::GetKey(int transition_number) {
+ switch (encoding()) {
+ case kPrototypeInfo:
+ case kUninitialized:
+ case kMigrationTarget:
+ UNREACHABLE();
+ return Name();
+ case kWeakRef: {
+ Map map = Map::cast(raw_transitions_->GetHeapObjectAssumeWeak());
+ return GetSimpleTransitionKey(map);
+ }
+ case kFullTransitionArray:
+ return transitions().GetKey(transition_number);
+ }
+ UNREACHABLE();
+}
+
+void TransitionArray::SetKey(int transition_number, Name key) {
+ DCHECK(transition_number < number_of_transitions());
+ WeakFixedArray::Set(ToKeyIndex(transition_number),
+ HeapObjectReference::Strong(key));
+}
+
+HeapObjectSlot TransitionArray::GetTargetSlot(int transition_number) {
+ DCHECK(transition_number < number_of_transitions());
+ return HeapObjectSlot(RawFieldOfElementAt(ToTargetIndex(transition_number)));
+}
+
+// static
+PropertyDetails TransitionsAccessor::GetTargetDetails(Name name, Map target) {
+ DCHECK(!IsSpecialTransition(name.GetReadOnlyRoots(), name));
+ int descriptor = target.LastAdded();
+ DescriptorArray descriptors = target.instance_descriptors();
+ // Transitions are allowed only for the last added property.
+ DCHECK(descriptors.GetKey(descriptor).Equals(name));
+ return descriptors.GetDetails(descriptor);
+}
+
+// static
+PropertyDetails TransitionsAccessor::GetSimpleTargetDetails(Map transition) {
+ return transition.GetLastDescriptorDetails();
+}
+
+// static
+Name TransitionsAccessor::GetSimpleTransitionKey(Map transition) {
+ int descriptor = transition.LastAdded();
+ return transition.instance_descriptors().GetKey(descriptor);
+}
+
+// static
+Map TransitionsAccessor::GetTargetFromRaw(MaybeObject raw) {
+ return Map::cast(raw->GetHeapObjectAssumeWeak());
+}
+
+MaybeObject TransitionArray::GetRawTarget(int transition_number) {
+ DCHECK(transition_number < number_of_transitions());
+ return Get(ToTargetIndex(transition_number));
+}
+
+Map TransitionArray::GetTarget(int transition_number) {
+ MaybeObject raw = GetRawTarget(transition_number);
+ return TransitionsAccessor::GetTargetFromRaw(raw);
+}
+
+Map TransitionsAccessor::GetTarget(int transition_number) {
+ switch (encoding()) {
+ case kPrototypeInfo:
+ case kUninitialized:
+ case kMigrationTarget:
+ UNREACHABLE();
+ return Map();
+ case kWeakRef:
+ return Map::cast(raw_transitions_->GetHeapObjectAssumeWeak());
+ case kFullTransitionArray:
+ return transitions().GetTarget(transition_number);
+ }
+ UNREACHABLE();
+}
+
+void TransitionArray::SetRawTarget(int transition_number, MaybeObject value) {
+ DCHECK(transition_number < number_of_transitions());
+ DCHECK(value->IsWeak());
+ DCHECK(value->GetHeapObjectAssumeWeak().IsMap());
+ WeakFixedArray::Set(ToTargetIndex(transition_number), value);
+}
+
+bool TransitionArray::GetTargetIfExists(int transition_number, Isolate* isolate,
+ Map* target) {
+ MaybeObject raw = GetRawTarget(transition_number);
+ HeapObject heap_object;
+ if (raw->GetHeapObjectIfStrong(&heap_object) &&
+ heap_object.IsUndefined(isolate)) {
+ return false;
+ }
+ *target = TransitionsAccessor::GetTargetFromRaw(raw);
+ return true;
+}
+
+int TransitionArray::SearchNameForTesting(Name name, int* out_insertion_index) {
+ return SearchName(name, out_insertion_index);
+}
+
+int TransitionArray::SearchSpecial(Symbol symbol, int* out_insertion_index) {
+ return SearchName(symbol, out_insertion_index);
+}
+
+int TransitionArray::SearchName(Name name, int* out_insertion_index) {
+ DCHECK(name.IsUniqueName());
+ return internal::Search<ALL_ENTRIES>(this, name, number_of_entries(),
+ out_insertion_index);
+}
+
+TransitionsAccessor::TransitionsAccessor(Isolate* isolate, Map map,
+ DisallowHeapAllocation* no_gc)
+ : isolate_(isolate), map_(map) {
+ Initialize();
+ USE(no_gc);
+}
+
+TransitionsAccessor::TransitionsAccessor(Isolate* isolate, Handle<Map> map)
+ : isolate_(isolate), map_handle_(map), map_(*map) {
+ Initialize();
+}
+
+void TransitionsAccessor::Reload() {
+ DCHECK(!map_handle_.is_null());
+ map_ = *map_handle_;
+ Initialize();
+}
+
+void TransitionsAccessor::Initialize() {
+ raw_transitions_ = map_.raw_transitions();
+ HeapObject heap_object;
+ if (raw_transitions_->IsSmi() || raw_transitions_->IsCleared()) {
+ encoding_ = kUninitialized;
+ } else if (raw_transitions_->IsWeak()) {
+ encoding_ = kWeakRef;
+ } else if (raw_transitions_->GetHeapObjectIfStrong(&heap_object)) {
+ if (heap_object.IsTransitionArray()) {
+ encoding_ = kFullTransitionArray;
+ } else if (heap_object.IsPrototypeInfo()) {
+ encoding_ = kPrototypeInfo;
+ } else {
+ DCHECK(map_.is_deprecated());
+ DCHECK(heap_object.IsMap());
+ encoding_ = kMigrationTarget;
+ }
+ } else {
+ UNREACHABLE();
+ }
+#if DEBUG
+ needs_reload_ = false;
+#endif
+}
+
+int TransitionArray::number_of_transitions() const {
+ if (length() < kFirstIndex) return 0;
+ return Get(kTransitionLengthIndex).ToSmi().value();
+}
+
+int TransitionArray::CompareKeys(Name key1, uint32_t hash1, PropertyKind kind1,
+ PropertyAttributes attributes1, Name key2,
+ uint32_t hash2, PropertyKind kind2,
+ PropertyAttributes attributes2) {
+ int cmp = CompareNames(key1, hash1, key2, hash2);
+ if (cmp != 0) return cmp;
+
+ return CompareDetails(kind1, attributes1, kind2, attributes2);
+}
+
+int TransitionArray::CompareNames(Name key1, uint32_t hash1, Name key2,
+ uint32_t hash2) {
+ if (key1 != key2) {
+ // In case of hash collisions key1 is always "less" than key2.
+ return hash1 <= hash2 ? -1 : 1;
+ }
+
+ return 0;
+}
+
+int TransitionArray::CompareDetails(PropertyKind kind1,
+ PropertyAttributes attributes1,
+ PropertyKind kind2,
+ PropertyAttributes attributes2) {
+ if (kind1 != kind2) {
+ return static_cast<int>(kind1) < static_cast<int>(kind2) ? -1 : 1;
+ }
+
+ if (attributes1 != attributes2) {
+ return static_cast<int>(attributes1) < static_cast<int>(attributes2) ? -1
+ : 1;
+ }
+
+ return 0;
+}
+
+void TransitionArray::Set(int transition_number, Name key, MaybeObject target) {
+ WeakFixedArray::Set(ToKeyIndex(transition_number),
+ MaybeObject::FromObject(key));
+ WeakFixedArray::Set(ToTargetIndex(transition_number), target);
+}
+
+Name TransitionArray::GetSortedKey(int transition_number) {
+ return GetKey(transition_number);
+}
+
+int TransitionArray::number_of_entries() const {
+ return number_of_transitions();
+}
+
+int TransitionArray::Capacity() {
+ if (length() <= kFirstIndex) return 0;
+ return (length() - kFirstIndex) / kEntrySize;
+}
+
+void TransitionArray::SetNumberOfTransitions(int number_of_transitions) {
+ DCHECK(number_of_transitions <= Capacity());
+ WeakFixedArray::Set(
+ kTransitionLengthIndex,
+ MaybeObject::FromSmi(Smi::FromInt(number_of_transitions)));
+}
+
+Handle<String> TransitionsAccessor::ExpectedTransitionKey() {
+ DisallowHeapAllocation no_gc;
+ switch (encoding()) {
+ case kPrototypeInfo:
+ case kUninitialized:
+ case kMigrationTarget:
+ case kFullTransitionArray:
+ return Handle<String>::null();
+ case kWeakRef: {
+ Map target = Map::cast(raw_transitions_->GetHeapObjectAssumeWeak());
+ PropertyDetails details = GetSimpleTargetDetails(target);
+ if (details.location() != kField) return Handle<String>::null();
+ DCHECK_EQ(kData, details.kind());
+ if (details.attributes() != NONE) return Handle<String>::null();
+ Name name = GetSimpleTransitionKey(target);
+ if (!name.IsString()) return Handle<String>::null();
+ return handle(String::cast(name), isolate_);
+ }
+ }
+ UNREACHABLE();
+}
+
+Handle<Map> TransitionsAccessor::ExpectedTransitionTarget() {
+ DCHECK(!ExpectedTransitionKey().is_null());
+ return handle(GetTarget(0), isolate_);
+}
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_TRANSITIONS_INL_H_
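
Annotation: TransitionsAccessor::Initialize classifies a map's raw_transitions word into one of five encodings using only its tag bits and, for strong references, the referent's type: a Smi or cleared weak reference means uninitialized, a weak reference is a single inline transition, a strong TransitionArray is the full encoding, a strong PrototypeInfo marks a prototype map, and any other strong Map marks a deprecated migration target. The same decision tree, sketched over plain words (tag constants as in the tagging sketch earlier):

#include <cstdint>

enum class Encoding {
  kUninitialized, kWeakRef, kFullTransitionArray, kPrototypeInfo,
  kMigrationTarget,
};

enum class StrongType { kTransitionArray, kPrototypeInfo, kMap };

// Simplified classifier mirroring TransitionsAccessor::Initialize().
// `word` uses low-bit tagging (Smi ...0, strong ...01, weak ...11);
// `strong_type` stands in for the heap-object type checks.
Encoding Classify(uintptr_t word, StrongType strong_type) {
  const bool is_smi = (word & 1) == 0;
  const bool is_cleared = static_cast<uint32_t>(word) == 3;
  const bool is_weak = (word & 3) == 3;
  if (is_smi || is_cleared) return Encoding::kUninitialized;
  if (is_weak) return Encoding::kWeakRef;
  switch (strong_type) {
    case StrongType::kTransitionArray: return Encoding::kFullTransitionArray;
    case StrongType::kPrototypeInfo:   return Encoding::kPrototypeInfo;
    case StrongType::kMap:             return Encoding::kMigrationTarget;
  }
  return Encoding::kUninitialized;  // unreachable
}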
diff --git a/deps/v8/src/objects/transitions.cc b/deps/v8/src/objects/transitions.cc
new file mode 100644
index 0000000000..a2cd102aaf
--- /dev/null
+++ b/deps/v8/src/objects/transitions.cc
@@ -0,0 +1,657 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/objects/transitions.h"
+
+#include "src/objects/objects-inl.h"
+#include "src/objects/transitions-inl.h"
+#include "src/utils/utils.h"
+
+namespace v8 {
+namespace internal {
+
+Map TransitionsAccessor::GetSimpleTransition() {
+ switch (encoding()) {
+ case kWeakRef:
+ return Map::cast(raw_transitions_->GetHeapObjectAssumeWeak());
+ default:
+ return Map();
+ }
+}
+
+bool TransitionsAccessor::HasSimpleTransitionTo(Map map) {
+ switch (encoding()) {
+ case kWeakRef:
+ return raw_transitions_->GetHeapObjectAssumeWeak() == map;
+ case kPrototypeInfo:
+ case kUninitialized:
+ case kMigrationTarget:
+ case kFullTransitionArray:
+ return false;
+ }
+ UNREACHABLE();
+}
+
+void TransitionsAccessor::Insert(Handle<Name> name, Handle<Map> target,
+ SimpleTransitionFlag flag) {
+ DCHECK(!map_handle_.is_null());
+ target->SetBackPointer(map_);
+
+ // If the map doesn't have any transitions at all yet, install the new one.
+ if (encoding() == kUninitialized || encoding() == kMigrationTarget) {
+ if (flag == SIMPLE_PROPERTY_TRANSITION) {
+ ReplaceTransitions(HeapObjectReference::Weak(*target));
+ return;
+ }
+ // If the flag requires a full TransitionArray, allocate one.
+ Handle<TransitionArray> result =
+ isolate_->factory()->NewTransitionArray(0, 1);
+ ReplaceTransitions(MaybeObject::FromObject(*result));
+ Reload();
+ }
+
+ bool is_special_transition = flag == SPECIAL_TRANSITION;
+ // If the map has a simple transition, check if it should be overwritten.
+ Map simple_transition = GetSimpleTransition();
+ if (!simple_transition.is_null()) {
+ Name key = GetSimpleTransitionKey(simple_transition);
+ PropertyDetails old_details = GetSimpleTargetDetails(simple_transition);
+ PropertyDetails new_details = is_special_transition
+ ? PropertyDetails::Empty()
+ : GetTargetDetails(*name, *target);
+ if (flag == SIMPLE_PROPERTY_TRANSITION && key.Equals(*name) &&
+ old_details.kind() == new_details.kind() &&
+ old_details.attributes() == new_details.attributes()) {
+ ReplaceTransitions(HeapObjectReference::Weak(*target));
+ return;
+ }
+ // Otherwise allocate a full TransitionArray with slack for a new entry.
+ Handle<Map> map(simple_transition, isolate_);
+ Handle<TransitionArray> result =
+ isolate_->factory()->NewTransitionArray(1, 1);
+ // Reload state; allocations might have caused it to be cleared.
+ Reload();
+ simple_transition = GetSimpleTransition();
+ if (!simple_transition.is_null()) {
+ DCHECK_EQ(*map, simple_transition);
+ if (encoding_ == kWeakRef) {
+ result->Set(0, GetSimpleTransitionKey(simple_transition),
+ HeapObjectReference::Weak(simple_transition));
+ } else {
+ UNREACHABLE();
+ }
+ } else {
+ result->SetNumberOfTransitions(0);
+ }
+ ReplaceTransitions(MaybeObject::FromObject(*result));
+ Reload();
+ }
+
+ // At this point, we know that the map has a full TransitionArray.
+ DCHECK_EQ(kFullTransitionArray, encoding());
+
+ int number_of_transitions = 0;
+ int new_nof = 0;
+ int insertion_index = kNotFound;
+ DCHECK_EQ(is_special_transition,
+ IsSpecialTransition(ReadOnlyRoots(isolate_), *name));
+ PropertyDetails details = is_special_transition
+ ? PropertyDetails::Empty()
+ : GetTargetDetails(*name, *target);
+
+ {
+ DisallowHeapAllocation no_gc;
+ TransitionArray array = transitions();
+ number_of_transitions = array.number_of_transitions();
+ new_nof = number_of_transitions;
+
+ int index = is_special_transition
+ ? array.SearchSpecial(Symbol::cast(*name), &insertion_index)
+ : array.Search(details.kind(), *name, details.attributes(),
+ &insertion_index);
+ // If an existing entry was found, overwrite it and return.
+ if (index != kNotFound) {
+ array.SetRawTarget(index, HeapObjectReference::Weak(*target));
+ return;
+ }
+
+ ++new_nof;
+ CHECK_LE(new_nof, kMaxNumberOfTransitions);
+ DCHECK(insertion_index >= 0 && insertion_index <= number_of_transitions);
+
+ // If there is enough capacity, insert new entry into the existing array.
+ if (new_nof <= array.Capacity()) {
+ array.SetNumberOfTransitions(new_nof);
+ for (index = number_of_transitions; index > insertion_index; --index) {
+ array.SetKey(index, array.GetKey(index - 1));
+ array.SetRawTarget(index, array.GetRawTarget(index - 1));
+ }
+ array.SetKey(index, *name);
+ array.SetRawTarget(index, HeapObjectReference::Weak(*target));
+ SLOW_DCHECK(array.IsSortedNoDuplicates());
+ return;
+ }
+ }
+
+ // We're gonna need a bigger TransitionArray.
+ Handle<TransitionArray> result = isolate_->factory()->NewTransitionArray(
+ new_nof,
+ Map::SlackForArraySize(number_of_transitions, kMaxNumberOfTransitions));
+
+ // The map's transition array may have shrunk during the allocation above as
+ // it was weakly traversed, though it is guaranteed not to disappear. Trim the
+ // result copy if needed, and recompute variables.
+ Reload();
+ DisallowHeapAllocation no_gc;
+ TransitionArray array = transitions();
+ if (array.number_of_transitions() != number_of_transitions) {
+ DCHECK(array.number_of_transitions() < number_of_transitions);
+
+ number_of_transitions = array.number_of_transitions();
+ new_nof = number_of_transitions;
+
+ insertion_index = kNotFound;
+ int index = is_special_transition
+ ? array.SearchSpecial(Symbol::cast(*name), &insertion_index)
+ : array.Search(details.kind(), *name, details.attributes(),
+ &insertion_index);
+ if (index == kNotFound) {
+ ++new_nof;
+ } else {
+ insertion_index = index;
+ }
+ DCHECK(insertion_index >= 0 && insertion_index <= number_of_transitions);
+
+ result->SetNumberOfTransitions(new_nof);
+ }
+
+ if (array.HasPrototypeTransitions()) {
+ result->SetPrototypeTransitions(array.GetPrototypeTransitions());
+ }
+
+ DCHECK_NE(kNotFound, insertion_index);
+ for (int i = 0; i < insertion_index; ++i) {
+ result->Set(i, array.GetKey(i), array.GetRawTarget(i));
+ }
+ result->Set(insertion_index, *name, HeapObjectReference::Weak(*target));
+ for (int i = insertion_index; i < number_of_transitions; ++i) {
+ result->Set(i + 1, array.GetKey(i), array.GetRawTarget(i));
+ }
+
+ SLOW_DCHECK(result->IsSortedNoDuplicates());
+ ReplaceTransitions(MaybeObject::FromObject(*result));
+}
+
+Map TransitionsAccessor::SearchTransition(Name name, PropertyKind kind,
+ PropertyAttributes attributes) {
+ DCHECK(name.IsUniqueName());
+ switch (encoding()) {
+ case kPrototypeInfo:
+ case kUninitialized:
+ case kMigrationTarget:
+ return Map();
+ case kWeakRef: {
+ Map map = Map::cast(raw_transitions_->GetHeapObjectAssumeWeak());
+ if (!IsMatchingMap(map, name, kind, attributes)) return Map();
+ return map;
+ }
+ case kFullTransitionArray: {
+ return transitions().SearchAndGetTarget(kind, name, attributes);
+ }
+ }
+ UNREACHABLE();
+}
+
+Map TransitionsAccessor::SearchSpecial(Symbol name) {
+ if (encoding() != kFullTransitionArray) return Map();
+ int transition = transitions().SearchSpecial(name);
+ if (transition == kNotFound) return Map();
+ return transitions().GetTarget(transition);
+}
+
+// static
+bool TransitionsAccessor::IsSpecialTransition(ReadOnlyRoots roots, Name name) {
+ if (!name.IsSymbol()) return false;
+ return name == roots.nonextensible_symbol() ||
+ name == roots.sealed_symbol() || name == roots.frozen_symbol() ||
+ name == roots.elements_transition_symbol() ||
+ name == roots.strict_function_transition_symbol();
+}
+
+MaybeHandle<Map> TransitionsAccessor::FindTransitionToDataProperty(
+ Handle<Name> name, RequestedLocation requested_location) {
+ DCHECK(name->IsUniqueName());
+ DisallowHeapAllocation no_gc;
+ PropertyAttributes attributes = name->IsPrivate() ? DONT_ENUM : NONE;
+ Map target = SearchTransition(*name, kData, attributes);
+ if (target.is_null()) return MaybeHandle<Map>();
+ PropertyDetails details = target.GetLastDescriptorDetails();
+ DCHECK_EQ(attributes, details.attributes());
+ DCHECK_EQ(kData, details.kind());
+ if (requested_location == kFieldOnly && details.location() != kField) {
+ return MaybeHandle<Map>();
+ }
+ return Handle<Map>(target, isolate_);
+}
+
+bool TransitionsAccessor::CanHaveMoreTransitions() {
+ if (map_.is_dictionary_map()) return false;
+ if (encoding() == kFullTransitionArray) {
+ return transitions().number_of_transitions() < kMaxNumberOfTransitions;
+ }
+ return true;
+}
+
+// static
+bool TransitionsAccessor::IsMatchingMap(Map target, Name name,
+ PropertyKind kind,
+ PropertyAttributes attributes) {
+ int descriptor = target.LastAdded();
+ DescriptorArray descriptors = target.instance_descriptors();
+ Name key = descriptors.GetKey(descriptor);
+ if (key != name) return false;
+ return descriptors.GetDetails(descriptor)
+ .HasKindAndAttributes(kind, attributes);
+}
+
+// static
+bool TransitionArray::CompactPrototypeTransitionArray(Isolate* isolate,
+ WeakFixedArray array) {
+ const int header = kProtoTransitionHeaderSize;
+ int number_of_transitions = NumberOfPrototypeTransitions(array);
+ if (number_of_transitions == 0) {
+ // Empty array cannot be compacted.
+ return false;
+ }
+ int new_number_of_transitions = 0;
+ for (int i = 0; i < number_of_transitions; i++) {
+ MaybeObject target = array.Get(header + i);
+ DCHECK(target->IsCleared() ||
+ (target->IsWeak() && target->GetHeapObject().IsMap()));
+ if (!target->IsCleared()) {
+ if (new_number_of_transitions != i) {
+ array.Set(header + new_number_of_transitions, target);
+ }
+ new_number_of_transitions++;
+ }
+ }
+ // Fill slots that became free with undefined value.
+ MaybeObject undefined =
+ MaybeObject::FromObject(*isolate->factory()->undefined_value());
+ for (int i = new_number_of_transitions; i < number_of_transitions; i++) {
+ array.Set(header + i, undefined);
+ }
+ if (number_of_transitions != new_number_of_transitions) {
+ SetNumberOfPrototypeTransitions(array, new_number_of_transitions);
+ }
+ return new_number_of_transitions < number_of_transitions;
+}
+
+// static
+Handle<WeakFixedArray> TransitionArray::GrowPrototypeTransitionArray(
+ Handle<WeakFixedArray> array, int new_capacity, Isolate* isolate) {
+ // Grow array by factor 2 up to MaxCachedPrototypeTransitions.
+ int capacity = array->length() - kProtoTransitionHeaderSize;
+ new_capacity = Min(kMaxCachedPrototypeTransitions, new_capacity);
+ DCHECK_GT(new_capacity, capacity);
+ int grow_by = new_capacity - capacity;
+ array = isolate->factory()->CopyWeakFixedArrayAndGrow(array, grow_by,
+ AllocationType::kOld);
+ if (capacity < 0) {
+ // There was no prototype transitions array before, so the size
+ // couldn't be copied. Initialize it explicitly.
+ SetNumberOfPrototypeTransitions(*array, 0);
+ }
+ return array;
+}
+
+void TransitionsAccessor::PutPrototypeTransition(Handle<Object> prototype,
+ Handle<Map> target_map) {
+ DCHECK(HeapObject::cast(*prototype).map().IsMap());
+ // Don't cache prototype transition if this map is either shared, or a map of
+ // a prototype.
+ if (map_.is_prototype_map()) return;
+ if (map_.is_dictionary_map() || !FLAG_cache_prototype_transitions) return;
+
+ const int header = TransitionArray::kProtoTransitionHeaderSize;
+
+ Handle<WeakFixedArray> cache(GetPrototypeTransitions(), isolate_);
+ int capacity = cache->length() - header;
+ int transitions = TransitionArray::NumberOfPrototypeTransitions(*cache) + 1;
+
+ if (transitions > capacity) {
+ // Grow the array if compacting it doesn't free space.
+ if (!TransitionArray::CompactPrototypeTransitionArray(isolate_, *cache)) {
+ if (capacity == TransitionArray::kMaxCachedPrototypeTransitions) return;
+ cache = TransitionArray::GrowPrototypeTransitionArray(
+ cache, 2 * transitions, isolate_);
+ Reload();
+ SetPrototypeTransitions(cache);
+ }
+ }
+
+ // Reload number of transitions as they might have been compacted.
+ int last = TransitionArray::NumberOfPrototypeTransitions(*cache);
+ int entry = header + last;
+
+ cache->Set(entry, HeapObjectReference::Weak(*target_map));
+ TransitionArray::SetNumberOfPrototypeTransitions(*cache, last + 1);
+}
+
+Handle<Map> TransitionsAccessor::GetPrototypeTransition(
+ Handle<Object> prototype) {
+ DisallowHeapAllocation no_gc;
+ WeakFixedArray cache = GetPrototypeTransitions();
+ int length = TransitionArray::NumberOfPrototypeTransitions(cache);
+ for (int i = 0; i < length; i++) {
+ MaybeObject target =
+ cache.Get(TransitionArray::kProtoTransitionHeaderSize + i);
+ DCHECK(target->IsWeakOrCleared());
+ HeapObject heap_object;
+ if (target->GetHeapObjectIfWeak(&heap_object)) {
+ Map map = Map::cast(heap_object);
+ if (map.prototype() == *prototype) {
+ return handle(map, isolate_);
+ }
+ }
+ }
+ return Handle<Map>();
+}
+
+WeakFixedArray TransitionsAccessor::GetPrototypeTransitions() {
+ if (encoding() != kFullTransitionArray ||
+ !transitions().HasPrototypeTransitions()) {
+ return ReadOnlyRoots(isolate_).empty_weak_fixed_array();
+ }
+ return transitions().GetPrototypeTransitions();
+}
+
+// static
+void TransitionArray::SetNumberOfPrototypeTransitions(
+ WeakFixedArray proto_transitions, int value) {
+ DCHECK_NE(proto_transitions.length(), 0);
+ proto_transitions.Set(kProtoTransitionNumberOfEntriesOffset,
+ MaybeObject::FromSmi(Smi::FromInt(value)));
+}
+
+int TransitionsAccessor::NumberOfTransitions() {
+ switch (encoding()) {
+ case kPrototypeInfo:
+ case kUninitialized:
+ case kMigrationTarget:
+ return 0;
+ case kWeakRef:
+ return 1;
+ case kFullTransitionArray:
+ return transitions().number_of_transitions();
+ }
+ UNREACHABLE();
+ return 0; // Make GCC happy.
+}
+
+void TransitionsAccessor::SetMigrationTarget(Map migration_target) {
+ // We only cache the migration target for maps with empty transitions for GC's
+ // sake.
+ if (encoding() != kUninitialized) return;
+ DCHECK(map_.is_deprecated());
+ map_.set_raw_transitions(MaybeObject::FromObject(migration_target));
+ MarkNeedsReload();
+}
+
+Map TransitionsAccessor::GetMigrationTarget() {
+ if (encoding() == kMigrationTarget) {
+ return map_.raw_transitions()->cast<Map>();
+ }
+ return Map();
+}
+
+void TransitionArray::Zap(Isolate* isolate) {
+ MemsetTagged(ObjectSlot(RawFieldOfElementAt(kPrototypeTransitionsIndex)),
+ ReadOnlyRoots(isolate).the_hole_value(),
+ length() - kPrototypeTransitionsIndex);
+ SetNumberOfTransitions(0);
+}
+
+void TransitionsAccessor::ReplaceTransitions(MaybeObject new_transitions) {
+ if (encoding() == kFullTransitionArray) {
+ TransitionArray old_transitions = transitions();
+#if DEBUG
+ CheckNewTransitionsAreConsistent(
+ old_transitions, new_transitions->GetHeapObjectAssumeStrong());
+ DCHECK(old_transitions != new_transitions->GetHeapObjectAssumeStrong());
+#endif
+ // Transition arrays are not shared. When one is replaced, it should not
+ // keep referenced objects alive, so we zap it.
+ // When there is another reference to the array somewhere (e.g. a handle),
+ // not zapping turns from a waste of memory into a source of crashes.
+ old_transitions.Zap(isolate_);
+ }
+ map_.set_raw_transitions(new_transitions);
+ MarkNeedsReload();
+}
+
+void TransitionsAccessor::SetPrototypeTransitions(
+ Handle<WeakFixedArray> proto_transitions) {
+ EnsureHasFullTransitionArray();
+ transitions().SetPrototypeTransitions(*proto_transitions);
+}
+
+void TransitionsAccessor::EnsureHasFullTransitionArray() {
+ if (encoding() == kFullTransitionArray) return;
+ int nof =
+ (encoding() == kUninitialized || encoding() == kMigrationTarget) ? 0 : 1;
+ Handle<TransitionArray> result = isolate_->factory()->NewTransitionArray(nof);
+ Reload(); // Reload after possible GC.
+ if (nof == 1) {
+ if (encoding() == kUninitialized) {
+ // If allocation caused GC and cleared the target, trim the new array.
+ result->SetNumberOfTransitions(0);
+ } else {
+ // Otherwise populate the new array.
+ Handle<Map> target(GetSimpleTransition(), isolate_);
+ Name key = GetSimpleTransitionKey(*target);
+ result->Set(0, key, HeapObjectReference::Weak(*target));
+ }
+ }
+ ReplaceTransitions(MaybeObject::FromObject(*result));
+ Reload(); // Reload after replacing transitions.
+}
+
+void TransitionsAccessor::TraverseTransitionTreeInternal(
+ TraverseCallback callback, void* data, DisallowHeapAllocation* no_gc) {
+ switch (encoding()) {
+ case kPrototypeInfo:
+ case kUninitialized:
+ case kMigrationTarget:
+ break;
+ case kWeakRef: {
+ Map simple_target =
+ Map::cast(raw_transitions_->GetHeapObjectAssumeWeak());
+ TransitionsAccessor(isolate_, simple_target, no_gc)
+ .TraverseTransitionTreeInternal(callback, data, no_gc);
+ break;
+ }
+ case kFullTransitionArray: {
+ if (transitions().HasPrototypeTransitions()) {
+ WeakFixedArray proto_trans = transitions().GetPrototypeTransitions();
+ int length = TransitionArray::NumberOfPrototypeTransitions(proto_trans);
+ for (int i = 0; i < length; ++i) {
+ int index = TransitionArray::kProtoTransitionHeaderSize + i;
+ MaybeObject target = proto_trans.Get(index);
+ HeapObject heap_object;
+ if (target->GetHeapObjectIfWeak(&heap_object)) {
+ TransitionsAccessor(isolate_, Map::cast(heap_object), no_gc)
+ .TraverseTransitionTreeInternal(callback, data, no_gc);
+ } else {
+ DCHECK(target->IsCleared());
+ }
+ }
+ }
+ for (int i = 0; i < transitions().number_of_transitions(); ++i) {
+ TransitionsAccessor(isolate_, transitions().GetTarget(i), no_gc)
+ .TraverseTransitionTreeInternal(callback, data, no_gc);
+ }
+ break;
+ }
+ }
+ callback(map_, data);
+}
+
+#ifdef DEBUG
+void TransitionsAccessor::CheckNewTransitionsAreConsistent(
+ TransitionArray old_transitions, Object transitions) {
+ // This function only handles full transition arrays.
+ DCHECK_EQ(kFullTransitionArray, encoding());
+ TransitionArray new_transitions = TransitionArray::cast(transitions);
+ for (int i = 0; i < old_transitions.number_of_transitions(); i++) {
+ Map target = old_transitions.GetTarget(i);
+ if (target.instance_descriptors() == map_.instance_descriptors()) {
+ Name key = old_transitions.GetKey(i);
+ int new_target_index;
+ if (IsSpecialTransition(ReadOnlyRoots(isolate_), key)) {
+ new_target_index = new_transitions.SearchSpecial(Symbol::cast(key));
+ } else {
+ PropertyDetails details = GetTargetDetails(key, target);
+ new_target_index =
+ new_transitions.Search(details.kind(), key, details.attributes());
+ }
+ DCHECK_NE(TransitionArray::kNotFound, new_target_index);
+ DCHECK_EQ(target, new_transitions.GetTarget(new_target_index));
+ }
+ }
+}
+#endif
+
+// Private non-static helper functions (operating on full transition arrays).
+
+int TransitionArray::SearchDetails(int transition, PropertyKind kind,
+ PropertyAttributes attributes,
+ int* out_insertion_index) {
+ int nof_transitions = number_of_transitions();
+ DCHECK(transition < nof_transitions);
+ Name key = GetKey(transition);
+ for (; transition < nof_transitions && GetKey(transition) == key;
+ transition++) {
+ Map target = GetTarget(transition);
+ PropertyDetails target_details =
+ TransitionsAccessor::GetTargetDetails(key, target);
+
+ int cmp = CompareDetails(kind, attributes, target_details.kind(),
+ target_details.attributes());
+ if (cmp == 0) {
+ return transition;
+ } else if (cmp < 0) {
+ break;
+ }
+ }
+ if (out_insertion_index != nullptr) *out_insertion_index = transition;
+ return kNotFound;
+}
+
+Map TransitionArray::SearchDetailsAndGetTarget(int transition,
+ PropertyKind kind,
+ PropertyAttributes attributes) {
+ int nof_transitions = number_of_transitions();
+ DCHECK(transition < nof_transitions);
+ Name key = GetKey(transition);
+ for (; transition < nof_transitions && GetKey(transition) == key;
+ transition++) {
+ Map target = GetTarget(transition);
+ PropertyDetails target_details =
+ TransitionsAccessor::GetTargetDetails(key, target);
+
+ int cmp = CompareDetails(kind, attributes, target_details.kind(),
+ target_details.attributes());
+ if (cmp == 0) {
+ return target;
+ } else if (cmp < 0) {
+ break;
+ }
+ }
+ return Map();
+}
+
+int TransitionArray::Search(PropertyKind kind, Name name,
+ PropertyAttributes attributes,
+ int* out_insertion_index) {
+ int transition = SearchName(name, out_insertion_index);
+ if (transition == kNotFound) return kNotFound;
+ return SearchDetails(transition, kind, attributes, out_insertion_index);
+}
+
+Map TransitionArray::SearchAndGetTarget(PropertyKind kind, Name name,
+ PropertyAttributes attributes) {
+ int transition = SearchName(name, nullptr);
+ if (transition == kNotFound) {
+ return Map();
+ }
+ return SearchDetailsAndGetTarget(transition, kind, attributes);
+}
+
+void TransitionArray::Sort() {
+ DisallowHeapAllocation no_gc;
+ // In-place insertion sort.
+ int length = number_of_transitions();
+ ReadOnlyRoots roots = GetReadOnlyRoots();
+ for (int i = 1; i < length; i++) {
+ Name key = GetKey(i);
+ MaybeObject target = GetRawTarget(i);
+ PropertyKind kind = kData;
+ PropertyAttributes attributes = NONE;
+ if (!TransitionsAccessor::IsSpecialTransition(roots, key)) {
+ Map target_map = TransitionsAccessor::GetTargetFromRaw(target);
+ PropertyDetails details =
+ TransitionsAccessor::GetTargetDetails(key, target_map);
+ kind = details.kind();
+ attributes = details.attributes();
+ }
+ int j;
+ for (j = i - 1; j >= 0; j--) {
+ Name temp_key = GetKey(j);
+ MaybeObject temp_target = GetRawTarget(j);
+ PropertyKind temp_kind = kData;
+ PropertyAttributes temp_attributes = NONE;
+ if (!TransitionsAccessor::IsSpecialTransition(roots, temp_key)) {
+ Map temp_target_map =
+ TransitionsAccessor::GetTargetFromRaw(temp_target);
+ PropertyDetails details =
+ TransitionsAccessor::GetTargetDetails(temp_key, temp_target_map);
+ temp_kind = details.kind();
+ temp_attributes = details.attributes();
+ }
+ int cmp = CompareKeys(temp_key, temp_key.Hash(), temp_kind,
+ temp_attributes, key, key.Hash(), kind, attributes);
+ if (cmp > 0) {
+ SetKey(j + 1, temp_key);
+ SetRawTarget(j + 1, temp_target);
+ } else {
+ break;
+ }
+ }
+ SetKey(j + 1, key);
+ SetRawTarget(j + 1, target);
+ }
+ DCHECK(IsSortedNoDuplicates());
+}
+
+bool TransitionsAccessor::HasIntegrityLevelTransitionTo(
+ Map to, Symbol* out_symbol, PropertyAttributes* out_integrity_level) {
+ ReadOnlyRoots roots(isolate_);
+ if (SearchSpecial(roots.frozen_symbol()) == to) {
+ if (out_integrity_level) *out_integrity_level = FROZEN;
+ if (out_symbol) *out_symbol = roots.frozen_symbol();
+ } else if (SearchSpecial(roots.sealed_symbol()) == to) {
+ if (out_integrity_level) *out_integrity_level = SEALED;
+ if (out_symbol) *out_symbol = roots.sealed_symbol();
+ } else if (SearchSpecial(roots.nonextensible_symbol()) == to) {
+ if (out_integrity_level) *out_integrity_level = NONE;
+ if (out_symbol) *out_symbol = roots.nonextensible_symbol();
+ } else {
+ return false;
+ }
+ return true;
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/objects/transitions.h b/deps/v8/src/objects/transitions.h
new file mode 100644
index 0000000000..b4dadcc22a
--- /dev/null
+++ b/deps/v8/src/objects/transitions.h
@@ -0,0 +1,350 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_TRANSITIONS_H_
+#define V8_OBJECTS_TRANSITIONS_H_
+
+#include "src/common/checks.h"
+#include "src/objects/descriptor-array.h"
+#include "src/objects/elements-kind.h"
+#include "src/objects/map.h"
+#include "src/objects/maybe-object.h"
+#include "src/objects/name.h"
+#include "src/objects/objects.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+// TransitionsAccessor is a helper class to encapsulate access to the various
+// ways a Map can store transitions to other maps in its respective field at
+// Map::kTransitionsOrPrototypeInfo.
+// It caches state information internally, which becomes stale when a Map's
+// transitions storage changes or when a GC cycle clears dead transitions;
+// so while a TransitionsAccessor instance can be used for several read-only
+// operations in a row (provided no GC happens between them), it must be
+// discarded and recreated after "Insert" and "UpdateHandler" operations.
+//
+// Internal details: a Map's field either holds an in-place weak reference to a
+// transition target, or a StoreIC handler for a transitioning store (which in
+// turn points to its target map), or a TransitionArray for several target maps
+// and/or handlers as well as prototype and ElementsKind transitions. Property
+// details (and in case of inline target storage, the key) are retrieved from
+// the target map's descriptor array. Stored transitions are weak in the GC
+// sense: both single transitions stored inline and TransitionArray fields are
+// cleared when the map they refer to is not otherwise reachable.
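+//
+// A minimal usage sketch (illustrative only; assumes an Isolate* isolate, a
+// Map map, a Handle<Name> name and a Handle<Map> target are in scope):
+//
+//   DisallowHeapAllocation no_gc;
+//   TransitionsAccessor(isolate, map, &no_gc)
+//       .SearchTransition(*name, kData, NONE);  // Several reads are fine.
+//   TransitionsAccessor(isolate, handle(map, isolate))
+//       .Insert(name, target, SIMPLE_PROPERTY_TRANSITION);
+//   // The accessor used for Insert must now be discarded, not reused.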
+class V8_EXPORT_PRIVATE TransitionsAccessor {
+ public:
+ inline TransitionsAccessor(Isolate* isolate, Map map,
+ DisallowHeapAllocation* no_gc);
+ inline TransitionsAccessor(Isolate* isolate, Handle<Map> map);
+ // Insert a new transition into |map|'s transition array, extending it
+ // as necessary.
+ // Requires the constructor that takes a Handle<Map> to have been used.
+ // This TransitionsAccessor instance is unusable after this operation.
+ void Insert(Handle<Name> name, Handle<Map> target, SimpleTransitionFlag flag);
+
+ Map SearchTransition(Name name, PropertyKind kind,
+ PropertyAttributes attributes);
+
+ Map SearchSpecial(Symbol name);
+  // Returns true for non-property transitions like elements kind, or
+  // frozen/sealed transitions.
+ static bool IsSpecialTransition(ReadOnlyRoots roots, Name name);
+
+ enum RequestedLocation { kAnyLocation, kFieldOnly };
+ MaybeHandle<Map> FindTransitionToDataProperty(
+ Handle<Name> name, RequestedLocation requested_location = kAnyLocation);
+
+ MaybeHandle<Map> FindTransitionToField(Handle<Name> name) {
+ return FindTransitionToDataProperty(name, kFieldOnly);
+ }
+
+ inline Handle<String> ExpectedTransitionKey();
+ inline Handle<Map> ExpectedTransitionTarget();
+
+ int NumberOfTransitions();
+  // The size of transition arrays is limited so they do not end up in large
+ // object space. Otherwise ClearNonLiveReferences would leak memory while
+ // applying in-place right trimming.
+ static const int kMaxNumberOfTransitions = 1024 + 512;
+ bool CanHaveMoreTransitions();
+ inline Name GetKey(int transition_number);
+ inline Map GetTarget(int transition_number);
+ static inline PropertyDetails GetTargetDetails(Name name, Map target);
+
+ static bool IsMatchingMap(Map target, Name name, PropertyKind kind,
+ PropertyAttributes attributes);
+
+ bool HasIntegrityLevelTransitionTo(
+ Map to, Symbol* out_symbol = nullptr,
+ PropertyAttributes* out_integrity_level = nullptr);
+
+ // ===== ITERATION =====
+ using TraverseCallback = void (*)(Map map, void* data);
+
+ // Traverse the transition tree in postorder.
+ void TraverseTransitionTree(TraverseCallback callback, void* data) {
+ // Make sure that we do not allocate in the callback.
+ DisallowHeapAllocation no_allocation;
+ TraverseTransitionTreeInternal(callback, data, &no_allocation);
+ }
+
+ // ===== PROTOTYPE TRANSITIONS =====
+ // When you set the prototype of an object using the __proto__ accessor you
+ // need a new map for the object (the prototype is stored in the map). In
+ // order not to multiply maps unnecessarily we store these as transitions in
+ // the original map. That way we can transition to the same map if the same
+ // prototype is set, rather than creating a new map every time. The
+ // transitions are in the form of a map where the keys are prototype objects
+ // and the values are the maps they transition to.
+ void PutPrototypeTransition(Handle<Object> prototype, Handle<Map> target_map);
+ Handle<Map> GetPrototypeTransition(Handle<Object> prototype);
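+  //
+  // Illustrative sketch (assumes `isolate`, a `map` handle, a `prototype`
+  // handle and a freshly derived `new_map` are in scope): setting the same
+  // prototype again can reuse the cached target map instead of minting one:
+  //
+  //   TransitionsAccessor accessor(isolate, map);
+  //   Handle<Map> cached = accessor.GetPrototypeTransition(prototype);
+  //   if (cached.is_null()) {
+  //     accessor.PutPrototypeTransition(prototype, new_map);
+  //   }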
+
+ // During the first-time Map::Update and Map::TryUpdate, the migration target
+ // map could be cached in the raw_transitions slot of the old map that is
+ // deprecated from the map transition tree. The next time old map is updated,
+ // we will check this cache slot as a shortcut to get the migration target
+ // map.
+ void SetMigrationTarget(Map migration_target);
+ Map GetMigrationTarget();
+
+#if DEBUG || OBJECT_PRINT
+ void PrintTransitions(std::ostream& os);
+ static void PrintOneTransition(std::ostream& os, Name key, Map target);
+ void PrintTransitionTree();
+ void PrintTransitionTree(std::ostream& os, int level,
+ DisallowHeapAllocation* no_gc);
+#endif
+#if DEBUG
+ void CheckNewTransitionsAreConsistent(TransitionArray old_transitions,
+ Object transitions);
+ bool IsConsistentWithBackPointers();
+ bool IsSortedNoDuplicates();
+#endif
+
+ protected:
+ // Allow tests to use inheritance to access internals.
+ enum Encoding {
+ kPrototypeInfo,
+ kUninitialized,
+ kMigrationTarget,
+ kWeakRef,
+ kFullTransitionArray,
+ };
+
+ inline void Reload();
+
+ inline Encoding encoding() {
+ DCHECK(!needs_reload_);
+ return encoding_;
+ }
+
+ private:
+ friend class MarkCompactCollector; // For HasSimpleTransitionTo.
+ friend class TransitionArray;
+
+ static inline PropertyDetails GetSimpleTargetDetails(Map transition);
+
+ static inline Name GetSimpleTransitionKey(Map transition);
+
+ static inline Map GetTargetFromRaw(MaybeObject raw);
+
+ void MarkNeedsReload() {
+#if DEBUG
+ needs_reload_ = true;
+#endif
+ }
+
+ inline void Initialize();
+
+ inline Map GetSimpleTransition();
+ bool HasSimpleTransitionTo(Map map);
+
+ void ReplaceTransitions(MaybeObject new_transitions);
+
+ inline Map GetTargetMapFromWeakRef();
+
+ void EnsureHasFullTransitionArray();
+ void SetPrototypeTransitions(Handle<WeakFixedArray> proto_transitions);
+ WeakFixedArray GetPrototypeTransitions();
+
+ void TraverseTransitionTreeInternal(TraverseCallback callback, void* data,
+ DisallowHeapAllocation* no_gc);
+
+ inline TransitionArray transitions();
+
+ Isolate* isolate_;
+ Handle<Map> map_handle_;
+ Map map_;
+ MaybeObject raw_transitions_;
+ Encoding encoding_;
+#if DEBUG
+ bool needs_reload_;
+#endif
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(TransitionsAccessor);
+};
+
+// TransitionArrays are fixed arrays used to hold map transitions for property,
+// constant, and element changes.
+// The TransitionArray class exposes a very low-level interface. Most clients
+// should use TransitionsAccessors.
+// TransitionArrays have the following format:
+// [0] Smi(0) or WeakFixedArray of prototype transitions (strong ref)
+// [1] Number of transitions (can be zero after trimming)
+// [2] First transition key (strong ref)
+// [3] First transition target (weak ref)
+// ...
+// [2 + number of transitions * kEntrySize]: start of slack
+class TransitionArray : public WeakFixedArray {
+ public:
+ DECL_CAST(TransitionArray)
+
+ inline WeakFixedArray GetPrototypeTransitions();
+ inline bool HasPrototypeTransitions();
+
+ // Accessors for fetching instance transition at transition number.
+ inline void SetKey(int transition_number, Name value);
+ inline Name GetKey(int transition_number);
+ inline HeapObjectSlot GetKeySlot(int transition_number);
+
+ inline Map GetTarget(int transition_number);
+ inline void SetRawTarget(int transition_number, MaybeObject target);
+ inline MaybeObject GetRawTarget(int transition_number);
+ inline HeapObjectSlot GetTargetSlot(int transition_number);
+ inline bool GetTargetIfExists(int transition_number, Isolate* isolate,
+ Map* target);
+
+ // Required for templatized Search interface.
+ static constexpr int kNotFound = -1;
+
+ inline Name GetSortedKey(int transition_number);
+ int GetSortedKeyIndex(int transition_number) { return transition_number; }
+ inline int number_of_entries() const;
+#ifdef DEBUG
+ V8_EXPORT_PRIVATE bool IsSortedNoDuplicates(int valid_entries = -1);
+#endif
+
+ void Sort();
+
+ void PrintInternal(std::ostream& os);
+
+ DECL_PRINTER(TransitionArray)
+ DECL_VERIFIER(TransitionArray)
+
+ // Layout for full transition arrays.
+ static const int kPrototypeTransitionsIndex = 0;
+ static const int kTransitionLengthIndex = 1;
+ static const int kFirstIndex = 2;
+
+ // Layout of map transition entries in full transition arrays.
+ static const int kEntryKeyIndex = 0;
+ static const int kEntryTargetIndex = 1;
+ static const int kEntrySize = 2;
+
+ // Conversion from transition number to array indices.
+ static int ToKeyIndex(int transition_number) {
+ return kFirstIndex + (transition_number * kEntrySize) + kEntryKeyIndex;
+ }
+
+ static int ToTargetIndex(int transition_number) {
+ return kFirstIndex + (transition_number * kEntrySize) + kEntryTargetIndex;
+ }
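+
+  // For example, with kFirstIndex == 2 and kEntrySize == 2, transition #0
+  // occupies array slots [2] (key) and [3] (target), and transition #1
+  // occupies slots [4] and [5].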
+
+ inline int SearchNameForTesting(Name name,
+ int* out_insertion_index = nullptr);
+
+ private:
+ friend class Factory;
+ friend class MarkCompactCollector;
+ friend class TransitionsAccessor;
+
+ inline void SetNumberOfTransitions(int number_of_transitions);
+
+ inline int Capacity();
+
+ // ===== PROTOTYPE TRANSITIONS =====
+ // Cache format:
+ // 0: finger - index of the first free cell in the cache
+ // 1 + i: target map
+ static const int kProtoTransitionHeaderSize = 1;
+ static const int kMaxCachedPrototypeTransitions = 256;
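+
+  // For example, a cache currently holding two target maps looks like
+  //   [ Smi(2), weak(map_0), weak(map_1), undefined, ... ]
+  // where slot 0 is the finger and any freed trailing slots hold undefined.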
+
+ inline void SetPrototypeTransitions(WeakFixedArray prototype_transitions);
+
+ static inline int NumberOfPrototypeTransitions(
+ WeakFixedArray proto_transitions);
+ static void SetNumberOfPrototypeTransitions(WeakFixedArray proto_transitions,
+ int value);
+
+ static const int kProtoTransitionNumberOfEntriesOffset = 0;
+ STATIC_ASSERT(kProtoTransitionHeaderSize == 1);
+
+ // Returns the fixed array length required to hold number_of_transitions
+ // transitions.
+ static int LengthFor(int number_of_transitions) {
+ return ToKeyIndex(number_of_transitions);
+ }
+
+ // Search a transition for a given kind, property name and attributes.
+ int Search(PropertyKind kind, Name name, PropertyAttributes attributes,
+ int* out_insertion_index = nullptr);
+
+ Map SearchAndGetTarget(PropertyKind kind, Name name,
+ PropertyAttributes attributes);
+
+  // Search a non-property transition (like an elements kind or frozen/sealed
+  // transition).
+ inline int SearchSpecial(Symbol symbol, int* out_insertion_index = nullptr);
+  // Search the first transition for a given property name.
+ inline int SearchName(Name name, int* out_insertion_index = nullptr);
+ int SearchDetails(int transition, PropertyKind kind,
+ PropertyAttributes attributes, int* out_insertion_index);
+ Map SearchDetailsAndGetTarget(int transition, PropertyKind kind,
+ PropertyAttributes attributes);
+
+ inline int number_of_transitions() const;
+
+ static bool CompactPrototypeTransitionArray(Isolate* isolate,
+ WeakFixedArray array);
+
+ static Handle<WeakFixedArray> GrowPrototypeTransitionArray(
+ Handle<WeakFixedArray> array, int new_capacity, Isolate* isolate);
+
+ // Compares two tuples <key, kind, attributes>, returns -1 if
+ // tuple1 is "less" than tuple2, 0 if tuple1 equal to tuple2 and 1 otherwise.
+ static inline int CompareKeys(Name key1, uint32_t hash1, PropertyKind kind1,
+ PropertyAttributes attributes1, Name key2,
+ uint32_t hash2, PropertyKind kind2,
+ PropertyAttributes attributes2);
+
+ // Compares keys, returns -1 if key1 is "less" than key2,
+ // 0 if key1 equal to key2 and 1 otherwise.
+ static inline int CompareNames(Name key1, uint32_t hash1, Name key2,
+ uint32_t hash2);
+
+ // Compares two details, returns -1 if details1 is "less" than details2,
+ // 0 if details1 equal to details2 and 1 otherwise.
+ static inline int CompareDetails(PropertyKind kind1,
+ PropertyAttributes attributes1,
+ PropertyKind kind2,
+ PropertyAttributes attributes2);
+
+ inline void Set(int transition_number, Name key, MaybeObject target);
+
+ void Zap(Isolate* isolate);
+
+ OBJECT_CONSTRUCTORS(TransitionArray, WeakFixedArray);
+};
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_TRANSITIONS_H_
diff --git a/deps/v8/src/objects/type-hints.cc b/deps/v8/src/objects/type-hints.cc
new file mode 100644
index 0000000000..cb0a6a4ea9
--- /dev/null
+++ b/deps/v8/src/objects/type-hints.cc
@@ -0,0 +1,89 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/objects/type-hints.h"
+
+namespace v8 {
+namespace internal {
+
+std::ostream& operator<<(std::ostream& os, BinaryOperationHint hint) {
+ switch (hint) {
+ case BinaryOperationHint::kNone:
+ return os << "None";
+ case BinaryOperationHint::kSignedSmall:
+ return os << "SignedSmall";
+ case BinaryOperationHint::kSignedSmallInputs:
+ return os << "SignedSmallInputs";
+ case BinaryOperationHint::kSigned32:
+ return os << "Signed32";
+ case BinaryOperationHint::kNumber:
+ return os << "Number";
+ case BinaryOperationHint::kNumberOrOddball:
+ return os << "NumberOrOddball";
+ case BinaryOperationHint::kString:
+ return os << "String";
+ case BinaryOperationHint::kBigInt:
+ return os << "BigInt";
+ case BinaryOperationHint::kAny:
+ return os << "Any";
+ }
+ UNREACHABLE();
+}
+
+std::ostream& operator<<(std::ostream& os, CompareOperationHint hint) {
+ switch (hint) {
+ case CompareOperationHint::kNone:
+ return os << "None";
+ case CompareOperationHint::kSignedSmall:
+ return os << "SignedSmall";
+ case CompareOperationHint::kNumber:
+ return os << "Number";
+ case CompareOperationHint::kNumberOrOddball:
+ return os << "NumberOrOddball";
+ case CompareOperationHint::kInternalizedString:
+ return os << "InternalizedString";
+ case CompareOperationHint::kString:
+ return os << "String";
+ case CompareOperationHint::kSymbol:
+ return os << "Symbol";
+ case CompareOperationHint::kBigInt:
+ return os << "BigInt";
+ case CompareOperationHint::kReceiver:
+ return os << "Receiver";
+ case CompareOperationHint::kReceiverOrNullOrUndefined:
+ return os << "ReceiverOrNullOrUndefined";
+ case CompareOperationHint::kAny:
+ return os << "Any";
+ }
+ UNREACHABLE();
+}
+
+std::ostream& operator<<(std::ostream& os, ForInHint hint) {
+ switch (hint) {
+ case ForInHint::kNone:
+ return os << "None";
+ case ForInHint::kEnumCacheKeys:
+ return os << "EnumCacheKeys";
+ case ForInHint::kEnumCacheKeysAndIndices:
+ return os << "EnumCacheKeysAndIndices";
+ case ForInHint::kAny:
+ return os << "Any";
+ }
+ UNREACHABLE();
+}
+
+std::ostream& operator<<(std::ostream& os, const StringAddFlags& flags) {
+ switch (flags) {
+ case STRING_ADD_CHECK_NONE:
+ return os << "CheckNone";
+ case STRING_ADD_CONVERT_LEFT:
+ return os << "ConvertLeft";
+ case STRING_ADD_CONVERT_RIGHT:
+ return os << "ConvertRight";
+ }
+ UNREACHABLE();
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/objects/type-hints.h b/deps/v8/src/objects/type-hints.h
new file mode 100644
index 0000000000..1aa2709665
--- /dev/null
+++ b/deps/v8/src/objects/type-hints.h
@@ -0,0 +1,77 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_TYPE_HINTS_H_
+#define V8_OBJECTS_TYPE_HINTS_H_
+
+#include "src/base/flags.h"
+#include "src/utils/utils.h"
+
+namespace v8 {
+namespace internal {
+
+// Type hints for a binary operation.
+enum class BinaryOperationHint : uint8_t {
+ kNone,
+ kSignedSmall,
+ kSignedSmallInputs,
+ kSigned32,
+ kNumber,
+ kNumberOrOddball,
+ kString,
+ kBigInt,
+ kAny
+};
+
+inline size_t hash_value(BinaryOperationHint hint) {
+ return static_cast<unsigned>(hint);
+}
+
+std::ostream& operator<<(std::ostream&, BinaryOperationHint);
+
+// Type hints for a compare operation.
+enum class CompareOperationHint : uint8_t {
+ kNone,
+ kSignedSmall,
+ kNumber,
+ kNumberOrOddball,
+ kInternalizedString,
+ kString,
+ kSymbol,
+ kBigInt,
+ kReceiver,
+ kReceiverOrNullOrUndefined,
+ kAny
+};
+
+inline size_t hash_value(CompareOperationHint hint) {
+ return static_cast<unsigned>(hint);
+}
+
+std::ostream& operator<<(std::ostream&, CompareOperationHint);
+
+// Type hints for for..in statements.
+enum class ForInHint : uint8_t {
+ kNone,
+ kEnumCacheKeysAndIndices,
+ kEnumCacheKeys,
+ kAny
+};
+
+std::ostream& operator<<(std::ostream&, ForInHint);
+
+enum StringAddFlags {
+ // Omit both parameter checks.
+ STRING_ADD_CHECK_NONE,
+ // Convert parameters when check fails (instead of throwing an exception).
+ STRING_ADD_CONVERT_LEFT,
+ STRING_ADD_CONVERT_RIGHT,
+};
+
+std::ostream& operator<<(std::ostream& os, const StringAddFlags& flags);
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_OBJECTS_TYPE_HINTS_H_
diff --git a/deps/v8/src/objects/value-serializer.cc b/deps/v8/src/objects/value-serializer.cc
new file mode 100644
index 0000000000..331a12b157
--- /dev/null
+++ b/deps/v8/src/objects/value-serializer.cc
@@ -0,0 +1,2213 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/objects/value-serializer.h"
+
+#include <type_traits>
+
+#include "include/v8-value-serializer-version.h"
+#include "src/api/api-inl.h"
+#include "src/base/logging.h"
+#include "src/execution/isolate.h"
+#include "src/flags/flags.h"
+#include "src/handles/handles-inl.h"
+#include "src/handles/maybe-handles-inl.h"
+#include "src/heap/factory.h"
+#include "src/numbers/conversions.h"
+#include "src/objects/heap-number-inl.h"
+#include "src/objects/js-array-inl.h"
+#include "src/objects/js-collection-inl.h"
+#include "src/objects/js-regexp-inl.h"
+#include "src/objects/objects-inl.h"
+#include "src/objects/oddball-inl.h"
+#include "src/objects/ordered-hash-table-inl.h"
+#include "src/objects/smi.h"
+#include "src/objects/transitions-inl.h"
+#include "src/snapshot/code-serializer.h"
+#include "src/wasm/wasm-engine.h"
+#include "src/wasm/wasm-objects-inl.h"
+#include "src/wasm/wasm-result.h"
+#include "src/wasm/wasm-serialization.h"
+
+namespace v8 {
+namespace internal {
+
+// Version 9: (imported from Blink)
+// Version 10: one-byte (Latin-1) strings
+// Version 11: properly separate undefined from the hole in arrays
+// Version 12: regexp and string objects share normal string encoding
+// Version 13: host objects have an explicit tag (rather than handling all
+// unknown tags)
+//
+// WARNING: Increasing this value is a change which cannot safely be rolled
+// back without breaking compatibility with data stored on disk. It is
+// strongly recommended that you do not make such changes near a release
+// milestone branch point.
+//
+// Recent changes are routinely reverted in preparation for branch, and this
+// has been the cause of at least one bug in the past.
+static const uint32_t kLatestVersion = 13;
+static_assert(kLatestVersion == v8::CurrentValueSerializerFormatVersion(),
+ "Exported format version must match latest version.");
+
+static const int kPretenureThreshold = 100 * KB;
+
+template <typename T>
+static size_t BytesNeededForVarint(T value) {
+ static_assert(std::is_integral<T>::value && std::is_unsigned<T>::value,
+ "Only unsigned integer types can be written as varints.");
+ size_t result = 0;
+ do {
+ result++;
+ value >>= 7;
+ } while (value);
+ return result;
+}
+
+// Note that some additional tag values are defined in Blink's
+// Source/bindings/core/v8/serialization/SerializationTag.h, which must
+// not clash with values defined here.
+enum class SerializationTag : uint8_t {
+ // version:uint32_t (if at beginning of data, sets version > 0)
+ kVersion = 0xFF,
+ // ignore
+ kPadding = '\0',
+ // refTableSize:uint32_t (previously used for sanity checks; safe to ignore)
+ kVerifyObjectCount = '?',
+ // Oddballs (no data).
+ kTheHole = '-',
+ kUndefined = '_',
+ kNull = '0',
+ kTrue = 'T',
+ kFalse = 'F',
+ // Number represented as 32-bit integer, ZigZag-encoded
+ // (like sint32 in protobuf)
+ kInt32 = 'I',
+ // Number represented as 32-bit unsigned integer, varint-encoded
+ // (like uint32 in protobuf)
+ kUint32 = 'U',
+ // Number represented as a 64-bit double.
+ // Host byte order is used (N.B. this makes the format non-portable).
+ kDouble = 'N',
+ // BigInt. Bitfield:uint32_t, then raw digits storage.
+ kBigInt = 'Z',
+ // byteLength:uint32_t, then raw data
+ kUtf8String = 'S',
+ kOneByteString = '"',
+ kTwoByteString = 'c',
+ // Reference to a serialized object. objectID:uint32_t
+ kObjectReference = '^',
+ // Beginning of a JS object.
+ kBeginJSObject = 'o',
+ // End of a JS object. numProperties:uint32_t
+ kEndJSObject = '{',
+ // Beginning of a sparse JS array. length:uint32_t
+ // Elements and properties are written as key/value pairs, like objects.
+ kBeginSparseJSArray = 'a',
+ // End of a sparse JS array. numProperties:uint32_t length:uint32_t
+ kEndSparseJSArray = '@',
+ // Beginning of a dense JS array. length:uint32_t
+ // |length| elements, followed by properties as key/value pairs
+ kBeginDenseJSArray = 'A',
+ // End of a dense JS array. numProperties:uint32_t length:uint32_t
+ kEndDenseJSArray = '$',
+ // Date. millisSinceEpoch:double
+ kDate = 'D',
+ // Boolean object. No data.
+ kTrueObject = 'y',
+ kFalseObject = 'x',
+ // Number object. value:double
+ kNumberObject = 'n',
+ // BigInt object. Bitfield:uint32_t, then raw digits storage.
+ kBigIntObject = 'z',
+ // String object, UTF-8 encoding. byteLength:uint32_t, then raw data.
+ kStringObject = 's',
+ // Regular expression, UTF-8 encoding. byteLength:uint32_t, raw data,
+ // flags:uint32_t.
+ kRegExp = 'R',
+ // Beginning of a JS map.
+ kBeginJSMap = ';',
+ // End of a JS map. length:uint32_t.
+ kEndJSMap = ':',
+ // Beginning of a JS set.
+ kBeginJSSet = '\'',
+ // End of a JS set. length:uint32_t.
+ kEndJSSet = ',',
+ // Array buffer. byteLength:uint32_t, then raw data.
+ kArrayBuffer = 'B',
+ // Array buffer (transferred). transferID:uint32_t
+ kArrayBufferTransfer = 't',
+ // View into an array buffer.
+ // subtag:ArrayBufferViewTag, byteOffset:uint32_t, byteLength:uint32_t
+ // For typed arrays, byteOffset and byteLength must be divisible by the size
+ // of the element.
+ // Note: kArrayBufferView is special, and should have an ArrayBuffer (or an
+ // ObjectReference to one) serialized just before it. This is a quirk arising
+ // from the previous stack-based implementation.
+ kArrayBufferView = 'V',
+ // Shared array buffer. transferID:uint32_t
+ kSharedArrayBuffer = 'u',
+ // Compiled WebAssembly module. encodingType:(one-byte tag).
+ // If encodingType == 'y' (raw bytes):
+ // wasmWireByteLength:uint32_t, then raw data
+ // compiledDataLength:uint32_t, then raw data
+ kWasmModule = 'W',
+  // A wasm module object transfer. The next value is its index.
+ kWasmModuleTransfer = 'w',
+ // The delegate is responsible for processing all following data.
+ // This "escapes" to whatever wire format the delegate chooses.
+ kHostObject = '\\',
+  // A transferred WebAssembly.Memory object. maximumPages:int32_t, followed
+  // by the SharedArrayBuffer tag and its data.
+ kWasmMemoryTransfer = 'm',
+};
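+
+// As a hand-decoded example of the format above: serializing the single value
+// 42 at format version 13 yields the bytes 0xFF 0x0D 0x49 0x54, i.e.
+// kVersion, varint(13), kInt32, zigzag(42) == varint(84).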
+
+namespace {
+
+enum class ArrayBufferViewTag : uint8_t {
+ kInt8Array = 'b',
+ kUint8Array = 'B',
+ kUint8ClampedArray = 'C',
+ kInt16Array = 'w',
+ kUint16Array = 'W',
+ kInt32Array = 'd',
+ kUint32Array = 'D',
+ kFloat32Array = 'f',
+ kFloat64Array = 'F',
+ kBigInt64Array = 'q',
+ kBigUint64Array = 'Q',
+ kDataView = '?',
+};
+
+enum class WasmEncodingTag : uint8_t {
+ kRawBytes = 'y',
+};
+
+} // namespace
+
+ValueSerializer::ValueSerializer(Isolate* isolate,
+ v8::ValueSerializer::Delegate* delegate)
+ : isolate_(isolate),
+ delegate_(delegate),
+ zone_(isolate->allocator(), ZONE_NAME),
+ id_map_(isolate->heap(), ZoneAllocationPolicy(&zone_)),
+ array_buffer_transfer_map_(isolate->heap(),
+ ZoneAllocationPolicy(&zone_)) {}
+
+ValueSerializer::~ValueSerializer() {
+ if (buffer_) {
+ if (delegate_) {
+ delegate_->FreeBufferMemory(buffer_);
+ } else {
+ free(buffer_);
+ }
+ }
+}
+
+void ValueSerializer::WriteHeader() {
+ WriteTag(SerializationTag::kVersion);
+ WriteVarint(kLatestVersion);
+}
+
+void ValueSerializer::SetTreatArrayBufferViewsAsHostObjects(bool mode) {
+ treat_array_buffer_views_as_host_objects_ = mode;
+}
+
+void ValueSerializer::WriteTag(SerializationTag tag) {
+ uint8_t raw_tag = static_cast<uint8_t>(tag);
+ WriteRawBytes(&raw_tag, sizeof(raw_tag));
+}
+
+template <typename T>
+void ValueSerializer::WriteVarint(T value) {
+ // Writes an unsigned integer as a base-128 varint.
+ // The number is written, 7 bits at a time, from the least significant to the
+ // most significant 7 bits. Each byte, except the last, has the MSB set.
+ // See also https://developers.google.com/protocol-buffers/docs/encoding
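+  // For example, 300 (binary 10'0101100) is written as the bytes 0xAC 0x02.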
+ static_assert(std::is_integral<T>::value && std::is_unsigned<T>::value,
+ "Only unsigned integer types can be written as varints.");
+ uint8_t stack_buffer[sizeof(T) * 8 / 7 + 1];
+ uint8_t* next_byte = &stack_buffer[0];
+ do {
+ *next_byte = (value & 0x7F) | 0x80;
+ next_byte++;
+ value >>= 7;
+ } while (value);
+ *(next_byte - 1) &= 0x7F;
+ WriteRawBytes(stack_buffer, next_byte - stack_buffer);
+}
+
+template <typename T>
+void ValueSerializer::WriteZigZag(T value) {
+ // Writes a signed integer as a varint using ZigZag encoding (i.e. 0 is
+ // encoded as 0, -1 as 1, 1 as 2, -2 as 3, and so on).
+ // See also https://developers.google.com/protocol-buffers/docs/encoding
+ // Note that this implementation relies on the right shift being arithmetic.
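+  // For example, for T == int32_t, -5 is encoded as the varint 9 and 5 as the
+  // varint 10: the sign moves into the least significant bit.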
+ static_assert(std::is_integral<T>::value && std::is_signed<T>::value,
+ "Only signed integer types can be written as zigzag.");
+ using UnsignedT = typename std::make_unsigned<T>::type;
+ WriteVarint((static_cast<UnsignedT>(value) << 1) ^
+ (value >> (8 * sizeof(T) - 1)));
+}
+
+void ValueSerializer::WriteDouble(double value) {
+ // Warning: this uses host endianness.
+ WriteRawBytes(&value, sizeof(value));
+}
+
+void ValueSerializer::WriteOneByteString(Vector<const uint8_t> chars) {
+ WriteVarint<uint32_t>(chars.length());
+ WriteRawBytes(chars.begin(), chars.length() * sizeof(uint8_t));
+}
+
+void ValueSerializer::WriteTwoByteString(Vector<const uc16> chars) {
+ // Warning: this uses host endianness.
+ WriteVarint<uint32_t>(chars.length() * sizeof(uc16));
+ WriteRawBytes(chars.begin(), chars.length() * sizeof(uc16));
+}
+
+void ValueSerializer::WriteBigIntContents(BigInt bigint) {
+ uint32_t bitfield = bigint.GetBitfieldForSerialization();
+ int bytelength = BigInt::DigitsByteLengthForBitfield(bitfield);
+ WriteVarint<uint32_t>(bitfield);
+ uint8_t* dest;
+ if (ReserveRawBytes(bytelength).To(&dest)) {
+ bigint.SerializeDigits(dest);
+ }
+}
+
+void ValueSerializer::WriteRawBytes(const void* source, size_t length) {
+ uint8_t* dest;
+ if (ReserveRawBytes(length).To(&dest) && length > 0) {
+ memcpy(dest, source, length);
+ }
+}
+
+Maybe<uint8_t*> ValueSerializer::ReserveRawBytes(size_t bytes) {
+ size_t old_size = buffer_size_;
+ size_t new_size = old_size + bytes;
+ if (V8_UNLIKELY(new_size > buffer_capacity_)) {
+ bool ok;
+ if (!ExpandBuffer(new_size).To(&ok)) {
+ return Nothing<uint8_t*>();
+ }
+ }
+ buffer_size_ = new_size;
+ return Just(&buffer_[old_size]);
+}
+
+Maybe<bool> ValueSerializer::ExpandBuffer(size_t required_capacity) {
+ DCHECK_GT(required_capacity, buffer_capacity_);
+ size_t requested_capacity =
+ std::max(required_capacity, buffer_capacity_ * 2) + 64;
+ size_t provided_capacity = 0;
+ void* new_buffer = nullptr;
+ if (delegate_) {
+ new_buffer = delegate_->ReallocateBufferMemory(buffer_, requested_capacity,
+ &provided_capacity);
+ } else {
+ new_buffer = realloc(buffer_, requested_capacity);
+ provided_capacity = requested_capacity;
+ }
+ if (new_buffer) {
+ DCHECK(provided_capacity >= requested_capacity);
+ buffer_ = reinterpret_cast<uint8_t*>(new_buffer);
+ buffer_capacity_ = provided_capacity;
+ return Just(true);
+ } else {
+ out_of_memory_ = true;
+ return Nothing<bool>();
+ }
+}
+
+void ValueSerializer::WriteUint32(uint32_t value) {
+ WriteVarint<uint32_t>(value);
+}
+
+void ValueSerializer::WriteUint64(uint64_t value) {
+ WriteVarint<uint64_t>(value);
+}
+
+std::pair<uint8_t*, size_t> ValueSerializer::Release() {
+ auto result = std::make_pair(buffer_, buffer_size_);
+ buffer_ = nullptr;
+ buffer_size_ = 0;
+ buffer_capacity_ = 0;
+ return result;
+}
+
+void ValueSerializer::TransferArrayBuffer(uint32_t transfer_id,
+ Handle<JSArrayBuffer> array_buffer) {
+ DCHECK(!array_buffer_transfer_map_.Find(array_buffer));
+ DCHECK(!array_buffer->is_shared());
+ array_buffer_transfer_map_.Set(array_buffer, transfer_id);
+}
+
+Maybe<bool> ValueSerializer::WriteObject(Handle<Object> object) {
+ // There is no sense in trying to proceed if we've previously run out of
+ // memory. Bail immediately, as this likely implies that some write has
+ // previously failed and so the buffer is corrupt.
+ if (V8_UNLIKELY(out_of_memory_)) return ThrowIfOutOfMemory();
+
+ if (object->IsSmi()) {
+ WriteSmi(Smi::cast(*object));
+ return ThrowIfOutOfMemory();
+ }
+
+ DCHECK(object->IsHeapObject());
+ switch (HeapObject::cast(*object).map().instance_type()) {
+ case ODDBALL_TYPE:
+ WriteOddball(Oddball::cast(*object));
+ return ThrowIfOutOfMemory();
+ case HEAP_NUMBER_TYPE:
+ WriteHeapNumber(HeapNumber::cast(*object));
+ return ThrowIfOutOfMemory();
+ case MUTABLE_HEAP_NUMBER_TYPE:
+ WriteMutableHeapNumber(MutableHeapNumber::cast(*object));
+ return ThrowIfOutOfMemory();
+ case BIGINT_TYPE:
+ WriteBigInt(BigInt::cast(*object));
+ return ThrowIfOutOfMemory();
+ case JS_TYPED_ARRAY_TYPE:
+ case JS_DATA_VIEW_TYPE: {
+ // Despite being JSReceivers, these have their wrapped buffer serialized
+ // first. That makes this logic a little quirky, because it needs to
+ // happen before we assign object IDs.
+ // TODO(jbroman): It may be possible to avoid materializing a typed
+ // array's buffer here.
+ Handle<JSArrayBufferView> view = Handle<JSArrayBufferView>::cast(object);
+ if (!id_map_.Find(view) && !treat_array_buffer_views_as_host_objects_) {
+ Handle<JSArrayBuffer> buffer(
+ view->IsJSTypedArray()
+ ? Handle<JSTypedArray>::cast(view)->GetBuffer()
+ : handle(JSArrayBuffer::cast(view->buffer()), isolate_));
+ if (!WriteJSReceiver(buffer).FromMaybe(false)) return Nothing<bool>();
+ }
+ return WriteJSReceiver(view);
+ }
+ default:
+ if (object->IsString()) {
+ WriteString(Handle<String>::cast(object));
+ return ThrowIfOutOfMemory();
+ } else if (object->IsJSReceiver()) {
+ return WriteJSReceiver(Handle<JSReceiver>::cast(object));
+ } else {
+ ThrowDataCloneError(MessageTemplate::kDataCloneError, object);
+ return Nothing<bool>();
+ }
+ }
+}
+
+void ValueSerializer::WriteOddball(Oddball oddball) {
+ SerializationTag tag = SerializationTag::kUndefined;
+ switch (oddball.kind()) {
+ case Oddball::kUndefined:
+ tag = SerializationTag::kUndefined;
+ break;
+ case Oddball::kFalse:
+ tag = SerializationTag::kFalse;
+ break;
+ case Oddball::kTrue:
+ tag = SerializationTag::kTrue;
+ break;
+ case Oddball::kNull:
+ tag = SerializationTag::kNull;
+ break;
+ default:
+ UNREACHABLE();
+ }
+ WriteTag(tag);
+}
+
+void ValueSerializer::WriteSmi(Smi smi) {
+ static_assert(kSmiValueSize <= 32, "Expected SMI <= 32 bits.");
+ WriteTag(SerializationTag::kInt32);
+ WriteZigZag<int32_t>(smi.value());
+}
+
+void ValueSerializer::WriteHeapNumber(HeapNumber number) {
+ WriteTag(SerializationTag::kDouble);
+ WriteDouble(number.value());
+}
+
+void ValueSerializer::WriteMutableHeapNumber(MutableHeapNumber number) {
+ WriteTag(SerializationTag::kDouble);
+ WriteDouble(number.value());
+}
+
+void ValueSerializer::WriteBigInt(BigInt bigint) {
+ WriteTag(SerializationTag::kBigInt);
+ WriteBigIntContents(bigint);
+}
+
+void ValueSerializer::WriteString(Handle<String> string) {
+ string = String::Flatten(isolate_, string);
+ DisallowHeapAllocation no_gc;
+ String::FlatContent flat = string->GetFlatContent(no_gc);
+ DCHECK(flat.IsFlat());
+ if (flat.IsOneByte()) {
+ Vector<const uint8_t> chars = flat.ToOneByteVector();
+ WriteTag(SerializationTag::kOneByteString);
+ WriteOneByteString(chars);
+ } else if (flat.IsTwoByte()) {
+ Vector<const uc16> chars = flat.ToUC16Vector();
+ uint32_t byte_length = chars.length() * sizeof(uc16);
+    // The existing reading code expects 16-bit strings to be aligned, so pad
+    // if the two-byte payload would otherwise start at an odd offset.
+ if ((buffer_size_ + 1 + BytesNeededForVarint(byte_length)) & 1)
+ WriteTag(SerializationTag::kPadding);
+ WriteTag(SerializationTag::kTwoByteString);
+ WriteTwoByteString(chars);
+ } else {
+ UNREACHABLE();
+ }
+}
+
+Maybe<bool> ValueSerializer::WriteJSReceiver(Handle<JSReceiver> receiver) {
+ // If the object has already been serialized, just write its ID.
+ uint32_t* id_map_entry = id_map_.Get(receiver);
+ if (uint32_t id = *id_map_entry) {
+ WriteTag(SerializationTag::kObjectReference);
+ WriteVarint(id - 1);
+ return ThrowIfOutOfMemory();
+ }
+
+ // Otherwise, allocate an ID for it.
+ uint32_t id = next_id_++;
+ *id_map_entry = id + 1;
+
+ // Eliminate callable and exotic objects, which should not be serialized.
+ InstanceType instance_type = receiver->map().instance_type();
+ if (receiver->IsCallable() || (IsSpecialReceiverInstanceType(instance_type) &&
+ instance_type != JS_SPECIAL_API_OBJECT_TYPE)) {
+ ThrowDataCloneError(MessageTemplate::kDataCloneError, receiver);
+ return Nothing<bool>();
+ }
+
+ // If we are at the end of the stack, abort. This function may recurse.
+ STACK_CHECK(isolate_, Nothing<bool>());
+
+ HandleScope scope(isolate_);
+ switch (instance_type) {
+ case JS_ARRAY_TYPE:
+ return WriteJSArray(Handle<JSArray>::cast(receiver));
+ case JS_OBJECT_TYPE:
+ case JS_API_OBJECT_TYPE: {
+ Handle<JSObject> js_object = Handle<JSObject>::cast(receiver);
+ if (JSObject::GetEmbedderFieldCount(js_object->map())) {
+ return WriteHostObject(js_object);
+ } else {
+ return WriteJSObject(js_object);
+ }
+ }
+ case JS_SPECIAL_API_OBJECT_TYPE:
+ return WriteHostObject(Handle<JSObject>::cast(receiver));
+ case JS_DATE_TYPE:
+ WriteJSDate(JSDate::cast(*receiver));
+ return ThrowIfOutOfMemory();
+ case JS_VALUE_TYPE:
+ return WriteJSValue(Handle<JSValue>::cast(receiver));
+ case JS_REGEXP_TYPE:
+ WriteJSRegExp(JSRegExp::cast(*receiver));
+ return ThrowIfOutOfMemory();
+ case JS_MAP_TYPE:
+ return WriteJSMap(Handle<JSMap>::cast(receiver));
+ case JS_SET_TYPE:
+ return WriteJSSet(Handle<JSSet>::cast(receiver));
+ case JS_ARRAY_BUFFER_TYPE:
+ return WriteJSArrayBuffer(Handle<JSArrayBuffer>::cast(receiver));
+ case JS_TYPED_ARRAY_TYPE:
+ case JS_DATA_VIEW_TYPE:
+ return WriteJSArrayBufferView(JSArrayBufferView::cast(*receiver));
+ case WASM_MODULE_TYPE: {
+ auto enabled_features = wasm::WasmFeaturesFromIsolate(isolate_);
+ if (!FLAG_wasm_disable_structured_cloning || enabled_features.threads) {
+ // Only write WebAssembly modules if not disabled by a flag.
+ return WriteWasmModule(Handle<WasmModuleObject>::cast(receiver));
+ }
+ break;
+ }
+ case WASM_MEMORY_TYPE: {
+ auto enabled_features = wasm::WasmFeaturesFromIsolate(isolate_);
+ if (enabled_features.threads) {
+ return WriteWasmMemory(Handle<WasmMemoryObject>::cast(receiver));
+ }
+ break;
+ }
+ default:
+ break;
+ }
+
+ ThrowDataCloneError(MessageTemplate::kDataCloneError, receiver);
+ return Nothing<bool>();
+}
+
+Maybe<bool> ValueSerializer::WriteJSObject(Handle<JSObject> object) {
+ DCHECK(!object->map().IsCustomElementsReceiverMap());
+ const bool can_serialize_fast =
+ object->HasFastProperties() && object->elements().length() == 0;
+ if (!can_serialize_fast) return WriteJSObjectSlow(object);
+
+ Handle<Map> map(object->map(), isolate_);
+ WriteTag(SerializationTag::kBeginJSObject);
+
+ // Write out fast properties as long as they are only data properties and the
+ // map doesn't change.
+ uint32_t properties_written = 0;
+ bool map_changed = false;
+ for (int i = 0; i < map->NumberOfOwnDescriptors(); i++) {
+ Handle<Name> key(map->instance_descriptors().GetKey(i), isolate_);
+ if (!key->IsString()) continue;
+ PropertyDetails details = map->instance_descriptors().GetDetails(i);
+ if (details.IsDontEnum()) continue;
+
+ Handle<Object> value;
+    if (V8_LIKELY(!map_changed)) map_changed = *map != object->map();
+ if (V8_LIKELY(!map_changed && details.location() == kField)) {
+ DCHECK_EQ(kData, details.kind());
+ FieldIndex field_index = FieldIndex::ForDescriptor(*map, i);
+ value = JSObject::FastPropertyAt(object, details.representation(),
+ field_index);
+ } else {
+ // This logic should essentially match WriteJSObjectPropertiesSlow.
+ // If the property is no longer found, do not serialize it.
+ // This could happen if a getter deleted the property.
+ LookupIterator it(isolate_, object, key, LookupIterator::OWN);
+ if (!it.IsFound()) continue;
+ if (!Object::GetProperty(&it).ToHandle(&value)) return Nothing<bool>();
+ }
+
+ if (!WriteObject(key).FromMaybe(false) ||
+ !WriteObject(value).FromMaybe(false)) {
+ return Nothing<bool>();
+ }
+ properties_written++;
+ }
+
+ WriteTag(SerializationTag::kEndJSObject);
+ WriteVarint<uint32_t>(properties_written);
+ return ThrowIfOutOfMemory();
+}
+
+Maybe<bool> ValueSerializer::WriteJSObjectSlow(Handle<JSObject> object) {
+ WriteTag(SerializationTag::kBeginJSObject);
+ Handle<FixedArray> keys;
+ uint32_t properties_written = 0;
+ if (!KeyAccumulator::GetKeys(object, KeyCollectionMode::kOwnOnly,
+ ENUMERABLE_STRINGS)
+ .ToHandle(&keys) ||
+ !WriteJSObjectPropertiesSlow(object, keys).To(&properties_written)) {
+ return Nothing<bool>();
+ }
+ WriteTag(SerializationTag::kEndJSObject);
+ WriteVarint<uint32_t>(properties_written);
+ return ThrowIfOutOfMemory();
+}
+
+Maybe<bool> ValueSerializer::WriteJSArray(Handle<JSArray> array) {
+ uint32_t length = 0;
+ bool valid_length = array->length().ToArrayLength(&length);
+ DCHECK(valid_length);
+ USE(valid_length);
+
+ // To keep things simple, for now we decide between dense and sparse
+ // serialization based on elements kind. A more principled heuristic could
+ // count the elements, but would need to take care to note which indices
+ // existed (as only indices which were enumerable own properties at this point
+ // should be serialized).
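+  // For example, [1, 2, 3] (PACKED_SMI_ELEMENTS) takes the dense path below,
+  // while an array whose elements kind became holey (e.g. after a delete) is
+  // written in the sparse format.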
+ const bool should_serialize_densely =
+ array->HasFastElements() && !array->HasHoleyElements();
+
+ if (should_serialize_densely) {
+ DCHECK_LE(length, static_cast<uint32_t>(FixedArray::kMaxLength));
+ WriteTag(SerializationTag::kBeginDenseJSArray);
+ WriteVarint<uint32_t>(length);
+ uint32_t i = 0;
+
+ // Fast paths. Note that PACKED_ELEMENTS in particular can bail due to the
+ // structure of the elements changing.
+ switch (array->GetElementsKind()) {
+ case PACKED_SMI_ELEMENTS: {
+ Handle<FixedArray> elements(FixedArray::cast(array->elements()),
+ isolate_);
+ for (; i < length; i++) WriteSmi(Smi::cast(elements->get(i)));
+ break;
+ }
+ case PACKED_DOUBLE_ELEMENTS: {
+ // Elements are empty_fixed_array, not a FixedDoubleArray, if the array
+ // is empty. No elements to encode in this case anyhow.
+ if (length == 0) break;
+ Handle<FixedDoubleArray> elements(
+ FixedDoubleArray::cast(array->elements()), isolate_);
+ for (; i < length; i++) {
+ WriteTag(SerializationTag::kDouble);
+ WriteDouble(elements->get_scalar(i));
+ }
+ break;
+ }
+ case PACKED_ELEMENTS: {
+ Handle<Object> old_length(array->length(), isolate_);
+ for (; i < length; i++) {
+ if (array->length() != *old_length ||
+ array->GetElementsKind() != PACKED_ELEMENTS) {
+ // Fall back to slow path.
+ break;
+ }
+ Handle<Object> element(FixedArray::cast(array->elements()).get(i),
+ isolate_);
+ if (!WriteObject(element).FromMaybe(false)) return Nothing<bool>();
+ }
+ break;
+ }
+ default:
+ break;
+ }
+
+ // If there are elements remaining, serialize them slowly.
+ for (; i < length; i++) {
+      // Serializing the array's elements can have arbitrary side effects, so
+      // we cannot rely on the array still having fast elements, even if it
+      // did to begin with.
+ Handle<Object> element;
+ LookupIterator it(isolate_, array, i, array, LookupIterator::OWN);
+ if (!it.IsFound()) {
+ // This can happen in the case where an array that was originally dense
+ // became sparse during serialization. It's too late to switch to the
+ // sparse format, but we can mark the elements as absent.
+ WriteTag(SerializationTag::kTheHole);
+ continue;
+ }
+ if (!Object::GetProperty(&it).ToHandle(&element) ||
+ !WriteObject(element).FromMaybe(false)) {
+ return Nothing<bool>();
+ }
+ }
+
+ KeyAccumulator accumulator(isolate_, KeyCollectionMode::kOwnOnly,
+ ENUMERABLE_STRINGS);
+ if (!accumulator.CollectOwnPropertyNames(array, array).FromMaybe(false)) {
+ return Nothing<bool>();
+ }
+ Handle<FixedArray> keys =
+ accumulator.GetKeys(GetKeysConversion::kConvertToString);
+ uint32_t properties_written;
+ if (!WriteJSObjectPropertiesSlow(array, keys).To(&properties_written)) {
+ return Nothing<bool>();
+ }
+ WriteTag(SerializationTag::kEndDenseJSArray);
+ WriteVarint<uint32_t>(properties_written);
+ WriteVarint<uint32_t>(length);
+ } else {
+ WriteTag(SerializationTag::kBeginSparseJSArray);
+ WriteVarint<uint32_t>(length);
+ Handle<FixedArray> keys;
+ uint32_t properties_written = 0;
+ if (!KeyAccumulator::GetKeys(array, KeyCollectionMode::kOwnOnly,
+ ENUMERABLE_STRINGS)
+ .ToHandle(&keys) ||
+ !WriteJSObjectPropertiesSlow(array, keys).To(&properties_written)) {
+ return Nothing<bool>();
+ }
+ WriteTag(SerializationTag::kEndSparseJSArray);
+ WriteVarint<uint32_t>(properties_written);
+ WriteVarint<uint32_t>(length);
+ }
+ return ThrowIfOutOfMemory();
+}
+
+void ValueSerializer::WriteJSDate(JSDate date) {
+ WriteTag(SerializationTag::kDate);
+ WriteDouble(date.value().Number());
+}
+
+Maybe<bool> ValueSerializer::WriteJSValue(Handle<JSValue> value) {
+ Object inner_value = value->value();
+ if (inner_value.IsTrue(isolate_)) {
+ WriteTag(SerializationTag::kTrueObject);
+ } else if (inner_value.IsFalse(isolate_)) {
+ WriteTag(SerializationTag::kFalseObject);
+ } else if (inner_value.IsNumber()) {
+ WriteTag(SerializationTag::kNumberObject);
+ WriteDouble(inner_value.Number());
+ } else if (inner_value.IsBigInt()) {
+ WriteTag(SerializationTag::kBigIntObject);
+ WriteBigIntContents(BigInt::cast(inner_value));
+ } else if (inner_value.IsString()) {
+ WriteTag(SerializationTag::kStringObject);
+ WriteString(handle(String::cast(inner_value), isolate_));
+ } else {
+ DCHECK(inner_value.IsSymbol());
+ ThrowDataCloneError(MessageTemplate::kDataCloneError, value);
+ return Nothing<bool>();
+ }
+ return ThrowIfOutOfMemory();
+}
+
+void ValueSerializer::WriteJSRegExp(JSRegExp regexp) {
+ WriteTag(SerializationTag::kRegExp);
+ WriteString(handle(regexp.Pattern(), isolate_));
+ WriteVarint(static_cast<uint32_t>(regexp.GetFlags()));
+}
+
+Maybe<bool> ValueSerializer::WriteJSMap(Handle<JSMap> map) {
+ // First copy the key-value pairs, since getters could mutate them.
+ Handle<OrderedHashMap> table(OrderedHashMap::cast(map->table()), isolate_);
+ int length = table->NumberOfElements() * 2;
+ Handle<FixedArray> entries = isolate_->factory()->NewFixedArray(length);
+ {
+ DisallowHeapAllocation no_gc;
+ Oddball the_hole = ReadOnlyRoots(isolate_).the_hole_value();
+ int capacity = table->UsedCapacity();
+ int result_index = 0;
+ for (int i = 0; i < capacity; i++) {
+ Object key = table->KeyAt(i);
+ if (key == the_hole) continue;
+ entries->set(result_index++, key);
+ entries->set(result_index++, table->ValueAt(i));
+ }
+ DCHECK_EQ(result_index, length);
+ }
+
+ // Then write it out.
+ WriteTag(SerializationTag::kBeginJSMap);
+ for (int i = 0; i < length; i++) {
+ if (!WriteObject(handle(entries->get(i), isolate_)).FromMaybe(false)) {
+ return Nothing<bool>();
+ }
+ }
+ WriteTag(SerializationTag::kEndJSMap);
+ WriteVarint<uint32_t>(length);
+ return ThrowIfOutOfMemory();
+}
+
+Maybe<bool> ValueSerializer::WriteJSSet(Handle<JSSet> set) {
+ // First copy the element pointers, since getters could mutate them.
+ Handle<OrderedHashSet> table(OrderedHashSet::cast(set->table()), isolate_);
+ int length = table->NumberOfElements();
+ Handle<FixedArray> entries = isolate_->factory()->NewFixedArray(length);
+ {
+ DisallowHeapAllocation no_gc;
+ Oddball the_hole = ReadOnlyRoots(isolate_).the_hole_value();
+ int capacity = table->UsedCapacity();
+ int result_index = 0;
+ for (int i = 0; i < capacity; i++) {
+ Object key = table->KeyAt(i);
+ if (key == the_hole) continue;
+ entries->set(result_index++, key);
+ }
+ DCHECK_EQ(result_index, length);
+ }
+
+ // Then write it out.
+ WriteTag(SerializationTag::kBeginJSSet);
+ for (int i = 0; i < length; i++) {
+ if (!WriteObject(handle(entries->get(i), isolate_)).FromMaybe(false)) {
+ return Nothing<bool>();
+ }
+ }
+ WriteTag(SerializationTag::kEndJSSet);
+ WriteVarint<uint32_t>(length);
+ return ThrowIfOutOfMemory();
+}
+
+Maybe<bool> ValueSerializer::WriteJSArrayBuffer(
+ Handle<JSArrayBuffer> array_buffer) {
+ if (array_buffer->is_shared()) {
+ if (!delegate_) {
+ ThrowDataCloneError(MessageTemplate::kDataCloneError, array_buffer);
+ return Nothing<bool>();
+ }
+
+ v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate_);
+ Maybe<uint32_t> index = delegate_->GetSharedArrayBufferId(
+ v8_isolate, Utils::ToLocalShared(array_buffer));
+ RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate_, Nothing<bool>());
+
+ WriteTag(SerializationTag::kSharedArrayBuffer);
+ WriteVarint(index.FromJust());
+ return ThrowIfOutOfMemory();
+ }
+
+ uint32_t* transfer_entry = array_buffer_transfer_map_.Find(array_buffer);
+ if (transfer_entry) {
+ WriteTag(SerializationTag::kArrayBufferTransfer);
+ WriteVarint(*transfer_entry);
+ return ThrowIfOutOfMemory();
+ }
+ if (array_buffer->was_detached()) {
+ ThrowDataCloneError(MessageTemplate::kDataCloneErrorDetachedArrayBuffer);
+ return Nothing<bool>();
+ }
+ double byte_length = array_buffer->byte_length();
+ if (byte_length > std::numeric_limits<uint32_t>::max()) {
+ ThrowDataCloneError(MessageTemplate::kDataCloneError, array_buffer);
+ return Nothing<bool>();
+ }
+ WriteTag(SerializationTag::kArrayBuffer);
+ WriteVarint<uint32_t>(byte_length);
+ WriteRawBytes(array_buffer->backing_store(), byte_length);
+ return ThrowIfOutOfMemory();
+}
+
+Maybe<bool> ValueSerializer::WriteJSArrayBufferView(JSArrayBufferView view) {
+ if (treat_array_buffer_views_as_host_objects_) {
+ return WriteHostObject(handle(view, isolate_));
+ }
+ WriteTag(SerializationTag::kArrayBufferView);
+ ArrayBufferViewTag tag = ArrayBufferViewTag::kInt8Array;
+ if (view.IsJSTypedArray()) {
+ switch (JSTypedArray::cast(view).type()) {
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype) \
+ case kExternal##Type##Array: \
+ tag = ArrayBufferViewTag::k##Type##Array; \
+ break;
+ TYPED_ARRAYS(TYPED_ARRAY_CASE)
+#undef TYPED_ARRAY_CASE
+ }
+ } else {
+ DCHECK(view.IsJSDataView());
+ tag = ArrayBufferViewTag::kDataView;
+ }
+ WriteVarint(static_cast<uint8_t>(tag));
+ WriteVarint(static_cast<uint32_t>(view.byte_offset()));
+ WriteVarint(static_cast<uint32_t>(view.byte_length()));
+ return ThrowIfOutOfMemory();
+}
+
+Maybe<bool> ValueSerializer::WriteWasmModule(Handle<WasmModuleObject> object) {
+ if (delegate_ != nullptr) {
+ // TODO(titzer): introduce a Utils::ToLocal for WasmModuleObject.
+ Maybe<uint32_t> transfer_id = delegate_->GetWasmModuleTransferId(
+ reinterpret_cast<v8::Isolate*>(isolate_),
+ v8::Local<v8::WasmModuleObject>::Cast(
+ Utils::ToLocal(Handle<JSObject>::cast(object))));
+ RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate_, Nothing<bool>());
+ uint32_t id = 0;
+ if (transfer_id.To(&id)) {
+ WriteTag(SerializationTag::kWasmModuleTransfer);
+ WriteVarint<uint32_t>(id);
+ return Just(true);
+ }
+ }
+
+ WasmEncodingTag encoding_tag = WasmEncodingTag::kRawBytes;
+ WriteTag(SerializationTag::kWasmModule);
+ WriteRawBytes(&encoding_tag, sizeof(encoding_tag));
+
+ wasm::NativeModule* native_module = object->native_module();
+ Vector<const uint8_t> wire_bytes = native_module->wire_bytes();
+ WriteVarint<uint32_t>(static_cast<uint32_t>(wire_bytes.size()));
+ uint8_t* destination;
+ if (ReserveRawBytes(wire_bytes.size()).To(&destination)) {
+ memcpy(destination, wire_bytes.begin(), wire_bytes.size());
+ }
+
+ wasm::WasmSerializer wasm_serializer(native_module);
+ size_t module_size = wasm_serializer.GetSerializedNativeModuleSize();
+ CHECK_GE(std::numeric_limits<uint32_t>::max(), module_size);
+ WriteVarint<uint32_t>(static_cast<uint32_t>(module_size));
+ uint8_t* module_buffer;
+ if (ReserveRawBytes(module_size).To(&module_buffer)) {
+ if (!wasm_serializer.SerializeNativeModule({module_buffer, module_size})) {
+ return Nothing<bool>();
+ }
+ }
+ return ThrowIfOutOfMemory();
+}
+
+Maybe<bool> ValueSerializer::WriteWasmMemory(Handle<WasmMemoryObject> object) {
+ if (!object->array_buffer().is_shared()) {
+ ThrowDataCloneError(MessageTemplate::kDataCloneError, object);
+ return Nothing<bool>();
+ }
+
+ isolate_->wasm_engine()->memory_tracker()->RegisterWasmMemoryAsShared(
+ object, isolate_);
+
+ WriteTag(SerializationTag::kWasmMemoryTransfer);
+ WriteZigZag<int32_t>(object->maximum_pages());
+ return WriteJSReceiver(Handle<JSReceiver>(object->array_buffer(), isolate_));
+}
+
+Maybe<bool> ValueSerializer::WriteHostObject(Handle<JSObject> object) {
+ WriteTag(SerializationTag::kHostObject);
+ if (!delegate_) {
+ isolate_->Throw(*isolate_->factory()->NewError(
+ isolate_->error_function(), MessageTemplate::kDataCloneError, object));
+ return Nothing<bool>();
+ }
+ v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate_);
+ Maybe<bool> result =
+ delegate_->WriteHostObject(v8_isolate, Utils::ToLocal(object));
+ RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate_, Nothing<bool>());
+ USE(result);
+ DCHECK(!result.IsNothing());
+ DCHECK(result.ToChecked());
+ return ThrowIfOutOfMemory();
+}
+
+Maybe<uint32_t> ValueSerializer::WriteJSObjectPropertiesSlow(
+ Handle<JSObject> object, Handle<FixedArray> keys) {
+ uint32_t properties_written = 0;
+ int length = keys->length();
+ for (int i = 0; i < length; i++) {
+ Handle<Object> key(keys->get(i), isolate_);
+
+ bool success;
+ LookupIterator it = LookupIterator::PropertyOrElement(
+ isolate_, object, key, &success, LookupIterator::OWN);
+ DCHECK(success);
+ Handle<Object> value;
+ if (!Object::GetProperty(&it).ToHandle(&value)) return Nothing<uint32_t>();
+
+ // If the property is no longer found, do not serialize it.
+ // This could happen if a getter deleted the property.
+ if (!it.IsFound()) continue;
+
+ if (!WriteObject(key).FromMaybe(false) ||
+ !WriteObject(value).FromMaybe(false)) {
+ return Nothing<uint32_t>();
+ }
+
+ properties_written++;
+ }
+ return Just(properties_written);
+}
+
+void ValueSerializer::ThrowDataCloneError(MessageTemplate template_index) {
+ return ThrowDataCloneError(template_index,
+ isolate_->factory()->empty_string());
+}
+
+Maybe<bool> ValueSerializer::ThrowIfOutOfMemory() {
+ if (out_of_memory_) {
+ ThrowDataCloneError(MessageTemplate::kDataCloneErrorOutOfMemory);
+ return Nothing<bool>();
+ }
+ return Just(true);
+}
+
+void ValueSerializer::ThrowDataCloneError(MessageTemplate index,
+ Handle<Object> arg0) {
+ Handle<String> message = MessageFormatter::Format(isolate_, index, arg0);
+ if (delegate_) {
+ delegate_->ThrowDataCloneError(Utils::ToLocal(message));
+ } else {
+ isolate_->Throw(
+ *isolate_->factory()->NewError(isolate_->error_function(), message));
+ }
+ if (isolate_->has_scheduled_exception()) {
+ isolate_->PromoteScheduledException();
+ }
+}
+
+ValueDeserializer::ValueDeserializer(Isolate* isolate,
+ Vector<const uint8_t> data,
+ v8::ValueDeserializer::Delegate* delegate)
+ : isolate_(isolate),
+ delegate_(delegate),
+ position_(data.begin()),
+ end_(data.begin() + data.length()),
+ allocation_(data.length() > kPretenureThreshold ? AllocationType::kOld
+ : AllocationType::kYoung),
+ id_map_(isolate->global_handles()->Create(
+ ReadOnlyRoots(isolate_).empty_fixed_array())) {}
+
+ValueDeserializer::~ValueDeserializer() {
+ GlobalHandles::Destroy(id_map_.location());
+
+ Handle<Object> transfer_map_handle;
+ if (array_buffer_transfer_map_.ToHandle(&transfer_map_handle)) {
+ GlobalHandles::Destroy(transfer_map_handle.location());
+ }
+}
+
+Maybe<bool> ValueDeserializer::ReadHeader() {
+ if (position_ < end_ &&
+ *position_ == static_cast<uint8_t>(SerializationTag::kVersion)) {
+ ReadTag().ToChecked();
+ if (!ReadVarint<uint32_t>().To(&version_) || version_ > kLatestVersion) {
+ isolate_->Throw(*isolate_->factory()->NewError(
+ MessageTemplate::kDataCloneDeserializationVersionError));
+ return Nothing<bool>();
+ }
+ }
+ return Just(true);
+}
+
+Maybe<SerializationTag> ValueDeserializer::PeekTag() const {
+ const uint8_t* peek_position = position_;
+ SerializationTag tag;
+ do {
+ if (peek_position >= end_) return Nothing<SerializationTag>();
+ tag = static_cast<SerializationTag>(*peek_position);
+ peek_position++;
+ } while (tag == SerializationTag::kPadding);
+ return Just(tag);
+}
+
+void ValueDeserializer::ConsumeTag(SerializationTag peeked_tag) {
+ SerializationTag actual_tag = ReadTag().ToChecked();
+ DCHECK(actual_tag == peeked_tag);
+ USE(actual_tag);
+}
+
+Maybe<SerializationTag> ValueDeserializer::ReadTag() {
+ SerializationTag tag;
+ do {
+ if (position_ >= end_) return Nothing<SerializationTag>();
+ tag = static_cast<SerializationTag>(*position_);
+ position_++;
+ } while (tag == SerializationTag::kPadding);
+ return Just(tag);
+}
+
+template <typename T>
+Maybe<T> ValueDeserializer::ReadVarint() {
+ // Reads an unsigned integer as a base-128 varint.
+ // The number is written, 7 bits at a time, from the least significant to the
+ // most significant 7 bits. Each byte, except the last, has the MSB set.
+ // If the varint is larger than T, any more significant bits are discarded.
+ // See also https://developers.google.com/protocol-buffers/docs/encoding
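+  // For example, the bytes 0x96 0x01 decode to 0x16 | (0x01 << 7) = 150.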
+ static_assert(std::is_integral<T>::value && std::is_unsigned<T>::value,
+ "Only unsigned integer types can be read as varints.");
+ T value = 0;
+ unsigned shift = 0;
+ bool has_another_byte;
+ do {
+ if (position_ >= end_) return Nothing<T>();
+ uint8_t byte = *position_;
+ if (V8_LIKELY(shift < sizeof(T) * 8)) {
+ value |= static_cast<T>(byte & 0x7F) << shift;
+ shift += 7;
+ }
+ has_another_byte = byte & 0x80;
+ position_++;
+ } while (has_another_byte);
+ return Just(value);
+}
+
+template <typename T>
+Maybe<T> ValueDeserializer::ReadZigZag() {
+  // Reads a signed integer that was written as a varint using ZigZag encoding
+  // (i.e. 0 is encoded as 0, -1 as 1, 1 as 2, -2 as 3, and so on).
+ // See also https://developers.google.com/protocol-buffers/docs/encoding
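+  // For example, an unsigned value of 3 decodes to (3 >> 1) ^ -(3 & 1) = -2.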
+ static_assert(std::is_integral<T>::value && std::is_signed<T>::value,
+ "Only signed integer types can be read as zigzag.");
+ using UnsignedT = typename std::make_unsigned<T>::type;
+ UnsignedT unsigned_value;
+ if (!ReadVarint<UnsignedT>().To(&unsigned_value)) return Nothing<T>();
+ return Just(static_cast<T>((unsigned_value >> 1) ^
+ -static_cast<T>(unsigned_value & 1)));
+}
+
+Maybe<double> ValueDeserializer::ReadDouble() {
+ // Warning: this uses host endianness.
+ if (position_ > end_ - sizeof(double)) return Nothing<double>();
+ double value;
+ memcpy(&value, position_, sizeof(double));
+ position_ += sizeof(double);
+ if (std::isnan(value)) value = std::numeric_limits<double>::quiet_NaN();
+ return Just(value);
+}
+
+Maybe<Vector<const uint8_t>> ValueDeserializer::ReadRawBytes(int size) {
+ if (size > end_ - position_) return Nothing<Vector<const uint8_t>>();
+ const uint8_t* start = position_;
+ position_ += size;
+ return Just(Vector<const uint8_t>(start, size));
+}
+
+bool ValueDeserializer::ReadUint32(uint32_t* value) {
+ return ReadVarint<uint32_t>().To(value);
+}
+
+bool ValueDeserializer::ReadUint64(uint64_t* value) {
+ return ReadVarint<uint64_t>().To(value);
+}
+
+bool ValueDeserializer::ReadDouble(double* value) {
+ return ReadDouble().To(value);
+}
+
+bool ValueDeserializer::ReadRawBytes(size_t length, const void** data) {
+ if (length > static_cast<size_t>(end_ - position_)) return false;
+ *data = position_;
+ position_ += length;
+ return true;
+}
+
+void ValueDeserializer::TransferArrayBuffer(
+ uint32_t transfer_id, Handle<JSArrayBuffer> array_buffer) {
+ if (array_buffer_transfer_map_.is_null()) {
+ array_buffer_transfer_map_ = isolate_->global_handles()->Create(
+ *SimpleNumberDictionary::New(isolate_, 0));
+ }
+ Handle<SimpleNumberDictionary> dictionary =
+ array_buffer_transfer_map_.ToHandleChecked();
+ Handle<SimpleNumberDictionary> new_dictionary = SimpleNumberDictionary::Set(
+ isolate_, dictionary, transfer_id, array_buffer);
+ if (!new_dictionary.is_identical_to(dictionary)) {
+ GlobalHandles::Destroy(dictionary.location());
+ array_buffer_transfer_map_ =
+ isolate_->global_handles()->Create(*new_dictionary);
+ }
+}
+
+MaybeHandle<Object> ValueDeserializer::ReadObject() {
+ DisallowJavascriptExecution no_js(isolate_);
+ // If we are at the end of the stack, abort. This function may recurse.
+ STACK_CHECK(isolate_, MaybeHandle<Object>());
+
+ MaybeHandle<Object> result = ReadObjectInternal();
+
+ // ArrayBufferView is special in that it consumes the value before it, even
+ // after format version 0.
+ Handle<Object> object;
+ SerializationTag tag;
+ if (result.ToHandle(&object) && V8_UNLIKELY(object->IsJSArrayBuffer()) &&
+ PeekTag().To(&tag) && tag == SerializationTag::kArrayBufferView) {
+ ConsumeTag(SerializationTag::kArrayBufferView);
+ result = ReadJSArrayBufferView(Handle<JSArrayBuffer>::cast(object));
+ }
+
+ if (result.is_null() && !isolate_->has_pending_exception()) {
+ isolate_->Throw(*isolate_->factory()->NewError(
+ MessageTemplate::kDataCloneDeserializationError));
+ }
+
+ return result;
+}
+
+MaybeHandle<Object> ValueDeserializer::ReadObjectInternal() {
+ SerializationTag tag;
+ if (!ReadTag().To(&tag)) return MaybeHandle<Object>();
+ switch (tag) {
+ case SerializationTag::kVerifyObjectCount:
+ // Read the count and ignore it.
+ if (ReadVarint<uint32_t>().IsNothing()) return MaybeHandle<Object>();
+ return ReadObject();
+ case SerializationTag::kUndefined:
+ return isolate_->factory()->undefined_value();
+ case SerializationTag::kNull:
+ return isolate_->factory()->null_value();
+ case SerializationTag::kTrue:
+ return isolate_->factory()->true_value();
+ case SerializationTag::kFalse:
+ return isolate_->factory()->false_value();
+ case SerializationTag::kInt32: {
+ Maybe<int32_t> number = ReadZigZag<int32_t>();
+ if (number.IsNothing()) return MaybeHandle<Object>();
+ return isolate_->factory()->NewNumberFromInt(number.FromJust(),
+ allocation_);
+ }
+ case SerializationTag::kUint32: {
+ Maybe<uint32_t> number = ReadVarint<uint32_t>();
+ if (number.IsNothing()) return MaybeHandle<Object>();
+ return isolate_->factory()->NewNumberFromUint(number.FromJust(),
+ allocation_);
+ }
+ case SerializationTag::kDouble: {
+ Maybe<double> number = ReadDouble();
+ if (number.IsNothing()) return MaybeHandle<Object>();
+ return isolate_->factory()->NewNumber(number.FromJust(), allocation_);
+ }
+ case SerializationTag::kBigInt:
+ return ReadBigInt();
+ case SerializationTag::kUtf8String:
+ return ReadUtf8String();
+ case SerializationTag::kOneByteString:
+ return ReadOneByteString();
+ case SerializationTag::kTwoByteString:
+ return ReadTwoByteString();
+ case SerializationTag::kObjectReference: {
+ uint32_t id;
+ if (!ReadVarint<uint32_t>().To(&id)) return MaybeHandle<Object>();
+ return GetObjectWithID(id);
+ }
+ case SerializationTag::kBeginJSObject:
+ return ReadJSObject();
+ case SerializationTag::kBeginSparseJSArray:
+ return ReadSparseJSArray();
+ case SerializationTag::kBeginDenseJSArray:
+ return ReadDenseJSArray();
+ case SerializationTag::kDate:
+ return ReadJSDate();
+ case SerializationTag::kTrueObject:
+ case SerializationTag::kFalseObject:
+ case SerializationTag::kNumberObject:
+ case SerializationTag::kBigIntObject:
+ case SerializationTag::kStringObject:
+ return ReadJSValue(tag);
+ case SerializationTag::kRegExp:
+ return ReadJSRegExp();
+ case SerializationTag::kBeginJSMap:
+ return ReadJSMap();
+ case SerializationTag::kBeginJSSet:
+ return ReadJSSet();
+ case SerializationTag::kArrayBuffer: {
+ const bool is_shared = false;
+ return ReadJSArrayBuffer(is_shared);
+ }
+ case SerializationTag::kArrayBufferTransfer: {
+ return ReadTransferredJSArrayBuffer();
+ }
+ case SerializationTag::kSharedArrayBuffer: {
+ const bool is_shared = true;
+ return ReadJSArrayBuffer(is_shared);
+ }
+ case SerializationTag::kWasmModule:
+ return ReadWasmModule();
+ case SerializationTag::kWasmModuleTransfer:
+ return ReadWasmModuleTransfer();
+ case SerializationTag::kWasmMemoryTransfer:
+ return ReadWasmMemory();
+ case SerializationTag::kHostObject:
+ return ReadHostObject();
+ default:
+ // Before there was an explicit tag for host objects, all unknown tags
+ // were delegated to the host.
+ if (version_ < 13) {
+ position_--;
+ return ReadHostObject();
+ }
+ return MaybeHandle<Object>();
+ }
+}
+
+MaybeHandle<String> ValueDeserializer::ReadString() {
+ if (version_ < 12) return ReadUtf8String();
+ Handle<Object> object;
+ if (!ReadObject().ToHandle(&object) || !object->IsString()) {
+ return MaybeHandle<String>();
+ }
+ return Handle<String>::cast(object);
+}
+
+MaybeHandle<BigInt> ValueDeserializer::ReadBigInt() {
+ uint32_t bitfield;
+ if (!ReadVarint<uint32_t>().To(&bitfield)) return MaybeHandle<BigInt>();
+ int bytelength = BigInt::DigitsByteLengthForBitfield(bitfield);
+ Vector<const uint8_t> digits_storage;
+ if (!ReadRawBytes(bytelength).To(&digits_storage)) {
+ return MaybeHandle<BigInt>();
+ }
+ return BigInt::FromSerializedDigits(isolate_, bitfield, digits_storage,
+ allocation_);
+}
+
+MaybeHandle<String> ValueDeserializer::ReadUtf8String() {
+ uint32_t utf8_length;
+ Vector<const uint8_t> utf8_bytes;
+ if (!ReadVarint<uint32_t>().To(&utf8_length) ||
+ utf8_length >
+ static_cast<uint32_t>(std::numeric_limits<int32_t>::max()) ||
+ !ReadRawBytes(utf8_length).To(&utf8_bytes)) {
+ return MaybeHandle<String>();
+ }
+ return isolate_->factory()->NewStringFromUtf8(
+ Vector<const char>::cast(utf8_bytes), allocation_);
+}
+
+MaybeHandle<String> ValueDeserializer::ReadOneByteString() {
+ uint32_t byte_length;
+ Vector<const uint8_t> bytes;
+ if (!ReadVarint<uint32_t>().To(&byte_length) ||
+ byte_length >
+ static_cast<uint32_t>(std::numeric_limits<int32_t>::max()) ||
+ !ReadRawBytes(byte_length).To(&bytes)) {
+ return MaybeHandle<String>();
+ }
+ return isolate_->factory()->NewStringFromOneByte(bytes, allocation_);
+}
+
+MaybeHandle<String> ValueDeserializer::ReadTwoByteString() {
+ uint32_t byte_length;
+ Vector<const uint8_t> bytes;
+ if (!ReadVarint<uint32_t>().To(&byte_length) ||
+ byte_length >
+ static_cast<uint32_t>(std::numeric_limits<int32_t>::max()) ||
+ byte_length % sizeof(uc16) != 0 ||
+ !ReadRawBytes(byte_length).To(&bytes)) {
+ return MaybeHandle<String>();
+ }
+
+ // Allocate an uninitialized string so that we can do a raw memcpy into the
+ // string on the heap (regardless of alignment).
+ if (byte_length == 0) return isolate_->factory()->empty_string();
+ Handle<SeqTwoByteString> string;
+ if (!isolate_->factory()
+ ->NewRawTwoByteString(byte_length / sizeof(uc16), allocation_)
+ .ToHandle(&string)) {
+ return MaybeHandle<String>();
+ }
+
+ // Copy the bytes directly into the new string.
+ // Warning: this uses host endianness.
+ DisallowHeapAllocation no_gc;
+ memcpy(string->GetChars(no_gc), bytes.begin(), bytes.length());
+ return string;
+}
+
+bool ValueDeserializer::ReadExpectedString(Handle<String> expected) {
+ DisallowHeapAllocation no_gc;
+ // In the case of failure, the position in the stream is reset.
+ const uint8_t* original_position = position_;
+
+ SerializationTag tag;
+ uint32_t byte_length;
+ Vector<const uint8_t> bytes;
+ if (!ReadTag().To(&tag) || !ReadVarint<uint32_t>().To(&byte_length) ||
+ byte_length >
+ static_cast<uint32_t>(std::numeric_limits<int32_t>::max()) ||
+ !ReadRawBytes(byte_length).To(&bytes)) {
+ position_ = original_position;
+ return false;
+ }
+
+ String::FlatContent flat = expected->GetFlatContent(no_gc);
+
+  // If the bytes match the flattened string verbatim, then the string is
+  // successfully consumed.
+ if (tag == SerializationTag::kOneByteString && flat.IsOneByte()) {
+ Vector<const uint8_t> chars = flat.ToOneByteVector();
+ if (byte_length == static_cast<size_t>(chars.length()) &&
+ memcmp(bytes.begin(), chars.begin(), byte_length) == 0) {
+ return true;
+ }
+ } else if (tag == SerializationTag::kTwoByteString && flat.IsTwoByte()) {
+ Vector<const uc16> chars = flat.ToUC16Vector();
+ if (byte_length == static_cast<unsigned>(chars.length()) * sizeof(uc16) &&
+ memcmp(bytes.begin(), chars.begin(), byte_length) == 0) {
+ return true;
+ }
+ } else if (tag == SerializationTag::kUtf8String && flat.IsOneByte()) {
+ Vector<const uint8_t> chars = flat.ToOneByteVector();
+ if (byte_length == static_cast<size_t>(chars.length()) &&
+ String::IsAscii(chars.begin(), chars.length()) &&
+ memcmp(bytes.begin(), chars.begin(), byte_length) == 0) {
+ return true;
+ }
+ }
+
+ position_ = original_position;
+ return false;
+}
+
+MaybeHandle<JSObject> ValueDeserializer::ReadJSObject() {
+ // If we are at the end of the stack, abort. This function may recurse.
+ STACK_CHECK(isolate_, MaybeHandle<JSObject>());
+
+ uint32_t id = next_id_++;
+ HandleScope scope(isolate_);
+ Handle<JSObject> object = isolate_->factory()->NewJSObject(
+ isolate_->object_function(), allocation_);
+ AddObjectWithID(id, object);
+
+ uint32_t num_properties;
+ uint32_t expected_num_properties;
+ if (!ReadJSObjectProperties(object, SerializationTag::kEndJSObject, true)
+ .To(&num_properties) ||
+ !ReadVarint<uint32_t>().To(&expected_num_properties) ||
+ num_properties != expected_num_properties) {
+ return MaybeHandle<JSObject>();
+ }
+
+ DCHECK(HasObjectWithID(id));
+ return scope.CloseAndEscape(object);
+}
+
+MaybeHandle<JSArray> ValueDeserializer::ReadSparseJSArray() {
+ // If we are at the end of the stack, abort. This function may recurse.
+ STACK_CHECK(isolate_, MaybeHandle<JSArray>());
+
+ uint32_t length;
+ if (!ReadVarint<uint32_t>().To(&length)) return MaybeHandle<JSArray>();
+
+ uint32_t id = next_id_++;
+ HandleScope scope(isolate_);
+ Handle<JSArray> array = isolate_->factory()->NewJSArray(
+ 0, TERMINAL_FAST_ELEMENTS_KIND, allocation_);
+ JSArray::SetLength(array, length);
+ AddObjectWithID(id, array);
+
+ uint32_t num_properties;
+ uint32_t expected_num_properties;
+ uint32_t expected_length;
+ if (!ReadJSObjectProperties(array, SerializationTag::kEndSparseJSArray, false)
+ .To(&num_properties) ||
+ !ReadVarint<uint32_t>().To(&expected_num_properties) ||
+ !ReadVarint<uint32_t>().To(&expected_length) ||
+ num_properties != expected_num_properties || length != expected_length) {
+ return MaybeHandle<JSArray>();
+ }
+
+ DCHECK(HasObjectWithID(id));
+ return scope.CloseAndEscape(array);
+}
+
+MaybeHandle<JSArray> ValueDeserializer::ReadDenseJSArray() {
+ // If we are at the end of the stack, abort. This function may recurse.
+ STACK_CHECK(isolate_, MaybeHandle<JSArray>());
+
+ // We shouldn't permit an array larger than the biggest we can request from
+ // V8. As an additional sanity check, since each entry will take at least one
+ // byte to encode, if there are fewer bytes than that we can also fail fast.
+ uint32_t length;
+ if (!ReadVarint<uint32_t>().To(&length) ||
+ length > static_cast<uint32_t>(FixedArray::kMaxLength) ||
+ length > static_cast<size_t>(end_ - position_)) {
+ return MaybeHandle<JSArray>();
+ }
+
+ uint32_t id = next_id_++;
+ HandleScope scope(isolate_);
+ Handle<JSArray> array = isolate_->factory()->NewJSArray(
+ HOLEY_ELEMENTS, length, length, INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE,
+ allocation_);
+ AddObjectWithID(id, array);
+
+ Handle<FixedArray> elements(FixedArray::cast(array->elements()), isolate_);
+ for (uint32_t i = 0; i < length; i++) {
+ SerializationTag tag;
+ if (PeekTag().To(&tag) && tag == SerializationTag::kTheHole) {
+ ConsumeTag(SerializationTag::kTheHole);
+ continue;
+ }
+
+ Handle<Object> element;
+ if (!ReadObject().ToHandle(&element)) return MaybeHandle<JSArray>();
+
+    // Serialization versions less than 11 encode the hole the same as
+    // undefined. For consistency with previous behavior, store these as the
+    // hole. From version 11 onward, undefined means undefined.
+ if (version_ < 11 && element->IsUndefined(isolate_)) continue;
+
+ // Safety check.
+ if (i >= static_cast<uint32_t>(elements->length())) {
+ return MaybeHandle<JSArray>();
+ }
+
+ elements->set(i, *element);
+ }
+
+ uint32_t num_properties;
+ uint32_t expected_num_properties;
+ uint32_t expected_length;
+ if (!ReadJSObjectProperties(array, SerializationTag::kEndDenseJSArray, false)
+ .To(&num_properties) ||
+ !ReadVarint<uint32_t>().To(&expected_num_properties) ||
+ !ReadVarint<uint32_t>().To(&expected_length) ||
+ num_properties != expected_num_properties || length != expected_length) {
+ return MaybeHandle<JSArray>();
+ }
+
+ DCHECK(HasObjectWithID(id));
+ return scope.CloseAndEscape(array);
+}
+
+MaybeHandle<JSDate> ValueDeserializer::ReadJSDate() {
+ double value;
+ if (!ReadDouble().To(&value)) return MaybeHandle<JSDate>();
+ uint32_t id = next_id_++;
+ Handle<JSDate> date;
+ if (!JSDate::New(isolate_->date_function(), isolate_->date_function(), value)
+ .ToHandle(&date)) {
+ return MaybeHandle<JSDate>();
+ }
+ AddObjectWithID(id, date);
+ return date;
+}
+
+MaybeHandle<JSValue> ValueDeserializer::ReadJSValue(SerializationTag tag) {
+ uint32_t id = next_id_++;
+ Handle<JSValue> value;
+ switch (tag) {
+ case SerializationTag::kTrueObject:
+ value = Handle<JSValue>::cast(isolate_->factory()->NewJSObject(
+ isolate_->boolean_function(), allocation_));
+ value->set_value(ReadOnlyRoots(isolate_).true_value());
+ break;
+ case SerializationTag::kFalseObject:
+ value = Handle<JSValue>::cast(isolate_->factory()->NewJSObject(
+ isolate_->boolean_function(), allocation_));
+ value->set_value(ReadOnlyRoots(isolate_).false_value());
+ break;
+ case SerializationTag::kNumberObject: {
+ double number;
+ if (!ReadDouble().To(&number)) return MaybeHandle<JSValue>();
+ value = Handle<JSValue>::cast(isolate_->factory()->NewJSObject(
+ isolate_->number_function(), allocation_));
+ Handle<Object> number_object =
+ isolate_->factory()->NewNumber(number, allocation_);
+ value->set_value(*number_object);
+ break;
+ }
+ case SerializationTag::kBigIntObject: {
+ Handle<BigInt> bigint;
+ if (!ReadBigInt().ToHandle(&bigint)) return MaybeHandle<JSValue>();
+ value = Handle<JSValue>::cast(isolate_->factory()->NewJSObject(
+ isolate_->bigint_function(), allocation_));
+ value->set_value(*bigint);
+ break;
+ }
+ case SerializationTag::kStringObject: {
+ Handle<String> string;
+ if (!ReadString().ToHandle(&string)) return MaybeHandle<JSValue>();
+ value = Handle<JSValue>::cast(isolate_->factory()->NewJSObject(
+ isolate_->string_function(), allocation_));
+ value->set_value(*string);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+ AddObjectWithID(id, value);
+ return value;
+}
+
+MaybeHandle<JSRegExp> ValueDeserializer::ReadJSRegExp() {
+ uint32_t id = next_id_++;
+ Handle<String> pattern;
+ uint32_t raw_flags;
+ Handle<JSRegExp> regexp;
+ if (!ReadString().ToHandle(&pattern) ||
+ !ReadVarint<uint32_t>().To(&raw_flags)) {
+ return MaybeHandle<JSRegExp>();
+ }
+
+ // Ensure the deserialized flags are valid.
+ // TODO(adamk): Can we remove this check now that dotAll is always-on?
+ uint32_t flags_mask = static_cast<uint32_t>(-1) << JSRegExp::FlagCount();
+ if ((raw_flags & flags_mask) ||
+ !JSRegExp::New(isolate_, pattern, static_cast<JSRegExp::Flags>(raw_flags))
+ .ToHandle(&regexp)) {
+ return MaybeHandle<JSRegExp>();
+ }
+
+ AddObjectWithID(id, regexp);
+ return regexp;
+}
+
+MaybeHandle<JSMap> ValueDeserializer::ReadJSMap() {
+ // If we are at the end of the stack, abort. This function may recurse.
+ STACK_CHECK(isolate_, MaybeHandle<JSMap>());
+
+ HandleScope scope(isolate_);
+ uint32_t id = next_id_++;
+ Handle<JSMap> map = isolate_->factory()->NewJSMap();
+ AddObjectWithID(id, map);
+
+ Handle<JSFunction> map_set = isolate_->map_set();
+ uint32_t length = 0;
+ while (true) {
+ SerializationTag tag;
+ if (!PeekTag().To(&tag)) return MaybeHandle<JSMap>();
+ if (tag == SerializationTag::kEndJSMap) {
+ ConsumeTag(SerializationTag::kEndJSMap);
+ break;
+ }
+
+ Handle<Object> argv[2];
+ if (!ReadObject().ToHandle(&argv[0]) || !ReadObject().ToHandle(&argv[1])) {
+ return MaybeHandle<JSMap>();
+ }
+
+ AllowJavascriptExecution allow_js(isolate_);
+ if (Execution::Call(isolate_, map_set, map, arraysize(argv), argv)
+ .is_null()) {
+ return MaybeHandle<JSMap>();
+ }
+ length += 2;
+ }
+
+ uint32_t expected_length;
+ if (!ReadVarint<uint32_t>().To(&expected_length) ||
+ length != expected_length) {
+ return MaybeHandle<JSMap>();
+ }
+ DCHECK(HasObjectWithID(id));
+ return scope.CloseAndEscape(map);
+}
+
+MaybeHandle<JSSet> ValueDeserializer::ReadJSSet() {
+ // If we are at the end of the stack, abort. This function may recurse.
+ STACK_CHECK(isolate_, MaybeHandle<JSSet>());
+
+ HandleScope scope(isolate_);
+ uint32_t id = next_id_++;
+ Handle<JSSet> set = isolate_->factory()->NewJSSet();
+ AddObjectWithID(id, set);
+ Handle<JSFunction> set_add = isolate_->set_add();
+ uint32_t length = 0;
+ while (true) {
+ SerializationTag tag;
+ if (!PeekTag().To(&tag)) return MaybeHandle<JSSet>();
+ if (tag == SerializationTag::kEndJSSet) {
+ ConsumeTag(SerializationTag::kEndJSSet);
+ break;
+ }
+
+ Handle<Object> argv[1];
+ if (!ReadObject().ToHandle(&argv[0])) return MaybeHandle<JSSet>();
+
+ AllowJavascriptExecution allow_js(isolate_);
+ if (Execution::Call(isolate_, set_add, set, arraysize(argv), argv)
+ .is_null()) {
+ return MaybeHandle<JSSet>();
+ }
+ length++;
+ }
+
+ uint32_t expected_length;
+ if (!ReadVarint<uint32_t>().To(&expected_length) ||
+ length != expected_length) {
+ return MaybeHandle<JSSet>();
+ }
+ DCHECK(HasObjectWithID(id));
+ return scope.CloseAndEscape(set);
+}
+
+MaybeHandle<JSArrayBuffer> ValueDeserializer::ReadJSArrayBuffer(
+ bool is_shared) {
+ uint32_t id = next_id_++;
+ if (is_shared) {
+ uint32_t clone_id;
+ Local<SharedArrayBuffer> sab_value;
+ if (!ReadVarint<uint32_t>().To(&clone_id) || delegate_ == nullptr ||
+ !delegate_
+ ->GetSharedArrayBufferFromId(
+ reinterpret_cast<v8::Isolate*>(isolate_), clone_id)
+ .ToLocal(&sab_value)) {
+ RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate_, JSArrayBuffer);
+ return MaybeHandle<JSArrayBuffer>();
+ }
+ Handle<JSArrayBuffer> array_buffer = Utils::OpenHandle(*sab_value);
+ DCHECK_EQ(is_shared, array_buffer->is_shared());
+ AddObjectWithID(id, array_buffer);
+ return array_buffer;
+ }
+ uint32_t byte_length;
+ if (!ReadVarint<uint32_t>().To(&byte_length) ||
+ byte_length > static_cast<size_t>(end_ - position_)) {
+ return MaybeHandle<JSArrayBuffer>();
+ }
+ const bool should_initialize = false;
+ Handle<JSArrayBuffer> array_buffer = isolate_->factory()->NewJSArrayBuffer(
+ SharedFlag::kNotShared, allocation_);
+ if (!JSArrayBuffer::SetupAllocatingData(array_buffer, isolate_, byte_length,
+ should_initialize)) {
+ return MaybeHandle<JSArrayBuffer>();
+ }
+ if (byte_length > 0) {
+ memcpy(array_buffer->backing_store(), position_, byte_length);
+ }
+ position_ += byte_length;
+ AddObjectWithID(id, array_buffer);
+ return array_buffer;
+}
+
+MaybeHandle<JSArrayBuffer> ValueDeserializer::ReadTransferredJSArrayBuffer() {
+ uint32_t id = next_id_++;
+ uint32_t transfer_id;
+ Handle<SimpleNumberDictionary> transfer_map;
+ if (!ReadVarint<uint32_t>().To(&transfer_id) ||
+ !array_buffer_transfer_map_.ToHandle(&transfer_map)) {
+ return MaybeHandle<JSArrayBuffer>();
+ }
+ int index = transfer_map->FindEntry(isolate_, transfer_id);
+ if (index == SimpleNumberDictionary::kNotFound) {
+ return MaybeHandle<JSArrayBuffer>();
+ }
+ Handle<JSArrayBuffer> array_buffer(
+ JSArrayBuffer::cast(transfer_map->ValueAt(index)), isolate_);
+ AddObjectWithID(id, array_buffer);
+ return array_buffer;
+}
+
+MaybeHandle<JSArrayBufferView> ValueDeserializer::ReadJSArrayBufferView(
+ Handle<JSArrayBuffer> buffer) {
+ uint32_t buffer_byte_length = static_cast<uint32_t>(buffer->byte_length());
+ uint8_t tag = 0;
+ uint32_t byte_offset = 0;
+ uint32_t byte_length = 0;
+ if (!ReadVarint<uint8_t>().To(&tag) ||
+ !ReadVarint<uint32_t>().To(&byte_offset) ||
+ !ReadVarint<uint32_t>().To(&byte_length) ||
+ byte_offset > buffer_byte_length ||
+ byte_length > buffer_byte_length - byte_offset) {
+ return MaybeHandle<JSArrayBufferView>();
+ }
+ uint32_t id = next_id_++;
+ ExternalArrayType external_array_type = kExternalInt8Array;
+ unsigned element_size = 0;
+
+ switch (static_cast<ArrayBufferViewTag>(tag)) {
+ case ArrayBufferViewTag::kDataView: {
+ Handle<JSDataView> data_view =
+ isolate_->factory()->NewJSDataView(buffer, byte_offset, byte_length);
+ AddObjectWithID(id, data_view);
+ return data_view;
+ }
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype) \
+ case ArrayBufferViewTag::k##Type##Array: \
+ external_array_type = kExternal##Type##Array; \
+ element_size = sizeof(ctype); \
+ break;
+ TYPED_ARRAYS(TYPED_ARRAY_CASE)
+#undef TYPED_ARRAY_CASE
+ }
+ if (element_size == 0 || byte_offset % element_size != 0 ||
+ byte_length % element_size != 0) {
+ return MaybeHandle<JSArrayBufferView>();
+ }
+ Handle<JSTypedArray> typed_array = isolate_->factory()->NewJSTypedArray(
+ external_array_type, buffer, byte_offset, byte_length / element_size,
+ allocation_);
+ AddObjectWithID(id, typed_array);
+ return typed_array;
+}
+
+MaybeHandle<JSObject> ValueDeserializer::ReadWasmModuleTransfer() {
+ auto enabled_features = wasm::WasmFeaturesFromIsolate(isolate_);
+ if ((FLAG_wasm_disable_structured_cloning && !enabled_features.threads) ||
+ expect_inline_wasm()) {
+ return MaybeHandle<JSObject>();
+ }
+
+ uint32_t transfer_id = 0;
+ Local<Value> module_value;
+ if (!ReadVarint<uint32_t>().To(&transfer_id) || delegate_ == nullptr ||
+ !delegate_
+ ->GetWasmModuleFromId(reinterpret_cast<v8::Isolate*>(isolate_),
+ transfer_id)
+ .ToLocal(&module_value)) {
+ RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate_, JSObject);
+ return MaybeHandle<JSObject>();
+ }
+ uint32_t id = next_id_++;
+ Handle<JSObject> module =
+ Handle<JSObject>::cast(Utils::OpenHandle(*module_value));
+ AddObjectWithID(id, module);
+ return module;
+}
+
+MaybeHandle<JSObject> ValueDeserializer::ReadWasmModule() {
+ auto enabled_features = wasm::WasmFeaturesFromIsolate(isolate_);
+ if ((FLAG_wasm_disable_structured_cloning && !enabled_features.threads) ||
+ !expect_inline_wasm()) {
+ return MaybeHandle<JSObject>();
+ }
+
+ Vector<const uint8_t> encoding_tag;
+ if (!ReadRawBytes(sizeof(WasmEncodingTag)).To(&encoding_tag) ||
+ encoding_tag[0] != static_cast<uint8_t>(WasmEncodingTag::kRawBytes)) {
+ return MaybeHandle<JSObject>();
+ }
+
+ // Extract the data from the buffer: wasm wire bytes, followed by V8 compiled
+ // script data.
+ static_assert(sizeof(int) <= sizeof(uint32_t),
+ "max int must fit in uint32_t");
+ const uint32_t max_valid_size = std::numeric_limits<int>::max();
+ uint32_t wire_bytes_length = 0;
+ Vector<const uint8_t> wire_bytes;
+ uint32_t compiled_bytes_length = 0;
+ Vector<const uint8_t> compiled_bytes;
+ if (!ReadVarint<uint32_t>().To(&wire_bytes_length) ||
+ wire_bytes_length > max_valid_size ||
+ !ReadRawBytes(wire_bytes_length).To(&wire_bytes) ||
+ !ReadVarint<uint32_t>().To(&compiled_bytes_length) ||
+ compiled_bytes_length > max_valid_size ||
+ !ReadRawBytes(compiled_bytes_length).To(&compiled_bytes)) {
+ return MaybeHandle<JSObject>();
+ }
+
+ // Try to deserialize the compiled module first.
+ MaybeHandle<WasmModuleObject> result =
+ wasm::DeserializeNativeModule(isolate_, compiled_bytes, wire_bytes);
+ if (result.is_null()) {
+ wasm::ErrorThrower thrower(isolate_, "ValueDeserializer::ReadWasmModule");
+ // TODO(titzer): are the current features appropriate for deserializing?
+ auto enabled_features = wasm::WasmFeaturesFromIsolate(isolate_);
+ result = isolate_->wasm_engine()->SyncCompile(
+ isolate_, enabled_features, &thrower,
+ wasm::ModuleWireBytes(wire_bytes));
+ }
+ uint32_t id = next_id_++;
+ if (!result.is_null()) {
+ AddObjectWithID(id, result.ToHandleChecked());
+ }
+ return result;
+}
+
+MaybeHandle<WasmMemoryObject> ValueDeserializer::ReadWasmMemory() {
+ uint32_t id = next_id_++;
+
+ auto enabled_features = wasm::WasmFeaturesFromIsolate(isolate_);
+ if (!enabled_features.threads) {
+ return MaybeHandle<WasmMemoryObject>();
+ }
+
+ int32_t maximum_pages;
+ if (!ReadZigZag<int32_t>().To(&maximum_pages)) {
+ return MaybeHandle<WasmMemoryObject>();
+ }
+
+ SerializationTag tag;
+ if (!ReadTag().To(&tag) || tag != SerializationTag::kSharedArrayBuffer) {
+ return MaybeHandle<WasmMemoryObject>();
+ }
+
+ const bool is_shared = true;
+ Handle<JSArrayBuffer> buffer;
+ if (!ReadJSArrayBuffer(is_shared).ToHandle(&buffer)) {
+ return MaybeHandle<WasmMemoryObject>();
+ }
+
+ Handle<WasmMemoryObject> result =
+ WasmMemoryObject::New(isolate_, buffer, maximum_pages);
+
+ isolate_->wasm_engine()->memory_tracker()->RegisterWasmMemoryAsShared(
+ result, isolate_);
+
+ AddObjectWithID(id, result);
+ return result;
+}
+
+MaybeHandle<JSObject> ValueDeserializer::ReadHostObject() {
+ if (!delegate_) return MaybeHandle<JSObject>();
+ STACK_CHECK(isolate_, MaybeHandle<JSObject>());
+ uint32_t id = next_id_++;
+ v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate_);
+ v8::Local<v8::Object> object;
+ if (!delegate_->ReadHostObject(v8_isolate).ToLocal(&object)) {
+ RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate_, JSObject);
+ return MaybeHandle<JSObject>();
+ }
+ Handle<JSObject> js_object =
+ Handle<JSObject>::cast(Utils::OpenHandle(*object));
+ AddObjectWithID(id, js_object);
+ return js_object;
+}
+
+// Copies a vector of property values into an object, given the map that should
+// be used.
+static void CommitProperties(Handle<JSObject> object, Handle<Map> map,
+ const std::vector<Handle<Object>>& properties) {
+ JSObject::AllocateStorageForMap(object, map);
+ DCHECK(!object->map().is_dictionary_map());
+
+ DisallowHeapAllocation no_gc;
+ DescriptorArray descriptors = object->map().instance_descriptors();
+ for (unsigned i = 0; i < properties.size(); i++) {
+ // Initializing store.
+ object->WriteToField(i, descriptors.GetDetails(i), *properties[i]);
+ }
+}
+
+static bool IsValidObjectKey(Handle<Object> value) {
+ return value->IsName() || value->IsNumber();
+}
+
+Maybe<uint32_t> ValueDeserializer::ReadJSObjectProperties(
+ Handle<JSObject> object, SerializationTag end_tag,
+ bool can_use_transitions) {
+ uint32_t num_properties = 0;
+
+ // Fast path (following map transitions).
+ if (can_use_transitions) {
+ bool transitioning = true;
+ Handle<Map> map(object->map(), isolate_);
+ DCHECK(!map->is_dictionary_map());
+ DCHECK_EQ(0, map->instance_descriptors().number_of_descriptors());
+ std::vector<Handle<Object>> properties;
+ properties.reserve(8);
+
+ while (transitioning) {
+ // If there are no more properties, finish.
+ SerializationTag tag;
+ if (!PeekTag().To(&tag)) return Nothing<uint32_t>();
+ if (tag == end_tag) {
+ ConsumeTag(end_tag);
+ CommitProperties(object, map, properties);
+ CHECK_LT(properties.size(), std::numeric_limits<uint32_t>::max());
+ return Just(static_cast<uint32_t>(properties.size()));
+ }
+
+ // Determine the key to be used and the target map to transition to, if
+ // possible. Transitioning may abort if the key is not a string, or if no
+ // transition was found.
+ Handle<Object> key;
+ Handle<Map> target;
+ TransitionsAccessor transitions(isolate_, map);
+ Handle<String> expected_key = transitions.ExpectedTransitionKey();
+ if (!expected_key.is_null() && ReadExpectedString(expected_key)) {
+ key = expected_key;
+ target = transitions.ExpectedTransitionTarget();
+ } else {
+ if (!ReadObject().ToHandle(&key) || !IsValidObjectKey(key)) {
+ return Nothing<uint32_t>();
+ }
+ if (key->IsString()) {
+ key =
+ isolate_->factory()->InternalizeString(Handle<String>::cast(key));
+ // Don't reuse |transitions| because it could be stale.
+ transitioning = TransitionsAccessor(isolate_, map)
+ .FindTransitionToField(Handle<String>::cast(key))
+ .ToHandle(&target);
+ } else {
+ transitioning = false;
+ }
+ }
+
+ // Read the value that corresponds to it.
+ Handle<Object> value;
+ if (!ReadObject().ToHandle(&value)) return Nothing<uint32_t>();
+
+ // If still transitioning and the value fits the field representation
+ // (though generalization may be required), store the property value so
+ // that we can copy them all at once. Otherwise, stop transitioning.
+ if (transitioning) {
+ int descriptor = static_cast<int>(properties.size());
+ PropertyDetails details =
+ target->instance_descriptors().GetDetails(descriptor);
+ Representation expected_representation = details.representation();
+ if (value->FitsRepresentation(expected_representation)) {
+ if (expected_representation.IsHeapObject() &&
+ !target->instance_descriptors()
+ .GetFieldType(descriptor)
+ .NowContains(value)) {
+ Handle<FieldType> value_type =
+ value->OptimalType(isolate_, expected_representation);
+ Map::GeneralizeField(isolate_, target, descriptor,
+ details.constness(), expected_representation,
+ value_type);
+ }
+ DCHECK(target->instance_descriptors()
+ .GetFieldType(descriptor)
+ .NowContains(value));
+ properties.push_back(value);
+ map = target;
+ continue;
+ } else {
+ transitioning = false;
+ }
+ }
+
+ // Fell out of transitioning fast path. Commit the properties gathered so
+ // far, and then start setting properties slowly instead.
+ DCHECK(!transitioning);
+ CHECK_LT(properties.size(), std::numeric_limits<uint32_t>::max());
+ CommitProperties(object, map, properties);
+ num_properties = static_cast<uint32_t>(properties.size());
+
+ bool success;
+ LookupIterator it = LookupIterator::PropertyOrElement(
+ isolate_, object, key, &success, LookupIterator::OWN);
+ if (!success || it.state() != LookupIterator::NOT_FOUND ||
+ JSObject::DefineOwnPropertyIgnoreAttributes(&it, value, NONE)
+ .is_null()) {
+ return Nothing<uint32_t>();
+ }
+ num_properties++;
+ }
+
+ // At this point, transitioning should be done, but at least one property
+ // should have been written (in the zero-property case, there is an early
+ // return).
+ DCHECK(!transitioning);
+ DCHECK_GE(num_properties, 1u);
+ }
+
+ // Slow path.
+ for (;; num_properties++) {
+ SerializationTag tag;
+ if (!PeekTag().To(&tag)) return Nothing<uint32_t>();
+ if (tag == end_tag) {
+ ConsumeTag(end_tag);
+ return Just(num_properties);
+ }
+
+ Handle<Object> key;
+ if (!ReadObject().ToHandle(&key) || !IsValidObjectKey(key)) {
+ return Nothing<uint32_t>();
+ }
+ Handle<Object> value;
+ if (!ReadObject().ToHandle(&value)) return Nothing<uint32_t>();
+
+ bool success;
+ LookupIterator it = LookupIterator::PropertyOrElement(
+ isolate_, object, key, &success, LookupIterator::OWN);
+ if (!success || it.state() != LookupIterator::NOT_FOUND ||
+ JSObject::DefineOwnPropertyIgnoreAttributes(&it, value, NONE)
+ .is_null()) {
+ return Nothing<uint32_t>();
+ }
+ }
+}
+
+bool ValueDeserializer::HasObjectWithID(uint32_t id) {
+ return id < static_cast<unsigned>(id_map_->length()) &&
+ !id_map_->get(id).IsTheHole(isolate_);
+}
+
+MaybeHandle<JSReceiver> ValueDeserializer::GetObjectWithID(uint32_t id) {
+ if (id >= static_cast<unsigned>(id_map_->length())) {
+ return MaybeHandle<JSReceiver>();
+ }
+ Object value = id_map_->get(id);
+ if (value.IsTheHole(isolate_)) return MaybeHandle<JSReceiver>();
+ DCHECK(value.IsJSReceiver());
+ return Handle<JSReceiver>(JSReceiver::cast(value), isolate_);
+}
+
+void ValueDeserializer::AddObjectWithID(uint32_t id,
+ Handle<JSReceiver> object) {
+ DCHECK(!HasObjectWithID(id));
+ Handle<FixedArray> new_array =
+ FixedArray::SetAndGrow(isolate_, id_map_, id, object);
+
+ // If the dictionary was reallocated, update the global handle.
+ if (!new_array.is_identical_to(id_map_)) {
+ GlobalHandles::Destroy(id_map_.location());
+ id_map_ = isolate_->global_handles()->Create(*new_array);
+ }
+}
+
+static Maybe<bool> SetPropertiesFromKeyValuePairs(Isolate* isolate,
+ Handle<JSObject> object,
+ Handle<Object>* data,
+ uint32_t num_properties) {
+ for (unsigned i = 0; i < 2 * num_properties; i += 2) {
+ Handle<Object> key = data[i];
+ if (!IsValidObjectKey(key)) return Nothing<bool>();
+ Handle<Object> value = data[i + 1];
+ bool success;
+ LookupIterator it = LookupIterator::PropertyOrElement(
+ isolate, object, key, &success, LookupIterator::OWN);
+ if (!success || it.state() != LookupIterator::NOT_FOUND ||
+ JSObject::DefineOwnPropertyIgnoreAttributes(&it, value, NONE)
+ .is_null()) {
+ return Nothing<bool>();
+ }
+ }
+ return Just(true);
+}
+
+namespace {
+
+// Throws a generic "deserialization failed" exception unless a more specific
+// exception has already been thrown.
+void ThrowDeserializationExceptionIfNonePending(Isolate* isolate) {
+ if (!isolate->has_pending_exception()) {
+ isolate->Throw(*isolate->factory()->NewError(
+ MessageTemplate::kDataCloneDeserializationError));
+ }
+ DCHECK(isolate->has_pending_exception());
+}
+
+} // namespace
+
+MaybeHandle<Object>
+ValueDeserializer::ReadObjectUsingEntireBufferForLegacyFormat() {
+ DCHECK_EQ(version_, 0u);
+ HandleScope scope(isolate_);
+ std::vector<Handle<Object>> stack;
+ while (position_ < end_) {
+ SerializationTag tag;
+ if (!PeekTag().To(&tag)) break;
+
+ Handle<Object> new_object;
+ switch (tag) {
+ case SerializationTag::kEndJSObject: {
+ ConsumeTag(SerializationTag::kEndJSObject);
+
+ // JS Object: Read the last 2*n values from the stack and use them as
+ // key-value pairs.
+ uint32_t num_properties;
+ if (!ReadVarint<uint32_t>().To(&num_properties) ||
+ stack.size() / 2 < num_properties) {
+ isolate_->Throw(*isolate_->factory()->NewError(
+ MessageTemplate::kDataCloneDeserializationError));
+ return MaybeHandle<Object>();
+ }
+
+ size_t begin_properties =
+ stack.size() - 2 * static_cast<size_t>(num_properties);
+ Handle<JSObject> js_object = isolate_->factory()->NewJSObject(
+ isolate_->object_function(), allocation_);
+ if (num_properties &&
+ !SetPropertiesFromKeyValuePairs(
+ isolate_, js_object, &stack[begin_properties], num_properties)
+ .FromMaybe(false)) {
+ ThrowDeserializationExceptionIfNonePending(isolate_);
+ return MaybeHandle<Object>();
+ }
+
+ stack.resize(begin_properties);
+ new_object = js_object;
+ break;
+ }
+ case SerializationTag::kEndSparseJSArray: {
+ ConsumeTag(SerializationTag::kEndSparseJSArray);
+
+        // Sparse JS Array: Read the last 2*|num_properties| values from the
+        // stack.
+ uint32_t num_properties;
+ uint32_t length;
+ if (!ReadVarint<uint32_t>().To(&num_properties) ||
+ !ReadVarint<uint32_t>().To(&length) ||
+ stack.size() / 2 < num_properties) {
+ isolate_->Throw(*isolate_->factory()->NewError(
+ MessageTemplate::kDataCloneDeserializationError));
+ return MaybeHandle<Object>();
+ }
+
+ Handle<JSArray> js_array = isolate_->factory()->NewJSArray(
+ 0, TERMINAL_FAST_ELEMENTS_KIND, allocation_);
+ JSArray::SetLength(js_array, length);
+ size_t begin_properties =
+ stack.size() - 2 * static_cast<size_t>(num_properties);
+ if (num_properties &&
+ !SetPropertiesFromKeyValuePairs(
+ isolate_, js_array, &stack[begin_properties], num_properties)
+ .FromMaybe(false)) {
+ ThrowDeserializationExceptionIfNonePending(isolate_);
+ return MaybeHandle<Object>();
+ }
+
+ stack.resize(begin_properties);
+ new_object = js_array;
+ break;
+ }
+ case SerializationTag::kEndDenseJSArray: {
+ // This was already broken in Chromium, and apparently wasn't missed.
+ isolate_->Throw(*isolate_->factory()->NewError(
+ MessageTemplate::kDataCloneDeserializationError));
+ return MaybeHandle<Object>();
+ }
+ default:
+ if (!ReadObject().ToHandle(&new_object)) return MaybeHandle<Object>();
+ break;
+ }
+ stack.push_back(new_object);
+ }
+
+// Nothing remains but padding.
+#ifdef DEBUG
+ while (position_ < end_) {
+ DCHECK(*position_++ == static_cast<uint8_t>(SerializationTag::kPadding));
+ }
+#endif
+ position_ = end_;
+
+ if (stack.size() != 1) {
+ isolate_->Throw(*isolate_->factory()->NewError(
+ MessageTemplate::kDataCloneDeserializationError));
+ return MaybeHandle<Object>();
+ }
+ return scope.CloseAndEscape(stack[0]);
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/objects/value-serializer.h b/deps/v8/src/objects/value-serializer.h
new file mode 100644
index 0000000000..b83227d9d3
--- /dev/null
+++ b/deps/v8/src/objects/value-serializer.h
@@ -0,0 +1,314 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_VALUE_SERIALIZER_H_
+#define V8_OBJECTS_VALUE_SERIALIZER_H_
+
+#include <cstdint>
+#include <vector>
+
+#include "include/v8.h"
+#include "src/base/compiler-specific.h"
+#include "src/base/macros.h"
+#include "src/execution/message-template.h"
+#include "src/handles/maybe-handles.h"
+#include "src/utils/identity-map.h"
+#include "src/utils/vector.h"
+#include "src/zone/zone.h"
+
+namespace v8 {
+namespace internal {
+
+class BigInt;
+class HeapNumber;
+class Isolate;
+class JSArrayBuffer;
+class JSArrayBufferView;
+class JSDate;
+class JSMap;
+class JSRegExp;
+class JSSet;
+class JSValue;
+class MutableHeapNumber;
+class Object;
+class Oddball;
+class Smi;
+class WasmMemoryObject;
+class WasmModuleObject;
+
+enum class SerializationTag : uint8_t;
+
+/**
+ * Writes V8 objects in a binary format that allows the objects to be cloned
+ * according to the HTML structured clone algorithm.
+ *
+ * Format is based on Blink's previous serialization logic.
+ */
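+//
+// A minimal usage sketch (assuming an Isolate* and a delegate, which may be
+// nullptr, are available):
+//
+//   ValueSerializer serializer(isolate, delegate);
+//   serializer.WriteHeader();
+//   if (serializer.WriteObject(object).FromMaybe(false)) {
+//     std::pair<uint8_t*, size_t> buffer = serializer.Release();
+//     // The caller now owns buffer.first, which is buffer.second bytes long.
+//   }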
+class ValueSerializer {
+ public:
+ ValueSerializer(Isolate* isolate, v8::ValueSerializer::Delegate* delegate);
+ ~ValueSerializer();
+
+ /*
+ * Writes out a header, which includes the format version.
+ */
+ void WriteHeader();
+
+ /*
+ * Serializes a V8 object into the buffer.
+ */
+ Maybe<bool> WriteObject(Handle<Object> object) V8_WARN_UNUSED_RESULT;
+
+ /*
+ * Returns the buffer, allocated via the delegate, and its size.
+ * Caller assumes ownership of the buffer.
+ */
+ std::pair<uint8_t*, size_t> Release();
+
+ /*
+   * Marks an ArrayBuffer as having its contents transferred out of band.
+ * Pass the corresponding JSArrayBuffer in the deserializing context to
+ * ValueDeserializer::TransferArrayBuffer.
+ */
+ void TransferArrayBuffer(uint32_t transfer_id,
+ Handle<JSArrayBuffer> array_buffer);
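+  // For example, an embedder transferring a buffer out of band might call
+  //   serializer.TransferArrayBuffer(0, buffer);
+  // before WriteObject, and pair it with
+  //   deserializer.TransferArrayBuffer(0, buffer);
+  // on the receiving side (a sketch; the id 0 is an arbitrary choice).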
+
+ /*
+ * Publicly exposed wire format writing methods.
+ * These are intended for use within the delegate's WriteHostObject method.
+ */
+ void WriteUint32(uint32_t value);
+ void WriteUint64(uint64_t value);
+ void WriteRawBytes(const void* source, size_t length);
+ void WriteDouble(double value);
+
+ /*
+   * Indicates whether to treat ArrayBufferView objects as host objects,
+ * i.e. pass them to Delegate::WriteHostObject. This should not be
+ * called when no Delegate was passed.
+ *
+ * The default is not to treat ArrayBufferViews as host objects.
+ */
+ void SetTreatArrayBufferViewsAsHostObjects(bool mode);
+
+ private:
+ // Managing allocations of the internal buffer.
+ Maybe<bool> ExpandBuffer(size_t required_capacity);
+
+ // Writing the wire format.
+ void WriteTag(SerializationTag tag);
+ template <typename T>
+ void WriteVarint(T value);
+ template <typename T>
+ void WriteZigZag(T value);
+ void WriteOneByteString(Vector<const uint8_t> chars);
+ void WriteTwoByteString(Vector<const uc16> chars);
+ void WriteBigIntContents(BigInt bigint);
+ Maybe<uint8_t*> ReserveRawBytes(size_t bytes);
+
+ // Writing V8 objects of various kinds.
+ void WriteOddball(Oddball oddball);
+ void WriteSmi(Smi smi);
+ void WriteHeapNumber(HeapNumber number);
+ void WriteMutableHeapNumber(MutableHeapNumber number);
+ void WriteBigInt(BigInt bigint);
+ void WriteString(Handle<String> string);
+ Maybe<bool> WriteJSReceiver(Handle<JSReceiver> receiver)
+ V8_WARN_UNUSED_RESULT;
+ Maybe<bool> WriteJSObject(Handle<JSObject> object) V8_WARN_UNUSED_RESULT;
+ Maybe<bool> WriteJSObjectSlow(Handle<JSObject> object) V8_WARN_UNUSED_RESULT;
+ Maybe<bool> WriteJSArray(Handle<JSArray> array) V8_WARN_UNUSED_RESULT;
+ void WriteJSDate(JSDate date);
+ Maybe<bool> WriteJSValue(Handle<JSValue> value) V8_WARN_UNUSED_RESULT;
+ void WriteJSRegExp(JSRegExp regexp);
+ Maybe<bool> WriteJSMap(Handle<JSMap> map) V8_WARN_UNUSED_RESULT;
+  Maybe<bool> WriteJSSet(Handle<JSSet> set) V8_WARN_UNUSED_RESULT;
+ Maybe<bool> WriteJSArrayBuffer(Handle<JSArrayBuffer> array_buffer)
+ V8_WARN_UNUSED_RESULT;
+  Maybe<bool> WriteJSArrayBufferView(JSArrayBufferView view);
+ Maybe<bool> WriteWasmModule(Handle<WasmModuleObject> object)
+ V8_WARN_UNUSED_RESULT;
+ Maybe<bool> WriteWasmMemory(Handle<WasmMemoryObject> object)
+ V8_WARN_UNUSED_RESULT;
+ Maybe<bool> WriteHostObject(Handle<JSObject> object) V8_WARN_UNUSED_RESULT;
+
+ /*
+ * Reads the specified keys from the object and writes key-value pairs to the
+ * buffer. Returns the number of keys actually written, which may be smaller
+ * if some keys are not own properties when accessed.
+ */
+ Maybe<uint32_t> WriteJSObjectPropertiesSlow(
+ Handle<JSObject> object, Handle<FixedArray> keys) V8_WARN_UNUSED_RESULT;
+
+ /*
+ * Asks the delegate to handle an error that occurred during data cloning by
+ * throwing an exception appropriate for the host.
+ */
+ void ThrowDataCloneError(MessageTemplate template_index);
+ V8_NOINLINE void ThrowDataCloneError(MessageTemplate template_index,
+ Handle<Object> arg0);
+
+ Maybe<bool> ThrowIfOutOfMemory();
+
+ Isolate* const isolate_;
+ v8::ValueSerializer::Delegate* const delegate_;
+ uint8_t* buffer_ = nullptr;
+ size_t buffer_size_ = 0;
+ size_t buffer_capacity_ = 0;
+ bool treat_array_buffer_views_as_host_objects_ = false;
+ bool out_of_memory_ = false;
+ Zone zone_;
+
+ // To avoid extra lookups in the identity map, ID + 1 is actually stored in
+ // the map (a stored value of zero is then a fast way of checking whether an
+ // entry is new).
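+ // For example: an object assigned ID 5 is stored with value 6, so a
+ // looked-up value of 0 unambiguously means "not yet serialized".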
+ IdentityMap<uint32_t, ZoneAllocationPolicy> id_map_;
+ uint32_t next_id_ = 0;
+
+ // A similar map, for transferred array buffers.
+ IdentityMap<uint32_t, ZoneAllocationPolicy> array_buffer_transfer_map_;
+
+ DISALLOW_COPY_AND_ASSIGN(ValueSerializer);
+};
+
+/*
+ * Deserializes values from data written with ValueSerializer, or a compatible
+ * implementation.
+ */
+class ValueDeserializer {
+ public:
+ ValueDeserializer(Isolate* isolate, Vector<const uint8_t> data,
+ v8::ValueDeserializer::Delegate* delegate);
+ ~ValueDeserializer();
+
+ /*
+ * Runs version detection logic, which may fail if the format is invalid.
+ */
+ Maybe<bool> ReadHeader() V8_WARN_UNUSED_RESULT;
+
+ /*
+ * Reads the underlying wire format version. This is mostly useful to legacy
+ * code reading old wire format versions. Must be called after ReadHeader.
+ */
+ uint32_t GetWireFormatVersion() const { return version_; }
+
+ /*
+ * Deserializes a V8 object from the buffer.
+ */
+ MaybeHandle<Object> ReadObject() V8_WARN_UNUSED_RESULT;
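+
+ /*
+ * Typical call sequence (an illustrative sketch only; error handling elided):
+ *
+ *   ValueDeserializer deserializer(isolate, data, delegate);
+ *   Handle<Object> result;
+ *   if (deserializer.ReadHeader().FromMaybe(false) &&
+ *       deserializer.ReadObject().ToHandle(&result)) {
+ *     // ... use result.
+ *   }
+ */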
+
+ /*
+ * Reads an object, consuming the entire buffer.
+ *
+ * This is required for the legacy "version 0" format, which did not allow
+ * reference deduplication, and instead relied on a "stack" model for
+ * deserializing, with the contents of objects and arrays provided first.
+ */
+ MaybeHandle<Object> ReadObjectUsingEntireBufferForLegacyFormat()
+ V8_WARN_UNUSED_RESULT;
+
+ /*
+ * Accepts the array buffer corresponding to the one passed previously to
+ * ValueSerializer::TransferArrayBuffer.
+ */
+ void TransferArrayBuffer(uint32_t transfer_id,
+ Handle<JSArrayBuffer> array_buffer);
+
+ /*
+ * Publicly exposed wire format reading methods.
+ * These are intended for use within the delegate's ReadHostObject method.
+ */
+ bool ReadUint32(uint32_t* value) V8_WARN_UNUSED_RESULT;
+ bool ReadUint64(uint64_t* value) V8_WARN_UNUSED_RESULT;
+ bool ReadDouble(double* value) V8_WARN_UNUSED_RESULT;
+ bool ReadRawBytes(size_t length, const void** data) V8_WARN_UNUSED_RESULT;
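+
+ /*
+ * Sketch of a delegate using these methods via the public
+ * v8::ValueDeserializer API (the tag 0xAB, MyDelegate, and MakeHostObject
+ * are hypothetical and mirror the serializer sketch above):
+ *
+ *   MaybeLocal<Object> MyDelegate::ReadHostObject(v8::Isolate* isolate) {
+ *     uint32_t tag;
+ *     double payload;
+ *     if (!deserializer_->ReadUint32(&tag) || tag != 0xAB ||
+ *         !deserializer_->ReadDouble(&payload)) {
+ *       return MaybeLocal<Object>();
+ *     }
+ *     return MakeHostObject(isolate, payload);
+ *   }
+ */
+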
+ void set_expect_inline_wasm(bool expect_inline_wasm) {
+ expect_inline_wasm_ = expect_inline_wasm;
+ }
+
+ private:
+ // Reading the wire format.
+ Maybe<SerializationTag> PeekTag() const V8_WARN_UNUSED_RESULT;
+ void ConsumeTag(SerializationTag peeked_tag);
+ Maybe<SerializationTag> ReadTag() V8_WARN_UNUSED_RESULT;
+ template <typename T>
+ Maybe<T> ReadVarint() V8_WARN_UNUSED_RESULT;
+ template <typename T>
+ Maybe<T> ReadZigZag() V8_WARN_UNUSED_RESULT;
+ Maybe<double> ReadDouble() V8_WARN_UNUSED_RESULT;
+ Maybe<Vector<const uint8_t>> ReadRawBytes(int size) V8_WARN_UNUSED_RESULT;
+ bool expect_inline_wasm() const { return expect_inline_wasm_; }
+
+ // Reads a string if it matches the one provided.
+ // Returns true if this was the case. Otherwise, nothing is consumed.
+ bool ReadExpectedString(Handle<String> expected) V8_WARN_UNUSED_RESULT;
+
+ // Like ReadObject, but skips logic for special cases in simulating the
+ // "stack machine".
+ MaybeHandle<Object> ReadObjectInternal() V8_WARN_UNUSED_RESULT;
+
+ // Reads a string intended to be part of a more complicated object.
+ // In wire format versions before 12, these are UTF-8 strings. From version
+ // 12 onward, they can be in any encoding permissible for a string (with the
+ // relevant tag).
+ MaybeHandle<String> ReadString() V8_WARN_UNUSED_RESULT;
+
+ // Reading V8 objects of specific kinds.
+ // The tag is assumed to have already been read.
+ MaybeHandle<BigInt> ReadBigInt() V8_WARN_UNUSED_RESULT;
+ MaybeHandle<String> ReadUtf8String() V8_WARN_UNUSED_RESULT;
+ MaybeHandle<String> ReadOneByteString() V8_WARN_UNUSED_RESULT;
+ MaybeHandle<String> ReadTwoByteString() V8_WARN_UNUSED_RESULT;
+ MaybeHandle<JSObject> ReadJSObject() V8_WARN_UNUSED_RESULT;
+ MaybeHandle<JSArray> ReadSparseJSArray() V8_WARN_UNUSED_RESULT;
+ MaybeHandle<JSArray> ReadDenseJSArray() V8_WARN_UNUSED_RESULT;
+ MaybeHandle<JSDate> ReadJSDate() V8_WARN_UNUSED_RESULT;
+ MaybeHandle<JSValue> ReadJSValue(SerializationTag tag) V8_WARN_UNUSED_RESULT;
+ MaybeHandle<JSRegExp> ReadJSRegExp() V8_WARN_UNUSED_RESULT;
+ MaybeHandle<JSMap> ReadJSMap() V8_WARN_UNUSED_RESULT;
+ MaybeHandle<JSSet> ReadJSSet() V8_WARN_UNUSED_RESULT;
+ MaybeHandle<JSArrayBuffer> ReadJSArrayBuffer(bool is_shared)
+ V8_WARN_UNUSED_RESULT;
+ MaybeHandle<JSArrayBuffer> ReadTransferredJSArrayBuffer()
+ V8_WARN_UNUSED_RESULT;
+ MaybeHandle<JSArrayBufferView> ReadJSArrayBufferView(
+ Handle<JSArrayBuffer> buffer) V8_WARN_UNUSED_RESULT;
+ MaybeHandle<JSObject> ReadWasmModule() V8_WARN_UNUSED_RESULT;
+ MaybeHandle<JSObject> ReadWasmModuleTransfer() V8_WARN_UNUSED_RESULT;
+ MaybeHandle<WasmMemoryObject> ReadWasmMemory() V8_WARN_UNUSED_RESULT;
+ MaybeHandle<JSObject> ReadHostObject() V8_WARN_UNUSED_RESULT;
+
+ /*
+ * Reads key-value pairs into the object until the specified end tag is
+ * encountered. If successful, returns the number of properties read.
+ */
+ Maybe<uint32_t> ReadJSObjectProperties(Handle<JSObject> object,
+ SerializationTag end_tag,
+ bool can_use_transitions);
+
+ // Manipulating the map from IDs to reified objects.
+ bool HasObjectWithID(uint32_t id);
+ MaybeHandle<JSReceiver> GetObjectWithID(uint32_t id);
+ void AddObjectWithID(uint32_t id, Handle<JSReceiver> object);
+
+ Isolate* const isolate_;
+ v8::ValueDeserializer::Delegate* const delegate_;
+ const uint8_t* position_;
+ const uint8_t* const end_;
+ AllocationType allocation_;
+ uint32_t version_ = 0;
+ uint32_t next_id_ = 0;
+ bool expect_inline_wasm_ = false;
+
+ // Always global handles.
+ Handle<FixedArray> id_map_;
+ MaybeHandle<SimpleNumberDictionary> array_buffer_transfer_map_;
+
+ DISALLOW_COPY_AND_ASSIGN(ValueDeserializer);
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_OBJECTS_VALUE_SERIALIZER_H_
diff --git a/deps/v8/src/objects/visitors.cc b/deps/v8/src/objects/visitors.cc
new file mode 100644
index 0000000000..7621ce1b9f
--- /dev/null
+++ b/deps/v8/src/objects/visitors.cc
@@ -0,0 +1,33 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/objects/visitors.h"
+
+#include "src/codegen/reloc-info.h"
+
+namespace v8 {
+namespace internal {
+
+const char* RootVisitor::RootName(Root root) {
+ switch (root) {
+#define ROOT_CASE(root_id, description) \
+ case Root::root_id: \
+ return description;
+ ROOT_ID_LIST(ROOT_CASE)
+#undef ROOT_CASE
+ case Root::kNumberOfRoots:
+ break;
+ }
+ UNREACHABLE();
+ return nullptr;
+}
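+
+// For reference, the ROOT_CASE expansion above generates one case per entry
+// in ROOT_ID_LIST, e.g. (a sketch of the first entry):
+//
+//   case Root::kStringTable:
+//     return "(Internalized strings)";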
+
+void ObjectVisitor::VisitRelocInfo(RelocIterator* it) {
+ for (; !it->done(); it->next()) {
+ it->rinfo()->Visit(this);
+ }
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/objects/visitors.h b/deps/v8/src/objects/visitors.h
new file mode 100644
index 0000000000..d36723b440
--- /dev/null
+++ b/deps/v8/src/objects/visitors.h
@@ -0,0 +1,157 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_VISITORS_H_
+#define V8_OBJECTS_VISITORS_H_
+
+#include "src/common/globals.h"
+#include "src/objects/code.h"
+#include "src/objects/compressed-slots.h"
+#include "src/objects/foreign.h"
+#include "src/objects/slots.h"
+
+namespace v8 {
+namespace internal {
+
+class CodeDataContainer;
+
+#define ROOT_ID_LIST(V) \
+ V(kStringTable, "(Internalized strings)") \
+ V(kExternalStringsTable, "(External strings)") \
+ V(kReadOnlyRootList, "(Read-only roots)") \
+ V(kStrongRootList, "(Strong roots)") \
+ V(kSmiRootList, "(Smi roots)") \
+ V(kBootstrapper, "(Bootstrapper)") \
+ V(kTop, "(Isolate)") \
+ V(kRelocatable, "(Relocatable)") \
+ V(kDebug, "(Debugger)") \
+ V(kCompilationCache, "(Compilation cache)") \
+ V(kHandleScope, "(Handle scope)") \
+ V(kDispatchTable, "(Dispatch table)") \
+ V(kBuiltins, "(Builtins)") \
+ V(kGlobalHandles, "(Global handles)") \
+ V(kEternalHandles, "(Eternal handles)") \
+ V(kThreadManager, "(Thread manager)") \
+ V(kStrongRoots, "(Strong roots)") \
+ V(kExtensions, "(Extensions)") \
+ V(kCodeFlusher, "(Code flusher)") \
+ V(kPartialSnapshotCache, "(Partial snapshot cache)") \
+ V(kReadOnlyObjectCache, "(Read-only object cache)") \
+ V(kWeakCollections, "(Weak collections)") \
+ V(kWrapperTracing, "(Wrapper tracing)") \
+ V(kUnknown, "(Unknown)")
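+
+// ROOT_ID_LIST is an "X macro": it takes a two-argument macro V and expands
+// to one V(id, description) invocation per root. For example, the Root enum
+// below effectively expands to (a sketch, abbreviated):
+//
+//   enum class Root { kStringTable, kExternalStringsTable, /* ... */
+//                     kUnknown, kNumberOfRoots };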
+
+class VisitorSynchronization : public AllStatic {
+ public:
+#define DECLARE_ENUM(enum_item, ignore) enum_item,
+ enum SyncTag { ROOT_ID_LIST(DECLARE_ENUM) kNumberOfSyncTags };
+#undef DECLARE_ENUM
+};
+
+enum class Root {
+#define DECLARE_ENUM(enum_item, ignore) enum_item,
+ ROOT_ID_LIST(DECLARE_ENUM)
+#undef DECLARE_ENUM
+ kNumberOfRoots
+};
+
+// Abstract base class for visiting, and optionally modifying, the
+// pointers contained in roots. Used in GC and serialization/deserialization.
+class RootVisitor {
+ public:
+ virtual ~RootVisitor() = default;
+
+ // Visits a contiguous array of pointers in the half-open range
+ // [start, end). Any or all of the values may be modified on return.
+ virtual void VisitRootPointers(Root root, const char* description,
+ FullObjectSlot start, FullObjectSlot end) = 0;
+
+ // Handy shorthand for visiting a single pointer.
+ virtual void VisitRootPointer(Root root, const char* description,
+ FullObjectSlot p) {
+ VisitRootPointers(root, description, p, p + 1);
+ }
+
+ // Intended for serialization/deserialization checking: insert, or
+ // check for the presence of, a tag at this position in the stream.
+ // Also used for marking up GC roots in heap snapshots.
+ // TODO(ulan): Remove this.
+ virtual void Synchronize(VisitorSynchronization::SyncTag tag) {}
+
+ static const char* RootName(Root root);
+};
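+
+// A minimal concrete visitor might look as follows (a hypothetical sketch;
+// CountingRootVisitor is not part of V8): it counts every root slot it is
+// shown without modifying anything.
+//
+//   class CountingRootVisitor : public RootVisitor {
+//    public:
+//     void VisitRootPointers(Root root, const char* description,
+//                            FullObjectSlot start,
+//                            FullObjectSlot end) override {
+//       count_ += static_cast<size_t>(end - start);
+//     }
+//     size_t count() const { return count_; }
+//
+//    private:
+//     size_t count_ = 0;
+//   };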
+
+class RelocIterator;
+
+// Abstract base class for visiting, and optionally modifying, the
+// pointers contained in Objects. Used in GC and serialization/deserialization.
+class ObjectVisitor {
+ public:
+ virtual ~ObjectVisitor() = default;
+
+ // Visits a contiguous array of pointers in the half-open range
+ // [start, end). Any or all of the values may be modified on return.
+ virtual void VisitPointers(HeapObject host, ObjectSlot start,
+ ObjectSlot end) = 0;
+ virtual void VisitPointers(HeapObject host, MaybeObjectSlot start,
+ MaybeObjectSlot end) = 0;
+
+ // Custom weak pointers must be ignored by the GC but not by other
+ // visitors. They are used, for example, for lists that are recreated after
+ // GC. The default implementation treats them as strong pointers. Visitors
+ // that want to ignore them must override this function with an empty
+ // implementation.
+ virtual void VisitCustomWeakPointers(HeapObject host, ObjectSlot start,
+ ObjectSlot end) {
+ VisitPointers(host, start, end);
+ }
+
+ // Handy shorthand for visiting a single pointer.
+ virtual void VisitPointer(HeapObject host, ObjectSlot p) {
+ VisitPointers(host, p, p + 1);
+ }
+ virtual void VisitPointer(HeapObject host, MaybeObjectSlot p) {
+ VisitPointers(host, p, p + 1);
+ }
+ virtual void VisitCustomWeakPointer(HeapObject host, ObjectSlot p) {
+ VisitCustomWeakPointers(host, p, p + 1);
+ }
+
+ virtual void VisitEphemeron(HeapObject host, int index, ObjectSlot key,
+ ObjectSlot value) {
+ VisitPointer(host, key);
+ VisitPointer(host, value);
+ }
+
+ // To allow lazy clearing of inline caches, the visitor has a rich
+ // interface for iterating over Code objects:
+
+ // Visits a code target in the instruction stream.
+ virtual void VisitCodeTarget(Code host, RelocInfo* rinfo) = 0;
+
+ // Visit pointer embedded into a code object.
+ virtual void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) = 0;
+
+ // Visits a runtime entry in the instruction stream.
+ virtual void VisitRuntimeEntry(Code host, RelocInfo* rinfo) {}
+
+ // Visits an external reference embedded into a code object.
+ virtual void VisitExternalReference(Code host, RelocInfo* rinfo) {}
+
+ // Visits an external reference.
+ virtual void VisitExternalReference(Foreign host, Address* p) {}
+
+ // Visits an (encoded) internal reference.
+ virtual void VisitInternalReference(Code host, RelocInfo* rinfo) {}
+
+ // Visits an off-heap target in the instruction stream.
+ virtual void VisitOffHeapTarget(Code host, RelocInfo* rinfo) {}
+
+ // Visits the relocation info using the given iterator.
+ virtual void VisitRelocInfo(RelocIterator* it);
+};
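+
+// A minimal concrete ObjectVisitor (a hypothetical sketch; not part of V8)
+// only needs to implement the pure virtual methods above:
+//
+//   class NoOpObjectVisitor : public ObjectVisitor {
+//    public:
+//     void VisitPointers(HeapObject host, ObjectSlot start,
+//                        ObjectSlot end) override {}
+//     void VisitPointers(HeapObject host, MaybeObjectSlot start,
+//                        MaybeObjectSlot end) override {}
+//     void VisitCodeTarget(Code host, RelocInfo* rinfo) override {}
+//     void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {}
+//   };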
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_OBJECTS_VISITORS_H_