author    Michaël Zasso <targos@protonmail.com>    2018-05-31 11:11:57 +0200
committer Myles Borins <mylesborins@google.com>    2018-06-01 09:58:27 +0200
commit    352a525eb984b8fa2d6f0f6fd68395e6a080bba4 (patch)
tree      a105ae93f8fd8f533cce19a429f1b6e95d6e11ca /deps/v8/src/heap
parent    faf449ca0490f5371dc6cbbc94a87eb697b00fcc (diff)
download  android-node-v8-352a525eb984b8fa2d6f0f6fd68395e6a080bba4.tar.gz
          android-node-v8-352a525eb984b8fa2d6f0f6fd68395e6a080bba4.tar.bz2
          android-node-v8-352a525eb984b8fa2d6f0f6fd68395e6a080bba4.zip
deps: update V8 to 6.7.288.43
PR-URL: https://github.com/nodejs/node/pull/19989
Reviewed-By: James M Snell <jasnell@gmail.com>
Reviewed-By: Matheus Marchini <matheus@sthima.com>
Reviewed-By: Gus Caplan <me@gus.host>
Reviewed-By: Myles Borins <myles.borins@gmail.com>
Diffstat (limited to 'deps/v8/src/heap')
-rw-r--r--  deps/v8/src/heap/array-buffer-collector.cc | 8
-rw-r--r--  deps/v8/src/heap/array-buffer-tracker-inl.h | 12
-rw-r--r--  deps/v8/src/heap/array-buffer-tracker.cc | 35
-rw-r--r--  deps/v8/src/heap/array-buffer-tracker.h | 12
-rw-r--r--  deps/v8/src/heap/code-stats.cc | 8
-rw-r--r--  deps/v8/src/heap/concurrent-marking.cc | 199
-rw-r--r--  deps/v8/src/heap/factory-inl.h | 156
-rw-r--r--  deps/v8/src/heap/factory.cc | 3930
-rw-r--r--  deps/v8/src/heap/factory.h | 1016
-rw-r--r--  deps/v8/src/heap/gc-tracer.cc | 58
-rw-r--r--  deps/v8/src/heap/gc-tracer.h | 14
-rw-r--r--  deps/v8/src/heap/heap-inl.h | 235
-rw-r--r--  deps/v8/src/heap/heap.cc | 1949
-rw-r--r--  deps/v8/src/heap/heap.h | 556
-rw-r--r--  deps/v8/src/heap/incremental-marking-inl.h | 18
-rw-r--r--  deps/v8/src/heap/incremental-marking-job.cc | 4
-rw-r--r--  deps/v8/src/heap/incremental-marking.cc | 66
-rw-r--r--  deps/v8/src/heap/incremental-marking.h | 10
-rw-r--r--  deps/v8/src/heap/invalidated-slots-inl.h | 2
-rw-r--r--  deps/v8/src/heap/item-parallel-job.cc | 12
-rw-r--r--  deps/v8/src/heap/item-parallel-job.h | 4
-rw-r--r--  deps/v8/src/heap/mark-compact-inl.h | 74
-rw-r--r--  deps/v8/src/heap/mark-compact.cc | 2504
-rw-r--r--  deps/v8/src/heap/mark-compact.h | 184
-rw-r--r--  deps/v8/src/heap/marking.cc | 10
-rw-r--r--  deps/v8/src/heap/marking.h | 2
-rw-r--r--  deps/v8/src/heap/memory-reducer.cc | 2
-rw-r--r--  deps/v8/src/heap/object-stats.cc | 77
-rw-r--r--  deps/v8/src/heap/object-stats.h | 3
-rw-r--r--  deps/v8/src/heap/objects-visiting-inl.h | 39
-rw-r--r--  deps/v8/src/heap/objects-visiting.h | 8
-rw-r--r--  deps/v8/src/heap/remembered-set.h | 19
-rw-r--r--  deps/v8/src/heap/scavenge-job.cc | 2
-rw-r--r--  deps/v8/src/heap/scavenger-inl.h | 88
-rw-r--r--  deps/v8/src/heap/scavenger.cc | 77
-rw-r--r--  deps/v8/src/heap/scavenger.h | 16
-rw-r--r--  deps/v8/src/heap/setup-heap-internal.cc | 250
-rw-r--r--  deps/v8/src/heap/spaces-inl.h | 6
-rw-r--r--  deps/v8/src/heap/spaces.cc | 115
-rw-r--r--  deps/v8/src/heap/spaces.h | 75
-rw-r--r--  deps/v8/src/heap/store-buffer.cc | 10
-rw-r--r--  deps/v8/src/heap/sweeper.cc | 77
-rw-r--r--  deps/v8/src/heap/sweeper.h | 20
43 files changed, 8288 insertions, 3674 deletions
diff --git a/deps/v8/src/heap/array-buffer-collector.cc b/deps/v8/src/heap/array-buffer-collector.cc
index cf0297bb2a..412e4ad05a 100644
--- a/deps/v8/src/heap/array-buffer-collector.cc
+++ b/deps/v8/src/heap/array-buffer-collector.cc
@@ -4,6 +4,7 @@
#include "src/heap/array-buffer-collector.h"
+#include "src/base/template-utils.h"
#include "src/heap/array-buffer-tracker.h"
#include "src/heap/heap-inl.h"
@@ -47,10 +48,9 @@ class ArrayBufferCollector::FreeingTask final : public CancelableTask {
void ArrayBufferCollector::FreeAllocationsOnBackgroundThread() {
heap_->account_external_memory_concurrently_freed();
- if (heap_->use_tasks() && FLAG_concurrent_array_buffer_freeing) {
- FreeingTask* task = new FreeingTask(heap_);
- V8::GetCurrentPlatform()->CallOnBackgroundThread(
- task, v8::Platform::kShortRunningTask);
+ if (!heap_->IsTearingDown() && FLAG_concurrent_array_buffer_freeing) {
+ V8::GetCurrentPlatform()->CallOnWorkerThread(
+ base::make_unique<FreeingTask>(heap_));
} else {
// Fallback for when concurrency is disabled/restricted.
FreeAllocations();
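
The hunk above moves array-buffer freeing from CallOnBackgroundThread with a raw Task* to CallOnWorkerThread taking a std::unique_ptr, so ownership of the task passes to the platform. A minimal standalone sketch of that ownership-transfer pattern; Task, Platform and FreeingTask below are illustrative stand-ins, not V8's actual classes:

```cpp
#include <iostream>
#include <memory>
#include <thread>
#include <utility>
#include <vector>

// Illustrative task interface; V8's real CancelableTask is richer.
class Task {
 public:
  virtual ~Task() = default;
  virtual void Run() = 0;
};

// Illustrative platform that takes ownership of posted tasks, mirroring the
// CallOnWorkerThread(std::unique_ptr<Task>) shape used in the diff.
class Platform {
 public:
  void CallOnWorkerThread(std::unique_ptr<Task> task) {
    // The unique_ptr moves into the lambda; no manual delete is needed.
    workers_.emplace_back([t = std::move(task)]() mutable { t->Run(); });
  }
  ~Platform() {
    for (auto& w : workers_) w.join();
  }

 private:
  std::vector<std::thread> workers_;
};

class FreeingTask final : public Task {
 public:
  void Run() override { std::cout << "freeing array buffers\n"; }
};

int main() {
  Platform platform;
  // Ownership is transferred at the call site, as in the new code path.
  platform.CallOnWorkerThread(std::make_unique<FreeingTask>());
}
```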
diff --git a/deps/v8/src/heap/array-buffer-tracker-inl.h b/deps/v8/src/heap/array-buffer-tracker-inl.h
index 568f149b04..8ed4a66664 100644
--- a/deps/v8/src/heap/array-buffer-tracker-inl.h
+++ b/deps/v8/src/heap/array-buffer-tracker-inl.h
@@ -51,12 +51,15 @@ void ArrayBufferTracker::Unregister(Heap* heap, JSArrayBuffer* buffer) {
template <typename Callback>
void LocalArrayBufferTracker::Free(Callback should_free) {
size_t new_retained_size = 0;
+ Isolate* isolate = heap_->isolate();
for (TrackingData::iterator it = array_buffers_.begin();
it != array_buffers_.end();) {
- JSArrayBuffer* buffer = reinterpret_cast<JSArrayBuffer*>(*it);
- const size_t length = buffer->allocation_length();
+ JSArrayBuffer* buffer = reinterpret_cast<JSArrayBuffer*>(it->first);
+ const size_t length = it->second;
if (should_free(buffer)) {
- buffer->FreeBackingStore();
+ JSArrayBuffer::FreeBackingStore(
+ isolate, {buffer->backing_store(), length, buffer->backing_store(),
+ buffer->allocation_mode(), buffer->is_wasm_memory()});
it = array_buffers_.erase(it);
} else {
new_retained_size += length;
@@ -87,7 +90,7 @@ void ArrayBufferTracker::FreeDead(Page* page, MarkingState* marking_state) {
void LocalArrayBufferTracker::Add(JSArrayBuffer* buffer, size_t length) {
DCHECK_GE(retained_size_ + length, retained_size_);
retained_size_ += length;
- auto ret = array_buffers_.insert(buffer);
+ auto ret = array_buffers_.insert({buffer, length});
USE(ret);
// Check that we indeed inserted a new value and did not overwrite an existing
// one (which would be a bug).
@@ -100,6 +103,7 @@ void LocalArrayBufferTracker::Remove(JSArrayBuffer* buffer, size_t length) {
TrackingData::iterator it = array_buffers_.find(buffer);
// Check that we indeed find a key to remove.
DCHECK(it != array_buffers_.end());
+ DCHECK_EQ(length, it->second);
array_buffers_.erase(it);
}
diff --git a/deps/v8/src/heap/array-buffer-tracker.cc b/deps/v8/src/heap/array-buffer-tracker.cc
index 5acf9b9bfb..589756fdc3 100644
--- a/deps/v8/src/heap/array-buffer-tracker.cc
+++ b/deps/v8/src/heap/array-buffer-tracker.cc
@@ -29,7 +29,7 @@ void LocalArrayBufferTracker::Process(Callback callback) {
size_t moved_size = 0;
for (TrackingData::iterator it = array_buffers_.begin();
it != array_buffers_.end();) {
- old_buffer = reinterpret_cast<JSArrayBuffer*>(*it);
+ old_buffer = reinterpret_cast<JSArrayBuffer*>(it->first);
const CallbackResult result = callback(old_buffer, &new_buffer);
if (result == kKeepEntry) {
new_retained_size += NumberToSize(old_buffer->byte_length());
@@ -51,13 +51,12 @@ void LocalArrayBufferTracker::Process(Callback callback) {
}
it = array_buffers_.erase(it);
} else if (result == kRemoveEntry) {
- // Size of freed memory is computed to avoid looking at dead objects.
- void* allocation_base = old_buffer->allocation_base();
- DCHECK_NOT_NULL(allocation_base);
-
- backing_stores_to_free->emplace_back(allocation_base,
- old_buffer->allocation_length(),
- old_buffer->allocation_mode());
+ // We pass backing_store() and stored length to the collector for freeing
+ // the backing store. Wasm allocations will go through their own tracker
+ // based on the backing store.
+ backing_stores_to_free->emplace_back(
+ old_buffer->backing_store(), it->second, old_buffer->backing_store(),
+ old_buffer->allocation_mode(), old_buffer->is_wasm_memory());
it = array_buffers_.erase(it);
} else {
UNREACHABLE();
@@ -135,5 +134,25 @@ bool ArrayBufferTracker::IsTracked(JSArrayBuffer* buffer) {
}
}
+void ArrayBufferTracker::TearDown(Heap* heap) {
+ // ArrayBuffers can only be found in NEW_SPACE and OLD_SPACE.
+ for (Page* p : *heap->old_space()) {
+ FreeAll(p);
+ }
+ NewSpace* new_space = heap->new_space();
+ if (new_space->to_space().is_committed()) {
+ for (Page* p : new_space->to_space()) {
+ FreeAll(p);
+ }
+ }
+#ifdef DEBUG
+ if (new_space->from_space().is_committed()) {
+ for (Page* p : new_space->from_space()) {
+ DCHECK(!p->contains_array_buffers());
+ }
+ }
+#endif // DEBUG
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/heap/array-buffer-tracker.h b/deps/v8/src/heap/array-buffer-tracker.h
index 7bfc1b83f6..c9c1a5b645 100644
--- a/deps/v8/src/heap/array-buffer-tracker.h
+++ b/deps/v8/src/heap/array-buffer-tracker.h
@@ -5,7 +5,7 @@
#ifndef V8_HEAP_ARRAY_BUFFER_TRACKER_H_
#define V8_HEAP_ARRAY_BUFFER_TRACKER_H_
-#include <unordered_set>
+#include <unordered_map>
#include "src/allocation.h"
#include "src/base/platform/mutex.h"
@@ -57,6 +57,9 @@ class ArrayBufferTracker : public AllStatic {
// Returns whether a buffer is currently tracked.
static bool IsTracked(JSArrayBuffer* buffer);
+
+ // Tears down the tracker and frees up all registered array buffers.
+ static void TearDown(Heap* heap);
};
// LocalArrayBufferTracker tracks internalized array buffers.
@@ -108,7 +111,12 @@ class LocalArrayBufferTracker {
}
};
- typedef std::unordered_set<JSArrayBuffer*, Hasher> TrackingData;
+ // Keep track of the backing store and the corresponding length at time of
+ // registering. The length is accessed from JavaScript and can be a
+ // HeapNumber. The reason for tracking the length is that in the case of
+ // length being a HeapNumber, the buffer and its length may be stored on
+ // different memory pages, making it impossible to guarantee order of freeing.
+ typedef std::unordered_map<JSArrayBuffer*, size_t, Hasher> TrackingData;
Heap* heap_;
// The set contains raw heap pointers which are removed by the GC upon
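
The TrackingData change above replaces an unordered_set of buffers with an unordered_map keyed by buffer pointer, storing the length recorded at registration time so that freeing never has to re-read a possibly dead object. A self-contained sketch of that bookkeeping, using plain stand-in types rather than V8's JSArrayBuffer/Heap:

```cpp
#include <cassert>
#include <cstddef>
#include <cstdlib>
#include <unordered_map>

// Stand-in for a tracked buffer; only the backing store matters here.
struct Buffer {
  void* backing_store;
};

class LocalTracker {
 public:
  // Record the length at registration time, as the new TrackingData does.
  void Add(Buffer* buffer, size_t length) {
    retained_size_ += length;
    auto ret = buffers_.insert({buffer, length});
    assert(ret.second && "buffer must not already be tracked");
    (void)ret;
  }

  // Free entries for which should_free() returns true, using the stored
  // length instead of reading it back from the (possibly dead) buffer.
  template <typename Callback>
  void Free(Callback should_free) {
    size_t new_retained_size = 0;
    for (auto it = buffers_.begin(); it != buffers_.end();) {
      if (should_free(it->first)) {
        std::free(it->first->backing_store);
        it = buffers_.erase(it);
      } else {
        new_retained_size += it->second;
        ++it;
      }
    }
    retained_size_ = new_retained_size;
  }

  size_t retained_size() const { return retained_size_; }

 private:
  std::unordered_map<Buffer*, size_t> buffers_;
  size_t retained_size_ = 0;
};

int main() {
  LocalTracker tracker;
  Buffer b{std::malloc(16)};
  tracker.Add(&b, 16);
  tracker.Free([](Buffer*) { return true; });  // frees and forgets the buffer
  return tracker.retained_size() == 0 ? 0 : 1;
}
```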
diff --git a/deps/v8/src/heap/code-stats.cc b/deps/v8/src/heap/code-stats.cc
index e404101753..f2c1985296 100644
--- a/deps/v8/src/heap/code-stats.cc
+++ b/deps/v8/src/heap/code-stats.cc
@@ -199,7 +199,7 @@ void CodeStatistics::CollectCodeCommentStatistics(HeapObject* obj,
Code* code = Code::cast(obj);
RelocIterator it(code);
int delta = 0;
- const byte* prev_pc = code->instruction_start();
+ const byte* prev_pc = code->raw_instruction_start();
while (!it.done()) {
if (it.rinfo()->rmode() == RelocInfo::COMMENT) {
delta += static_cast<int>(it.rinfo()->pc() - prev_pc);
@@ -209,9 +209,9 @@ void CodeStatistics::CollectCodeCommentStatistics(HeapObject* obj,
it.next();
}
- DCHECK(code->instruction_start() <= prev_pc &&
- prev_pc <= code->instruction_end());
- delta += static_cast<int>(code->instruction_end() - prev_pc);
+ DCHECK(code->raw_instruction_start() <= prev_pc &&
+ prev_pc <= code->raw_instruction_end());
+ delta += static_cast<int>(code->raw_instruction_end() - prev_pc);
EnterComment(isolate, "NoComment", delta);
}
#endif
diff --git a/deps/v8/src/heap/concurrent-marking.cc b/deps/v8/src/heap/concurrent-marking.cc
index 3aafd191cc..ca2afb8cdf 100644
--- a/deps/v8/src/heap/concurrent-marking.cc
+++ b/deps/v8/src/heap/concurrent-marking.cc
@@ -7,6 +7,8 @@
#include <stack>
#include <unordered_map>
+#include "include/v8config.h"
+#include "src/base/template-utils.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/heap-inl.h"
#include "src/heap/heap.h"
@@ -90,12 +92,61 @@ class ConcurrentMarkingVisitor final
return marking_state_.GreyToBlack(object);
}
+ bool AllowDefaultJSObjectVisit() { return false; }
+
+ void ProcessStrongHeapObject(HeapObject* host, Object** slot,
+ HeapObject* heap_object) {
+ MarkObject(heap_object);
+ MarkCompactCollector::RecordSlot(host, slot, heap_object);
+ }
+
+ void ProcessWeakHeapObject(HeapObject* host, HeapObjectReference** slot,
+ HeapObject* heap_object) {
+#ifdef THREAD_SANITIZER
+ // Perform a dummy acquire load to tell TSAN that there is no data race
+ // in mark-bit initialization. See MemoryChunk::Initialize for the
+ // corresponding release store.
+ MemoryChunk* chunk = MemoryChunk::FromAddress(heap_object->address());
+ CHECK_NOT_NULL(chunk->synchronized_heap());
+#endif
+ if (marking_state_.IsBlackOrGrey(heap_object)) {
+ // Weak references with live values are directly processed here to
+ // reduce the processing time of weak cells during the main GC
+ // pause.
+ MarkCompactCollector::RecordSlot(host, slot, heap_object);
+ } else {
+ // If we do not know about liveness of the value, we have to process
+ // the reference when we know the liveness of the whole transitive
+ // closure.
+ weak_objects_->weak_references.Push(task_id_, std::make_pair(host, slot));
+ }
+ }
+
void VisitPointers(HeapObject* host, Object** start, Object** end) override {
for (Object** slot = start; slot < end; slot++) {
Object* object = base::AsAtomicPointer::Relaxed_Load(slot);
- if (!object->IsHeapObject()) continue;
- MarkObject(HeapObject::cast(object));
- MarkCompactCollector::RecordSlot(host, slot, object);
+ DCHECK(!HasWeakHeapObjectTag(object));
+ if (object->IsHeapObject()) {
+ ProcessStrongHeapObject(host, slot, HeapObject::cast(object));
+ }
+ }
+ }
+
+ void VisitPointers(HeapObject* host, MaybeObject** start,
+ MaybeObject** end) override {
+ for (MaybeObject** slot = start; slot < end; slot++) {
+ MaybeObject* object = base::AsAtomicPointer::Relaxed_Load(slot);
+ HeapObject* heap_object;
+ if (object->ToStrongHeapObject(&heap_object)) {
+ // If the reference changes concurrently from strong to weak, the write
+ // barrier will treat the weak reference as strong, so we won't miss the
+ // weak reference.
+ ProcessStrongHeapObject(host, reinterpret_cast<Object**>(slot),
+ heap_object);
+ } else if (object->ToWeakHeapObject(&heap_object)) {
+ ProcessWeakHeapObject(
+ host, reinterpret_cast<HeapObjectReference**>(slot), heap_object);
+ }
}
}
@@ -103,6 +154,7 @@ class ConcurrentMarkingVisitor final
for (int i = 0; i < snapshot.number_of_slots(); i++) {
Object** slot = snapshot.slot(i);
Object* object = snapshot.value(i);
+ DCHECK(!HasWeakHeapObjectTag(object));
if (!object->IsHeapObject()) continue;
MarkObject(HeapObject::cast(object));
MarkCompactCollector::RecordSlot(host, slot, object);
@@ -114,18 +166,19 @@ class ConcurrentMarkingVisitor final
// ===========================================================================
int VisitJSObject(Map* map, JSObject* object) {
- int size = JSObject::BodyDescriptor::SizeOf(map, object);
- int used_size = map->UsedInstanceSize();
- DCHECK_LE(used_size, size);
- DCHECK_GE(used_size, JSObject::kHeaderSize);
- const SlotSnapshot& snapshot = MakeSlotSnapshot(map, object, used_size);
- if (!ShouldVisit(object)) return 0;
- VisitPointersInSnapshot(object, snapshot);
- return size;
+ return VisitJSObjectSubclass(map, object);
}
int VisitJSObjectFast(Map* map, JSObject* object) {
- return VisitJSObject(map, object);
+ return VisitJSObjectSubclass(map, object);
+ }
+
+ int VisitJSArrayBuffer(Map* map, JSArrayBuffer* object) {
+ return VisitJSObjectSubclass(map, object);
+ }
+
+ int VisitWasmInstanceObject(Map* map, WasmInstanceObject* object) {
+ return VisitJSObjectSubclass(map, object);
}
int VisitJSApiObject(Map* map, JSObject* object) {
@@ -136,6 +189,17 @@ class ConcurrentMarkingVisitor final
return 0;
}
+ int VisitJSFunction(Map* map, JSFunction* object) {
+ int size = JSFunction::BodyDescriptorWeak::SizeOf(map, object);
+ int used_size = map->UsedInstanceSize();
+ DCHECK_LE(used_size, size);
+ DCHECK_GE(used_size, JSObject::kHeaderSize);
+ const SlotSnapshot& snapshot = MakeSlotSnapshotWeak(map, object, used_size);
+ if (!ShouldVisit(object)) return 0;
+ VisitPointersInSnapshot(object, snapshot);
+ return size;
+ }
+
// ===========================================================================
// Strings with pointers =====================================================
// ===========================================================================
@@ -187,17 +251,11 @@ class ConcurrentMarkingVisitor final
// ===========================================================================
int VisitFixedArray(Map* map, FixedArray* object) {
- // The synchronized_length() function checks that the length is a Smi.
- // This is not necessarily the case if the array is being left-trimmed.
- Object* length = object->unchecked_synchronized_length();
- if (!ShouldVisit(object)) return 0;
- // The cached length must be the actual length as the array is not black.
- // Left trimming marks the array black before over-writing the length.
- DCHECK(length->IsSmi());
- int size = FixedArray::SizeFor(Smi::ToInt(length));
- VisitMapPointer(object, object->map_slot());
- FixedArray::BodyDescriptor::IterateBody(object, size, this);
- return size;
+ return VisitLeftTrimmableArray(map, object);
+ }
+
+ int VisitFixedDoubleArray(Map* map, FixedDoubleArray* object) {
+ return VisitLeftTrimmableArray(map, object);
}
// ===========================================================================
@@ -217,7 +275,7 @@ class ConcurrentMarkingVisitor final
if (!ShouldVisit(object)) return 0;
int size = BytecodeArray::BodyDescriptorWeak::SizeOf(map, object);
VisitMapPointer(object, object->map_slot());
- BytecodeArray::BodyDescriptorWeak::IterateBody(object, size, this);
+ BytecodeArray::BodyDescriptorWeak::IterateBody(map, object, size, this);
object->MakeOlder();
return size;
}
@@ -226,7 +284,7 @@ class ConcurrentMarkingVisitor final
if (!ShouldVisit(object)) return 0;
int size = AllocationSite::BodyDescriptorWeak::SizeOf(map, object);
VisitMapPointer(object, object->map_slot());
- AllocationSite::BodyDescriptorWeak::IterateBody(object, size, this);
+ AllocationSite::BodyDescriptorWeak::IterateBody(map, object, size, this);
return size;
}
@@ -234,15 +292,7 @@ class ConcurrentMarkingVisitor final
if (!ShouldVisit(object)) return 0;
int size = CodeDataContainer::BodyDescriptorWeak::SizeOf(map, object);
VisitMapPointer(object, object->map_slot());
- CodeDataContainer::BodyDescriptorWeak::IterateBody(object, size, this);
- return size;
- }
-
- int VisitJSFunction(Map* map, JSFunction* object) {
- if (!ShouldVisit(object)) return 0;
- int size = JSFunction::BodyDescriptorWeak::SizeOf(map, object);
- VisitMapPointer(object, object->map_slot());
- JSFunction::BodyDescriptorWeak::IterateBody(object, size, this);
+ CodeDataContainer::BodyDescriptorWeak::IterateBody(map, object, size, this);
return size;
}
@@ -255,7 +305,7 @@ class ConcurrentMarkingVisitor final
VisitPointer(map, HeapObject::RawField(map, Map::kPrototypeOffset));
VisitPointer(
map, HeapObject::RawField(map, Map::kConstructorOrBackPointerOffset));
- VisitPointer(map, HeapObject::RawField(
+ VisitPointer(map, HeapObject::RawMaybeWeakField(
map, Map::kTransitionsOrPrototypeInfoOffset));
VisitPointer(map, HeapObject::RawField(map, Map::kDependentCodeOffset));
VisitPointer(map, HeapObject::RawField(map, Map::kWeakCellCacheOffset));
@@ -268,7 +318,7 @@ class ConcurrentMarkingVisitor final
if (!ShouldVisit(object)) return 0;
int size = Context::BodyDescriptorWeak::SizeOf(map, object);
VisitMapPointer(object, object->map_slot());
- Context::BodyDescriptorWeak::IterateBody(object, size, this);
+ Context::BodyDescriptorWeak::IterateBody(map, object, size, this);
return size;
}
@@ -276,7 +326,7 @@ class ConcurrentMarkingVisitor final
if (!ShouldVisit(array)) return 0;
VisitMapPointer(array, array->map_slot());
int size = TransitionArray::BodyDescriptor::SizeOf(map, array);
- TransitionArray::BodyDescriptor::IterateBody(array, size, this);
+ TransitionArray::BodyDescriptor::IterateBody(map, array, size, this);
weak_objects_->transition_arrays.Push(task_id_, array);
return size;
}
@@ -338,18 +388,59 @@ class ConcurrentMarkingVisitor final
}
}
+ void VisitPointers(HeapObject* host, MaybeObject** start,
+ MaybeObject** end) override {
+ // This should never happen, because we don't use snapshotting for objects
+ // which contain weak references.
+ UNREACHABLE();
+ }
+
private:
SlotSnapshot* slot_snapshot_;
};
template <typename T>
+ int VisitJSObjectSubclass(Map* map, T* object) {
+ int size = T::BodyDescriptor::SizeOf(map, object);
+ int used_size = map->UsedInstanceSize();
+ DCHECK_LE(used_size, size);
+ DCHECK_GE(used_size, T::kHeaderSize);
+ const SlotSnapshot& snapshot = MakeSlotSnapshot(map, object, used_size);
+ if (!ShouldVisit(object)) return 0;
+ VisitPointersInSnapshot(object, snapshot);
+ return size;
+ }
+
+ template <typename T>
+ int VisitLeftTrimmableArray(Map* map, T* object) {
+ // The synchronized_length() function checks that the length is a Smi.
+ // This is not necessarily the case if the array is being left-trimmed.
+ Object* length = object->unchecked_synchronized_length();
+ if (!ShouldVisit(object)) return 0;
+ // The cached length must be the actual length as the array is not black.
+ // Left trimming marks the array black before over-writing the length.
+ DCHECK(length->IsSmi());
+ int size = T::SizeFor(Smi::ToInt(length));
+ VisitMapPointer(object, object->map_slot());
+ T::BodyDescriptor::IterateBody(map, object, size, this);
+ return size;
+ }
+
+ template <typename T>
const SlotSnapshot& MakeSlotSnapshot(Map* map, T* object, int size) {
- // TODO(ulan): Iterate only the existing fields and skip slack at the end
- // of the object.
SlotSnapshottingVisitor visitor(&slot_snapshot_);
visitor.VisitPointer(object,
reinterpret_cast<Object**>(object->map_slot()));
- T::BodyDescriptor::IterateBody(object, size, &visitor);
+ T::BodyDescriptor::IterateBody(map, object, size, &visitor);
+ return slot_snapshot_;
+ }
+
+ template <typename T>
+ const SlotSnapshot& MakeSlotSnapshotWeak(Map* map, T* object, int size) {
+ SlotSnapshottingVisitor visitor(&slot_snapshot_);
+ visitor.VisitPointer(object,
+ reinterpret_cast<Object**>(object->map_slot()));
+ T::BodyDescriptorWeak::IterateBody(map, object, size, &visitor);
return slot_snapshot_;
}
ConcurrentMarking::MarkingWorklist::View shared_;
@@ -484,6 +575,7 @@ void ConcurrentMarking::Run(int task_id, TaskState* task_state) {
weak_objects_->weak_cells.FlushToGlobal(task_id);
weak_objects_->transition_arrays.FlushToGlobal(task_id);
+ weak_objects_->weak_references.FlushToGlobal(task_id);
base::AsAtomicWord::Relaxed_Store<size_t>(&task_state->marked_bytes, 0);
total_marked_bytes_.Increment(marked_bytes);
{
@@ -501,15 +593,24 @@ void ConcurrentMarking::Run(int task_id, TaskState* task_state) {
}
void ConcurrentMarking::ScheduleTasks() {
- DCHECK(heap_->use_tasks());
+ DCHECK(!heap_->IsTearingDown());
if (!FLAG_concurrent_marking) return;
base::LockGuard<base::Mutex> guard(&pending_lock_);
DCHECK_EQ(0, pending_task_count_);
if (task_count_ == 0) {
- task_count_ = Max(
- 1, Min(kMaxTasks,
- static_cast<int>(V8::GetCurrentPlatform()
- ->NumberOfAvailableBackgroundThreads())));
+ static const int num_cores =
+ V8::GetCurrentPlatform()->NumberOfWorkerThreads() + 1;
+#if defined(V8_OS_MACOSX)
+ // Mac OSX 10.11 and prior seems to have trouble when doing concurrent
+ // marking on competing hyper-threads (regresses Octane/Splay). As such,
+ // only use num_cores/2, leaving one of those for the main thread.
+ // TODO(ulan): Use all cores on Mac 10.12+.
+ task_count_ = Max(1, Min(kMaxTasks, (num_cores / 2) - 1));
+#else // defined(OS_MACOSX)
+ // On other platforms use all logical cores, leaving one for the main
+ // thread.
+ task_count_ = Max(1, Min(kMaxTasks, num_cores - 1));
+#endif // defined(OS_MACOSX)
}
// Task id 0 is for the main thread.
for (int i = 1; i <= task_count_; i++) {
@@ -521,17 +622,17 @@ void ConcurrentMarking::ScheduleTasks() {
task_state_[i].preemption_request.SetValue(false);
is_pending_[i] = true;
++pending_task_count_;
- Task* task = new Task(heap_->isolate(), this, &task_state_[i], i);
+ auto task =
+ base::make_unique<Task>(heap_->isolate(), this, &task_state_[i], i);
cancelable_id_[i] = task->id();
- V8::GetCurrentPlatform()->CallOnBackgroundThread(
- task, v8::Platform::kShortRunningTask);
+ V8::GetCurrentPlatform()->CallOnWorkerThread(std::move(task));
}
}
DCHECK_EQ(task_count_, pending_task_count_);
}
void ConcurrentMarking::RescheduleTasksIfNeeded() {
- if (!FLAG_concurrent_marking || !heap_->use_tasks()) return;
+ if (!FLAG_concurrent_marking || heap_->IsTearingDown()) return;
{
base::LockGuard<base::Mutex> guard(&pending_lock_);
if (pending_task_count_ > 0) return;
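
The new MaybeObject** VisitPointers in the hunks above splits slots into strong and weak handling: strong targets are marked and recorded immediately, weak targets that are already marked are only recorded, and weak targets of unknown liveness are deferred to a weak_references worklist for the atomic pause. A simplified standalone sketch of that dispatch; MaybeRef, Marker and the containers below are illustrative stand-ins, not V8's types:

```cpp
#include <iostream>
#include <unordered_set>
#include <utility>
#include <vector>

struct HeapObject { int id; };

// Illustrative tagged reference: a strong or weak pointer to a HeapObject.
struct MaybeRef {
  HeapObject* target;
  bool is_weak;
};

class Marker {
 public:
  void VisitSlot(HeapObject* host, MaybeRef* slot) {
    if (!slot->is_weak) {
      ProcessStrong(host, slot);
    } else {
      ProcessWeak(host, slot);
    }
  }

  size_t deferred() const { return weak_worklist_.size(); }

 private:
  void ProcessStrong(HeapObject* host, MaybeRef* slot) {
    marked_.insert(slot->target);            // MarkObject
    recorded_slots_.push_back({host, slot});  // RecordSlot
  }

  void ProcessWeak(HeapObject* host, MaybeRef* slot) {
    if (marked_.count(slot->target)) {
      // Value already known to be live: record the slot now and avoid
      // extra work during the main GC pause.
      recorded_slots_.push_back({host, slot});
    } else {
      // Liveness is unknown until the whole transitive closure is marked:
      // defer to a weak-reference worklist.
      weak_worklist_.emplace_back(host, slot);
    }
  }

  std::unordered_set<HeapObject*> marked_;
  std::vector<std::pair<HeapObject*, MaybeRef*>> recorded_slots_;
  std::vector<std::pair<HeapObject*, MaybeRef*>> weak_worklist_;
};

int main() {
  HeapObject a{1}, b{2}, host{0};
  MaybeRef strong{&a, false}, weak{&b, true};
  Marker m;
  m.VisitSlot(&host, &strong);  // marks a immediately
  m.VisitSlot(&host, &weak);    // b not yet marked: deferred
  std::cout << "deferred weak refs: " << m.deferred() << "\n";
}
```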
diff --git a/deps/v8/src/heap/factory-inl.h b/deps/v8/src/heap/factory-inl.h
new file mode 100644
index 0000000000..21ee6dc251
--- /dev/null
+++ b/deps/v8/src/heap/factory-inl.h
@@ -0,0 +1,156 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_FACTORY_INL_H_
+#define V8_HEAP_FACTORY_INL_H_
+
+#include "src/heap/factory.h"
+
+#include "src/handles-inl.h"
+#include "src/objects-inl.h"
+#include "src/string-hasher.h"
+
+namespace v8 {
+namespace internal {
+
+#define ROOT_ACCESSOR(type, name, camel_name) \
+ Handle<type> Factory::name() { \
+ return Handle<type>(bit_cast<type**>( \
+ &isolate()->heap()->roots_[Heap::k##camel_name##RootIndex])); \
+ }
+ROOT_LIST(ROOT_ACCESSOR)
+#undef ROOT_ACCESSOR
+
+#define STRUCT_MAP_ACCESSOR(NAME, Name, name) \
+ Handle<Map> Factory::name##_map() { \
+ return Handle<Map>(bit_cast<Map**>( \
+ &isolate()->heap()->roots_[Heap::k##Name##MapRootIndex])); \
+ }
+STRUCT_LIST(STRUCT_MAP_ACCESSOR)
+#undef STRUCT_MAP_ACCESSOR
+
+#define DATA_HANDLER_MAP_ACCESSOR(NAME, Name, Size, name) \
+ Handle<Map> Factory::name##_map() { \
+ return Handle<Map>(bit_cast<Map**>( \
+ &isolate()->heap()->roots_[Heap::k##Name##Size##MapRootIndex])); \
+ }
+DATA_HANDLER_LIST(DATA_HANDLER_MAP_ACCESSOR)
+#undef DATA_HANDLER_MAP_ACCESSOR
+
+#define STRING_ACCESSOR(name, str) \
+ Handle<String> Factory::name() { \
+ return Handle<String>(bit_cast<String**>( \
+ &isolate()->heap()->roots_[Heap::k##name##RootIndex])); \
+ }
+INTERNALIZED_STRING_LIST(STRING_ACCESSOR)
+#undef STRING_ACCESSOR
+
+#define SYMBOL_ACCESSOR(name) \
+ Handle<Symbol> Factory::name() { \
+ return Handle<Symbol>(bit_cast<Symbol**>( \
+ &isolate()->heap()->roots_[Heap::k##name##RootIndex])); \
+ }
+PRIVATE_SYMBOL_LIST(SYMBOL_ACCESSOR)
+#undef SYMBOL_ACCESSOR
+
+#define SYMBOL_ACCESSOR(name, description) \
+ Handle<Symbol> Factory::name() { \
+ return Handle<Symbol>(bit_cast<Symbol**>( \
+ &isolate()->heap()->roots_[Heap::k##name##RootIndex])); \
+ }
+PUBLIC_SYMBOL_LIST(SYMBOL_ACCESSOR)
+WELL_KNOWN_SYMBOL_LIST(SYMBOL_ACCESSOR)
+#undef SYMBOL_ACCESSOR
+
+#define ACCESSOR_INFO_ACCESSOR(accessor_name, AccessorName) \
+ Handle<AccessorInfo> Factory::accessor_name##_accessor() { \
+ return Handle<AccessorInfo>(bit_cast<AccessorInfo**>( \
+ &isolate() \
+ ->heap() \
+ ->roots_[Heap::k##AccessorName##AccessorRootIndex])); \
+ }
+ACCESSOR_INFO_LIST(ACCESSOR_INFO_ACCESSOR)
+#undef ACCESSOR_INFO_ACCESSOR
+
+Handle<String> Factory::InternalizeString(Handle<String> string) {
+ if (string->IsInternalizedString()) return string;
+ return StringTable::LookupString(isolate(), string);
+}
+
+Handle<Name> Factory::InternalizeName(Handle<Name> name) {
+ if (name->IsUniqueName()) return name;
+ return StringTable::LookupString(isolate(), Handle<String>::cast(name));
+}
+
+Handle<String> Factory::NewSubString(Handle<String> str, int begin, int end) {
+ if (begin == 0 && end == str->length()) return str;
+ return NewProperSubString(str, begin, end);
+}
+
+Handle<Object> Factory::NewNumberFromSize(size_t value,
+ PretenureFlag pretenure) {
+ // We can't use Smi::IsValid() here because that operates on a signed
+ // intptr_t, and casting from size_t could create a bogus sign bit.
+ if (value <= static_cast<size_t>(Smi::kMaxValue)) {
+ return Handle<Object>(Smi::FromIntptr(static_cast<intptr_t>(value)),
+ isolate());
+ }
+ return NewNumber(static_cast<double>(value), pretenure);
+}
+
+Handle<Object> Factory::NewNumberFromInt64(int64_t value,
+ PretenureFlag pretenure) {
+ if (value <= std::numeric_limits<int32_t>::max() &&
+ value >= std::numeric_limits<int32_t>::min() &&
+ Smi::IsValid(static_cast<int32_t>(value))) {
+ return Handle<Object>(Smi::FromInt(static_cast<int32_t>(value)), isolate());
+ }
+ return NewNumber(static_cast<double>(value), pretenure);
+}
+
+Handle<HeapNumber> Factory::NewHeapNumber(double value, MutableMode mode,
+ PretenureFlag pretenure) {
+ Handle<HeapNumber> heap_number = NewHeapNumber(mode, pretenure);
+ heap_number->set_value(value);
+ return heap_number;
+}
+
+Handle<HeapNumber> Factory::NewHeapNumberFromBits(uint64_t bits,
+ MutableMode mode,
+ PretenureFlag pretenure) {
+ Handle<HeapNumber> heap_number = NewHeapNumber(mode, pretenure);
+ heap_number->set_value_as_bits(bits);
+ return heap_number;
+}
+
+Handle<HeapNumber> Factory::NewMutableHeapNumber(PretenureFlag pretenure) {
+ return NewHeapNumberFromBits(kHoleNanInt64, MUTABLE, pretenure);
+}
+
+Handle<JSArray> Factory::NewJSArrayWithElements(Handle<FixedArrayBase> elements,
+ ElementsKind elements_kind,
+ PretenureFlag pretenure) {
+ return NewJSArrayWithElements(elements, elements_kind, elements->length(),
+ pretenure);
+}
+
+Handle<Object> Factory::NewURIError() {
+ return NewError(isolate()->uri_error_function(),
+ MessageTemplate::kURIMalformed);
+}
+
+Handle<String> Factory::Uint32ToString(uint32_t value) {
+ Handle<String> result = NumberToString(NewNumberFromUint(value));
+
+ if (result->length() <= String::kMaxArrayIndexSize) {
+ uint32_t field = StringHasher::MakeArrayIndexHash(value, result->length());
+ result->set_hash_field(field);
+ }
+ return result;
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_HEAP_FACTORY_INL_H_
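
NewNumberFromSize above notes that Smi::IsValid cannot be used directly because it operates on a signed intptr_t, and casting from size_t could create a bogus sign bit. A minimal sketch of why the comparison is done on the unsigned side; the Smi bounds below are assumed illustrative values, not V8's exact configuration-dependent constants:

```cpp
#include <cstddef>
#include <cstdint>
#include <iostream>

// Assumed stand-ins for the Smi range; the real bounds depend on the build.
constexpr intptr_t kSmiMaxValue = (intptr_t{1} << 30) - 1;
constexpr intptr_t kSmiMinValue = -(intptr_t{1} << 30);

// Signed range check, in the spirit of Smi::IsValid.
bool IsValidSigned(intptr_t v) {
  return v >= kSmiMinValue && v <= kSmiMaxValue;
}

// Safe check for size_t: compare on the unsigned side before any cast.
bool FitsInSmi(size_t v) { return v <= static_cast<size_t>(kSmiMaxValue); }

int main() {
  std::cout << std::boolalpha;
  size_t huge = static_cast<size_t>(-1);  // e.g. SIZE_MAX
  // Casting first typically wraps to a negative value (implementation-
  // defined), which then wrongly passes the signed range check.
  std::cout << "signed check after cast: "
            << IsValidSigned(static_cast<intptr_t>(huge)) << "\n";
  std::cout << "unsigned check:          " << FitsInSmi(huge) << "\n";
}
```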
diff --git a/deps/v8/src/heap/factory.cc b/deps/v8/src/heap/factory.cc
new file mode 100644
index 0000000000..aecbc880e1
--- /dev/null
+++ b/deps/v8/src/heap/factory.cc
@@ -0,0 +1,3930 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/factory.h"
+
+#include "src/accessors.h"
+#include "src/allocation-site-scopes.h"
+#include "src/ast/ast-source-ranges.h"
+#include "src/ast/ast.h"
+#include "src/base/bits.h"
+#include "src/bootstrapper.h"
+#include "src/compiler.h"
+#include "src/conversions.h"
+#include "src/interpreter/interpreter.h"
+#include "src/isolate-inl.h"
+#include "src/macro-assembler.h"
+#include "src/objects/bigint.h"
+#include "src/objects/debug-objects-inl.h"
+#include "src/objects/frame-array-inl.h"
+#include "src/objects/microtask-inl.h"
+#include "src/objects/module.h"
+#include "src/objects/promise-inl.h"
+#include "src/objects/scope-info.h"
+#include "src/unicode-cache.h"
+#include "src/unicode-decoder.h"
+
+namespace v8 {
+namespace internal {
+
+HeapObject* Factory::AllocateRawWithImmortalMap(int size,
+ PretenureFlag pretenure,
+ Map* map,
+ AllocationAlignment alignment) {
+ HeapObject* result = isolate()->heap()->AllocateRawWithRetry(
+ size, Heap::SelectSpace(pretenure), alignment);
+ result->set_map_after_allocation(map, SKIP_WRITE_BARRIER);
+ return result;
+}
+
+HeapObject* Factory::AllocateRawWithAllocationSite(
+ Handle<Map> map, PretenureFlag pretenure,
+ Handle<AllocationSite> allocation_site) {
+ DCHECK(map->instance_type() != MAP_TYPE);
+ int size = map->instance_size();
+ if (!allocation_site.is_null()) size += AllocationMemento::kSize;
+ AllocationSpace space = Heap::SelectSpace(pretenure);
+ HeapObject* result = isolate()->heap()->AllocateRawWithRetry(size, space);
+ WriteBarrierMode write_barrier_mode =
+ space == NEW_SPACE ? SKIP_WRITE_BARRIER : UPDATE_WRITE_BARRIER;
+ result->set_map_after_allocation(*map, write_barrier_mode);
+ if (!allocation_site.is_null()) {
+ AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>(
+ reinterpret_cast<Address>(result) + map->instance_size());
+ InitializeAllocationMemento(alloc_memento, *allocation_site);
+ }
+ return result;
+}
+
+void Factory::InitializeAllocationMemento(AllocationMemento* memento,
+ AllocationSite* allocation_site) {
+ memento->set_map_after_allocation(*allocation_memento_map(),
+ SKIP_WRITE_BARRIER);
+ DCHECK(allocation_site->map() == *allocation_site_map());
+ memento->set_allocation_site(allocation_site, SKIP_WRITE_BARRIER);
+ if (FLAG_allocation_site_pretenuring) {
+ allocation_site->IncrementMementoCreateCount();
+ }
+}
+
+HeapObject* Factory::AllocateRawArray(int size, PretenureFlag pretenure) {
+ AllocationSpace space = Heap::SelectSpace(pretenure);
+ HeapObject* result = isolate()->heap()->AllocateRawWithRetry(size, space);
+ if (size > kMaxRegularHeapObjectSize && FLAG_use_marking_progress_bar) {
+ MemoryChunk* chunk = MemoryChunk::FromAddress(result->address());
+ chunk->SetFlag<AccessMode::ATOMIC>(MemoryChunk::HAS_PROGRESS_BAR);
+ }
+ return result;
+}
+
+HeapObject* Factory::AllocateRawFixedArray(int length,
+ PretenureFlag pretenure) {
+ if (length < 0 || length > FixedArray::kMaxLength) {
+ isolate()->heap()->FatalProcessOutOfMemory("invalid array length");
+ }
+ return AllocateRawArray(FixedArray::SizeFor(length), pretenure);
+}
+
+HeapObject* Factory::New(Handle<Map> map, PretenureFlag pretenure) {
+ DCHECK(map->instance_type() != MAP_TYPE);
+ int size = map->instance_size();
+ AllocationSpace space = Heap::SelectSpace(pretenure);
+ HeapObject* result = isolate()->heap()->AllocateRawWithRetry(size, space);
+ // New space objects are allocated white.
+ WriteBarrierMode write_barrier_mode =
+ space == NEW_SPACE ? SKIP_WRITE_BARRIER : UPDATE_WRITE_BARRIER;
+ result->set_map_after_allocation(*map, write_barrier_mode);
+ return result;
+}
+
+Handle<HeapObject> Factory::NewFillerObject(int size, bool double_align,
+ AllocationSpace space) {
+ AllocationAlignment alignment = double_align ? kDoubleAligned : kWordAligned;
+ Heap* heap = isolate()->heap();
+ HeapObject* result = heap->AllocateRawWithRetry(size, space, alignment);
+#ifdef DEBUG
+ MemoryChunk* chunk = MemoryChunk::FromAddress(result->address());
+ DCHECK(chunk->owner()->identity() == space);
+#endif
+ heap->CreateFillerObjectAt(result->address(), size, ClearRecordedSlots::kNo);
+ return Handle<HeapObject>(result, isolate());
+}
+
+Handle<PrototypeInfo> Factory::NewPrototypeInfo() {
+ Handle<PrototypeInfo> result =
+ Handle<PrototypeInfo>::cast(NewStruct(PROTOTYPE_INFO_TYPE, TENURED));
+ result->set_prototype_users(FixedArrayOfWeakCells::Empty());
+ result->set_registry_slot(PrototypeInfo::UNREGISTERED);
+ result->set_bit_field(0);
+ return result;
+}
+
+Handle<EnumCache> Factory::NewEnumCache(Handle<FixedArray> keys,
+ Handle<FixedArray> indices) {
+ return Handle<EnumCache>::cast(NewTuple2(keys, indices, TENURED));
+}
+
+Handle<Tuple2> Factory::NewTuple2(Handle<Object> value1, Handle<Object> value2,
+ PretenureFlag pretenure) {
+ Handle<Tuple2> result =
+ Handle<Tuple2>::cast(NewStruct(TUPLE2_TYPE, pretenure));
+ result->set_value1(*value1);
+ result->set_value2(*value2);
+ return result;
+}
+
+Handle<Tuple3> Factory::NewTuple3(Handle<Object> value1, Handle<Object> value2,
+ Handle<Object> value3,
+ PretenureFlag pretenure) {
+ Handle<Tuple3> result =
+ Handle<Tuple3>::cast(NewStruct(TUPLE3_TYPE, pretenure));
+ result->set_value1(*value1);
+ result->set_value2(*value2);
+ result->set_value3(*value3);
+ return result;
+}
+
+Handle<ContextExtension> Factory::NewContextExtension(
+ Handle<ScopeInfo> scope_info, Handle<Object> extension) {
+ Handle<ContextExtension> result = Handle<ContextExtension>::cast(
+ NewStruct(CONTEXT_EXTENSION_TYPE, TENURED));
+ result->set_scope_info(*scope_info);
+ result->set_extension(*extension);
+ return result;
+}
+
+Handle<ConstantElementsPair> Factory::NewConstantElementsPair(
+ ElementsKind elements_kind, Handle<FixedArrayBase> constant_values) {
+ Handle<ConstantElementsPair> result =
+ Handle<ConstantElementsPair>::cast(NewStruct(TUPLE2_TYPE, TENURED));
+ result->set_elements_kind(elements_kind);
+ result->set_constant_values(*constant_values);
+ return result;
+}
+
+Handle<TemplateObjectDescription> Factory::NewTemplateObjectDescription(
+ Handle<FixedArray> raw_strings, Handle<FixedArray> cooked_strings) {
+ DCHECK_EQ(raw_strings->length(), cooked_strings->length());
+ DCHECK_LT(0, raw_strings->length());
+ Handle<TemplateObjectDescription> result =
+ Handle<TemplateObjectDescription>::cast(NewStruct(TUPLE2_TYPE, TENURED));
+ result->set_raw_strings(*raw_strings);
+ result->set_cooked_strings(*cooked_strings);
+ return result;
+}
+
+Handle<Oddball> Factory::NewOddball(Handle<Map> map, const char* to_string,
+ Handle<Object> to_number,
+ const char* type_of, byte kind) {
+ Handle<Oddball> oddball(Oddball::cast(New(map, TENURED)), isolate());
+ Oddball::Initialize(isolate(), oddball, to_string, to_number, type_of, kind);
+ return oddball;
+}
+
+Handle<PropertyArray> Factory::NewPropertyArray(int length,
+ PretenureFlag pretenure) {
+ DCHECK_LE(0, length);
+ if (length == 0) return empty_property_array();
+ HeapObject* result = AllocateRawFixedArray(length, pretenure);
+ result->set_map_after_allocation(*property_array_map(), SKIP_WRITE_BARRIER);
+ Handle<PropertyArray> array(PropertyArray::cast(result), isolate());
+ array->initialize_length(length);
+ MemsetPointer(array->data_start(), *undefined_value(), length);
+ return array;
+}
+
+Handle<FixedArray> Factory::NewFixedArrayWithFiller(
+ Heap::RootListIndex map_root_index, int length, Object* filler,
+ PretenureFlag pretenure) {
+ HeapObject* result = AllocateRawFixedArray(length, pretenure);
+ DCHECK(Heap::RootIsImmortalImmovable(map_root_index));
+ Map* map = Map::cast(isolate()->heap()->root(map_root_index));
+ result->set_map_after_allocation(map, SKIP_WRITE_BARRIER);
+ Handle<FixedArray> array(FixedArray::cast(result), isolate());
+ array->set_length(length);
+ MemsetPointer(array->data_start(), filler, length);
+ return array;
+}
+
+template <typename T>
+Handle<T> Factory::NewFixedArrayWithMap(Heap::RootListIndex map_root_index,
+ int length, PretenureFlag pretenure) {
+ static_assert(std::is_base_of<FixedArray, T>::value,
+ "T must be a descendant of FixedArray");
+ // Zero-length case must be handled outside, where the knowledge about
+ // the map is.
+ DCHECK_LT(0, length);
+ return Handle<T>::cast(NewFixedArrayWithFiller(
+ map_root_index, length, *undefined_value(), pretenure));
+}
+
+template Handle<FixedArray> Factory::NewFixedArrayWithMap<FixedArray>(
+ Heap::RootListIndex, int, PretenureFlag);
+
+Handle<FixedArray> Factory::NewFixedArray(int length, PretenureFlag pretenure) {
+ DCHECK_LE(0, length);
+ if (length == 0) return empty_fixed_array();
+ return NewFixedArrayWithFiller(Heap::kFixedArrayMapRootIndex, length,
+ *undefined_value(), pretenure);
+}
+
+Handle<WeakFixedArray> Factory::NewWeakFixedArray(int length,
+ PretenureFlag pretenure) {
+ DCHECK_LE(0, length);
+ if (length == 0) return empty_weak_fixed_array();
+ HeapObject* result =
+ AllocateRawArray(WeakFixedArray::SizeFor(length), pretenure);
+ DCHECK(Heap::RootIsImmortalImmovable(Heap::kWeakFixedArrayMapRootIndex));
+ result->set_map_after_allocation(*weak_fixed_array_map(), SKIP_WRITE_BARRIER);
+ Handle<WeakFixedArray> array(WeakFixedArray::cast(result), isolate());
+ array->set_length(length);
+ MemsetPointer(array->data_start(),
+ HeapObjectReference::Strong(*undefined_value()), length);
+ return array;
+}
+
+MaybeHandle<FixedArray> Factory::TryNewFixedArray(int length,
+ PretenureFlag pretenure) {
+ DCHECK_LE(0, length);
+ if (length == 0) return empty_fixed_array();
+
+ int size = FixedArray::SizeFor(length);
+ AllocationSpace space = Heap::SelectSpace(pretenure);
+ Heap* heap = isolate()->heap();
+ AllocationResult allocation = heap->AllocateRaw(size, space);
+ HeapObject* result = nullptr;
+ if (!allocation.To(&result)) return MaybeHandle<FixedArray>();
+ if (size > kMaxRegularHeapObjectSize && FLAG_use_marking_progress_bar) {
+ MemoryChunk* chunk = MemoryChunk::FromAddress(result->address());
+ chunk->SetFlag<AccessMode::ATOMIC>(MemoryChunk::HAS_PROGRESS_BAR);
+ }
+ result->set_map_after_allocation(*fixed_array_map(), SKIP_WRITE_BARRIER);
+ Handle<FixedArray> array(FixedArray::cast(result), isolate());
+ array->set_length(length);
+ MemsetPointer(array->data_start(), heap->undefined_value(), length);
+ return array;
+}
+
+Handle<FixedArray> Factory::NewFixedArrayWithHoles(int length,
+ PretenureFlag pretenure) {
+ DCHECK_LE(0, length);
+ if (length == 0) return empty_fixed_array();
+ return NewFixedArrayWithFiller(Heap::kFixedArrayMapRootIndex, length,
+ *the_hole_value(), pretenure);
+}
+
+Handle<FixedArray> Factory::NewUninitializedFixedArray(
+ int length, PretenureFlag pretenure) {
+ DCHECK_LE(0, length);
+ if (length == 0) return empty_fixed_array();
+
+ // TODO(ulan): As an experiment this temporarily returns an initialized fixed
+ // array. After getting canary/performance coverage, either remove the
+ // function or revert to returning an uninitialized array.
+ return NewFixedArrayWithFiller(Heap::kFixedArrayMapRootIndex, length,
+ *undefined_value(), pretenure);
+}
+
+Handle<FeedbackVector> Factory::NewFeedbackVector(
+ Handle<SharedFunctionInfo> shared, PretenureFlag pretenure) {
+ int length = shared->feedback_metadata()->slot_count();
+ DCHECK_LE(0, length);
+ int size = FeedbackVector::SizeFor(length);
+
+ HeapObject* result =
+ AllocateRawWithImmortalMap(size, pretenure, *feedback_vector_map());
+ Handle<FeedbackVector> vector(FeedbackVector::cast(result), isolate());
+ vector->set_shared_function_info(*shared);
+ vector->set_optimized_code_weak_or_smi(MaybeObject::FromSmi(Smi::FromEnum(
+ FLAG_log_function_events ? OptimizationMarker::kLogFirstExecution
+ : OptimizationMarker::kNone)));
+ vector->set_length(length);
+ vector->set_invocation_count(0);
+ vector->set_profiler_ticks(0);
+ vector->set_deopt_count(0);
+ // TODO(leszeks): Initialize based on the feedback metadata.
+ MemsetPointer(vector->slots_start(), *undefined_value(), length);
+ return vector;
+}
+
+Handle<BoilerplateDescription> Factory::NewBoilerplateDescription(
+ int boilerplate, int all_properties, int index_keys, bool has_seen_proto) {
+ DCHECK_GE(boilerplate, 0);
+ DCHECK_GE(all_properties, index_keys);
+ DCHECK_GE(index_keys, 0);
+
+ int backing_store_size =
+ all_properties - index_keys - (has_seen_proto ? 1 : 0);
+ DCHECK_GE(backing_store_size, 0);
+ bool has_different_size_backing_store = boilerplate != backing_store_size;
+
+ // Space for name and value for every boilerplate property.
+ int size = 2 * boilerplate;
+
+ if (has_different_size_backing_store) {
+ // An extra entry for the backing store size.
+ size++;
+ }
+
+ if (size == 0) {
+ return isolate()->factory()->empty_boilerplate_description();
+ }
+
+ Handle<BoilerplateDescription> description =
+ Handle<BoilerplateDescription>::cast(NewFixedArrayWithMap(
+ Heap::kBoilerplateDescriptionMapRootIndex, size, TENURED));
+
+ if (has_different_size_backing_store) {
+ DCHECK_IMPLIES((boilerplate == (all_properties - index_keys)),
+ has_seen_proto);
+ description->set_backing_store_size(isolate(), backing_store_size);
+ }
+
+ return description;
+}
+
+Handle<FixedArrayBase> Factory::NewFixedDoubleArray(int length,
+ PretenureFlag pretenure) {
+ DCHECK_LE(0, length);
+ if (length == 0) return empty_fixed_array();
+ if (length > FixedDoubleArray::kMaxLength) {
+ isolate()->heap()->FatalProcessOutOfMemory("invalid array length");
+ }
+ int size = FixedDoubleArray::SizeFor(length);
+ Map* map = *fixed_double_array_map();
+ HeapObject* result =
+ AllocateRawWithImmortalMap(size, pretenure, map, kDoubleAligned);
+ Handle<FixedDoubleArray> array(FixedDoubleArray::cast(result), isolate());
+ array->set_length(length);
+ return array;
+}
+
+Handle<FixedArrayBase> Factory::NewFixedDoubleArrayWithHoles(
+ int length, PretenureFlag pretenure) {
+ DCHECK_LE(0, length);
+ Handle<FixedArrayBase> array = NewFixedDoubleArray(length, pretenure);
+ if (length > 0) {
+ Handle<FixedDoubleArray>::cast(array)->FillWithHoles(0, length);
+ }
+ return array;
+}
+
+Handle<FeedbackMetadata> Factory::NewFeedbackMetadata(int slot_count) {
+ DCHECK_LE(0, slot_count);
+ int size = FeedbackMetadata::SizeFor(slot_count);
+ HeapObject* result =
+ AllocateRawWithImmortalMap(size, TENURED, *feedback_metadata_map());
+ Handle<FeedbackMetadata> data(FeedbackMetadata::cast(result), isolate());
+ data->set_slot_count(slot_count);
+
+ // Initialize the data section to 0.
+ int data_size = size - FeedbackMetadata::kHeaderSize;
+ byte* data_start = data->address() + FeedbackMetadata::kHeaderSize;
+ memset(data_start, 0, data_size);
+ // Fields have been zeroed out but not initialized, so this object will not
+ // pass object verification at this point.
+ return data;
+}
+
+Handle<FrameArray> Factory::NewFrameArray(int number_of_frames,
+ PretenureFlag pretenure) {
+ DCHECK_LE(0, number_of_frames);
+ Handle<FixedArray> result =
+ NewFixedArrayWithHoles(FrameArray::LengthFor(number_of_frames));
+ result->set(FrameArray::kFrameCountIndex, Smi::kZero);
+ return Handle<FrameArray>::cast(result);
+}
+
+Handle<SmallOrderedHashSet> Factory::NewSmallOrderedHashSet(
+ int capacity, PretenureFlag pretenure) {
+ DCHECK_LE(0, capacity);
+ CHECK_LE(capacity, SmallOrderedHashSet::kMaxCapacity);
+ DCHECK_EQ(0, capacity % SmallOrderedHashSet::kLoadFactor);
+
+ int size = SmallOrderedHashSet::Size(capacity);
+ Map* map = *small_ordered_hash_set_map();
+ HeapObject* result = AllocateRawWithImmortalMap(size, pretenure, map);
+ Handle<SmallOrderedHashSet> table(SmallOrderedHashSet::cast(result),
+ isolate());
+ table->Initialize(isolate(), capacity);
+ return table;
+}
+
+Handle<SmallOrderedHashMap> Factory::NewSmallOrderedHashMap(
+ int capacity, PretenureFlag pretenure) {
+ DCHECK_LE(0, capacity);
+ CHECK_LE(capacity, SmallOrderedHashMap::kMaxCapacity);
+ DCHECK_EQ(0, capacity % SmallOrderedHashMap::kLoadFactor);
+
+ int size = SmallOrderedHashMap::Size(capacity);
+ Map* map = *small_ordered_hash_map_map();
+ HeapObject* result = AllocateRawWithImmortalMap(size, pretenure, map);
+ Handle<SmallOrderedHashMap> table(SmallOrderedHashMap::cast(result),
+ isolate());
+ table->Initialize(isolate(), capacity);
+ return table;
+}
+
+Handle<OrderedHashSet> Factory::NewOrderedHashSet() {
+ return OrderedHashSet::Allocate(isolate(), OrderedHashSet::kMinCapacity);
+}
+
+Handle<OrderedHashMap> Factory::NewOrderedHashMap() {
+ return OrderedHashMap::Allocate(isolate(), OrderedHashMap::kMinCapacity);
+}
+
+Handle<AccessorPair> Factory::NewAccessorPair() {
+ Handle<AccessorPair> accessors =
+ Handle<AccessorPair>::cast(NewStruct(ACCESSOR_PAIR_TYPE, TENURED));
+ accessors->set_getter(*null_value(), SKIP_WRITE_BARRIER);
+ accessors->set_setter(*null_value(), SKIP_WRITE_BARRIER);
+ return accessors;
+}
+
+// Internalized strings are created in the old generation (data space).
+Handle<String> Factory::InternalizeUtf8String(Vector<const char> string) {
+ Utf8StringKey key(string, isolate()->heap()->HashSeed());
+ return InternalizeStringWithKey(&key);
+}
+
+Handle<String> Factory::InternalizeOneByteString(Vector<const uint8_t> string) {
+ OneByteStringKey key(string, isolate()->heap()->HashSeed());
+ return InternalizeStringWithKey(&key);
+}
+
+Handle<String> Factory::InternalizeOneByteString(
+ Handle<SeqOneByteString> string, int from, int length) {
+ SeqOneByteSubStringKey key(string, from, length);
+ return InternalizeStringWithKey(&key);
+}
+
+Handle<String> Factory::InternalizeTwoByteString(Vector<const uc16> string) {
+ TwoByteStringKey key(string, isolate()->heap()->HashSeed());
+ return InternalizeStringWithKey(&key);
+}
+
+template <class StringTableKey>
+Handle<String> Factory::InternalizeStringWithKey(StringTableKey* key) {
+ return StringTable::LookupKey(isolate(), key);
+}
+
+MaybeHandle<String> Factory::NewStringFromOneByte(Vector<const uint8_t> string,
+ PretenureFlag pretenure) {
+ int length = string.length();
+ if (length == 0) return empty_string();
+ if (length == 1) return LookupSingleCharacterStringFromCode(string[0]);
+ Handle<SeqOneByteString> result;
+ ASSIGN_RETURN_ON_EXCEPTION(isolate(), result,
+ NewRawOneByteString(string.length(), pretenure),
+ String);
+
+ DisallowHeapAllocation no_gc;
+ // Copy the characters into the new object.
+ CopyChars(SeqOneByteString::cast(*result)->GetChars(), string.start(),
+ length);
+ return result;
+}
+
+MaybeHandle<String> Factory::NewStringFromUtf8(Vector<const char> string,
+ PretenureFlag pretenure) {
+ // Check for ASCII first since this is the common case.
+ const char* ascii_data = string.start();
+ int length = string.length();
+ int non_ascii_start = String::NonAsciiStart(ascii_data, length);
+ if (non_ascii_start >= length) {
+ // If the string is ASCII, we do not need to convert the characters
+ // since UTF8 is backwards compatible with ASCII.
+ return NewStringFromOneByte(Vector<const uint8_t>::cast(string), pretenure);
+ }
+
+ // Non-ASCII and we need to decode.
+ auto non_ascii = string.SubVector(non_ascii_start, length);
+ Access<UnicodeCache::Utf8Decoder> decoder(
+ isolate()->unicode_cache()->utf8_decoder());
+ decoder->Reset(non_ascii);
+
+ int utf16_length = static_cast<int>(decoder->Utf16Length());
+ DCHECK_GT(utf16_length, 0);
+
+ // Allocate string.
+ Handle<SeqTwoByteString> result;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate(), result,
+ NewRawTwoByteString(non_ascii_start + utf16_length, pretenure), String);
+
+ // Copy ASCII portion.
+ uint16_t* data = result->GetChars();
+ for (int i = 0; i < non_ascii_start; i++) {
+ *data++ = *ascii_data++;
+ }
+
+ // Now write the remainder.
+ decoder->WriteUtf16(data, utf16_length, non_ascii);
+ return result;
+}
+
+MaybeHandle<String> Factory::NewStringFromUtf8SubString(
+ Handle<SeqOneByteString> str, int begin, int length,
+ PretenureFlag pretenure) {
+ const char* ascii_data =
+ reinterpret_cast<const char*>(str->GetChars() + begin);
+ int non_ascii_start = String::NonAsciiStart(ascii_data, length);
+ if (non_ascii_start >= length) {
+ // If the string is ASCII, we can just make a substring.
+ // TODO(v8): the pretenure flag is ignored in this case.
+ return NewSubString(str, begin, begin + length);
+ }
+
+ // Non-ASCII and we need to decode.
+ auto non_ascii = Vector<const char>(ascii_data + non_ascii_start,
+ length - non_ascii_start);
+ Access<UnicodeCache::Utf8Decoder> decoder(
+ isolate()->unicode_cache()->utf8_decoder());
+ decoder->Reset(non_ascii);
+
+ int utf16_length = static_cast<int>(decoder->Utf16Length());
+ DCHECK_GT(utf16_length, 0);
+
+ // Allocate string.
+ Handle<SeqTwoByteString> result;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate(), result,
+ NewRawTwoByteString(non_ascii_start + utf16_length, pretenure), String);
+
+ // Update pointer references, since the original string may have moved after
+ // allocation.
+ ascii_data = reinterpret_cast<const char*>(str->GetChars() + begin);
+ non_ascii = Vector<const char>(ascii_data + non_ascii_start,
+ length - non_ascii_start);
+
+ // Copy ASCII portion.
+ uint16_t* data = result->GetChars();
+ for (int i = 0; i < non_ascii_start; i++) {
+ *data++ = *ascii_data++;
+ }
+
+ // Now write the remainder.
+ decoder->WriteUtf16(data, utf16_length, non_ascii);
+ return result;
+}
+
+MaybeHandle<String> Factory::NewStringFromTwoByte(const uc16* string,
+ int length,
+ PretenureFlag pretenure) {
+ if (length == 0) return empty_string();
+ if (String::IsOneByte(string, length)) {
+ if (length == 1) return LookupSingleCharacterStringFromCode(string[0]);
+ Handle<SeqOneByteString> result;
+ ASSIGN_RETURN_ON_EXCEPTION(isolate(), result,
+ NewRawOneByteString(length, pretenure), String);
+ CopyChars(result->GetChars(), string, length);
+ return result;
+ } else {
+ Handle<SeqTwoByteString> result;
+ ASSIGN_RETURN_ON_EXCEPTION(isolate(), result,
+ NewRawTwoByteString(length, pretenure), String);
+ CopyChars(result->GetChars(), string, length);
+ return result;
+ }
+}
+
+MaybeHandle<String> Factory::NewStringFromTwoByte(Vector<const uc16> string,
+ PretenureFlag pretenure) {
+ return NewStringFromTwoByte(string.start(), string.length(), pretenure);
+}
+
+MaybeHandle<String> Factory::NewStringFromTwoByte(
+ const ZoneVector<uc16>* string, PretenureFlag pretenure) {
+ return NewStringFromTwoByte(string->data(), static_cast<int>(string->size()),
+ pretenure);
+}
+
+namespace {
+
+bool inline IsOneByte(Vector<const char> str, int chars) {
+ // TODO(dcarney): incorporate Latin-1 check when Latin-1 is supported?
+ return chars == str.length();
+}
+
+bool inline IsOneByte(Handle<String> str, int chars) {
+ return str->IsOneByteRepresentation();
+}
+
+inline void WriteOneByteData(Vector<const char> vector, uint8_t* chars,
+ int len) {
+ // Only works for one byte strings.
+ DCHECK(vector.length() == len);
+ MemCopy(chars, vector.start(), len);
+}
+
+inline void WriteTwoByteData(Vector<const char> vector, uint16_t* chars,
+ int len) {
+ unibrow::Utf8Iterator it = unibrow::Utf8Iterator(vector);
+ while (!it.Done()) {
+ DCHECK_GT(len, 0);
+ len -= 1;
+
+ uint16_t c = *it;
+ ++it;
+ DCHECK_NE(unibrow::Utf8::kBadChar, c);
+ *chars++ = c;
+ }
+ DCHECK_EQ(len, 0);
+}
+
+inline void WriteOneByteData(Handle<String> s, uint8_t* chars, int len) {
+ DCHECK(s->length() == len);
+ String::WriteToFlat(*s, chars, 0, len);
+}
+
+inline void WriteTwoByteData(Handle<String> s, uint16_t* chars, int len) {
+ DCHECK(s->length() == len);
+ String::WriteToFlat(*s, chars, 0, len);
+}
+
+} // namespace
+
+Handle<SeqOneByteString> Factory::AllocateRawOneByteInternalizedString(
+ int length, uint32_t hash_field) {
+ CHECK_GE(String::kMaxLength, length);
+ // The canonical empty_string is the only zero-length string we allow.
+ DCHECK_IMPLIES(
+ length == 0,
+ isolate()->heap()->roots_[Heap::kempty_stringRootIndex] == nullptr);
+
+ Map* map = *one_byte_internalized_string_map();
+ int size = SeqOneByteString::SizeFor(length);
+ HeapObject* result = AllocateRawWithImmortalMap(size, TENURED, map);
+ Handle<SeqOneByteString> answer(SeqOneByteString::cast(result), isolate());
+ answer->set_length(length);
+ answer->set_hash_field(hash_field);
+ DCHECK_EQ(size, answer->Size());
+ return answer;
+}
+
+Handle<String> Factory::AllocateTwoByteInternalizedString(
+ Vector<const uc16> str, uint32_t hash_field) {
+ CHECK_GE(String::kMaxLength, str.length());
+ DCHECK_NE(0, str.length()); // Use Heap::empty_string() instead.
+
+ Map* map = *internalized_string_map();
+ int size = SeqTwoByteString::SizeFor(str.length());
+ HeapObject* result = AllocateRawWithImmortalMap(size, TENURED, map);
+ Handle<String> answer(String::cast(result), isolate());
+ answer->set_length(str.length());
+ answer->set_hash_field(hash_field);
+ DCHECK_EQ(size, answer->Size());
+
+ // Fill in the characters.
+ MemCopy(answer->address() + SeqTwoByteString::kHeaderSize, str.start(),
+ str.length() * kUC16Size);
+
+ return answer;
+}
+
+template <bool is_one_byte, typename T>
+Handle<String> Factory::AllocateInternalizedStringImpl(T t, int chars,
+ uint32_t hash_field) {
+ DCHECK_LE(0, chars);
+ DCHECK_GE(String::kMaxLength, chars);
+
+ // Compute map and object size.
+ int size;
+ Map* map;
+ if (is_one_byte) {
+ map = *one_byte_internalized_string_map();
+ size = SeqOneByteString::SizeFor(chars);
+ } else {
+ map = *internalized_string_map();
+ size = SeqTwoByteString::SizeFor(chars);
+ }
+
+ HeapObject* result = AllocateRawWithImmortalMap(size, TENURED, map);
+ Handle<String> answer(String::cast(result), isolate());
+ answer->set_length(chars);
+ answer->set_hash_field(hash_field);
+ DCHECK_EQ(size, answer->Size());
+
+ if (is_one_byte) {
+ WriteOneByteData(t, SeqOneByteString::cast(*answer)->GetChars(), chars);
+ } else {
+ WriteTwoByteData(t, SeqTwoByteString::cast(*answer)->GetChars(), chars);
+ }
+ return answer;
+}
+
+Handle<String> Factory::NewInternalizedStringFromUtf8(Vector<const char> str,
+ int chars,
+ uint32_t hash_field) {
+ if (IsOneByte(str, chars)) {
+ Handle<SeqOneByteString> result =
+ AllocateRawOneByteInternalizedString(str.length(), hash_field);
+ MemCopy(result->GetChars(), str.start(), str.length());
+ return result;
+ }
+ return AllocateInternalizedStringImpl<false>(str, chars, hash_field);
+}
+
+Handle<String> Factory::NewOneByteInternalizedString(Vector<const uint8_t> str,
+ uint32_t hash_field) {
+ Handle<SeqOneByteString> result =
+ AllocateRawOneByteInternalizedString(str.length(), hash_field);
+ MemCopy(result->GetChars(), str.start(), str.length());
+ return result;
+}
+
+Handle<String> Factory::NewOneByteInternalizedSubString(
+ Handle<SeqOneByteString> string, int offset, int length,
+ uint32_t hash_field) {
+ Handle<SeqOneByteString> result =
+ AllocateRawOneByteInternalizedString(length, hash_field);
+ MemCopy(result->GetChars(), string->GetChars() + offset, length);
+ return result;
+}
+
+Handle<String> Factory::NewTwoByteInternalizedString(Vector<const uc16> str,
+ uint32_t hash_field) {
+ return AllocateTwoByteInternalizedString(str, hash_field);
+}
+
+Handle<String> Factory::NewInternalizedStringImpl(Handle<String> string,
+ int chars,
+ uint32_t hash_field) {
+ if (IsOneByte(string, chars)) {
+ return AllocateInternalizedStringImpl<true>(string, chars, hash_field);
+ }
+ return AllocateInternalizedStringImpl<false>(string, chars, hash_field);
+}
+
+namespace {
+
+MaybeHandle<Map> GetInternalizedStringMap(Factory* f, Handle<String> string) {
+ switch (string->map()->instance_type()) {
+ case STRING_TYPE:
+ return f->internalized_string_map();
+ case ONE_BYTE_STRING_TYPE:
+ return f->one_byte_internalized_string_map();
+ case EXTERNAL_STRING_TYPE:
+ return f->external_internalized_string_map();
+ case EXTERNAL_ONE_BYTE_STRING_TYPE:
+ return f->external_one_byte_internalized_string_map();
+ case EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE:
+ return f->external_internalized_string_with_one_byte_data_map();
+ case SHORT_EXTERNAL_STRING_TYPE:
+ return f->short_external_internalized_string_map();
+ case SHORT_EXTERNAL_ONE_BYTE_STRING_TYPE:
+ return f->short_external_one_byte_internalized_string_map();
+ case SHORT_EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE:
+ return f->short_external_internalized_string_with_one_byte_data_map();
+ default:
+ return MaybeHandle<Map>(); // No match found.
+ }
+}
+
+} // namespace
+
+MaybeHandle<Map> Factory::InternalizedStringMapForString(
+ Handle<String> string) {
+  // A string in new space cannot be used as an internalized string.
+ if (isolate()->heap()->InNewSpace(*string)) return MaybeHandle<Map>();
+
+ return GetInternalizedStringMap(this, string);
+}
+
+template <class StringClass>
+Handle<StringClass> Factory::InternalizeExternalString(Handle<String> string) {
+ Handle<StringClass> cast_string = Handle<StringClass>::cast(string);
+ Handle<Map> map = GetInternalizedStringMap(this, string).ToHandleChecked();
+ Handle<StringClass> external_string(StringClass::cast(New(map, TENURED)),
+ isolate());
+ external_string->set_length(cast_string->length());
+ external_string->set_hash_field(cast_string->hash_field());
+ external_string->set_resource(nullptr);
+ isolate()->heap()->RegisterExternalString(*external_string);
+ return external_string;
+}
+
+template Handle<ExternalOneByteString>
+ Factory::InternalizeExternalString<ExternalOneByteString>(Handle<String>);
+template Handle<ExternalTwoByteString>
+ Factory::InternalizeExternalString<ExternalTwoByteString>(Handle<String>);
+
+MaybeHandle<SeqOneByteString> Factory::NewRawOneByteString(
+ int length, PretenureFlag pretenure) {
+ if (length > String::kMaxLength || length < 0) {
+ THROW_NEW_ERROR(isolate(), NewInvalidStringLengthError(), SeqOneByteString);
+ }
+ DCHECK_GT(length, 0); // Use Factory::empty_string() instead.
+ int size = SeqOneByteString::SizeFor(length);
+ DCHECK_GE(SeqOneByteString::kMaxSize, size);
+
+ HeapObject* result =
+ AllocateRawWithImmortalMap(size, pretenure, *one_byte_string_map());
+ Handle<SeqOneByteString> string(SeqOneByteString::cast(result), isolate());
+ string->set_length(length);
+ string->set_hash_field(String::kEmptyHashField);
+ DCHECK_EQ(size, string->Size());
+ return string;
+}
+
+MaybeHandle<SeqTwoByteString> Factory::NewRawTwoByteString(
+ int length, PretenureFlag pretenure) {
+ if (length > String::kMaxLength || length < 0) {
+ THROW_NEW_ERROR(isolate(), NewInvalidStringLengthError(), SeqTwoByteString);
+ }
+ DCHECK_GT(length, 0); // Use Factory::empty_string() instead.
+ int size = SeqTwoByteString::SizeFor(length);
+ DCHECK_GE(SeqTwoByteString::kMaxSize, size);
+
+ HeapObject* result =
+ AllocateRawWithImmortalMap(size, pretenure, *string_map());
+ Handle<SeqTwoByteString> string(SeqTwoByteString::cast(result), isolate());
+ string->set_length(length);
+ string->set_hash_field(String::kEmptyHashField);
+ DCHECK_EQ(size, string->Size());
+ return string;
+}
+
+Handle<String> Factory::LookupSingleCharacterStringFromCode(uint32_t code) {
+ if (code <= String::kMaxOneByteCharCodeU) {
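+    // One-byte codes are served from single_character_string_cache(); on a
+    // cache miss the string is internalized and the cache entry is filled in.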
+ {
+ DisallowHeapAllocation no_allocation;
+ Object* value = single_character_string_cache()->get(code);
+ if (value != *undefined_value()) {
+ return handle(String::cast(value), isolate());
+ }
+ }
+ uint8_t buffer[1];
+ buffer[0] = static_cast<uint8_t>(code);
+ Handle<String> result =
+ InternalizeOneByteString(Vector<const uint8_t>(buffer, 1));
+ single_character_string_cache()->set(code, *result);
+ return result;
+ }
+ DCHECK_LE(code, String::kMaxUtf16CodeUnitU);
+
+ Handle<SeqTwoByteString> result = NewRawTwoByteString(1).ToHandleChecked();
+ result->SeqTwoByteStringSet(0, static_cast<uint16_t>(code));
+ return result;
+}
+
+// Returns true for a character in a range. Both limits are inclusive.
+static inline bool Between(uint32_t character, uint32_t from, uint32_t to) {
+  // This makes use of unsigned wraparound: when character < from, the
+  // subtraction underflows to a large value and the comparison fails.
+ return character - from <= to - from;
+}
+
+static inline Handle<String> MakeOrFindTwoCharacterString(Isolate* isolate,
+ uint16_t c1,
+ uint16_t c2) {
+ // Numeric strings have a different hash algorithm not known by
+ // LookupTwoCharsStringIfExists, so we skip this step for such strings.
+ if (!Between(c1, '0', '9') || !Between(c2, '0', '9')) {
+ Handle<String> result;
+ if (StringTable::LookupTwoCharsStringIfExists(isolate, c1, c2)
+ .ToHandle(&result)) {
+ return result;
+ }
+ }
+
+  // Now that we know the length is 2, we might as well make use of that
+  // fact when building the new string.
+  if (static_cast<unsigned>(c1 | c2) <= String::kMaxOneByteCharCodeU) {
+    // Both characters fit in one byte, so a one-byte string suffices. The
+    // OR-based range check above is valid because the limit below is one
+    // less than a power of two.
+    DCHECK(base::bits::IsPowerOfTwo(String::kMaxOneByteCharCodeU + 1));
+ Handle<SeqOneByteString> str =
+ isolate->factory()->NewRawOneByteString(2).ToHandleChecked();
+ uint8_t* dest = str->GetChars();
+ dest[0] = static_cast<uint8_t>(c1);
+ dest[1] = static_cast<uint8_t>(c2);
+ return str;
+ } else {
+ Handle<SeqTwoByteString> str =
+ isolate->factory()->NewRawTwoByteString(2).ToHandleChecked();
+ uc16* dest = str->GetChars();
+ dest[0] = c1;
+ dest[1] = c2;
+ return str;
+ }
+}
+
+template <typename SinkChar, typename StringType>
+Handle<String> ConcatStringContent(Handle<StringType> result,
+ Handle<String> first,
+ Handle<String> second) {
+ DisallowHeapAllocation pointer_stays_valid;
+ SinkChar* sink = result->GetChars();
+ String::WriteToFlat(*first, sink, 0, first->length());
+ String::WriteToFlat(*second, sink + first->length(), 0, second->length());
+ return result;
+}
+
+MaybeHandle<String> Factory::NewConsString(Handle<String> left,
+ Handle<String> right) {
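+  // Unwrap ThinStrings so the cons string references the actual
+  // (internalized) payload strings rather than forwarding wrappers.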
+ if (left->IsThinString()) {
+ left = handle(Handle<ThinString>::cast(left)->actual(), isolate());
+ }
+ if (right->IsThinString()) {
+ right = handle(Handle<ThinString>::cast(right)->actual(), isolate());
+ }
+ int left_length = left->length();
+ if (left_length == 0) return right;
+ int right_length = right->length();
+ if (right_length == 0) return left;
+
+ int length = left_length + right_length;
+
+ if (length == 2) {
+ uint16_t c1 = left->Get(0);
+ uint16_t c2 = right->Get(0);
+ return MakeOrFindTwoCharacterString(isolate(), c1, c2);
+ }
+
+ // Make sure that an out of memory exception is thrown if the length
+ // of the new cons string is too large.
+ if (length > String::kMaxLength || length < 0) {
+ THROW_NEW_ERROR(isolate(), NewInvalidStringLengthError(), String);
+ }
+
+ bool left_is_one_byte = left->IsOneByteRepresentation();
+ bool right_is_one_byte = right->IsOneByteRepresentation();
+ bool is_one_byte = left_is_one_byte && right_is_one_byte;
+ bool is_one_byte_data_in_two_byte_string = false;
+ if (!is_one_byte) {
+ // At least one of the strings uses two-byte representation so we
+ // can't use the fast case code for short one-byte strings below, but
+ // we can try to save memory if all chars actually fit in one-byte.
+ is_one_byte_data_in_two_byte_string =
+ left->HasOnlyOneByteChars() && right->HasOnlyOneByteChars();
+ if (is_one_byte_data_in_two_byte_string) {
+ isolate()->counters()->string_add_runtime_ext_to_one_byte()->Increment();
+ }
+ }
+
+ // If the resulting string is small make a flat string.
+ if (length < ConsString::kMinLength) {
+ // Note that neither of the two inputs can be a slice because:
+ STATIC_ASSERT(ConsString::kMinLength <= SlicedString::kMinLength);
+ DCHECK(left->IsFlat());
+ DCHECK(right->IsFlat());
+
+ STATIC_ASSERT(ConsString::kMinLength <= String::kMaxLength);
+ if (is_one_byte) {
+ Handle<SeqOneByteString> result =
+ NewRawOneByteString(length).ToHandleChecked();
+ DisallowHeapAllocation no_gc;
+ uint8_t* dest = result->GetChars();
+ // Copy left part.
+ const uint8_t* src =
+ left->IsExternalString()
+ ? Handle<ExternalOneByteString>::cast(left)->GetChars()
+ : Handle<SeqOneByteString>::cast(left)->GetChars();
+ for (int i = 0; i < left_length; i++) *dest++ = src[i];
+ // Copy right part.
+ src = right->IsExternalString()
+ ? Handle<ExternalOneByteString>::cast(right)->GetChars()
+ : Handle<SeqOneByteString>::cast(right)->GetChars();
+ for (int i = 0; i < right_length; i++) *dest++ = src[i];
+ return result;
+ }
+
+ return (is_one_byte_data_in_two_byte_string)
+ ? ConcatStringContent<uint8_t>(
+ NewRawOneByteString(length).ToHandleChecked(), left, right)
+ : ConcatStringContent<uc16>(
+ NewRawTwoByteString(length).ToHandleChecked(), left,
+ right);
+ }
+
+ bool one_byte = (is_one_byte || is_one_byte_data_in_two_byte_string);
+ return NewConsString(left, right, length, one_byte);
+}
+
+Handle<String> Factory::NewConsString(Handle<String> left, Handle<String> right,
+ int length, bool one_byte) {
+ DCHECK(!left->IsThinString());
+ DCHECK(!right->IsThinString());
+ DCHECK_GE(length, ConsString::kMinLength);
+ DCHECK_LE(length, String::kMaxLength);
+
+ Handle<ConsString> result(
+ ConsString::cast(one_byte ? New(cons_one_byte_string_map(), NOT_TENURED)
+ : New(cons_string_map(), NOT_TENURED)),
+ isolate());
+
+ DisallowHeapAllocation no_gc;
+ WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
+
+ result->set_hash_field(String::kEmptyHashField);
+ result->set_length(length);
+ result->set_first(*left, mode);
+ result->set_second(*right, mode);
+ return result;
+}
+
+Handle<String> Factory::NewSurrogatePairString(uint16_t lead, uint16_t trail) {
+ DCHECK_GE(lead, 0xD800);
+ DCHECK_LE(lead, 0xDBFF);
+ DCHECK_GE(trail, 0xDC00);
+ DCHECK_LE(trail, 0xDFFF);
+
+ Handle<SeqTwoByteString> str =
+ isolate()->factory()->NewRawTwoByteString(2).ToHandleChecked();
+ uc16* dest = str->GetChars();
+ dest[0] = lead;
+ dest[1] = trail;
+ return str;
+}
+
+Handle<String> Factory::NewProperSubString(Handle<String> str, int begin,
+ int end) {
+#if VERIFY_HEAP
+ if (FLAG_verify_heap) str->StringVerify();
+#endif
+ DCHECK(begin > 0 || end < str->length());
+
+ str = String::Flatten(str);
+
+ int length = end - begin;
+ if (length <= 0) return empty_string();
+ if (length == 1) {
+ return LookupSingleCharacterStringFromCode(str->Get(begin));
+ }
+ if (length == 2) {
+    // Optimization for two-character strings often used as keys in a
+    // decompression dictionary. Check whether we already have the string in
+    // the string table to prevent creation of many unnecessary strings.
+ uint16_t c1 = str->Get(begin);
+ uint16_t c2 = str->Get(begin + 1);
+ return MakeOrFindTwoCharacterString(isolate(), c1, c2);
+ }
+
+ if (!FLAG_string_slices || length < SlicedString::kMinLength) {
+ if (str->IsOneByteRepresentation()) {
+ Handle<SeqOneByteString> result =
+ NewRawOneByteString(length).ToHandleChecked();
+ uint8_t* dest = result->GetChars();
+ DisallowHeapAllocation no_gc;
+ String::WriteToFlat(*str, dest, begin, end);
+ return result;
+ } else {
+ Handle<SeqTwoByteString> result =
+ NewRawTwoByteString(length).ToHandleChecked();
+ uc16* dest = result->GetChars();
+ DisallowHeapAllocation no_gc;
+ String::WriteToFlat(*str, dest, begin, end);
+ return result;
+ }
+ }
+
+ int offset = begin;
+
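+  // Unwrap slices and thin strings so that slices never nest; the DCHECK
+  // below ensures the parent is a sequential or external string.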
+ if (str->IsSlicedString()) {
+ Handle<SlicedString> slice = Handle<SlicedString>::cast(str);
+ str = Handle<String>(slice->parent(), isolate());
+ offset += slice->offset();
+ }
+ if (str->IsThinString()) {
+ Handle<ThinString> thin = Handle<ThinString>::cast(str);
+ str = handle(thin->actual(), isolate());
+ }
+
+ DCHECK(str->IsSeqString() || str->IsExternalString());
+ Handle<Map> map = str->IsOneByteRepresentation()
+ ? sliced_one_byte_string_map()
+ : sliced_string_map();
+ Handle<SlicedString> slice(SlicedString::cast(New(map, NOT_TENURED)),
+ isolate());
+
+ slice->set_hash_field(String::kEmptyHashField);
+ slice->set_length(length);
+ slice->set_parent(*str);
+ slice->set_offset(offset);
+ return slice;
+}
+
+MaybeHandle<String> Factory::NewExternalStringFromOneByte(
+ const ExternalOneByteString::Resource* resource) {
+ size_t length = resource->length();
+ if (length > static_cast<size_t>(String::kMaxLength)) {
+ THROW_NEW_ERROR(isolate(), NewInvalidStringLengthError(), String);
+ }
+ if (length == 0) return empty_string();
+
+ Handle<Map> map;
+ if (resource->IsCompressible()) {
+ // TODO(hajimehoshi): Rename this to 'uncached_external_one_byte_string_map'
+ map = short_external_one_byte_string_map();
+ } else {
+ map = external_one_byte_string_map();
+ }
+ Handle<ExternalOneByteString> external_string(
+ ExternalOneByteString::cast(New(map, NOT_TENURED)), isolate());
+ external_string->set_length(static_cast<int>(length));
+ external_string->set_hash_field(String::kEmptyHashField);
+ external_string->set_resource(resource);
+
+ return external_string;
+}
+
+MaybeHandle<String> Factory::NewExternalStringFromTwoByte(
+ const ExternalTwoByteString::Resource* resource) {
+ size_t length = resource->length();
+ if (length > static_cast<size_t>(String::kMaxLength)) {
+ THROW_NEW_ERROR(isolate(), NewInvalidStringLengthError(), String);
+ }
+ if (length == 0) return empty_string();
+
+ // For small strings we check whether the resource contains only
+  // one-byte characters. If so, we use a different string map.
+ static const size_t kOneByteCheckLengthLimit = 32;
+ bool is_one_byte =
+ length <= kOneByteCheckLengthLimit &&
+ String::IsOneByte(resource->data(), static_cast<int>(length));
+ Handle<Map> map;
+ if (resource->IsCompressible()) {
+ // TODO(hajimehoshi): Rename these to 'uncached_external_string_...'.
+ map = is_one_byte ? short_external_string_with_one_byte_data_map()
+ : short_external_string_map();
+ } else {
+ map = is_one_byte ? external_string_with_one_byte_data_map()
+ : external_string_map();
+ }
+ Handle<ExternalTwoByteString> external_string(
+ ExternalTwoByteString::cast(New(map, NOT_TENURED)), isolate());
+ external_string->set_length(static_cast<int>(length));
+ external_string->set_hash_field(String::kEmptyHashField);
+ external_string->set_resource(resource);
+
+ return external_string;
+}
+
+Handle<ExternalOneByteString> Factory::NewNativeSourceString(
+ const ExternalOneByteString::Resource* resource) {
+ size_t length = resource->length();
+ DCHECK_LE(length, static_cast<size_t>(String::kMaxLength));
+
+ Handle<Map> map = native_source_string_map();
+ Handle<ExternalOneByteString> external_string(
+ ExternalOneByteString::cast(New(map, TENURED)), isolate());
+ external_string->set_length(static_cast<int>(length));
+ external_string->set_hash_field(String::kEmptyHashField);
+ external_string->set_resource(resource);
+
+ return external_string;
+}
+
+Handle<JSStringIterator> Factory::NewJSStringIterator(Handle<String> string) {
+ Handle<Map> map(isolate()->native_context()->string_iterator_map(),
+ isolate());
+ Handle<String> flat_string = String::Flatten(string);
+ Handle<JSStringIterator> iterator =
+ Handle<JSStringIterator>::cast(NewJSObjectFromMap(map));
+ iterator->set_string(*flat_string);
+ iterator->set_index(0);
+
+ return iterator;
+}
+
+Handle<Symbol> Factory::NewSymbol() {
+ // Statically ensure that it is safe to allocate symbols in paged spaces.
+ STATIC_ASSERT(Symbol::kSize <= kMaxRegularHeapObjectSize);
+
+ HeapObject* result =
+ AllocateRawWithImmortalMap(Symbol::kSize, TENURED, *symbol_map());
+
+ // Generate a random hash value.
+ int hash = isolate()->GenerateIdentityHash(Name::kHashBitMask);
+
+ Handle<Symbol> symbol(Symbol::cast(result), isolate());
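+  // Store the hash with the not-an-array-index bit set, since symbols never
+  // encode array indices in their hash field.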
+ symbol->set_hash_field(Name::kIsNotArrayIndexMask |
+ (hash << Name::kHashShift));
+ symbol->set_name(*undefined_value());
+ symbol->set_flags(0);
+ DCHECK(!symbol->is_private());
+ return symbol;
+}
+
+Handle<Symbol> Factory::NewPrivateSymbol() {
+ Handle<Symbol> symbol = NewSymbol();
+ symbol->set_is_private(true);
+ return symbol;
+}
+
+Handle<Symbol> Factory::NewPrivateFieldSymbol() {
+ Handle<Symbol> symbol = NewSymbol();
+ symbol->set_is_private_field();
+ return symbol;
+}
+
+Handle<Context> Factory::NewNativeContext() {
+ Handle<Context> context = NewFixedArrayWithMap<Context>(
+ Heap::kNativeContextMapRootIndex, Context::NATIVE_CONTEXT_SLOTS, TENURED);
+ context->set_native_context(*context);
+ context->set_errors_thrown(Smi::kZero);
+ context->set_math_random_index(Smi::kZero);
+ Handle<WeakCell> weak_cell = NewWeakCell(context);
+ context->set_self_weak_cell(*weak_cell);
+ context->set_serialized_objects(*empty_fixed_array());
+ DCHECK(context->IsNativeContext());
+ return context;
+}
+
+Handle<Context> Factory::NewScriptContext(Handle<JSFunction> function,
+ Handle<ScopeInfo> scope_info) {
+ DCHECK_EQ(scope_info->scope_type(), SCRIPT_SCOPE);
+ Handle<Context> context = NewFixedArrayWithMap<Context>(
+ Heap::kScriptContextMapRootIndex, scope_info->ContextLength(), TENURED);
+ context->set_closure(*function);
+ context->set_previous(function->context());
+ context->set_extension(*scope_info);
+ context->set_native_context(function->native_context());
+ DCHECK(context->IsScriptContext());
+ return context;
+}
+
+Handle<ScriptContextTable> Factory::NewScriptContextTable() {
+ Handle<ScriptContextTable> context_table =
+ NewFixedArrayWithMap<ScriptContextTable>(
+ Heap::kScriptContextTableMapRootIndex,
+ ScriptContextTable::kMinLength);
+ context_table->set_used(0);
+ return context_table;
+}
+
+Handle<Context> Factory::NewModuleContext(Handle<Module> module,
+ Handle<JSFunction> function,
+ Handle<ScopeInfo> scope_info) {
+ DCHECK_EQ(scope_info->scope_type(), MODULE_SCOPE);
+ Handle<Context> context = NewFixedArrayWithMap<Context>(
+ Heap::kModuleContextMapRootIndex, scope_info->ContextLength(), TENURED);
+ context->set_closure(*function);
+ context->set_previous(function->context());
+ context->set_extension(*module);
+ context->set_native_context(function->native_context());
+ DCHECK(context->IsModuleContext());
+ return context;
+}
+
+Handle<Context> Factory::NewFunctionContext(int length,
+ Handle<JSFunction> function,
+ ScopeType scope_type) {
+ DCHECK(function->shared()->scope_info()->scope_type() == scope_type);
+ DCHECK(length >= Context::MIN_CONTEXT_SLOTS);
+  Heap::RootListIndex map_root_index;
+  switch (scope_type) {
+    case EVAL_SCOPE:
+      map_root_index = Heap::kEvalContextMapRootIndex;
+      break;
+    case FUNCTION_SCOPE:
+      map_root_index = Heap::kFunctionContextMapRootIndex;
+      break;
+    default:
+      UNREACHABLE();
+  }
+  Handle<Context> context =
+      NewFixedArrayWithMap<Context>(map_root_index, length);
+ context->set_closure(*function);
+ context->set_previous(function->context());
+ context->set_extension(*the_hole_value());
+ context->set_native_context(function->native_context());
+ return context;
+}
+
+Handle<Context> Factory::NewCatchContext(Handle<JSFunction> function,
+ Handle<Context> previous,
+ Handle<ScopeInfo> scope_info,
+ Handle<String> name,
+ Handle<Object> thrown_object) {
+ STATIC_ASSERT(Context::MIN_CONTEXT_SLOTS == Context::THROWN_OBJECT_INDEX);
+ Handle<ContextExtension> extension = NewContextExtension(scope_info, name);
+ Handle<Context> context = NewFixedArrayWithMap<Context>(
+ Heap::kCatchContextMapRootIndex, Context::MIN_CONTEXT_SLOTS + 1);
+ context->set_closure(*function);
+ context->set_previous(*previous);
+ context->set_extension(*extension);
+ context->set_native_context(previous->native_context());
+ context->set(Context::THROWN_OBJECT_INDEX, *thrown_object);
+ return context;
+}
+
+Handle<Context> Factory::NewDebugEvaluateContext(Handle<Context> previous,
+ Handle<ScopeInfo> scope_info,
+ Handle<JSReceiver> extension,
+ Handle<Context> wrapped,
+ Handle<StringSet> whitelist) {
+ STATIC_ASSERT(Context::WHITE_LIST_INDEX == Context::MIN_CONTEXT_SLOTS + 1);
+ DCHECK(scope_info->IsDebugEvaluateScope());
+ Handle<ContextExtension> context_extension = NewContextExtension(
+ scope_info, extension.is_null() ? Handle<Object>::cast(undefined_value())
+ : Handle<Object>::cast(extension));
+ Handle<Context> c = NewFixedArrayWithMap<Context>(
+ Heap::kDebugEvaluateContextMapRootIndex, Context::MIN_CONTEXT_SLOTS + 2);
+ c->set_closure(wrapped.is_null() ? previous->closure() : wrapped->closure());
+ c->set_previous(*previous);
+ c->set_native_context(previous->native_context());
+ c->set_extension(*context_extension);
+ if (!wrapped.is_null()) c->set(Context::WRAPPED_CONTEXT_INDEX, *wrapped);
+ if (!whitelist.is_null()) c->set(Context::WHITE_LIST_INDEX, *whitelist);
+ return c;
+}
+
+Handle<Context> Factory::NewWithContext(Handle<JSFunction> function,
+ Handle<Context> previous,
+ Handle<ScopeInfo> scope_info,
+ Handle<JSReceiver> extension) {
+ Handle<ContextExtension> context_extension =
+ NewContextExtension(scope_info, extension);
+ Handle<Context> context = NewFixedArrayWithMap<Context>(
+ Heap::kWithContextMapRootIndex, Context::MIN_CONTEXT_SLOTS);
+ context->set_closure(*function);
+ context->set_previous(*previous);
+ context->set_extension(*context_extension);
+ context->set_native_context(previous->native_context());
+ return context;
+}
+
+Handle<Context> Factory::NewBlockContext(Handle<JSFunction> function,
+ Handle<Context> previous,
+ Handle<ScopeInfo> scope_info) {
+ DCHECK_EQ(scope_info->scope_type(), BLOCK_SCOPE);
+ Handle<Context> context = NewFixedArrayWithMap<Context>(
+ Heap::kBlockContextMapRootIndex, scope_info->ContextLength());
+ context->set_closure(*function);
+ context->set_previous(*previous);
+ context->set_extension(*scope_info);
+ context->set_native_context(previous->native_context());
+ return context;
+}
+
+Handle<Struct> Factory::NewStruct(InstanceType type, PretenureFlag pretenure) {
+ Map* map;
+ switch (type) {
+#define MAKE_CASE(NAME, Name, name) \
+ case NAME##_TYPE: \
+ map = *name##_map(); \
+ break;
+ STRUCT_LIST(MAKE_CASE)
+#undef MAKE_CASE
+ default:
+ UNREACHABLE();
+ }
+ int size = map->instance_size();
+ HeapObject* result = AllocateRawWithImmortalMap(size, pretenure, map);
+ Handle<Struct> str(Struct::cast(result), isolate());
+ str->InitializeBody(size);
+ return str;
+}
+
+Handle<AliasedArgumentsEntry> Factory::NewAliasedArgumentsEntry(
+ int aliased_context_slot) {
+ Handle<AliasedArgumentsEntry> entry = Handle<AliasedArgumentsEntry>::cast(
+ NewStruct(ALIASED_ARGUMENTS_ENTRY_TYPE, NOT_TENURED));
+ entry->set_aliased_context_slot(aliased_context_slot);
+ return entry;
+}
+
+Handle<AccessorInfo> Factory::NewAccessorInfo() {
+ Handle<AccessorInfo> info =
+ Handle<AccessorInfo>::cast(NewStruct(ACCESSOR_INFO_TYPE, TENURED));
+ info->set_name(*empty_string());
+  info->set_flags(0); // Must clear the flags; it was initialized as undefined.
+ info->set_is_sloppy(true);
+ info->set_initial_property_attributes(NONE);
+ return info;
+}
+
+Handle<Script> Factory::NewScript(Handle<String> source) {
+ // Create and initialize script object.
+ Heap* heap = isolate()->heap();
+ Handle<Script> script = Handle<Script>::cast(NewStruct(SCRIPT_TYPE, TENURED));
+ script->set_source(*source);
+ script->set_name(heap->undefined_value());
+ script->set_id(isolate()->heap()->NextScriptId());
+ script->set_line_offset(0);
+ script->set_column_offset(0);
+ script->set_context_data(heap->undefined_value());
+ script->set_type(Script::TYPE_NORMAL);
+ script->set_wrapper(heap->undefined_value());
+ script->set_line_ends(heap->undefined_value());
+ script->set_eval_from_shared_or_wrapped_arguments(heap->undefined_value());
+ script->set_eval_from_position(0);
+ script->set_shared_function_infos(*empty_weak_fixed_array(),
+ SKIP_WRITE_BARRIER);
+ script->set_flags(0);
+ script->set_host_defined_options(*empty_fixed_array());
+ heap->set_script_list(*FixedArrayOfWeakCells::Add(script_list(), script));
+ return script;
+}
+
+Handle<CallableTask> Factory::NewCallableTask(Handle<JSReceiver> callable,
+ Handle<Context> context) {
+ DCHECK(callable->IsCallable());
+ Handle<CallableTask> microtask =
+ Handle<CallableTask>::cast(NewStruct(CALLABLE_TASK_TYPE));
+ microtask->set_callable(*callable);
+ microtask->set_context(*context);
+ return microtask;
+}
+
+Handle<CallbackTask> Factory::NewCallbackTask(Handle<Foreign> callback,
+ Handle<Foreign> data) {
+ Handle<CallbackTask> microtask =
+ Handle<CallbackTask>::cast(NewStruct(CALLBACK_TASK_TYPE));
+ microtask->set_callback(*callback);
+ microtask->set_data(*data);
+ return microtask;
+}
+
+Handle<PromiseResolveThenableJobTask> Factory::NewPromiseResolveThenableJobTask(
+ Handle<JSPromise> promise_to_resolve, Handle<JSReceiver> then,
+ Handle<JSReceiver> thenable, Handle<Context> context) {
+ DCHECK(then->IsCallable());
+ Handle<PromiseResolveThenableJobTask> microtask =
+ Handle<PromiseResolveThenableJobTask>::cast(
+ NewStruct(PROMISE_RESOLVE_THENABLE_JOB_TASK_TYPE));
+ microtask->set_promise_to_resolve(*promise_to_resolve);
+ microtask->set_then(*then);
+ microtask->set_thenable(*thenable);
+ microtask->set_context(*context);
+ return microtask;
+}
+
+Handle<Foreign> Factory::NewForeign(Address addr, PretenureFlag pretenure) {
+ // Statically ensure that it is safe to allocate foreigns in paged spaces.
+ STATIC_ASSERT(Foreign::kSize <= kMaxRegularHeapObjectSize);
+ Map* map = *foreign_map();
+ HeapObject* result =
+ AllocateRawWithImmortalMap(map->instance_size(), pretenure, map);
+ Handle<Foreign> foreign(Foreign::cast(result), isolate());
+ foreign->set_foreign_address(addr);
+ return foreign;
+}
+
+Handle<ByteArray> Factory::NewByteArray(int length, PretenureFlag pretenure) {
+ DCHECK_LE(0, length);
+ if (length > ByteArray::kMaxLength) {
+ isolate()->heap()->FatalProcessOutOfMemory("invalid array length");
+ }
+ int size = ByteArray::SizeFor(length);
+ HeapObject* result =
+ AllocateRawWithImmortalMap(size, pretenure, *byte_array_map());
+ Handle<ByteArray> array(ByteArray::cast(result), isolate());
+ array->set_length(length);
+ array->clear_padding();
+ return array;
+}
+
+Handle<BytecodeArray> Factory::NewBytecodeArray(
+ int length, const byte* raw_bytecodes, int frame_size, int parameter_count,
+ Handle<FixedArray> constant_pool) {
+ DCHECK_LE(0, length);
+ if (length > BytecodeArray::kMaxLength) {
+ isolate()->heap()->FatalProcessOutOfMemory("invalid array length");
+ }
+ // Bytecode array is pretenured, so constant pool array should be too.
+ DCHECK(!isolate()->heap()->InNewSpace(*constant_pool));
+
+ int size = BytecodeArray::SizeFor(length);
+ HeapObject* result =
+ AllocateRawWithImmortalMap(size, TENURED, *bytecode_array_map());
+ Handle<BytecodeArray> instance(BytecodeArray::cast(result), isolate());
+ instance->set_length(length);
+ instance->set_frame_size(frame_size);
+ instance->set_parameter_count(parameter_count);
+ instance->set_incoming_new_target_or_generator_register(
+ interpreter::Register::invalid_value());
+ instance->set_interrupt_budget(interpreter::Interpreter::InterruptBudget());
+ instance->set_osr_loop_nesting_level(0);
+ instance->set_bytecode_age(BytecodeArray::kNoAgeBytecodeAge);
+ instance->set_constant_pool(*constant_pool);
+ instance->set_handler_table(*empty_byte_array());
+ instance->set_source_position_table(*empty_byte_array());
+ CopyBytes(instance->GetFirstBytecodeAddress(), raw_bytecodes, length);
+ instance->clear_padding();
+
+ return instance;
+}
+
+Handle<FixedTypedArrayBase> Factory::NewFixedTypedArrayWithExternalPointer(
+ int length, ExternalArrayType array_type, void* external_pointer,
+ PretenureFlag pretenure) {
+ DCHECK(0 <= length && length <= Smi::kMaxValue);
+ int size = FixedTypedArrayBase::kHeaderSize;
+ HeapObject* result = AllocateRawWithImmortalMap(
+ size, pretenure, isolate()->heap()->MapForFixedTypedArray(array_type));
+ Handle<FixedTypedArrayBase> elements(FixedTypedArrayBase::cast(result),
+ isolate());
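+  // Off-heap backing store: with a zero base pointer the external pointer is
+  // interpreted as the absolute data address.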
+ elements->set_base_pointer(Smi::kZero, SKIP_WRITE_BARRIER);
+ elements->set_external_pointer(external_pointer, SKIP_WRITE_BARRIER);
+ elements->set_length(length);
+ return elements;
+}
+
+Handle<FixedTypedArrayBase> Factory::NewFixedTypedArray(
+ size_t length, size_t byte_length, ExternalArrayType array_type,
+ bool initialize, PretenureFlag pretenure) {
+ DCHECK(0 <= length && length <= Smi::kMaxValue);
+ CHECK(byte_length <= kMaxInt - FixedTypedArrayBase::kDataOffset);
+ size_t size =
+ OBJECT_POINTER_ALIGN(byte_length + FixedTypedArrayBase::kDataOffset);
+ Map* map = isolate()->heap()->MapForFixedTypedArray(array_type);
+ AllocationAlignment alignment =
+ array_type == kExternalFloat64Array ? kDoubleAligned : kWordAligned;
+ HeapObject* object = AllocateRawWithImmortalMap(static_cast<int>(size),
+ pretenure, map, alignment);
+
+ Handle<FixedTypedArrayBase> elements(FixedTypedArrayBase::cast(object),
+ isolate());
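+  // On-heap backing store: the data pointer is computed as base_pointer plus
+  // external_pointer, so the external pointer stores only the fixed offset
+  // from the array start to its data section.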
+ elements->set_base_pointer(*elements, SKIP_WRITE_BARRIER);
+ elements->set_external_pointer(
+ ExternalReference::fixed_typed_array_base_data_offset(isolate())
+ .address(),
+ SKIP_WRITE_BARRIER);
+ elements->set_length(static_cast<int>(length));
+ if (initialize) memset(elements->DataPtr(), 0, elements->DataSize());
+ return elements;
+}
+
+Handle<Cell> Factory::NewCell(Handle<Object> value) {
+ AllowDeferredHandleDereference convert_to_cell;
+ STATIC_ASSERT(Cell::kSize <= kMaxRegularHeapObjectSize);
+ HeapObject* result =
+ AllocateRawWithImmortalMap(Cell::kSize, TENURED, *cell_map());
+ Handle<Cell> cell(Cell::cast(result), isolate());
+ cell->set_value(*value);
+ return cell;
+}
+
+Handle<FeedbackCell> Factory::NewNoClosuresCell(Handle<HeapObject> value) {
+ AllowDeferredHandleDereference convert_to_cell;
+ HeapObject* result = AllocateRawWithImmortalMap(FeedbackCell::kSize, TENURED,
+ *no_closures_cell_map());
+ Handle<FeedbackCell> cell(FeedbackCell::cast(result), isolate());
+ cell->set_value(*value);
+ return cell;
+}
+
+Handle<FeedbackCell> Factory::NewOneClosureCell(Handle<HeapObject> value) {
+ AllowDeferredHandleDereference convert_to_cell;
+ HeapObject* result = AllocateRawWithImmortalMap(FeedbackCell::kSize, TENURED,
+ *one_closure_cell_map());
+ Handle<FeedbackCell> cell(FeedbackCell::cast(result), isolate());
+ cell->set_value(*value);
+ return cell;
+}
+
+Handle<FeedbackCell> Factory::NewManyClosuresCell(Handle<HeapObject> value) {
+ AllowDeferredHandleDereference convert_to_cell;
+ HeapObject* result = AllocateRawWithImmortalMap(FeedbackCell::kSize, TENURED,
+ *many_closures_cell_map());
+ Handle<FeedbackCell> cell(FeedbackCell::cast(result), isolate());
+ cell->set_value(*value);
+ return cell;
+}
+
+Handle<PropertyCell> Factory::NewPropertyCell(Handle<Name> name) {
+ DCHECK(name->IsUniqueName());
+ STATIC_ASSERT(PropertyCell::kSize <= kMaxRegularHeapObjectSize);
+ HeapObject* result = AllocateRawWithImmortalMap(PropertyCell::kSize, TENURED,
+ *global_property_cell_map());
+ Handle<PropertyCell> cell(PropertyCell::cast(result), isolate());
+ cell->set_dependent_code(DependentCode::cast(*empty_fixed_array()),
+ SKIP_WRITE_BARRIER);
+ cell->set_property_details(PropertyDetails(Smi::kZero));
+ cell->set_name(*name);
+ cell->set_value(*the_hole_value());
+ return cell;
+}
+
+Handle<WeakCell> Factory::NewWeakCell(Handle<HeapObject> value) {
+ // It is safe to dereference the value because we are embedding it
+  // in the cell and not inspecting its fields.
+ AllowDeferredHandleDereference convert_to_cell;
+ STATIC_ASSERT(WeakCell::kSize <= kMaxRegularHeapObjectSize);
+ HeapObject* result =
+ AllocateRawWithImmortalMap(WeakCell::kSize, TENURED, *weak_cell_map());
+ Handle<WeakCell> cell(WeakCell::cast(result), isolate());
+ cell->initialize(*value);
+ return cell;
+}
+
+Handle<TransitionArray> Factory::NewTransitionArray(int capacity) {
+ Handle<TransitionArray> array = NewFixedArrayWithMap<TransitionArray>(
+ Heap::kTransitionArrayMapRootIndex, capacity, TENURED);
+ // Transition arrays are tenured. When black allocation is on we have to
+ // add the transition array to the list of encountered_transition_arrays.
+ Heap* heap = isolate()->heap();
+ if (heap->incremental_marking()->black_allocation()) {
+ heap->mark_compact_collector()->AddTransitionArray(*array);
+ }
+ return array;
+}
+
+Handle<AllocationSite> Factory::NewAllocationSite() {
+ Handle<Map> map = allocation_site_map();
+ Handle<AllocationSite> site(AllocationSite::cast(New(map, TENURED)),
+ isolate());
+ site->Initialize();
+
+ // Link the site
+ site->set_weak_next(isolate()->heap()->allocation_sites_list());
+ isolate()->heap()->set_allocation_sites_list(*site);
+ return site;
+}
+
+Handle<Map> Factory::NewMap(InstanceType type, int instance_size,
+ ElementsKind elements_kind,
+ int inobject_properties) {
+ STATIC_ASSERT(LAST_JS_OBJECT_TYPE == LAST_TYPE);
+ DCHECK_IMPLIES(type >= FIRST_JS_OBJECT_TYPE &&
+ !Map::CanHaveFastTransitionableElementsKind(type),
+ IsDictionaryElementsKind(elements_kind) ||
+ IsTerminalElementsKind(elements_kind));
+ HeapObject* result =
+ isolate()->heap()->AllocateRawWithRetry(Map::kSize, MAP_SPACE);
+ result->set_map_after_allocation(*meta_map(), SKIP_WRITE_BARRIER);
+ return handle(InitializeMap(Map::cast(result), type, instance_size,
+ elements_kind, inobject_properties),
+ isolate());
+}
+
+Map* Factory::InitializeMap(Map* map, InstanceType type, int instance_size,
+ ElementsKind elements_kind,
+ int inobject_properties) {
+ map->set_instance_type(type);
+ map->set_prototype(*null_value(), SKIP_WRITE_BARRIER);
+ map->set_constructor_or_backpointer(*null_value(), SKIP_WRITE_BARRIER);
+ map->set_instance_size(instance_size);
+ if (map->IsJSObjectMap()) {
+ map->SetInObjectPropertiesStartInWords(instance_size / kPointerSize -
+ inobject_properties);
+ DCHECK_EQ(map->GetInObjectProperties(), inobject_properties);
+ map->set_prototype_validity_cell(*invalid_prototype_validity_cell());
+ } else {
+ DCHECK_EQ(inobject_properties, 0);
+ map->set_inobject_properties_start_or_constructor_function_index(0);
+ map->set_prototype_validity_cell(Smi::FromInt(Map::kPrototypeChainValid));
+ }
+ map->set_dependent_code(DependentCode::cast(*empty_fixed_array()),
+ SKIP_WRITE_BARRIER);
+ map->set_weak_cell_cache(Smi::kZero);
+ map->set_raw_transitions(MaybeObject::FromSmi(Smi::kZero));
+ map->SetInObjectUnusedPropertyFields(inobject_properties);
+ map->set_instance_descriptors(*empty_descriptor_array());
+ if (FLAG_unbox_double_fields) {
+ map->set_layout_descriptor(LayoutDescriptor::FastPointerLayout());
+ }
+ // Must be called only after |instance_type|, |instance_size| and
+ // |layout_descriptor| are set.
+ map->set_visitor_id(Map::GetVisitorId(map));
+ map->set_bit_field(0);
+ map->set_bit_field2(Map::IsExtensibleBit::kMask);
+ DCHECK(!map->is_in_retained_map_list());
+ int bit_field3 = Map::EnumLengthBits::encode(kInvalidEnumCacheSentinel) |
+ Map::OwnsDescriptorsBit::encode(true) |
+ Map::ConstructionCounterBits::encode(Map::kNoSlackTracking);
+ map->set_bit_field3(bit_field3);
+ map->set_elements_kind(elements_kind);
+ map->set_new_target_is_base(true);
+ isolate()->counters()->maps_created()->Increment();
+ if (FLAG_trace_maps) LOG(isolate(), MapCreate(map));
+ return map;
+}
+
+Handle<JSObject> Factory::CopyJSObject(Handle<JSObject> source) {
+ return CopyJSObjectWithAllocationSite(source, Handle<AllocationSite>());
+}
+
+Handle<JSObject> Factory::CopyJSObjectWithAllocationSite(
+ Handle<JSObject> source, Handle<AllocationSite> site) {
+ Handle<Map> map(source->map(), isolate());
+
+  // We can only clone regexps, normal objects, api objects, errors, arrays,
+  // or Wasm objects. Copying anything else will break invariants.
+ CHECK(map->instance_type() == JS_REGEXP_TYPE ||
+ map->instance_type() == JS_OBJECT_TYPE ||
+ map->instance_type() == JS_ERROR_TYPE ||
+ map->instance_type() == JS_ARRAY_TYPE ||
+ map->instance_type() == JS_API_OBJECT_TYPE ||
+ map->instance_type() == WASM_GLOBAL_TYPE ||
+ map->instance_type() == WASM_INSTANCE_TYPE ||
+ map->instance_type() == WASM_MEMORY_TYPE ||
+ map->instance_type() == WASM_MODULE_TYPE ||
+ map->instance_type() == WASM_TABLE_TYPE ||
+ map->instance_type() == JS_SPECIAL_API_OBJECT_TYPE);
+ DCHECK(site.is_null() || AllocationSite::CanTrack(map->instance_type()));
+
+ int object_size = map->instance_size();
+ int adjusted_object_size =
+ site.is_null() ? object_size : object_size + AllocationMemento::kSize;
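+  // When an allocation site is given, reserve extra space directly behind the
+  // clone for an AllocationMemento pointing back at the site.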
+ HeapObject* raw_clone =
+ isolate()->heap()->AllocateRawWithRetry(adjusted_object_size, NEW_SPACE);
+
+ SLOW_DCHECK(isolate()->heap()->InNewSpace(raw_clone));
+ // Since we know the clone is allocated in new space, we can copy
+ // the contents without worrying about updating the write barrier.
+ Heap::CopyBlock(raw_clone->address(), source->address(), object_size);
+ Handle<JSObject> clone(JSObject::cast(raw_clone), isolate());
+
+ if (!site.is_null()) {
+ AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>(
+ reinterpret_cast<Address>(raw_clone) + object_size);
+ InitializeAllocationMemento(alloc_memento, *site);
+ }
+
+ SLOW_DCHECK(clone->GetElementsKind() == source->GetElementsKind());
+ FixedArrayBase* elements = FixedArrayBase::cast(source->elements());
+ // Update elements if necessary.
+ if (elements->length() > 0) {
+ FixedArrayBase* elem = nullptr;
+ if (elements->map() == *fixed_cow_array_map()) {
+ elem = elements;
+ } else if (source->HasDoubleElements()) {
+ elem = *CopyFixedDoubleArray(
+ handle(FixedDoubleArray::cast(elements), isolate()));
+ } else {
+ elem = *CopyFixedArray(handle(FixedArray::cast(elements), isolate()));
+ }
+ clone->set_elements(elem);
+ }
+
+ // Update properties if necessary.
+ if (source->HasFastProperties()) {
+ PropertyArray* properties = source->property_array();
+ if (properties->length() > 0) {
+ // TODO(gsathya): Do not copy hash code.
+ Handle<PropertyArray> prop = CopyArrayWithMap(
+ handle(properties, isolate()), handle(properties->map(), isolate()));
+ clone->set_raw_properties_or_hash(*prop);
+ }
+ } else {
+ Handle<FixedArray> properties(
+ FixedArray::cast(source->property_dictionary()), isolate());
+ Handle<FixedArray> prop = CopyFixedArray(properties);
+ clone->set_raw_properties_or_hash(*prop);
+ }
+ return clone;
+}
+
+namespace {
+template <typename T>
+void initialize_length(T* array, int length) {
+ array->set_length(length);
+}
+
+template <>
+void initialize_length<PropertyArray>(PropertyArray* array, int length) {
+ array->initialize_length(length);
+}
+
+} // namespace
+
+template <typename T>
+Handle<T> Factory::CopyArrayWithMap(Handle<T> src, Handle<Map> map) {
+ int len = src->length();
+ HeapObject* obj = AllocateRawFixedArray(len, NOT_TENURED);
+ obj->set_map_after_allocation(*map, SKIP_WRITE_BARRIER);
+
+ T* result = T::cast(obj);
+ DisallowHeapAllocation no_gc;
+ WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
+
+ if (mode == SKIP_WRITE_BARRIER) {
+ // Eliminate the write barrier if possible.
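+    // Skip the first word: the map was already installed above.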
+ Heap::CopyBlock(obj->address() + kPointerSize,
+ src->address() + kPointerSize,
+ T::SizeFor(len) - kPointerSize);
+ } else {
+ // Slow case: Just copy the content one-by-one.
+ initialize_length(result, len);
+ for (int i = 0; i < len; i++) result->set(i, src->get(i), mode);
+ }
+ return Handle<T>(result, isolate());
+}
+
+template <typename T>
+Handle<T> Factory::CopyArrayAndGrow(Handle<T> src, int grow_by,
+ PretenureFlag pretenure) {
+ DCHECK_LT(0, grow_by);
+ DCHECK_LE(grow_by, kMaxInt - src->length());
+ int old_len = src->length();
+ int new_len = old_len + grow_by;
+ HeapObject* obj = AllocateRawFixedArray(new_len, pretenure);
+ obj->set_map_after_allocation(src->map(), SKIP_WRITE_BARRIER);
+
+ T* result = T::cast(obj);
+ initialize_length(result, new_len);
+
+ // Copy the content.
+ DisallowHeapAllocation no_gc;
+ WriteBarrierMode mode = obj->GetWriteBarrierMode(no_gc);
+ for (int i = 0; i < old_len; i++) result->set(i, src->get(i), mode);
+ MemsetPointer(result->data_start() + old_len, *undefined_value(), grow_by);
+ return Handle<T>(result, isolate());
+}
+
+Handle<FixedArray> Factory::CopyFixedArrayWithMap(Handle<FixedArray> array,
+ Handle<Map> map) {
+ return CopyArrayWithMap(array, map);
+}
+
+Handle<FixedArray> Factory::CopyFixedArrayAndGrow(Handle<FixedArray> array,
+ int grow_by,
+ PretenureFlag pretenure) {
+ return CopyArrayAndGrow(array, grow_by, pretenure);
+}
+
+Handle<PropertyArray> Factory::CopyPropertyArrayAndGrow(
+ Handle<PropertyArray> array, int grow_by, PretenureFlag pretenure) {
+ return CopyArrayAndGrow(array, grow_by, pretenure);
+}
+
+Handle<FixedArray> Factory::CopyFixedArrayUpTo(Handle<FixedArray> array,
+ int new_len,
+ PretenureFlag pretenure) {
+ DCHECK_LE(0, new_len);
+ DCHECK_LE(new_len, array->length());
+ if (new_len == 0) return empty_fixed_array();
+
+ HeapObject* obj = AllocateRawFixedArray(new_len, pretenure);
+ obj->set_map_after_allocation(*fixed_array_map(), SKIP_WRITE_BARRIER);
+ Handle<FixedArray> result(FixedArray::cast(obj), isolate());
+ result->set_length(new_len);
+
+ // Copy the content.
+ DisallowHeapAllocation no_gc;
+ WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
+ for (int i = 0; i < new_len; i++) result->set(i, array->get(i), mode);
+ return result;
+}
+
+Handle<FixedArray> Factory::CopyFixedArray(Handle<FixedArray> array) {
+ if (array->length() == 0) return array;
+ return CopyArrayWithMap(array, handle(array->map(), isolate()));
+}
+
+Handle<FixedArray> Factory::CopyAndTenureFixedCOWArray(
+ Handle<FixedArray> array) {
+ DCHECK(isolate()->heap()->InNewSpace(*array));
+ Handle<FixedArray> result =
+ CopyFixedArrayUpTo(array, array->length(), TENURED);
+
+  // TODO(mvstanton): The map is set twice to protect against calling set()
+  // on a COW FixedArray. Issue v8:3221 was created to track this; once it is
+  // resolved we might be able to remove this whole method.
+ result->set_map_after_allocation(*fixed_cow_array_map(), SKIP_WRITE_BARRIER);
+ return result;
+}
+
+Handle<FixedDoubleArray> Factory::CopyFixedDoubleArray(
+ Handle<FixedDoubleArray> array) {
+ int len = array->length();
+ if (len == 0) return array;
+ Handle<FixedDoubleArray> result =
+ Handle<FixedDoubleArray>::cast(NewFixedDoubleArray(len, NOT_TENURED));
+ Heap::CopyBlock(
+ result->address() + FixedDoubleArray::kLengthOffset,
+ array->address() + FixedDoubleArray::kLengthOffset,
+ FixedDoubleArray::SizeFor(len) - FixedDoubleArray::kLengthOffset);
+ return result;
+}
+
+Handle<FeedbackVector> Factory::CopyFeedbackVector(
+ Handle<FeedbackVector> array) {
+ int len = array->length();
+ HeapObject* obj = AllocateRawWithImmortalMap(
+ FeedbackVector::SizeFor(len), NOT_TENURED, *feedback_vector_map());
+ Handle<FeedbackVector> result(FeedbackVector::cast(obj), isolate());
+
+ DisallowHeapAllocation no_gc;
+ WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
+
+ // Eliminate the write barrier if possible.
+ if (mode == SKIP_WRITE_BARRIER) {
+ Heap::CopyBlock(result->address() + kPointerSize,
+                    array->address() + kPointerSize,
+ FeedbackVector::SizeFor(len) - kPointerSize);
+ } else {
+ // Slow case: Just copy the content one-by-one.
+ result->set_shared_function_info(array->shared_function_info());
+ result->set_optimized_code_weak_or_smi(array->optimized_code_weak_or_smi());
+ result->set_invocation_count(array->invocation_count());
+ result->set_profiler_ticks(array->profiler_ticks());
+ result->set_deopt_count(array->deopt_count());
+ for (int i = 0; i < len; i++) result->set(i, array->get(i), mode);
+ }
+ return result;
+}
+
+Handle<Object> Factory::NewNumber(double value, PretenureFlag pretenure) {
+ // Materialize as a SMI if possible.
+ int32_t int_value;
+ if (DoubleToSmiInteger(value, &int_value)) {
+ return handle(Smi::FromInt(int_value), isolate());
+ }
+
+ // Materialize the value in the heap.
+ return NewHeapNumber(value, IMMUTABLE, pretenure);
+}
+
+Handle<Object> Factory::NewNumberFromInt(int32_t value,
+ PretenureFlag pretenure) {
+ if (Smi::IsValid(value)) return handle(Smi::FromInt(value), isolate());
+ // Bypass NewNumber to avoid various redundant checks.
+ return NewHeapNumber(FastI2D(value), IMMUTABLE, pretenure);
+}
+
+Handle<Object> Factory::NewNumberFromUint(uint32_t value,
+ PretenureFlag pretenure) {
+ int32_t int32v = static_cast<int32_t>(value);
+ if (int32v >= 0 && Smi::IsValid(int32v)) {
+ return handle(Smi::FromInt(int32v), isolate());
+ }
+ return NewHeapNumber(FastUI2D(value), IMMUTABLE, pretenure);
+}
+
+Handle<HeapNumber> Factory::NewHeapNumber(MutableMode mode,
+ PretenureFlag pretenure) {
+ STATIC_ASSERT(HeapNumber::kSize <= kMaxRegularHeapObjectSize);
+ Map* map = mode == MUTABLE ? *mutable_heap_number_map() : *heap_number_map();
+ HeapObject* result = AllocateRawWithImmortalMap(HeapNumber::kSize, pretenure,
+ map, kDoubleUnaligned);
+ return handle(HeapNumber::cast(result), isolate());
+}
+
+Handle<FreshlyAllocatedBigInt> Factory::NewBigInt(int length,
+ PretenureFlag pretenure) {
+ if (length < 0 || length > BigInt::kMaxLength) {
+ isolate()->heap()->FatalProcessOutOfMemory("invalid BigInt length");
+ }
+ HeapObject* result = AllocateRawWithImmortalMap(BigInt::SizeFor(length),
+ pretenure, *bigint_map());
+ return handle(FreshlyAllocatedBigInt::cast(result), isolate());
+}
+
+Handle<Object> Factory::NewError(Handle<JSFunction> constructor,
+ MessageTemplate::Template template_index,
+ Handle<Object> arg0, Handle<Object> arg1,
+ Handle<Object> arg2) {
+ HandleScope scope(isolate());
+ if (isolate()->bootstrapper()->IsActive()) {
+ // During bootstrapping we cannot construct error objects.
+ return scope.CloseAndEscape(NewStringFromAsciiChecked(
+ MessageTemplate::TemplateString(template_index)));
+ }
+
+ if (arg0.is_null()) arg0 = undefined_value();
+ if (arg1.is_null()) arg1 = undefined_value();
+ if (arg2.is_null()) arg2 = undefined_value();
+
+ Handle<Object> result;
+ if (!ErrorUtils::MakeGenericError(isolate(), constructor, template_index,
+ arg0, arg1, arg2, SKIP_NONE)
+ .ToHandle(&result)) {
+ // If an exception is thrown while
+ // running the factory method, use the exception as the result.
+ DCHECK(isolate()->has_pending_exception());
+ result = handle(isolate()->pending_exception(), isolate());
+ isolate()->clear_pending_exception();
+ }
+
+ return scope.CloseAndEscape(result);
+}
+
+Handle<Object> Factory::NewError(Handle<JSFunction> constructor,
+ Handle<String> message) {
+ // Construct a new error object. If an exception is thrown, use the exception
+ // as the result.
+
+ Handle<Object> no_caller;
+ MaybeHandle<Object> maybe_error =
+ ErrorUtils::Construct(isolate(), constructor, constructor, message,
+ SKIP_NONE, no_caller, false);
+ if (maybe_error.is_null()) {
+ DCHECK(isolate()->has_pending_exception());
+ maybe_error = handle(isolate()->pending_exception(), isolate());
+ isolate()->clear_pending_exception();
+ }
+
+ return maybe_error.ToHandleChecked();
+}
+
+Handle<Object> Factory::NewInvalidStringLengthError() {
+ if (FLAG_abort_on_stack_or_string_length_overflow) {
+ FATAL("Aborting on invalid string length");
+ }
+ // Invalidate the "string length" protector.
+ if (isolate()->IsStringLengthOverflowIntact()) {
+ isolate()->InvalidateStringLengthOverflowProtector();
+ }
+ return NewRangeError(MessageTemplate::kInvalidStringLength);
+}
+
+#define DEFINE_ERROR(NAME, name) \
+ Handle<Object> Factory::New##NAME(MessageTemplate::Template template_index, \
+ Handle<Object> arg0, Handle<Object> arg1, \
+ Handle<Object> arg2) { \
+ return NewError(isolate()->name##_function(), template_index, arg0, arg1, \
+ arg2); \
+ }
+DEFINE_ERROR(Error, error)
+DEFINE_ERROR(EvalError, eval_error)
+DEFINE_ERROR(RangeError, range_error)
+DEFINE_ERROR(ReferenceError, reference_error)
+DEFINE_ERROR(SyntaxError, syntax_error)
+DEFINE_ERROR(TypeError, type_error)
+DEFINE_ERROR(WasmCompileError, wasm_compile_error)
+DEFINE_ERROR(WasmLinkError, wasm_link_error)
+DEFINE_ERROR(WasmRuntimeError, wasm_runtime_error)
+#undef DEFINE_ERROR
+
+Handle<JSFunction> Factory::NewFunction(Handle<Map> map,
+ Handle<SharedFunctionInfo> info,
+ Handle<Object> context_or_undefined,
+ PretenureFlag pretenure) {
+ Handle<JSFunction> function(JSFunction::cast(New(map, pretenure)), isolate());
+ DCHECK(context_or_undefined->IsContext() ||
+ context_or_undefined->IsUndefined(isolate()));
+
+ function->initialize_properties();
+ function->initialize_elements();
+ function->set_shared(*info);
+ function->set_code(info->GetCode());
+ function->set_context(*context_or_undefined);
+ function->set_feedback_cell(*many_closures_cell());
+ int header_size;
+ if (map->has_prototype_slot()) {
+ header_size = JSFunction::kSizeWithPrototype;
+ function->set_prototype_or_initial_map(*the_hole_value());
+ } else {
+ header_size = JSFunction::kSizeWithoutPrototype;
+ }
+ InitializeJSObjectBody(function, map, header_size);
+ return function;
+}
+
+Handle<JSFunction> Factory::NewFunctionForTest(Handle<String> name) {
+ NewFunctionArgs args = NewFunctionArgs::ForFunctionWithoutCode(
+ name, isolate()->sloppy_function_map(), LanguageMode::kSloppy);
+ Handle<JSFunction> result = NewFunction(args);
+ DCHECK(is_sloppy(result->shared()->language_mode()));
+ return result;
+}
+
+Handle<JSFunction> Factory::NewFunction(const NewFunctionArgs& args) {
+ DCHECK(!args.name_.is_null());
+
+ // Create the SharedFunctionInfo.
+ Handle<Context> context(isolate()->native_context());
+ Handle<Map> map = args.GetMap(isolate());
+ Handle<SharedFunctionInfo> info = NewSharedFunctionInfo(
+ args.name_, args.maybe_code_, args.maybe_builtin_id_, kNormalFunction);
+
+ // Proper language mode in shared function info will be set later.
+ DCHECK(is_sloppy(info->language_mode()));
+ DCHECK(!map->IsUndefined(isolate()));
+
+#ifdef DEBUG
+ if (isolate()->bootstrapper()->IsActive()) {
+ Handle<Code> code;
+ DCHECK(
+        // During bootstrapping some of these maps might not be created yet.
+ (*map == context->get(Context::STRICT_FUNCTION_MAP_INDEX)) ||
+ (*map ==
+ context->get(Context::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX)) ||
+ (*map ==
+ context->get(
+ Context::STRICT_FUNCTION_WITH_READONLY_PROTOTYPE_MAP_INDEX)) ||
+ // Check if it's a creation of an empty or Proxy function during
+ // bootstrapping.
+ (args.maybe_builtin_id_ == Builtins::kEmptyFunction ||
+ args.maybe_builtin_id_ == Builtins::kProxyConstructor));
+ } else {
+ DCHECK(
+ (*map == *isolate()->sloppy_function_map()) ||
+ (*map == *isolate()->sloppy_function_without_prototype_map()) ||
+ (*map == *isolate()->sloppy_function_with_readonly_prototype_map()) ||
+ (*map == *isolate()->strict_function_map()) ||
+ (*map == *isolate()->strict_function_without_prototype_map()) ||
+ (*map == *isolate()->native_function_map()));
+ }
+#endif
+
+ Handle<JSFunction> result = NewFunction(map, info, context);
+
+ if (args.should_set_prototype_) {
+ result->set_prototype_or_initial_map(
+ *args.maybe_prototype_.ToHandleChecked());
+ }
+
+ if (args.should_set_language_mode_) {
+ result->shared()->set_language_mode(args.language_mode_);
+ }
+
+ if (args.should_create_and_set_initial_map_) {
+ ElementsKind elements_kind;
+ switch (args.type_) {
+ case JS_ARRAY_TYPE:
+ elements_kind = PACKED_SMI_ELEMENTS;
+ break;
+ case JS_ARGUMENTS_TYPE:
+ elements_kind = PACKED_ELEMENTS;
+ break;
+ default:
+ elements_kind = TERMINAL_FAST_ELEMENTS_KIND;
+ break;
+ }
+ Handle<Map> initial_map = NewMap(args.type_, args.instance_size_,
+ elements_kind, args.inobject_properties_);
+ result->shared()->set_expected_nof_properties(args.inobject_properties_);
+ // TODO(littledan): Why do we have this is_generator test when
+ // NewFunctionPrototype already handles finding an appropriately
+ // shared prototype?
+ Handle<Object> prototype = args.maybe_prototype_.ToHandleChecked();
+ if (!IsResumableFunction(result->shared()->kind())) {
+ if (prototype->IsTheHole(isolate())) {
+ prototype = NewFunctionPrototype(result);
+ }
+ }
+ JSFunction::SetInitialMap(result, initial_map, prototype);
+ }
+
+ return result;
+}
+
+Handle<JSObject> Factory::NewFunctionPrototype(Handle<JSFunction> function) {
+ // Make sure to use globals from the function's context, since the function
+ // can be from a different context.
+ Handle<Context> native_context(function->context()->native_context());
+ Handle<Map> new_map;
+ if (V8_UNLIKELY(IsAsyncGeneratorFunction(function->shared()->kind()))) {
+ new_map = handle(native_context->async_generator_object_prototype_map());
+ } else if (IsResumableFunction(function->shared()->kind())) {
+ // Generator and async function prototypes can share maps since they
+ // don't have "constructor" properties.
+ new_map = handle(native_context->generator_object_prototype_map());
+ } else {
+ // Each function prototype gets a fresh map to avoid unwanted sharing of
+ // maps between prototypes of different constructors.
+ Handle<JSFunction> object_function(native_context->object_function());
+ DCHECK(object_function->has_initial_map());
+ new_map = handle(object_function->initial_map());
+ }
+
+ DCHECK(!new_map->is_prototype_map());
+ Handle<JSObject> prototype = NewJSObjectFromMap(new_map);
+
+ if (!IsResumableFunction(function->shared()->kind())) {
+ JSObject::AddProperty(prototype, constructor_string(), function, DONT_ENUM);
+ }
+
+ return prototype;
+}
+
+Handle<JSFunction> Factory::NewFunctionFromSharedFunctionInfo(
+ Handle<SharedFunctionInfo> info, Handle<Context> context,
+ PretenureFlag pretenure) {
+ Handle<Map> initial_map(
+ Map::cast(context->native_context()->get(info->function_map_index())));
+ return NewFunctionFromSharedFunctionInfo(initial_map, info, context,
+ pretenure);
+}
+
+Handle<JSFunction> Factory::NewFunctionFromSharedFunctionInfo(
+ Handle<SharedFunctionInfo> info, Handle<Context> context,
+ Handle<FeedbackCell> feedback_cell, PretenureFlag pretenure) {
+ Handle<Map> initial_map(
+ Map::cast(context->native_context()->get(info->function_map_index())));
+ return NewFunctionFromSharedFunctionInfo(initial_map, info, context,
+ feedback_cell, pretenure);
+}
+
+Handle<JSFunction> Factory::NewFunctionFromSharedFunctionInfo(
+ Handle<Map> initial_map, Handle<SharedFunctionInfo> info,
+ Handle<Object> context_or_undefined, PretenureFlag pretenure) {
+ DCHECK_EQ(JS_FUNCTION_TYPE, initial_map->instance_type());
+ Handle<JSFunction> result =
+ NewFunction(initial_map, info, context_or_undefined, pretenure);
+
+ if (context_or_undefined->IsContext()) {
+    // Give the compiler a chance to pre-initialize.
+ Compiler::PostInstantiation(result, pretenure);
+ }
+
+ return result;
+}
+
+Handle<JSFunction> Factory::NewFunctionFromSharedFunctionInfo(
+ Handle<Map> initial_map, Handle<SharedFunctionInfo> info,
+ Handle<Object> context_or_undefined, Handle<FeedbackCell> feedback_cell,
+ PretenureFlag pretenure) {
+ DCHECK_EQ(JS_FUNCTION_TYPE, initial_map->instance_type());
+ Handle<JSFunction> result =
+ NewFunction(initial_map, info, context_or_undefined, pretenure);
+
+ // Bump the closure count that is encoded in the feedback cell's map.
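+  // The map tracks zero, one, or many closures; "many" is the terminal state.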
+ if (feedback_cell->map() == *no_closures_cell_map()) {
+ feedback_cell->set_map(*one_closure_cell_map());
+ } else if (feedback_cell->map() == *one_closure_cell_map()) {
+ feedback_cell->set_map(*many_closures_cell_map());
+ } else {
+ DCHECK_EQ(feedback_cell->map(), *many_closures_cell_map());
+ }
+
+ // Check that the optimized code in the feedback cell wasn't marked for
+ // deoptimization while not pointed to by any live JSFunction.
+ if (feedback_cell->value()->IsFeedbackVector()) {
+ FeedbackVector::cast(feedback_cell->value())
+ ->EvictOptimizedCodeMarkedForDeoptimization(
+ *info, "new function from shared function info");
+ }
+ result->set_feedback_cell(*feedback_cell);
+
+ if (context_or_undefined->IsContext()) {
+    // Give the compiler a chance to pre-initialize.
+ Compiler::PostInstantiation(result, pretenure);
+ }
+
+ return result;
+}
+
+Handle<ScopeInfo> Factory::NewScopeInfo(int length) {
+ return NewFixedArrayWithMap<ScopeInfo>(Heap::kScopeInfoMapRootIndex, length,
+ TENURED);
+}
+
+Handle<ModuleInfo> Factory::NewModuleInfo() {
+ return NewFixedArrayWithMap<ModuleInfo>(Heap::kModuleInfoMapRootIndex,
+ ModuleInfo::kLength, TENURED);
+}
+
+Handle<PreParsedScopeData> Factory::NewPreParsedScopeData() {
+ Handle<PreParsedScopeData> result =
+ Handle<PreParsedScopeData>::cast(NewStruct(TUPLE2_TYPE, TENURED));
+ result->set_scope_data(PodArray<uint8_t>::cast(*empty_byte_array()));
+ result->set_child_data(*empty_fixed_array());
+ return result;
+}
+
+Handle<JSObject> Factory::NewExternal(void* value) {
+ Handle<Foreign> foreign = NewForeign(static_cast<Address>(value));
+ Handle<JSObject> external = NewJSObjectFromMap(external_map());
+ external->SetEmbedderField(0, *foreign);
+ return external;
+}
+
+Handle<CodeDataContainer> Factory::NewCodeDataContainer(int flags) {
+ Handle<CodeDataContainer> data_container(
+ CodeDataContainer::cast(New(code_data_container_map(), TENURED)),
+ isolate());
+ data_container->set_next_code_link(*undefined_value(), SKIP_WRITE_BARRIER);
+ data_container->set_kind_specific_flags(flags);
+ data_container->clear_padding();
+ return data_container;
+}
+
+Handle<Code> Factory::NewCode(
+ const CodeDesc& desc, Code::Kind kind, Handle<Object> self_ref,
+ int32_t builtin_index, MaybeHandle<ByteArray> maybe_source_position_table,
+ MaybeHandle<DeoptimizationData> maybe_deopt_data, Movability movability,
+ uint32_t stub_key, bool is_turbofanned, int stack_slots,
+ int safepoint_table_offset, int handler_table_offset) {
+ Handle<ByteArray> reloc_info = NewByteArray(desc.reloc_size, TENURED);
+ Handle<CodeDataContainer> data_container = NewCodeDataContainer(0);
+
+ Handle<ByteArray> source_position_table =
+ maybe_source_position_table.is_null()
+ ? empty_byte_array()
+ : maybe_source_position_table.ToHandleChecked();
+ Handle<DeoptimizationData> deopt_data =
+ maybe_deopt_data.is_null() ? DeoptimizationData::Empty(isolate())
+ : maybe_deopt_data.ToHandleChecked();
+
+ bool has_unwinding_info = desc.unwinding_info != nullptr;
+ DCHECK((has_unwinding_info && desc.unwinding_info_size > 0) ||
+ (!has_unwinding_info && desc.unwinding_info_size == 0));
+
+ // Compute size.
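+  // If unwinding info is present, the body is padded to an 8-byte boundary
+  // and extended by the unwinding info plus a 64-bit field holding its size.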
+ int body_size = desc.instr_size;
+ int unwinding_info_size_field_size = kInt64Size;
+ if (has_unwinding_info) {
+ body_size = RoundUp(body_size, kInt64Size) + desc.unwinding_info_size +
+ unwinding_info_size_field_size;
+ }
+ int object_size = Code::SizeFor(RoundUp(body_size, kObjectAlignment));
+ DCHECK(IsAligned(static_cast<intptr_t>(object_size), kCodeAlignment));
+
+ Heap* heap = isolate()->heap();
+ CodePageCollectionMemoryModificationScope code_allocation(heap);
+ HeapObject* result = heap->AllocateRawWithRetry(object_size, CODE_SPACE);
+
+ if (movability == kImmovable) {
+ MemoryChunk* chunk = MemoryChunk::FromAddress(result->address());
+    // Code objects which should stay at a fixed address are allocated either
+    // in the first page of code space or in large object space. During
+    // snapshot creation, the containing page is marked as immovable instead.
+ if (!Heap::IsImmovable(result)) {
+ if (isolate()->serializer_enabled() ||
+ heap->code_space_->FirstPage()->Contains(result->address())) {
+ chunk->MarkNeverEvacuate();
+ } else {
+ // Discard the first code allocation, which was on a page where it could
+ // be moved.
+ heap->CreateFillerObjectAt(result->address(), object_size,
+ ClearRecordedSlots::kNo);
+ result = heap->AllocateRawCodeInLargeObjectSpace(object_size);
+ heap->UnprotectAndRegisterMemoryChunk(result);
+ heap->ZapCodeObject(result->address(), object_size);
+ heap->OnAllocationEvent(result, object_size);
+ }
+ }
+ }
+
+ result->set_map_after_allocation(*code_map(), SKIP_WRITE_BARRIER);
+ Handle<Code> code(Code::cast(result), isolate());
+ DCHECK(IsAligned(bit_cast<intptr_t>(code->address()), kCodeAlignment));
+ DCHECK(!heap->memory_allocator()->code_range()->valid() ||
+ heap->memory_allocator()->code_range()->contains(code->address()) ||
+ object_size <= heap->code_space()->AreaSize());
+
+ // The code object has not been fully initialized yet. We rely on the
+ // fact that no allocation will happen from this point on.
+ DisallowHeapAllocation no_gc;
+ code->set_raw_instruction_size(desc.instr_size);
+ code->set_relocation_info(*reloc_info);
+ code->initialize_flags(kind, has_unwinding_info, is_turbofanned, stack_slots);
+ code->set_safepoint_table_offset(safepoint_table_offset);
+ code->set_handler_table_offset(handler_table_offset);
+ code->set_code_data_container(*data_container);
+ code->set_deoptimization_data(*deopt_data);
+ code->set_stub_key(stub_key);
+ code->set_source_position_table(*source_position_table);
+ code->set_constant_pool_offset(desc.instr_size - desc.constant_pool_size);
+ code->set_builtin_index(builtin_index);
+
+  // Allow self-references to the created code object by patching the handle
+  // to point to the newly allocated Code object.
+ if (!self_ref.is_null()) *(self_ref.location()) = *code;
+
+ // Migrate generated code.
+ // The generated code can contain Object** values (typically from handles)
+ // that are dereferenced during the copy to point directly to the actual heap
+ // objects. These pointers can include references to the code object itself,
+ // through the self_reference parameter.
+ code->CopyFrom(desc);
+
+ code->clear_padding();
+
+#ifdef VERIFY_HEAP
+ if (FLAG_verify_heap) code->ObjectVerify();
+#endif
+ DCHECK(IsAligned(bit_cast<intptr_t>(code->address()), kCodeAlignment));
+ DCHECK(!isolate()->heap()->memory_allocator()->code_range()->valid() ||
+ isolate()->heap()->memory_allocator()->code_range()->contains(
+ code->address()) ||
+ object_size <= isolate()->heap()->code_space()->AreaSize());
+ return code;
+}
+
+Handle<Code> Factory::NewCodeForDeserialization(uint32_t size) {
+ DCHECK(IsAligned(static_cast<intptr_t>(size), kCodeAlignment));
+ Heap* heap = isolate()->heap();
+ HeapObject* result = heap->AllocateRawWithRetry(size, CODE_SPACE);
+ // Unprotect the memory chunk of the object if it was not unprotected
+ // already.
+ heap->UnprotectAndRegisterMemoryChunk(result);
+ heap->ZapCodeObject(result->address(), size);
+ result->set_map_after_allocation(*code_map(), SKIP_WRITE_BARRIER);
+ DCHECK(IsAligned(bit_cast<intptr_t>(result->address()), kCodeAlignment));
+ DCHECK(!heap->memory_allocator()->code_range()->valid() ||
+ heap->memory_allocator()->code_range()->contains(result->address()) ||
+ static_cast<int>(size) <= heap->code_space()->AreaSize());
+ return handle(Code::cast(result), isolate());
+}
+
+#ifdef V8_EMBEDDED_BUILTINS
+Handle<Code> Factory::NewOffHeapTrampolineFor(Handle<Code> code,
+ Address off_heap_entry) {
+ DCHECK(isolate()->serializer_enabled());
+ DCHECK_NOT_NULL(isolate()->embedded_blob());
+ DCHECK_NE(0, isolate()->embedded_blob_size());
+ DCHECK(Builtins::IsEmbeddedBuiltin(*code));
+
+ Handle<Code> result =
+ Builtins::GenerateOffHeapTrampolineFor(isolate(), off_heap_entry);
+
+ // The trampoline code object must inherit specific flags from the original
+ // builtin (e.g. the safepoint-table offset). We set them manually here.
+
+ const int stack_slots = code->has_safepoint_info() ? code->stack_slots() : 0;
+ result->initialize_flags(code->kind(), code->has_unwinding_info(),
+ code->is_turbofanned(), stack_slots);
+ result->set_builtin_index(code->builtin_index());
+ result->set_handler_table_offset(code->handler_table_offset());
+ result->code_data_container()->set_kind_specific_flags(
+ code->code_data_container()->kind_specific_flags());
+ result->set_constant_pool_offset(code->constant_pool_offset());
+ if (code->has_safepoint_info()) {
+ result->set_safepoint_table_offset(code->safepoint_table_offset());
+ }
+
+ return result;
+}
+#endif
+
+Handle<Code> Factory::CopyCode(Handle<Code> code) {
+ Handle<CodeDataContainer> data_container =
+ NewCodeDataContainer(code->code_data_container()->kind_specific_flags());
+
+ Heap* heap = isolate()->heap();
+ int obj_size = code->Size();
+ HeapObject* result = heap->AllocateRawWithRetry(obj_size, CODE_SPACE);
+
+ // Copy code object.
+ Address old_addr = code->address();
+ Address new_addr = result->address();
+ Heap::CopyBlock(new_addr, old_addr, obj_size);
+ Handle<Code> new_code(Code::cast(result), isolate());
+
+ // Set the {CodeDataContainer}, it cannot be shared.
+ new_code->set_code_data_container(*data_container);
+
+ new_code->Relocate(new_addr - old_addr);
+ // We have to iterate over the object and process its pointers when black
+ // allocation is on.
+ heap->incremental_marking()->ProcessBlackAllocatedObject(*new_code);
+ // Record all references to embedded objects in the new code object.
+ heap->RecordWritesIntoCode(*new_code);
+
+#ifdef VERIFY_HEAP
+ if (FLAG_verify_heap) new_code->ObjectVerify();
+#endif
+ DCHECK(IsAligned(bit_cast<intptr_t>(new_code->address()), kCodeAlignment));
+ DCHECK(
+ !heap->memory_allocator()->code_range()->valid() ||
+ heap->memory_allocator()->code_range()->contains(new_code->address()) ||
+ obj_size <= heap->code_space()->AreaSize());
+ return new_code;
+}
+
+Handle<BytecodeArray> Factory::CopyBytecodeArray(
+ Handle<BytecodeArray> bytecode_array) {
+ int size = BytecodeArray::SizeFor(bytecode_array->length());
+ HeapObject* result =
+ AllocateRawWithImmortalMap(size, TENURED, *bytecode_array_map());
+
+ Handle<BytecodeArray> copy(BytecodeArray::cast(result), isolate());
+ copy->set_length(bytecode_array->length());
+ copy->set_frame_size(bytecode_array->frame_size());
+ copy->set_parameter_count(bytecode_array->parameter_count());
+ copy->set_incoming_new_target_or_generator_register(
+ bytecode_array->incoming_new_target_or_generator_register());
+ copy->set_constant_pool(bytecode_array->constant_pool());
+ copy->set_handler_table(bytecode_array->handler_table());
+ copy->set_source_position_table(bytecode_array->source_position_table());
+ copy->set_interrupt_budget(bytecode_array->interrupt_budget());
+ copy->set_osr_loop_nesting_level(bytecode_array->osr_loop_nesting_level());
+ copy->set_bytecode_age(bytecode_array->bytecode_age());
+ bytecode_array->CopyBytecodesTo(*copy);
+ return copy;
+}
+
+Handle<JSObject> Factory::NewJSObject(Handle<JSFunction> constructor,
+ PretenureFlag pretenure) {
+ JSFunction::EnsureHasInitialMap(constructor);
+ Handle<Map> map(constructor->initial_map());
+ return NewJSObjectFromMap(map, pretenure);
+}
+
+Handle<JSObject> Factory::NewJSObjectWithNullProto(PretenureFlag pretenure) {
+ Handle<JSObject> result =
+ NewJSObject(isolate()->object_function(), pretenure);
+ Handle<Map> new_map =
+ Map::Copy(Handle<Map>(result->map()), "ObjectWithNullProto");
+ Map::SetPrototype(new_map, null_value());
+ JSObject::MigrateToMap(result, new_map);
+ return result;
+}
+
+Handle<JSGlobalObject> Factory::NewJSGlobalObject(
+ Handle<JSFunction> constructor) {
+ DCHECK(constructor->has_initial_map());
+ Handle<Map> map(constructor->initial_map());
+ DCHECK(map->is_dictionary_map());
+
+ // Make sure no field properties are described in the initial map.
+ // This guarantees us that normalizing the properties does not
+ // require us to change property values to PropertyCells.
+ DCHECK_EQ(map->NextFreePropertyIndex(), 0);
+
+ // Make sure we don't have a ton of pre-allocated slots in the
+ // global objects. They will be unused once we normalize the object.
+ DCHECK_EQ(map->UnusedPropertyFields(), 0);
+ DCHECK_EQ(map->GetInObjectProperties(), 0);
+
+  // Initial size of the backing store to avoid resizing the storage during
+  // bootstrapping. The size differs between the JS global object and the
+  // builtins object.
+ int initial_size = 64;
+
+ // Allocate a dictionary object for backing storage.
+ int at_least_space_for = map->NumberOfOwnDescriptors() * 2 + initial_size;
+ Handle<GlobalDictionary> dictionary =
+ GlobalDictionary::New(isolate(), at_least_space_for);
+
+ // The global object might be created from an object template with accessors.
+ // Fill these accessors into the dictionary.
+ Handle<DescriptorArray> descs(map->instance_descriptors());
+ for (int i = 0; i < map->NumberOfOwnDescriptors(); i++) {
+ PropertyDetails details = descs->GetDetails(i);
+ // Only accessors are expected.
+ DCHECK_EQ(kAccessor, details.kind());
+ PropertyDetails d(kAccessor, details.attributes(),
+ PropertyCellType::kMutable);
+ Handle<Name> name(descs->GetKey(i));
+ Handle<PropertyCell> cell = NewPropertyCell(name);
+ cell->set_value(descs->GetValue(i));
+ // |dictionary| already contains enough space for all properties.
+ USE(GlobalDictionary::Add(dictionary, name, cell, d));
+ }
+
+ // Allocate the global object and initialize it with the backing store.
+ Handle<JSGlobalObject> global(JSGlobalObject::cast(New(map, TENURED)),
+ isolate());
+ InitializeJSObjectFromMap(global, dictionary, map);
+
+ // Create a new map for the global object.
+ Handle<Map> new_map = Map::CopyDropDescriptors(map);
+ new_map->set_may_have_interesting_symbols(true);
+ new_map->set_is_dictionary_map(true);
+
+ // Set up the global object as a normalized object.
+ global->set_global_dictionary(*dictionary);
+ global->synchronized_set_map(*new_map);
+
+ // Make sure result is a global object with properties in dictionary.
+ DCHECK(global->IsJSGlobalObject() && !global->HasFastProperties());
+ return global;
+}
+
+void Factory::InitializeJSObjectFromMap(Handle<JSObject> obj,
+ Handle<Object> properties,
+ Handle<Map> map) {
+ obj->set_raw_properties_or_hash(*properties);
+ obj->initialize_elements();
+ // TODO(1240798): Initialize the object's body using valid initial values
+ // according to the object's initial map. For example, if the map's
+ // instance type is JS_ARRAY_TYPE, the length field should be initialized
+ // to a number (e.g. Smi::kZero) and the elements initialized to a
+ // fixed array (e.g. Heap::empty_fixed_array()). Currently, the object
+  // verification code has to cope with (temporarily) invalid objects. See,
+  // for example, JSArray::JSArrayVerify.
+ InitializeJSObjectBody(obj, map, JSObject::kHeaderSize);
+}
+
+void Factory::InitializeJSObjectBody(Handle<JSObject> obj, Handle<Map> map,
+ int start_offset) {
+ if (start_offset == map->instance_size()) return;
+ DCHECK_LT(start_offset, map->instance_size());
+
+ // We cannot always fill with one_pointer_filler_map because objects
+ // created from API functions expect their embedder fields to be initialized
+ // with undefined_value.
+ // Pre-allocated fields need to be initialized with undefined_value as well
+ // so that object accesses before the constructor completes (e.g. in the
+ // debugger) will not cause a crash.
+
+  // In case of Array subclassing, the |map| could already be transitioned to
+  // a different elements kind from the initial map on which we track slack.
+ bool in_progress = map->IsInobjectSlackTrackingInProgress();
+ Object* filler;
+ if (in_progress) {
+ filler = *one_pointer_filler_map();
+ } else {
+ filler = *undefined_value();
+ }
+ obj->InitializeBody(*map, start_offset, *undefined_value(), filler);
+ if (in_progress) {
+ map->FindRootMap()->InobjectSlackTrackingStep();
+ }
+}
+
+Handle<JSObject> Factory::NewJSObjectFromMap(
+ Handle<Map> map, PretenureFlag pretenure,
+ Handle<AllocationSite> allocation_site) {
+ // JSFunctions should be allocated using AllocateFunction to be
+ // properly initialized.
+ DCHECK(map->instance_type() != JS_FUNCTION_TYPE);
+
+ // Both types of global objects should be allocated using
+ // AllocateGlobalObject to be properly initialized.
+ DCHECK(map->instance_type() != JS_GLOBAL_OBJECT_TYPE);
+
+ HeapObject* obj =
+ AllocateRawWithAllocationSite(map, pretenure, allocation_site);
+ Handle<JSObject> js_obj(JSObject::cast(obj), isolate());
+
+ InitializeJSObjectFromMap(js_obj, empty_fixed_array(), map);
+
+ DCHECK(js_obj->HasFastElements() || js_obj->HasFixedTypedArrayElements() ||
+ js_obj->HasFastStringWrapperElements() ||
+ js_obj->HasFastArgumentsElements());
+ return js_obj;
+}
+
+Handle<JSObject> Factory::NewSlowJSObjectFromMap(Handle<Map> map, int capacity,
+ PretenureFlag pretenure) {
+ DCHECK(map->is_dictionary_map());
+ Handle<NameDictionary> object_properties =
+ NameDictionary::New(isolate(), capacity);
+ Handle<JSObject> js_object = NewJSObjectFromMap(map, pretenure);
+ js_object->set_raw_properties_or_hash(*object_properties);
+ return js_object;
+}
+
+Handle<JSArray> Factory::NewJSArray(ElementsKind elements_kind,
+ PretenureFlag pretenure) {
+ Context* native_context = isolate()->raw_native_context();
+ Map* map = native_context->GetInitialJSArrayMap(elements_kind);
+ if (map == nullptr) {
+ JSFunction* array_function = native_context->array_function();
+ map = array_function->initial_map();
+ }
+ return Handle<JSArray>::cast(NewJSObjectFromMap(handle(map), pretenure));
+}
+
+Handle<JSArray> Factory::NewJSArray(ElementsKind elements_kind, int length,
+ int capacity,
+ ArrayStorageAllocationMode mode,
+ PretenureFlag pretenure) {
+ Handle<JSArray> array = NewJSArray(elements_kind, pretenure);
+ NewJSArrayStorage(array, length, capacity, mode);
+ return array;
+}
+
+Handle<JSArray> Factory::NewJSArrayWithElements(Handle<FixedArrayBase> elements,
+ ElementsKind elements_kind,
+ int length,
+ PretenureFlag pretenure) {
+ DCHECK(length <= elements->length());
+ Handle<JSArray> array = NewJSArray(elements_kind, pretenure);
+
+ array->set_elements(*elements);
+ array->set_length(Smi::FromInt(length));
+ JSObject::ValidateElements(*array);
+ return array;
+}
+
+void Factory::NewJSArrayStorage(Handle<JSArray> array, int length, int capacity,
+ ArrayStorageAllocationMode mode) {
+ DCHECK(capacity >= length);
+
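+  // With zero capacity, reuse the canonical empty fixed array as elements.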
+ if (capacity == 0) {
+ array->set_length(Smi::kZero);
+ array->set_elements(*empty_fixed_array());
+ return;
+ }
+
+ HandleScope inner_scope(isolate());
+ Handle<FixedArrayBase> elms;
+ ElementsKind elements_kind = array->GetElementsKind();
+ if (IsDoubleElementsKind(elements_kind)) {
+ if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) {
+ elms = NewFixedDoubleArray(capacity);
+ } else {
+ DCHECK(mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
+ elms = NewFixedDoubleArrayWithHoles(capacity);
+ }
+ } else {
+ DCHECK(IsSmiOrObjectElementsKind(elements_kind));
+ if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) {
+ elms = NewUninitializedFixedArray(capacity);
+ } else {
+ DCHECK(mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
+ elms = NewFixedArrayWithHoles(capacity);
+ }
+ }
+
+ array->set_elements(*elms);
+ array->set_length(Smi::FromInt(length));
+}
+
+Handle<JSWeakMap> Factory::NewJSWeakMap() {
+ Context* native_context = isolate()->raw_native_context();
+ Handle<Map> map(native_context->js_weak_map_fun()->initial_map());
+ Handle<JSWeakMap> weakmap(JSWeakMap::cast(*NewJSObjectFromMap(map)));
+ {
+    // Do not leak handles for the hash table; it would make entries strong.
+ HandleScope scope(isolate());
+ JSWeakCollection::Initialize(weakmap, isolate());
+ }
+ return weakmap;
+}
+
+Handle<JSModuleNamespace> Factory::NewJSModuleNamespace() {
+ Handle<Map> map = isolate()->js_module_namespace_map();
+ Handle<JSModuleNamespace> module_namespace(
+ Handle<JSModuleNamespace>::cast(NewJSObjectFromMap(map)));
+ FieldIndex index = FieldIndex::ForDescriptor(
+ *map, JSModuleNamespace::kToStringTagFieldIndex);
+ module_namespace->FastPropertyAtPut(index,
+ isolate()->heap()->Module_string());
+ return module_namespace;
+}
+
+Handle<JSGeneratorObject> Factory::NewJSGeneratorObject(
+ Handle<JSFunction> function) {
+ DCHECK(IsResumableFunction(function->shared()->kind()));
+ JSFunction::EnsureHasInitialMap(function);
+ Handle<Map> map(function->initial_map());
+
+ DCHECK(map->instance_type() == JS_GENERATOR_OBJECT_TYPE ||
+ map->instance_type() == JS_ASYNC_GENERATOR_OBJECT_TYPE);
+
+ return Handle<JSGeneratorObject>::cast(NewJSObjectFromMap(map));
+}
+
+Handle<Module> Factory::NewModule(Handle<SharedFunctionInfo> code) {
+ Handle<ModuleInfo> module_info(code->scope_info()->ModuleDescriptorInfo(),
+ isolate());
+ Handle<ObjectHashTable> exports =
+ ObjectHashTable::New(isolate(), module_info->RegularExportCount());
+ Handle<FixedArray> regular_exports =
+ NewFixedArray(module_info->RegularExportCount());
+ Handle<FixedArray> regular_imports =
+ NewFixedArray(module_info->regular_imports()->length());
+ int requested_modules_length = module_info->module_requests()->length();
+ Handle<FixedArray> requested_modules =
+ requested_modules_length > 0 ? NewFixedArray(requested_modules_length)
+ : empty_fixed_array();
+
+ Handle<Module> module = Handle<Module>::cast(NewStruct(MODULE_TYPE, TENURED));
+ module->set_code(*code);
+ module->set_exports(*exports);
+ module->set_regular_exports(*regular_exports);
+ module->set_regular_imports(*regular_imports);
+ module->set_hash(isolate()->GenerateIdentityHash(Smi::kMaxValue));
+ module->set_module_namespace(isolate()->heap()->undefined_value());
+ module->set_requested_modules(*requested_modules);
+ module->set_script(Script::cast(code->script()));
+ module->set_status(Module::kUninstantiated);
+ module->set_exception(isolate()->heap()->the_hole_value());
+ module->set_import_meta(isolate()->heap()->the_hole_value());
+ module->set_dfs_index(-1);
+ module->set_dfs_ancestor_index(-1);
+ return module;
+}
+
+Handle<JSArrayBuffer> Factory::NewJSArrayBuffer(SharedFlag shared,
+ PretenureFlag pretenure) {
+ Handle<JSFunction> array_buffer_fun(
+ shared == SharedFlag::kShared
+ ? isolate()->native_context()->shared_array_buffer_fun()
+ : isolate()->native_context()->array_buffer_fun());
+ Handle<Map> map(array_buffer_fun->initial_map(), isolate());
+ return Handle<JSArrayBuffer>::cast(NewJSObjectFromMap(map, pretenure));
+}
+
+Handle<JSIteratorResult> Factory::NewJSIteratorResult(Handle<Object> value,
+ bool done) {
+ Handle<Map> map(isolate()->native_context()->iterator_result_map());
+ Handle<JSIteratorResult> js_iter_result =
+ Handle<JSIteratorResult>::cast(NewJSObjectFromMap(map));
+ js_iter_result->set_value(*value);
+ js_iter_result->set_done(*ToBoolean(done));
+ return js_iter_result;
+}
+
+Handle<JSAsyncFromSyncIterator> Factory::NewJSAsyncFromSyncIterator(
+ Handle<JSReceiver> sync_iterator, Handle<Object> next) {
+ Handle<Map> map(isolate()->native_context()->async_from_sync_iterator_map());
+ Handle<JSAsyncFromSyncIterator> iterator =
+ Handle<JSAsyncFromSyncIterator>::cast(NewJSObjectFromMap(map));
+
+ iterator->set_sync_iterator(*sync_iterator);
+ iterator->set_next(*next);
+ return iterator;
+}
+
+Handle<JSMap> Factory::NewJSMap() {
+ Handle<Map> map(isolate()->native_context()->js_map_map());
+ Handle<JSMap> js_map = Handle<JSMap>::cast(NewJSObjectFromMap(map));
+ JSMap::Initialize(js_map, isolate());
+ return js_map;
+}
+
+Handle<JSSet> Factory::NewJSSet() {
+ Handle<Map> map(isolate()->native_context()->js_set_map());
+ Handle<JSSet> js_set = Handle<JSSet>::cast(NewJSObjectFromMap(map));
+ JSSet::Initialize(js_set, isolate());
+ return js_set;
+}
+
+Handle<JSMapIterator> Factory::NewJSMapIterator(Handle<Map> map,
+ Handle<OrderedHashMap> table,
+ int index) {
+ Handle<JSMapIterator> result =
+ Handle<JSMapIterator>::cast(NewJSObjectFromMap(map));
+ result->set_table(*table);
+ result->set_index(Smi::FromInt(index));
+ return result;
+}
+
+Handle<JSSetIterator> Factory::NewJSSetIterator(Handle<Map> map,
+ Handle<OrderedHashSet> table,
+ int index) {
+ Handle<JSSetIterator> result =
+ Handle<JSSetIterator>::cast(NewJSObjectFromMap(map));
+ result->set_table(*table);
+ result->set_index(Smi::FromInt(index));
+ return result;
+}
+
+void Factory::TypeAndSizeForElementsKind(ElementsKind kind,
+ ExternalArrayType* array_type,
+ size_t* element_size) {
+ switch (kind) {
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
+ case TYPE##_ELEMENTS: \
+ *array_type = kExternal##Type##Array; \
+ *element_size = size; \
+ break;
+ TYPED_ARRAYS(TYPED_ARRAY_CASE)
+#undef TYPED_ARRAY_CASE
+
+ default:
+ UNREACHABLE();
+ }
+}
+
+namespace {
+
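+// Translates an ExternalArrayType into the corresponding element size and
+// ElementsKind (driven by the TYPED_ARRAYS macro list).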
+static void ForFixedTypedArray(ExternalArrayType array_type,
+ size_t* element_size,
+ ElementsKind* element_kind) {
+ switch (array_type) {
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
+ case kExternal##Type##Array: \
+ *element_size = size; \
+ *element_kind = TYPE##_ELEMENTS; \
+ return;
+
+ TYPED_ARRAYS(TYPED_ARRAY_CASE)
+#undef TYPED_ARRAY_CASE
+ }
+ UNREACHABLE();
+}
+
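+// Returns the typed array constructor function from the native context for
+// the given external array type or elements kind.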
+JSFunction* GetTypedArrayFun(ExternalArrayType type, Isolate* isolate) {
+ Context* native_context = isolate->context()->native_context();
+ switch (type) {
+#define TYPED_ARRAY_FUN(Type, type, TYPE, ctype, size) \
+ case kExternal##Type##Array: \
+ return native_context->type##_array_fun();
+
+ TYPED_ARRAYS(TYPED_ARRAY_FUN)
+#undef TYPED_ARRAY_FUN
+ }
+ UNREACHABLE();
+}
+
+JSFunction* GetTypedArrayFun(ElementsKind elements_kind, Isolate* isolate) {
+ Context* native_context = isolate->context()->native_context();
+ switch (elements_kind) {
+#define TYPED_ARRAY_FUN(Type, type, TYPE, ctype, size) \
+ case TYPE##_ELEMENTS: \
+ return native_context->type##_array_fun();
+
+ TYPED_ARRAYS(TYPED_ARRAY_FUN)
+#undef TYPED_ARRAY_FUN
+
+ default:
+ UNREACHABLE();
+ }
+}
+
+void SetupArrayBufferView(i::Isolate* isolate,
+ i::Handle<i::JSArrayBufferView> obj,
+ i::Handle<i::JSArrayBuffer> buffer,
+ size_t byte_offset, size_t byte_length,
+ PretenureFlag pretenure = NOT_TENURED) {
+ DCHECK(byte_offset + byte_length <=
+ static_cast<size_t>(buffer->byte_length()->Number()));
+
+ DCHECK_EQ(obj->GetEmbedderFieldCount(),
+ v8::ArrayBufferView::kEmbedderFieldCount);
+ for (int i = 0; i < v8::ArrayBufferView::kEmbedderFieldCount; i++) {
+ obj->SetEmbedderField(i, Smi::kZero);
+ }
+
+ obj->set_buffer(*buffer);
+
+ i::Handle<i::Object> byte_offset_object =
+ isolate->factory()->NewNumberFromSize(byte_offset, pretenure);
+ obj->set_byte_offset(*byte_offset_object);
+
+ i::Handle<i::Object> byte_length_object =
+ isolate->factory()->NewNumberFromSize(byte_length, pretenure);
+ obj->set_byte_length(*byte_length_object);
+}
+
+} // namespace
+
+Handle<JSTypedArray> Factory::NewJSTypedArray(ExternalArrayType type,
+ PretenureFlag pretenure) {
+ Handle<JSFunction> typed_array_fun(GetTypedArrayFun(type, isolate()));
+ Handle<Map> map(typed_array_fun->initial_map(), isolate());
+ return Handle<JSTypedArray>::cast(NewJSObjectFromMap(map, pretenure));
+}
+
+Handle<JSTypedArray> Factory::NewJSTypedArray(ElementsKind elements_kind,
+ PretenureFlag pretenure) {
+ Handle<JSFunction> typed_array_fun(GetTypedArrayFun(elements_kind, isolate()),
+ isolate());
+ Handle<Map> map(typed_array_fun->initial_map(), isolate());
+ return Handle<JSTypedArray>::cast(NewJSObjectFromMap(map, pretenure));
+}
+
+Handle<JSTypedArray> Factory::NewJSTypedArray(ExternalArrayType type,
+ Handle<JSArrayBuffer> buffer,
+ size_t byte_offset, size_t length,
+ PretenureFlag pretenure) {
+ Handle<JSTypedArray> obj = NewJSTypedArray(type, pretenure);
+
+ size_t element_size;
+ ElementsKind elements_kind;
+ ForFixedTypedArray(type, &element_size, &elements_kind);
+
+ CHECK_EQ(byte_offset % element_size, 0);
+
+ CHECK(length <= (std::numeric_limits<size_t>::max() / element_size));
+ CHECK(length <= static_cast<size_t>(Smi::kMaxValue));
+ size_t byte_length = length * element_size;
+ SetupArrayBufferView(isolate(), obj, buffer, byte_offset, byte_length,
+ pretenure);
+
+ Handle<Object> length_object = NewNumberFromSize(length, pretenure);
+ obj->set_length(*length_object);
+
+ Handle<FixedTypedArrayBase> elements = NewFixedTypedArrayWithExternalPointer(
+ static_cast<int>(length), type,
+ static_cast<uint8_t*>(buffer->backing_store()) + byte_offset, pretenure);
+ Handle<Map> map = JSObject::GetElementsTransitionMap(obj, elements_kind);
+ JSObject::SetMapAndElements(obj, map, elements);
+ return obj;
+}
+
+Handle<JSTypedArray> Factory::NewJSTypedArray(ElementsKind elements_kind,
+ size_t number_of_elements,
+ PretenureFlag pretenure) {
+ Handle<JSTypedArray> obj = NewJSTypedArray(elements_kind, pretenure);
+ DCHECK_EQ(obj->GetEmbedderFieldCount(),
+ v8::ArrayBufferView::kEmbedderFieldCount);
+ for (int i = 0; i < v8::ArrayBufferView::kEmbedderFieldCount; i++) {
+ obj->SetEmbedderField(i, Smi::kZero);
+ }
+
+ size_t element_size;
+ ExternalArrayType array_type;
+ TypeAndSizeForElementsKind(elements_kind, &array_type, &element_size);
+
+ CHECK(number_of_elements <=
+ (std::numeric_limits<size_t>::max() / element_size));
+ CHECK(number_of_elements <= static_cast<size_t>(Smi::kMaxValue));
+ size_t byte_length = number_of_elements * element_size;
+
+ obj->set_byte_offset(Smi::kZero);
+ i::Handle<i::Object> byte_length_object =
+ NewNumberFromSize(byte_length, pretenure);
+ obj->set_byte_length(*byte_length_object);
+ Handle<Object> length_object =
+ NewNumberFromSize(number_of_elements, pretenure);
+ obj->set_length(*length_object);
+
+ Handle<JSArrayBuffer> buffer =
+ NewJSArrayBuffer(SharedFlag::kNotShared, pretenure);
+ JSArrayBuffer::Setup(buffer, isolate(), true, nullptr, byte_length,
+ SharedFlag::kNotShared);
+ obj->set_buffer(*buffer);
+ Handle<FixedTypedArrayBase> elements = NewFixedTypedArray(
+ number_of_elements, byte_length, array_type, true, pretenure);
+ obj->set_elements(*elements);
+ return obj;
+}
+
+Handle<JSDataView> Factory::NewJSDataView(Handle<JSArrayBuffer> buffer,
+ size_t byte_offset,
+ size_t byte_length) {
+ Handle<Map> map(isolate()->native_context()->data_view_fun()->initial_map(),
+ isolate());
+ Handle<JSDataView> obj = Handle<JSDataView>::cast(NewJSObjectFromMap(map));
+ SetupArrayBufferView(isolate(), obj, buffer, byte_offset, byte_length);
+ return obj;
+}
+
+MaybeHandle<JSBoundFunction> Factory::NewJSBoundFunction(
+ Handle<JSReceiver> target_function, Handle<Object> bound_this,
+ Vector<Handle<Object>> bound_args) {
+ DCHECK(target_function->IsCallable());
+ STATIC_ASSERT(Code::kMaxArguments <= FixedArray::kMaxLength);
+ if (bound_args.length() >= Code::kMaxArguments) {
+ THROW_NEW_ERROR(isolate(),
+ NewRangeError(MessageTemplate::kTooManyArguments),
+ JSBoundFunction);
+ }
+
+ // Determine the prototype of the {target_function}.
+ Handle<Object> prototype;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate(), prototype,
+ JSReceiver::GetPrototype(isolate(), target_function), JSBoundFunction);
+
+ SaveContext save(isolate());
+ isolate()->set_context(*target_function->GetCreationContext());
+
+ // Create the [[BoundArguments]] for the result.
+ Handle<FixedArray> bound_arguments;
+ if (bound_args.length() == 0) {
+ bound_arguments = empty_fixed_array();
+ } else {
+ bound_arguments = NewFixedArray(bound_args.length());
+ for (int i = 0; i < bound_args.length(); ++i) {
+ bound_arguments->set(i, *bound_args[i]);
+ }
+ }
+
+  // Set up the map for the JSBoundFunction instance.
+ Handle<Map> map = target_function->IsConstructor()
+ ? isolate()->bound_function_with_constructor_map()
+ : isolate()->bound_function_without_constructor_map();
+ if (map->prototype() != *prototype) {
+ map = Map::TransitionToPrototype(map, prototype);
+ }
+ DCHECK_EQ(target_function->IsConstructor(), map->is_constructor());
+
+  // Set up the JSBoundFunction instance.
+ Handle<JSBoundFunction> result =
+ Handle<JSBoundFunction>::cast(NewJSObjectFromMap(map));
+ result->set_bound_target_function(*target_function);
+ result->set_bound_this(*bound_this);
+ result->set_bound_arguments(*bound_arguments);
+ return result;
+}
+
+// ES6 section 9.5.15 ProxyCreate (target, handler)
+Handle<JSProxy> Factory::NewJSProxy(Handle<JSReceiver> target,
+ Handle<JSReceiver> handler) {
+ // Allocate the proxy object.
+ Handle<Map> map;
+ if (target->IsCallable()) {
+ if (target->IsConstructor()) {
+ map = Handle<Map>(isolate()->proxy_constructor_map());
+ } else {
+ map = Handle<Map>(isolate()->proxy_callable_map());
+ }
+ } else {
+ map = Handle<Map>(isolate()->proxy_map());
+ }
+ DCHECK(map->prototype()->IsNull(isolate()));
+ Handle<JSProxy> result(JSProxy::cast(New(map, NOT_TENURED)), isolate());
+ result->initialize_properties();
+ result->set_target(*target);
+ result->set_handler(*handler);
+ return result;
+}
+
+Handle<JSGlobalProxy> Factory::NewUninitializedJSGlobalProxy(int size) {
+ // Create an empty shell of a JSGlobalProxy that needs to be reinitialized
+ // via ReinitializeJSGlobalProxy later.
+ Handle<Map> map = NewMap(JS_GLOBAL_PROXY_TYPE, size);
+ // Maintain invariant expected from any JSGlobalProxy.
+ map->set_is_access_check_needed(true);
+ map->set_may_have_interesting_symbols(true);
+ return Handle<JSGlobalProxy>::cast(NewJSObjectFromMap(map, NOT_TENURED));
+}
+
+void Factory::ReinitializeJSGlobalProxy(Handle<JSGlobalProxy> object,
+ Handle<JSFunction> constructor) {
+ DCHECK(constructor->has_initial_map());
+ Handle<Map> map(constructor->initial_map(), isolate());
+ Handle<Map> old_map(object->map(), isolate());
+
+ // The proxy's hash should be retained across reinitialization.
+ Handle<Object> raw_properties_or_hash(object->raw_properties_or_hash(),
+ isolate());
+
+ if (old_map->is_prototype_map()) {
+ map = Map::Copy(map, "CopyAsPrototypeForJSGlobalProxy");
+ map->set_is_prototype_map(true);
+ }
+ JSObject::NotifyMapChange(old_map, map, isolate());
+ old_map->NotifyLeafMapLayoutChange();
+
+ // Check that the already allocated object has the same size and type as
+ // objects allocated using the constructor.
+ DCHECK(map->instance_size() == old_map->instance_size());
+ DCHECK(map->instance_type() == old_map->instance_type());
+
+  // In order to keep the heap in a consistent state, there must be no
+  // allocations before object re-initialization is finished.
+ DisallowHeapAllocation no_allocation;
+
+ // Reset the map for the object.
+ object->synchronized_set_map(*map);
+
+ // Reinitialize the object from the constructor map.
+ InitializeJSObjectFromMap(object, raw_properties_or_hash, map);
+}
+
+Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfoForLiteral(
+ FunctionLiteral* literal, Handle<Script> script, bool is_toplevel) {
+ FunctionKind kind = literal->kind();
+ Handle<SharedFunctionInfo> shared = NewSharedFunctionInfoForBuiltin(
+ literal->name(), Builtins::kCompileLazy, kind);
+ SharedFunctionInfo::InitFromFunctionLiteral(shared, literal, is_toplevel);
+ SharedFunctionInfo::SetScript(shared, script, false);
+ return shared;
+}
+
+Handle<JSMessageObject> Factory::NewJSMessageObject(
+ MessageTemplate::Template message, Handle<Object> argument,
+ int start_position, int end_position, Handle<Object> script,
+ Handle<Object> stack_frames) {
+ Handle<Map> map = message_object_map();
+ Handle<JSMessageObject> message_obj(
+ JSMessageObject::cast(New(map, NOT_TENURED)), isolate());
+ message_obj->set_raw_properties_or_hash(*empty_fixed_array(),
+ SKIP_WRITE_BARRIER);
+ message_obj->initialize_elements();
+ message_obj->set_elements(*empty_fixed_array(), SKIP_WRITE_BARRIER);
+ message_obj->set_type(message);
+ message_obj->set_argument(*argument);
+ message_obj->set_start_position(start_position);
+ message_obj->set_end_position(end_position);
+ message_obj->set_script(*script);
+ message_obj->set_stack_frames(*stack_frames);
+ message_obj->set_error_level(v8::Isolate::kMessageError);
+ return message_obj;
+}
+
+Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfoForApiFunction(
+ MaybeHandle<String> maybe_name,
+ Handle<FunctionTemplateInfo> function_template_info, FunctionKind kind) {
+ Handle<SharedFunctionInfo> shared = NewSharedFunctionInfo(
+ maybe_name, function_template_info, Builtins::kNoBuiltinId, kind);
+ return shared;
+}
+
+Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfoForBuiltin(
+ MaybeHandle<String> maybe_name, int builtin_index, FunctionKind kind) {
+ Handle<SharedFunctionInfo> shared = NewSharedFunctionInfo(
+ maybe_name, MaybeHandle<Code>(), builtin_index, kind);
+ return shared;
+}
+
+Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfo(
+ MaybeHandle<String> maybe_name, MaybeHandle<HeapObject> maybe_function_data,
+ int maybe_builtin_index, FunctionKind kind) {
+ // Function names are assumed to be flat elsewhere. Must flatten before
+ // allocating SharedFunctionInfo to avoid GC seeing the uninitialized SFI.
+ Handle<String> shared_name;
+ bool has_shared_name = maybe_name.ToHandle(&shared_name);
+ if (has_shared_name) {
+ shared_name = String::Flatten(shared_name, TENURED);
+ }
+
+ Handle<Map> map = shared_function_info_map();
+ Handle<SharedFunctionInfo> share(SharedFunctionInfo::cast(New(map, TENURED)),
+ isolate());
+ {
+ DisallowHeapAllocation no_allocation;
+
+ // Set pointer fields.
+ share->set_name_or_scope_info(
+ has_shared_name ? *shared_name
+ : SharedFunctionInfo::kNoSharedNameSentinel);
+ Handle<HeapObject> function_data;
+ if (maybe_function_data.ToHandle(&function_data)) {
+ // If we pass function_data then we shouldn't pass a builtin index, and
+ // the function_data should not be code with a builtin.
+ DCHECK(!Builtins::IsBuiltinId(maybe_builtin_index));
+ DCHECK_IMPLIES(function_data->IsCode(),
+ !Code::cast(*function_data)->is_builtin());
+ share->set_function_data(*function_data);
+ } else if (Builtins::IsBuiltinId(maybe_builtin_index)) {
+ DCHECK_NE(maybe_builtin_index, Builtins::kDeserializeLazy);
+ share->set_builtin_id(maybe_builtin_index);
+ } else {
+ share->set_builtin_id(Builtins::kIllegal);
+ }
+ // Generally functions won't have feedback, unless they have been created
+ // from a FunctionLiteral. Those can just reset this field to keep the
+ // SharedFunctionInfo in a consistent state.
+ if (maybe_builtin_index == Builtins::kCompileLazy) {
+ share->set_raw_outer_scope_info_or_feedback_metadata(*the_hole_value(),
+ SKIP_WRITE_BARRIER);
+ } else {
+ share->set_raw_outer_scope_info_or_feedback_metadata(
+ *empty_feedback_metadata(), SKIP_WRITE_BARRIER);
+ }
+ share->set_script(*undefined_value(), SKIP_WRITE_BARRIER);
+ share->set_debug_info(Smi::kZero, SKIP_WRITE_BARRIER);
+ share->set_function_identifier(*undefined_value(), SKIP_WRITE_BARRIER);
+ share->set_function_literal_id(FunctionLiteral::kIdTypeInvalid);
+#if V8_SFI_HAS_UNIQUE_ID
+ share->set_unique_id(isolate()->GetNextUniqueSharedFunctionInfoId());
+#endif
+
+ // Set integer fields (smi or int, depending on the architecture).
+ share->set_length(0);
+ share->set_internal_formal_parameter_count(0);
+ share->set_expected_nof_properties(0);
+ share->set_raw_start_position_and_type(0);
+ share->set_raw_end_position(0);
+ share->set_function_token_position(0);
+ // All flags default to false or 0.
+ share->set_flags(0);
+ share->CalculateConstructAsBuiltin();
+ share->set_kind(kind);
+
+ share->clear_padding();
+ }
+ // Link into the list.
+ Handle<Object> new_noscript_list =
+ FixedArrayOfWeakCells::Add(noscript_shared_function_infos(), share);
+ isolate()->heap()->set_noscript_shared_function_infos(*new_noscript_list);
+
+ DCHECK_EQ(SharedFunctionInfo::kNoDebuggingId, share->debugging_id());
+#ifdef VERIFY_HEAP
+ share->SharedFunctionInfoVerify();
+#endif
+ return share;
+}
+
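+// The number-string cache stores (number, string) pairs in adjacent slots,
+// so the hash mask is derived from half of the cache length.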
+static inline int NumberCacheHash(Handle<FixedArray> cache,
+ Handle<Object> number) {
+ int mask = (cache->length() >> 1) - 1;
+ if (number->IsSmi()) {
+ return Handle<Smi>::cast(number)->value() & mask;
+ } else {
+ int64_t bits = bit_cast<int64_t>(number->Number());
+ return (static_cast<int>(bits) ^ static_cast<int>(bits >> 32)) & mask;
+ }
+}
+
+Handle<Object> Factory::GetNumberStringCache(Handle<Object> number) {
+ DisallowHeapAllocation no_gc;
+ int hash = NumberCacheHash(number_string_cache(), number);
+ Object* key = number_string_cache()->get(hash * 2);
+ if (key == *number || (key->IsHeapNumber() && number->IsHeapNumber() &&
+ key->Number() == number->Number())) {
+ return Handle<String>(
+ String::cast(number_string_cache()->get(hash * 2 + 1)), isolate());
+ }
+ return undefined_value();
+}
+
+void Factory::SetNumberStringCache(Handle<Object> number,
+ Handle<String> string) {
+ int hash = NumberCacheHash(number_string_cache(), number);
+ if (number_string_cache()->get(hash * 2) != *undefined_value()) {
+ int full_size = isolate()->heap()->FullSizeNumberStringCacheLength();
+ if (number_string_cache()->length() != full_size) {
+ Handle<FixedArray> new_cache = NewFixedArray(full_size, TENURED);
+ isolate()->heap()->set_number_string_cache(*new_cache);
+ return;
+ }
+ }
+ number_string_cache()->set(hash * 2, *number);
+ number_string_cache()->set(hash * 2 + 1, *string);
+}
+
+Handle<String> Factory::NumberToString(Handle<Object> number,
+ bool check_number_string_cache) {
+ isolate()->counters()->number_to_string_runtime()->Increment();
+ if (check_number_string_cache) {
+ Handle<Object> cached = GetNumberStringCache(number);
+ if (!cached->IsUndefined(isolate())) return Handle<String>::cast(cached);
+ }
+
+ char arr[100];
+ Vector<char> buffer(arr, arraysize(arr));
+ const char* str;
+ if (number->IsSmi()) {
+ int num = Handle<Smi>::cast(number)->value();
+ str = IntToCString(num, buffer);
+ } else {
+ double num = Handle<HeapNumber>::cast(number)->value();
+ str = DoubleToCString(num, buffer);
+ }
+
+ // We tenure the allocated string since it is referenced from the
+ // number-string cache which lives in the old space.
+ Handle<String> js_string = NewStringFromAsciiChecked(str, TENURED);
+ SetNumberStringCache(number, js_string);
+ return js_string;
+}
+
+Handle<DebugInfo> Factory::NewDebugInfo(Handle<SharedFunctionInfo> shared) {
+ DCHECK(!shared->HasDebugInfo());
+ Heap* heap = isolate()->heap();
+
+ Handle<DebugInfo> debug_info =
+ Handle<DebugInfo>::cast(NewStruct(DEBUG_INFO_TYPE, TENURED));
+ debug_info->set_flags(DebugInfo::kNone);
+ debug_info->set_shared(*shared);
+ debug_info->set_debugger_hints(shared->debugger_hints());
+ debug_info->set_debug_bytecode_array(heap->undefined_value());
+ debug_info->set_break_points(heap->empty_fixed_array());
+
+ // Link debug info to function.
+ shared->set_debug_info(*debug_info);
+
+ return debug_info;
+}
+
+Handle<CoverageInfo> Factory::NewCoverageInfo(
+ const ZoneVector<SourceRange>& slots) {
+ const int slot_count = static_cast<int>(slots.size());
+
+ const int length = CoverageInfo::FixedArrayLengthForSlotCount(slot_count);
+ Handle<CoverageInfo> info =
+ Handle<CoverageInfo>::cast(NewUninitializedFixedArray(length));
+
+ for (int i = 0; i < slot_count; i++) {
+ SourceRange range = slots[i];
+ info->InitializeSlot(i, range.start, range.end);
+ }
+
+ return info;
+}
+
+Handle<BreakPointInfo> Factory::NewBreakPointInfo(int source_position) {
+ Handle<BreakPointInfo> new_break_point_info =
+ Handle<BreakPointInfo>::cast(NewStruct(TUPLE2_TYPE, TENURED));
+ new_break_point_info->set_source_position(source_position);
+ new_break_point_info->set_break_points(*undefined_value());
+ return new_break_point_info;
+}
+
+Handle<BreakPoint> Factory::NewBreakPoint(int id, Handle<String> condition) {
+ Handle<BreakPoint> new_break_point =
+ Handle<BreakPoint>::cast(NewStruct(TUPLE2_TYPE, TENURED));
+ new_break_point->set_id(id);
+ new_break_point->set_condition(*condition);
+ return new_break_point;
+}
+
+Handle<StackFrameInfo> Factory::NewStackFrameInfo() {
+ Handle<StackFrameInfo> stack_frame_info = Handle<StackFrameInfo>::cast(
+ NewStruct(STACK_FRAME_INFO_TYPE, NOT_TENURED));
+ stack_frame_info->set_line_number(0);
+ stack_frame_info->set_column_number(0);
+ stack_frame_info->set_script_id(0);
+ stack_frame_info->set_script_name(Smi::kZero);
+ stack_frame_info->set_script_name_or_source_url(Smi::kZero);
+ stack_frame_info->set_function_name(Smi::kZero);
+ stack_frame_info->set_flag(0);
+ return stack_frame_info;
+}
+
+Handle<SourcePositionTableWithFrameCache>
+Factory::NewSourcePositionTableWithFrameCache(
+ Handle<ByteArray> source_position_table,
+ Handle<SimpleNumberDictionary> stack_frame_cache) {
+ Handle<SourcePositionTableWithFrameCache>
+ source_position_table_with_frame_cache =
+ Handle<SourcePositionTableWithFrameCache>::cast(
+ NewStruct(TUPLE2_TYPE, TENURED));
+ source_position_table_with_frame_cache->set_source_position_table(
+ *source_position_table);
+ source_position_table_with_frame_cache->set_stack_frame_cache(
+ *stack_frame_cache);
+ return source_position_table_with_frame_cache;
+}
+
+Handle<JSObject> Factory::NewArgumentsObject(Handle<JSFunction> callee,
+ int length) {
+ bool strict_mode_callee = is_strict(callee->shared()->language_mode()) ||
+ !callee->shared()->has_simple_parameters();
+ Handle<Map> map = strict_mode_callee ? isolate()->strict_arguments_map()
+ : isolate()->sloppy_arguments_map();
+ AllocationSiteUsageContext context(isolate(), Handle<AllocationSite>(),
+ false);
+ DCHECK(!isolate()->has_pending_exception());
+ Handle<JSObject> result = NewJSObjectFromMap(map);
+ Handle<Smi> value(Smi::FromInt(length), isolate());
+ Object::SetProperty(result, length_string(), value, LanguageMode::kStrict)
+ .Assert();
+ if (!strict_mode_callee) {
+ Object::SetProperty(result, callee_string(), callee, LanguageMode::kStrict)
+ .Assert();
+ }
+ return result;
+}
+
+Handle<Map> Factory::ObjectLiteralMapFromCache(Handle<Context> native_context,
+ int number_of_properties) {
+ DCHECK(native_context->IsNativeContext());
+ const int kMapCacheSize = 128;
+ // We do not cache maps for too many properties or when running builtin code.
+ if (isolate()->bootstrapper()->IsActive()) {
+ return Map::Create(isolate(), number_of_properties);
+ }
+ // Use initial slow object proto map for too many properties.
+ if (number_of_properties > kMapCacheSize) {
+ return handle(native_context->slow_object_with_object_prototype_map(),
+ isolate());
+ }
+ if (number_of_properties == 0) {
+ // Reuse the initial map of the Object function if the literal has no
+ // predeclared properties.
+ return handle(native_context->object_function()->initial_map(), isolate());
+ }
+
+ int cache_index = number_of_properties - 1;
+ Handle<Object> maybe_cache(native_context->map_cache(), isolate());
+ if (maybe_cache->IsUndefined(isolate())) {
+ // Allocate the new map cache for the native context.
+ maybe_cache = NewFixedArray(kMapCacheSize, TENURED);
+ native_context->set_map_cache(*maybe_cache);
+ } else {
+ // Check to see whether there is a matching element in the cache.
+ Handle<FixedArray> cache = Handle<FixedArray>::cast(maybe_cache);
+ Object* result = cache->get(cache_index);
+ if (result->IsWeakCell()) {
+ WeakCell* cell = WeakCell::cast(result);
+ if (!cell->cleared()) {
+ Map* map = Map::cast(cell->value());
+ DCHECK(!map->is_dictionary_map());
+ return handle(map, isolate());
+ }
+ }
+ }
+ // Create a new map and add it to the cache.
+ Handle<FixedArray> cache = Handle<FixedArray>::cast(maybe_cache);
+ Handle<Map> map = Map::Create(isolate(), number_of_properties);
+ DCHECK(!map->is_dictionary_map());
+ Handle<WeakCell> cell = NewWeakCell(map);
+ cache->set(cache_index, *cell);
+ return map;
+}
+
+Handle<LoadHandler> Factory::NewLoadHandler(int data_count) {
+ Handle<Map> map;
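+  // Pick the handler map that matches the requested number of data fields.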
+ switch (data_count) {
+ case 1:
+ map = load_handler1_map();
+ break;
+ case 2:
+ map = load_handler2_map();
+ break;
+ case 3:
+ map = load_handler3_map();
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ return handle(LoadHandler::cast(New(map, TENURED)), isolate());
+}
+
+Handle<StoreHandler> Factory::NewStoreHandler(int data_count) {
+ Handle<Map> map;
+ switch (data_count) {
+ case 0:
+ map = store_handler0_map();
+ break;
+ case 1:
+ map = store_handler1_map();
+ break;
+ case 2:
+ map = store_handler2_map();
+ break;
+ case 3:
+ map = store_handler3_map();
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ return handle(StoreHandler::cast(New(map, TENURED)), isolate());
+}
+
+void Factory::SetRegExpAtomData(Handle<JSRegExp> regexp, JSRegExp::Type type,
+ Handle<String> source, JSRegExp::Flags flags,
+ Handle<Object> data) {
+ Handle<FixedArray> store = NewFixedArray(JSRegExp::kAtomDataSize);
+
+ store->set(JSRegExp::kTagIndex, Smi::FromInt(type));
+ store->set(JSRegExp::kSourceIndex, *source);
+ store->set(JSRegExp::kFlagsIndex, Smi::FromInt(flags));
+ store->set(JSRegExp::kAtomPatternIndex, *data);
+ regexp->set_data(*store);
+}
+
+void Factory::SetRegExpIrregexpData(Handle<JSRegExp> regexp,
+ JSRegExp::Type type, Handle<String> source,
+ JSRegExp::Flags flags, int capture_count) {
+ Handle<FixedArray> store = NewFixedArray(JSRegExp::kIrregexpDataSize);
+ Smi* uninitialized = Smi::FromInt(JSRegExp::kUninitializedValue);
+ store->set(JSRegExp::kTagIndex, Smi::FromInt(type));
+ store->set(JSRegExp::kSourceIndex, *source);
+ store->set(JSRegExp::kFlagsIndex, Smi::FromInt(flags));
+ store->set(JSRegExp::kIrregexpLatin1CodeIndex, uninitialized);
+ store->set(JSRegExp::kIrregexpUC16CodeIndex, uninitialized);
+ store->set(JSRegExp::kIrregexpMaxRegisterCountIndex, Smi::kZero);
+ store->set(JSRegExp::kIrregexpCaptureCountIndex, Smi::FromInt(capture_count));
+ store->set(JSRegExp::kIrregexpCaptureNameMapIndex, uninitialized);
+ regexp->set_data(*store);
+}
+
+Handle<RegExpMatchInfo> Factory::NewRegExpMatchInfo() {
+ // Initially, the last match info consists of all fixed fields plus space for
+ // the match itself (i.e., 2 capture indices).
+ static const int kInitialSize = RegExpMatchInfo::kFirstCaptureIndex +
+ RegExpMatchInfo::kInitialCaptureIndices;
+
+ Handle<FixedArray> elems = NewFixedArray(kInitialSize);
+ Handle<RegExpMatchInfo> result = Handle<RegExpMatchInfo>::cast(elems);
+
+ result->SetNumberOfCaptureRegisters(RegExpMatchInfo::kInitialCaptureIndices);
+ result->SetLastSubject(*empty_string());
+ result->SetLastInput(*undefined_value());
+ result->SetCapture(0, 0);
+ result->SetCapture(1, 0);
+
+ return result;
+}
+
+Handle<Object> Factory::GlobalConstantFor(Handle<Name> name) {
+ if (Name::Equals(name, undefined_string())) return undefined_value();
+ if (Name::Equals(name, NaN_string())) return nan_value();
+ if (Name::Equals(name, Infinity_string())) return infinity_value();
+ return Handle<Object>::null();
+}
+
+Handle<Object> Factory::ToBoolean(bool value) {
+ return value ? true_value() : false_value();
+}
+
+Handle<String> Factory::ToPrimitiveHintString(ToPrimitiveHint hint) {
+ switch (hint) {
+ case ToPrimitiveHint::kDefault:
+ return default_string();
+ case ToPrimitiveHint::kNumber:
+ return number_string();
+ case ToPrimitiveHint::kString:
+ return string_string();
+ }
+ UNREACHABLE();
+}
+
+Handle<Map> Factory::CreateSloppyFunctionMap(
+ FunctionMode function_mode, MaybeHandle<JSFunction> maybe_empty_function) {
+ bool has_prototype = IsFunctionModeWithPrototype(function_mode);
+ int header_size = has_prototype ? JSFunction::kSizeWithPrototype
+ : JSFunction::kSizeWithoutPrototype;
+ int descriptors_count = has_prototype ? 5 : 4;
+ int inobject_properties_count = 0;
+ if (IsFunctionModeWithName(function_mode)) ++inobject_properties_count;
+
+ Handle<Map> map = NewMap(
+ JS_FUNCTION_TYPE, header_size + inobject_properties_count * kPointerSize,
+ TERMINAL_FAST_ELEMENTS_KIND, inobject_properties_count);
+ map->set_has_prototype_slot(has_prototype);
+ map->set_is_constructor(has_prototype);
+ map->set_is_callable(true);
+ Handle<JSFunction> empty_function;
+ if (maybe_empty_function.ToHandle(&empty_function)) {
+ Map::SetPrototype(map, empty_function);
+ }
+
+ //
+  // Set up the descriptors array.
+ //
+ Map::EnsureDescriptorSlack(map, descriptors_count);
+
+ PropertyAttributes ro_attribs =
+ static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY);
+ PropertyAttributes rw_attribs =
+ static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE);
+ PropertyAttributes roc_attribs =
+ static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY);
+
+ int field_index = 0;
+ STATIC_ASSERT(JSFunction::kLengthDescriptorIndex == 0);
+ { // Add length accessor.
+ Descriptor d = Descriptor::AccessorConstant(
+ length_string(), function_length_accessor(), roc_attribs);
+ map->AppendDescriptor(&d);
+ }
+
+ STATIC_ASSERT(JSFunction::kNameDescriptorIndex == 1);
+ if (IsFunctionModeWithName(function_mode)) {
+ // Add name field.
+ Handle<Name> name = isolate()->factory()->name_string();
+ Descriptor d = Descriptor::DataField(name, field_index++, roc_attribs,
+ Representation::Tagged());
+ map->AppendDescriptor(&d);
+
+ } else {
+ // Add name accessor.
+ Descriptor d = Descriptor::AccessorConstant(
+ name_string(), function_name_accessor(), roc_attribs);
+ map->AppendDescriptor(&d);
+ }
+ { // Add arguments accessor.
+ Descriptor d = Descriptor::AccessorConstant(
+ arguments_string(), function_arguments_accessor(), ro_attribs);
+ map->AppendDescriptor(&d);
+ }
+ { // Add caller accessor.
+ Descriptor d = Descriptor::AccessorConstant(
+ caller_string(), function_caller_accessor(), ro_attribs);
+ map->AppendDescriptor(&d);
+ }
+ if (IsFunctionModeWithPrototype(function_mode)) {
+ // Add prototype accessor.
+ PropertyAttributes attribs =
+ IsFunctionModeWithWritablePrototype(function_mode) ? rw_attribs
+ : ro_attribs;
+ Descriptor d = Descriptor::AccessorConstant(
+ prototype_string(), function_prototype_accessor(), attribs);
+ map->AppendDescriptor(&d);
+ }
+ DCHECK_EQ(inobject_properties_count, field_index);
+ return map;
+}
+
+Handle<Map> Factory::CreateStrictFunctionMap(
+ FunctionMode function_mode, Handle<JSFunction> empty_function) {
+ bool has_prototype = IsFunctionModeWithPrototype(function_mode);
+ int header_size = has_prototype ? JSFunction::kSizeWithPrototype
+ : JSFunction::kSizeWithoutPrototype;
+ int inobject_properties_count = 0;
+ if (IsFunctionModeWithName(function_mode)) ++inobject_properties_count;
+ if (IsFunctionModeWithHomeObject(function_mode)) ++inobject_properties_count;
+ int descriptors_count = (IsFunctionModeWithPrototype(function_mode) ? 3 : 2) +
+ inobject_properties_count;
+
+ Handle<Map> map = NewMap(
+ JS_FUNCTION_TYPE, header_size + inobject_properties_count * kPointerSize,
+ TERMINAL_FAST_ELEMENTS_KIND, inobject_properties_count);
+ map->set_has_prototype_slot(has_prototype);
+ map->set_is_constructor(has_prototype);
+ map->set_is_callable(true);
+ Map::SetPrototype(map, empty_function);
+
+ //
+  // Set up the descriptors array.
+ //
+ Map::EnsureDescriptorSlack(map, descriptors_count);
+
+ PropertyAttributes rw_attribs =
+ static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE);
+ PropertyAttributes ro_attribs =
+ static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY);
+ PropertyAttributes roc_attribs =
+ static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY);
+
+ int field_index = 0;
+ STATIC_ASSERT(JSFunction::kLengthDescriptorIndex == 0);
+ { // Add length accessor.
+ Descriptor d = Descriptor::AccessorConstant(
+ length_string(), function_length_accessor(), roc_attribs);
+ map->AppendDescriptor(&d);
+ }
+
+ STATIC_ASSERT(JSFunction::kNameDescriptorIndex == 1);
+ if (IsFunctionModeWithName(function_mode)) {
+ // Add name field.
+ Handle<Name> name = isolate()->factory()->name_string();
+ Descriptor d = Descriptor::DataField(name, field_index++, roc_attribs,
+ Representation::Tagged());
+ map->AppendDescriptor(&d);
+
+ } else {
+ // Add name accessor.
+ Descriptor d = Descriptor::AccessorConstant(
+ name_string(), function_name_accessor(), roc_attribs);
+ map->AppendDescriptor(&d);
+ }
+
+ STATIC_ASSERT(JSFunction::kMaybeHomeObjectDescriptorIndex == 2);
+ if (IsFunctionModeWithHomeObject(function_mode)) {
+ // Add home object field.
+ Handle<Name> name = isolate()->factory()->home_object_symbol();
+ Descriptor d = Descriptor::DataField(name, field_index++, DONT_ENUM,
+ Representation::Tagged());
+ map->AppendDescriptor(&d);
+ }
+
+ if (IsFunctionModeWithPrototype(function_mode)) {
+ // Add prototype accessor.
+ PropertyAttributes attribs =
+ IsFunctionModeWithWritablePrototype(function_mode) ? rw_attribs
+ : ro_attribs;
+ Descriptor d = Descriptor::AccessorConstant(
+ prototype_string(), function_prototype_accessor(), attribs);
+ map->AppendDescriptor(&d);
+ }
+ DCHECK_EQ(inobject_properties_count, field_index);
+ return map;
+}
+
+Handle<Map> Factory::CreateClassFunctionMap(Handle<JSFunction> empty_function) {
+ Handle<Map> map = NewMap(JS_FUNCTION_TYPE, JSFunction::kSizeWithPrototype);
+ map->set_has_prototype_slot(true);
+ map->set_is_constructor(true);
+ map->set_is_prototype_map(true);
+ map->set_is_callable(true);
+ Map::SetPrototype(map, empty_function);
+
+ //
+  // Set up the descriptors array.
+ //
+ Map::EnsureDescriptorSlack(map, 2);
+
+ PropertyAttributes ro_attribs =
+ static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY);
+ PropertyAttributes roc_attribs =
+ static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY);
+
+ STATIC_ASSERT(JSFunction::kLengthDescriptorIndex == 0);
+ { // Add length accessor.
+ Descriptor d = Descriptor::AccessorConstant(
+ length_string(), function_length_accessor(), roc_attribs);
+ map->AppendDescriptor(&d);
+ }
+
+ {
+ // Add prototype accessor.
+ Descriptor d = Descriptor::AccessorConstant(
+ prototype_string(), function_prototype_accessor(), ro_attribs);
+ map->AppendDescriptor(&d);
+ }
+ return map;
+}
+
+Handle<JSPromise> Factory::NewJSPromiseWithoutHook(PretenureFlag pretenure) {
+ Handle<JSPromise> promise = Handle<JSPromise>::cast(
+ NewJSObject(isolate()->promise_function(), pretenure));
+ promise->set_reactions_or_result(Smi::kZero);
+ promise->set_flags(0);
+ for (int i = 0; i < v8::Promise::kEmbedderFieldCount; i++) {
+ promise->SetEmbedderField(i, Smi::kZero);
+ }
+ return promise;
+}
+
+Handle<JSPromise> Factory::NewJSPromise(PretenureFlag pretenure) {
+ Handle<JSPromise> promise = NewJSPromiseWithoutHook(pretenure);
+ isolate()->RunPromiseHook(PromiseHookType::kInit, promise, undefined_value());
+ return promise;
+}
+
+Handle<CallHandlerInfo> Factory::NewCallHandlerInfo(bool has_no_side_effect) {
+ Handle<Map> map = has_no_side_effect
+ ? side_effect_free_call_handler_info_map()
+ : side_effect_call_handler_info_map();
+ Handle<CallHandlerInfo> info(CallHandlerInfo::cast(New(map, TENURED)),
+ isolate());
+ Object* undefined_value = isolate()->heap()->undefined_value();
+ info->set_callback(undefined_value);
+ info->set_js_callback(undefined_value);
+ info->set_data(undefined_value);
+ return info;
+}
+
+// static
+NewFunctionArgs NewFunctionArgs::ForWasm(Handle<String> name, Handle<Code> code,
+ Handle<Map> map) {
+ NewFunctionArgs args;
+ args.name_ = name;
+ args.maybe_map_ = map;
+ args.maybe_code_ = code;
+ args.language_mode_ = LanguageMode::kSloppy;
+ args.prototype_mutability_ = MUTABLE;
+
+ return args;
+}
+
+// static
+NewFunctionArgs NewFunctionArgs::ForBuiltin(Handle<String> name,
+ Handle<Map> map, int builtin_id) {
+ DCHECK(Builtins::IsBuiltinId(builtin_id));
+
+ NewFunctionArgs args;
+ args.name_ = name;
+ args.maybe_map_ = map;
+ args.maybe_builtin_id_ = builtin_id;
+ args.language_mode_ = LanguageMode::kStrict;
+ args.prototype_mutability_ = MUTABLE;
+
+ args.SetShouldSetLanguageMode();
+
+ return args;
+}
+
+// static
+NewFunctionArgs NewFunctionArgs::ForFunctionWithoutCode(
+ Handle<String> name, Handle<Map> map, LanguageMode language_mode) {
+ NewFunctionArgs args;
+ args.name_ = name;
+ args.maybe_map_ = map;
+ args.maybe_builtin_id_ = Builtins::kIllegal;
+ args.language_mode_ = language_mode;
+ args.prototype_mutability_ = MUTABLE;
+
+ args.SetShouldSetLanguageMode();
+
+ return args;
+}
+
+// static
+NewFunctionArgs NewFunctionArgs::ForBuiltinWithPrototype(
+ Handle<String> name, Handle<Object> prototype, InstanceType type,
+ int instance_size, int inobject_properties, int builtin_id,
+ MutableMode prototype_mutability) {
+ DCHECK(Builtins::IsBuiltinId(builtin_id));
+
+ NewFunctionArgs args;
+ args.name_ = name;
+ args.type_ = type;
+ args.instance_size_ = instance_size;
+ args.inobject_properties_ = inobject_properties;
+ args.maybe_prototype_ = prototype;
+ args.maybe_builtin_id_ = builtin_id;
+ args.language_mode_ = LanguageMode::kStrict;
+ args.prototype_mutability_ = prototype_mutability;
+
+ args.SetShouldCreateAndSetInitialMap();
+ args.SetShouldSetPrototype();
+ args.SetShouldSetLanguageMode();
+
+ return args;
+}
+
+// static
+NewFunctionArgs NewFunctionArgs::ForBuiltinWithoutPrototype(
+ Handle<String> name, int builtin_id, LanguageMode language_mode) {
+ DCHECK(Builtins::IsBuiltinId(builtin_id));
+
+ NewFunctionArgs args;
+ args.name_ = name;
+ args.maybe_builtin_id_ = builtin_id;
+ args.language_mode_ = language_mode;
+ args.prototype_mutability_ = MUTABLE;
+
+ args.SetShouldSetLanguageMode();
+
+ return args;
+}
+
+void NewFunctionArgs::SetShouldCreateAndSetInitialMap() {
+ // Needed to create the initial map.
+ maybe_prototype_.Assert();
+ DCHECK_NE(kUninitialized, instance_size_);
+ DCHECK_NE(kUninitialized, inobject_properties_);
+
+ should_create_and_set_initial_map_ = true;
+}
+
+void NewFunctionArgs::SetShouldSetPrototype() {
+ maybe_prototype_.Assert();
+ should_set_prototype_ = true;
+}
+
+void NewFunctionArgs::SetShouldSetLanguageMode() {
+ DCHECK(language_mode_ == LanguageMode::kStrict ||
+ language_mode_ == LanguageMode::kSloppy);
+ should_set_language_mode_ = true;
+}
+
+Handle<Map> NewFunctionArgs::GetMap(Isolate* isolate) const {
+ if (!maybe_map_.is_null()) {
+ return maybe_map_.ToHandleChecked();
+ } else if (maybe_prototype_.is_null()) {
+ return is_strict(language_mode_)
+ ? isolate->strict_function_without_prototype_map()
+ : isolate->sloppy_function_without_prototype_map();
+ } else {
+ DCHECK(!maybe_prototype_.is_null());
+ switch (prototype_mutability_) {
+ case MUTABLE:
+ return is_strict(language_mode_) ? isolate->strict_function_map()
+ : isolate->sloppy_function_map();
+ case IMMUTABLE:
+ return is_strict(language_mode_)
+ ? isolate->strict_function_with_readonly_prototype_map()
+ : isolate->sloppy_function_with_readonly_prototype_map();
+ }
+ }
+ UNREACHABLE();
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/heap/factory.h b/deps/v8/src/heap/factory.h
new file mode 100644
index 0000000000..ead1d0d24c
--- /dev/null
+++ b/deps/v8/src/heap/factory.h
@@ -0,0 +1,1016 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_FACTORY_H_
+#define V8_HEAP_FACTORY_H_
+
+#include "src/builtins/builtins.h"
+#include "src/globals.h"
+#include "src/handles.h"
+#include "src/heap/heap.h"
+#include "src/messages.h"
+#include "src/objects/code.h"
+#include "src/objects/dictionary.h"
+#include "src/objects/hash-table.h"
+#include "src/objects/js-array.h"
+#include "src/objects/js-regexp.h"
+#include "src/objects/string.h"
+
+namespace v8 {
+namespace internal {
+
+// Forward declarations.
+class AliasedArgumentsEntry;
+class BoilerplateDescription;
+class BreakPoint;
+class BreakPointInfo;
+class CallableTask;
+class CallbackTask;
+class ConstantElementsPair;
+class CoverageInfo;
+class DebugInfo;
+class EnumCache;
+class FreshlyAllocatedBigInt;
+class Isolate;
+class JSMap;
+class JSMapIterator;
+class JSModuleNamespace;
+class JSSet;
+class JSSetIterator;
+class JSWeakMap;
+class LoadHandler;
+class ModuleInfo;
+class NewFunctionArgs;
+class PreParsedScopeData;
+class PromiseResolveThenableJobTask;
+class RegExpMatchInfo;
+class ScriptContextTable;
+class StoreHandler;
+class TemplateObjectDescription;
+struct SourceRange;
+template <typename T>
+class ZoneVector;
+
+enum FunctionMode {
+ kWithNameBit = 1 << 0,
+ kWithHomeObjectBit = 1 << 1,
+ kWithWritablePrototypeBit = 1 << 2,
+ kWithReadonlyPrototypeBit = 1 << 3,
+ kWithPrototypeBits = kWithWritablePrototypeBit | kWithReadonlyPrototypeBit,
+
+ // Without prototype.
+ FUNCTION_WITHOUT_PROTOTYPE = 0,
+ METHOD_WITH_NAME = kWithNameBit,
+ METHOD_WITH_HOME_OBJECT = kWithHomeObjectBit,
+ METHOD_WITH_NAME_AND_HOME_OBJECT = kWithNameBit | kWithHomeObjectBit,
+
+ // With writable prototype.
+ FUNCTION_WITH_WRITEABLE_PROTOTYPE = kWithWritablePrototypeBit,
+ FUNCTION_WITH_NAME_AND_WRITEABLE_PROTOTYPE =
+ kWithWritablePrototypeBit | kWithNameBit,
+ FUNCTION_WITH_HOME_OBJECT_AND_WRITEABLE_PROTOTYPE =
+ kWithWritablePrototypeBit | kWithHomeObjectBit,
+ FUNCTION_WITH_NAME_AND_HOME_OBJECT_AND_WRITEABLE_PROTOTYPE =
+ kWithWritablePrototypeBit | kWithNameBit | kWithHomeObjectBit,
+
+ // With readonly prototype.
+ FUNCTION_WITH_READONLY_PROTOTYPE = kWithReadonlyPrototypeBit,
+ FUNCTION_WITH_NAME_AND_READONLY_PROTOTYPE =
+ kWithReadonlyPrototypeBit | kWithNameBit,
+};
+
+// Interface for handle based allocation.
+class V8_EXPORT_PRIVATE Factory final {
+ public:
+ Handle<Oddball> NewOddball(Handle<Map> map, const char* to_string,
+ Handle<Object> to_number, const char* type_of,
+ byte kind);
+
+  // Allocates a fixed array-like object with the given map, initialized with
+  // undefined values.
+ template <typename T = FixedArray>
+ Handle<T> NewFixedArrayWithMap(Heap::RootListIndex map_root_index, int length,
+ PretenureFlag pretenure = NOT_TENURED);
+
+ // Allocates a fixed array initialized with undefined values.
+ Handle<FixedArray> NewFixedArray(int length,
+ PretenureFlag pretenure = NOT_TENURED);
+
+ // Allocates a fixed array which may contain in-place weak references. The
+  // array is initialized with undefined values.
+ Handle<WeakFixedArray> NewWeakFixedArray(
+ int length, PretenureFlag pretenure = NOT_TENURED);
+
+ // Allocates a property array initialized with undefined values.
+ Handle<PropertyArray> NewPropertyArray(int length,
+ PretenureFlag pretenure = NOT_TENURED);
+ // Tries allocating a fixed array initialized with undefined values.
+ // In case of an allocation failure (OOM) an empty handle is returned.
+  // The caller has to manually signal a
+  // v8::internal::Heap::FatalProcessOutOfMemory, typically by calling
+  // NewFixedArray as a fallback.
+ V8_WARN_UNUSED_RESULT
+ MaybeHandle<FixedArray> TryNewFixedArray(
+ int length, PretenureFlag pretenure = NOT_TENURED);
+
+ // Allocate a new fixed array with non-existing entries (the hole).
+ Handle<FixedArray> NewFixedArrayWithHoles(
+ int length, PretenureFlag pretenure = NOT_TENURED);
+
+ // Allocates an uninitialized fixed array. It must be filled by the caller.
+ Handle<FixedArray> NewUninitializedFixedArray(
+ int length, PretenureFlag pretenure = NOT_TENURED);
+
+ // Allocates a feedback vector whose slots are initialized with undefined
+ // values.
+ Handle<FeedbackVector> NewFeedbackVector(
+ Handle<SharedFunctionInfo> shared, PretenureFlag pretenure = NOT_TENURED);
+
+ // Allocates a fixed array for name-value pairs of boilerplate properties and
+ // calculates the number of properties we need to store in the backing store.
+ Handle<BoilerplateDescription> NewBoilerplateDescription(int boilerplate,
+ int all_properties,
+ int index_keys,
+ bool has_seen_proto);
+
+ // Allocate a new uninitialized fixed double array.
+ // The function returns a pre-allocated empty fixed array for length = 0,
+ // so the return type must be the general fixed array class.
+ Handle<FixedArrayBase> NewFixedDoubleArray(
+ int length, PretenureFlag pretenure = NOT_TENURED);
+
+ // Allocate a new fixed double array with hole values.
+ Handle<FixedArrayBase> NewFixedDoubleArrayWithHoles(
+ int size, PretenureFlag pretenure = NOT_TENURED);
+
+  // Allocates a FeedbackMetadata object and zeroes the data section.
+ Handle<FeedbackMetadata> NewFeedbackMetadata(int slot_count);
+
+ Handle<FrameArray> NewFrameArray(int number_of_frames,
+ PretenureFlag pretenure = NOT_TENURED);
+
+ Handle<OrderedHashSet> NewOrderedHashSet();
+ Handle<OrderedHashMap> NewOrderedHashMap();
+
+ Handle<SmallOrderedHashSet> NewSmallOrderedHashSet(
+ int capacity = SmallOrderedHashSet::kMinCapacity,
+ PretenureFlag pretenure = NOT_TENURED);
+ Handle<SmallOrderedHashMap> NewSmallOrderedHashMap(
+ int capacity = SmallOrderedHashMap::kMinCapacity,
+ PretenureFlag pretenure = NOT_TENURED);
+
+ // Create a new PrototypeInfo struct.
+ Handle<PrototypeInfo> NewPrototypeInfo();
+
+ // Create a new EnumCache struct.
+ Handle<EnumCache> NewEnumCache(Handle<FixedArray> keys,
+ Handle<FixedArray> indices);
+
+ // Create a new Tuple2 struct.
+ Handle<Tuple2> NewTuple2(Handle<Object> value1, Handle<Object> value2,
+ PretenureFlag pretenure);
+
+ // Create a new Tuple3 struct.
+ Handle<Tuple3> NewTuple3(Handle<Object> value1, Handle<Object> value2,
+ Handle<Object> value3, PretenureFlag pretenure);
+
+ // Create a new ContextExtension struct.
+ Handle<ContextExtension> NewContextExtension(Handle<ScopeInfo> scope_info,
+ Handle<Object> extension);
+
+ // Create a new ConstantElementsPair struct.
+ Handle<ConstantElementsPair> NewConstantElementsPair(
+ ElementsKind elements_kind, Handle<FixedArrayBase> constant_values);
+
+ // Create a new TemplateObjectDescription struct.
+ Handle<TemplateObjectDescription> NewTemplateObjectDescription(
+ Handle<FixedArray> raw_strings, Handle<FixedArray> cooked_strings);
+
+ // Create a pre-tenured empty AccessorPair.
+ Handle<AccessorPair> NewAccessorPair();
+
+ // Finds the internalized copy for string in the string table.
+ // If not found, a new string is added to the table and returned.
+ Handle<String> InternalizeUtf8String(Vector<const char> str);
+ Handle<String> InternalizeUtf8String(const char* str) {
+ return InternalizeUtf8String(CStrVector(str));
+ }
+
+ Handle<String> InternalizeOneByteString(Vector<const uint8_t> str);
+ Handle<String> InternalizeOneByteString(Handle<SeqOneByteString>, int from,
+ int length);
+
+ Handle<String> InternalizeTwoByteString(Vector<const uc16> str);
+
+ template <class StringTableKey>
+ Handle<String> InternalizeStringWithKey(StringTableKey* key);
+
+ // Internalized strings are created in the old generation (data space).
+ inline Handle<String> InternalizeString(Handle<String> string);
+
+ inline Handle<Name> InternalizeName(Handle<Name> name);
+
+ // String creation functions. Most of the string creation functions take
+ // a Heap::PretenureFlag argument to optionally request that they be
+ // allocated in the old generation. The pretenure flag defaults to
+  // NOT_TENURED.
+ //
+ // Creates a new String object. There are two String encodings: one-byte and
+ // two-byte. One should choose between the three string factory functions
+ // based on the encoding of the string buffer that the string is
+ // initialized from.
+ // - ...FromOneByte initializes the string from a buffer that is Latin1
+ // encoded (it does not check that the buffer is Latin1 encoded) and
+ // the result will be Latin1 encoded.
+ // - ...FromUtf8 initializes the string from a buffer that is UTF-8
+ // encoded. If the characters are all ASCII characters, the result
+  //     will be Latin1 encoded, otherwise it will be converted to two-byte.
+ // - ...FromTwoByte initializes the string from a buffer that is two-byte
+ // encoded. If the characters are all Latin1 characters, the result
+ // will be converted to Latin1, otherwise it will be left as two-byte.
+ //
+ // One-byte strings are pretenured when used as keys in the SourceCodeCache.
+ V8_WARN_UNUSED_RESULT MaybeHandle<String> NewStringFromOneByte(
+ Vector<const uint8_t> str, PretenureFlag pretenure = NOT_TENURED);
+
+ template <size_t N>
+ inline Handle<String> NewStringFromStaticChars(
+ const char (&str)[N], PretenureFlag pretenure = NOT_TENURED) {
+ DCHECK(N == StrLength(str) + 1);
+ return NewStringFromOneByte(STATIC_CHAR_VECTOR(str), pretenure)
+ .ToHandleChecked();
+ }
+
+ inline Handle<String> NewStringFromAsciiChecked(
+ const char* str, PretenureFlag pretenure = NOT_TENURED) {
+ return NewStringFromOneByte(OneByteVector(str), pretenure)
+ .ToHandleChecked();
+ }
+
+ // UTF8 strings are pretenured when used for regexp literal patterns and
+ // flags in the parser.
+ V8_WARN_UNUSED_RESULT MaybeHandle<String> NewStringFromUtf8(
+ Vector<const char> str, PretenureFlag pretenure = NOT_TENURED);
+
+ V8_WARN_UNUSED_RESULT MaybeHandle<String> NewStringFromUtf8SubString(
+ Handle<SeqOneByteString> str, int begin, int end,
+ PretenureFlag pretenure = NOT_TENURED);
+
+ V8_WARN_UNUSED_RESULT MaybeHandle<String> NewStringFromTwoByte(
+ Vector<const uc16> str, PretenureFlag pretenure = NOT_TENURED);
+
+ V8_WARN_UNUSED_RESULT MaybeHandle<String> NewStringFromTwoByte(
+ const ZoneVector<uc16>* str, PretenureFlag pretenure = NOT_TENURED);
+
+ Handle<JSStringIterator> NewJSStringIterator(Handle<String> string);
+
+ // Allocates an internalized string in old space based on the character
+ // stream.
+ Handle<String> NewInternalizedStringFromUtf8(Vector<const char> str,
+ int chars, uint32_t hash_field);
+
+ Handle<String> NewOneByteInternalizedString(Vector<const uint8_t> str,
+ uint32_t hash_field);
+
+ Handle<String> NewOneByteInternalizedSubString(
+ Handle<SeqOneByteString> string, int offset, int length,
+ uint32_t hash_field);
+
+ Handle<String> NewTwoByteInternalizedString(Vector<const uc16> str,
+ uint32_t hash_field);
+
+ Handle<String> NewInternalizedStringImpl(Handle<String> string, int chars,
+ uint32_t hash_field);
+
+ // Compute the matching internalized string map for a string if possible.
+  // An empty handle is returned if the string is in new space or not
+  // flattened.
+ V8_WARN_UNUSED_RESULT MaybeHandle<Map> InternalizedStringMapForString(
+ Handle<String> string);
+
+ // Creates an internalized copy of an external string. |string| must be
+ // of type StringClass.
+ template <class StringClass>
+ Handle<StringClass> InternalizeExternalString(Handle<String> string);
+
+  // Allocates and partially initializes a one-byte or two-byte String. The
+ // characters of the string are uninitialized. Currently used in regexp code
+ // only, where they are pretenured.
+ V8_WARN_UNUSED_RESULT MaybeHandle<SeqOneByteString> NewRawOneByteString(
+ int length, PretenureFlag pretenure = NOT_TENURED);
+ V8_WARN_UNUSED_RESULT MaybeHandle<SeqTwoByteString> NewRawTwoByteString(
+ int length, PretenureFlag pretenure = NOT_TENURED);
+
+  // Creates a single character string where the character has the given code.
+ // A cache is used for Latin1 codes.
+ Handle<String> LookupSingleCharacterStringFromCode(uint32_t code);
+
+ // Create a new cons string object which consists of a pair of strings.
+ V8_WARN_UNUSED_RESULT MaybeHandle<String> NewConsString(Handle<String> left,
+ Handle<String> right);
+
+ V8_WARN_UNUSED_RESULT Handle<String> NewConsString(Handle<String> left,
+ Handle<String> right,
+ int length, bool one_byte);
+
+  // Create or look up a single character string made up of a utf16 surrogate
+  // pair.
+ Handle<String> NewSurrogatePairString(uint16_t lead, uint16_t trail);
+
+ // Create a new string object which holds a proper substring of a string.
+ Handle<String> NewProperSubString(Handle<String> str, int begin, int end);
+
+ // Create a new string object which holds a substring of a string.
+ inline Handle<String> NewSubString(Handle<String> str, int begin, int end);
+
+ // Creates a new external String object. There are two String encodings
+  // in the system: one-byte and two-byte. Unlike with other String types, it
+  // does not make sense to have a UTF-8 factory function for external strings,
+ // because we cannot change the underlying buffer. Note that these strings
+ // are backed by a string resource that resides outside the V8 heap.
+ V8_WARN_UNUSED_RESULT MaybeHandle<String> NewExternalStringFromOneByte(
+ const ExternalOneByteString::Resource* resource);
+ V8_WARN_UNUSED_RESULT MaybeHandle<String> NewExternalStringFromTwoByte(
+ const ExternalTwoByteString::Resource* resource);
+ // Create a new external string object for one-byte encoded native script.
+ // It does not cache the resource data pointer.
+ Handle<ExternalOneByteString> NewNativeSourceString(
+ const ExternalOneByteString::Resource* resource);
+
+ // Create a symbol in old space.
+ Handle<Symbol> NewSymbol();
+ Handle<Symbol> NewPrivateSymbol();
+ Handle<Symbol> NewPrivateFieldSymbol();
+
+ // Create a global (but otherwise uninitialized) context.
+ Handle<Context> NewNativeContext();
+
+ // Create a script context.
+ Handle<Context> NewScriptContext(Handle<JSFunction> function,
+ Handle<ScopeInfo> scope_info);
+
+ // Create an empty script context table.
+ Handle<ScriptContextTable> NewScriptContextTable();
+
+ // Create a module context.
+ Handle<Context> NewModuleContext(Handle<Module> module,
+ Handle<JSFunction> function,
+ Handle<ScopeInfo> scope_info);
+
+ // Create a function or eval context.
+ Handle<Context> NewFunctionContext(int length, Handle<JSFunction> function,
+ ScopeType scope_type);
+
+ // Create a catch context.
+ Handle<Context> NewCatchContext(Handle<JSFunction> function,
+ Handle<Context> previous,
+ Handle<ScopeInfo> scope_info,
+ Handle<String> name,
+ Handle<Object> thrown_object);
+
+ // Create a 'with' context.
+ Handle<Context> NewWithContext(Handle<JSFunction> function,
+ Handle<Context> previous,
+ Handle<ScopeInfo> scope_info,
+ Handle<JSReceiver> extension);
+
+ Handle<Context> NewDebugEvaluateContext(Handle<Context> previous,
+ Handle<ScopeInfo> scope_info,
+ Handle<JSReceiver> extension,
+ Handle<Context> wrapped,
+ Handle<StringSet> whitelist);
+
+ // Create a block context.
+ Handle<Context> NewBlockContext(Handle<JSFunction> function,
+ Handle<Context> previous,
+ Handle<ScopeInfo> scope_info);
+
+ Handle<Struct> NewStruct(InstanceType type,
+ PretenureFlag pretenure = NOT_TENURED);
+
+ Handle<AliasedArgumentsEntry> NewAliasedArgumentsEntry(
+ int aliased_context_slot);
+
+ Handle<AccessorInfo> NewAccessorInfo();
+
+ Handle<Script> NewScript(Handle<String> source);
+
+ Handle<BreakPointInfo> NewBreakPointInfo(int source_position);
+ Handle<BreakPoint> NewBreakPoint(int id, Handle<String> condition);
+ Handle<StackFrameInfo> NewStackFrameInfo();
+ Handle<SourcePositionTableWithFrameCache>
+ NewSourcePositionTableWithFrameCache(
+ Handle<ByteArray> source_position_table,
+ Handle<SimpleNumberDictionary> stack_frame_cache);
+
+ // Allocate various microtasks.
+ Handle<CallableTask> NewCallableTask(Handle<JSReceiver> callable,
+ Handle<Context> context);
+ Handle<CallbackTask> NewCallbackTask(Handle<Foreign> callback,
+ Handle<Foreign> data);
+ Handle<PromiseResolveThenableJobTask> NewPromiseResolveThenableJobTask(
+ Handle<JSPromise> promise_to_resolve, Handle<JSReceiver> then,
+ Handle<JSReceiver> thenable, Handle<Context> context);
+
+ // Foreign objects are pretenured when allocated by the bootstrapper.
+ Handle<Foreign> NewForeign(Address addr,
+ PretenureFlag pretenure = NOT_TENURED);
+
+ Handle<ByteArray> NewByteArray(int length,
+ PretenureFlag pretenure = NOT_TENURED);
+
+ Handle<BytecodeArray> NewBytecodeArray(int length, const byte* raw_bytecodes,
+ int frame_size, int parameter_count,
+ Handle<FixedArray> constant_pool);
+
+ Handle<FixedTypedArrayBase> NewFixedTypedArrayWithExternalPointer(
+ int length, ExternalArrayType array_type, void* external_pointer,
+ PretenureFlag pretenure = NOT_TENURED);
+
+ Handle<FixedTypedArrayBase> NewFixedTypedArray(
+ size_t length, size_t byte_length, ExternalArrayType array_type,
+ bool initialize, PretenureFlag pretenure = NOT_TENURED);
+
+ Handle<Cell> NewCell(Handle<Object> value);
+
+ Handle<PropertyCell> NewPropertyCell(Handle<Name> name);
+
+ Handle<WeakCell> NewWeakCell(Handle<HeapObject> value);
+
+ Handle<FeedbackCell> NewNoClosuresCell(Handle<HeapObject> value);
+ Handle<FeedbackCell> NewOneClosureCell(Handle<HeapObject> value);
+ Handle<FeedbackCell> NewManyClosuresCell(Handle<HeapObject> value);
+
+ Handle<TransitionArray> NewTransitionArray(int capacity);
+
+ // Allocate a tenured AllocationSite. Its payload is null.
+ Handle<AllocationSite> NewAllocationSite();
+
+ // Allocates and initializes a new Map.
+ Handle<Map> NewMap(InstanceType type, int instance_size,
+ ElementsKind elements_kind = TERMINAL_FAST_ELEMENTS_KIND,
+ int inobject_properties = 0);
+ // Initializes the fields of a newly created Map. Exposed for tests and
+ // heap setup; other code should just call NewMap which takes care of it.
+ Map* InitializeMap(Map* map, InstanceType type, int instance_size,
+ ElementsKind elements_kind, int inobject_properties);
+
+ // Allocate a block of memory in the given space (filled with a filler).
+ // Used as a fall-back for generated code when the space is full.
+ Handle<HeapObject> NewFillerObject(int size, bool double_align,
+ AllocationSpace space);
+
+ Handle<JSObject> NewFunctionPrototype(Handle<JSFunction> function);
+
+ // Returns a deep copy of the JavaScript object.
+ // Properties and elements are copied too.
+ Handle<JSObject> CopyJSObject(Handle<JSObject> object);
+ // Same as above, but also takes an AllocationSite to be appended in an
+ // AllocationMemento.
+ Handle<JSObject> CopyJSObjectWithAllocationSite(Handle<JSObject> object,
+ Handle<AllocationSite> site);
+
+ Handle<FixedArray> CopyFixedArrayWithMap(Handle<FixedArray> array,
+ Handle<Map> map);
+
+ Handle<FixedArray> CopyFixedArrayAndGrow(
+ Handle<FixedArray> array, int grow_by,
+ PretenureFlag pretenure = NOT_TENURED);
+
+ Handle<PropertyArray> CopyPropertyArrayAndGrow(
+ Handle<PropertyArray> array, int grow_by,
+ PretenureFlag pretenure = NOT_TENURED);
+
+ Handle<FixedArray> CopyFixedArrayUpTo(Handle<FixedArray> array, int new_len,
+ PretenureFlag pretenure = NOT_TENURED);
+
+ Handle<FixedArray> CopyFixedArray(Handle<FixedArray> array);
+
+ // This method expects a COW array in new space, and creates a copy
+ // of it in old space.
+ Handle<FixedArray> CopyAndTenureFixedCOWArray(Handle<FixedArray> array);
+
+ Handle<FixedDoubleArray> CopyFixedDoubleArray(Handle<FixedDoubleArray> array);
+
+ Handle<FeedbackVector> CopyFeedbackVector(Handle<FeedbackVector> array);
+
+ // Numbers (e.g. literals) are pretenured by the parser.
+ // The return value may be a smi or a heap number.
+ Handle<Object> NewNumber(double value, PretenureFlag pretenure = NOT_TENURED);
+
+ Handle<Object> NewNumberFromInt(int32_t value,
+ PretenureFlag pretenure = NOT_TENURED);
+ Handle<Object> NewNumberFromUint(uint32_t value,
+ PretenureFlag pretenure = NOT_TENURED);
+ inline Handle<Object> NewNumberFromSize(
+ size_t value, PretenureFlag pretenure = NOT_TENURED);
+ inline Handle<Object> NewNumberFromInt64(
+ int64_t value, PretenureFlag pretenure = NOT_TENURED);
+ inline Handle<HeapNumber> NewHeapNumber(
+ double value, MutableMode mode = IMMUTABLE,
+ PretenureFlag pretenure = NOT_TENURED);
+ inline Handle<HeapNumber> NewHeapNumberFromBits(
+ uint64_t bits, MutableMode mode = IMMUTABLE,
+ PretenureFlag pretenure = NOT_TENURED);
+  // Creates a mutable heap number object whose value field is hole NaN.
+ inline Handle<HeapNumber> NewMutableHeapNumber(
+ PretenureFlag pretenure = NOT_TENURED);
+
+  // Creates a heap number object whose value field is not yet set.
+ Handle<HeapNumber> NewHeapNumber(MutableMode mode,
+ PretenureFlag pretenure = NOT_TENURED);
+
+ // Allocates a new BigInt with {length} digits. Only to be used by
+ // MutableBigInt::New*.
+ Handle<FreshlyAllocatedBigInt> NewBigInt(
+ int length, PretenureFlag pretenure = NOT_TENURED);
+
+ Handle<JSObject> NewArgumentsObject(Handle<JSFunction> callee, int length);
+
+ // Allocates and initializes a new JavaScript object based on a
+ // constructor.
+ // JS objects are pretenured when allocated by the bootstrapper and
+ // runtime.
+ Handle<JSObject> NewJSObject(Handle<JSFunction> constructor,
+ PretenureFlag pretenure = NOT_TENURED);
+ // JSObject without a prototype.
+ Handle<JSObject> NewJSObjectWithNullProto(
+ PretenureFlag pretenure = NOT_TENURED);
+
+ // Global objects are pretenured and initialized based on a constructor.
+ Handle<JSGlobalObject> NewJSGlobalObject(Handle<JSFunction> constructor);
+
+ // Allocates and initializes a new JavaScript object based on a map.
+ // Passing an allocation site means that a memento will be created that
+ // points to the site.
+ // JS objects are pretenured when allocated by the bootstrapper and
+ // runtime.
+ Handle<JSObject> NewJSObjectFromMap(
+ Handle<Map> map, PretenureFlag pretenure = NOT_TENURED,
+ Handle<AllocationSite> allocation_site = Handle<AllocationSite>::null());
+ Handle<JSObject> NewSlowJSObjectFromMap(
+ Handle<Map> map,
+ int number_of_slow_properties = NameDictionary::kInitialCapacity,
+ PretenureFlag pretenure = NOT_TENURED);
+
+ // JS arrays are pretenured when allocated by the parser.
+
+ // Create a JSArray with a specified length and elements initialized
+ // according to the specified mode.
+ Handle<JSArray> NewJSArray(
+ ElementsKind elements_kind, int length, int capacity,
+ ArrayStorageAllocationMode mode = DONT_INITIALIZE_ARRAY_ELEMENTS,
+ PretenureFlag pretenure = NOT_TENURED);
+
+ Handle<JSArray> NewJSArray(
+ int capacity, ElementsKind elements_kind = TERMINAL_FAST_ELEMENTS_KIND,
+ PretenureFlag pretenure = NOT_TENURED) {
+ if (capacity != 0) {
+ elements_kind = GetHoleyElementsKind(elements_kind);
+ }
+ return NewJSArray(elements_kind, 0, capacity,
+ INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE, pretenure);
+ }
+
+ // Create a JSArray with the given elements.
+ Handle<JSArray> NewJSArrayWithElements(Handle<FixedArrayBase> elements,
+ ElementsKind elements_kind, int length,
+ PretenureFlag pretenure = NOT_TENURED);
+
+ inline Handle<JSArray> NewJSArrayWithElements(
+ Handle<FixedArrayBase> elements,
+ ElementsKind elements_kind = TERMINAL_FAST_ELEMENTS_KIND,
+ PretenureFlag pretenure = NOT_TENURED);
+
+ void NewJSArrayStorage(
+ Handle<JSArray> array, int length, int capacity,
+ ArrayStorageAllocationMode mode = DONT_INITIALIZE_ARRAY_ELEMENTS);
+
+ Handle<JSWeakMap> NewJSWeakMap();
+
+ Handle<JSGeneratorObject> NewJSGeneratorObject(Handle<JSFunction> function);
+
+ Handle<JSModuleNamespace> NewJSModuleNamespace();
+
+ Handle<Module> NewModule(Handle<SharedFunctionInfo> code);
+
+ Handle<JSArrayBuffer> NewJSArrayBuffer(
+ SharedFlag shared = SharedFlag::kNotShared,
+ PretenureFlag pretenure = NOT_TENURED);
+
+ static void TypeAndSizeForElementsKind(ElementsKind kind,
+ ExternalArrayType* array_type,
+ size_t* element_size);
+
+ Handle<JSTypedArray> NewJSTypedArray(ExternalArrayType type,
+ PretenureFlag pretenure = NOT_TENURED);
+
+ Handle<JSTypedArray> NewJSTypedArray(ElementsKind elements_kind,
+ PretenureFlag pretenure = NOT_TENURED);
+
+ // Creates a new JSTypedArray with the specified buffer.
+ Handle<JSTypedArray> NewJSTypedArray(ExternalArrayType type,
+ Handle<JSArrayBuffer> buffer,
+ size_t byte_offset, size_t length,
+ PretenureFlag pretenure = NOT_TENURED);
+
+ // Creates a new on-heap JSTypedArray.
+ Handle<JSTypedArray> NewJSTypedArray(ElementsKind elements_kind,
+ size_t number_of_elements,
+ PretenureFlag pretenure = NOT_TENURED);
+
+ Handle<JSDataView> NewJSDataView(Handle<JSArrayBuffer> buffer,
+ size_t byte_offset, size_t byte_length);
+
+ Handle<JSIteratorResult> NewJSIteratorResult(Handle<Object> value, bool done);
+ Handle<JSAsyncFromSyncIterator> NewJSAsyncFromSyncIterator(
+ Handle<JSReceiver> sync_iterator, Handle<Object> next);
+
+ Handle<JSMap> NewJSMap();
+ Handle<JSSet> NewJSSet();
+
+ Handle<JSMapIterator> NewJSMapIterator(Handle<Map> map,
+ Handle<OrderedHashMap> table,
+ int index);
+ Handle<JSSetIterator> NewJSSetIterator(Handle<Map> map,
+ Handle<OrderedHashSet> table,
+ int index);
+
+ // Allocates a bound function.
+ MaybeHandle<JSBoundFunction> NewJSBoundFunction(
+ Handle<JSReceiver> target_function, Handle<Object> bound_this,
+ Vector<Handle<Object>> bound_args);
+
+ // Allocates a Harmony proxy.
+ Handle<JSProxy> NewJSProxy(Handle<JSReceiver> target,
+ Handle<JSReceiver> handler);
+
+  // Reinitialize a JSGlobalProxy based on a constructor. The object
+ // must have the same size as objects allocated using the
+ // constructor. The object is reinitialized and behaves as an
+ // object that has been freshly allocated using the constructor.
+ void ReinitializeJSGlobalProxy(Handle<JSGlobalProxy> global,
+ Handle<JSFunction> constructor);
+
+ Handle<JSGlobalProxy> NewUninitializedJSGlobalProxy(int size);
+
+ // Creates a new JSFunction according to the given args. This is the function
+ // you'll probably want to use when creating a JSFunction from the runtime.
+ Handle<JSFunction> NewFunction(const NewFunctionArgs& args);
+
+ // For testing only. Creates a sloppy function without code.
+ Handle<JSFunction> NewFunctionForTest(Handle<String> name);
+
+ // Function creation from SharedFunctionInfo.
+
+ Handle<JSFunction> NewFunctionFromSharedFunctionInfo(
+ Handle<Map> initial_map, Handle<SharedFunctionInfo> function_info,
+ Handle<Object> context_or_undefined, Handle<FeedbackCell> feedback_cell,
+ PretenureFlag pretenure = TENURED);
+
+ Handle<JSFunction> NewFunctionFromSharedFunctionInfo(
+ Handle<SharedFunctionInfo> function_info, Handle<Context> context,
+ Handle<FeedbackCell> feedback_cell, PretenureFlag pretenure = TENURED);
+
+ Handle<JSFunction> NewFunctionFromSharedFunctionInfo(
+ Handle<Map> initial_map, Handle<SharedFunctionInfo> function_info,
+ Handle<Object> context_or_undefined, PretenureFlag pretenure = TENURED);
+
+ Handle<JSFunction> NewFunctionFromSharedFunctionInfo(
+ Handle<SharedFunctionInfo> function_info, Handle<Context> context,
+ PretenureFlag pretenure = TENURED);
+
+ // The choke-point for JSFunction creation. Handles allocation and
+ // initialization. All other utility methods call into this.
+ Handle<JSFunction> NewFunction(Handle<Map> map,
+ Handle<SharedFunctionInfo> info,
+ Handle<Object> context_or_undefined,
+ PretenureFlag pretenure = TENURED);
+
+ // Create a serialized scope info.
+ Handle<ScopeInfo> NewScopeInfo(int length);
+
+ Handle<ModuleInfo> NewModuleInfo();
+
+ Handle<PreParsedScopeData> NewPreParsedScopeData();
+
+ // Create an External object for V8's external API.
+ Handle<JSObject> NewExternal(void* value);
+
+ // Creates a new CodeDataContainer for a Code object.
+ Handle<CodeDataContainer> NewCodeDataContainer(int flags);
+
+ // Allocates a new code object (fully initialized). All header fields of the
+ // returned object are immutable and the code object is write protected.
+ // The reference to the Code object is stored in self_reference.
+ // This allows generated code to reference its own Code object
+ // by containing this handle.
+ Handle<Code> NewCode(const CodeDesc& desc, Code::Kind kind,
+ Handle<Object> self_reference,
+ int32_t builtin_index = Builtins::kNoBuiltinId,
+ MaybeHandle<ByteArray> maybe_source_position_table =
+ MaybeHandle<ByteArray>(),
+ MaybeHandle<DeoptimizationData> maybe_deopt_data =
+ MaybeHandle<DeoptimizationData>(),
+ Movability movability = kMovable, uint32_t stub_key = 0,
+ bool is_turbofanned = false, int stack_slots = 0,
+ int safepoint_table_offset = 0,
+ int handler_table_offset = 0);
+
+ // Allocates a new, empty code object for use by builtin deserialization. The
+ // given {size} argument specifies the size of the entire code object.
+ // Can only be used when code space is unprotected and requires manual
+ // initialization by the caller.
+ Handle<Code> NewCodeForDeserialization(uint32_t size);
+
+#ifdef V8_EMBEDDED_BUILTINS
+ // Allocates a new code object and initializes it as the trampoline to the
+ // given off-heap entry point.
+ Handle<Code> NewOffHeapTrampolineFor(Handle<Code> code,
+ Address off_heap_entry);
+#endif
+
+ Handle<Code> CopyCode(Handle<Code> code);
+
+ Handle<BytecodeArray> CopyBytecodeArray(Handle<BytecodeArray>);
+
+ // Interface for creating error objects.
+ Handle<Object> NewError(Handle<JSFunction> constructor,
+ Handle<String> message);
+
+ Handle<Object> NewInvalidStringLengthError();
+
+ inline Handle<Object> NewURIError();
+
+ Handle<Object> NewError(Handle<JSFunction> constructor,
+ MessageTemplate::Template template_index,
+ Handle<Object> arg0 = Handle<Object>(),
+ Handle<Object> arg1 = Handle<Object>(),
+ Handle<Object> arg2 = Handle<Object>());
+
+#define DECLARE_ERROR(NAME) \
+ Handle<Object> New##NAME(MessageTemplate::Template template_index, \
+ Handle<Object> arg0 = Handle<Object>(), \
+ Handle<Object> arg1 = Handle<Object>(), \
+ Handle<Object> arg2 = Handle<Object>());
+ DECLARE_ERROR(Error)
+ DECLARE_ERROR(EvalError)
+ DECLARE_ERROR(RangeError)
+ DECLARE_ERROR(ReferenceError)
+ DECLARE_ERROR(SyntaxError)
+ DECLARE_ERROR(TypeError)
+ DECLARE_ERROR(WasmCompileError)
+ DECLARE_ERROR(WasmLinkError)
+ DECLARE_ERROR(WasmRuntimeError)
+#undef DECLARE_ERROR
+
+ Handle<String> NumberToString(Handle<Object> number,
+ bool check_number_string_cache = true);
+
+ inline Handle<String> Uint32ToString(uint32_t value);
+
+#define ROOT_ACCESSOR(type, name, camel_name) inline Handle<type> name();
+ ROOT_LIST(ROOT_ACCESSOR)
+#undef ROOT_ACCESSOR
+
+#define STRUCT_MAP_ACCESSOR(NAME, Name, name) inline Handle<Map> name##_map();
+ STRUCT_LIST(STRUCT_MAP_ACCESSOR)
+#undef STRUCT_MAP_ACCESSOR
+
+#define DATA_HANDLER_MAP_ACCESSOR(NAME, Name, Size, name) \
+ inline Handle<Map> name##_map();
+ DATA_HANDLER_LIST(DATA_HANDLER_MAP_ACCESSOR)
+#undef DATA_HANDLER_MAP_ACCESSOR
+
+#define STRING_ACCESSOR(name, str) inline Handle<String> name();
+ INTERNALIZED_STRING_LIST(STRING_ACCESSOR)
+#undef STRING_ACCESSOR
+
+#define SYMBOL_ACCESSOR(name) inline Handle<Symbol> name();
+ PRIVATE_SYMBOL_LIST(SYMBOL_ACCESSOR)
+#undef SYMBOL_ACCESSOR
+
+#define SYMBOL_ACCESSOR(name, description) inline Handle<Symbol> name();
+ PUBLIC_SYMBOL_LIST(SYMBOL_ACCESSOR)
+ WELL_KNOWN_SYMBOL_LIST(SYMBOL_ACCESSOR)
+#undef SYMBOL_ACCESSOR
+
+#define ACCESSOR_INFO_ACCESSOR(accessor_name, AccessorName) \
+ inline Handle<AccessorInfo> accessor_name##_accessor();
+ ACCESSOR_INFO_LIST(ACCESSOR_INFO_ACCESSOR)
+#undef ACCESSOR_INFO_ACCESSOR
+
+ // Allocates a new SharedFunctionInfo object.
+ Handle<SharedFunctionInfo> NewSharedFunctionInfoForApiFunction(
+ MaybeHandle<String> maybe_name,
+ Handle<FunctionTemplateInfo> function_template_info, FunctionKind kind);
+
+ Handle<SharedFunctionInfo> NewSharedFunctionInfoForBuiltin(
+ MaybeHandle<String> name, int builtin_index,
+ FunctionKind kind = kNormalFunction);
+
+ Handle<SharedFunctionInfo> NewSharedFunctionInfoForLiteral(
+ FunctionLiteral* literal, Handle<Script> script, bool is_toplevel);
+
+ static bool IsFunctionModeWithPrototype(FunctionMode function_mode) {
+ return (function_mode & kWithPrototypeBits) != 0;
+ }
+
+ static bool IsFunctionModeWithWritablePrototype(FunctionMode function_mode) {
+ return (function_mode & kWithWritablePrototypeBit) != 0;
+ }
+
+ static bool IsFunctionModeWithName(FunctionMode function_mode) {
+ return (function_mode & kWithNameBit) != 0;
+ }
+
+ static bool IsFunctionModeWithHomeObject(FunctionMode function_mode) {
+ return (function_mode & kWithHomeObjectBit) != 0;
+ }
+
+ Handle<Map> CreateSloppyFunctionMap(
+ FunctionMode function_mode, MaybeHandle<JSFunction> maybe_empty_function);
+
+ Handle<Map> CreateStrictFunctionMap(FunctionMode function_mode,
+ Handle<JSFunction> empty_function);
+
+ Handle<Map> CreateClassFunctionMap(Handle<JSFunction> empty_function);
+
+ // Allocates a new JSMessageObject object.
+ Handle<JSMessageObject> NewJSMessageObject(MessageTemplate::Template message,
+ Handle<Object> argument,
+ int start_position,
+ int end_position,
+ Handle<Object> script,
+ Handle<Object> stack_frames);
+
+ Handle<DebugInfo> NewDebugInfo(Handle<SharedFunctionInfo> shared);
+
+ Handle<CoverageInfo> NewCoverageInfo(const ZoneVector<SourceRange>& slots);
+
+  // Return a map for the given number of properties using the map cache in
+  // the native context.
+ Handle<Map> ObjectLiteralMapFromCache(Handle<Context> native_context,
+ int number_of_properties);
+
+ Handle<LoadHandler> NewLoadHandler(int data_count);
+ Handle<StoreHandler> NewStoreHandler(int data_count);
+
+ Handle<RegExpMatchInfo> NewRegExpMatchInfo();
+
+ // Creates a new FixedArray that holds the data associated with the
+ // atom regexp and stores it in the regexp.
+ void SetRegExpAtomData(Handle<JSRegExp> regexp, JSRegExp::Type type,
+ Handle<String> source, JSRegExp::Flags flags,
+ Handle<Object> match_pattern);
+
+ // Creates a new FixedArray that holds the data associated with the
+ // irregexp regexp and stores it in the regexp.
+ void SetRegExpIrregexpData(Handle<JSRegExp> regexp, JSRegExp::Type type,
+ Handle<String> source, JSRegExp::Flags flags,
+ int capture_count);
+
+ // Returns the value for a known global constant (a property of the global
+ // object which is neither configurable nor writable) like 'undefined'.
+ // Returns a null handle when the given name is unknown.
+ Handle<Object> GlobalConstantFor(Handle<Name> name);
+
+ // Converts the given boolean condition to JavaScript boolean value.
+ Handle<Object> ToBoolean(bool value);
+
+  // Converts the given ToPrimitive hint to its string representation.
+ Handle<String> ToPrimitiveHintString(ToPrimitiveHint hint);
+
+ Handle<JSPromise> NewJSPromise(PretenureFlag pretenure = NOT_TENURED);
+
+ Handle<CallHandlerInfo> NewCallHandlerInfo(bool has_no_side_effect = false);
+
+ HeapObject* NewForTest(Handle<Map> map, PretenureFlag pretenure) {
+ return New(map, pretenure);
+ }
+
+ private:
+ Isolate* isolate() { return reinterpret_cast<Isolate*>(this); }
+
+ HeapObject* AllocateRawWithImmortalMap(
+ int size, PretenureFlag pretenure, Map* map,
+ AllocationAlignment alignment = kWordAligned);
+ HeapObject* AllocateRawWithAllocationSite(
+ Handle<Map> map, PretenureFlag pretenure,
+ Handle<AllocationSite> allocation_site);
+
+ // Allocate memory for an uninitialized array (e.g., a FixedArray or similar).
+ HeapObject* AllocateRawArray(int size, PretenureFlag pretenure);
+ HeapObject* AllocateRawFixedArray(int length, PretenureFlag pretenure);
+ Handle<FixedArray> NewFixedArrayWithFiller(Heap::RootListIndex map_root_index,
+ int length, Object* filler,
+ PretenureFlag pretenure);
+
+ // Creates a heap object based on the map. The fields of the heap object are
+  // not initialized; it is the responsibility of the caller to do that.
+ HeapObject* New(Handle<Map> map, PretenureFlag pretenure);
+
+ template <typename T>
+ Handle<T> CopyArrayWithMap(Handle<T> src, Handle<Map> map);
+ template <typename T>
+ Handle<T> CopyArrayAndGrow(Handle<T> src, int grow_by,
+ PretenureFlag pretenure);
+
+ template <bool is_one_byte, typename T>
+ Handle<String> AllocateInternalizedStringImpl(T t, int chars,
+ uint32_t hash_field);
+
+ Handle<SeqOneByteString> AllocateRawOneByteInternalizedString(
+ int length, uint32_t hash_field);
+
+ Handle<String> AllocateTwoByteInternalizedString(Vector<const uc16> str,
+ uint32_t hash_field);
+
+ MaybeHandle<String> NewStringFromTwoByte(const uc16* string, int length,
+ PretenureFlag pretenure);
+
+  // Attempt to find the number in a small cache. If we find it, return
+ // the string representation of the number. Otherwise return undefined.
+ Handle<Object> GetNumberStringCache(Handle<Object> number);
+
+ // Update the cache with a new number-string pair.
+ void SetNumberStringCache(Handle<Object> number, Handle<String> string);
+
+ // Create a JSArray with no elements and no length.
+ Handle<JSArray> NewJSArray(ElementsKind elements_kind,
+ PretenureFlag pretenure = NOT_TENURED);
+
+ Handle<JSPromise> NewJSPromiseWithoutHook(
+ PretenureFlag pretenure = NOT_TENURED);
+
+ Handle<SharedFunctionInfo> NewSharedFunctionInfo(
+ MaybeHandle<String> name, MaybeHandle<HeapObject> maybe_function_data,
+ int maybe_builtin_index, FunctionKind kind = kNormalFunction);
+
+ void InitializeAllocationMemento(AllocationMemento* memento,
+ AllocationSite* allocation_site);
+
+ // Initializes a JSObject based on its map.
+ void InitializeJSObjectFromMap(Handle<JSObject> obj,
+ Handle<Object> properties, Handle<Map> map);
+ // Initializes JSObject body starting at given offset.
+ void InitializeJSObjectBody(Handle<JSObject> obj, Handle<Map> map,
+ int start_offset);
+};
+
+// Utility class to simplify argument handling around JSFunction creation.
+class NewFunctionArgs final {
+ public:
+ static NewFunctionArgs ForWasm(Handle<String> name, Handle<Code> code,
+ Handle<Map> map);
+ static NewFunctionArgs ForBuiltin(Handle<String> name, Handle<Map> map,
+ int builtin_id);
+ static NewFunctionArgs ForFunctionWithoutCode(Handle<String> name,
+ Handle<Map> map,
+ LanguageMode language_mode);
+ static NewFunctionArgs ForBuiltinWithPrototype(
+ Handle<String> name, Handle<Object> prototype, InstanceType type,
+ int instance_size, int inobject_properties, int builtin_id,
+ MutableMode prototype_mutability);
+ static NewFunctionArgs ForBuiltinWithoutPrototype(Handle<String> name,
+ int builtin_id,
+ LanguageMode language_mode);
+
+ Handle<Map> GetMap(Isolate* isolate) const;
+
+ private:
+ NewFunctionArgs() {} // Use the static factory constructors.
+
+ void SetShouldCreateAndSetInitialMap();
+ void SetShouldSetPrototype();
+ void SetShouldSetLanguageMode();
+
+ // Sentinel value.
+ static const int kUninitialized = -1;
+
+ Handle<String> name_;
+ MaybeHandle<Map> maybe_map_;
+ MaybeHandle<Code> maybe_code_;
+
+ bool should_create_and_set_initial_map_ = false;
+ InstanceType type_;
+ int instance_size_ = kUninitialized;
+ int inobject_properties_ = kUninitialized;
+
+ bool should_set_prototype_ = false;
+ MaybeHandle<Object> maybe_prototype_;
+
+ bool should_set_language_mode_ = false;
+ LanguageMode language_mode_;
+
+ int maybe_builtin_id_ = kUninitialized;
+
+ MutableMode prototype_mutability_;
+
+ friend class Factory;
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_HEAP_FACTORY_H_
diff --git a/deps/v8/src/heap/gc-tracer.cc b/deps/v8/src/heap/gc-tracer.cc
index 30abe44ca6..9900b343fd 100644
--- a/deps/v8/src/heap/gc-tracer.cc
+++ b/deps/v8/src/heap/gc-tracer.cc
@@ -152,7 +152,11 @@ GCTracer::GCTracer(Heap* heap)
new_space_allocation_in_bytes_since_gc_(0),
old_generation_allocation_in_bytes_since_gc_(0),
combined_mark_compact_speed_cache_(0.0),
- start_counter_(0) {
+ start_counter_(0),
+ average_mutator_duration_(0),
+ average_mark_compact_duration_(0),
+ current_mark_compact_mutator_utilization_(1.0),
+ previous_mark_compact_end_time_(0) {
// All accesses to incremental_marking_scope assume that incremental marking
// scopes come first.
STATIC_ASSERT(0 == Scope::FIRST_INCREMENTAL_SCOPE);
@@ -188,6 +192,10 @@ void GCTracer::ResetForTesting() {
recorded_context_disposal_times_.Reset();
recorded_survival_ratios_.Reset();
start_counter_ = 0;
+ average_mutator_duration_ = 0;
+ average_mark_compact_duration_ = 0;
+ current_mark_compact_mutator_utilization_ = 1.0;
+ previous_mark_compact_end_time_ = 0;
base::LockGuard<base::Mutex> guard(&background_counter_mutex_);
for (int i = 0; i < BackgroundScope::NUMBER_OF_SCOPES; i++) {
background_counter_[i].total_duration_ms = 0;
@@ -322,6 +330,9 @@ void GCTracer::Stop(GarbageCollector collector) {
current_.incremental_marking_scopes[i] = incremental_marking_scopes_[i];
current_.scopes[i] = incremental_marking_scopes_[i].duration;
}
+
+ RecordMutatorUtilization(
+ current_.end_time, duration + current_.incremental_marking_duration);
RecordIncrementalMarkingSpeed(current_.incremental_marking_bytes,
current_.incremental_marking_duration);
recorded_incremental_mark_compacts_.Push(
@@ -333,6 +344,8 @@ void GCTracer::Stop(GarbageCollector collector) {
case Event::MARK_COMPACTOR:
DCHECK_EQ(0u, current_.incremental_marking_bytes);
DCHECK_EQ(0, current_.incremental_marking_duration);
+ RecordMutatorUtilization(
+ current_.end_time, duration + current_.incremental_marking_duration);
recorded_mark_compacts_.Push(
MakeBytesAndDuration(current_.start_object_size, duration));
ResetIncrementalMarkingCounters();
@@ -469,7 +482,7 @@ void GCTracer::Print() const {
"[%d:%p] "
"%8.0f ms: "
"%s %.1f (%.1f) -> %.1f (%.1f) MB, "
- "%.1f / %.1f ms %s %s %s\n",
+ "%.1f / %.1f ms %s (average mu = %.3f, current mu = %.3f) %s %s\n",
base::OS::GetCurrentProcessId(),
reinterpret_cast<void*>(heap_->isolate()),
heap_->isolate()->time_millis_since_init(), current_.TypeName(false),
@@ -478,6 +491,8 @@ void GCTracer::Print() const {
static_cast<double>(current_.end_object_size) / MB,
static_cast<double>(current_.end_memory_size) / MB, duration,
TotalExternalTime(), incremental_buffer,
+ AverageMarkCompactMutatorUtilization(),
+ CurrentMarkCompactMutatorUtilization(),
Heap::GarbageCollectionReasonToString(current_.gc_reason),
current_.collector_reason != nullptr ? current_.collector_reason : "");
}
@@ -662,6 +677,7 @@ void GCTracer::PrintNVP() const {
"clear.weak_cells=%.1f "
"clear.weak_collections=%.1f "
"clear.weak_lists=%.1f "
+ "clear.weak_references=%.1f "
"epilogue=%.1f "
"evacuate=%.1f "
"evacuate.candidates=%.1f "
@@ -756,6 +772,7 @@ void GCTracer::PrintNVP() const {
current_.scopes[Scope::MC_CLEAR_WEAK_CELLS],
current_.scopes[Scope::MC_CLEAR_WEAK_COLLECTIONS],
current_.scopes[Scope::MC_CLEAR_WEAK_LISTS],
+ current_.scopes[Scope::MC_CLEAR_WEAK_REFERENCES],
current_.scopes[Scope::MC_EPILOGUE],
current_.scopes[Scope::MC_EVACUATE],
current_.scopes[Scope::MC_EVACUATE_CANDIDATES],
@@ -869,6 +886,43 @@ void GCTracer::RecordIncrementalMarkingSpeed(size_t bytes, double duration) {
}
}
+void GCTracer::RecordMutatorUtilization(double mark_compact_end_time,
+ double mark_compact_duration) {
+ if (previous_mark_compact_end_time_ == 0) {
+ // The first event only contributes to previous_mark_compact_end_time_,
+ // because we cannot compute the mutator duration.
+ previous_mark_compact_end_time_ = mark_compact_end_time;
+ } else {
+ double total_duration =
+ mark_compact_end_time - previous_mark_compact_end_time_;
+ double mutator_duration = total_duration - mark_compact_duration;
+ if (average_mark_compact_duration_ == 0 && average_mutator_duration_ == 0) {
+ // This is the first event with mutator and mark-compact durations.
+ average_mark_compact_duration_ = mark_compact_duration;
+ average_mutator_duration_ = mutator_duration;
+ } else {
+ average_mark_compact_duration_ =
+ (average_mark_compact_duration_ + mark_compact_duration) / 2;
+ average_mutator_duration_ =
+ (average_mutator_duration_ + mutator_duration) / 2;
+ }
+ current_mark_compact_mutator_utilization_ =
+ total_duration ? mutator_duration / total_duration : 0;
+ previous_mark_compact_end_time_ = mark_compact_end_time;
+ }
+}
+
+double GCTracer::AverageMarkCompactMutatorUtilization() const {
+ double average_total_duration =
+ average_mark_compact_duration_ + average_mutator_duration_;
+ if (average_total_duration == 0) return 1.0;
+ return average_mutator_duration_ / average_total_duration;
+}
+
+double GCTracer::CurrentMarkCompactMutatorUtilization() const {
+ return current_mark_compact_mutator_utilization_;
+}
+
double GCTracer::IncrementalMarkingSpeedInBytesPerMillisecond() const {
const int kConservativeSpeedInBytesPerMillisecond = 128 * KB;
if (recorded_incremental_marking_speed_ != 0) {
diff --git a/deps/v8/src/heap/gc-tracer.h b/deps/v8/src/heap/gc-tracer.h
index fb0f001e3d..f35fa50d45 100644
--- a/deps/v8/src/heap/gc-tracer.h
+++ b/deps/v8/src/heap/gc-tracer.h
@@ -300,6 +300,11 @@ class V8_EXPORT_PRIVATE GCTracer {
void NotifyIncrementalMarkingStart();
+ // Returns average mutator utilization with respect to mark-compact
+  // garbage collections. This ignores the scavenger.
+ double AverageMarkCompactMutatorUtilization() const;
+ double CurrentMarkCompactMutatorUtilization() const;
+
V8_INLINE void AddScopeSample(Scope::ScopeId scope, double duration) {
DCHECK(scope < Scope::NUMBER_OF_SCOPES);
if (scope >= Scope::FIRST_INCREMENTAL_SCOPE &&
@@ -328,6 +333,7 @@ class V8_EXPORT_PRIVATE GCTracer {
FRIEND_TEST(GCTracerTest, IncrementalMarkingDetails);
FRIEND_TEST(GCTracerTest, IncrementalScope);
FRIEND_TEST(GCTracerTest, IncrementalMarkingSpeed);
+ FRIEND_TEST(GCTracerTest, MutatorUtilization);
struct BackgroundCounter {
double total_duration_ms;
@@ -344,6 +350,8 @@ class V8_EXPORT_PRIVATE GCTracer {
void ResetForTesting();
void ResetIncrementalMarkingCounters();
void RecordIncrementalMarkingSpeed(size_t bytes, double duration);
+ void RecordMutatorUtilization(double mark_compactor_end_time,
+ double mark_compactor_duration);
// Print one detailed trace line in name=value format.
// TODO(ernstm): Move to Heap.
@@ -415,6 +423,12 @@ class V8_EXPORT_PRIVATE GCTracer {
// Counts how many tracers were started without stopping.
int start_counter_;
+ // Used for computing average mutator utilization.
+ double average_mutator_duration_;
+ double average_mark_compact_duration_;
+ double current_mark_compact_mutator_utilization_;
+ double previous_mark_compact_end_time_;
+
base::RingBuffer<BytesAndDuration> recorded_minor_gcs_total_;
base::RingBuffer<BytesAndDuration> recorded_minor_gcs_survived_;
base::RingBuffer<BytesAndDuration> recorded_compactions_;
diff --git a/deps/v8/src/heap/heap-inl.h b/deps/v8/src/heap/heap-inl.h
index 41af95fa44..230452d4d0 100644
--- a/deps/v8/src/heap/heap-inl.h
+++ b/deps/v8/src/heap/heap-inl.h
@@ -23,6 +23,8 @@
#include "src/log.h"
#include "src/msan.h"
#include "src/objects-inl.h"
+#include "src/objects/descriptor-array.h"
+#include "src/objects/literal-objects.h"
#include "src/objects/scope-info.h"
#include "src/objects/script-inl.h"
#include "src/profiler/heap-profiler.h"
@@ -126,125 +128,6 @@ size_t Heap::NewSpaceAllocationCounter() {
return new_space_allocation_counter_ + new_space()->AllocatedSinceLastGC();
}
-template <>
-bool inline Heap::IsOneByte(Vector<const char> str, int chars) {
- // TODO(dcarney): incorporate Latin-1 check when Latin-1 is supported?
- return chars == str.length();
-}
-
-
-template <>
-bool inline Heap::IsOneByte(String* str, int chars) {
- return str->IsOneByteRepresentation();
-}
-
-
-AllocationResult Heap::AllocateInternalizedStringFromUtf8(
- Vector<const char> str, int chars, uint32_t hash_field) {
- if (IsOneByte(str, chars)) {
- return AllocateOneByteInternalizedString(Vector<const uint8_t>::cast(str),
- hash_field);
- }
- return AllocateInternalizedStringImpl<false>(str, chars, hash_field);
-}
-
-
-template <typename T>
-AllocationResult Heap::AllocateInternalizedStringImpl(T t, int chars,
- uint32_t hash_field) {
- if (IsOneByte(t, chars)) {
- return AllocateInternalizedStringImpl<true>(t, chars, hash_field);
- }
- return AllocateInternalizedStringImpl<false>(t, chars, hash_field);
-}
-
-
-AllocationResult Heap::AllocateOneByteInternalizedString(
- Vector<const uint8_t> str, uint32_t hash_field) {
- CHECK_GE(String::kMaxLength, str.length());
- // The canonical empty_string is the only zero-length string we allow.
- DCHECK_IMPLIES(str.length() == 0, roots_[kempty_stringRootIndex] == nullptr);
- // Compute map and object size.
- Map* map = one_byte_internalized_string_map();
- int size = SeqOneByteString::SizeFor(str.length());
-
- // Allocate string.
- HeapObject* result = nullptr;
- {
- AllocationResult allocation = AllocateRaw(size, OLD_SPACE);
- if (!allocation.To(&result)) return allocation;
- }
-
- // String maps are all immortal immovable objects.
- result->set_map_after_allocation(map, SKIP_WRITE_BARRIER);
- // Set length and hash fields of the allocated string.
- String* answer = String::cast(result);
- answer->set_length(str.length());
- answer->set_hash_field(hash_field);
-
- DCHECK_EQ(size, answer->Size());
-
- // Fill in the characters.
- MemCopy(answer->address() + SeqOneByteString::kHeaderSize, str.start(),
- str.length());
-
- return answer;
-}
-
-
-AllocationResult Heap::AllocateTwoByteInternalizedString(Vector<const uc16> str,
- uint32_t hash_field) {
- CHECK_GE(String::kMaxLength, str.length());
- DCHECK_NE(0, str.length()); // Use Heap::empty_string() instead.
- // Compute map and object size.
- Map* map = internalized_string_map();
- int size = SeqTwoByteString::SizeFor(str.length());
-
- // Allocate string.
- HeapObject* result = nullptr;
- {
- AllocationResult allocation = AllocateRaw(size, OLD_SPACE);
- if (!allocation.To(&result)) return allocation;
- }
-
- result->set_map_after_allocation(map);
- // Set length and hash fields of the allocated string.
- String* answer = String::cast(result);
- answer->set_length(str.length());
- answer->set_hash_field(hash_field);
-
- DCHECK_EQ(size, answer->Size());
-
- // Fill in the characters.
- MemCopy(answer->address() + SeqTwoByteString::kHeaderSize, str.start(),
- str.length() * kUC16Size);
-
- return answer;
-}
-
-AllocationResult Heap::CopyFixedArray(FixedArray* src) {
- if (src->length() == 0) return src;
- return CopyFixedArrayWithMap(src, src->map());
-}
-
-
-AllocationResult Heap::CopyFixedDoubleArray(FixedDoubleArray* src) {
- if (src->length() == 0) return src;
- return CopyFixedDoubleArrayWithMap(src, src->map());
-}
-
-AllocationResult Heap::AllocateFixedArrayWithMap(RootListIndex map_root_index,
- int length,
- PretenureFlag pretenure) {
- return AllocateFixedArrayWithFiller(map_root_index, length, pretenure,
- undefined_value());
-}
-
-AllocationResult Heap::AllocateFixedArray(int length, PretenureFlag pretenure) {
- return AllocateFixedArrayWithFiller(Heap::kFixedArrayMapRootIndex, length,
- pretenure, undefined_value());
-}
-
AllocationResult Heap::AllocateRaw(int size_in_bytes, AllocationSpace space,
AllocationAlignment alignment) {
DCHECK(AllowHandleAllocation::IsAllowed());
@@ -295,22 +178,33 @@ AllocationResult Heap::AllocateRaw(int size_in_bytes, AllocationSpace space,
allocation = lo_space_->AllocateRaw(size_in_bytes, NOT_EXECUTABLE);
} else if (MAP_SPACE == space) {
allocation = map_space_->AllocateRawUnaligned(size_in_bytes);
+ } else if (RO_SPACE == space) {
+#ifdef V8_USE_SNAPSHOT
+ DCHECK(isolate_->serializer_enabled());
+#endif
+ DCHECK(!large_object);
+ allocation = read_only_space_->AllocateRaw(size_in_bytes, alignment);
} else {
// NEW_SPACE is not allowed here.
UNREACHABLE();
}
+
if (allocation.To(&object)) {
+ if (space == CODE_SPACE) {
+ // Unprotect the memory chunk of the object if it was not unprotected
+ // already.
+ UnprotectAndRegisterMemoryChunk(object);
+ ZapCodeObject(object->address(), size_in_bytes);
+ }
OnAllocationEvent(object, size_in_bytes);
}
return allocation;
}
-
void Heap::OnAllocationEvent(HeapObject* object, int size_in_bytes) {
- HeapProfiler* profiler = isolate_->heap_profiler();
- if (profiler->is_tracking_allocations()) {
- profiler->AllocationEvent(object->address(), size_in_bytes);
+ for (auto& tracker : allocation_trackers_) {
+ tracker->AllocationEvent(object->address(), size_in_bytes);
}
if (FLAG_verify_predictable) {
@@ -342,6 +236,9 @@ void Heap::OnMoveEvent(HeapObject* target, HeapObject* source,
heap_profiler->ObjectMoveEvent(source->address(), target->address(),
size_in_bytes);
}
+ for (auto& tracker : allocation_trackers_) {
+ tracker->MoveEvent(source->address(), target->address(), size_in_bytes);
+ }
if (target->IsSharedFunctionInfo()) {
LOG_CODE_EVENT(isolate_, SharedFunctionInfoMoveEvent(source->address(),
target->address()));
@@ -410,27 +307,55 @@ void Heap::FinalizeExternalString(String* string) {
Address Heap::NewSpaceTop() { return new_space_->top(); }
bool Heap::InNewSpace(Object* object) {
+ DCHECK(!HasWeakHeapObjectTag(object));
+ return object->IsHeapObject() && InNewSpace(HeapObject::cast(object));
+}
+
+bool Heap::InNewSpace(MaybeObject* object) {
+ HeapObject* heap_object;
+ return object->ToStrongOrWeakHeapObject(&heap_object) &&
+ InNewSpace(heap_object);
+}
+
+bool Heap::InNewSpace(HeapObject* heap_object) {
// Inlined check from NewSpace::Contains.
- bool result =
- object->IsHeapObject() &&
- Page::FromAddress(HeapObject::cast(object)->address())->InNewSpace();
+ bool result = Page::FromAddress(heap_object->address())->InNewSpace();
DCHECK(!result || // Either not in new space
gc_state_ != NOT_IN_GC || // ... or in the middle of GC
- InToSpace(object)); // ... or in to-space (where we allocate).
+ InToSpace(heap_object)); // ... or in to-space (where we allocate).
return result;
}
bool Heap::InFromSpace(Object* object) {
- return object->IsHeapObject() &&
- MemoryChunk::FromAddress(HeapObject::cast(object)->address())
- ->IsFlagSet(Page::IN_FROM_SPACE);
+ DCHECK(!HasWeakHeapObjectTag(object));
+ return object->IsHeapObject() && InFromSpace(HeapObject::cast(object));
}
+bool Heap::InFromSpace(MaybeObject* object) {
+ HeapObject* heap_object;
+ return object->ToStrongOrWeakHeapObject(&heap_object) &&
+ InFromSpace(heap_object);
+}
+
+bool Heap::InFromSpace(HeapObject* heap_object) {
+ return MemoryChunk::FromAddress(heap_object->address())
+ ->IsFlagSet(Page::IN_FROM_SPACE);
+}
bool Heap::InToSpace(Object* object) {
- return object->IsHeapObject() &&
- MemoryChunk::FromAddress(HeapObject::cast(object)->address())
- ->IsFlagSet(Page::IN_TO_SPACE);
+ DCHECK(!HasWeakHeapObjectTag(object));
+ return object->IsHeapObject() && InToSpace(HeapObject::cast(object));
+}
+
+bool Heap::InToSpace(MaybeObject* object) {
+ HeapObject* heap_object;
+ return object->ToStrongOrWeakHeapObject(&heap_object) &&
+ InToSpace(heap_object);
+}
+
+bool Heap::InToSpace(HeapObject* heap_object) {
+ return MemoryChunk::FromAddress(heap_object->address())
+ ->IsFlagSet(Page::IN_TO_SPACE);
}
bool Heap::InOldSpace(Object* object) { return old_space_->Contains(object); }
@@ -451,6 +376,15 @@ bool Heap::ShouldBePromoted(Address old_address) {
}
void Heap::RecordWrite(Object* object, Object** slot, Object* value) {
+ DCHECK(!HasWeakHeapObjectTag(*slot));
+ DCHECK(!HasWeakHeapObjectTag(value));
+ if (!InNewSpace(value) || !object->IsHeapObject() || InNewSpace(object)) {
+ return;
+ }
+ store_buffer()->InsertEntry(reinterpret_cast<Address>(slot));
+}
+
+void Heap::RecordWrite(Object* object, MaybeObject** slot, MaybeObject* value) {
if (!InNewSpace(value) || !object->IsHeapObject() || InNewSpace(object)) {
return;
}
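
The two RecordWrite overloads above share one filter: a slot is pushed into the store buffer only when an old-space heap object starts pointing at a new-space value. Below is a minimal standalone sketch of that filter, not V8 code; the boolean parameters and StoreBufferSketch are invented stand-ins for Heap::InNewSpace and StoreBuffer::InsertEntry.

#include <cstdint>
#include <unordered_set>

struct StoreBufferSketch {
  std::unordered_set<uintptr_t> slots;
  void InsertEntry(uintptr_t slot) { slots.insert(slot); }
};

void RecordWriteSketch(bool host_is_heap_object, bool host_in_new_space,
                       bool value_in_new_space, uintptr_t slot,
                       StoreBufferSketch* store_buffer) {
  // Skip when the value is not in new space, the host is not a heap object,
  // or the host itself is in new space; only old-to-new pointers need a
  // remembered-set entry.
  if (!value_in_new_space || !host_is_heap_object || host_in_new_space) return;
  store_buffer->InsertEntry(slot);
}
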
@@ -594,15 +528,22 @@ uint32_t Heap::HashSeed() {
int Heap::NextScriptId() {
int last_id = last_script_id()->value();
- if (last_id == Smi::kMaxValue) {
- last_id = 1;
- } else {
- last_id++;
- }
+ if (last_id == Smi::kMaxValue) last_id = v8::UnboundScript::kNoScriptId;
+ last_id++;
set_last_script_id(Smi::FromInt(last_id));
return last_id;
}
+int Heap::NextDebuggingId() {
+ int last_id = last_debugging_id()->value();
+ if (last_id == SharedFunctionInfo::DebuggingIdBits::kMax) {
+ last_id = SharedFunctionInfo::kNoDebuggingId;
+ }
+ last_id++;
+ set_last_debugging_id(Smi::FromInt(last_id));
+ return last_id;
+}
+
int Heap::GetNextTemplateSerialNumber() {
int next_serial_number = next_template_serial_number()->value() + 1;
set_next_template_serial_number(Smi::FromInt(next_serial_number));
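
NextScriptId and NextDebuggingId above follow the same reset-then-increment pattern, which guarantees that the reserved "no id" sentinel is never handed out. A standalone sketch of that pattern; WrappingIdAllocator, no_id and max_id are invented names, not V8 ones.

class WrappingIdAllocator {
 public:
  WrappingIdAllocator(int no_id, int max_id)
      : no_id_(no_id), max_id_(max_id), last_id_(no_id) {}

  int Next() {
    // When the range is exhausted, fall back to the sentinel so the increment
    // below restarts just above it; the sentinel itself is never returned.
    if (last_id_ == max_id_) last_id_ = no_id_;
    ++last_id_;
    return last_id_;
  }

 private:
  const int no_id_;
  const int max_id_;
  int last_id_;
};
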
@@ -649,6 +590,24 @@ CodeSpaceMemoryModificationScope::~CodeSpaceMemoryModificationScope() {
}
}
+CodePageCollectionMemoryModificationScope::
+ CodePageCollectionMemoryModificationScope(Heap* heap)
+ : heap_(heap) {
+ if (heap_->write_protect_code_memory() &&
+ !heap_->code_space_memory_modification_scope_depth()) {
+ heap_->EnableUnprotectedMemoryChunksRegistry();
+ }
+}
+
+CodePageCollectionMemoryModificationScope::
+ ~CodePageCollectionMemoryModificationScope() {
+ if (heap_->write_protect_code_memory() &&
+ !heap_->code_space_memory_modification_scope_depth()) {
+ heap_->ProtectUnprotectedMemoryChunks();
+ heap_->DisableUnprotectedMemoryChunksRegistry();
+ }
+}
+
CodePageMemoryModificationScope::CodePageMemoryModificationScope(
MemoryChunk* chunk)
: chunk_(chunk),
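
The new CodePageCollectionMemoryModificationScope above pairs with the unprotected-chunks registry added to heap.cc further down in this diff. The following is a minimal RAII sketch of its shape, not V8 code; RegistrySketch is an invented stand-in, and the real scope additionally checks write_protect_code_memory() and the code-space scope depth.

struct RegistrySketch {
  void Enable() {}      // stand-in for EnableUnprotectedMemoryChunksRegistry
  void ProtectAll() {}  // stand-in for ProtectUnprotectedMemoryChunks
  void Disable() {}     // stand-in for DisableUnprotectedMemoryChunksRegistry
};

class CollectionModificationScopeSketch {
 public:
  explicit CollectionModificationScopeSketch(RegistrySketch* registry)
      : registry_(registry) {
    // While the scope is open, code-space allocations register the pages they
    // unprotect instead of re-protecting them one object at a time.
    registry_->Enable();
  }
  ~CollectionModificationScopeSketch() {
    // Leaving the scope re-protects everything that was registered.
    registry_->ProtectAll();
    registry_->Disable();
  }

 private:
  RegistrySketch* const registry_;
};
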
diff --git a/deps/v8/src/heap/heap.cc b/deps/v8/src/heap/heap.cc
index 9a83c0d172..ab2399aad7 100644
--- a/deps/v8/src/heap/heap.cc
+++ b/deps/v8/src/heap/heap.cc
@@ -48,6 +48,7 @@
#include "src/instruction-stream.h"
#include "src/interpreter/interpreter.h"
#include "src/objects/data-handler.h"
+#include "src/objects/maybe-object.h"
#include "src/objects/shared-function-info.h"
#include "src/regexp/jsregexp.h"
#include "src/runtime-profiler.h"
@@ -55,7 +56,6 @@
#include "src/snapshot/serializer-common.h"
#include "src/snapshot/snapshot.h"
#include "src/tracing/trace-event.h"
-#include "src/trap-handler/trap-handler.h"
#include "src/unicode-decoder.h"
#include "src/unicode-inl.h"
#include "src/utils-inl.h"
@@ -161,8 +161,6 @@ Heap::Heap()
survived_last_scavenge_(0),
always_allocate_scope_count_(0),
memory_pressure_level_(MemoryPressureLevel::kNone),
- out_of_memory_callback_(nullptr),
- out_of_memory_callback_data_(nullptr),
contexts_disposed_(0),
number_of_disposed_maps_(0),
new_space_(nullptr),
@@ -170,6 +168,7 @@ Heap::Heap()
code_space_(nullptr),
map_space_(nullptr),
lo_space_(nullptr),
+ read_only_space_(nullptr),
write_protect_code_memory_(false),
code_space_memory_modification_scope_depth_(0),
gc_state_(NOT_IN_GC),
@@ -182,6 +181,7 @@ Heap::Heap()
max_marking_limit_reached_(0.0),
ms_count_(0),
gc_count_(0),
+ consecutive_ineffective_mark_compacts_(0),
mmap_region_base_(0),
remembered_unmapped_pages_index_(0),
old_generation_allocation_limit_(initial_old_generation_size_),
@@ -229,10 +229,10 @@ Heap::Heap()
heap_iterator_depth_(0),
local_embedder_heap_tracer_(nullptr),
fast_promotion_mode_(false),
- use_tasks_(true),
force_oom_(false),
delay_sweeper_tasks_for_testing_(false),
- pending_layout_change_object_(nullptr)
+ pending_layout_change_object_(nullptr),
+ unprotected_memory_chunks_registry_enabled_(false)
#ifdef V8_ENABLE_ALLOCATION_TIMEOUT
,
allocation_timeout_(0)
@@ -250,6 +250,12 @@ Heap::Heap()
RememberUnmappedPage(nullptr, false);
}
+size_t Heap::MaxReserved() {
+ const double kFactor = Page::kPageSize * 1.0 / Page::kAllocatableMemory;
+ return static_cast<size_t>(
+ (2 * max_semi_space_size_ + max_old_generation_size_) * kFactor);
+}
+
size_t Heap::Capacity() {
if (!HasBeenSetUp()) return 0;
@@ -324,7 +330,8 @@ bool Heap::CanExpandOldGeneration(size_t size) {
bool Heap::HasBeenSetUp() {
return old_space_ != nullptr && code_space_ != nullptr &&
- map_space_ != nullptr && lo_space_ != nullptr;
+ map_space_ != nullptr && lo_space_ != nullptr &&
+ read_only_space_ != nullptr;
}
@@ -435,13 +442,27 @@ void Heap::ReportStatisticsAfterGC() {
}
}
+void Heap::AddHeapObjectAllocationTracker(
+ HeapObjectAllocationTracker* tracker) {
+ if (allocation_trackers_.empty()) DisableInlineAllocation();
+ allocation_trackers_.push_back(tracker);
+}
+
+void Heap::RemoveHeapObjectAllocationTracker(
+ HeapObjectAllocationTracker* tracker) {
+ allocation_trackers_.erase(std::remove(allocation_trackers_.begin(),
+ allocation_trackers_.end(), tracker),
+ allocation_trackers_.end());
+ if (allocation_trackers_.empty()) EnableInlineAllocation();
+}
+
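
AddHeapObjectAllocationTracker and RemoveHeapObjectAllocationTracker above also toggle inline allocation, because bump-pointer allocation would bypass the tracker hooks that OnAllocationEvent and OnMoveEvent now iterate over. A standalone sketch of that bookkeeping with invented stand-in types:

#include <algorithm>
#include <vector>

struct TrackerSketch {
  virtual ~TrackerSketch() = default;
  virtual void AllocationEvent(void* address, int size) = 0;
};

class TrackerRegistrySketch {
 public:
  void Add(TrackerSketch* tracker) {
    // Inline (bump-pointer) allocation bypasses the event hooks, so it is
    // switched off as soon as the first tracker is registered.
    if (trackers_.empty()) DisableInlineAllocation();
    trackers_.push_back(tracker);
  }
  void Remove(TrackerSketch* tracker) {
    // Erase-remove keeps the vector compact even if a tracker was added twice.
    trackers_.erase(std::remove(trackers_.begin(), trackers_.end(), tracker),
                    trackers_.end());
    if (trackers_.empty()) EnableInlineAllocation();
  }
  void NotifyAllocation(void* address, int size) {
    for (TrackerSketch* tracker : trackers_) {
      tracker->AllocationEvent(address, size);
    }
  }

 private:
  void DisableInlineAllocation() {}  // stand-in for the heap's real toggle
  void EnableInlineAllocation() {}   // stand-in for the heap's real toggle
  std::vector<TrackerSketch*> trackers_;
};
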
void Heap::AddRetainingPathTarget(Handle<HeapObject> object,
RetainingPathOption option) {
if (!FLAG_track_retaining_path) {
- PrintF("Retaining path tracking requires --trace-retaining-path\n");
+ PrintF("Retaining path tracking requires --track-retaining-path\n");
} else {
int index = 0;
- Handle<WeakFixedArray> array = WeakFixedArray::Add(
+ Handle<FixedArrayOfWeakCells> array = FixedArrayOfWeakCells::Add(
handle(retaining_path_targets(), isolate()), object, &index);
set_retaining_path_targets(*array);
retaining_path_target_option_[index] = option;
@@ -450,8 +471,9 @@ void Heap::AddRetainingPathTarget(Handle<HeapObject> object,
bool Heap::IsRetainingPathTarget(HeapObject* object,
RetainingPathOption* option) {
- if (!retaining_path_targets()->IsWeakFixedArray()) return false;
- WeakFixedArray* targets = WeakFixedArray::cast(retaining_path_targets());
+ if (!retaining_path_targets()->IsFixedArrayOfWeakCells()) return false;
+ FixedArrayOfWeakCells* targets =
+ FixedArrayOfWeakCells::cast(retaining_path_targets());
int length = targets->Length();
for (int i = 0; i < length; i++) {
if (targets->Get(i) == object) {
@@ -616,6 +638,8 @@ const char* Heap::GetSpaceName(int idx) {
return "code_space";
case LO_SPACE:
return "large_object_space";
+ case RO_SPACE:
+ return "read_only_space";
default:
UNREACHABLE();
}
@@ -965,7 +989,7 @@ void Heap::GarbageCollectionEpilogue() {
void Heap::PreprocessStackTraces() {
- WeakFixedArray::Iterator iterator(weak_stack_trace_list());
+ FixedArrayOfWeakCells::Iterator iterator(weak_stack_trace_list());
FixedArray* elements;
while ((elements = iterator.Next<FixedArray>()) != nullptr) {
for (int j = 1; j < elements->length(); j += 4) {
@@ -1063,6 +1087,33 @@ void Heap::FinalizeIncrementalMarking(GarbageCollectionReason gc_reason) {
}
}
+HistogramTimer* Heap::GCTypePriorityTimer(GarbageCollector collector) {
+ if (IsYoungGenerationCollector(collector)) {
+ if (isolate_->IsIsolateInBackground()) {
+ return isolate_->counters()->gc_scavenger_background();
+ }
+ return isolate_->counters()->gc_scavenger_foreground();
+ } else {
+ if (!incremental_marking()->IsStopped()) {
+ if (ShouldReduceMemory()) {
+ if (isolate_->IsIsolateInBackground()) {
+ return isolate_->counters()->gc_finalize_reduce_memory_background();
+ }
+ return isolate_->counters()->gc_finalize_reduce_memory_foreground();
+ } else {
+ if (isolate_->IsIsolateInBackground()) {
+ return isolate_->counters()->gc_finalize_background();
+ }
+ return isolate_->counters()->gc_finalize_foreground();
+ }
+ } else {
+ if (isolate_->IsIsolateInBackground()) {
+ return isolate_->counters()->gc_compactor_background();
+ }
+ return isolate_->counters()->gc_compactor_foreground();
+ }
+ }
+}
HistogramTimer* Heap::GCTypeTimer(GarbageCollector collector) {
if (IsYoungGenerationCollector(collector)) {
@@ -1163,7 +1214,7 @@ void Heap::CollectAllAvailableGarbage(GarbageCollectionReason gc_reason) {
// hope that eventually there will be no weak callback invocations.
// Therefore stop recollecting after several attempts.
if (gc_reason == GarbageCollectionReason::kLastResort) {
- InvokeOutOfMemoryCallback();
+ InvokeNearHeapLimitCallback();
}
RuntimeCallTimerScope runtime_timer(
isolate(), RuntimeCallCounterId::kGC_Custom_AllAvailableGarbage);
@@ -1226,7 +1277,7 @@ void Heap::ReportExternalMemoryPressure() {
}
if (incremental_marking()->IsStopped()) {
if (incremental_marking()->CanBeActivated()) {
- StartIncrementalMarking(i::Heap::kNoGCFlags,
+ StartIncrementalMarking(GCFlagsForIncrementalMarking(),
GarbageCollectionReason::kExternalMemoryPressure,
kGCCallbackFlagsForExternalMemory);
} else {
@@ -1267,12 +1318,16 @@ void Heap::EnsureFillerObjectAtTop() {
bool Heap::CollectGarbage(AllocationSpace space,
GarbageCollectionReason gc_reason,
const v8::GCCallbackFlags gc_callback_flags) {
- // The VM is in the GC state until exiting this function.
- VMState<GC> state(isolate());
-
const char* collector_reason = nullptr;
GarbageCollector collector = SelectGarbageCollector(space, &collector_reason);
+ if (!CanExpandOldGeneration(new_space()->Capacity())) {
+ InvokeNearHeapLimitCallback();
+ }
+
+ // The VM is in the GC state until exiting this function.
+ VMState<GC> state(isolate());
+
#ifdef V8_ENABLE_ALLOCATION_TIMEOUT
// Reset the allocation timeout, but make sure to allow at least a few
// allocations after a collection. The reason for this is that we have a lot
@@ -1311,6 +1366,10 @@ bool Heap::CollectGarbage(AllocationSpace space,
HistogramTimerScope histogram_timer_scope(gc_type_timer);
TRACE_EVENT0("v8", gc_type_timer->name());
+ HistogramTimer* gc_type_priority_timer = GCTypePriorityTimer(collector);
+ HistogramTimerScope histogram_timer_priority_scope(
+ gc_type_priority_timer);
+
next_gc_likely_to_collect_more =
PerformGarbageCollection(collector, gc_callback_flags);
}
@@ -1357,7 +1416,8 @@ bool Heap::CollectGarbage(AllocationSpace space,
if (IsYoungGenerationCollector(collector) &&
!ShouldAbortIncrementalMarking()) {
StartIncrementalMarkingIfAllocationLimitIsReached(
- kNoGCFlags, kGCCallbackScheduleIdleGarbageCollection);
+ GCFlagsForIncrementalMarking(),
+ kGCCallbackScheduleIdleGarbageCollection);
}
return next_gc_likely_to_collect_more;
@@ -1445,6 +1505,7 @@ class StringTableVerifier : public ObjectVisitor {
void VisitPointers(HeapObject* host, Object** start, Object** end) override {
// Visit all HeapObject pointers in [start, end).
for (Object** p = start; p < end; p++) {
+ DCHECK(!HasWeakHeapObjectTag(*p));
if ((*p)->IsHeapObject()) {
HeapObject* object = HeapObject::cast(*p);
Isolate* isolate = object->GetIsolate();
@@ -1454,6 +1515,10 @@ class StringTableVerifier : public ObjectVisitor {
}
}
}
+ void VisitPointers(HeapObject* host, MaybeObject** start,
+ MaybeObject** end) override {
+ UNREACHABLE();
+ }
};
@@ -1469,8 +1534,8 @@ bool Heap::ReserveSpace(Reservation* reservations, std::vector<Address>* maps) {
static const int kThreshold = 20;
while (gc_performed && counter++ < kThreshold) {
gc_performed = false;
- for (int space = NEW_SPACE; space < SerializerDeserializer::kNumberOfSpaces;
- space++) {
+ for (int space = FIRST_SPACE;
+ space < SerializerDeserializer::kNumberOfSpaces; space++) {
Reservation* reservation = &reservations[space];
DCHECK_LE(1, reservation->size());
if (reservation->at(0).size == 0) continue;
@@ -1543,7 +1608,7 @@ bool Heap::ReserveSpace(Reservation* reservations, std::vector<Address>* maps) {
// so that we cannot allocate space to deserialize the initial heap.
if (!deserialization_complete_) {
V8::FatalProcessOutOfMemory(
- "insufficient memory to create an Isolate");
+ isolate(), "insufficient memory to create an Isolate");
}
if (space == NEW_SPACE) {
CollectGarbage(NEW_SPACE, GarbageCollectionReason::kDeserializer);
@@ -1572,7 +1637,7 @@ void Heap::EnsureFromSpaceIsCommitted() {
// Committing memory to from space failed.
// Memory is exhausted and we will die.
- V8::FatalProcessOutOfMemory("Committing semi space failed.");
+ FatalProcessOutOfMemory("Committing semi space failed.");
}
@@ -1707,6 +1772,8 @@ bool Heap::PerformGarbageCollection(
external_memory_at_last_mark_compact_ = external_memory_;
external_memory_limit_ = external_memory_ + kExternalAllocationSoftLimit;
SetOldGenerationAllocationLimit(old_gen_size, gc_speed, mutator_speed);
+ CheckIneffectiveMarkCompact(
+ old_gen_size, tracer()->AverageMarkCompactMutatorUtilization());
} else if (HasLowYoungGenerationAllocationRate() &&
old_generation_size_configured_) {
DampenOldGenerationAllocationLimit(old_gen_size, gc_speed, mutator_speed);
@@ -1785,6 +1852,7 @@ void Heap::MarkCompact() {
}
void Heap::MinorMarkCompact() {
+#ifdef ENABLE_MINOR_MC
DCHECK(FLAG_minor_mc);
PauseAllocationObserversScope pause_observers(this);
@@ -1802,6 +1870,9 @@ void Heap::MinorMarkCompact() {
LOG(isolate_, ResourceEvent("MinorMarkCompact", "end"));
SetGCState(NOT_IN_GC);
+#else
+ UNREACHABLE();
+#endif // ENABLE_MINOR_MC
}
void Heap::MarkCompactEpilogue() {
@@ -1920,7 +1991,8 @@ static bool IsLogging(Isolate* isolate) {
return FLAG_verify_predictable || isolate->logger()->is_logging() ||
isolate->is_profiling() ||
(isolate->heap_profiler() != nullptr &&
- isolate->heap_profiler()->is_tracking_object_moves());
+ isolate->heap_profiler()->is_tracking_object_moves()) ||
+ isolate->heap()->has_heap_object_allocation_tracker();
}
class PageScavengingItem final : public ItemParallelJob::Item {
@@ -1978,10 +2050,14 @@ int Heap::NumberOfScavengeTasks() {
if (!FLAG_parallel_scavenge) return 1;
const int num_scavenge_tasks =
static_cast<int>(new_space()->TotalCapacity()) / MB;
- static int num_cores =
- 1 + static_cast<int>(
- V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads());
- return Max(1, Min(Min(num_scavenge_tasks, kMaxScavengerTasks), num_cores));
+ static int num_cores = V8::GetCurrentPlatform()->NumberOfWorkerThreads() + 1;
+ int tasks =
+ Max(1, Min(Min(num_scavenge_tasks, kMaxScavengerTasks), num_cores));
+ if (!CanExpandOldGeneration(static_cast<size_t>(tasks * Page::kPageSize))) {
+ // Optimize for memory usage near the heap limit.
+ tasks = 1;
+ }
+ return tasks;
}
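
NumberOfScavengeTasks above clamps scavenger parallelism three ways: by new-space capacity, by a fixed task cap, and by usable threads, and then falls back to a single task near the heap limit. A standalone sketch of that arithmetic; the constants and the headroom parameter are illustrative assumptions, not the real Page::kPageSize or kMaxScavengerTasks.

#include <algorithm>
#include <cstddef>

constexpr size_t kMB = 1024 * 1024;
constexpr size_t kPageSizeSketch = 512 * 1024;  // assumed page size
constexpr int kMaxScavengerTasksSketch = 8;     // assumed task cap

int NumberOfScavengeTasksSketch(size_t new_space_capacity, int worker_threads,
                                size_t old_generation_headroom) {
  // Roughly one task per MB of new space, capped by the task limit and by the
  // usable threads (workers plus the main thread).
  int by_capacity = static_cast<int>(new_space_capacity / kMB);
  int cores = worker_threads + 1;
  int tasks =
      std::max(1, std::min({by_capacity, kMaxScavengerTasksSketch, cores}));
  // Near the heap limit, fall back to a single task: each task may need its
  // own promotion page in old space.
  if (old_generation_headroom < static_cast<size_t>(tasks) * kPageSizeSketch) {
    tasks = 1;
  }
  return tasks;
}
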
void Heap::Scavenge() {
@@ -2158,6 +2234,33 @@ void Heap::ComputeFastPromotionMode(double survival_rate) {
}
}
+void Heap::UnprotectAndRegisterMemoryChunk(MemoryChunk* chunk) {
+ if (unprotected_memory_chunks_registry_enabled_) {
+ base::LockGuard<base::Mutex> guard(&unprotected_memory_chunks_mutex_);
+ if (unprotected_memory_chunks_.insert(chunk).second) {
+ chunk->SetReadAndWritable();
+ }
+ }
+}
+
+void Heap::UnprotectAndRegisterMemoryChunk(HeapObject* object) {
+ UnprotectAndRegisterMemoryChunk(MemoryChunk::FromAddress(object->address()));
+}
+
+void Heap::UnregisterUnprotectedMemoryChunk(MemoryChunk* chunk) {
+ unprotected_memory_chunks_.erase(chunk);
+}
+
+void Heap::ProtectUnprotectedMemoryChunks() {
+ DCHECK(unprotected_memory_chunks_registry_enabled_);
+ for (auto chunk = unprotected_memory_chunks_.begin();
+ chunk != unprotected_memory_chunks_.end(); chunk++) {
+ CHECK(memory_allocator()->IsMemoryChunkExecutable(*chunk));
+ (*chunk)->SetReadAndExecutable();
+ }
+ unprotected_memory_chunks_.clear();
+}
+
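
UnprotectAndRegisterMemoryChunk and ProtectUnprotectedMemoryChunks above amount to a mutex-guarded set that flips page protection at most once per chunk and restores it in one sweep. A minimal sketch, not V8 code; ChunkSketch stands in for MemoryChunk, and the real code also checks the registry-enabled flag and asserts that each chunk is executable.

#include <mutex>
#include <unordered_set>

struct ChunkSketch {
  void SetReadAndWritable() {}    // stand-in for the RW protection flip
  void SetReadAndExecutable() {}  // stand-in for the RX protection flip
};

class UnprotectedChunksSketch {
 public:
  void UnprotectAndRegister(ChunkSketch* chunk) {
    std::lock_guard<std::mutex> guard(mutex_);
    // insert().second is true only for the first registration, so each chunk
    // is made writable exactly once no matter how many objects land on it.
    if (chunks_.insert(chunk).second) chunk->SetReadAndWritable();
  }
  void ProtectAll() {
    for (ChunkSketch* chunk : chunks_) chunk->SetReadAndExecutable();
    chunks_.clear();
  }

 private:
  std::mutex mutex_;
  std::unordered_set<ChunkSketch*> chunks_;
};
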
String* Heap::UpdateNewSpaceReferenceInExternalStringTableEntry(Heap* heap,
Object** p) {
MapWord first_word = HeapObject::cast(*p)->map_word();
@@ -2447,225 +2550,6 @@ void Heap::ConfigureInitialOldGenerationSize() {
}
}
-AllocationResult Heap::AllocatePartialMap(InstanceType instance_type,
- int instance_size) {
- Object* result = nullptr;
- AllocationResult allocation = AllocateRaw(Map::kSize, MAP_SPACE);
- if (!allocation.To(&result)) return allocation;
- // Map::cast cannot be used due to uninitialized map field.
- Map* map = reinterpret_cast<Map*>(result);
- map->set_map_after_allocation(reinterpret_cast<Map*>(root(kMetaMapRootIndex)),
- SKIP_WRITE_BARRIER);
- map->set_instance_type(instance_type);
- map->set_instance_size(instance_size);
- // Initialize to only containing tagged fields.
- if (FLAG_unbox_double_fields) {
- map->set_layout_descriptor(LayoutDescriptor::FastPointerLayout());
- }
- // GetVisitorId requires a properly initialized LayoutDescriptor.
- map->set_visitor_id(Map::GetVisitorId(map));
- map->set_inobject_properties_start_or_constructor_function_index(0);
- DCHECK(!map->IsJSObjectMap());
- map->SetInObjectUnusedPropertyFields(0);
- map->set_bit_field(0);
- map->set_bit_field2(0);
- int bit_field3 = Map::EnumLengthBits::encode(kInvalidEnumCacheSentinel) |
- Map::OwnsDescriptorsBit::encode(true) |
- Map::ConstructionCounterBits::encode(Map::kNoSlackTracking);
- map->set_bit_field3(bit_field3);
- map->set_weak_cell_cache(Smi::kZero);
- map->set_elements_kind(TERMINAL_FAST_ELEMENTS_KIND);
- return map;
-}
-
-AllocationResult Heap::AllocateMap(InstanceType instance_type,
- int instance_size,
- ElementsKind elements_kind,
- int inobject_properties) {
- STATIC_ASSERT(LAST_JS_OBJECT_TYPE == LAST_TYPE);
- DCHECK_IMPLIES(instance_type >= FIRST_JS_OBJECT_TYPE &&
- !Map::CanHaveFastTransitionableElementsKind(instance_type),
- IsDictionaryElementsKind(elements_kind) ||
- IsTerminalElementsKind(elements_kind));
- HeapObject* result = nullptr;
- AllocationResult allocation = AllocateRaw(Map::kSize, MAP_SPACE);
- if (!allocation.To(&result)) return allocation;
-
- isolate()->counters()->maps_created()->Increment();
- result->set_map_after_allocation(meta_map(), SKIP_WRITE_BARRIER);
- Map* map = Map::cast(result);
- map->set_instance_type(instance_type);
- map->set_prototype(null_value(), SKIP_WRITE_BARRIER);
- map->set_constructor_or_backpointer(null_value(), SKIP_WRITE_BARRIER);
- map->set_instance_size(instance_size);
- if (map->IsJSObjectMap()) {
- map->SetInObjectPropertiesStartInWords(instance_size / kPointerSize -
- inobject_properties);
- DCHECK_EQ(map->GetInObjectProperties(), inobject_properties);
- } else {
- DCHECK_EQ(inobject_properties, 0);
- map->set_inobject_properties_start_or_constructor_function_index(0);
- }
- map->set_dependent_code(DependentCode::cast(empty_fixed_array()),
- SKIP_WRITE_BARRIER);
- map->set_weak_cell_cache(Smi::kZero);
- map->set_raw_transitions(Smi::kZero);
- map->SetInObjectUnusedPropertyFields(inobject_properties);
- map->set_instance_descriptors(empty_descriptor_array());
- if (FLAG_unbox_double_fields) {
- map->set_layout_descriptor(LayoutDescriptor::FastPointerLayout());
- }
- // Must be called only after |instance_type|, |instance_size| and
- // |layout_descriptor| are set.
- map->set_visitor_id(Map::GetVisitorId(map));
- map->set_bit_field(0);
- map->set_bit_field2(Map::IsExtensibleBit::kMask);
- int bit_field3 = Map::EnumLengthBits::encode(kInvalidEnumCacheSentinel) |
- Map::OwnsDescriptorsBit::encode(true) |
- Map::ConstructionCounterBits::encode(Map::kNoSlackTracking);
- map->set_bit_field3(bit_field3);
- map->set_elements_kind(elements_kind);
- map->set_new_target_is_base(true);
- if (FLAG_trace_maps) LOG(isolate(), MapCreate(map));
- return map;
-}
-
-
-AllocationResult Heap::AllocateFillerObject(int size, bool double_align,
- AllocationSpace space) {
- HeapObject* obj = nullptr;
- {
- AllocationAlignment align = double_align ? kDoubleAligned : kWordAligned;
- AllocationResult allocation = AllocateRaw(size, space, align);
- if (!allocation.To(&obj)) return allocation;
- }
-#ifdef DEBUG
- MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
- DCHECK(chunk->owner()->identity() == space);
-#endif
- CreateFillerObjectAt(obj->address(), size, ClearRecordedSlots::kNo);
- return obj;
-}
-
-
-AllocationResult Heap::AllocateHeapNumber(MutableMode mode,
- PretenureFlag pretenure) {
- // Statically ensure that it is safe to allocate heap numbers in paged
- // spaces.
- int size = HeapNumber::kSize;
- STATIC_ASSERT(HeapNumber::kSize <= kMaxRegularHeapObjectSize);
-
- AllocationSpace space = SelectSpace(pretenure);
-
- HeapObject* result = nullptr;
- {
- AllocationResult allocation = AllocateRaw(size, space, kDoubleUnaligned);
- if (!allocation.To(&result)) return allocation;
- }
-
- Map* map = mode == MUTABLE ? mutable_heap_number_map() : heap_number_map();
- HeapObject::cast(result)->set_map_after_allocation(map, SKIP_WRITE_BARRIER);
- return result;
-}
-
-AllocationResult Heap::AllocateBigInt(int length, PretenureFlag pretenure) {
- if (length < 0 || length > BigInt::kMaxLength) {
- v8::internal::Heap::FatalProcessOutOfMemory("invalid BigInt length", true);
- }
- int size = BigInt::SizeFor(length);
- AllocationSpace space = SelectSpace(pretenure);
- HeapObject* result = nullptr;
- {
- AllocationResult allocation = AllocateRaw(size, space);
- if (!allocation.To(&result)) return allocation;
- }
- result->set_map_after_allocation(bigint_map(), SKIP_WRITE_BARRIER);
- return result;
-}
-
-AllocationResult Heap::AllocateCell(Object* value) {
- int size = Cell::kSize;
- STATIC_ASSERT(Cell::kSize <= kMaxRegularHeapObjectSize);
-
- HeapObject* result = nullptr;
- {
- AllocationResult allocation = AllocateRaw(size, OLD_SPACE);
- if (!allocation.To(&result)) return allocation;
- }
- result->set_map_after_allocation(cell_map(), SKIP_WRITE_BARRIER);
- Cell::cast(result)->set_value(value);
- return result;
-}
-
-AllocationResult Heap::AllocateFeedbackCell(Map* map, HeapObject* value) {
- int size = FeedbackCell::kSize;
- STATIC_ASSERT(FeedbackCell::kSize <= kMaxRegularHeapObjectSize);
-
- HeapObject* result = nullptr;
- {
- AllocationResult allocation = AllocateRaw(size, OLD_SPACE);
- if (!allocation.To(&result)) return allocation;
- }
- result->set_map_after_allocation(map, SKIP_WRITE_BARRIER);
- FeedbackCell::cast(result)->set_value(value);
- return result;
-}
-
-AllocationResult Heap::AllocatePropertyCell(Name* name) {
- DCHECK(name->IsUniqueName());
- int size = PropertyCell::kSize;
- STATIC_ASSERT(PropertyCell::kSize <= kMaxRegularHeapObjectSize);
-
- HeapObject* result = nullptr;
- AllocationResult allocation = AllocateRaw(size, OLD_SPACE);
- if (!allocation.To(&result)) return allocation;
-
- result->set_map_after_allocation(global_property_cell_map(),
- SKIP_WRITE_BARRIER);
- PropertyCell* cell = PropertyCell::cast(result);
- cell->set_dependent_code(DependentCode::cast(empty_fixed_array()),
- SKIP_WRITE_BARRIER);
- cell->set_property_details(PropertyDetails(Smi::kZero));
- cell->set_name(name);
- cell->set_value(the_hole_value());
- return result;
-}
-
-
-AllocationResult Heap::AllocateWeakCell(HeapObject* value) {
- int size = WeakCell::kSize;
- STATIC_ASSERT(WeakCell::kSize <= kMaxRegularHeapObjectSize);
- HeapObject* result = nullptr;
- {
- AllocationResult allocation = AllocateRaw(size, OLD_SPACE);
- if (!allocation.To(&result)) return allocation;
- }
- result->set_map_after_allocation(weak_cell_map(), SKIP_WRITE_BARRIER);
- WeakCell::cast(result)->initialize(value);
- return result;
-}
-
-
-AllocationResult Heap::AllocateTransitionArray(int capacity) {
- DCHECK_LT(0, capacity);
- HeapObject* raw_array = nullptr;
- {
- AllocationResult allocation = AllocateRawFixedArray(capacity, TENURED);
- if (!allocation.To(&raw_array)) return allocation;
- }
- raw_array->set_map_after_allocation(transition_array_map(),
- SKIP_WRITE_BARRIER);
- TransitionArray* array = TransitionArray::cast(raw_array);
- array->set_length(capacity);
- MemsetPointer(array->data_start(), undefined_value(), capacity);
- // Transition arrays are tenured. When black allocation is on we have to
- // add the transition array to the list of encountered_transition_arrays.
- if (incremental_marking()->black_allocation()) {
- mark_compact_collector()->AddTransitionArray(array);
- }
- return array;
-}
-
void Heap::CreateJSEntryStub() {
JSEntryStub stub(isolate(), StackFrame::ENTRY);
set_js_entry_code(*stub.GetCode());
@@ -2723,8 +2607,6 @@ bool Heap::RootCanBeWrittenAfterInitialization(Heap::RootListIndex root_index) {
case kMaterializedObjectsRootIndex:
case kMicrotaskQueueRootIndex:
case kDetachedContextsRootIndex:
- case kWeakObjectToCodeTableRootIndex:
- case kWeakNewSpaceObjectToCodeListRootIndex:
case kRetainedMapsRootIndex:
case kRetainingPathTargetsRootIndex:
case kFeedbackVectorsForProfilingToolsRootIndex:
@@ -2781,34 +2663,38 @@ void Heap::FlushNumberStringCache() {
}
}
+namespace {
-Map* Heap::MapForFixedTypedArray(ExternalArrayType array_type) {
- return Map::cast(roots_[RootIndexForFixedTypedArray(array_type)]);
-}
-
-
-Heap::RootListIndex Heap::RootIndexForFixedTypedArray(
- ExternalArrayType array_type) {
+Heap::RootListIndex RootIndexForFixedTypedArray(ExternalArrayType array_type) {
switch (array_type) {
#define ARRAY_TYPE_TO_ROOT_INDEX(Type, type, TYPE, ctype, size) \
case kExternal##Type##Array: \
- return kFixed##Type##ArrayMapRootIndex;
+ return Heap::kFixed##Type##ArrayMapRootIndex;
TYPED_ARRAYS(ARRAY_TYPE_TO_ROOT_INDEX)
#undef ARRAY_TYPE_TO_ROOT_INDEX
+ }
+ UNREACHABLE();
+}
+Heap::RootListIndex RootIndexForFixedTypedArray(ElementsKind elements_kind) {
+ switch (elements_kind) {
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
+ case TYPE##_ELEMENTS: \
+ return Heap::kFixed##Type##ArrayMapRootIndex;
+ TYPED_ARRAYS(TYPED_ARRAY_CASE)
default:
UNREACHABLE();
+#undef TYPED_ARRAY_CASE
}
}
-
-Heap::RootListIndex Heap::RootIndexForEmptyFixedTypedArray(
- ElementsKind elementsKind) {
- switch (elementsKind) {
+Heap::RootListIndex RootIndexForEmptyFixedTypedArray(
+ ElementsKind elements_kind) {
+ switch (elements_kind) {
#define ELEMENT_KIND_TO_ROOT_INDEX(Type, type, TYPE, ctype, size) \
case TYPE##_ELEMENTS: \
- return kEmptyFixed##Type##ArrayRootIndex;
+ return Heap::kEmptyFixed##Type##ArrayRootIndex;
TYPED_ARRAYS(ELEMENT_KIND_TO_ROOT_INDEX)
#undef ELEMENT_KIND_TO_ROOT_INDEX
@@ -2817,122 +2703,24 @@ Heap::RootListIndex Heap::RootIndexForEmptyFixedTypedArray(
}
}
-FixedTypedArrayBase* Heap::EmptyFixedTypedArrayForMap(const Map* map) {
- return FixedTypedArrayBase::cast(
- roots_[RootIndexForEmptyFixedTypedArray(map->elements_kind())]);
-}
-
-
-AllocationResult Heap::AllocateForeign(Address address,
- PretenureFlag pretenure) {
- // Statically ensure that it is safe to allocate foreigns in paged spaces.
- STATIC_ASSERT(Foreign::kSize <= kMaxRegularHeapObjectSize);
- AllocationSpace space = (pretenure == TENURED) ? OLD_SPACE : NEW_SPACE;
- Foreign* result = nullptr;
- AllocationResult allocation = Allocate(foreign_map(), space);
- if (!allocation.To(&result)) return allocation;
- result->set_foreign_address(address);
- return result;
-}
-
-AllocationResult Heap::AllocateSmallOrderedHashSet(int capacity,
- PretenureFlag pretenure) {
- DCHECK_EQ(0, capacity % SmallOrderedHashSet::kLoadFactor);
- CHECK_GE(SmallOrderedHashSet::kMaxCapacity, capacity);
-
- int size = SmallOrderedHashSet::Size(capacity);
- AllocationSpace space = SelectSpace(pretenure);
- HeapObject* result = nullptr;
- {
- AllocationResult allocation = AllocateRaw(size, space);
- if (!allocation.To(&result)) return allocation;
- }
-
- result->set_map_after_allocation(small_ordered_hash_set_map(),
- SKIP_WRITE_BARRIER);
- Handle<SmallOrderedHashSet> table(SmallOrderedHashSet::cast(result));
- table->Initialize(isolate(), capacity);
- return result;
-}
-
-AllocationResult Heap::AllocateSmallOrderedHashMap(int capacity,
- PretenureFlag pretenure) {
- DCHECK_EQ(0, capacity % SmallOrderedHashMap::kLoadFactor);
- CHECK_GE(SmallOrderedHashMap::kMaxCapacity, capacity);
-
- int size = SmallOrderedHashMap::Size(capacity);
- AllocationSpace space = SelectSpace(pretenure);
- HeapObject* result = nullptr;
- {
- AllocationResult allocation = AllocateRaw(size, space);
- if (!allocation.To(&result)) return allocation;
- }
+} // namespace
- result->set_map_after_allocation(small_ordered_hash_map_map(),
- SKIP_WRITE_BARRIER);
- Handle<SmallOrderedHashMap> table(SmallOrderedHashMap::cast(result));
- table->Initialize(isolate(), capacity);
- return result;
+Map* Heap::MapForFixedTypedArray(ExternalArrayType array_type) {
+ return Map::cast(roots_[RootIndexForFixedTypedArray(array_type)]);
}
-AllocationResult Heap::AllocateByteArray(int length, PretenureFlag pretenure) {
- if (length < 0 || length > ByteArray::kMaxLength) {
- v8::internal::Heap::FatalProcessOutOfMemory("invalid array length", true);
- }
- int size = ByteArray::SizeFor(length);
- AllocationSpace space = SelectSpace(pretenure);
- HeapObject* result = nullptr;
- {
- AllocationResult allocation = AllocateRaw(size, space);
- if (!allocation.To(&result)) return allocation;
- }
-
- result->set_map_after_allocation(byte_array_map(), SKIP_WRITE_BARRIER);
- ByteArray::cast(result)->set_length(length);
- ByteArray::cast(result)->clear_padding();
- return result;
+Map* Heap::MapForFixedTypedArray(ElementsKind elements_kind) {
+ return Map::cast(roots_[RootIndexForFixedTypedArray(elements_kind)]);
}
-
-AllocationResult Heap::AllocateBytecodeArray(int length,
- const byte* const raw_bytecodes,
- int frame_size,
- int parameter_count,
- FixedArray* constant_pool) {
- if (length < 0 || length > BytecodeArray::kMaxLength) {
- v8::internal::Heap::FatalProcessOutOfMemory("invalid array length", true);
- }
-  // Bytecode array is pretenured, so the constant pool array should be too.
- DCHECK(!InNewSpace(constant_pool));
-
- int size = BytecodeArray::SizeFor(length);
- HeapObject* result = nullptr;
- {
- AllocationResult allocation = AllocateRaw(size, OLD_SPACE);
- if (!allocation.To(&result)) return allocation;
- }
-
- result->set_map_after_allocation(bytecode_array_map(), SKIP_WRITE_BARRIER);
- BytecodeArray* instance = BytecodeArray::cast(result);
- instance->set_length(length);
- instance->set_frame_size(frame_size);
- instance->set_parameter_count(parameter_count);
- instance->set_incoming_new_target_or_generator_register(
- interpreter::Register::invalid_value());
- instance->set_interrupt_budget(interpreter::Interpreter::InterruptBudget());
- instance->set_osr_loop_nesting_level(0);
- instance->set_bytecode_age(BytecodeArray::kNoAgeBytecodeAge);
- instance->set_constant_pool(constant_pool);
- instance->set_handler_table(empty_byte_array());
- instance->set_source_position_table(empty_byte_array());
- CopyBytes(instance->GetFirstBytecodeAddress(), raw_bytecodes, length);
- instance->clear_padding();
-
- return result;
+FixedTypedArrayBase* Heap::EmptyFixedTypedArrayForMap(const Map* map) {
+ return FixedTypedArrayBase::cast(
+ roots_[RootIndexForEmptyFixedTypedArray(map->elements_kind())]);
}
HeapObject* Heap::CreateFillerObjectAt(Address addr, int size,
- ClearRecordedSlots mode) {
+ ClearRecordedSlots clear_slots_mode,
+ ClearFreedMemoryMode clear_memory_mode) {
if (size == 0) return nullptr;
HeapObject* filler = HeapObject::FromAddress(addr);
if (size == kPointerSize) {
@@ -2943,14 +2731,22 @@ HeapObject* Heap::CreateFillerObjectAt(Address addr, int size,
filler->set_map_after_allocation(
reinterpret_cast<Map*>(root(kTwoPointerFillerMapRootIndex)),
SKIP_WRITE_BARRIER);
+ if (clear_memory_mode == ClearFreedMemoryMode::kClearFreedMemory) {
+ Memory::Address_at(addr + kPointerSize) =
+ reinterpret_cast<Address>(kClearedFreeMemoryValue);
+ }
} else {
DCHECK_GT(size, 2 * kPointerSize);
filler->set_map_after_allocation(
reinterpret_cast<Map*>(root(kFreeSpaceMapRootIndex)),
SKIP_WRITE_BARRIER);
FreeSpace::cast(filler)->relaxed_write_size(size);
+ if (clear_memory_mode == ClearFreedMemoryMode::kClearFreedMemory) {
+ memset(reinterpret_cast<void*>(addr + 2 * kPointerSize),
+ kClearedFreeMemoryValue, size - 2 * kPointerSize);
+ }
}
- if (mode == ClearRecordedSlots::kYes) {
+ if (clear_slots_mode == ClearRecordedSlots::kYes) {
ClearRecordedSlotRange(addr, addr + size);
}
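
The new ClearFreedMemoryMode parameter to CreateFillerObjectAt controls whether the bytes behind a filler's header are overwritten with a recognisable pattern. A simplified standalone sketch of that effect; the map word, the fill byte and the layout are invented stand-ins for the real filler maps and kClearedFreeMemoryValue.

#include <cstddef>
#include <cstdint>
#include <cstring>

enum class ClearFreedMemoryModeSketch {
  kClearFreedMemory,
  kDontClearFreedMemory
};

constexpr uint8_t kClearedPatternSketch = 0xfe;  // assumed debug fill byte

void WriteFillerSketch(uintptr_t* start, size_t size_in_words,
                       ClearFreedMemoryModeSketch mode) {
  // A one- or two-word filler is just a map word; larger fillers also carry
  // their size so heap iteration can step over the freed region.
  size_t header_words = 1;
  start[0] = 0;  // stand-in for the filler/free-space map word
  if (size_in_words > 2) {
    start[1] = size_in_words;  // stand-in for FreeSpace::relaxed_write_size
    header_words = 2;
  }
  // When clearing is requested, everything behind the header is overwritten;
  // the header itself must stay intact.
  if (mode == ClearFreedMemoryModeSketch::kClearFreedMemory &&
      size_in_words > header_words) {
    std::memset(start + header_words, kClearedPatternSketch,
                (size_in_words - header_words) * sizeof(uintptr_t));
  }
}
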
@@ -2985,8 +2781,9 @@ FixedArrayBase* Heap::LeftTrimFixedArray(FixedArrayBase* object,
int elements_to_trim) {
CHECK_NOT_NULL(object);
DCHECK(CanMoveObjectStart(object));
- DCHECK(!object->IsFixedTypedArrayBase());
- DCHECK(!object->IsByteArray());
+  // Add a custom visitor to the concurrent marker if a new left-trimmable
+  // type is added.
+ DCHECK(object->IsFixedArray() || object->IsFixedDoubleArray());
const int element_size = object->IsFixedArray() ? kPointerSize : kDoubleSize;
const int bytes_to_trim = elements_to_trim * element_size;
Map* map = object->map();
@@ -3097,1030 +2894,13 @@ void Heap::RightTrimFixedArray(FixedArrayBase* object, int elements_to_trim) {
// avoid races with the sweeper thread.
object->synchronized_set_length(len - elements_to_trim);
- // Notify the heap profiler of change in object layout. The array may not be
- // moved during GC, and size has to be adjusted nevertheless.
- HeapProfiler* profiler = isolate()->heap_profiler();
- if (profiler->is_tracking_allocations()) {
- profiler->UpdateObjectSizeEvent(object->address(), object->Size());
- }
-}
-
-
-AllocationResult Heap::AllocateFixedTypedArrayWithExternalPointer(
- int length, ExternalArrayType array_type, void* external_pointer,
- PretenureFlag pretenure) {
- int size = FixedTypedArrayBase::kHeaderSize;
- AllocationSpace space = SelectSpace(pretenure);
- HeapObject* result = nullptr;
- {
- AllocationResult allocation = AllocateRaw(size, space);
- if (!allocation.To(&result)) return allocation;
- }
-
- result->set_map_after_allocation(MapForFixedTypedArray(array_type),
- SKIP_WRITE_BARRIER);
- FixedTypedArrayBase* elements = FixedTypedArrayBase::cast(result);
- elements->set_base_pointer(Smi::kZero, SKIP_WRITE_BARRIER);
- elements->set_external_pointer(external_pointer, SKIP_WRITE_BARRIER);
- elements->set_length(length);
- return elements;
-}
-
-static void ForFixedTypedArray(ExternalArrayType array_type, int* element_size,
- ElementsKind* element_kind) {
- switch (array_type) {
-#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
- case kExternal##Type##Array: \
- *element_size = size; \
- *element_kind = TYPE##_ELEMENTS; \
- return;
-
- TYPED_ARRAYS(TYPED_ARRAY_CASE)
-#undef TYPED_ARRAY_CASE
-
- default:
- *element_size = 0; // Bogus
- *element_kind = UINT8_ELEMENTS; // Bogus
- UNREACHABLE();
- }
-}
-
-
-AllocationResult Heap::AllocateFixedTypedArray(int length,
- ExternalArrayType array_type,
- bool initialize,
- PretenureFlag pretenure) {
- int element_size;
- ElementsKind elements_kind;
- ForFixedTypedArray(array_type, &element_size, &elements_kind);
- int size = OBJECT_POINTER_ALIGN(length * element_size +
- FixedTypedArrayBase::kDataOffset);
- AllocationSpace space = SelectSpace(pretenure);
-
- HeapObject* object = nullptr;
- AllocationResult allocation = AllocateRaw(
- size, space,
- array_type == kExternalFloat64Array ? kDoubleAligned : kWordAligned);
- if (!allocation.To(&object)) return allocation;
-
- object->set_map_after_allocation(MapForFixedTypedArray(array_type),
- SKIP_WRITE_BARRIER);
- FixedTypedArrayBase* elements = FixedTypedArrayBase::cast(object);
- elements->set_base_pointer(elements, SKIP_WRITE_BARRIER);
- elements->set_external_pointer(
- ExternalReference::fixed_typed_array_base_data_offset().address(),
- SKIP_WRITE_BARRIER);
- elements->set_length(length);
- if (initialize) memset(elements->DataPtr(), 0, elements->DataSize());
- return elements;
-}
-
-AllocationResult Heap::AllocateCode(int object_size, Movability movability) {
- DCHECK(IsAligned(static_cast<intptr_t>(object_size), kCodeAlignment));
- AllocationResult allocation = AllocateRaw(object_size, CODE_SPACE);
-
- HeapObject* result = nullptr;
- if (!allocation.To(&result)) return allocation;
- if (movability == kImmovable) {
- Address address = result->address();
- MemoryChunk* chunk = MemoryChunk::FromAddress(address);
- // Code objects which should stay at a fixed address are allocated either
- // in the first page of code space, in large object space, or (during
- // snapshot creation) the containing page is marked as immovable.
- if (!Heap::IsImmovable(result)) {
- if (isolate()->serializer_enabled() ||
- code_space_->FirstPage()->Contains(address)) {
- chunk->MarkNeverEvacuate();
- } else {
- // Discard the first code allocation, which was on a page where it could
- // be moved.
- CreateFillerObjectAt(result->address(), object_size,
- ClearRecordedSlots::kNo);
- allocation = lo_space_->AllocateRaw(object_size, EXECUTABLE);
- if (!allocation.To(&result)) return allocation;
- OnAllocationEvent(result, object_size);
- }
- }
- }
-
- result->set_map_after_allocation(code_map(), SKIP_WRITE_BARRIER);
- Code* code = Code::cast(result);
- DCHECK(IsAligned(bit_cast<intptr_t>(code->address()), kCodeAlignment));
- DCHECK(!memory_allocator()->code_range()->valid() ||
- memory_allocator()->code_range()->contains(code->address()) ||
- object_size <= code_space()->AreaSize());
- return code;
-}
-
-AllocationResult Heap::AllocateCode(
- const CodeDesc& desc, Code::Kind kind, Handle<Object> self_ref,
- int32_t builtin_index, ByteArray* reloc_info,
- CodeDataContainer* data_container, ByteArray* source_position_table,
- DeoptimizationData* deopt_data, Movability movability, uint32_t stub_key,
- bool is_turbofanned, int stack_slots, int safepoint_table_offset,
- int handler_table_offset) {
- bool has_unwinding_info = desc.unwinding_info != nullptr;
- DCHECK((has_unwinding_info && desc.unwinding_info_size > 0) ||
- (!has_unwinding_info && desc.unwinding_info_size == 0));
-
- // Compute size.
- int body_size = desc.instr_size;
- int unwinding_info_size_field_size = kInt64Size;
- if (has_unwinding_info) {
- body_size = RoundUp(body_size, kInt64Size) + desc.unwinding_info_size +
- unwinding_info_size_field_size;
- }
- int object_size = Code::SizeFor(RoundUp(body_size, kObjectAlignment));
-
- Code* code = nullptr;
- CodeSpaceMemoryModificationScope code_allocation(this);
- AllocationResult allocation = AllocateCode(object_size, movability);
- if (!allocation.To(&code)) return allocation;
-
- // The code object has not been fully initialized yet. We rely on the
- // fact that no allocation will happen from this point on.
- DisallowHeapAllocation no_gc;
- code->set_instruction_size(desc.instr_size);
- code->set_relocation_info(reloc_info);
- code->initialize_flags(kind, has_unwinding_info, is_turbofanned, stack_slots);
- code->set_safepoint_table_offset(safepoint_table_offset);
- code->set_handler_table_offset(handler_table_offset);
- code->set_code_data_container(data_container);
- code->set_has_tagged_params(true);
- code->set_deoptimization_data(deopt_data);
- code->set_stub_key(stub_key);
- code->set_source_position_table(source_position_table);
- code->set_protected_instructions(empty_fixed_array(), SKIP_WRITE_BARRIER);
- code->set_constant_pool_offset(desc.instr_size - desc.constant_pool_size);
- code->set_builtin_index(builtin_index);
- code->set_trap_handler_index(Smi::FromInt(-1));
-
- switch (code->kind()) {
- case Code::OPTIMIZED_FUNCTION:
- code->set_marked_for_deoptimization(false);
- break;
- case Code::JS_TO_WASM_FUNCTION:
- case Code::C_WASM_ENTRY:
- case Code::WASM_FUNCTION:
- code->set_has_tagged_params(false);
- break;
- default:
- break;
- }
-
- // Allow self references to created code object by patching the handle to
- // point to the newly allocated Code object.
- if (!self_ref.is_null()) *(self_ref.location()) = code;
-
- // Migrate generated code.
- // The generated code can contain Object** values (typically from handles)
- // that are dereferenced during the copy to point directly to the actual heap
- // objects. These pointers can include references to the code object itself,
- // through the self_reference parameter.
- code->CopyFrom(desc);
-
- code->clear_padding();
-
-#ifdef VERIFY_HEAP
- if (FLAG_verify_heap) code->ObjectVerify();
-#endif
- DCHECK(IsAligned(bit_cast<intptr_t>(code->address()), kCodeAlignment));
- DCHECK(!memory_allocator()->code_range()->valid() ||
- memory_allocator()->code_range()->contains(code->address()) ||
- object_size <= code_space()->AreaSize());
- return code;
-}
-
-AllocationResult Heap::CopyCode(Code* code, CodeDataContainer* data_container) {
- AllocationResult allocation;
-
- HeapObject* result = nullptr;
- // Allocate an object the same size as the code object.
- int obj_size = code->Size();
- allocation = AllocateRaw(obj_size, CODE_SPACE);
- if (!allocation.To(&result)) return allocation;
-
- // Copy code object.
- Address old_addr = code->address();
- Address new_addr = result->address();
- CopyBlock(new_addr, old_addr, obj_size);
- Code* new_code = Code::cast(result);
-
- // Set the {CodeDataContainer}, it cannot be shared.
- new_code->set_code_data_container(data_container);
-
-  // Clear the trap handler index since it can't be shared between code
-  // objects. We have to do this before calling Relocate, because Relocate
-  // would adjust the base pointer for the old code.
- new_code->set_trap_handler_index(Smi::FromInt(trap_handler::kInvalidIndex));
-
- // Relocate the copy.
- new_code->Relocate(new_addr - old_addr);
- // We have to iterate over the object and process its pointers when black
- // allocation is on.
- incremental_marking()->ProcessBlackAllocatedObject(new_code);
- // Record all references to embedded objects in the new code object.
- RecordWritesIntoCode(new_code);
-
-#ifdef VERIFY_HEAP
- if (FLAG_verify_heap) new_code->ObjectVerify();
-#endif
- DCHECK(IsAligned(bit_cast<intptr_t>(new_code->address()), kCodeAlignment));
- DCHECK(!memory_allocator()->code_range()->valid() ||
- memory_allocator()->code_range()->contains(new_code->address()) ||
- obj_size <= code_space()->AreaSize());
- return new_code;
-}
-
-AllocationResult Heap::CopyBytecodeArray(BytecodeArray* bytecode_array) {
- int size = BytecodeArray::SizeFor(bytecode_array->length());
- HeapObject* result = nullptr;
- {
- AllocationResult allocation = AllocateRaw(size, OLD_SPACE);
- if (!allocation.To(&result)) return allocation;
- }
-
- result->set_map_after_allocation(bytecode_array_map(), SKIP_WRITE_BARRIER);
- BytecodeArray* copy = BytecodeArray::cast(result);
- copy->set_length(bytecode_array->length());
- copy->set_frame_size(bytecode_array->frame_size());
- copy->set_parameter_count(bytecode_array->parameter_count());
- copy->set_incoming_new_target_or_generator_register(
- bytecode_array->incoming_new_target_or_generator_register());
- copy->set_constant_pool(bytecode_array->constant_pool());
- copy->set_handler_table(bytecode_array->handler_table());
- copy->set_source_position_table(bytecode_array->source_position_table());
- copy->set_interrupt_budget(bytecode_array->interrupt_budget());
- copy->set_osr_loop_nesting_level(bytecode_array->osr_loop_nesting_level());
- copy->set_bytecode_age(bytecode_array->bytecode_age());
- bytecode_array->CopyBytecodesTo(copy);
- return copy;
-}
-
-void Heap::InitializeAllocationMemento(AllocationMemento* memento,
- AllocationSite* allocation_site) {
- memento->set_map_after_allocation(allocation_memento_map(),
- SKIP_WRITE_BARRIER);
- DCHECK(allocation_site->map() == allocation_site_map());
- memento->set_allocation_site(allocation_site, SKIP_WRITE_BARRIER);
- if (FLAG_allocation_site_pretenuring) {
- allocation_site->IncrementMementoCreateCount();
- }
-}
-
-
-AllocationResult Heap::Allocate(Map* map, AllocationSpace space,
- AllocationSite* allocation_site) {
- DCHECK(gc_state_ == NOT_IN_GC);
- DCHECK(map->instance_type() != MAP_TYPE);
- int size = map->instance_size();
- if (allocation_site != nullptr) {
- size += AllocationMemento::kSize;
- }
- HeapObject* result = nullptr;
- AllocationResult allocation = AllocateRaw(size, space);
- if (!allocation.To(&result)) return allocation;
- // New space objects are allocated white.
- WriteBarrierMode write_barrier_mode =
- space == NEW_SPACE ? SKIP_WRITE_BARRIER : UPDATE_WRITE_BARRIER;
- result->set_map_after_allocation(map, write_barrier_mode);
- if (allocation_site != nullptr) {
- AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>(
- reinterpret_cast<Address>(result) + map->instance_size());
- InitializeAllocationMemento(alloc_memento, allocation_site);
- }
- return result;
-}
-
-AllocationResult Heap::AllocateJSPromise(JSFunction* constructor,
- PretenureFlag pretenure) {
- AllocationResult allocation = AllocateJSObject(constructor, pretenure);
- JSPromise* promise = nullptr;
- if (!allocation.To(&promise)) return allocation;
-
- // Setup JSPromise fields
- promise->set_reactions_or_result(Smi::kZero);
- promise->set_flags(0);
- for (int i = 0; i < v8::Promise::kEmbedderFieldCount; i++) {
- promise->SetEmbedderField(i, Smi::kZero);
- }
- return promise;
-}
-
-void Heap::InitializeJSObjectFromMap(JSObject* obj, Object* properties,
- Map* map) {
- obj->set_raw_properties_or_hash(properties);
- obj->initialize_elements();
- // TODO(1240798): Initialize the object's body using valid initial values
- // according to the object's initial map. For example, if the map's
- // instance type is JS_ARRAY_TYPE, the length field should be initialized
- // to a number (e.g. Smi::kZero) and the elements initialized to a
- // fixed array (e.g. Heap::empty_fixed_array()). Currently, the object
- // verification code has to cope with (temporarily) invalid objects. See
- // for example, JSArray::JSArrayVerify).
- InitializeJSObjectBody(obj, map, JSObject::kHeaderSize);
-}
-
-
-void Heap::InitializeJSObjectBody(JSObject* obj, Map* map, int start_offset) {
- if (start_offset == map->instance_size()) return;
- DCHECK_LT(start_offset, map->instance_size());
-
- // We cannot always fill with one_pointer_filler_map because objects
- // created from API functions expect their embedder fields to be initialized
- // with undefined_value.
- // Pre-allocated fields need to be initialized with undefined_value as well
- // so that object accesses before the constructor completes (e.g. in the
- // debugger) will not cause a crash.
-
- // In case of Array subclassing the |map| could already be transitioned
- // to different elements kind from the initial map on which we track slack.
- bool in_progress = map->IsInobjectSlackTrackingInProgress();
- Object* filler;
- if (in_progress) {
- filler = one_pointer_filler_map();
- } else {
- filler = undefined_value();
- }
- obj->InitializeBody(map, start_offset, Heap::undefined_value(), filler);
- if (in_progress) {
- map->FindRootMap()->InobjectSlackTrackingStep();
- }
-}
-
-
-AllocationResult Heap::AllocateJSObjectFromMap(
- Map* map, PretenureFlag pretenure, AllocationSite* allocation_site) {
- // JSFunctions should be allocated using AllocateFunction to be
- // properly initialized.
- DCHECK(map->instance_type() != JS_FUNCTION_TYPE);
-
- // Both types of global objects should be allocated using
- // AllocateGlobalObject to be properly initialized.
- DCHECK(map->instance_type() != JS_GLOBAL_OBJECT_TYPE);
-
- // Allocate the backing storage for the properties.
- FixedArray* properties = empty_fixed_array();
-
- // Allocate the JSObject.
- AllocationSpace space = SelectSpace(pretenure);
- JSObject* js_obj = nullptr;
- AllocationResult allocation = Allocate(map, space, allocation_site);
- if (!allocation.To(&js_obj)) return allocation;
-
- // Initialize the JSObject.
- InitializeJSObjectFromMap(js_obj, properties, map);
- DCHECK(js_obj->HasFastElements() || js_obj->HasFixedTypedArrayElements() ||
- js_obj->HasFastStringWrapperElements() ||
- js_obj->HasFastArgumentsElements());
- return js_obj;
-}
-
-
-AllocationResult Heap::AllocateJSObject(JSFunction* constructor,
- PretenureFlag pretenure,
- AllocationSite* allocation_site) {
- DCHECK(constructor->has_initial_map());
-
- // Allocate the object based on the constructors initial map.
- AllocationResult allocation = AllocateJSObjectFromMap(
- constructor->initial_map(), pretenure, allocation_site);
-#ifdef DEBUG
- // Make sure result is NOT a global object if valid.
- HeapObject* obj = nullptr;
- DCHECK(!allocation.To(&obj) || !obj->IsJSGlobalObject());
-#endif
- return allocation;
-}
-
-
-AllocationResult Heap::CopyJSObject(JSObject* source, AllocationSite* site) {
- // Make the clone.
- Map* map = source->map();
-
- // We can only clone regexps, normal objects, api objects, errors or arrays.
- // Copying anything else will break invariants.
- CHECK(map->instance_type() == JS_REGEXP_TYPE ||
- map->instance_type() == JS_OBJECT_TYPE ||
- map->instance_type() == JS_ERROR_TYPE ||
- map->instance_type() == JS_ARRAY_TYPE ||
- map->instance_type() == JS_API_OBJECT_TYPE ||
- map->instance_type() == WASM_INSTANCE_TYPE ||
- map->instance_type() == WASM_MEMORY_TYPE ||
- map->instance_type() == WASM_MODULE_TYPE ||
- map->instance_type() == WASM_TABLE_TYPE ||
- map->instance_type() == JS_SPECIAL_API_OBJECT_TYPE);
-
- int object_size = map->instance_size();
- HeapObject* clone = nullptr;
-
- DCHECK(site == nullptr || AllocationSite::CanTrack(map->instance_type()));
-
- int adjusted_object_size =
- site != nullptr ? object_size + AllocationMemento::kSize : object_size;
- AllocationResult allocation = AllocateRaw(adjusted_object_size, NEW_SPACE);
- if (!allocation.To(&clone)) return allocation;
-
- SLOW_DCHECK(InNewSpace(clone));
- // Since we know the clone is allocated in new space, we can copy
- // the contents without worrying about updating the write barrier.
- CopyBlock(clone->address(), source->address(), object_size);
-
- if (site != nullptr) {
- AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>(
- reinterpret_cast<Address>(clone) + object_size);
- InitializeAllocationMemento(alloc_memento, site);
- }
-
- SLOW_DCHECK(JSObject::cast(clone)->GetElementsKind() ==
- source->GetElementsKind());
- FixedArrayBase* elements = FixedArrayBase::cast(source->elements());
- // Update elements if necessary.
- if (elements->length() > 0) {
- FixedArrayBase* elem = nullptr;
- {
- AllocationResult allocation;
- if (elements->map() == fixed_cow_array_map()) {
- allocation = FixedArray::cast(elements);
- } else if (source->HasDoubleElements()) {
- allocation = CopyFixedDoubleArray(FixedDoubleArray::cast(elements));
- } else {
- allocation = CopyFixedArray(FixedArray::cast(elements));
- }
- if (!allocation.To(&elem)) return allocation;
- }
- JSObject::cast(clone)->set_elements(elem, SKIP_WRITE_BARRIER);
- }
-
- // Update properties if necessary.
- if (source->HasFastProperties()) {
- if (source->property_array()->length() > 0) {
- PropertyArray* properties = source->property_array();
- PropertyArray* prop = nullptr;
- {
- // TODO(gsathya): Do not copy hash code.
- AllocationResult allocation = CopyPropertyArray(properties);
- if (!allocation.To(&prop)) return allocation;
- }
- JSObject::cast(clone)->set_raw_properties_or_hash(prop,
- SKIP_WRITE_BARRIER);
- }
- } else {
- FixedArray* properties = FixedArray::cast(source->property_dictionary());
- FixedArray* prop = nullptr;
- {
- AllocationResult allocation = CopyFixedArray(properties);
- if (!allocation.To(&prop)) return allocation;
- }
- JSObject::cast(clone)->set_raw_properties_or_hash(prop, SKIP_WRITE_BARRIER);
- }
- // Return the new clone.
- return clone;
-}
-
-
-static inline void WriteOneByteData(Vector<const char> vector, uint8_t* chars,
- int len) {
- // Only works for one byte strings.
- DCHECK(vector.length() == len);
- MemCopy(chars, vector.start(), len);
-}
-
-static inline void WriteTwoByteData(Vector<const char> vector, uint16_t* chars,
- int len) {
- unibrow::Utf8Iterator it = unibrow::Utf8Iterator(vector);
- while (!it.Done()) {
- DCHECK_GT(len, 0);
- len -= 1;
-
- uint16_t c = *it;
- ++it;
- DCHECK_NE(unibrow::Utf8::kBadChar, c);
- *chars++ = c;
- }
- DCHECK_EQ(len, 0);
-}
-
-
-static inline void WriteOneByteData(String* s, uint8_t* chars, int len) {
- DCHECK(s->length() == len);
- String::WriteToFlat(s, chars, 0, len);
-}
-
-
-static inline void WriteTwoByteData(String* s, uint16_t* chars, int len) {
- DCHECK(s->length() == len);
- String::WriteToFlat(s, chars, 0, len);
-}
-
-
-template <bool is_one_byte, typename T>
-AllocationResult Heap::AllocateInternalizedStringImpl(T t, int chars,
- uint32_t hash_field) {
- DCHECK_LE(0, chars);
- // Compute map and object size.
- int size;
- Map* map;
-
- DCHECK_LE(0, chars);
- DCHECK_GE(String::kMaxLength, chars);
- if (is_one_byte) {
- map = one_byte_internalized_string_map();
- size = SeqOneByteString::SizeFor(chars);
- } else {
- map = internalized_string_map();
- size = SeqTwoByteString::SizeFor(chars);
- }
-
- // Allocate string.
- HeapObject* result = nullptr;
- {
- AllocationResult allocation = AllocateRaw(size, OLD_SPACE);
- if (!allocation.To(&result)) return allocation;
- }
-
- result->set_map_after_allocation(map, SKIP_WRITE_BARRIER);
- // Set length and hash fields of the allocated string.
- String* answer = String::cast(result);
- answer->set_length(chars);
- answer->set_hash_field(hash_field);
-
- DCHECK_EQ(size, answer->Size());
-
- if (is_one_byte) {
- WriteOneByteData(t, SeqOneByteString::cast(answer)->GetChars(), chars);
- } else {
- WriteTwoByteData(t, SeqTwoByteString::cast(answer)->GetChars(), chars);
- }
- return answer;
-}
-
-
-// Need explicit instantiations.
-template AllocationResult Heap::AllocateInternalizedStringImpl<true>(String*,
- int,
- uint32_t);
-template AllocationResult Heap::AllocateInternalizedStringImpl<false>(String*,
- int,
- uint32_t);
-template AllocationResult Heap::AllocateInternalizedStringImpl<false>(
- Vector<const char>, int, uint32_t);
-
-
-AllocationResult Heap::AllocateRawOneByteString(int length,
- PretenureFlag pretenure) {
- DCHECK_LE(0, length);
- DCHECK_GE(String::kMaxLength, length);
- int size = SeqOneByteString::SizeFor(length);
- DCHECK_GE(SeqOneByteString::kMaxSize, size);
- AllocationSpace space = SelectSpace(pretenure);
-
- HeapObject* result = nullptr;
- {
- AllocationResult allocation = AllocateRaw(size, space);
- if (!allocation.To(&result)) return allocation;
- }
-
- // Partially initialize the object.
- result->set_map_after_allocation(one_byte_string_map(), SKIP_WRITE_BARRIER);
- String::cast(result)->set_length(length);
- String::cast(result)->set_hash_field(String::kEmptyHashField);
- DCHECK_EQ(size, HeapObject::cast(result)->Size());
-
- return result;
-}
-
-
-AllocationResult Heap::AllocateRawTwoByteString(int length,
- PretenureFlag pretenure) {
- DCHECK_LE(0, length);
- DCHECK_GE(String::kMaxLength, length);
- int size = SeqTwoByteString::SizeFor(length);
- DCHECK_GE(SeqTwoByteString::kMaxSize, size);
- AllocationSpace space = SelectSpace(pretenure);
-
- HeapObject* result = nullptr;
- {
- AllocationResult allocation = AllocateRaw(size, space);
- if (!allocation.To(&result)) return allocation;
- }
-
- // Partially initialize the object.
- result->set_map_after_allocation(string_map(), SKIP_WRITE_BARRIER);
- String::cast(result)->set_length(length);
- String::cast(result)->set_hash_field(String::kEmptyHashField);
- DCHECK_EQ(size, HeapObject::cast(result)->Size());
- return result;
-}
-
-
-AllocationResult Heap::AllocateEmptyFixedArray() {
- int size = FixedArray::SizeFor(0);
- HeapObject* result = nullptr;
- {
- AllocationResult allocation = AllocateRaw(size, OLD_SPACE);
- if (!allocation.To(&result)) return allocation;
- }
- // Initialize the object.
- result->set_map_after_allocation(fixed_array_map(), SKIP_WRITE_BARRIER);
- FixedArray::cast(result)->set_length(0);
- return result;
-}
-
-AllocationResult Heap::AllocateEmptyScopeInfo() {
- int size = FixedArray::SizeFor(0);
- HeapObject* result = nullptr;
- {
- AllocationResult allocation = AllocateRaw(size, OLD_SPACE);
- if (!allocation.To(&result)) return allocation;
- }
- // Initialize the object.
- result->set_map_after_allocation(scope_info_map(), SKIP_WRITE_BARRIER);
- FixedArray::cast(result)->set_length(0);
- return result;
-}
-
-AllocationResult Heap::CopyAndTenureFixedCOWArray(FixedArray* src) {
- if (!InNewSpace(src)) {
- return src;
- }
-
- int len = src->length();
- HeapObject* obj = nullptr;
- {
- AllocationResult allocation = AllocateRawFixedArray(len, TENURED);
- if (!allocation.To(&obj)) return allocation;
- }
- obj->set_map_after_allocation(fixed_array_map(), SKIP_WRITE_BARRIER);
- FixedArray* result = FixedArray::cast(obj);
- result->set_length(len);
-
- // Copy the content.
- DisallowHeapAllocation no_gc;
- WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
- for (int i = 0; i < len; i++) result->set(i, src->get(i), mode);
-
- // TODO(mvstanton): The map is set twice because of protection against calling
- // set() on a COW FixedArray. Issue v8:3221 created to track this, and
- // we might then be able to remove this whole method.
- HeapObject::cast(obj)->set_map_after_allocation(fixed_cow_array_map(),
- SKIP_WRITE_BARRIER);
- return result;
-}
-
-
-AllocationResult Heap::AllocateEmptyFixedTypedArray(
- ExternalArrayType array_type) {
- return AllocateFixedTypedArray(0, array_type, false, TENURED);
-}
-
-namespace {
-template <typename T>
-void initialize_length(T* array, int length) {
- array->set_length(length);
-}
-
-template <>
-void initialize_length<PropertyArray>(PropertyArray* array, int length) {
- array->initialize_length(length);
-}
-
-} // namespace
-
-template <typename T>
-AllocationResult Heap::CopyArrayAndGrow(T* src, int grow_by,
- PretenureFlag pretenure) {
- int old_len = src->length();
- int new_len = old_len + grow_by;
- DCHECK(new_len >= old_len);
- HeapObject* obj = nullptr;
- {
- AllocationResult allocation = AllocateRawFixedArray(new_len, pretenure);
- if (!allocation.To(&obj)) return allocation;
- }
-
- obj->set_map_after_allocation(src->map(), SKIP_WRITE_BARRIER);
- T* result = T::cast(obj);
- initialize_length(result, new_len);
-
- // Copy the content.
- DisallowHeapAllocation no_gc;
- WriteBarrierMode mode = obj->GetWriteBarrierMode(no_gc);
- for (int i = 0; i < old_len; i++) result->set(i, src->get(i), mode);
- MemsetPointer(result->data_start() + old_len, undefined_value(), grow_by);
- return result;
-}
-
-template AllocationResult Heap::CopyArrayAndGrow(FixedArray* src, int grow_by,
- PretenureFlag pretenure);
-template AllocationResult Heap::CopyArrayAndGrow(PropertyArray* src,
- int grow_by,
- PretenureFlag pretenure);
-
-AllocationResult Heap::CopyFixedArrayUpTo(FixedArray* src, int new_len,
- PretenureFlag pretenure) {
- if (new_len == 0) return empty_fixed_array();
-
- DCHECK_LE(new_len, src->length());
-
- HeapObject* obj = nullptr;
- {
- AllocationResult allocation = AllocateRawFixedArray(new_len, pretenure);
- if (!allocation.To(&obj)) return allocation;
- }
- obj->set_map_after_allocation(fixed_array_map(), SKIP_WRITE_BARRIER);
-
- FixedArray* result = FixedArray::cast(obj);
- result->set_length(new_len);
-
- // Copy the content.
- DisallowHeapAllocation no_gc;
- WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
- for (int i = 0; i < new_len; i++) result->set(i, src->get(i), mode);
- return result;
-}
-
-template <typename T>
-AllocationResult Heap::CopyArrayWithMap(T* src, Map* map) {
- int len = src->length();
- HeapObject* obj = nullptr;
- {
- AllocationResult allocation = AllocateRawFixedArray(len, NOT_TENURED);
- if (!allocation.To(&obj)) return allocation;
- }
- obj->set_map_after_allocation(map, SKIP_WRITE_BARRIER);
-
- T* result = T::cast(obj);
- DisallowHeapAllocation no_gc;
- WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
-
- // Eliminate the write barrier if possible.
- if (mode == SKIP_WRITE_BARRIER) {
- CopyBlock(obj->address() + kPointerSize, src->address() + kPointerSize,
- T::SizeFor(len) - kPointerSize);
- return obj;
- }
-
- // Slow case: Just copy the content one-by-one.
- initialize_length(result, len);
- for (int i = 0; i < len; i++) result->set(i, src->get(i), mode);
- return result;
-}
-
-template AllocationResult Heap::CopyArrayWithMap(FixedArray* src, Map* map);
-template AllocationResult Heap::CopyArrayWithMap(PropertyArray* src, Map* map);
-
-AllocationResult Heap::CopyFixedArrayWithMap(FixedArray* src, Map* map) {
- return CopyArrayWithMap(src, map);
-}
-
-AllocationResult Heap::CopyPropertyArray(PropertyArray* src) {
- return CopyArrayWithMap(src, property_array_map());
-}
-
-AllocationResult Heap::CopyFixedDoubleArrayWithMap(FixedDoubleArray* src,
- Map* map) {
- int len = src->length();
- HeapObject* obj = nullptr;
- {
- AllocationResult allocation = AllocateRawFixedDoubleArray(len, NOT_TENURED);
- if (!allocation.To(&obj)) return allocation;
- }
- obj->set_map_after_allocation(map, SKIP_WRITE_BARRIER);
- CopyBlock(obj->address() + FixedDoubleArray::kLengthOffset,
- src->address() + FixedDoubleArray::kLengthOffset,
- FixedDoubleArray::SizeFor(len) - FixedDoubleArray::kLengthOffset);
- return obj;
-}
-
-AllocationResult Heap::CopyFeedbackVector(FeedbackVector* src) {
- int len = src->length();
- HeapObject* obj = nullptr;
- {
- AllocationResult allocation = AllocateRawFeedbackVector(len, NOT_TENURED);
- if (!allocation.To(&obj)) return allocation;
- }
- obj->set_map_after_allocation(feedback_vector_map(), SKIP_WRITE_BARRIER);
-
- FeedbackVector* result = FeedbackVector::cast(obj);
-
- DisallowHeapAllocation no_gc;
- WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
-
- // Eliminate the write barrier if possible.
- if (mode == SKIP_WRITE_BARRIER) {
- CopyBlock(result->address() + kPointerSize,
- result->address() + kPointerSize,
- FeedbackVector::SizeFor(len) - kPointerSize);
- return result;
- }
-
- // Slow case: Just copy the content one-by-one.
- result->set_shared_function_info(src->shared_function_info());
- result->set_optimized_code_cell(src->optimized_code_cell());
- result->set_invocation_count(src->invocation_count());
- result->set_profiler_ticks(src->profiler_ticks());
- result->set_deopt_count(src->deopt_count());
- for (int i = 0; i < len; i++) result->set(i, src->get(i), mode);
- return result;
-}
-
-AllocationResult Heap::AllocateRawFixedArray(int length,
- PretenureFlag pretenure) {
- if (length < 0 || length > FixedArray::kMaxLength) {
- v8::internal::Heap::FatalProcessOutOfMemory("invalid array length", true);
- }
- int size = FixedArray::SizeFor(length);
- AllocationSpace space = SelectSpace(pretenure);
-
- AllocationResult result = AllocateRaw(size, space);
- if (!result.IsRetry() && size > kMaxRegularHeapObjectSize &&
- FLAG_use_marking_progress_bar) {
- MemoryChunk* chunk =
- MemoryChunk::FromAddress(result.ToObjectChecked()->address());
- chunk->SetFlag<AccessMode::ATOMIC>(MemoryChunk::HAS_PROGRESS_BAR);
- }
- return result;
-}
-
-AllocationResult Heap::AllocateFixedArrayWithFiller(
- RootListIndex map_root_index, int length, PretenureFlag pretenure,
- Object* filler) {
- // Zero-length case must be handled outside, where the knowledge about
- // the map is.
- DCHECK_LT(0, length);
- DCHECK(!InNewSpace(filler));
- HeapObject* result = nullptr;
- {
- AllocationResult allocation = AllocateRawFixedArray(length, pretenure);
- if (!allocation.To(&result)) return allocation;
- }
- DCHECK(RootIsImmortalImmovable(map_root_index));
- Map* map = Map::cast(root(map_root_index));
- result->set_map_after_allocation(map, SKIP_WRITE_BARRIER);
- FixedArray* array = FixedArray::cast(result);
- array->set_length(length);
- MemsetPointer(array->data_start(), filler, length);
- return array;
-}
-
-AllocationResult Heap::AllocatePropertyArray(int length,
- PretenureFlag pretenure) {
- // Allow length = 0 for the empty_property_array singleton.
- DCHECK_LE(0, length);
- DCHECK_IMPLIES(length == 0, pretenure == TENURED);
-
- DCHECK(!InNewSpace(undefined_value()));
- HeapObject* result = nullptr;
- {
- AllocationResult allocation = AllocateRawFixedArray(length, pretenure);
- if (!allocation.To(&result)) return allocation;
- }
-
- result->set_map_after_allocation(property_array_map(), SKIP_WRITE_BARRIER);
- PropertyArray* array = PropertyArray::cast(result);
- array->initialize_length(length);
- MemsetPointer(array->data_start(), undefined_value(), length);
- return result;
-}
-
-AllocationResult Heap::AllocateUninitializedFixedArray(
- int length, PretenureFlag pretenure) {
- if (length == 0) return empty_fixed_array();
-
- HeapObject* obj = nullptr;
- {
- AllocationResult allocation = AllocateRawFixedArray(length, pretenure);
- if (!allocation.To(&obj)) return allocation;
+ // Notify the heap object allocation trackers of the change in object layout.
+ // The array may not be moved during GC, but its size still has to be updated.
+ for (auto& tracker : allocation_trackers_) {
+ tracker->UpdateObjectSizeEvent(object->address(), object->Size());
}
-
- obj->set_map_after_allocation(fixed_array_map(), SKIP_WRITE_BARRIER);
- FixedArray::cast(obj)->set_length(length);
- return obj;
}
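
The replacement body above routes in-place size changes through the heap's registered allocation trackers. Below is a minimal standalone sketch of that observer pattern; the Tracker interface is a hypothetical stand-in modeled only on the UpdateObjectSizeEvent(address, size) call visible in this hunk, not V8's actual declarations in heap.h.

#include <cstdint>
#include <cstdio>
#include <vector>

// Hypothetical stand-ins; V8's real types live in src/heap/heap.h.
using Address = std::uintptr_t;

class Tracker {
 public:
  virtual ~Tracker() = default;
  // Called when an object's size changes without the object moving.
  virtual void UpdateObjectSizeEvent(Address addr, int size) = 0;
};

class MiniHeap {
 public:
  void AddTracker(Tracker* t) { trackers_.push_back(t); }
  // Mirrors the loop in the hunk: every registered tracker sees the new size.
  void NotifySizeChange(Address addr, int new_size) {
    for (Tracker* t : trackers_) t->UpdateObjectSizeEvent(addr, new_size);
  }

 private:
  std::vector<Tracker*> trackers_;
};

class LoggingTracker : public Tracker {
 public:
  void UpdateObjectSizeEvent(Address addr, int size) override {
    std::printf("object at %#zx resized to %d bytes\n",
                static_cast<size_t>(addr), size);
  }
};

int main() {
  MiniHeap heap;
  LoggingTracker logger;
  heap.AddTracker(&logger);
  heap.NotifySizeChange(0x1000, 64);  // e.g. an array right-trimmed in place
}
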
-
-AllocationResult Heap::AllocateUninitializedFixedDoubleArray(
- int length, PretenureFlag pretenure) {
- if (length == 0) return empty_fixed_array();
-
- HeapObject* elements = nullptr;
- AllocationResult allocation = AllocateRawFixedDoubleArray(length, pretenure);
- if (!allocation.To(&elements)) return allocation;
-
- elements->set_map_after_allocation(fixed_double_array_map(),
- SKIP_WRITE_BARRIER);
- FixedDoubleArray::cast(elements)->set_length(length);
- return elements;
-}
-
-
-AllocationResult Heap::AllocateRawFixedDoubleArray(int length,
- PretenureFlag pretenure) {
- if (length < 0 || length > FixedDoubleArray::kMaxLength) {
- v8::internal::Heap::FatalProcessOutOfMemory("invalid array length", true);
- }
- int size = FixedDoubleArray::SizeFor(length);
- AllocationSpace space = SelectSpace(pretenure);
-
- HeapObject* object = nullptr;
- {
- AllocationResult allocation = AllocateRaw(size, space, kDoubleAligned);
- if (!allocation.To(&object)) return allocation;
- }
-
- return object;
-}
-
-AllocationResult Heap::AllocateRawFeedbackVector(int length,
- PretenureFlag pretenure) {
- DCHECK_LE(0, length);
-
- int size = FeedbackVector::SizeFor(length);
- AllocationSpace space = SelectSpace(pretenure);
-
- HeapObject* object = nullptr;
- {
- AllocationResult allocation = AllocateRaw(size, space);
- if (!allocation.To(&object)) return allocation;
- }
-
- return object;
-}
-
-AllocationResult Heap::AllocateFeedbackVector(SharedFunctionInfo* shared,
- PretenureFlag pretenure) {
- int length = shared->feedback_metadata()->slot_count();
-
- HeapObject* result = nullptr;
- {
- AllocationResult allocation = AllocateRawFeedbackVector(length, pretenure);
- if (!allocation.To(&result)) return allocation;
- }
-
- // Initialize the object's map.
- result->set_map_after_allocation(feedback_vector_map(), SKIP_WRITE_BARRIER);
- FeedbackVector* vector = FeedbackVector::cast(result);
- vector->set_shared_function_info(shared);
- vector->set_optimized_code_cell(Smi::FromEnum(
- FLAG_log_function_events ? OptimizationMarker::kLogFirstExecution
- : OptimizationMarker::kNone));
- vector->set_length(length);
- vector->set_invocation_count(0);
- vector->set_profiler_ticks(0);
- vector->set_deopt_count(0);
- // TODO(leszeks): Initialize based on the feedback metadata.
- MemsetPointer(vector->slots_start(), undefined_value(), length);
- return vector;
-}
-
-AllocationResult Heap::AllocateSymbol() {
- // Statically ensure that it is safe to allocate symbols in paged spaces.
- STATIC_ASSERT(Symbol::kSize <= kMaxRegularHeapObjectSize);
-
- HeapObject* result = nullptr;
- AllocationResult allocation = AllocateRaw(Symbol::kSize, OLD_SPACE);
- if (!allocation.To(&result)) return allocation;
-
- result->set_map_after_allocation(symbol_map(), SKIP_WRITE_BARRIER);
-
- // Generate a random hash value.
- int hash = isolate()->GenerateIdentityHash(Name::kHashBitMask);
-
- Symbol::cast(result)
- ->set_hash_field(Name::kIsNotArrayIndexMask | (hash << Name::kHashShift));
- Symbol::cast(result)->set_name(undefined_value());
- Symbol::cast(result)->set_flags(0);
-
- DCHECK(!Symbol::cast(result)->is_private());
- return result;
-}
-
-AllocationResult Heap::AllocateStruct(InstanceType type,
- PretenureFlag pretenure) {
- Map* map;
- switch (type) {
-#define MAKE_CASE(NAME, Name, name) \
- case NAME##_TYPE: \
- map = name##_map(); \
- break;
- STRUCT_LIST(MAKE_CASE)
-#undef MAKE_CASE
- default:
- UNREACHABLE();
- }
- int size = map->instance_size();
- Struct* result = nullptr;
- {
- AllocationSpace space = SelectSpace(pretenure);
- AllocationResult allocation = Allocate(map, space);
- if (!allocation.To(&result)) return allocation;
- }
- result->InitializeBody(size);
- return result;
-}
-
-
void Heap::MakeHeapIterable() {
mark_compact_collector()->EnsureSweepingCompleted();
}
@@ -4191,6 +2971,34 @@ bool Heap::HasLowAllocationRate() {
HasLowOldGenerationAllocationRate();
}
+bool Heap::IsIneffectiveMarkCompact(size_t old_generation_size,
+ double mutator_utilization) {
+ const double kHighHeapPercentage = 0.8;
+ const double kLowMutatorUtilization = 0.4;
+ return old_generation_size >=
+ kHighHeapPercentage * max_old_generation_size_ &&
+ mutator_utilization < kLowMutatorUtilization;
+}
+
+void Heap::CheckIneffectiveMarkCompact(size_t old_generation_size,
+ double mutator_utilization) {
+ const int kMaxConsecutiveIneffectiveMarkCompacts = 4;
+ if (!FLAG_detect_ineffective_gcs_near_heap_limit) return;
+ if (!IsIneffectiveMarkCompact(old_generation_size, mutator_utilization)) {
+ consecutive_ineffective_mark_compacts_ = 0;
+ return;
+ }
+ ++consecutive_ineffective_mark_compacts_;
+ if (consecutive_ineffective_mark_compacts_ ==
+ kMaxConsecutiveIneffectiveMarkCompacts) {
+ if (InvokeNearHeapLimitCallback()) {
+ // The callback increased the heap limit.
+ consecutive_ineffective_mark_compacts_ = 0;
+ return;
+ }
+ FatalProcessOutOfMemory("Ineffective mark-compacts near heap limit");
+ }
+}
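
The new heuristic above declares a mark-compact ineffective when the old generation fills at least 80% of its limit while mutator utilization drops below 0.4, and after four such GCs in a row it invokes the near-heap-limit callback or reports OOM. A standalone sketch of the same thresholds follows, with the constants copied from this hunk; the surrounding types are simplified stand-ins, not V8's.

#include <cstddef>
#include <cstdio>

// Constants copied from Heap::IsIneffectiveMarkCompact /
// Heap::CheckIneffectiveMarkCompact in this hunk.
constexpr double kHighHeapPercentage = 0.8;
constexpr double kLowMutatorUtilization = 0.4;
constexpr int kMaxConsecutiveIneffectiveMarkCompacts = 4;

bool IsIneffective(size_t old_gen_size, size_t max_old_gen_size,
                   double mutator_utilization) {
  return old_gen_size >= kHighHeapPercentage * max_old_gen_size &&
         mutator_utilization < kLowMutatorUtilization;
}

int main() {
  const size_t max_old_gen = 512u * 1024 * 1024;  // assume a 512 MB limit
  int consecutive = 0;
  // Simulated GC cycles: (old generation size, mutator utilization).
  struct Cycle { size_t size; double mu; } cycles[] = {
      {400u * 1024 * 1024, 0.6},  // below 80% of the limit -> counter resets
      {450u * 1024 * 1024, 0.3},  // >= 80% of limit and mu < 0.4 -> count
      {460u * 1024 * 1024, 0.2},
      {470u * 1024 * 1024, 0.2},
      {480u * 1024 * 1024, 0.1},
  };
  for (const Cycle& c : cycles) {
    consecutive = IsIneffective(c.size, max_old_gen, c.mu) ? consecutive + 1 : 0;
    std::printf("old gen %zu MB, mu %.1f -> consecutive=%d\n",
                c.size / (1024 * 1024), c.mu, consecutive);
    if (consecutive == kMaxConsecutiveIneffectiveMarkCompacts) {
      std::printf("would invoke near-heap-limit callback or report OOM\n");
    }
  }
}
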
bool Heap::HasHighFragmentation() {
size_t used = PromotedSpaceSizeOfObjects();
@@ -4207,8 +3015,9 @@ bool Heap::HasHighFragmentation(size_t used, size_t committed) {
}
bool Heap::ShouldOptimizeForMemoryUsage() {
+ const size_t kOldGenerationSlack = max_old_generation_size_ / 8;
return FLAG_optimize_for_size || isolate()->IsIsolateInBackground() ||
- HighMemoryPressure();
+ HighMemoryPressure() || !CanExpandOldGeneration(kOldGenerationSlack);
}
void Heap::ActivateMemoryReducerIfNeeded() {
@@ -4328,17 +3137,22 @@ void Heap::NotifyObjectLayoutChange(HeapObject* object, int size,
class SlotCollectingVisitor final : public ObjectVisitor {
public:
void VisitPointers(HeapObject* host, Object** start, Object** end) override {
- for (Object** p = start; p < end; p++) {
+ VisitPointers(host, reinterpret_cast<MaybeObject**>(start),
+ reinterpret_cast<MaybeObject**>(end));
+ }
+ void VisitPointers(HeapObject* host, MaybeObject** start,
+ MaybeObject** end) final {
+ for (MaybeObject** p = start; p < end; p++) {
slots_.push_back(p);
}
}
int number_of_slots() { return static_cast<int>(slots_.size()); }
- Object** slot(int i) { return slots_[i]; }
+ MaybeObject** slot(int i) { return slots_[i]; }
private:
- std::vector<Object**> slots_;
+ std::vector<MaybeObject**> slots_;
};
void Heap::VerifyObjectLayoutChange(HeapObject* object, Map* new_map) {
@@ -4596,16 +3410,43 @@ void Heap::MemoryPressureNotification(MemoryPressureLevel level,
}
}
-void Heap::SetOutOfMemoryCallback(v8::debug::OutOfMemoryCallback callback,
- void* data) {
- out_of_memory_callback_ = callback;
- out_of_memory_callback_data_ = data;
+void Heap::AddNearHeapLimitCallback(v8::NearHeapLimitCallback callback,
+ void* data) {
+ const size_t kMaxCallbacks = 100;
+ CHECK_LT(near_heap_limit_callbacks_.size(), kMaxCallbacks);
+ for (auto callback_data : near_heap_limit_callbacks_) {
+ CHECK_NE(callback_data.first, callback);
+ }
+ near_heap_limit_callbacks_.push_back(std::make_pair(callback, data));
+}
+
+void Heap::RemoveNearHeapLimitCallback(v8::NearHeapLimitCallback callback,
+ size_t heap_limit) {
+ for (size_t i = 0; i < near_heap_limit_callbacks_.size(); i++) {
+ if (near_heap_limit_callbacks_[i].first == callback) {
+ near_heap_limit_callbacks_.erase(near_heap_limit_callbacks_.begin() + i);
+ if (heap_limit) {
+ RestoreHeapLimit(heap_limit);
+ }
+ return;
+ }
+ }
+ UNREACHABLE();
}
-void Heap::InvokeOutOfMemoryCallback() {
- if (out_of_memory_callback_) {
- out_of_memory_callback_(out_of_memory_callback_data_);
+bool Heap::InvokeNearHeapLimitCallback() {
+ if (near_heap_limit_callbacks_.size() > 0) {
+ v8::NearHeapLimitCallback callback =
+ near_heap_limit_callbacks_.back().first;
+ void* data = near_heap_limit_callbacks_.back().second;
+ size_t heap_limit = callback(data, max_old_generation_size_,
+ initial_max_old_generation_size_);
+ if (heap_limit > max_old_generation_size_) {
+ max_old_generation_size_ = heap_limit;
+ return true;
+ }
}
+ return false;
}
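
Replacing the old out-of-memory callback with near-heap-limit callbacks lets embedders raise the old-generation limit instead of crashing: InvokeNearHeapLimitCallback calls the most recently added callback and only treats the situation as rescued if the returned limit exceeds the current one (RemoveNearHeapLimitCallback can later hand a limit back to RestoreHeapLimit, shown further down in heap.h). Below is a hedged sketch of the embedder side, assuming the public v8::Isolate::AddNearHeapLimitCallback API from include/v8.h of this V8 version; isolate setup and error handling are omitted.

#include <cstddef>
#include <cstdio>

#include "v8.h"  // public embedder API; adjust the include path as needed

// Signature matches v8::NearHeapLimitCallback:
//   size_t (*)(void* data, size_t current_heap_limit, size_t initial_heap_limit)
size_t OnNearHeapLimit(void* data, size_t current_heap_limit,
                       size_t initial_heap_limit) {
  std::fprintf(stderr, "near heap limit: current=%zu initial=%zu\n",
               current_heap_limit, initial_heap_limit);
  // Returning a value larger than current_heap_limit raises the limit and
  // resets the ineffective-mark-compact counter; returning current_heap_limit
  // (or less) leaves V8 on its path toward FatalProcessOutOfMemory.
  return current_heap_limit + 32u * 1024 * 1024;  // grant 32 MB more, once
}

void InstallCallback(v8::Isolate* isolate) {
  // The second argument is an arbitrary void* handed back to the callback.
  isolate->AddNearHeapLimitCallback(OnNearHeapLimit, nullptr);
}
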
void Heap::CollectCodeStatistics() {
@@ -4695,7 +3536,7 @@ bool Heap::Contains(HeapObject* value) {
return HasBeenSetUp() &&
(new_space_->ToSpaceContains(value) || old_space_->Contains(value) ||
code_space_->Contains(value) || map_space_->Contains(value) ||
- lo_space_->Contains(value));
+ lo_space_->Contains(value) || read_only_space_->Contains(value));
}
bool Heap::ContainsSlow(Address addr) {
@@ -4705,7 +3546,8 @@ bool Heap::ContainsSlow(Address addr) {
return HasBeenSetUp() &&
(new_space_->ToSpaceContainsSlow(addr) ||
old_space_->ContainsSlow(addr) || code_space_->ContainsSlow(addr) ||
- map_space_->ContainsSlow(addr) || lo_space_->ContainsSlow(addr));
+ map_space_->ContainsSlow(addr) || lo_space_->ContainsSlow(addr) ||
+ read_only_space_->Contains(addr));
}
bool Heap::InSpace(HeapObject* value, AllocationSpace space) {
@@ -4725,6 +3567,8 @@ bool Heap::InSpace(HeapObject* value, AllocationSpace space) {
return map_space_->Contains(value);
case LO_SPACE:
return lo_space_->Contains(value);
+ case RO_SPACE:
+ return read_only_space_->Contains(value);
}
UNREACHABLE();
}
@@ -4746,6 +3590,8 @@ bool Heap::InSpaceSlow(Address addr, AllocationSpace space) {
return map_space_->ContainsSlow(addr);
case LO_SPACE:
return lo_space_->ContainsSlow(addr);
+ case RO_SPACE:
+ return read_only_space_->ContainsSlow(addr);
}
UNREACHABLE();
}
@@ -4758,6 +3604,7 @@ bool Heap::IsValidAllocationSpace(AllocationSpace space) {
case CODE_SPACE:
case MAP_SPACE:
case LO_SPACE:
+ case RO_SPACE:
return true;
default:
return false;
@@ -4783,6 +3630,22 @@ bool Heap::RootIsImmortalImmovable(int root_index) {
}
#ifdef VERIFY_HEAP
+class VerifyReadOnlyPointersVisitor : public VerifyPointersVisitor {
+ protected:
+ void VerifyPointers(HeapObject* host, MaybeObject** start,
+ MaybeObject** end) override {
+ VerifyPointersVisitor::VerifyPointers(host, start, end);
+
+ for (MaybeObject** current = start; current < end; current++) {
+ HeapObject* object;
+ if ((*current)->ToStrongOrWeakHeapObject(&object)) {
+ CHECK(
+ object->GetIsolate()->heap()->read_only_space()->Contains(object));
+ }
+ }
+ }
+};
+
void Heap::Verify() {
CHECK(HasBeenSetUp());
HandleScope scope(isolate());
@@ -4806,7 +3669,8 @@ void Heap::Verify() {
lo_space_->Verify();
- mark_compact_collector()->VerifyWeakEmbeddedObjectsInCode();
+ VerifyReadOnlyPointersVisitor read_only_visitor;
+ read_only_space_->Verify(&read_only_visitor);
}
class SlotVerifyingVisitor : public ObjectVisitor {
@@ -4815,10 +3679,22 @@ class SlotVerifyingVisitor : public ObjectVisitor {
std::set<std::pair<SlotType, Address> >* typed)
: untyped_(untyped), typed_(typed) {}
- virtual bool ShouldHaveBeenRecorded(HeapObject* host, Object* target) = 0;
+ virtual bool ShouldHaveBeenRecorded(HeapObject* host,
+ MaybeObject* target) = 0;
void VisitPointers(HeapObject* host, Object** start, Object** end) override {
+#ifdef DEBUG
for (Object** slot = start; slot < end; slot++) {
+ DCHECK(!HasWeakHeapObjectTag(*slot));
+ }
+#endif // DEBUG
+ VisitPointers(host, reinterpret_cast<MaybeObject**>(start),
+ reinterpret_cast<MaybeObject**>(end));
+ }
+
+ void VisitPointers(HeapObject* host, MaybeObject** start,
+ MaybeObject** end) final {
+ for (MaybeObject** slot = start; slot < end; slot++) {
if (ShouldHaveBeenRecorded(host, *slot)) {
CHECK_GT(untyped_->count(reinterpret_cast<Address>(slot)), 0);
}
@@ -4827,7 +3703,7 @@ class SlotVerifyingVisitor : public ObjectVisitor {
void VisitCodeTarget(Code* host, RelocInfo* rinfo) override {
Object* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
- if (ShouldHaveBeenRecorded(host, target)) {
+ if (ShouldHaveBeenRecorded(host, MaybeObject::FromObject(target))) {
CHECK(
InTypedSet(CODE_TARGET_SLOT, rinfo->pc()) ||
(rinfo->IsInConstantPool() &&
@@ -4837,7 +3713,7 @@ class SlotVerifyingVisitor : public ObjectVisitor {
void VisitEmbeddedPointer(Code* host, RelocInfo* rinfo) override {
Object* target = rinfo->target_object();
- if (ShouldHaveBeenRecorded(host, target)) {
+ if (ShouldHaveBeenRecorded(host, MaybeObject::FromObject(target))) {
CHECK(InTypedSet(EMBEDDED_OBJECT_SLOT, rinfo->pc()) ||
(rinfo->IsInConstantPool() &&
InTypedSet(OBJECT_SLOT, rinfo->constant_pool_entry_address())));
@@ -4858,10 +3734,11 @@ class OldToNewSlotVerifyingVisitor : public SlotVerifyingVisitor {
std::set<std::pair<SlotType, Address> >* typed)
: SlotVerifyingVisitor(untyped, typed), heap_(heap) {}
- bool ShouldHaveBeenRecorded(HeapObject* host, Object* target) override {
- DCHECK_IMPLIES(target->IsHeapObject() && heap_->InNewSpace(target),
- heap_->InToSpace(target));
- return target->IsHeapObject() && heap_->InNewSpace(target) &&
+ bool ShouldHaveBeenRecorded(HeapObject* host, MaybeObject* target) override {
+ DCHECK_IMPLIES(
+ target->IsStrongOrWeakHeapObject() && heap_->InNewSpace(target),
+ heap_->InToSpace(target));
+ return target->IsStrongOrWeakHeapObject() && heap_->InNewSpace(target) &&
!heap_->InNewSpace(host);
}
@@ -4938,6 +3815,14 @@ void Heap::ZapFromSpace() {
}
}
+void Heap::ZapCodeObject(Address start_address, int size_in_bytes) {
+#ifdef DEBUG
+ for (int i = 0; i < size_in_bytes / kPointerSize; i++) {
+ reinterpret_cast<Object**>(start_address)[i] = Smi::FromInt(kCodeZapValue);
+ }
+#endif
+}
+
void Heap::IterateRoots(RootVisitor* v, VisitMode mode) {
IterateStrongRoots(v, mode);
IterateWeakRoots(v, mode);
@@ -5128,7 +4013,7 @@ bool Heap::ConfigureHeap(size_t max_semi_space_size_in_kb,
// Overwrite default configuration.
if (max_semi_space_size_in_kb != 0) {
max_semi_space_size_ =
- ROUND_UP(max_semi_space_size_in_kb * KB, Page::kPageSize);
+ RoundUp<Page::kPageSize>(max_semi_space_size_in_kb * KB);
}
if (max_old_generation_size_in_mb != 0) {
max_old_generation_size_ = max_old_generation_size_in_mb * MB;
@@ -5144,9 +4029,9 @@ bool Heap::ConfigureHeap(size_t max_semi_space_size_in_kb,
}
if (Page::kPageSize > MB) {
- max_semi_space_size_ = ROUND_UP(max_semi_space_size_, Page::kPageSize);
+ max_semi_space_size_ = RoundUp<Page::kPageSize>(max_semi_space_size_);
max_old_generation_size_ =
- ROUND_UP(max_old_generation_size_, Page::kPageSize);
+ RoundUp<Page::kPageSize>(max_old_generation_size_);
}
if (FLAG_stress_compaction) {
@@ -5178,7 +4063,7 @@ bool Heap::ConfigureHeap(size_t max_semi_space_size_in_kb,
}
} else {
initial_semispace_size_ =
- ROUND_UP(initial_semispace_size, Page::kPageSize);
+ RoundUp<Page::kPageSize>(initial_semispace_size);
}
}
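
This hunk (and the two above it) swaps the ROUND_UP macro for the templated RoundUp<> helper, which takes the boundary as a compile-time argument. A minimal stand-in sketch (C++14 or later) of what such a helper does, assuming a power-of-two boundary; the 512 KB page size below is illustrative only, not necessarily Page::kPageSize.

#include <cstddef>
#include <cstdio>

// Minimal stand-in for the RoundUp<> helper used above; assumes the
// boundary is a power of two.
template <size_t kBoundary>
constexpr size_t RoundUp(size_t value) {
  static_assert((kBoundary & (kBoundary - 1)) == 0, "power of two required");
  return (value + kBoundary - 1) & ~(kBoundary - 1);
}

int main() {
  constexpr size_t kPageSize = 512 * 1024;  // illustrative page size
  // 3 MB + 1 byte rounds up to the next page boundary: 3.5 MB.
  std::printf("%zu\n", RoundUp<kPageSize>(3 * 1024 * 1024 + 1));
}
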
@@ -5189,7 +4074,8 @@ bool Heap::ConfigureHeap(size_t max_semi_space_size_in_kb,
}
// The old generation is paged and needs at least one page for each space.
- int paged_space_count = LAST_PAGED_SPACE - FIRST_PAGED_SPACE + 1;
+ int paged_space_count =
+ LAST_GROWABLE_PAGED_SPACE - FIRST_GROWABLE_PAGED_SPACE + 1;
initial_max_old_generation_size_ = max_old_generation_size_ =
Max(static_cast<size_t>(paged_space_count * Page::kPageSize),
max_old_generation_size_);
@@ -5593,6 +4479,73 @@ void Heap::DisableInlineAllocation() {
}
}
+HeapObject* Heap::AllocateRawWithRetry(int size, AllocationSpace space,
+ AllocationAlignment alignment) {
+ AllocationResult alloc = AllocateRaw(size, space, alignment);
+ HeapObject* result;
+ if (alloc.To(&result)) {
+ DCHECK(result != exception());
+ return result;
+ }
+ // Two GCs before panicking. In new space this will almost always succeed.
+ for (int i = 0; i < 2; i++) {
+ CollectGarbage(alloc.RetrySpace(),
+ GarbageCollectionReason::kAllocationFailure);
+ alloc = AllocateRaw(size, space, alignment);
+ if (alloc.To(&result)) {
+ DCHECK(result != exception());
+ return result;
+ }
+ }
+ isolate()->counters()->gc_last_resort_from_handles()->Increment();
+ CollectAllAvailableGarbage(GarbageCollectionReason::kLastResort);
+ {
+ AlwaysAllocateScope scope(isolate());
+ alloc = AllocateRaw(size, space, alignment);
+ }
+ if (alloc.To(&result)) {
+ DCHECK(result != exception());
+ return result;
+ }
+ // TODO(1181417): Fix this.
+ FatalProcessOutOfMemory("CALL_AND_RETRY_LAST");
+ return nullptr;
+}
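
AllocateRawWithRetry (and AllocateRawCodeInLargeObjectSpace below it) implement a three-stage fallback: try the allocation, retry after up to two targeted GCs, then run one last full GC under an always-allocate scope before giving up. The following is a standalone sketch of that retry ladder with the GC and allocation primitives replaced by hypothetical stand-ins; it is not V8's implementation.

#include <cstdio>
#include <functional>

// Hypothetical primitives standing in for Heap::AllocateRaw, CollectGarbage,
// CollectAllAvailableGarbage and AlwaysAllocateScope.
using TryAllocate = std::function<void*()>;
using RunGC = std::function<void()>;

void* AllocateWithRetry(TryAllocate try_allocate, RunGC collect_garbage,
                        RunGC collect_all_available_garbage) {
  if (void* result = try_allocate()) return result;
  // Two GCs before panicking; in new space this will almost always succeed.
  for (int i = 0; i < 2; i++) {
    collect_garbage();
    if (void* result = try_allocate()) return result;
  }
  // Last resort: the most aggressive GC, then one final attempt (in V8 the
  // final attempt additionally runs under AlwaysAllocateScope).
  collect_all_available_garbage();
  if (void* result = try_allocate()) return result;
  std::fprintf(stderr, "fatal: allocation failed after last-resort GC\n");
  return nullptr;  // V8 instead calls FatalProcessOutOfMemory here
}
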
+
+// TODO(jkummerow): Refactor this. AllocateRaw should take an "immovability"
+// parameter and just do what's necessary.
+HeapObject* Heap::AllocateRawCodeInLargeObjectSpace(int size) {
+ AllocationResult alloc = lo_space()->AllocateRaw(size, EXECUTABLE);
+ HeapObject* result;
+ if (alloc.To(&result)) {
+ DCHECK(result != exception());
+ return result;
+ }
+ // Two GCs before panicking.
+ for (int i = 0; i < 2; i++) {
+ CollectGarbage(alloc.RetrySpace(),
+ GarbageCollectionReason::kAllocationFailure);
+ alloc = lo_space()->AllocateRaw(size, EXECUTABLE);
+ if (alloc.To(&result)) {
+ DCHECK(result != exception());
+ return result;
+ }
+ }
+ isolate()->counters()->gc_last_resort_from_handles()->Increment();
+ CollectAllAvailableGarbage(GarbageCollectionReason::kLastResort);
+ {
+ AlwaysAllocateScope scope(isolate());
+ alloc = lo_space()->AllocateRaw(size, EXECUTABLE);
+ }
+ if (alloc.To(&result)) {
+ DCHECK(result != exception());
+ return result;
+ }
+ // TODO(1181417): Fix this.
+ FatalProcessOutOfMemory("CALL_AND_RETRY_LAST");
+ return nullptr;
+}
+
bool Heap::SetUp() {
#ifdef V8_ENABLE_ALLOCATION_TIMEOUT
allocation_timeout_ = NextAllocationTimeout();
@@ -5622,7 +4575,8 @@ bool Heap::SetUp() {
mark_compact_collector_ = new MarkCompactCollector(this);
incremental_marking_ =
- new IncrementalMarking(this, mark_compact_collector_->marking_worklist());
+ new IncrementalMarking(this, mark_compact_collector_->marking_worklist(),
+ mark_compact_collector_->weak_objects());
if (FLAG_concurrent_marking) {
MarkCompactCollector::MarkingWorklist* marking_worklist =
@@ -5660,6 +4614,10 @@ bool Heap::SetUp() {
space_[LO_SPACE] = lo_space_ = new LargeObjectSpace(this, LO_SPACE);
if (!lo_space_->SetUp()) return false;
+ space_[RO_SPACE] = read_only_space_ =
+ new ReadOnlySpace(this, RO_SPACE, NOT_EXECUTABLE);
+ if (!read_only_space_->SetUp()) return false;
+
// Set up the seed that is used to randomize the string hash function.
DCHECK_EQ(Smi::kZero, hash_seed());
if (FLAG_randomize_hashes) InitializeHashSeed();
@@ -5670,7 +4628,11 @@ bool Heap::SetUp() {
}
tracer_ = new GCTracer(this);
+#ifdef ENABLE_MINOR_MC
minor_mark_compact_collector_ = new MinorMarkCompactCollector(this);
+#else
+ minor_mark_compact_collector_ = nullptr;
+#endif // ENABLE_MINOR_MC
array_buffer_collector_ = new ArrayBufferCollector(this);
gc_idle_time_handler_ = new GCIdleTimeHandler();
memory_reducer_ = new MemoryReducer(this);
@@ -5687,9 +4649,11 @@ bool Heap::SetUp() {
store_buffer()->SetUp();
mark_compact_collector()->SetUp();
+#ifdef ENABLE_MINOR_MC
if (minor_mark_compact_collector() != nullptr) {
minor_mark_compact_collector()->SetUp();
}
+#endif // ENABLE_MINOR_MC
idle_scavenge_observer_ = new IdleScavengeObserver(
*this, ScavengeJob::kBytesAllocatedBeforeNextIdleTask);
@@ -5711,6 +4675,8 @@ bool Heap::SetUp() {
write_protect_code_memory_ = FLAG_write_protect_code_memory;
+ external_reference_table_.Init(isolate_);
+
return true;
}
@@ -5822,9 +4788,10 @@ void Heap::RegisterExternallyReferencedObject(Object** object) {
}
}
+void Heap::StartTearDown() { SetGCState(TEAR_DOWN); }
+
void Heap::TearDown() {
- SetGCState(TEAR_DOWN);
- DCHECK(!use_tasks_);
+ DCHECK_EQ(gc_state_, TEAR_DOWN);
#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
Verify();
@@ -5868,11 +4835,13 @@ void Heap::TearDown() {
mark_compact_collector_ = nullptr;
}
+#ifdef ENABLE_MINOR_MC
if (minor_mark_compact_collector_ != nullptr) {
minor_mark_compact_collector_->TearDown();
delete minor_mark_compact_collector_;
minor_mark_compact_collector_ = nullptr;
}
+#endif // ENABLE_MINOR_MC
if (array_buffer_collector_ != nullptr) {
delete array_buffer_collector_;
@@ -5914,6 +4883,11 @@ void Heap::TearDown() {
external_string_table_.TearDown();
+ // Tear down all ArrayBuffers before tearing down the heap since their
+ // byte_length may be a HeapNumber which is required for freeing the backing
+ // store.
+ ArrayBufferTracker::TearDown(this);
+
delete tracer_;
tracer_ = nullptr;
@@ -5942,6 +4916,11 @@ void Heap::TearDown() {
lo_space_ = nullptr;
}
+ if (read_only_space_ != nullptr) {
+ delete read_only_space_;
+ read_only_space_ = nullptr;
+ }
+
store_buffer()->TearDown();
memory_allocator()->TearDown();
@@ -6006,75 +4985,48 @@ void Heap::RemoveGCEpilogueCallback(v8::Isolate::GCCallbackWithData callback,
UNREACHABLE();
}
-// TODO(ishell): Find a better place for this.
-void Heap::AddWeakNewSpaceObjectToCodeDependency(Handle<HeapObject> obj,
- Handle<WeakCell> code) {
- DCHECK(InNewSpace(*obj));
- DCHECK(!InNewSpace(*code));
- Handle<ArrayList> list(weak_new_space_object_to_code_list(), isolate());
- list = ArrayList::Add(list, isolate()->factory()->NewWeakCell(obj), code);
- if (*list != weak_new_space_object_to_code_list()) {
- set_weak_new_space_object_to_code_list(*list);
- }
-}
-
-// TODO(ishell): Find a better place for this.
-void Heap::AddWeakObjectToCodeDependency(Handle<HeapObject> obj,
- Handle<DependentCode> dep) {
- DCHECK(!InNewSpace(*obj));
- DCHECK(!InNewSpace(*dep));
- Handle<WeakHashTable> table(weak_object_to_code_table(), isolate());
- table = WeakHashTable::Put(table, obj, dep);
- if (*table != weak_object_to_code_table())
- set_weak_object_to_code_table(*table);
- DCHECK_EQ(*dep, LookupWeakObjectToCodeDependency(obj));
-}
-
-
-DependentCode* Heap::LookupWeakObjectToCodeDependency(Handle<HeapObject> obj) {
- Object* dep = weak_object_to_code_table()->Lookup(obj);
- if (dep->IsDependentCode()) return DependentCode::cast(dep);
- return DependentCode::cast(empty_fixed_array());
-}
-
namespace {
-void CompactWeakFixedArray(Object* object) {
- if (object->IsWeakFixedArray()) {
- WeakFixedArray* array = WeakFixedArray::cast(object);
- array->Compact<WeakFixedArray::NullCallback>();
+void CompactFixedArrayOfWeakCells(Object* object) {
+ if (object->IsFixedArrayOfWeakCells()) {
+ FixedArrayOfWeakCells* array = FixedArrayOfWeakCells::cast(object);
+ array->Compact<FixedArrayOfWeakCells::NullCallback>();
}
}
} // anonymous namespace
-void Heap::CompactWeakFixedArrays() {
- // Find known WeakFixedArrays and compact them.
+void Heap::CompactFixedArraysOfWeakCells() {
+ // Find known FixedArrayOfWeakCells and compact them.
HeapIterator iterator(this);
for (HeapObject* o = iterator.next(); o != nullptr; o = iterator.next()) {
if (o->IsPrototypeInfo()) {
Object* prototype_users = PrototypeInfo::cast(o)->prototype_users();
- if (prototype_users->IsWeakFixedArray()) {
- WeakFixedArray* array = WeakFixedArray::cast(prototype_users);
+ if (prototype_users->IsFixedArrayOfWeakCells()) {
+ FixedArrayOfWeakCells* array =
+ FixedArrayOfWeakCells::cast(prototype_users);
array->Compact<JSObject::PrototypeRegistryCompactionCallback>();
}
}
}
- CompactWeakFixedArray(noscript_shared_function_infos());
- CompactWeakFixedArray(script_list());
- CompactWeakFixedArray(weak_stack_trace_list());
+ CompactFixedArrayOfWeakCells(noscript_shared_function_infos());
+ CompactFixedArrayOfWeakCells(script_list());
+ CompactFixedArrayOfWeakCells(weak_stack_trace_list());
}
void Heap::AddRetainedMap(Handle<Map> map) {
+ if (map->is_in_retained_map_list()) {
+ return;
+ }
Handle<WeakCell> cell = Map::WeakCellForMap(map);
Handle<ArrayList> array(retained_maps(), isolate());
if (array->IsFull()) {
CompactRetainedMaps(*array);
}
array = ArrayList::Add(
- array, cell, handle(Smi::FromInt(FLAG_retain_maps_for_n_gc), isolate()),
- ArrayList::kReloadLengthAfterAllocation);
+ array, cell, handle(Smi::FromInt(FLAG_retain_maps_for_n_gc), isolate()));
if (*array != retained_maps()) {
set_retained_maps(*array);
}
+ map->set_is_in_retained_map_list(true);
}
@@ -6106,8 +5058,8 @@ void Heap::CompactRetainedMaps(ArrayList* retained_maps) {
if (new_length != length) retained_maps->SetLength(new_length);
}
-void Heap::FatalProcessOutOfMemory(const char* location, bool is_heap_oom) {
- v8::internal::V8::FatalProcessOutOfMemory(location, is_heap_oom);
+void Heap::FatalProcessOutOfMemory(const char* location) {
+ v8::internal::V8::FatalProcessOutOfMemory(isolate(), location, true);
}
#ifdef DEBUG
@@ -6211,6 +5163,10 @@ void Heap::RecordWritesIntoCode(Code* code) {
PagedSpace* PagedSpaces::next() {
switch (counter_++) {
+ case RO_SPACE:
+ // skip NEW_SPACE
+ counter_++;
+ return heap_->read_only_space();
case OLD_SPACE:
return heap_->old_space();
case CODE_SPACE:
@@ -6285,12 +5241,19 @@ class UnreachableObjectsFilter : public HeapObjectsFilter {
void VisitPointers(HeapObject* host, Object** start,
Object** end) override {
+ MarkPointers(reinterpret_cast<MaybeObject**>(start),
+ reinterpret_cast<MaybeObject**>(end));
+ }
+
+ void VisitPointers(HeapObject* host, MaybeObject** start,
+ MaybeObject** end) final {
MarkPointers(start, end);
}
void VisitRootPointers(Root root, const char* description, Object** start,
Object** end) override {
- MarkPointers(start, end);
+ MarkPointers(reinterpret_cast<MaybeObject**>(start),
+ reinterpret_cast<MaybeObject**>(end));
}
void TransitiveClosure() {
@@ -6302,12 +5265,14 @@ class UnreachableObjectsFilter : public HeapObjectsFilter {
}
private:
- void MarkPointers(Object** start, Object** end) {
- for (Object** p = start; p < end; p++) {
- if (!(*p)->IsHeapObject()) continue;
- HeapObject* obj = HeapObject::cast(*p);
- if (filter_->MarkAsReachable(obj)) {
- marking_stack_.push_back(obj);
+ void MarkPointers(MaybeObject** start, MaybeObject** end) {
+ // Treat weak references as strong.
+ for (MaybeObject** p = start; p < end; p++) {
+ HeapObject* heap_object;
+ if ((*p)->ToStrongOrWeakHeapObject(&heap_object)) {
+ if (filter_->MarkAsReachable(heap_object)) {
+ marking_stack_.push_back(heap_object);
+ }
}
}
}
@@ -6606,6 +5571,8 @@ const char* AllocationSpaceName(AllocationSpace space) {
return "MAP_SPACE";
case LO_SPACE:
return "LO_SPACE";
+ case RO_SPACE:
+ return "RO_SPACE";
default:
UNREACHABLE();
}
@@ -6614,23 +5581,32 @@ const char* AllocationSpaceName(AllocationSpace space) {
void VerifyPointersVisitor::VisitPointers(HeapObject* host, Object** start,
Object** end) {
- VerifyPointers(start, end);
+ VerifyPointers(host, reinterpret_cast<MaybeObject**>(start),
+ reinterpret_cast<MaybeObject**>(end));
+}
+
+void VerifyPointersVisitor::VisitPointers(HeapObject* host, MaybeObject** start,
+ MaybeObject** end) {
+ VerifyPointers(host, start, end);
}
void VerifyPointersVisitor::VisitRootPointers(Root root,
const char* description,
Object** start, Object** end) {
- VerifyPointers(start, end);
+ VerifyPointers(nullptr, reinterpret_cast<MaybeObject**>(start),
+ reinterpret_cast<MaybeObject**>(end));
}
-void VerifyPointersVisitor::VerifyPointers(Object** start, Object** end) {
- for (Object** current = start; current < end; current++) {
- if ((*current)->IsHeapObject()) {
- HeapObject* object = HeapObject::cast(*current);
+void VerifyPointersVisitor::VerifyPointers(HeapObject* host,
+ MaybeObject** start,
+ MaybeObject** end) {
+ for (MaybeObject** current = start; current < end; current++) {
+ HeapObject* object;
+ if ((*current)->ToStrongOrWeakHeapObject(&object)) {
CHECK(object->GetIsolate()->heap()->Contains(object));
CHECK(object->map()->IsMap());
} else {
- CHECK((*current)->IsSmi());
+ CHECK((*current)->IsSmi() || (*current)->IsClearedWeakHeapObject());
}
}
}
@@ -6668,6 +5644,7 @@ bool Heap::AllowedToBeMigrated(HeapObject* obj, AllocationSpace dst) {
return dst == CODE_SPACE && type == CODE_TYPE;
case MAP_SPACE:
case LO_SPACE:
+ case RO_SPACE:
return false;
}
UNREACHABLE();
@@ -6720,9 +5697,7 @@ bool Heap::GcSafeCodeContains(HeapObject* code, Address addr) {
Map* map = GcSafeMapOfCodeSpaceObject(code);
DCHECK(map == code->GetHeap()->code_map());
#ifdef V8_EMBEDDED_BUILTINS
- if (FLAG_stress_off_heap_code) {
- if (InstructionStream::TryLookupCode(isolate(), addr) == code) return true;
- }
+ if (InstructionStream::TryLookupCode(isolate(), addr) == code) return true;
#endif
Address start = code->address();
Address end = code->address() + code->SizeFromMap(map);
@@ -6731,10 +5706,8 @@ bool Heap::GcSafeCodeContains(HeapObject* code, Address addr) {
Code* Heap::GcSafeFindCodeForInnerPointer(Address inner_pointer) {
#ifdef V8_EMBEDDED_BUILTINS
- if (FLAG_stress_off_heap_code) {
- Code* code = InstructionStream::TryLookupCode(isolate(), inner_pointer);
- if (code != nullptr) return code;
- }
+ Code* code = InstructionStream::TryLookupCode(isolate(), inner_pointer);
+ if (code != nullptr) return code;
#endif
// Check if the inner pointer points into a large object chunk.
diff --git a/deps/v8/src/heap/heap.h b/deps/v8/src/heap/heap.h
index 63bcfb2990..cdd44f7a15 100644
--- a/deps/v8/src/heap/heap.h
+++ b/deps/v8/src/heap/heap.h
@@ -8,6 +8,7 @@
#include <cmath>
#include <map>
#include <unordered_map>
+#include <unordered_set>
#include <vector>
// Clients of this interface shouldn't depend on lots of heap internals.
@@ -17,6 +18,7 @@
#include "src/allocation.h"
#include "src/assert-scope.h"
#include "src/base/atomic-utils.h"
+#include "src/external-reference-table.h"
#include "src/globals.h"
#include "src/heap-symbols.h"
#include "src/objects.h"
@@ -38,6 +40,7 @@ class HeapTester;
class TestMemoryAllocatorScope;
} // namespace heap
+class BoilerplateDescription;
class BytecodeArray;
class CodeDataContainer;
class DeoptimizationData;
@@ -87,7 +90,7 @@ using v8::MemoryPressureLevel;
/* Entries beyond the first 32 */ \
/* The roots above this line should be boring from a GC point of view. */ \
/* This means they are never in new space and never on a page that is */ \
- /* being compacted. */ \
+ /* being compacted.*/ \
/* Oddballs */ \
V(Oddball, arguments_marker, ArgumentsMarker) \
V(Oddball, exception, Exception) \
@@ -105,31 +108,36 @@ using v8::MemoryPressureLevel;
V(Map, debug_evaluate_context_map, DebugEvaluateContextMap) \
V(Map, script_context_table_map, ScriptContextTableMap) \
/* Maps */ \
- V(Map, descriptor_array_map, DescriptorArrayMap) \
+ V(Map, feedback_metadata_map, FeedbackMetadataArrayMap) \
V(Map, array_list_map, ArrayListMap) \
+ V(Map, bigint_map, BigIntMap) \
+ V(Map, boilerplate_description_map, BoilerplateDescriptionMap) \
+ V(Map, bytecode_array_map, BytecodeArrayMap) \
+ V(Map, code_data_container_map, CodeDataContainerMap) \
+ V(Map, descriptor_array_map, DescriptorArrayMap) \
+ V(Map, external_map, ExternalMap) \
V(Map, fixed_double_array_map, FixedDoubleArrayMap) \
+ V(Map, global_dictionary_map, GlobalDictionaryMap) \
+ V(Map, many_closures_cell_map, ManyClosuresCellMap) \
+ V(Map, message_object_map, JSMessageObjectMap) \
+ V(Map, module_info_map, ModuleInfoMap) \
V(Map, mutable_heap_number_map, MutableHeapNumberMap) \
- V(Map, ordered_hash_map_map, OrderedHashMapMap) \
- V(Map, ordered_hash_set_map, OrderedHashSetMap) \
V(Map, name_dictionary_map, NameDictionaryMap) \
- V(Map, global_dictionary_map, GlobalDictionaryMap) \
+ V(Map, no_closures_cell_map, NoClosuresCellMap) \
V(Map, number_dictionary_map, NumberDictionaryMap) \
+ V(Map, one_closure_cell_map, OneClosureCellMap) \
+ V(Map, ordered_hash_map_map, OrderedHashMapMap) \
+ V(Map, ordered_hash_set_map, OrderedHashSetMap) \
+ V(Map, property_array_map, PropertyArrayMap) \
+ V(Map, side_effect_call_handler_info_map, SideEffectCallHandlerInfoMap) \
+ V(Map, side_effect_free_call_handler_info_map, \
+ SideEffectFreeCallHandlerInfoMap) \
V(Map, simple_number_dictionary_map, SimpleNumberDictionaryMap) \
- V(Map, string_table_map, StringTableMap) \
- V(Map, weak_hash_table_map, WeakHashTableMap) \
V(Map, sloppy_arguments_elements_map, SloppyArgumentsElementsMap) \
V(Map, small_ordered_hash_map_map, SmallOrderedHashMapMap) \
V(Map, small_ordered_hash_set_map, SmallOrderedHashSetMap) \
- V(Map, code_data_container_map, CodeDataContainerMap) \
- V(Map, message_object_map, JSMessageObjectMap) \
- V(Map, external_map, ExternalMap) \
- V(Map, bytecode_array_map, BytecodeArrayMap) \
- V(Map, module_info_map, ModuleInfoMap) \
- V(Map, no_closures_cell_map, NoClosuresCellMap) \
- V(Map, one_closure_cell_map, OneClosureCellMap) \
- V(Map, many_closures_cell_map, ManyClosuresCellMap) \
- V(Map, property_array_map, PropertyArrayMap) \
- V(Map, bigint_map, BigIntMap) \
+ V(Map, string_table_map, StringTableMap) \
+ V(Map, weak_fixed_array_map, WeakFixedArrayMap) \
/* String maps */ \
V(Map, native_source_string_map, NativeSourceStringMap) \
V(Map, string_map, StringMap) \
@@ -186,6 +194,8 @@ using v8::MemoryPressureLevel;
V(EnumCache, empty_enum_cache, EmptyEnumCache) \
V(PropertyArray, empty_property_array, EmptyPropertyArray) \
V(ByteArray, empty_byte_array, EmptyByteArray) \
+ V(BoilerplateDescription, empty_boilerplate_description, \
+ EmptyBoilerplateDescription) \
V(FixedTypedArrayBase, empty_fixed_uint8_array, EmptyFixedUint8Array) \
V(FixedTypedArrayBase, empty_fixed_int8_array, EmptyFixedInt8Array) \
V(FixedTypedArrayBase, empty_fixed_uint16_array, EmptyFixedUint16Array) \
@@ -206,20 +216,25 @@ using v8::MemoryPressureLevel;
EmptySlowElementDictionary) \
V(FixedArray, empty_ordered_hash_map, EmptyOrderedHashMap) \
V(FixedArray, empty_ordered_hash_set, EmptyOrderedHashSet) \
+ V(FeedbackMetadata, empty_feedback_metadata, EmptyFeedbackMetadata) \
V(PropertyCell, empty_property_cell, EmptyPropertyCell) \
V(WeakCell, empty_weak_cell, EmptyWeakCell) \
+ V(Cell, invalid_prototype_validity_cell, InvalidPrototypeValidityCell) \
V(InterceptorInfo, noop_interceptor_info, NoOpInterceptorInfo) \
+ V(WeakFixedArray, empty_weak_fixed_array, EmptyWeakFixedArray) \
/* Protectors */ \
V(Cell, array_constructor_protector, ArrayConstructorProtector) \
V(PropertyCell, no_elements_protector, NoElementsProtector) \
V(Cell, is_concat_spreadable_protector, IsConcatSpreadableProtector) \
- V(PropertyCell, species_protector, SpeciesProtector) \
+ V(PropertyCell, array_species_protector, ArraySpeciesProtector) \
+ V(PropertyCell, typed_array_species_protector, TypedArraySpeciesProtector) \
+ V(PropertyCell, promise_species_protector, PromiseSpeciesProtector) \
V(Cell, string_length_protector, StringLengthProtector) \
- V(Cell, fast_array_iteration_protector, FastArrayIterationProtector) \
V(PropertyCell, array_iterator_protector, ArrayIteratorProtector) \
V(PropertyCell, array_buffer_neutering_protector, \
ArrayBufferNeuteringProtector) \
V(PropertyCell, promise_hook_protector, PromiseHookProtector) \
+ V(Cell, promise_resolve_protector, PromiseResolveProtector) \
V(PropertyCell, promise_then_protector, PromiseThenProtector) \
/* Special numbers */ \
V(HeapNumber, nan_value, NanValue) \
@@ -244,12 +259,6 @@ using v8::MemoryPressureLevel;
V(FixedArray, detached_contexts, DetachedContexts) \
V(HeapObject, retaining_path_targets, RetainingPathTargets) \
V(ArrayList, retained_maps, RetainedMaps) \
- V(WeakHashTable, weak_object_to_code_table, WeakObjectToCodeTable) \
- /* weak_new_space_object_to_code_list is an array of weak cells, where */ \
- /* slots with even indices refer to the weak object, and the subsequent */ \
- /* slots refer to the code with the reference to the weak object. */ \
- V(ArrayList, weak_new_space_object_to_code_list, \
- WeakNewSpaceObjectToCodeList) \
/* Indirection lists for isolate-independent builtins */ \
V(FixedArray, builtins_constants_table, BuiltinsConstantsTable) \
/* Feedback vectors that we need for code coverage or type profile */ \
@@ -275,6 +284,7 @@ using v8::MemoryPressureLevel;
V(Smi, stack_limit, StackLimit) \
V(Smi, real_stack_limit, RealStackLimit) \
V(Smi, last_script_id, LastScriptId) \
+ V(Smi, last_debugging_id, LastDebuggingId) \
V(Smi, hash_seed, HashSeed) \
/* To distinguish the function templates, so that we can find them in the */ \
/* function cache of the native context. */ \
@@ -299,15 +309,16 @@ using v8::MemoryPressureLevel;
V(ArgumentsMarkerMap) \
V(ArrayBufferNeuteringProtector) \
V(ArrayIteratorProtector) \
- V(NoElementsProtector) \
V(BigIntMap) \
V(BlockContextMap) \
+ V(BoilerplateDescriptionMap) \
V(BooleanMap) \
V(ByteArrayMap) \
V(BytecodeArrayMap) \
V(CatchContextMap) \
V(CellMap) \
V(CodeMap) \
+ V(DebugEvaluateContextMap) \
V(DescriptorArrayMap) \
V(EmptyByteArray) \
V(EmptyDescriptorArray) \
@@ -328,12 +339,10 @@ using v8::MemoryPressureLevel;
V(EmptyScript) \
V(EmptySloppyArgumentsElements) \
V(EmptySlowElementDictionary) \
- V(empty_string) \
V(EmptyWeakCell) \
V(EvalContextMap) \
V(Exception) \
V(FalseValue) \
- V(FastArrayIterationProtector) \
V(FixedArrayMap) \
V(FixedCOWArrayMap) \
V(FixedDoubleArrayMap) \
@@ -347,9 +356,9 @@ using v8::MemoryPressureLevel;
V(HoleNanValue) \
V(InfinityValue) \
V(IsConcatSpreadableProtector) \
+ V(JSMessageObjectMap) \
V(JsConstructEntryCode) \
V(JsEntryCode) \
- V(JSMessageObjectMap) \
V(ManyClosuresCell) \
V(ManyClosuresCellMap) \
V(MetaMap) \
@@ -362,6 +371,7 @@ using v8::MemoryPressureLevel;
V(NanValue) \
V(NativeContextMap) \
V(NoClosuresCellMap) \
+ V(NoElementsProtector) \
V(NullMap) \
V(NullValue) \
V(NumberDictionaryMap) \
@@ -373,12 +383,15 @@ using v8::MemoryPressureLevel;
V(PropertyArrayMap) \
V(ScopeInfoMap) \
V(ScriptContextMap) \
+ V(ScriptContextTableMap) \
V(SharedFunctionInfoMap) \
V(SimpleNumberDictionaryMap) \
V(SloppyArgumentsElementsMap) \
V(SmallOrderedHashMapMap) \
V(SmallOrderedHashSetMap) \
- V(SpeciesProtector) \
+ V(ArraySpeciesProtector) \
+ V(TypedArraySpeciesProtector) \
+ V(PromiseSpeciesProtector) \
V(StaleRegister) \
V(StringLengthProtector) \
V(StringTableMap) \
@@ -394,8 +407,9 @@ using v8::MemoryPressureLevel;
V(UninitializedMap) \
V(UninitializedValue) \
V(WeakCellMap) \
- V(WeakHashTableMap) \
+ V(WeakFixedArrayMap) \
V(WithContextMap) \
+ V(empty_string) \
PRIVATE_SYMBOL_LIST(V)
#define FIXED_ARRAY_ELEMENTS_WRITE_BARRIER(heap, array, start, length) \
@@ -412,6 +426,7 @@ class GCIdleTimeAction;
class GCIdleTimeHandler;
class GCIdleTimeHeapState;
class GCTracer;
+class HeapObjectAllocationTracker;
class HeapObjectsFilter;
class HeapStats;
class HistogramTimer;
@@ -442,6 +457,8 @@ enum ArrayStorageAllocationMode {
enum class ClearRecordedSlots { kYes, kNo };
+enum class ClearFreedMemoryMode { kClearFreedMemory, kDontClearFreedMemory };
+
enum class FixedArrayVisitationMode { kRegular, kIncremental };
enum class TraceRetainingPathMode { kEnabled, kDisabled };
@@ -671,11 +688,7 @@ class Heap {
// given alignment.
static int GetFillToAlign(Address address, AllocationAlignment alignment);
- template <typename T>
- static inline bool IsOneByte(T t, int chars);
-
- static void FatalProcessOutOfMemory(const char* location,
- bool is_heap_oom = false);
+ void FatalProcessOutOfMemory(const char* location);
V8_EXPORT_PRIVATE static bool RootIsImmortalImmovable(int root_index);
@@ -704,7 +717,11 @@ class Heap {
}
static inline GarbageCollector YoungGenerationCollector() {
+#if ENABLE_MINOR_MC
return (FLAG_minor_mc) ? MINOR_MARK_COMPACTOR : SCAVENGER;
+#else
+ return SCAVENGER;
+#endif // ENABLE_MINOR_MC
}
static inline const char* CollectorName(GarbageCollector collector) {
@@ -748,9 +765,13 @@ class Heap {
// Initialize a filler object to keep the ability to iterate over the heap
// when introducing gaps within pages. If slots could have been recorded in
// the freed area, then pass ClearRecordedSlots::kYes as the mode. Otherwise,
- // pass ClearRecordedSlots::kNo.
- V8_EXPORT_PRIVATE HeapObject* CreateFillerObjectAt(Address addr, int size,
- ClearRecordedSlots mode);
+ // pass ClearRecordedSlots::kNo. If the memory after the object header of
+ // the filler should be cleared, pass in kClearFreedMemory. The default is
+ // kDontClearFreedMemory.
+ V8_EXPORT_PRIVATE HeapObject* CreateFillerObjectAt(
+ Address addr, int size, ClearRecordedSlots clear_slots_mode,
+ ClearFreedMemoryMode clear_memory_mode =
+ ClearFreedMemoryMode::kDontClearFreedMemory);
bool CanMoveObjectStart(HeapObject* object);
@@ -819,8 +840,26 @@ class Heap {
code_space_memory_modification_scope_depth_--;
}
+ void UnprotectAndRegisterMemoryChunk(MemoryChunk* chunk);
+ void UnprotectAndRegisterMemoryChunk(HeapObject* object);
+ void UnregisterUnprotectedMemoryChunk(MemoryChunk* chunk);
+ V8_EXPORT_PRIVATE void ProtectUnprotectedMemoryChunks();
+
+ void EnableUnprotectedMemoryChunksRegistry() {
+ unprotected_memory_chunks_registry_enabled_ = true;
+ }
+
+ void DisableUnprotectedMemoryChunksRegistry() {
+ unprotected_memory_chunks_registry_enabled_ = false;
+ }
+
+ bool unprotected_memory_chunks_registry_enabled() {
+ return unprotected_memory_chunks_registry_enabled_;
+ }
+
inline HeapState gc_state() { return gc_state_; }
void SetGCState(HeapState state);
+ bool IsTearingDown() const { return gc_state_ == TEAR_DOWN; }
inline bool IsInGCPostProcessing() { return gc_post_processing_depth_ > 0; }
@@ -846,8 +885,9 @@ class Heap {
bool is_isolate_locked);
void CheckMemoryPressure();
- void SetOutOfMemoryCallback(v8::debug::OutOfMemoryCallback callback,
- void* data);
+ void AddNearHeapLimitCallback(v8::NearHeapLimitCallback, void* data);
+ void RemoveNearHeapLimitCallback(v8::NearHeapLimitCallback callback,
+ size_t heap_limit);
double MonotonicallyIncreasingTimeInMs();
@@ -867,6 +907,7 @@ class Heap {
inline uint32_t HashSeed();
inline int NextScriptId();
+ inline int NextDebuggingId();
inline int GetNextTemplateSerialNumber();
void SetSerializedObjects(FixedArray* objects);
@@ -889,15 +930,7 @@ class Heap {
external_memory_concurrently_freed_.SetValue(0);
}
- void AddWeakNewSpaceObjectToCodeDependency(Handle<HeapObject> obj,
- Handle<WeakCell> code);
-
- void AddWeakObjectToCodeDependency(Handle<HeapObject> obj,
- Handle<DependentCode> dep);
-
- DependentCode* LookupWeakObjectToCodeDependency(Handle<HeapObject> obj);
-
- void CompactWeakFixedArrays();
+ void CompactFixedArraysOfWeakCells();
void AddRetainedMap(Handle<Map> map);
@@ -925,28 +958,11 @@ class Heap {
return memory_pressure_level_.Value() != MemoryPressureLevel::kNone;
}
- size_t HeapLimitForDebugging() {
- const size_t kDebugHeapSizeFactor = 4;
- size_t max_limit = std::numeric_limits<size_t>::max() / 4;
- return Min(max_limit,
- initial_max_old_generation_size_ * kDebugHeapSizeFactor);
- }
-
- void IncreaseHeapLimitForDebugging() {
- max_old_generation_size_ =
- Max(max_old_generation_size_, HeapLimitForDebugging());
- }
-
- void RestoreOriginalHeapLimit() {
+ void RestoreHeapLimit(size_t heap_limit) {
// Do not set the limit lower than the live size + some slack.
size_t min_limit = SizeOfObjects() + SizeOfObjects() / 4;
max_old_generation_size_ =
- Min(max_old_generation_size_,
- Max(initial_max_old_generation_size_, min_limit));
- }
-
- bool IsHeapLimitIncreasedForDebugging() {
- return max_old_generation_size_ == HeapLimitForDebugging();
+ Min(max_old_generation_size_, Max(heap_limit, min_limit));
}
// ===========================================================================
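
RestoreHeapLimit clamps the restored limit so it never drops below the live size plus 25% slack, and never raises the current limit. A small numeric sketch of that clamp, with Min/Max spelled out via <algorithm> and the sizes chosen purely for illustration:

#include <algorithm>
#include <cstddef>
#include <cstdio>

size_t RestoredLimit(size_t current_max, size_t requested_limit,
                     size_t live_size) {
  // Do not set the limit lower than the live size + some slack (25%).
  size_t min_limit = live_size + live_size / 4;
  return std::min(current_max, std::max(requested_limit, min_limit));
}

int main() {
  // Assume a 1 GB current limit, a request to restore 256 MB, and 400 MB live.
  size_t limit = RestoredLimit(1024u * 1024 * 1024, 256u * 1024 * 1024,
                               400u * 1024 * 1024);
  std::printf("restored limit: %zu MB\n", limit / (1024 * 1024));  // 500 MB
}
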
@@ -977,16 +993,15 @@ class Heap {
// Create ObjectStats if live_object_stats_ or dead_object_stats_ are nullptr.
void CreateObjectStats();
+ // Sets the TearDown state, so no new GC tasks get posted.
+ void StartTearDown();
+
// Destroys all memory allocated by the heap.
void TearDown();
// Returns whether SetUp has been called.
bool HasBeenSetUp();
- void stop_using_tasks() { use_tasks_ = false; }
-
- bool use_tasks() const { return use_tasks_; }
-
// ===========================================================================
// Getters for spaces. =======================================================
// ===========================================================================
@@ -998,6 +1013,7 @@ class Heap {
OldSpace* code_space() { return code_space_; }
MapSpace* map_space() { return map_space_; }
LargeObjectSpace* lo_space() { return lo_space_; }
+ ReadOnlySpace* read_only_space() { return read_only_space_; }
inline PagedSpace* paged_space(int idx);
inline Space* space(int idx);
@@ -1080,6 +1096,15 @@ class Heap {
// Generated code can embed this address to get access to the roots.
Object** roots_array_start() { return roots_; }
+ ExternalReferenceTable* external_reference_table() {
+ DCHECK(external_reference_table_.is_initialized());
+ return &external_reference_table_;
+ }
+
+ static constexpr int roots_to_external_reference_table_offset() {
+ return kRootsExternalReferenceTableOffset;
+ }
+
// Sets the stub_cache_ (only used when expanding the dictionary).
void SetRootCodeStubs(SimpleNumberDictionary* value);
@@ -1116,9 +1141,7 @@ class Heap {
bool RootCanBeTreatedAsConstant(RootListIndex root_index);
Map* MapForFixedTypedArray(ExternalArrayType array_type);
- RootListIndex RootIndexForFixedTypedArray(ExternalArrayType array_type);
-
- RootListIndex RootIndexForEmptyFixedTypedArray(ElementsKind kind);
+ Map* MapForFixedTypedArray(ElementsKind elements_kind);
FixedTypedArrayBase* EmptyFixedTypedArrayForMap(const Map* map);
void RegisterStrongRoots(Object** start, Object** end);
@@ -1197,6 +1220,8 @@ class Heap {
// ===========================================================================
// Write barrier support for object[offset] = o;
+ inline void RecordWrite(Object* object, MaybeObject** slot,
+ MaybeObject* value);
inline void RecordWrite(Object* object, Object** slot, Object* value);
inline void RecordWriteIntoCode(Code* host, RelocInfo* rinfo, Object* target);
void RecordWriteIntoCodeSlow(Code* host, RelocInfo* rinfo, Object* target);
@@ -1222,6 +1247,11 @@ class Heap {
// Incremental marking API. ==================================================
// ===========================================================================
+ int GCFlagsForIncrementalMarking() {
+ return ShouldOptimizeForMemoryUsage() ? kReduceMemoryFootprintMask
+ : kNoGCFlags;
+ }
+
// Start incremental marking and ensure that idle time handler can perform
// incremental steps.
void StartIdleIncrementalMarking(
@@ -1315,8 +1345,14 @@ class Heap {
// Returns whether the object resides in new space.
inline bool InNewSpace(Object* object);
+ inline bool InNewSpace(MaybeObject* object);
+ inline bool InNewSpace(HeapObject* heap_object);
inline bool InFromSpace(Object* object);
+ inline bool InFromSpace(MaybeObject* object);
+ inline bool InFromSpace(HeapObject* heap_object);
inline bool InToSpace(Object* object);
+ inline bool InToSpace(MaybeObject* object);
+ inline bool InToSpace(HeapObject* heap_object);
// Returns whether the object resides in old space.
inline bool InOldSpace(Object* object);
@@ -1373,9 +1409,7 @@ class Heap {
// ===========================================================================
// Returns the maximum amount of memory reserved for the heap.
- size_t MaxReserved() {
- return 2 * max_semi_space_size_ + max_old_generation_size_;
- }
+ size_t MaxReserved();
size_t MaxSemiSpaceSize() { return max_semi_space_size_; }
size_t InitialSemiSpaceSize() { return initial_semispace_size_; }
size_t MaxOldGenerationSize() { return max_old_generation_size_; }
@@ -1541,16 +1575,15 @@ class Heap {
// ===========================================================================
// Creates a filler object and returns a heap object immediately after it.
- MUST_USE_RESULT HeapObject* PrecedeWithFiller(HeapObject* object,
- int filler_size);
+ V8_WARN_UNUSED_RESULT HeapObject* PrecedeWithFiller(HeapObject* object,
+ int filler_size);
// Creates a filler object if needed for alignment and returns a heap object
// immediately after it. If any space is left after the returned object,
// another filler object is created so the over allocated memory is iterable.
- MUST_USE_RESULT HeapObject* AlignWithFiller(HeapObject* object,
- int object_size,
- int allocation_size,
- AllocationAlignment alignment);
+ V8_WARN_UNUSED_RESULT HeapObject* AlignWithFiller(
+ HeapObject* object, int object_size, int allocation_size,
+ AllocationAlignment alignment);
// ===========================================================================
// ArrayBuffer tracking. =====================================================
@@ -1598,6 +1631,15 @@ class Heap {
}
// ===========================================================================
+ // Heap object allocation tracking. ==========================================
+ // ===========================================================================
+
+ void AddHeapObjectAllocationTracker(HeapObjectAllocationTracker* tracker);
+ void RemoveHeapObjectAllocationTracker(HeapObjectAllocationTracker* tracker);
+ bool has_heap_object_allocation_tracker() const {
+ return !allocation_trackers_.empty();
+ }
+
// Retaining path tracking. ==================================================
// ===========================================================================
@@ -1741,7 +1783,7 @@ class Heap {
void* data;
};
- static const int kInitialStringTableSize = 2048;
+ static const int kInitialStringTableSize = StringTable::kMinCapacity;
static const int kInitialEvalCacheSize = 64;
static const int kInitialNumberStringCacheSize = 256;
@@ -1827,15 +1869,6 @@ class Heap {
inline void UpdateOldSpaceLimits();
- // Initializes a JSObject based on its map.
- void InitializeJSObjectFromMap(JSObject* obj, Object* properties, Map* map);
-
- // Initializes JSObject body starting at given offset.
- void InitializeJSObjectBody(JSObject* obj, Map* map, int start_offset);
-
- void InitializeAllocationMemento(AllocationMemento* memento,
- AllocationSite* allocation_site);
-
bool CreateInitialMaps();
void CreateInternalAccessorInfoObjects();
void CreateInitialObjects();
@@ -1857,6 +1890,9 @@ class Heap {
// Fill in bogus values in from space
void ZapFromSpace();
+ // Zaps the memory of a code object.
+ void ZapCodeObject(Address start_address, int size_in_bytes);
+
  // Deopts all code that contains allocation instructions which are tenured or
  // not tenured. Moreover, it clears the pretenuring allocation site statistics.
void ResetAllAllocationSitesDependentCode(PretenureFlag flag);
@@ -1910,7 +1946,7 @@ class Heap {
void CollectGarbageOnMemoryPressure();
- void InvokeOutOfMemoryCallback();
+ bool InvokeNearHeapLimitCallback();
void ComputeFastPromotionMode(double survival_rate);
@@ -1927,6 +1963,7 @@ class Heap {
// - GCFinalizeMCReduceMemory: finalization of incremental full GC with
// memory reduction
HistogramTimer* GCTypeTimer(GarbageCollector collector);
+ HistogramTimer* GCTypePriorityTimer(GarbageCollector collector);
// ===========================================================================
// Pretenuring. ==============================================================
@@ -2005,6 +2042,11 @@ class Heap {
bool MaximumSizeScavenge() { return maximum_size_scavenges_ > 0; }
+ bool IsIneffectiveMarkCompact(size_t old_generation_size,
+ double mutator_utilization);
+ void CheckIneffectiveMarkCompact(size_t old_generation_size,
+ double mutator_utilization);
+
// ===========================================================================
// Growing strategy. =========================================================
// ===========================================================================
@@ -2041,10 +2083,6 @@ class Heap {
bool CanExpandOldGeneration(size_t size);
- bool IsCloseToOutOfMemory(size_t slack) {
- return OldGenerationCapacity() + slack >= MaxOldGenerationSize();
- }
-
bool ShouldExpandOldGenerationOnSlowAllocation();
enum class IncrementalMarkingLimit { kNoLimit, kSoftLimit, kHardLimit };
@@ -2071,263 +2109,36 @@ class Heap {
// Allocation methods. =======================================================
// ===========================================================================
- // Returns a deep copy of the JavaScript object.
- // Properties and elements are copied too.
- // Optionally takes an AllocationSite to be appended in an AllocationMemento.
- MUST_USE_RESULT AllocationResult CopyJSObject(JSObject* source,
- AllocationSite* site = nullptr);
-
// Allocates a JS Map in the heap.
- MUST_USE_RESULT AllocationResult
+ V8_WARN_UNUSED_RESULT AllocationResult
AllocateMap(InstanceType instance_type, int instance_size,
ElementsKind elements_kind = TERMINAL_FAST_ELEMENTS_KIND,
int inobject_properties = 0);
- // Allocates and initializes a new JavaScript object based on a
- // constructor.
- // If allocation_site is non-null, then a memento is emitted after the object
- // that points to the site.
- MUST_USE_RESULT AllocationResult AllocateJSObject(
- JSFunction* constructor, PretenureFlag pretenure = NOT_TENURED,
- AllocationSite* allocation_site = nullptr);
-
- // Allocates and initializes a new JavaScript object based on a map.
- // Passing an allocation site means that a memento will be created that
- // points to the site.
- MUST_USE_RESULT AllocationResult
- AllocateJSObjectFromMap(Map* map, PretenureFlag pretenure = NOT_TENURED,
- AllocationSite* allocation_site = nullptr);
-
- // Allocates a HeapNumber from value.
- MUST_USE_RESULT AllocationResult AllocateHeapNumber(
- MutableMode mode = IMMUTABLE, PretenureFlag pretenure = NOT_TENURED);
-
- MUST_USE_RESULT AllocationResult
- AllocateBigInt(int length, PretenureFlag pretenure = NOT_TENURED);
-
- // Allocates a byte array of the specified length
- MUST_USE_RESULT AllocationResult
- AllocateByteArray(int length, PretenureFlag pretenure = NOT_TENURED);
-
- // Allocates a bytecode array with given contents.
- MUST_USE_RESULT AllocationResult
- AllocateBytecodeArray(int length, const byte* raw_bytecodes, int frame_size,
- int parameter_count, FixedArray* constant_pool);
-
- MUST_USE_RESULT AllocationResult CopyCode(Code* code,
- CodeDataContainer* data_container);
-
- MUST_USE_RESULT AllocationResult
- CopyBytecodeArray(BytecodeArray* bytecode_array);
-
- // Allocates a fixed array-like object with given map and initialized with
- // undefined values.
- MUST_USE_RESULT inline AllocationResult AllocateFixedArrayWithMap(
- RootListIndex map_root_index, int length,
- PretenureFlag pretenure = NOT_TENURED);
-
- // Allocates a fixed array initialized with undefined values
- MUST_USE_RESULT inline AllocationResult AllocateFixedArray(
- int length, PretenureFlag pretenure = NOT_TENURED);
-
- // Allocates a property array initialized with undefined values
- MUST_USE_RESULT AllocationResult
- AllocatePropertyArray(int length, PretenureFlag pretenure = NOT_TENURED);
-
- // Allocate a feedback vector for the given shared function info. The slots
- // are pre-filled with undefined.
- MUST_USE_RESULT AllocationResult
- AllocateFeedbackVector(SharedFunctionInfo* shared, PretenureFlag pretenure);
-
- // Allocate an uninitialized feedback vector.
- MUST_USE_RESULT AllocationResult
- AllocateRawFeedbackVector(int length, PretenureFlag pretenure);
-
- MUST_USE_RESULT AllocationResult AllocateSmallOrderedHashSet(
- int length, PretenureFlag pretenure = NOT_TENURED);
- MUST_USE_RESULT AllocationResult AllocateSmallOrderedHashMap(
- int length, PretenureFlag pretenure = NOT_TENURED);
-
// Allocate an uninitialized object. The memory is non-executable if the
// hardware and OS allow. This is the single choke-point for allocations
// performed by the runtime and should not be bypassed (to extend this to
// inlined allocations, use the Heap::DisableInlineAllocation() support).
- MUST_USE_RESULT inline AllocationResult AllocateRaw(
+ V8_WARN_UNUSED_RESULT inline AllocationResult AllocateRaw(
int size_in_bytes, AllocationSpace space,
AllocationAlignment aligment = kWordAligned);
+ HeapObject* AllocateRawWithRetry(
+ int size, AllocationSpace space,
+ AllocationAlignment alignment = kWordAligned);
+ HeapObject* AllocateRawCodeInLargeObjectSpace(int size);
+
// Allocates a heap object based on the map.
- MUST_USE_RESULT AllocationResult
- Allocate(Map* map, AllocationSpace space,
- AllocationSite* allocation_site = nullptr);
+ V8_WARN_UNUSED_RESULT AllocationResult Allocate(Map* map,
+ AllocationSpace space);
// Allocates a partial map for bootstrapping.
- MUST_USE_RESULT AllocationResult
- AllocatePartialMap(InstanceType instance_type, int instance_size);
-
- // Allocate a block of memory in the given space (filled with a filler).
- // Used as a fall-back for generated code when the space is full.
- MUST_USE_RESULT AllocationResult
- AllocateFillerObject(int size, bool double_align, AllocationSpace space);
-
- // Allocate an uninitialized fixed array.
- MUST_USE_RESULT AllocationResult
- AllocateRawFixedArray(int length, PretenureFlag pretenure);
-
- // Allocate an uninitialized fixed double array.
- MUST_USE_RESULT AllocationResult
- AllocateRawFixedDoubleArray(int length, PretenureFlag pretenure);
-
- // Allocate an initialized fixed array with the given filler value.
- MUST_USE_RESULT AllocationResult
- AllocateFixedArrayWithFiller(RootListIndex map_root_index, int length,
- PretenureFlag pretenure, Object* filler);
-
- // Allocate and partially initializes a String. There are two String
- // encodings: one-byte and two-byte. These functions allocate a string of
- // the given length and set its map and length fields. The characters of
- // the string are uninitialized.
- MUST_USE_RESULT AllocationResult
- AllocateRawOneByteString(int length, PretenureFlag pretenure);
- MUST_USE_RESULT AllocationResult
- AllocateRawTwoByteString(int length, PretenureFlag pretenure);
-
- // Allocates an internalized string in old space based on the character
- // stream.
- MUST_USE_RESULT inline AllocationResult AllocateInternalizedStringFromUtf8(
- Vector<const char> str, int chars, uint32_t hash_field);
-
- MUST_USE_RESULT inline AllocationResult AllocateOneByteInternalizedString(
- Vector<const uint8_t> str, uint32_t hash_field);
-
- MUST_USE_RESULT inline AllocationResult AllocateTwoByteInternalizedString(
- Vector<const uc16> str, uint32_t hash_field);
-
- template <bool is_one_byte, typename T>
- MUST_USE_RESULT AllocationResult
- AllocateInternalizedStringImpl(T t, int chars, uint32_t hash_field);
-
- template <typename T>
- MUST_USE_RESULT inline AllocationResult AllocateInternalizedStringImpl(
- T t, int chars, uint32_t hash_field);
-
- // Allocates an uninitialized fixed array. It must be filled by the caller.
- MUST_USE_RESULT AllocationResult AllocateUninitializedFixedArray(
- int length, PretenureFlag pretenure = NOT_TENURED);
-
- // Make a copy of src and return it.
- MUST_USE_RESULT inline AllocationResult CopyFixedArray(FixedArray* src);
-
- // Make a copy of src, also grow the copy, and return the copy.
- template <typename T>
- MUST_USE_RESULT AllocationResult CopyArrayAndGrow(T* src, int grow_by,
- PretenureFlag pretenure);
-
- // Make a copy of src, also grow the copy, and return the copy.
- MUST_USE_RESULT AllocationResult CopyPropertyArrayAndGrow(
- PropertyArray* src, int grow_by, PretenureFlag pretenure);
-
- // Make a copy of src, also grow the copy, and return the copy.
- MUST_USE_RESULT AllocationResult CopyFixedArrayUpTo(FixedArray* src,
- int new_len,
- PretenureFlag pretenure);
-
- // Make a copy of src, set the map, and return the copy.
- template <typename T>
- MUST_USE_RESULT AllocationResult CopyArrayWithMap(T* src, Map* map);
-
- // Make a copy of src, set the map, and return the copy.
- MUST_USE_RESULT AllocationResult CopyFixedArrayWithMap(FixedArray* src,
- Map* map);
-
- // Make a copy of src, set the map, and return the copy.
- MUST_USE_RESULT AllocationResult CopyPropertyArray(PropertyArray* src);
-
- // Make a copy of src and return it.
- MUST_USE_RESULT inline AllocationResult CopyFixedDoubleArray(
- FixedDoubleArray* src);
-
- // Make a copy of src and return it.
- MUST_USE_RESULT AllocationResult CopyFeedbackVector(FeedbackVector* src);
-
- // Computes a single character string where the character has code.
- // A cache is used for one-byte (Latin1) codes.
- MUST_USE_RESULT AllocationResult
- LookupSingleCharacterStringFromCode(uint16_t code);
-
- // Allocate a symbol in old space.
- MUST_USE_RESULT AllocationResult AllocateSymbol();
-
- // Allocates an external array of the specified length and type.
- MUST_USE_RESULT AllocationResult AllocateFixedTypedArrayWithExternalPointer(
- int length, ExternalArrayType array_type, void* external_pointer,
- PretenureFlag pretenure);
-
- // Allocates a fixed typed array of the specified length and type.
- MUST_USE_RESULT AllocationResult
- AllocateFixedTypedArray(int length, ExternalArrayType array_type,
- bool initialize, PretenureFlag pretenure);
-
- // Make a copy of src and return it.
- MUST_USE_RESULT AllocationResult CopyAndTenureFixedCOWArray(FixedArray* src);
-
- // Make a copy of src, set the map, and return the copy.
- MUST_USE_RESULT AllocationResult
- CopyFixedDoubleArrayWithMap(FixedDoubleArray* src, Map* map);
-
- // Allocates a fixed double array with uninitialized values. Returns
- MUST_USE_RESULT AllocationResult AllocateUninitializedFixedDoubleArray(
- int length, PretenureFlag pretenure = NOT_TENURED);
-
- // Allocate empty fixed array.
- MUST_USE_RESULT AllocationResult AllocateEmptyFixedArray();
-
- // Allocate empty scope info.
- MUST_USE_RESULT AllocationResult AllocateEmptyScopeInfo();
+ V8_WARN_UNUSED_RESULT AllocationResult
+ AllocatePartialMap(InstanceType instance_type, int instance_size);
// Allocate empty fixed typed array of given type.
- MUST_USE_RESULT AllocationResult
- AllocateEmptyFixedTypedArray(ExternalArrayType array_type);
-
- // Allocate a tenured simple cell.
- MUST_USE_RESULT AllocationResult AllocateCell(Object* value);
-
- // Allocate a tenured simple feedback cell.
- MUST_USE_RESULT AllocationResult AllocateFeedbackCell(Map* map,
- HeapObject* value);
-
- // Allocate a tenured JS global property cell initialized with the hole.
- MUST_USE_RESULT AllocationResult AllocatePropertyCell(Name* name);
-
- MUST_USE_RESULT AllocationResult AllocateWeakCell(HeapObject* value);
-
- MUST_USE_RESULT AllocationResult AllocateTransitionArray(int capacity);
-
- // Allocates a new utility object in the old generation.
- MUST_USE_RESULT AllocationResult
- AllocateStruct(InstanceType type, PretenureFlag pretenure = NOT_TENURED);
-
- // Allocates a new foreign object.
- MUST_USE_RESULT AllocationResult
- AllocateForeign(Address address, PretenureFlag pretenure = NOT_TENURED);
-
- // Allocates a new code object (mostly uninitialized). Can only be used when
- // code space is unprotected and requires manual initialization by the caller.
- MUST_USE_RESULT AllocationResult AllocateCode(int object_size,
- Movability movability);
-
- // Allocates a new code object (fully initialized). All header fields of the
- // returned object are immutable and the code object is write protected.
- MUST_USE_RESULT AllocationResult AllocateCode(
- const CodeDesc& desc, Code::Kind kind, Handle<Object> self_ref,
- int32_t builtin_index, ByteArray* reloc_info,
- CodeDataContainer* data_container, ByteArray* source_position_table,
- DeoptimizationData* deopt_data, Movability movability, uint32_t stub_key,
- bool is_turbofanned, int stack_slots, int safepoint_table_offset,
- int handler_table_offset);
-
- MUST_USE_RESULT AllocationResult AllocateJSPromise(
- JSFunction* constructor, PretenureFlag pretenure = NOT_TENURED);
+ V8_WARN_UNUSED_RESULT AllocationResult
+ AllocateEmptyFixedTypedArray(ExternalArrayType array_type);
void set_force_oom(bool value) { force_oom_ = value; }
@@ -2361,6 +2172,13 @@ class Heap {
Object* roots_[kRootListLength];
+ // This table is accessed from builtin code compiled into the snapshot, and
+ // thus its offset from roots_ must remain static. This is verified in
+ // Isolate::Init() using runtime checks.
+ static constexpr int kRootsExternalReferenceTableOffset =
+ kRootListLength * kPointerSize;
+ ExternalReferenceTable external_reference_table_;
+
size_t code_range_size_;
size_t max_semi_space_size_;
size_t initial_semispace_size_;
@@ -2385,8 +2203,8 @@ class Heap {
// and reset by a mark-compact garbage collection.
base::AtomicValue<MemoryPressureLevel> memory_pressure_level_;
- v8::debug::OutOfMemoryCallback out_of_memory_callback_;
- void* out_of_memory_callback_data_;
+ std::vector<std::pair<v8::NearHeapLimitCallback, void*> >
+ near_heap_limit_callbacks_;
// For keeping track of context disposals.
int contexts_disposed_;
@@ -2401,6 +2219,7 @@ class Heap {
OldSpace* code_space_;
MapSpace* map_space_;
LargeObjectSpace* lo_space_;
+ ReadOnlySpace* read_only_space_;
// Map from the space id to the space.
Space* space_[LAST_SPACE + 1];
@@ -2446,6 +2265,10 @@ class Heap {
// How many gc happened.
unsigned int gc_count_;
+ // The number of Mark-Compact garbage collections that are considered as
+ // ineffective. See IsIneffectiveMarkCompact() predicate.
+ int consecutive_ineffective_mark_compacts_;
+
static const uintptr_t kMmapRegionMask = 0xFFFFFFFFu;
uintptr_t mmap_region_base_;
@@ -2590,14 +2413,16 @@ class Heap {
bool fast_promotion_mode_;
- bool use_tasks_;
-
// Used for testing purposes.
bool force_oom_;
bool delay_sweeper_tasks_for_testing_;
HeapObject* pending_layout_change_object_;
+ base::Mutex unprotected_memory_chunks_mutex_;
+ std::unordered_set<MemoryChunk*> unprotected_memory_chunks_;
+ bool unprotected_memory_chunks_registry_enabled_;
+
#ifdef V8_ENABLE_ALLOCATION_TIMEOUT
// If the --gc-interval flag is set to a positive value, this
// variable holds the value indicating the number of allocations
@@ -2614,6 +2439,8 @@ class Heap {
// stores the option of the corresponding target.
std::map<int, RetainingPathOption> retaining_path_target_option_;
+ std::vector<HeapObjectAllocationTracker*> allocation_trackers_;
+
// Classes in "heap" can be friends.
friend class AlwaysAllocateScope;
friend class ConcurrentMarking;
@@ -2704,6 +2531,18 @@ class CodeSpaceMemoryModificationScope {
Heap* heap_;
};
+// The CodePageCollectionMemoryModificationScope can only be used by the main
+// thread. It will not be enabled if a CodeSpaceMemoryModificationScope is
+// already active.
+class CodePageCollectionMemoryModificationScope {
+ public:
+ explicit inline CodePageCollectionMemoryModificationScope(Heap* heap);
+ inline ~CodePageCollectionMemoryModificationScope();
+
+ private:
+ Heap* heap_;
+};
+
 // The CodePageMemoryModificationScope does not check if transitions to
// writeable and back to executable are actually allowed, i.e. the MemoryChunk
// was registered to be executable. It can be used by concurrent threads.
@@ -2729,11 +2568,14 @@ class CodePageMemoryModificationScope {
class VerifyPointersVisitor : public ObjectVisitor, public RootVisitor {
public:
void VisitPointers(HeapObject* host, Object** start, Object** end) override;
+ void VisitPointers(HeapObject* host, MaybeObject** start,
+ MaybeObject** end) override;
void VisitRootPointers(Root root, const char* description, Object** start,
Object** end) override;
- private:
- void VerifyPointers(Object** start, Object** end);
+ protected:
+ virtual void VerifyPointers(HeapObject* host, MaybeObject** start,
+ MaybeObject** end);
};
@@ -2745,11 +2587,17 @@ class VerifySmisVisitor : public RootVisitor {
};
// Space iterator for iterating over all the paged spaces of the heap: Map
-// space, old space, code space and cell space. Returns
-// each space in turn, and null when it is done.
+// space, old space, code space and optionally read only space. Returns each
+// space in turn, and null when it is done.
class V8_EXPORT_PRIVATE PagedSpaces BASE_EMBEDDED {
public:
- explicit PagedSpaces(Heap* heap) : heap_(heap), counter_(OLD_SPACE) {}
+ enum class SpacesSpecifier { kSweepablePagedSpaces, kAllPagedSpaces };
+
+ explicit PagedSpaces(Heap* heap, SpacesSpecifier specifier =
+ SpacesSpecifier::kSweepablePagedSpaces)
+ : heap_(heap),
+ counter_(specifier == SpacesSpecifier::kAllPagedSpaces ? RO_SPACE
+ : OLD_SPACE) {}
PagedSpace* next();
private:
@@ -2865,6 +2713,16 @@ class AllocationObserver {
V8_EXPORT_PRIVATE const char* AllocationSpaceName(AllocationSpace space);
+// -----------------------------------------------------------------------------
+// Allows observation of heap object allocations.
+class HeapObjectAllocationTracker {
+ public:
+ virtual void AllocationEvent(Address addr, int size) = 0;
+ virtual void MoveEvent(Address from, Address to, int size) {}
+ virtual void UpdateObjectSizeEvent(Address addr, int size) {}
+ virtual ~HeapObjectAllocationTracker() = default;
+};
+
} // namespace internal
} // namespace v8
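
The heap.h changes above add a public hook for observing allocations. As a rough, self-contained illustration of how a client of that hook might look, the sketch below mirrors the HeapObjectAllocationTracker interface and implements a byte-counting tracker; the Address alias and the CountingTracker class are assumptions made for the example and are not part of V8.

    #include <atomic>
    #include <cstdint>

    // Stand-in for v8::internal::Address; the real type is V8-internal.
    using Address = uintptr_t;

    // Interface shape as introduced in heap.h above.
    class HeapObjectAllocationTracker {
     public:
      virtual void AllocationEvent(Address addr, int size) = 0;
      virtual void MoveEvent(Address from, Address to, int size) {}
      virtual void UpdateObjectSizeEvent(Address addr, int size) {}
      virtual ~HeapObjectAllocationTracker() = default;
    };

    // Hypothetical tracker that tallies allocated bytes.
    class CountingTracker final : public HeapObjectAllocationTracker {
     public:
      void AllocationEvent(Address, int size) override {
        allocated_bytes_.fetch_add(static_cast<uint64_t>(size),
                                   std::memory_order_relaxed);
      }
      uint64_t allocated_bytes() const {
        return allocated_bytes_.load(std::memory_order_relaxed);
      }

     private:
      std::atomic<uint64_t> allocated_bytes_{0};
    };

A client would pass an instance to Heap::AddHeapObjectAllocationTracker() and remove it with RemoveHeapObjectAllocationTracker() before the tracker is destroyed.
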
diff --git a/deps/v8/src/heap/incremental-marking-inl.h b/deps/v8/src/heap/incremental-marking-inl.h
index 84f31ef350..b64c203a8d 100644
--- a/deps/v8/src/heap/incremental-marking-inl.h
+++ b/deps/v8/src/heap/incremental-marking-inl.h
@@ -7,6 +7,7 @@
#include "src/heap/incremental-marking.h"
#include "src/isolate.h"
+#include "src/objects/maybe-object.h"
namespace v8 {
namespace internal {
@@ -14,8 +15,21 @@ namespace internal {
void IncrementalMarking::RecordWrite(HeapObject* obj, Object** slot,
Object* value) {
- if (IsMarking() && value->IsHeapObject()) {
- RecordWriteSlow(obj, slot, value);
+ DCHECK_IMPLIES(slot != nullptr, !HasWeakHeapObjectTag(*slot));
+ DCHECK(!HasWeakHeapObjectTag(value));
+ RecordMaybeWeakWrite(obj, reinterpret_cast<MaybeObject**>(slot),
+ reinterpret_cast<MaybeObject*>(value));
+}
+
+void IncrementalMarking::RecordMaybeWeakWrite(HeapObject* obj,
+ MaybeObject** slot,
+ MaybeObject* value) {
+ // When writing a weak reference, treat it as strong for the purposes of the
+ // marking barrier.
+ HeapObject* heap_object;
+ if (IsMarking() && value->ToStrongOrWeakHeapObject(&heap_object)) {
+ RecordWriteSlow(obj, reinterpret_cast<HeapObjectReference**>(slot),
+ heap_object);
}
}
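
The new RecordMaybeWeakWrite barrier above relies on MaybeObject being able to report both strong and weak heap references through ToStrongOrWeakHeapObject(). The stand-alone sketch below shows the kind of tag check that distinguishes the cases; the tag constants (low bits 01 for strong, 11 for weak) are intended to mirror V8's pointer tagging but are hard-coded assumptions here, and the helper is illustrative rather than the real MaybeObject API.

    #include <cstdint>

    // Tagging scheme assumed for this sketch: low bits 01 mark a strong heap
    // pointer, low bits 11 mark a weak heap pointer.
    constexpr uintptr_t kTagMask = 0x3;
    constexpr uintptr_t kStrongTag = 0x1;
    constexpr uintptr_t kWeakTag = 0x3;

    struct HeapObject;  // opaque in this sketch

    // Analogue of MaybeObject::ToStrongOrWeakHeapObject(): succeeds for both
    // strong and weak references, which is why the barrier above can treat a
    // weak write as strong for marking purposes.
    inline bool ToStrongOrWeakHeapObject(uintptr_t maybe, HeapObject** out) {
      const uintptr_t tag = maybe & kTagMask;
      if (tag != kStrongTag && tag != kWeakTag) return false;  // Smi etc.
      *out = reinterpret_cast<HeapObject*>(maybe & ~kTagMask);
      return true;
    }
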
diff --git a/deps/v8/src/heap/incremental-marking-job.cc b/deps/v8/src/heap/incremental-marking-job.cc
index fa6082ae7c..7583aaaadf 100644
--- a/deps/v8/src/heap/incremental-marking-job.cc
+++ b/deps/v8/src/heap/incremental-marking-job.cc
@@ -21,7 +21,7 @@ void IncrementalMarkingJob::Start(Heap* heap) {
}
void IncrementalMarkingJob::ScheduleTask(Heap* heap) {
- if (!task_pending_ && heap->use_tasks()) {
+ if (!task_pending_ && !heap->IsTearingDown()) {
v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(heap->isolate());
task_pending_ = true;
auto task = new Task(heap->isolate(), this);
@@ -49,7 +49,7 @@ void IncrementalMarkingJob::Task::RunInternal() {
if (incremental_marking->IsStopped()) {
if (heap->IncrementalMarkingLimitReached() !=
Heap::IncrementalMarkingLimit::kNoLimit) {
- heap->StartIncrementalMarking(Heap::kNoGCFlags,
+ heap->StartIncrementalMarking(heap->GCFlagsForIncrementalMarking(),
GarbageCollectionReason::kIdleTask,
kGCCallbackScheduleIdleGarbageCollection);
}
diff --git a/deps/v8/src/heap/incremental-marking.cc b/deps/v8/src/heap/incremental-marking.cc
index a7b56e4315..2b693ed44e 100644
--- a/deps/v8/src/heap/incremental-marking.cc
+++ b/deps/v8/src/heap/incremental-marking.cc
@@ -53,9 +53,11 @@ void IncrementalMarking::Observer::Step(int bytes_allocated, Address addr,
}
IncrementalMarking::IncrementalMarking(
- Heap* heap, MarkCompactCollector::MarkingWorklist* marking_worklist)
+ Heap* heap, MarkCompactCollector::MarkingWorklist* marking_worklist,
+ WeakObjects* weak_objects)
: heap_(heap),
marking_worklist_(marking_worklist),
+ weak_objects_(weak_objects),
initial_old_generation_size_(0),
bytes_marked_ahead_of_schedule_(0),
bytes_marked_concurrently_(0),
@@ -91,8 +93,8 @@ bool IncrementalMarking::BaseRecordWrite(HeapObject* obj, Object* value) {
return is_compacting_ && need_recording;
}
-
-void IncrementalMarking::RecordWriteSlow(HeapObject* obj, Object** slot,
+void IncrementalMarking::RecordWriteSlow(HeapObject* obj,
+ HeapObjectReference** slot,
Object* value) {
if (BaseRecordWrite(obj, value) && slot != nullptr) {
// Object is not going to be rescanned we need to record the slot.
@@ -422,7 +424,7 @@ void IncrementalMarking::StartMarking() {
IncrementalMarkingRootMarkingVisitor visitor(this);
heap_->IterateStrongRoots(&visitor, VISIT_ONLY_STRONG);
- if (FLAG_concurrent_marking && heap_->use_tasks()) {
+ if (FLAG_concurrent_marking && !heap_->IsTearingDown()) {
heap_->concurrent_marking()->ScheduleTasks();
}
@@ -558,8 +560,6 @@ void IncrementalMarking::FinalizeIncrementally() {
// objects to reduce the marking load in the final pause.
// 1) We scan and mark the roots again to find all changes to the root set.
// 2) Age and retain maps embedded in optimized code.
- // 3) Remove weak cell with live values from the list of weak cells, they
- // do not need processing during GC.
MarkRoots();
 // Map retaining is needed for performance, not correctness,
@@ -588,8 +588,12 @@ void IncrementalMarking::UpdateMarkingWorklistAfterScavenge() {
Map* filler_map = heap_->one_pointer_filler_map();
+#ifdef ENABLE_MINOR_MC
MinorMarkCompactCollector::MarkingState* minor_marking_state =
heap()->minor_mark_compact_collector()->marking_state();
+#else
+ void* minor_marking_state = nullptr;
+#endif // ENABLE_MINOR_MC
marking_worklist()->Update([this, filler_map, minor_marking_state](
HeapObject* obj, HeapObject** out) -> bool {
@@ -613,19 +617,24 @@ void IncrementalMarking::UpdateMarkingWorklistAfterScavenge() {
// The object may be on a page that was moved in new space.
DCHECK(
Page::FromAddress(obj->address())->IsFlagSet(Page::SWEEP_TO_ITERATE));
+#ifdef ENABLE_MINOR_MC
if (minor_marking_state->IsGrey(obj)) {
*out = obj;
return true;
}
+#endif // ENABLE_MINOR_MC
return false;
} else {
- // The object may be on a page that was moved from new to old space.
+ // The object may be on a page that was moved from new to old space. Only
+ // applicable during minor MC garbage collections.
if (Page::FromAddress(obj->address())
->IsFlagSet(Page::SWEEP_TO_ITERATE)) {
+#ifdef ENABLE_MINOR_MC
if (minor_marking_state->IsGrey(obj)) {
*out = obj;
return true;
}
+#endif // ENABLE_MINOR_MC
return false;
}
DCHECK_IMPLIES(marking_state()->IsWhite(obj), obj->IsFiller());
@@ -638,6 +647,47 @@ void IncrementalMarking::UpdateMarkingWorklistAfterScavenge() {
return false;
}
});
+
+ UpdateWeakReferencesAfterScavenge();
+}
+
+void IncrementalMarking::UpdateWeakReferencesAfterScavenge() {
+ weak_objects_->weak_references.Update(
+ [](std::pair<HeapObject*, HeapObjectReference**> slot_in,
+ std::pair<HeapObject*, HeapObjectReference**>* slot_out) -> bool {
+ HeapObject* heap_obj = slot_in.first;
+ MapWord map_word = heap_obj->map_word();
+ if (map_word.IsForwardingAddress()) {
+ ptrdiff_t distance_to_slot =
+ reinterpret_cast<Address>(slot_in.second) -
+ reinterpret_cast<Address>(slot_in.first);
+ Address new_slot =
+ reinterpret_cast<Address>(map_word.ToForwardingAddress()) +
+ distance_to_slot;
+ slot_out->first = map_word.ToForwardingAddress();
+ slot_out->second = reinterpret_cast<HeapObjectReference**>(new_slot);
+ return true;
+ }
+ if (heap_obj->GetHeap()->InNewSpace(heap_obj)) {
+ // The new space object containing the weak reference died.
+ return false;
+ }
+ *slot_out = slot_in;
+ return true;
+ });
+ weak_objects_->weak_objects_in_code.Update(
+ [](std::pair<HeapObject*, Code*> slot_in,
+ std::pair<HeapObject*, Code*>* slot_out) -> bool {
+ HeapObject* heap_obj = slot_in.first;
+ MapWord map_word = heap_obj->map_word();
+ if (map_word.IsForwardingAddress()) {
+ slot_out->first = map_word.ToForwardingAddress();
+ slot_out->second = slot_in.second;
+ } else {
+ *slot_out = slot_in;
+ }
+ return true;
+ });
}
void IncrementalMarking::UpdateMarkedBytesAfterScavenge(
@@ -908,7 +958,7 @@ size_t IncrementalMarking::StepSizeToMakeProgress() {
const size_t kTargetStepCountAtOOM = 32;
size_t oom_slack = heap()->new_space()->Capacity() + 64 * MB;
- if (heap()->IsCloseToOutOfMemory(oom_slack)) {
+ if (!heap()->CanExpandOldGeneration(oom_slack)) {
return heap()->PromotedSpaceSizeOfObjects() / kTargetStepCountAtOOM;
}
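
UpdateWeakReferencesAfterScavenge() above rebases each recorded (host, slot) pair when the host object has been moved by the scavenger: the slot keeps its offset inside the object, now measured from the forwarding address. A minimal sketch of that arithmetic, using illustrative names rather than V8 types:

    #include <cstdint>

    // (host, slot) pair as recorded in the weak_references worklist; the
    // names are illustrative, not V8 types.
    struct WeakSlot {
      uintptr_t host;  // address of the object containing the weak reference
      uintptr_t slot;  // address of the field inside that object
    };

    // When the host has a forwarding address, the slot keeps its offset
    // relative to the start of the object, so the new slot address is the
    // forwarding address plus that offset.
    inline WeakSlot RebaseSlot(const WeakSlot& old_slot, uintptr_t forwarding) {
      const uintptr_t distance = old_slot.slot - old_slot.host;
      return WeakSlot{forwarding, forwarding + distance};
    }
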
diff --git a/deps/v8/src/heap/incremental-marking.h b/deps/v8/src/heap/incremental-marking.h
index 87a1751fd9..b9f6a66444 100644
--- a/deps/v8/src/heap/incremental-marking.h
+++ b/deps/v8/src/heap/incremental-marking.h
@@ -87,7 +87,8 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
#endif
IncrementalMarking(Heap* heap,
- MarkCompactCollector::MarkingWorklist* marking_worklist);
+ MarkCompactCollector::MarkingWorklist* marking_worklist,
+ WeakObjects* weak_objects);
MarkingState* marking_state() { return &marking_state_; }
@@ -165,6 +166,7 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
void FinalizeIncrementally();
void UpdateMarkingWorklistAfterScavenge();
+ void UpdateWeakReferencesAfterScavenge();
void UpdateMarkedBytesAfterScavenge(size_t dead_bytes_in_new_space);
void Hurry();
@@ -205,11 +207,14 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
// the incremental cycle (stays white).
V8_INLINE bool BaseRecordWrite(HeapObject* obj, Object* value);
V8_INLINE void RecordWrite(HeapObject* obj, Object** slot, Object* value);
+ V8_INLINE void RecordMaybeWeakWrite(HeapObject* obj, MaybeObject** slot,
+ MaybeObject* value);
V8_INLINE void RecordWriteIntoCode(Code* host, RelocInfo* rinfo,
Object* value);
V8_INLINE void RecordWrites(HeapObject* obj);
- void RecordWriteSlow(HeapObject* obj, Object** slot, Object* value);
+ void RecordWriteSlow(HeapObject* obj, HeapObjectReference** slot,
+ Object* value);
void RecordWriteIntoCodeSlow(Code* host, RelocInfo* rinfo, Object* value);
// Returns true if the function succeeds in transitioning the object
@@ -324,6 +329,7 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
Heap* const heap_;
MarkCompactCollector::MarkingWorklist* const marking_worklist_;
+ WeakObjects* weak_objects_;
double start_time_ms_;
size_t initial_old_generation_size_;
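
IncrementalMarking now receives a WeakObjects* so that weak slots discovered during marking can be parked and revisited once the liveness of the full transitive closure is known (and fixed up after a scavenge, as in the .cc change above). The sketch below shows the general shape of that hand-off, with a plain std::vector standing in for V8's Worklist type; all names are illustrative only.

    #include <utility>
    #include <vector>

    struct HeapObject;           // opaque in this sketch
    struct HeapObjectReference;  // opaque in this sketch

    // std::vector stands in for V8's Worklist; in V8 the weak_references
    // worklist lives in WeakObjects and is shared with the collector.
    struct WeakObjects {
      std::vector<std::pair<HeapObject*, HeapObjectReference**>> weak_references;
    };

    // A marker that cannot yet decide whether a weakly referenced value is
    // live parks the slot here and revisits it after marking finishes.
    struct MarkerSketch {
      explicit MarkerSketch(WeakObjects* weak_objects)
          : weak_objects_(weak_objects) {}
      void AddWeakReference(HeapObject* host, HeapObjectReference** slot) {
        weak_objects_->weak_references.emplace_back(host, slot);
      }
      WeakObjects* weak_objects_;
    };
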
diff --git a/deps/v8/src/heap/invalidated-slots-inl.h b/deps/v8/src/heap/invalidated-slots-inl.h
index 8ca289cf1a..577c4a5576 100644
--- a/deps/v8/src/heap/invalidated-slots-inl.h
+++ b/deps/v8/src/heap/invalidated-slots-inl.h
@@ -61,7 +61,7 @@ bool InvalidatedSlotsFilter::IsValid(Address slot) {
// we can return true here.
return true;
}
- return invalidated_object_->IsValidSlot(offset);
+ return invalidated_object_->IsValidSlot(invalidated_object_->map(), offset);
}
} // namespace internal
diff --git a/deps/v8/src/heap/item-parallel-job.cc b/deps/v8/src/heap/item-parallel-job.cc
index 1c8d4c8ac4..e909ef69d7 100644
--- a/deps/v8/src/heap/item-parallel-job.cc
+++ b/deps/v8/src/heap/item-parallel-job.cc
@@ -90,10 +90,11 @@ void ItemParallelJob::Run(std::shared_ptr<Counters> async_counters) {
: 0;
CancelableTaskManager::Id* task_ids =
new CancelableTaskManager::Id[num_tasks];
- Task* main_task = nullptr;
+ std::unique_ptr<Task> main_task;
for (size_t i = 0, start_index = 0; i < num_tasks;
i++, start_index += items_per_task + (i < items_remainder ? 1 : 0)) {
- Task* task = tasks_[i];
+ auto task = std::move(tasks_[i]);
+ DCHECK(task);
     // By definition there are fewer |items_remainder| to distribute than
// there are tasks processing items so this cannot overflow while we are
@@ -105,16 +106,15 @@ void ItemParallelJob::Run(std::shared_ptr<Counters> async_counters) {
: base::Optional<AsyncTimedHistogram>());
task_ids[i] = task->id();
if (i > 0) {
- V8::GetCurrentPlatform()->CallOnBackgroundThread(
- task, v8::Platform::kShortRunningTask);
+ V8::GetCurrentPlatform()->CallBlockingTaskOnWorkerThread(std::move(task));
} else {
- main_task = task;
+ main_task = std::move(task);
}
}
// Contribute on main thread.
+ DCHECK(main_task);
main_task->Run();
- delete main_task;
// Wait for background tasks.
for (size_t i = 0; i < num_tasks; i++) {
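
The Run() changes above move task ownership into std::unique_ptr: background tasks are handed to the platform with std::move(), the first task is retained and executed on the calling thread, and the explicit delete disappears. Below is a reduced, self-contained version of that ownership pattern; Task, Platform and PostTask are stand-ins, not the V8 types.

    #include <cstddef>
    #include <memory>
    #include <utility>
    #include <vector>

    struct Task {
      virtual ~Task() = default;
      virtual void Run() = 0;
    };

    // Toy platform that takes ownership of posted tasks; the real call in the
    // diff above is CallBlockingTaskOnWorkerThread().
    struct Platform {
      void PostTask(std::unique_ptr<Task> task) { task->Run(); }
    };

    void RunAll(std::vector<std::unique_ptr<Task>>* tasks, Platform* platform) {
      std::unique_ptr<Task> main_task;
      for (std::size_t i = 0; i < tasks->size(); ++i) {
        auto task = std::move((*tasks)[i]);
        if (i > 0) {
          platform->PostTask(std::move(task));  // platform now owns the task
        } else {
          main_task = std::move(task);          // contribute on this thread
        }
      }
      if (main_task) main_task->Run();  // released automatically at scope end
    }
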
diff --git a/deps/v8/src/heap/item-parallel-job.h b/deps/v8/src/heap/item-parallel-job.h
index 4c21f69ca9..51e0afd401 100644
--- a/deps/v8/src/heap/item-parallel-job.h
+++ b/deps/v8/src/heap/item-parallel-job.h
@@ -126,7 +126,7 @@ class V8_EXPORT_PRIVATE ItemParallelJob {
~ItemParallelJob();
// Adds a task to the job. Transfers ownership to the job.
- void AddTask(Task* task) { tasks_.push_back(task); }
+ void AddTask(Task* task) { tasks_.push_back(std::unique_ptr<Task>(task)); }
// Adds an item to the job. Transfers ownership to the job.
void AddItem(Item* item) { items_.push_back(item); }
@@ -140,7 +140,7 @@ class V8_EXPORT_PRIVATE ItemParallelJob {
private:
std::vector<Item*> items_;
- std::vector<Task*> tasks_;
+ std::vector<std::unique_ptr<Task>> tasks_;
CancelableTaskManager* cancelable_task_manager_;
base::Semaphore* pending_tasks_;
DISALLOW_COPY_AND_ASSIGN(ItemParallelJob);
diff --git a/deps/v8/src/heap/mark-compact-inl.h b/deps/v8/src/heap/mark-compact-inl.h
index a6bbecd88e..c21c7dda6e 100644
--- a/deps/v8/src/heap/mark-compact-inl.h
+++ b/deps/v8/src/heap/mark-compact-inl.h
@@ -28,7 +28,7 @@ int MarkingVisitor<fixed_array_mode, retaining_path_mode,
MarkingState>::VisitAllocationSite(Map* map,
AllocationSite* object) {
int size = AllocationSite::BodyDescriptorWeak::SizeOf(map, object);
- AllocationSite::BodyDescriptorWeak::IterateBody(object, size, this);
+ AllocationSite::BodyDescriptorWeak::IterateBody(map, object, size, this);
return size;
}
@@ -38,7 +38,7 @@ int MarkingVisitor<fixed_array_mode, retaining_path_mode,
MarkingState>::VisitBytecodeArray(Map* map,
BytecodeArray* array) {
int size = BytecodeArray::BodyDescriptor::SizeOf(map, array);
- BytecodeArray::BodyDescriptor::IterateBody(array, size, this);
+ BytecodeArray::BodyDescriptor::IterateBody(map, array, size, this);
array->MakeOlder();
return size;
}
@@ -48,7 +48,7 @@ template <FixedArrayVisitationMode fixed_array_mode,
int MarkingVisitor<fixed_array_mode, retaining_path_mode, MarkingState>::
VisitCodeDataContainer(Map* map, CodeDataContainer* object) {
int size = CodeDataContainer::BodyDescriptorWeak::SizeOf(map, object);
- CodeDataContainer::BodyDescriptorWeak::IterateBody(object, size, this);
+ CodeDataContainer::BodyDescriptorWeak::IterateBody(map, object, size, this);
return size;
}
@@ -71,7 +71,7 @@ int MarkingVisitor<fixed_array_mode, retaining_path_mode,
heap_->TracePossibleWrapper(object);
}
int size = JSObject::BodyDescriptor::SizeOf(map, object);
- JSObject::BodyDescriptor::IterateBody(object, size, this);
+ JSObject::BodyDescriptor::IterateBody(map, object, size, this);
return size;
}
@@ -81,7 +81,7 @@ int MarkingVisitor<fixed_array_mode, retaining_path_mode,
MarkingState>::VisitJSFunction(Map* map,
JSFunction* object) {
int size = JSFunction::BodyDescriptorWeak::SizeOf(map, object);
- JSFunction::BodyDescriptorWeak::IterateBody(object, size, this);
+ JSFunction::BodyDescriptorWeak::IterateBody(map, object, size, this);
return size;
}
@@ -98,7 +98,7 @@ int MarkingVisitor<fixed_array_mode, retaining_path_mode, MarkingState>::
// Skip visiting the backing hash table containing the mappings and the
// pointer to the other enqueued weak collections, both are post-processed.
int size = JSWeakCollection::BodyDescriptorWeak::SizeOf(map, weak_collection);
- JSWeakCollection::BodyDescriptorWeak::IterateBody(weak_collection, size,
+ JSWeakCollection::BodyDescriptorWeak::IterateBody(map, weak_collection, size,
this);
// Partially initialized weak collection is enqueued, but table is ignored.
@@ -119,14 +119,13 @@ int MarkingVisitor<fixed_array_mode, retaining_path_mode,
MarkingState>::VisitMap(Map* map, Map* object) {
// When map collection is enabled we have to mark through map's transitions
// and back pointers in a special way to make these links weak.
+ int size = Map::BodyDescriptor::SizeOf(map, object);
if (object->CanTransition()) {
MarkMapContents(object);
} else {
- VisitPointers(object,
- HeapObject::RawField(object, Map::kPointerFieldsBeginOffset),
- HeapObject::RawField(object, Map::kPointerFieldsEndOffset));
+ Map::BodyDescriptor::IterateBody(map, object, size, this);
}
- return Map::BodyDescriptor::SizeOf(map, object);
+ return size;
}
template <FixedArrayVisitationMode fixed_array_mode,
@@ -135,7 +134,7 @@ int MarkingVisitor<fixed_array_mode, retaining_path_mode,
MarkingState>::VisitNativeContext(Map* map,
Context* context) {
int size = Context::BodyDescriptorWeak::SizeOf(map, context);
- Context::BodyDescriptorWeak::IterateBody(context, size, this);
+ Context::BodyDescriptorWeak::IterateBody(map, context, size, this);
return size;
}
@@ -145,7 +144,7 @@ int MarkingVisitor<fixed_array_mode, retaining_path_mode,
MarkingState>::VisitTransitionArray(Map* map,
TransitionArray* array) {
int size = TransitionArray::BodyDescriptor::SizeOf(map, array);
- TransitionArray::BodyDescriptor::IterateBody(array, size, this);
+ TransitionArray::BodyDescriptor::IterateBody(map, array, size, this);
collector_->AddTransitionArray(array);
return size;
}
@@ -187,6 +186,32 @@ void MarkingVisitor<fixed_array_mode, retaining_path_mode,
template <FixedArrayVisitationMode fixed_array_mode,
TraceRetainingPathMode retaining_path_mode, typename MarkingState>
void MarkingVisitor<fixed_array_mode, retaining_path_mode,
+ MarkingState>::VisitPointer(HeapObject* host,
+ MaybeObject** p) {
+ HeapObject* target_object;
+ if ((*p)->ToStrongHeapObject(&target_object)) {
+ collector_->RecordSlot(host, reinterpret_cast<HeapObjectReference**>(p),
+ target_object);
+ MarkObject(host, target_object);
+ } else if ((*p)->ToWeakHeapObject(&target_object)) {
+ if (marking_state()->IsBlackOrGrey(target_object)) {
+ // Weak references with live values are directly processed here to reduce
+ // the processing time of weak cells during the main GC pause.
+ collector_->RecordSlot(host, reinterpret_cast<HeapObjectReference**>(p),
+ target_object);
+ } else {
+ // If we do not know about liveness of values of weak cells, we have to
+ // process them when we know the liveness of the whole transitive
+ // closure.
+ collector_->AddWeakReference(host,
+ reinterpret_cast<HeapObjectReference**>(p));
+ }
+ }
+}
+
+template <FixedArrayVisitationMode fixed_array_mode,
+ TraceRetainingPathMode retaining_path_mode, typename MarkingState>
+void MarkingVisitor<fixed_array_mode, retaining_path_mode,
MarkingState>::VisitPointers(HeapObject* host,
Object** start, Object** end) {
for (Object** p = start; p < end; p++) {
@@ -197,6 +222,17 @@ void MarkingVisitor<fixed_array_mode, retaining_path_mode,
template <FixedArrayVisitationMode fixed_array_mode,
TraceRetainingPathMode retaining_path_mode, typename MarkingState>
void MarkingVisitor<fixed_array_mode, retaining_path_mode,
+ MarkingState>::VisitPointers(HeapObject* host,
+ MaybeObject** start,
+ MaybeObject** end) {
+ for (MaybeObject** p = start; p < end; p++) {
+ VisitPointer(host, p);
+ }
+}
+
+template <FixedArrayVisitationMode fixed_array_mode,
+ TraceRetainingPathMode retaining_path_mode, typename MarkingState>
+void MarkingVisitor<fixed_array_mode, retaining_path_mode,
MarkingState>::VisitEmbeddedPointer(Code* host,
RelocInfo* rinfo) {
DCHECK(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
@@ -204,6 +240,8 @@ void MarkingVisitor<fixed_array_mode, retaining_path_mode,
collector_->RecordRelocSlot(host, rinfo, object);
if (!host->IsWeakObject(object)) {
MarkObject(host, object);
+ } else if (!marking_state()->IsBlackOrGrey(object)) {
+ collector_->AddWeakObjectInCode(object, host);
}
}
@@ -288,7 +326,7 @@ int MarkingVisitor<fixed_array_mode, retaining_path_mode, MarkingState>::
}
}
} else {
- FixedArray::BodyDescriptor::IterateBody(object, object_size, this);
+ FixedArray::BodyDescriptor::IterateBody(map, object, object_size, this);
}
return object_size;
}
@@ -320,8 +358,8 @@ void MarkingVisitor<fixed_array_mode, retaining_path_mode,
// Mark the pointer fields of the Map. Since the transitions array has
// been marked already, it is fine that one of these fields contains a
// pointer to it.
- VisitPointers(map, HeapObject::RawField(map, Map::kPointerFieldsBeginOffset),
- HeapObject::RawField(map, Map::kPointerFieldsEndOffset));
+ Map::BodyDescriptor::IterateBody(
+ map->map(), map, Map::BodyDescriptor::SizeOf(map->map(), map), this);
}
void MarkCompactCollector::MarkObject(HeapObject* host, HeapObject* obj) {
@@ -353,6 +391,12 @@ void MarkCompactCollector::MarkExternallyReferencedObject(HeapObject* obj) {
void MarkCompactCollector::RecordSlot(HeapObject* object, Object** slot,
Object* target) {
+ RecordSlot(object, reinterpret_cast<HeapObjectReference**>(slot), target);
+}
+
+void MarkCompactCollector::RecordSlot(HeapObject* object,
+ HeapObjectReference** slot,
+ Object* target) {
Page* target_page = Page::FromAddress(reinterpret_cast<Address>(target));
Page* source_page = Page::FromAddress(reinterpret_cast<Address>(object));
if (target_page->IsEvacuationCandidate<AccessMode::ATOMIC>() &&
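
The MaybeObject overload of MarkingVisitor::VisitPointer() above handles three cases: a strong reference is marked and its slot recorded, a weak reference whose target is already black or grey is only recorded, and a weak reference to an object of unknown liveness is deferred to the weak-reference worklist. The toy classifier below restates that dispatch with illustrative types; none of these names are V8 API.

    // Three outcomes of visiting a MaybeObject slot, as in the overload above.
    enum class RefKind { kSmi, kStrong, kWeak };
    enum class Action { kNone, kMarkAndRecord, kRecordOnly, kDeferAsWeak };

    struct Ref {
      RefKind kind;
      bool target_marked;  // stand-in for marking_state()->IsBlackOrGrey(...)
    };

    inline Action ClassifyReference(const Ref& ref) {
      switch (ref.kind) {
        case RefKind::kStrong:
          return Action::kMarkAndRecord;  // mark the target, record the slot
        case RefKind::kWeak:
          return ref.target_marked
                     ? Action::kRecordOnly    // value already known to be live
                     : Action::kDeferAsWeak;  // decide after the full closure
        case RefKind::kSmi:
        default:
          return Action::kNone;  // immediates carry no references
      }
    }
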
diff --git a/deps/v8/src/heap/mark-compact.cc b/deps/v8/src/heap/mark-compact.cc
index c6c8c29962..eedc942835 100644
--- a/deps/v8/src/heap/mark-compact.cc
+++ b/deps/v8/src/heap/mark-compact.cc
@@ -62,6 +62,7 @@ class MarkingVerifier : public ObjectVisitor, public RootVisitor {
virtual Bitmap* bitmap(const MemoryChunk* chunk) = 0;
virtual void VerifyPointers(Object** start, Object** end) = 0;
+ virtual void VerifyPointers(MaybeObject** start, MaybeObject** end) = 0;
virtual bool IsMarked(HeapObject* object) = 0;
@@ -71,6 +72,11 @@ class MarkingVerifier : public ObjectVisitor, public RootVisitor {
VerifyPointers(start, end);
}
+ void VisitPointers(HeapObject* host, MaybeObject** start,
+ MaybeObject** end) override {
+ VerifyPointers(start, end);
+ }
+
void VisitRootPointers(Root root, const char* description, Object** start,
Object** end) override {
VerifyPointers(start, end);
@@ -182,6 +188,15 @@ class FullMarkingVerifier : public MarkingVerifier {
}
}
+ void VerifyPointers(MaybeObject** start, MaybeObject** end) override {
+ for (MaybeObject** current = start; current < end; current++) {
+ HeapObject* object;
+ if ((*current)->ToStrongHeapObject(&object)) {
+ CHECK(marking_state_->IsBlackOrGrey(object));
+ }
+ }
+ }
+
void VisitEmbeddedPointer(Code* host, RelocInfo* rinfo) override {
DCHECK(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
if (!host->IsWeakObject(rinfo->target_object())) {
@@ -194,44 +209,6 @@ class FullMarkingVerifier : public MarkingVerifier {
MarkCompactCollector::NonAtomicMarkingState* marking_state_;
};
-class YoungGenerationMarkingVerifier : public MarkingVerifier {
- public:
- explicit YoungGenerationMarkingVerifier(Heap* heap)
- : MarkingVerifier(heap),
- marking_state_(
- heap->minor_mark_compact_collector()->non_atomic_marking_state()) {}
-
- Bitmap* bitmap(const MemoryChunk* chunk) override {
- return marking_state_->bitmap(chunk);
- }
-
- bool IsMarked(HeapObject* object) override {
- return marking_state_->IsGrey(object);
- }
-
- bool IsBlackOrGrey(HeapObject* object) override {
- return marking_state_->IsBlackOrGrey(object);
- }
-
- void Run() override {
- VerifyRoots(VISIT_ALL_IN_SCAVENGE);
- VerifyMarking(heap_->new_space());
- }
-
- void VerifyPointers(Object** start, Object** end) override {
- for (Object** current = start; current < end; current++) {
- if ((*current)->IsHeapObject()) {
- HeapObject* object = HeapObject::cast(*current);
- if (!heap_->InNewSpace(object)) return;
- CHECK(IsMarked(object));
- }
- }
- }
-
- private:
- MinorMarkCompactCollector::NonAtomicMarkingState* marking_state_;
-};
-
class EvacuationVerifier : public ObjectVisitor, public RootVisitor {
public:
virtual void Run() = 0;
@@ -240,6 +217,11 @@ class EvacuationVerifier : public ObjectVisitor, public RootVisitor {
VerifyPointers(start, end);
}
+ void VisitPointers(HeapObject* host, MaybeObject** start,
+ MaybeObject** end) override {
+ VerifyPointers(start, end);
+ }
+
void VisitRootPointers(Root root, const char* description, Object** start,
Object** end) override {
VerifyPointers(start, end);
@@ -251,6 +233,7 @@ class EvacuationVerifier : public ObjectVisitor, public RootVisitor {
inline Heap* heap() { return heap_; }
virtual void VerifyPointers(Object** start, Object** end) = 0;
+ virtual void VerifyPointers(MaybeObject** start, MaybeObject** end) = 0;
void VerifyRoots(VisitMode mode);
void VerifyEvacuationOnPage(Address start, Address end);
@@ -320,27 +303,14 @@ class FullEvacuationVerifier : public EvacuationVerifier {
}
}
}
-};
-
-class YoungGenerationEvacuationVerifier : public EvacuationVerifier {
- public:
- explicit YoungGenerationEvacuationVerifier(Heap* heap)
- : EvacuationVerifier(heap) {}
-
- void Run() override {
- VerifyRoots(VISIT_ALL_IN_SCAVENGE);
- VerifyEvacuation(heap_->new_space());
- VerifyEvacuation(heap_->old_space());
- VerifyEvacuation(heap_->code_space());
- VerifyEvacuation(heap_->map_space());
- }
-
- protected:
- void VerifyPointers(Object** start, Object** end) override {
- for (Object** current = start; current < end; current++) {
- if ((*current)->IsHeapObject()) {
- HeapObject* object = HeapObject::cast(*current);
- CHECK_IMPLIES(heap()->InNewSpace(object), heap()->InToSpace(object));
+ void VerifyPointers(MaybeObject** start, MaybeObject** end) override {
+ for (MaybeObject** current = start; current < end; current++) {
+ HeapObject* object;
+ if ((*current)->ToStrongHeapObject(&object)) {
+ if (heap()->InNewSpace(object)) {
+ CHECK(heap()->InToSpace(object));
+ }
+ CHECK(!MarkCompactCollector::IsOnEvacuationCandidate(object));
}
}
}
@@ -408,16 +378,10 @@ class RootMarkingVisitorSeedOnly : public RootVisitor {
};
int NumberOfAvailableCores() {
- static int num_cores =
- static_cast<int>(
- V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads()) +
- 1;
+ static int num_cores = V8::GetCurrentPlatform()->NumberOfWorkerThreads() + 1;
// This number of cores should be greater than zero and never change.
DCHECK_GE(num_cores, 1);
- DCHECK_EQ(
- num_cores,
- 1 + static_cast<int>(
- V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads()));
+ DCHECK_EQ(num_cores, V8::GetCurrentPlatform()->NumberOfWorkerThreads() + 1);
return num_cores;
}
@@ -425,7 +389,14 @@ int NumberOfAvailableCores() {
int MarkCompactCollectorBase::NumberOfParallelCompactionTasks(int pages) {
DCHECK_GT(pages, 0);
- return FLAG_parallel_compaction ? Min(NumberOfAvailableCores(), pages) : 1;
+ int tasks =
+ FLAG_parallel_compaction ? Min(NumberOfAvailableCores(), pages) : 1;
+ if (!heap_->CanExpandOldGeneration(
+ static_cast<size_t>(tasks * Page::kPageSize))) {
+ // Optimize for memory usage near the heap limit.
+ tasks = 1;
+ }
+ return tasks;
}
int MarkCompactCollectorBase::NumberOfParallelPointerUpdateTasks(int pages,
@@ -452,16 +423,6 @@ int MarkCompactCollectorBase::NumberOfParallelToSpacePointerUpdateTasks(
: 1;
}
-int MinorMarkCompactCollector::NumberOfParallelMarkingTasks(int pages) {
- DCHECK_GT(pages, 0);
- if (!FLAG_minor_mc_parallel_marking) return 1;
- // Pages are not private to markers but we can still use them to estimate the
- // amount of marking that is required.
- const int kPagesPerTask = 2;
- const int wanted_tasks = Max(1, pages / kPagesPerTask);
- return Min(NumberOfAvailableCores(), Min(wanted_tasks, kNumMarkers));
-}
-
MarkCompactCollector::MarkCompactCollector(Heap* heap)
: MarkCompactCollectorBase(heap),
page_parallel_job_semaphore_(0),
@@ -487,8 +448,6 @@ void MarkCompactCollector::SetUp() {
DCHECK_EQ(0, strcmp(Marking::kImpossibleBitPattern, "01"));
}
-void MinorMarkCompactCollector::SetUp() {}
-
void MarkCompactCollector::TearDown() {
AbortCompaction();
AbortWeakObjects();
@@ -497,8 +456,6 @@ void MarkCompactCollector::TearDown() {
}
}
-void MinorMarkCompactCollector::TearDown() {}
-
void MarkCompactCollector::AddEvacuationCandidate(Page* p) {
DCHECK(!p->NeverEvacuate());
p->MarkEvacuationCandidate();
@@ -542,7 +499,9 @@ void MarkCompactCollector::CollectGarbage() {
// update the state as they proceed.
DCHECK(state_ == PREPARE_GC);
+#ifdef ENABLE_MINOR_MC
heap()->minor_mark_compact_collector()->CleanupSweepToIteratePages();
+#endif // ENABLE_MINOR_MC
MarkLiveObjects();
ClearNonLiveReferences();
@@ -558,6 +517,13 @@ void MarkCompactCollector::CollectGarbage() {
}
#ifdef VERIFY_HEAP
+void MarkCompactCollector::VerifyMarkbitsAreDirty(PagedSpace* space) {
+ HeapObjectIterator iterator(space);
+ while (HeapObject* object = iterator.Next()) {
+ CHECK(non_atomic_marking_state()->IsBlack(object));
+ }
+}
+
void MarkCompactCollector::VerifyMarkbitsAreClean(PagedSpace* space) {
for (Page* p : *space) {
CHECK(non_atomic_marking_state()->bitmap(p)->IsClean());
@@ -579,6 +545,9 @@ void MarkCompactCollector::VerifyMarkbitsAreClean() {
VerifyMarkbitsAreClean(heap_->code_space());
VerifyMarkbitsAreClean(heap_->map_space());
VerifyMarkbitsAreClean(heap_->new_space());
+ // Read-only space should always be black since we never collect any objects
+ // in it or linked from it.
+ VerifyMarkbitsAreDirty(heap_->read_only_space());
LargeObjectIterator it(heap_->lo_space());
for (HeapObject* obj = it.Next(); obj != nullptr; obj = it.Next()) {
@@ -588,17 +557,6 @@ void MarkCompactCollector::VerifyMarkbitsAreClean() {
}
}
-void MarkCompactCollector::VerifyWeakEmbeddedObjectsInCode() {
- HeapObjectIterator code_iterator(heap()->code_space());
- for (HeapObject* obj = code_iterator.Next(); obj != nullptr;
- obj = code_iterator.Next()) {
- Code* code = Code::cast(obj);
- if (!code->is_optimized_code()) continue;
- if (WillBeDeoptimized(code)) continue;
- code->VerifyEmbeddedObjectsDependency();
- }
-}
-
#endif // VERIFY_HEAP
void MarkCompactCollector::ClearMarkbitsInPagedSpace(PagedSpace* space) {
@@ -936,9 +894,6 @@ void MarkCompactCollector::Finish() {
sweeper()->StartSweeperTasks();
sweeper()->StartIterabilityTasks();
- // The hashing of weak_object_to_code_table is no longer valid.
- heap()->weak_object_to_code_table()->Rehash();
-
// Clear the marking state of live large objects.
heap_->lo_space()->ClearMarkingStateOfLiveObjects();
@@ -962,16 +917,6 @@ void MarkCompactCollector::Finish() {
}
}
-void MinorMarkCompactCollector::CleanupSweepToIteratePages() {
- for (Page* p : sweep_to_iterate_pages_) {
- if (p->IsFlagSet(Page::SWEEP_TO_ITERATE)) {
- p->ClearFlag(Page::SWEEP_TO_ITERATE);
- non_atomic_marking_state()->ClearLiveness(p);
- }
- }
- sweep_to_iterate_pages_.clear();
-}
-
class MarkCompactCollector::RootMarkingVisitor final : public RootVisitor {
public:
explicit RootMarkingVisitor(MarkCompactCollector* collector)
@@ -1016,7 +961,16 @@ class MarkCompactCollector::CustomRootBodyMarkingVisitor final
}
void VisitPointers(HeapObject* host, Object** start, Object** end) final {
- for (Object** p = start; p < end; p++) MarkObject(host, *p);
+ for (Object** p = start; p < end; p++) {
+ DCHECK(!HasWeakHeapObjectTag(*p));
+ MarkObject(host, *p);
+ }
+ }
+
+ void VisitPointers(HeapObject* host, MaybeObject** start,
+ MaybeObject** end) final {
+ // At the moment, custom roots cannot contain weak pointers.
+ UNREACHABLE();
}
// VisitEmbedderPointer is defined by ObjectVisitor to call VisitPointers.
@@ -1057,6 +1011,11 @@ class InternalizedStringTableCleaner : public ObjectVisitor {
}
}
+ void VisitPointers(HeapObject* host, MaybeObject** start,
+ MaybeObject** end) final {
+ UNREACHABLE();
+ }
+
int PointersRemoved() {
return pointers_removed_;
}
@@ -1099,68 +1058,6 @@ class ExternalStringTableCleaner : public RootVisitor {
Heap* heap_;
};
-// Helper class for pruning the string table.
-class YoungGenerationExternalStringTableCleaner : public RootVisitor {
- public:
- YoungGenerationExternalStringTableCleaner(
- MinorMarkCompactCollector* collector)
- : heap_(collector->heap()),
- marking_state_(collector->non_atomic_marking_state()) {}
-
- void VisitRootPointers(Root root, const char* description, Object** start,
- Object** end) override {
- DCHECK_EQ(static_cast<int>(root),
- static_cast<int>(Root::kExternalStringsTable));
- // Visit all HeapObject pointers in [start, end).
- for (Object** p = start; p < end; p++) {
- Object* o = *p;
- if (o->IsHeapObject()) {
- HeapObject* heap_object = HeapObject::cast(o);
- if (marking_state_->IsWhite(heap_object)) {
- if (o->IsExternalString()) {
- heap_->FinalizeExternalString(String::cast(*p));
- } else {
- // The original external string may have been internalized.
- DCHECK(o->IsThinString());
- }
- // Set the entry to the_hole_value (as deleted).
- *p = heap_->the_hole_value();
- }
- }
- }
- }
-
- private:
- Heap* heap_;
- MinorMarkCompactCollector::NonAtomicMarkingState* marking_state_;
-};
-
-// Marked young generation objects and all old generation objects will be
-// retained.
-class MinorMarkCompactWeakObjectRetainer : public WeakObjectRetainer {
- public:
- explicit MinorMarkCompactWeakObjectRetainer(
- MinorMarkCompactCollector* collector)
- : heap_(collector->heap()),
- marking_state_(collector->non_atomic_marking_state()) {}
-
- virtual Object* RetainAs(Object* object) {
- HeapObject* heap_object = HeapObject::cast(object);
- if (!heap_->InNewSpace(heap_object)) return object;
-
- // Young generation marking only marks to grey instead of black.
- DCHECK(!marking_state_->IsBlack(heap_object));
- if (marking_state_->IsGrey(heap_object)) {
- return object;
- }
- return nullptr;
- }
-
- private:
- Heap* heap_;
- MinorMarkCompactCollector::NonAtomicMarkingState* marking_state_;
-};
-
// Implementation of WeakObjectRetainer for mark compact GCs. All marked objects
// are retained.
class MarkCompactWeakObjectRetainer : public WeakObjectRetainer {
@@ -1197,13 +1094,27 @@ class RecordMigratedSlotVisitor : public ObjectVisitor {
: collector_(collector) {}
inline void VisitPointer(HeapObject* host, Object** p) final {
+ DCHECK(!HasWeakHeapObjectTag(*p));
+ RecordMigratedSlot(host, reinterpret_cast<MaybeObject*>(*p),
+ reinterpret_cast<Address>(p));
+ }
+
+ inline void VisitPointer(HeapObject* host, MaybeObject** p) final {
RecordMigratedSlot(host, *p, reinterpret_cast<Address>(p));
}
inline void VisitPointers(HeapObject* host, Object** start,
Object** end) final {
while (start < end) {
- RecordMigratedSlot(host, *start, reinterpret_cast<Address>(start));
+ VisitPointer(host, start);
+ ++start;
+ }
+ }
+
+ inline void VisitPointers(HeapObject* host, MaybeObject** start,
+ MaybeObject** end) final {
+ while (start < end) {
+ VisitPointer(host, start);
++start;
}
}
@@ -1233,9 +1144,9 @@ class RecordMigratedSlotVisitor : public ObjectVisitor {
inline void VisitInternalReference(Code* host, RelocInfo* rinfo) final {}
protected:
- inline virtual void RecordMigratedSlot(HeapObject* host, Object* value,
+ inline virtual void RecordMigratedSlot(HeapObject* host, MaybeObject* value,
Address slot) {
- if (value->IsHeapObject()) {
+ if (value->IsStrongOrWeakHeapObject()) {
Page* p = Page::FromAddress(reinterpret_cast<Address>(value));
if (p->InNewSpace()) {
DCHECK_IMPLIES(p->InToSpace(),
@@ -1278,65 +1189,6 @@ class ProfilingMigrationObserver final : public MigrationObserver {
}
};
-class YoungGenerationMigrationObserver final : public MigrationObserver {
- public:
- YoungGenerationMigrationObserver(Heap* heap,
- MarkCompactCollector* mark_compact_collector)
- : MigrationObserver(heap),
- mark_compact_collector_(mark_compact_collector) {}
-
- inline void Move(AllocationSpace dest, HeapObject* src, HeapObject* dst,
- int size) final {
- // Migrate color to old generation marking in case the object survived young
- // generation garbage collection.
- if (heap_->incremental_marking()->IsMarking()) {
- DCHECK(
- heap_->incremental_marking()->atomic_marking_state()->IsWhite(dst));
- heap_->incremental_marking()->TransferColor(src, dst);
- }
- }
-
- protected:
- base::Mutex mutex_;
- MarkCompactCollector* mark_compact_collector_;
-};
-
-class YoungGenerationRecordMigratedSlotVisitor final
- : public RecordMigratedSlotVisitor {
- public:
- explicit YoungGenerationRecordMigratedSlotVisitor(
- MarkCompactCollector* collector)
- : RecordMigratedSlotVisitor(collector) {}
-
- void VisitCodeTarget(Code* host, RelocInfo* rinfo) final { UNREACHABLE(); }
- void VisitEmbeddedPointer(Code* host, RelocInfo* rinfo) final {
- UNREACHABLE();
- }
-
- private:
- // Only record slots for host objects that are considered as live by the full
- // collector.
- inline bool IsLive(HeapObject* object) {
- return collector_->non_atomic_marking_state()->IsBlack(object);
- }
-
- inline void RecordMigratedSlot(HeapObject* host, Object* value,
- Address slot) final {
- if (value->IsHeapObject()) {
- Page* p = Page::FromAddress(reinterpret_cast<Address>(value));
- if (p->InNewSpace()) {
- DCHECK_IMPLIES(p->InToSpace(),
- p->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION));
- RememberedSet<OLD_TO_NEW>::Insert<AccessMode::NON_ATOMIC>(
- Page::FromAddress(slot), slot);
- } else if (p->IsEvacuationCandidate() && IsLive(host)) {
- RememberedSet<OLD_TO_OLD>::Insert<AccessMode::NON_ATOMIC>(
- Page::FromAddress(slot), slot);
- }
- }
- }
-};
-
class HeapObjectVisitor {
public:
virtual ~HeapObjectVisitor() {}
@@ -1371,16 +1223,14 @@ class EvacuateVisitorBase : public HeapObjectVisitor {
base->heap_->CopyBlock(dst_addr, src_addr, size);
if (mode != MigrationMode::kFast)
base->ExecuteMigrationObservers(dest, src, dst, size);
- dst->IterateBodyFast(dst->map()->instance_type(), size,
- base->record_visitor_);
+ dst->IterateBodyFast(dst->map(), size, base->record_visitor_);
} else if (dest == CODE_SPACE) {
DCHECK_CODEOBJECT_SIZE(size, base->heap_->code_space());
base->heap_->CopyBlock(dst_addr, src_addr, size);
Code::cast(dst)->Relocate(dst_addr - src_addr);
if (mode != MigrationMode::kFast)
base->ExecuteMigrationObservers(dest, src, dst, size);
- dst->IterateBodyFast(dst->map()->instance_type(), size,
- base->record_visitor_);
+ dst->IterateBodyFast(dst->map(), size, base->record_visitor_);
} else {
DCHECK_OBJECT_SIZE(size);
DCHECK(dest == NEW_SPACE);
@@ -1532,8 +1382,8 @@ class EvacuateNewSpaceVisitor final : public EvacuateVisitorBase {
AllocationResult allocation =
local_allocator_->Allocate(OLD_SPACE, size_in_bytes, alignment);
if (allocation.IsRetry()) {
- v8::internal::Heap::FatalProcessOutOfMemory(
- "MarkCompactCollector: semi-space copy, fallback in old gen", true);
+ heap_->FatalProcessOutOfMemory(
+ "MarkCompactCollector: semi-space copy, fallback in old gen");
}
return allocation;
}
@@ -1616,7 +1466,7 @@ class EvacuateRecordOnlyVisitor final : public HeapObjectVisitor {
inline bool Visit(HeapObject* object, int size) {
RecordMigratedSlotVisitor visitor(heap_->mark_compact_collector());
- object->IterateBody(&visitor);
+ object->IterateBodyFast(&visitor);
return true;
}
@@ -1699,7 +1549,7 @@ void MarkCompactCollector::ProcessTopOptimizedFrame(ObjectVisitor* visitor) {
if (it.frame()->type() == StackFrame::OPTIMIZED) {
Code* code = it.frame()->LookupCode();
if (!code->CanDeoptAt(it.frame()->pc())) {
- Code::BodyDescriptor::IterateBody(code, visitor);
+ Code::BodyDescriptor::IterateBody(code->map(), code, visitor);
}
return;
}
@@ -1731,598 +1581,6 @@ void MarkCompactCollector::RecordObjectStats() {
}
}
-class YoungGenerationMarkingVisitor final
- : public NewSpaceVisitor<YoungGenerationMarkingVisitor> {
- public:
- YoungGenerationMarkingVisitor(
- Heap* heap, MinorMarkCompactCollector::MarkingState* marking_state,
- MinorMarkCompactCollector::MarkingWorklist* global_worklist, int task_id)
- : heap_(heap),
- worklist_(global_worklist, task_id),
- marking_state_(marking_state) {}
-
- V8_INLINE void VisitPointers(HeapObject* host, Object** start,
- Object** end) final {
- for (Object** p = start; p < end; p++) {
- VisitPointer(host, p);
- }
- }
-
- V8_INLINE void VisitPointer(HeapObject* host, Object** slot) final {
- Object* target = *slot;
- if (heap_->InNewSpace(target)) {
- HeapObject* target_object = HeapObject::cast(target);
- MarkObjectViaMarkingWorklist(target_object);
- }
- }
-
- private:
- inline void MarkObjectViaMarkingWorklist(HeapObject* object) {
- if (marking_state_->WhiteToGrey(object)) {
- // Marking deque overflow is unsupported for the young generation.
- CHECK(worklist_.Push(object));
- }
- }
-
- Heap* heap_;
- MinorMarkCompactCollector::MarkingWorklist::View worklist_;
- MinorMarkCompactCollector::MarkingState* marking_state_;
-};
-
-class MinorMarkCompactCollector::RootMarkingVisitor : public RootVisitor {
- public:
- explicit RootMarkingVisitor(MinorMarkCompactCollector* collector)
- : collector_(collector),
- marking_state_(collector_->non_atomic_marking_state()) {}
-
- void VisitRootPointer(Root root, const char* description,
- Object** p) override {
- MarkObjectByPointer(p);
- }
-
- void VisitRootPointers(Root root, const char* description, Object** start,
- Object** end) override {
- for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
- }
-
- private:
- void MarkObjectByPointer(Object** p) {
- if (!(*p)->IsHeapObject()) return;
-
- HeapObject* object = HeapObject::cast(*p);
-
- if (!collector_->heap()->InNewSpace(object)) return;
-
- if (marking_state_->WhiteToGrey(object)) {
- collector_->main_marking_visitor()->Visit(object);
- collector_->ProcessMarkingWorklist();
- }
- }
-
- MinorMarkCompactCollector* collector_;
- MinorMarkCompactCollector::NonAtomicMarkingState* marking_state_;
-};
-
-class MarkingItem;
-class GlobalHandlesMarkingItem;
-class PageMarkingItem;
-class RootMarkingItem;
-class YoungGenerationMarkingTask;
-
-class MarkingItem : public ItemParallelJob::Item {
- public:
- virtual ~MarkingItem() {}
- virtual void Process(YoungGenerationMarkingTask* task) = 0;
-};
-
-class YoungGenerationMarkingTask : public ItemParallelJob::Task {
- public:
- YoungGenerationMarkingTask(
- Isolate* isolate, MinorMarkCompactCollector* collector,
- MinorMarkCompactCollector::MarkingWorklist* global_worklist, int task_id)
- : ItemParallelJob::Task(isolate),
- collector_(collector),
- marking_worklist_(global_worklist, task_id),
- marking_state_(collector->marking_state()),
- visitor_(isolate->heap(), marking_state_, global_worklist, task_id) {
- local_live_bytes_.reserve(isolate->heap()->new_space()->Capacity() /
- Page::kPageSize);
- }
-
- void RunInParallel() override {
- TRACE_BACKGROUND_GC(collector_->heap()->tracer(),
- GCTracer::BackgroundScope::MINOR_MC_BACKGROUND_MARKING);
- double marking_time = 0.0;
- {
- TimedScope scope(&marking_time);
- MarkingItem* item = nullptr;
- while ((item = GetItem<MarkingItem>()) != nullptr) {
- item->Process(this);
- item->MarkFinished();
- EmptyLocalMarkingWorklist();
- }
- EmptyMarkingWorklist();
- DCHECK(marking_worklist_.IsLocalEmpty());
- FlushLiveBytes();
- }
- if (FLAG_trace_minor_mc_parallel_marking) {
- PrintIsolate(collector_->isolate(), "marking[%p]: time=%f\n",
- static_cast<void*>(this), marking_time);
- }
- };
-
- void MarkObject(Object* object) {
- if (!collector_->heap()->InNewSpace(object)) return;
- HeapObject* heap_object = HeapObject::cast(object);
- if (marking_state_->WhiteToGrey(heap_object)) {
- const int size = visitor_.Visit(heap_object);
- IncrementLiveBytes(heap_object, size);
- }
- }
-
- private:
- void EmptyLocalMarkingWorklist() {
- HeapObject* object = nullptr;
- while (marking_worklist_.Pop(&object)) {
- const int size = visitor_.Visit(object);
- IncrementLiveBytes(object, size);
- }
- }
-
- void EmptyMarkingWorklist() {
- HeapObject* object = nullptr;
- while (marking_worklist_.Pop(&object)) {
- const int size = visitor_.Visit(object);
- IncrementLiveBytes(object, size);
- }
- }
-
- void IncrementLiveBytes(HeapObject* object, intptr_t bytes) {
- local_live_bytes_[Page::FromAddress(reinterpret_cast<Address>(object))] +=
- bytes;
- }
-
- void FlushLiveBytes() {
- for (auto pair : local_live_bytes_) {
- marking_state_->IncrementLiveBytes(pair.first, pair.second);
- }
- }
-
- MinorMarkCompactCollector* collector_;
- MinorMarkCompactCollector::MarkingWorklist::View marking_worklist_;
- MinorMarkCompactCollector::MarkingState* marking_state_;
- YoungGenerationMarkingVisitor visitor_;
- std::unordered_map<Page*, intptr_t, Page::Hasher> local_live_bytes_;
-};
-
-class BatchedRootMarkingItem : public MarkingItem {
- public:
- explicit BatchedRootMarkingItem(std::vector<Object*>&& objects)
- : objects_(objects) {}
- virtual ~BatchedRootMarkingItem() {}
-
- void Process(YoungGenerationMarkingTask* task) override {
- TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
- "BatchedRootMarkingItem::Process");
- for (Object* object : objects_) {
- task->MarkObject(object);
- }
- }
-
- private:
- std::vector<Object*> objects_;
-};
-
-class PageMarkingItem : public MarkingItem {
- public:
- explicit PageMarkingItem(MemoryChunk* chunk,
- base::AtomicNumber<intptr_t>* global_slots)
- : chunk_(chunk), global_slots_(global_slots), slots_(0) {}
- virtual ~PageMarkingItem() { global_slots_->Increment(slots_); }
-
- void Process(YoungGenerationMarkingTask* task) override {
- TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
- "PageMarkingItem::Process");
- base::LockGuard<base::Mutex> guard(chunk_->mutex());
- MarkUntypedPointers(task);
- MarkTypedPointers(task);
- }
-
- private:
- inline Heap* heap() { return chunk_->heap(); }
-
- void MarkUntypedPointers(YoungGenerationMarkingTask* task) {
- RememberedSet<OLD_TO_NEW>::Iterate(
- chunk_,
- [this, task](Address slot) { return CheckAndMarkObject(task, slot); },
- SlotSet::PREFREE_EMPTY_BUCKETS);
- }
-
- void MarkTypedPointers(YoungGenerationMarkingTask* task) {
- Isolate* isolate = heap()->isolate();
- RememberedSet<OLD_TO_NEW>::IterateTyped(
- chunk_, [this, isolate, task](SlotType slot_type, Address host_addr,
- Address slot) {
- return UpdateTypedSlotHelper::UpdateTypedSlot(
- isolate, slot_type, slot, [this, task](Object** slot) {
- return CheckAndMarkObject(task,
- reinterpret_cast<Address>(slot));
- });
- });
- }
-
- SlotCallbackResult CheckAndMarkObject(YoungGenerationMarkingTask* task,
- Address slot_address) {
- Object* object = *reinterpret_cast<Object**>(slot_address);
- if (heap()->InNewSpace(object)) {
- // Marking happens before flipping the young generation, so the object
- // has to be in ToSpace.
- DCHECK(heap()->InToSpace(object));
- HeapObject* heap_object = reinterpret_cast<HeapObject*>(object);
- task->MarkObject(heap_object);
- slots_++;
- return KEEP_SLOT;
- }
- return REMOVE_SLOT;
- }
-
- MemoryChunk* chunk_;
- base::AtomicNumber<intptr_t>* global_slots_;
- intptr_t slots_;
-};
-
-class GlobalHandlesMarkingItem : public MarkingItem {
- public:
- GlobalHandlesMarkingItem(GlobalHandles* global_handles, size_t start,
- size_t end)
- : global_handles_(global_handles), start_(start), end_(end) {}
- virtual ~GlobalHandlesMarkingItem() {}
-
- void Process(YoungGenerationMarkingTask* task) override {
- TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
- "GlobalHandlesMarkingItem::Process");
- GlobalHandlesRootMarkingVisitor visitor(task);
- global_handles_
- ->IterateNewSpaceStrongAndDependentRootsAndIdentifyUnmodified(
- &visitor, start_, end_);
- }
-
- private:
- class GlobalHandlesRootMarkingVisitor : public RootVisitor {
- public:
- explicit GlobalHandlesRootMarkingVisitor(YoungGenerationMarkingTask* task)
- : task_(task) {}
-
- void VisitRootPointer(Root root, const char* description,
- Object** p) override {
- DCHECK_EQ(Root::kGlobalHandles, root);
- task_->MarkObject(*p);
- }
-
- void VisitRootPointers(Root root, const char* description, Object** start,
- Object** end) override {
- DCHECK_EQ(Root::kGlobalHandles, root);
- for (Object** p = start; p < end; p++) {
- task_->MarkObject(*p);
- }
- }
-
- private:
- YoungGenerationMarkingTask* task_;
- };
-
- GlobalHandles* global_handles_;
- size_t start_;
- size_t end_;
-};
-
-MinorMarkCompactCollector::MinorMarkCompactCollector(Heap* heap)
- : MarkCompactCollectorBase(heap),
- worklist_(new MinorMarkCompactCollector::MarkingWorklist()),
- main_marking_visitor_(new YoungGenerationMarkingVisitor(
- heap, marking_state(), worklist_, kMainMarker)),
- page_parallel_job_semaphore_(0) {
- static_assert(
- kNumMarkers <= MinorMarkCompactCollector::MarkingWorklist::kMaxNumTasks,
- "more marker tasks than marking deque can handle");
-}
-
-MinorMarkCompactCollector::~MinorMarkCompactCollector() {
- delete worklist_;
- delete main_marking_visitor_;
-}
-
-static bool IsUnmarkedObjectForYoungGeneration(Heap* heap, Object** p) {
- DCHECK_IMPLIES(heap->InNewSpace(*p), heap->InToSpace(*p));
- return heap->InNewSpace(*p) && !heap->minor_mark_compact_collector()
- ->non_atomic_marking_state()
- ->IsGrey(HeapObject::cast(*p));
-}
-
-template <class ParallelItem>
-static void SeedGlobalHandles(GlobalHandles* global_handles,
- ItemParallelJob* job) {
- // Create batches of global handles.
- const size_t kGlobalHandlesBufferSize = 1000;
- const size_t new_space_nodes = global_handles->NumberOfNewSpaceNodes();
- for (size_t start = 0; start < new_space_nodes;
- start += kGlobalHandlesBufferSize) {
- size_t end = start + kGlobalHandlesBufferSize;
- if (end > new_space_nodes) end = new_space_nodes;
- job->AddItem(new ParallelItem(global_handles, start, end));
- }
-}
-
-void MinorMarkCompactCollector::MarkRootSetInParallel() {
- base::AtomicNumber<intptr_t> slots;
- {
- ItemParallelJob job(isolate()->cancelable_task_manager(),
- &page_parallel_job_semaphore_);
-
- // Seed the root set (roots + old->new set).
- {
- TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK_SEED);
- // Create batches of roots.
- RootMarkingVisitorSeedOnly<BatchedRootMarkingItem> root_seed_visitor(
- &job);
- heap()->IterateRoots(&root_seed_visitor, VISIT_ALL_IN_MINOR_MC_MARK);
- // Create batches of global handles.
- SeedGlobalHandles<GlobalHandlesMarkingItem>(isolate()->global_handles(),
- &job);
- // Create items for each page.
- RememberedSet<OLD_TO_NEW>::IterateMemoryChunks(
- heap(), [&job, &slots](MemoryChunk* chunk) {
- job.AddItem(new PageMarkingItem(chunk, &slots));
- });
- // Flush any remaining objects in the seeding visitor.
- root_seed_visitor.FlushObjects();
- }
-
- // Add tasks and run in parallel.
- {
- TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK_ROOTS);
- const int new_space_pages =
- static_cast<int>(heap()->new_space()->Capacity()) / Page::kPageSize;
- const int num_tasks = NumberOfParallelMarkingTasks(new_space_pages);
- for (int i = 0; i < num_tasks; i++) {
- job.AddTask(
- new YoungGenerationMarkingTask(isolate(), this, worklist(), i));
- }
- job.Run(isolate()->async_counters());
- DCHECK(worklist()->IsGlobalEmpty());
- }
- }
- old_to_new_slots_ = static_cast<int>(slots.Value());
-}
-
-void MinorMarkCompactCollector::MarkLiveObjects() {
- TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK);
-
- PostponeInterruptsScope postpone(isolate());
-
- RootMarkingVisitor root_visitor(this);
-
- MarkRootSetInParallel();
-
- // Mark rest on the main thread.
- {
- TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK_WEAK);
- heap()->IterateEncounteredWeakCollections(&root_visitor);
- ProcessMarkingWorklist();
- }
-
- {
- TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK_GLOBAL_HANDLES);
- isolate()->global_handles()->MarkNewSpaceWeakUnmodifiedObjectsPending(
- &IsUnmarkedObjectForYoungGeneration);
- isolate()
- ->global_handles()
- ->IterateNewSpaceWeakUnmodifiedRootsForFinalizers(&root_visitor);
- isolate()
- ->global_handles()
- ->IterateNewSpaceWeakUnmodifiedRootsForPhantomHandles(
- &root_visitor, &IsUnmarkedObjectForYoungGeneration);
- ProcessMarkingWorklist();
- }
-}
-
-void MinorMarkCompactCollector::ProcessMarkingWorklist() {
- MarkingWorklist::View marking_worklist(worklist(), kMainMarker);
- HeapObject* object = nullptr;
- while (marking_worklist.Pop(&object)) {
- DCHECK(!object->IsFiller());
- DCHECK(object->IsHeapObject());
- DCHECK(heap()->Contains(object));
- DCHECK(non_atomic_marking_state()->IsGrey(object));
- main_marking_visitor()->Visit(object);
- }
- DCHECK(marking_worklist.IsLocalEmpty());
-}
-
-void MinorMarkCompactCollector::CollectGarbage() {
- {
- TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_SWEEPING);
- heap()->mark_compact_collector()->sweeper()->EnsureIterabilityCompleted();
- CleanupSweepToIteratePages();
- }
-
- MarkLiveObjects();
- ClearNonLiveReferences();
-#ifdef VERIFY_HEAP
- if (FLAG_verify_heap) {
- YoungGenerationMarkingVerifier verifier(heap());
- verifier.Run();
- }
-#endif // VERIFY_HEAP
-
- Evacuate();
-#ifdef VERIFY_HEAP
- if (FLAG_verify_heap) {
- YoungGenerationEvacuationVerifier verifier(heap());
- verifier.Run();
- }
-#endif // VERIFY_HEAP
-
- {
- TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARKING_DEQUE);
- heap()->incremental_marking()->UpdateMarkingWorklistAfterScavenge();
- }
-
- {
- TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_RESET_LIVENESS);
- for (Page* p : PageRange(heap()->new_space()->FromSpaceStart(),
- heap()->new_space()->FromSpaceEnd())) {
- DCHECK(!p->IsFlagSet(Page::SWEEP_TO_ITERATE));
- non_atomic_marking_state()->ClearLiveness(p);
- if (FLAG_concurrent_marking) {
- // Ensure that concurrent marker does not track pages that are
- // going to be unmapped.
- heap()->concurrent_marking()->ClearLiveness(p);
- }
- }
- }
-
- RememberedSet<OLD_TO_NEW>::IterateMemoryChunks(
- heap(), [](MemoryChunk* chunk) {
- if (chunk->SweepingDone()) {
- RememberedSet<OLD_TO_NEW>::FreeEmptyBuckets(chunk);
- } else {
- RememberedSet<OLD_TO_NEW>::PreFreeEmptyBuckets(chunk);
- }
- });
-
- heap()->account_external_memory_concurrently_freed();
-}
-
-void MinorMarkCompactCollector::MakeIterable(
- Page* p, MarkingTreatmentMode marking_mode,
- FreeSpaceTreatmentMode free_space_mode) {
-  // We have to clear the full collector's mark bits for the areas that we
-  // remove here.
- MarkCompactCollector* full_collector = heap()->mark_compact_collector();
- Address free_start = p->area_start();
- DCHECK_EQ(0, reinterpret_cast<intptr_t>(free_start) % (32 * kPointerSize));
-
- for (auto object_and_size :
- LiveObjectRange<kGreyObjects>(p, marking_state()->bitmap(p))) {
- HeapObject* const object = object_and_size.first;
- DCHECK(non_atomic_marking_state()->IsGrey(object));
- Address free_end = object->address();
- if (free_end != free_start) {
- CHECK_GT(free_end, free_start);
- size_t size = static_cast<size_t>(free_end - free_start);
- full_collector->non_atomic_marking_state()->bitmap(p)->ClearRange(
- p->AddressToMarkbitIndex(free_start),
- p->AddressToMarkbitIndex(free_end));
- if (free_space_mode == ZAP_FREE_SPACE) {
- memset(free_start, 0xCC, size);
- }
- p->heap()->CreateFillerObjectAt(free_start, static_cast<int>(size),
- ClearRecordedSlots::kNo);
- }
- Map* map = object->synchronized_map();
- int size = object->SizeFromMap(map);
- free_start = free_end + size;
- }
-
- if (free_start != p->area_end()) {
- CHECK_GT(p->area_end(), free_start);
- size_t size = static_cast<size_t>(p->area_end() - free_start);
- full_collector->non_atomic_marking_state()->bitmap(p)->ClearRange(
- p->AddressToMarkbitIndex(free_start),
- p->AddressToMarkbitIndex(p->area_end()));
- if (free_space_mode == ZAP_FREE_SPACE) {
- memset(free_start, 0xCC, size);
- }
- p->heap()->CreateFillerObjectAt(free_start, static_cast<int>(size),
- ClearRecordedSlots::kNo);
- }
-
- if (marking_mode == MarkingTreatmentMode::CLEAR) {
- non_atomic_marking_state()->ClearLiveness(p);
- p->ClearFlag(Page::SWEEP_TO_ITERATE);
- }
-}
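
The MakeIterable helper removed above walks the live objects on a page in address order and stamps a filler object over every dead gap, so that later linear iteration of the page stays safe. A minimal sketch of that gap-filling walk, under the assumption that live objects come pre-sorted and that put_filler is an illustrative stand-in for Heap::CreateFillerObjectAt():

#include <cstddef>
#include <functional>
#include <utility>
#include <vector>

// live: {address, size} pairs sorted by address within [area_start, area_end).
void FillGapsBetweenLiveObjects(
    std::size_t area_start, std::size_t area_end,
    const std::vector<std::pair<std::size_t, std::size_t>>& live,
    const std::function<void(std::size_t, std::size_t)>& put_filler) {
  std::size_t free_start = area_start;
  for (const auto& object : live) {
    if (object.first != free_start) {
      put_filler(free_start, object.first - free_start);  // dead gap
    }
    free_start = object.first + object.second;
  }
  if (free_start != area_end) {
    put_filler(free_start, area_end - free_start);  // trailing gap
  }
}
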
-
-void MinorMarkCompactCollector::ClearNonLiveReferences() {
- TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_CLEAR);
-
- {
- TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_CLEAR_STRING_TABLE);
- // Internalized strings are always stored in old space, so there is no need
- // to clean them here.
- YoungGenerationExternalStringTableCleaner external_visitor(this);
- heap()->external_string_table_.IterateNewSpaceStrings(&external_visitor);
- heap()->external_string_table_.CleanUpNewSpaceStrings();
- }
-
- {
- TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_CLEAR_WEAK_LISTS);
- // Process the weak references.
- MinorMarkCompactWeakObjectRetainer retainer(this);
- heap()->ProcessYoungWeakReferences(&retainer);
- }
-}
-
-void MinorMarkCompactCollector::EvacuatePrologue() {
- NewSpace* new_space = heap()->new_space();
- // Append the list of new space pages to be processed.
- for (Page* p : PageRange(new_space->bottom(), new_space->top())) {
- new_space_evacuation_pages_.push_back(p);
- }
- new_space->Flip();
- new_space->ResetLinearAllocationArea();
-}
-
-void MinorMarkCompactCollector::EvacuateEpilogue() {
- heap()->new_space()->set_age_mark(heap()->new_space()->top());
- // Give pages that are queued to be freed back to the OS.
- heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
-}
-
-void MinorMarkCompactCollector::Evacuate() {
- TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_EVACUATE);
- base::LockGuard<base::Mutex> guard(heap()->relocation_mutex());
-
- {
- TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_EVACUATE_PROLOGUE);
- EvacuatePrologue();
- }
-
- {
- TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_EVACUATE_COPY);
- EvacuatePagesInParallel();
- }
-
- UpdatePointersAfterEvacuation();
-
- {
- TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_EVACUATE_REBALANCE);
- if (!heap()->new_space()->Rebalance()) {
- FatalProcessOutOfMemory("NewSpace::Rebalance");
- }
- }
-
- {
- TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_EVACUATE_CLEAN_UP);
- for (Page* p : new_space_evacuation_pages_) {
- if (p->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION) ||
- p->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION)) {
- p->ClearFlag(Page::PAGE_NEW_NEW_PROMOTION);
- p->ClearFlag(Page::PAGE_NEW_OLD_PROMOTION);
- p->SetFlag(Page::SWEEP_TO_ITERATE);
- sweep_to_iterate_pages_.push_back(p);
- }
- }
- new_space_evacuation_pages_.clear();
- }
-
- {
- TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_EVACUATE_EPILOGUE);
- EvacuateEpilogue();
- }
-}
-
void MarkCompactCollector::MarkLiveObjects() {
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK);
// The recursive GC marker detects when it is nearing stack overflow,
@@ -2430,7 +1688,6 @@ void MarkCompactCollector::MarkLiveObjects() {
}
}
-
void MarkCompactCollector::ClearNonLiveReferences() {
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR);
@@ -2463,79 +1720,34 @@ void MarkCompactCollector::ClearNonLiveReferences() {
// ClearFullMapTransitions must be called before WeakCells are cleared.
ClearFullMapTransitions();
}
- DependentCode* dependent_code_list;
- ClearWeakCellsAndSimpleMapTransitions(&dependent_code_list);
- MarkDependentCodeForDeoptimization(dependent_code_list);
+ ClearWeakCells();
+ ClearWeakReferences();
+ MarkDependentCodeForDeoptimization();
ClearWeakCollections();
DCHECK(weak_objects_.weak_cells.IsGlobalEmpty());
DCHECK(weak_objects_.transition_arrays.IsGlobalEmpty());
+ DCHECK(weak_objects_.weak_references.IsGlobalEmpty());
+ DCHECK(weak_objects_.weak_objects_in_code.IsGlobalEmpty());
}
-
-void MarkCompactCollector::MarkDependentCodeForDeoptimization(
- DependentCode* list_head) {
- TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_DEPENDENT_CODE);
- Isolate* isolate = this->isolate();
- DependentCode* current = list_head;
- while (current->length() > 0) {
- have_code_to_deoptimize_ |= current->MarkCodeForDeoptimization(
- isolate, DependentCode::kWeakCodeGroup);
- current = current->next_link();
- }
-
- {
- ArrayList* list = heap_->weak_new_space_object_to_code_list();
- int counter = 0;
- for (int i = 0; i < list->Length(); i += 2) {
- WeakCell* obj = WeakCell::cast(list->Get(i));
- WeakCell* dep = WeakCell::cast(list->Get(i + 1));
- if (obj->cleared() || dep->cleared()) {
- if (!dep->cleared()) {
- Code* code = Code::cast(dep->value());
- if (!code->marked_for_deoptimization()) {
- DependentCode::SetMarkedForDeoptimization(
- code, DependentCode::DependencyGroup::kWeakCodeGroup);
- code->InvalidateEmbeddedObjects();
- have_code_to_deoptimize_ = true;
- }
- }
- } else {
- // We record the slot manually because marking is finished at this
- // point and the write barrier would bailout.
- list->Set(counter, obj, SKIP_WRITE_BARRIER);
- RecordSlot(list, list->Slot(counter), obj);
- counter++;
- list->Set(counter, dep, SKIP_WRITE_BARRIER);
- RecordSlot(list, list->Slot(counter), dep);
- counter++;
- }
- }
- }
-
- WeakHashTable* table = heap_->weak_object_to_code_table();
- uint32_t capacity = table->Capacity();
- for (uint32_t i = 0; i < capacity; i++) {
- uint32_t key_index = table->EntryToIndex(i);
- Object* key = table->get(key_index);
- if (!table->IsKey(isolate, key)) continue;
- uint32_t value_index = table->EntryToValueIndex(i);
- Object* value = table->get(value_index);
- DCHECK(key->IsWeakCell());
- if (WeakCell::cast(key)->cleared()) {
- have_code_to_deoptimize_ |=
- DependentCode::cast(value)->MarkCodeForDeoptimization(
- isolate, DependentCode::kWeakCodeGroup);
- table->set(key_index, heap_->the_hole_value());
- table->set(value_index, heap_->the_hole_value());
- table->ElementRemoved();
+void MarkCompactCollector::MarkDependentCodeForDeoptimization() {
+ std::pair<HeapObject*, Code*> weak_object_in_code;
+ while (weak_objects_.weak_objects_in_code.Pop(kMainThread,
+ &weak_object_in_code)) {
+ HeapObject* object = weak_object_in_code.first;
+ Code* code = weak_object_in_code.second;
+ if (!non_atomic_marking_state()->IsBlackOrGrey(object) &&
+ !code->marked_for_deoptimization()) {
+ code->SetMarkedForDeoptimization("weak objects");
+ code->InvalidateEmbeddedObjects();
+ have_code_to_deoptimize_ = true;
}
}
}
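
The rewritten MarkDependentCodeForDeoptimization above drains the weak_objects_in_code worklist filled during marking instead of walking a DependentCode list. A minimal sketch of that drain pattern, using simplified stand-in types (HeapObjectStub, CodeStub and DrainWeakObjectsInCode are illustrative, not V8 API):

#include <deque>
#include <utility>

struct HeapObjectStub { bool marked = false; };
struct CodeStub { bool marked_for_deoptimization = false; };

using WeakObjectInCodeEntry = std::pair<HeapObjectStub*, CodeStub*>;

bool DrainWeakObjectsInCode(std::deque<WeakObjectInCodeEntry>* worklist) {
  bool have_code_to_deoptimize = false;
  while (!worklist->empty()) {
    WeakObjectInCodeEntry entry = worklist->front();
    worklist->pop_front();
    // If the weakly embedded object is dead and the code is not already
    // queued, flag the code so it can be deoptimized after the GC.
    if (!entry.first->marked && !entry.second->marked_for_deoptimization) {
      entry.second->marked_for_deoptimization = true;
      have_code_to_deoptimize = true;
    }
  }
  return have_code_to_deoptimize;
}

The real collector additionally invalidates embedded objects in the flagged code and consults the non-atomic marking state rather than a plain flag.
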
-void MarkCompactCollector::ClearSimpleMapTransition(
- WeakCell* potential_transition, Map* dead_target) {
+void MarkCompactCollector::ClearPotentialSimpleMapTransition(Map* dead_target) {
DCHECK(non_atomic_marking_state()->IsWhite(dead_target));
Object* potential_parent = dead_target->constructor_or_backpointer();
if (potential_parent->IsMap()) {
@@ -2543,26 +1755,24 @@ void MarkCompactCollector::ClearSimpleMapTransition(
DisallowHeapAllocation no_gc_obviously;
if (non_atomic_marking_state()->IsBlackOrGrey(parent) &&
TransitionsAccessor(parent, &no_gc_obviously)
- .HasSimpleTransitionTo(potential_transition)) {
- ClearSimpleMapTransition(parent, dead_target);
+ .HasSimpleTransitionTo(dead_target)) {
+ ClearPotentialSimpleMapTransition(parent, dead_target);
}
}
}
-void MarkCompactCollector::ClearSimpleMapTransition(Map* map,
- Map* dead_target) {
+void MarkCompactCollector::ClearPotentialSimpleMapTransition(Map* map,
+ Map* dead_target) {
DCHECK(!map->is_prototype_map());
DCHECK(!dead_target->is_prototype_map());
- // Clear the useless weak cell pointer, and take ownership of the descriptor
- // array.
- map->set_raw_transitions(Smi::kZero);
+ DCHECK_EQ(map->raw_transitions(), HeapObjectReference::Weak(dead_target));
+ // Take ownership of the descriptor array.
int number_of_own_descriptors = map->NumberOfOwnDescriptors();
DescriptorArray* descriptors = map->instance_descriptors();
if (descriptors == dead_target->instance_descriptors() &&
number_of_own_descriptors > 0) {
TrimDescriptorArray(map, descriptors);
DCHECK(descriptors->number_of_descriptors() == number_of_own_descriptors);
- map->set_owns_descriptors(true);
}
}
@@ -2571,16 +1781,21 @@ void MarkCompactCollector::ClearFullMapTransitions() {
while (weak_objects_.transition_arrays.Pop(kMainThread, &array)) {
int num_transitions = array->number_of_entries();
if (num_transitions > 0) {
- Map* map = array->GetTarget(0);
- DCHECK_NOT_NULL(map); // WeakCells aren't cleared yet.
- Map* parent = Map::cast(map->constructor_or_backpointer());
- bool parent_is_alive = non_atomic_marking_state()->IsBlackOrGrey(parent);
- DescriptorArray* descriptors =
- parent_is_alive ? parent->instance_descriptors() : nullptr;
- bool descriptors_owner_died =
- CompactTransitionArray(parent, array, descriptors);
- if (descriptors_owner_died) {
- TrimDescriptorArray(parent, descriptors);
+ Map* map;
+ // The array might contain "undefined" elements because it's not yet
+ // filled. Allow it.
+ if (array->GetTargetIfExists(0, isolate(), &map)) {
+ DCHECK_NOT_NULL(map); // WeakCells aren't cleared yet.
+ Map* parent = Map::cast(map->constructor_or_backpointer());
+ bool parent_is_alive =
+ non_atomic_marking_state()->IsBlackOrGrey(parent);
+ DescriptorArray* descriptors =
+ parent_is_alive ? parent->instance_descriptors() : nullptr;
+ bool descriptors_owner_died =
+ CompactTransitionArray(parent, array, descriptors);
+ if (descriptors_owner_died) {
+ TrimDescriptorArray(parent, descriptors);
+ }
}
}
}
@@ -2627,14 +1842,12 @@ bool MarkCompactCollector::CompactTransitionArray(
// array disappeared during GC.
int trim = transitions->Capacity() - transition_index;
if (trim > 0) {
- heap_->RightTrimFixedArray(transitions,
- trim * TransitionArray::kTransitionSize);
+ heap_->RightTrimFixedArray(transitions, trim * TransitionArray::kEntrySize);
transitions->SetNumberOfTransitions(transition_index);
}
return descriptors_owner_died;
}
-
void MarkCompactCollector::TrimDescriptorArray(Map* map,
DescriptorArray* descriptors) {
int number_of_own_descriptors = map->NumberOfOwnDescriptors();
@@ -2664,7 +1877,6 @@ void MarkCompactCollector::TrimDescriptorArray(Map* map,
map->set_owns_descriptors(true);
}
-
void MarkCompactCollector::TrimEnumCache(Map* map,
DescriptorArray* descriptors) {
int live_enum = map->EnumLength();
@@ -2685,7 +1897,6 @@ void MarkCompactCollector::TrimEnumCache(Map* map,
heap_->RightTrimFixedArray(indices, to_trim);
}
-
void MarkCompactCollector::ProcessWeakCollections() {
MarkCompactMarkingVisitor visitor(this, marking_state());
Object* weak_collection_obj = heap()->encountered_weak_collections();
@@ -2716,7 +1927,6 @@ void MarkCompactCollector::ProcessWeakCollections() {
}
}
-
void MarkCompactCollector::ClearWeakCollections() {
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_WEAK_COLLECTIONS);
Object* weak_collection_obj = heap()->encountered_weak_collections();
@@ -2739,7 +1949,6 @@ void MarkCompactCollector::ClearWeakCollections() {
heap()->set_encountered_weak_collections(Smi::kZero);
}
-
void MarkCompactCollector::AbortWeakCollections() {
Object* weak_collection_obj = heap()->encountered_weak_collections();
while (weak_collection_obj != Smi::kZero) {
@@ -2751,12 +1960,9 @@ void MarkCompactCollector::AbortWeakCollections() {
heap()->set_encountered_weak_collections(Smi::kZero);
}
-void MarkCompactCollector::ClearWeakCellsAndSimpleMapTransitions(
- DependentCode** dependent_code_list) {
+void MarkCompactCollector::ClearWeakCells() {
Heap* heap = this->heap();
TRACE_GC(heap->tracer(), GCTracer::Scope::MC_CLEAR_WEAK_CELLS);
- DependentCode* dependent_code_head =
- DependentCode::cast(heap->empty_fixed_array());
WeakCell* weak_cell;
while (weak_objects_.weak_cells.Pop(kMainThread, &weak_cell)) {
// We do not insert cleared weak cells into the list, so the value
@@ -2782,20 +1988,6 @@ void MarkCompactCollector::ClearWeakCellsAndSimpleMapTransitions(
} else {
weak_cell->clear();
}
- } else if (value->IsMap()) {
- // The map is non-live.
- Map* map = Map::cast(value);
- // Add dependent code to the dependent_code_list.
- DependentCode* candidate = map->dependent_code();
- // We rely on the fact that the weak code group comes first.
- STATIC_ASSERT(DependentCode::kWeakCodeGroup == 0);
- if (candidate->length() > 0 &&
- candidate->group() == DependentCode::kWeakCodeGroup) {
- candidate->set_next_link(dependent_code_head);
- dependent_code_head = candidate;
- }
- ClearSimpleMapTransition(weak_cell, map);
- weak_cell->clear();
} else {
// All other objects.
weak_cell->clear();
@@ -2806,12 +1998,35 @@ void MarkCompactCollector::ClearWeakCellsAndSimpleMapTransitions(
RecordSlot(weak_cell, slot, *slot);
}
}
- *dependent_code_list = dependent_code_head;
+}
+
+void MarkCompactCollector::ClearWeakReferences() {
+ TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_WEAK_REFERENCES);
+ std::pair<HeapObject*, HeapObjectReference**> slot;
+ while (weak_objects_.weak_references.Pop(kMainThread, &slot)) {
+ HeapObject* value;
+ HeapObjectReference** location = slot.second;
+ if ((*location)->ToWeakHeapObject(&value)) {
+ DCHECK(!value->IsCell());
+ if (non_atomic_marking_state()->IsBlackOrGrey(value)) {
+ // The value of the weak reference is alive.
+ RecordSlot(slot.first, location, value);
+ } else {
+ if (value->IsMap()) {
+ // The map is non-live.
+ ClearPotentialSimpleMapTransition(Map::cast(value));
+ }
+ *location = HeapObjectReference::ClearedValue();
+ }
+ }
+ }
}
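
The new ClearWeakReferences above visits every recorded weak slot once: a live target gets its slot re-recorded for compaction, a dead target is overwritten with the cleared sentinel (and a dead map additionally triggers ClearPotentialSimpleMapTransition). A minimal sketch of that keep-or-clear decision, with WeakTargetStub and kClearedWeakValue as illustrative stand-ins:

#include <vector>

struct WeakTargetStub { bool marked = false; };
WeakTargetStub* const kClearedWeakValue = nullptr;  // stand-in for ClearedValue()

void ClearDeadWeakSlots(const std::vector<WeakTargetStub**>& weak_slots) {
  for (WeakTargetStub** slot : weak_slots) {
    WeakTargetStub* value = *slot;
    if (value == kClearedWeakValue) continue;  // already cleared
    if (value->marked) continue;  // alive: the real code re-records the slot
    *slot = kClearedWeakValue;    // dead: clear the weak reference
  }
}
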
void MarkCompactCollector::AbortWeakObjects() {
weak_objects_.weak_cells.Clear();
weak_objects_.transition_arrays.Clear();
+ weak_objects_.weak_references.Clear();
+ weak_objects_.weak_objects_in_code.Clear();
}
void MarkCompactCollector::RecordRelocSlot(Code* host, RelocInfo* rinfo,
@@ -2839,30 +2054,57 @@ void MarkCompactCollector::RecordRelocSlot(Code* host, RelocInfo* rinfo,
}
template <AccessMode access_mode>
-static inline SlotCallbackResult UpdateSlot(Object** slot) {
- Object* obj = *slot;
- if (obj->IsHeapObject()) {
- HeapObject* heap_obj = HeapObject::cast(obj);
- MapWord map_word = heap_obj->map_word();
- if (map_word.IsForwardingAddress()) {
- DCHECK(heap_obj->GetHeap()->InFromSpace(heap_obj) ||
- MarkCompactCollector::IsOnEvacuationCandidate(heap_obj) ||
- Page::FromAddress(heap_obj->address())
- ->IsFlagSet(Page::COMPACTION_WAS_ABORTED));
- HeapObject* target = map_word.ToForwardingAddress();
- if (access_mode == AccessMode::NON_ATOMIC) {
- *slot = target;
- } else {
- base::AsAtomicPointer::Release_CompareAndSwap(slot, obj, target);
- }
- DCHECK(!heap_obj->GetHeap()->InFromSpace(target));
- DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(target));
+static inline SlotCallbackResult UpdateSlot(
+ MaybeObject** slot, MaybeObject* old, HeapObject* heap_obj,
+ HeapObjectReferenceType reference_type) {
+ MapWord map_word = heap_obj->map_word();
+ if (map_word.IsForwardingAddress()) {
+ DCHECK(heap_obj->GetHeap()->InFromSpace(heap_obj) ||
+ MarkCompactCollector::IsOnEvacuationCandidate(heap_obj) ||
+ Page::FromAddress(heap_obj->address())
+ ->IsFlagSet(Page::COMPACTION_WAS_ABORTED));
+ MaybeObject* target =
+ reference_type == HeapObjectReferenceType::WEAK
+ ? HeapObjectReference::Weak(map_word.ToForwardingAddress())
+ : HeapObjectReference::Strong(map_word.ToForwardingAddress());
+ if (access_mode == AccessMode::NON_ATOMIC) {
+ *slot = target;
+ } else {
+ base::AsAtomicPointer::Release_CompareAndSwap(slot, old, target);
}
+ DCHECK(!heap_obj->GetHeap()->InFromSpace(target));
+ DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(target));
}
// OLD_TO_OLD slots are always removed after updating.
return REMOVE_SLOT;
}
+template <AccessMode access_mode>
+static inline SlotCallbackResult UpdateSlot(MaybeObject** slot) {
+ MaybeObject* obj = base::AsAtomicPointer::Relaxed_Load(slot);
+ HeapObject* heap_obj;
+ if (obj->ToWeakHeapObject(&heap_obj)) {
+ UpdateSlot<access_mode>(slot, obj, heap_obj, HeapObjectReferenceType::WEAK);
+ } else if (obj->ToStrongHeapObject(&heap_obj)) {
+ return UpdateSlot<access_mode>(slot, obj, heap_obj,
+ HeapObjectReferenceType::STRONG);
+ }
+ return REMOVE_SLOT;
+}
+
+template <AccessMode access_mode>
+static inline SlotCallbackResult UpdateStrongSlot(MaybeObject** maybe_slot) {
+ DCHECK((*maybe_slot)->IsSmi() || (*maybe_slot)->IsStrongHeapObject());
+ Object** slot = reinterpret_cast<Object**>(maybe_slot);
+ Object* obj = base::AsAtomicPointer::Relaxed_Load(slot);
+ if (obj->IsHeapObject()) {
+ HeapObject* heap_obj = HeapObject::cast(obj);
+ return UpdateSlot<access_mode>(maybe_slot, MaybeObject::FromObject(obj),
+ heap_obj, HeapObjectReferenceType::STRONG);
+ }
+ return REMOVE_SLOT;
+}
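
Both UpdateSlot overloads above reduce to the same move: if the referenced object carries a forwarding address in its map word, rewrite the slot to point at the new copy, re-tagging the value as weak or strong as appropriate. A sketch of just the forwarding step, dropping the tag bit for brevity (MovableObject is an illustrative stand-in):

struct MovableObject {
  MovableObject* forwarding_address = nullptr;  // set once the object moved
};

void UpdateSlotSketch(MovableObject** slot) {
  MovableObject* object = *slot;
  if (object != nullptr && object->forwarding_address != nullptr) {
    *slot = object->forwarding_address;  // follow the forwarding pointer
  }
}
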
+
// Visitor for updating root pointers and to-space pointers.
// It does not expect to encounter pointers to dead objects.
// TODO(ulan): Remove code object specific functions. This visitor
@@ -2870,33 +2112,61 @@ static inline SlotCallbackResult UpdateSlot(Object** slot) {
class PointersUpdatingVisitor : public ObjectVisitor, public RootVisitor {
public:
void VisitPointer(HeapObject* host, Object** p) override {
+ UpdateStrongSlotInternal(p);
+ }
+
+ void VisitPointer(HeapObject* host, MaybeObject** p) override {
UpdateSlotInternal(p);
}
void VisitPointers(HeapObject* host, Object** start, Object** end) override {
- for (Object** p = start; p < end; p++) UpdateSlotInternal(p);
+ for (Object** p = start; p < end; p++) {
+ UpdateStrongSlotInternal(p);
+ }
+ }
+
+ void VisitPointers(HeapObject* host, MaybeObject** start,
+ MaybeObject** end) final {
+ for (MaybeObject** p = start; p < end; p++) {
+ UpdateSlotInternal(p);
+ }
}
void VisitRootPointer(Root root, const char* description,
Object** p) override {
- UpdateSlotInternal(p);
+ UpdateStrongSlotInternal(p);
}
void VisitRootPointers(Root root, const char* description, Object** start,
Object** end) override {
- for (Object** p = start; p < end; p++) UpdateSlotInternal(p);
+ for (Object** p = start; p < end; p++) UpdateStrongSlotInternal(p);
}
void VisitEmbeddedPointer(Code* host, RelocInfo* rinfo) override {
- UpdateTypedSlotHelper::UpdateEmbeddedPointer(rinfo, UpdateSlotInternal);
+ UpdateTypedSlotHelper::UpdateEmbeddedPointer(
+ rinfo, UpdateStrongMaybeObjectSlotInternal);
}
void VisitCodeTarget(Code* host, RelocInfo* rinfo) override {
- UpdateTypedSlotHelper::UpdateCodeTarget(rinfo, UpdateSlotInternal);
+ UpdateTypedSlotHelper::UpdateCodeTarget(
+ rinfo, UpdateStrongMaybeObjectSlotInternal);
}
private:
- static inline SlotCallbackResult UpdateSlotInternal(Object** slot) {
+ static inline SlotCallbackResult UpdateStrongMaybeObjectSlotInternal(
+ MaybeObject** slot) {
+ DCHECK(!(*slot)->IsWeakHeapObject());
+ DCHECK(!(*slot)->IsClearedWeakHeapObject());
+ return UpdateStrongSlotInternal(reinterpret_cast<Object**>(slot));
+ }
+
+ static inline SlotCallbackResult UpdateStrongSlotInternal(Object** slot) {
+ DCHECK(!HasWeakHeapObjectTag(*slot));
+ return UpdateStrongSlot<AccessMode::NON_ATOMIC>(
+ reinterpret_cast<MaybeObject**>(slot));
+ }
+
+ static inline SlotCallbackResult UpdateSlotInternal(MaybeObject** slot) {
return UpdateSlot<AccessMode::NON_ATOMIC>(slot);
}
};
@@ -3144,82 +2414,6 @@ void FullEvacuator::RawEvacuatePage(Page* page, intptr_t* live_bytes) {
}
}
-class YoungGenerationEvacuator : public Evacuator {
- public:
- YoungGenerationEvacuator(MinorMarkCompactCollector* collector,
- RecordMigratedSlotVisitor* record_visitor)
- : Evacuator(collector->heap(), record_visitor), collector_(collector) {}
-
- GCTracer::BackgroundScope::ScopeId GetBackgroundTracingScope() override {
- return GCTracer::BackgroundScope::MINOR_MC_BACKGROUND_EVACUATE_COPY;
- }
-
- protected:
- void RawEvacuatePage(Page* page, intptr_t* live_bytes) override;
-
- MinorMarkCompactCollector* collector_;
-};
-
-void YoungGenerationEvacuator::RawEvacuatePage(Page* page,
- intptr_t* live_bytes) {
- TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
- "YoungGenerationEvacuator::RawEvacuatePage");
- MinorMarkCompactCollector::NonAtomicMarkingState* marking_state =
- collector_->non_atomic_marking_state();
- *live_bytes = marking_state->live_bytes(page);
- switch (ComputeEvacuationMode(page)) {
- case kObjectsNewToOld:
- LiveObjectVisitor::VisitGreyObjectsNoFail(
- page, marking_state, &new_space_visitor_,
- LiveObjectVisitor::kClearMarkbits);
- // ArrayBufferTracker will be updated during pointers updating.
- break;
- case kPageNewToOld:
- LiveObjectVisitor::VisitGreyObjectsNoFail(
- page, marking_state, &new_to_old_page_visitor_,
- LiveObjectVisitor::kKeepMarking);
- new_to_old_page_visitor_.account_moved_bytes(
- marking_state->live_bytes(page));
- // TODO(mlippautz): If cleaning array buffers is too slow here we can
- // delay it until the next GC.
- ArrayBufferTracker::FreeDead(page, marking_state);
- if (heap()->ShouldZapGarbage()) {
- collector_->MakeIterable(page, MarkingTreatmentMode::KEEP,
- ZAP_FREE_SPACE);
- } else if (heap()->incremental_marking()->IsMarking()) {
- // When incremental marking is on, we need to clear the mark bits of
- // the full collector. We cannot yet discard the young generation mark
- // bits as they are still relevant for pointers updating.
- collector_->MakeIterable(page, MarkingTreatmentMode::KEEP,
- IGNORE_FREE_SPACE);
- }
- break;
- case kPageNewToNew:
- LiveObjectVisitor::VisitGreyObjectsNoFail(
- page, marking_state, &new_to_new_page_visitor_,
- LiveObjectVisitor::kKeepMarking);
- new_to_new_page_visitor_.account_moved_bytes(
- marking_state->live_bytes(page));
- // TODO(mlippautz): If cleaning array buffers is too slow here we can
- // delay it until the next GC.
- ArrayBufferTracker::FreeDead(page, marking_state);
- if (heap()->ShouldZapGarbage()) {
- collector_->MakeIterable(page, MarkingTreatmentMode::KEEP,
- ZAP_FREE_SPACE);
- } else if (heap()->incremental_marking()->IsMarking()) {
- // When incremental marking is on, we need to clear the mark bits of
- // the full collector. We cannot yet discard the young generation mark
- // bits as they are still relevant for pointers updating.
- collector_->MakeIterable(page, MarkingTreatmentMode::KEEP,
- IGNORE_FREE_SPACE);
- }
- break;
- case kObjectsOldToOld:
- UNREACHABLE();
- break;
- }
-}
-
class PageEvacuationItem : public ItemParallelJob::Item {
public:
explicit PageEvacuationItem(Page* page) : page_(page) {}
@@ -3265,7 +2459,8 @@ void MarkCompactCollectorBase::CreateAndExecuteEvacuationTasks(
const bool profiling =
heap()->isolate()->is_profiling() ||
heap()->isolate()->logger()->is_logging_code_events() ||
- heap()->isolate()->heap_profiler()->is_tracking_object_moves();
+ heap()->isolate()->heap_profiler()->is_tracking_object_moves() ||
+ heap()->has_heap_object_allocation_tracker();
ProfilingMigrationObserver profiling_observer(heap());
const int wanted_num_tasks =
@@ -3286,16 +2481,15 @@ void MarkCompactCollectorBase::CreateAndExecuteEvacuationTasks(
delete[] evacuators;
if (FLAG_trace_evacuation) {
- PrintIsolate(
- isolate(),
- "%8.0f ms: evacuation-summary: parallel=%s pages=%d "
- "wanted_tasks=%d tasks=%d cores=%" PRIuS " live_bytes=%" V8PRIdPTR
- " compaction_speed=%.f\n",
- isolate()->time_millis_since_init(),
- FLAG_parallel_compaction ? "yes" : "no", job->NumberOfItems(),
- wanted_num_tasks, job->NumberOfTasks(),
- V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads() + 1,
- live_bytes, compaction_speed);
+ PrintIsolate(isolate(),
+ "%8.0f ms: evacuation-summary: parallel=%s pages=%d "
+ "wanted_tasks=%d tasks=%d cores=%d live_bytes=%" V8PRIdPTR
+ " compaction_speed=%.f\n",
+ isolate()->time_millis_since_init(),
+ FLAG_parallel_compaction ? "yes" : "no", job->NumberOfItems(),
+ wanted_num_tasks, job->NumberOfTasks(),
+ V8::GetCurrentPlatform()->NumberOfWorkerThreads() + 1,
+ live_bytes, compaction_speed);
}
}
@@ -3343,34 +2537,6 @@ void MarkCompactCollector::EvacuatePagesInParallel() {
PostProcessEvacuationCandidates();
}
-void MinorMarkCompactCollector::EvacuatePagesInParallel() {
- ItemParallelJob evacuation_job(isolate()->cancelable_task_manager(),
- &page_parallel_job_semaphore_);
- intptr_t live_bytes = 0;
-
- for (Page* page : new_space_evacuation_pages_) {
- intptr_t live_bytes_on_page = non_atomic_marking_state()->live_bytes(page);
- if (live_bytes_on_page == 0 && !page->contains_array_buffers()) continue;
- live_bytes += live_bytes_on_page;
- if (ShouldMovePage(page, live_bytes_on_page)) {
- if (page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK)) {
- EvacuateNewSpacePageVisitor<NEW_TO_OLD>::Move(page);
- } else {
- EvacuateNewSpacePageVisitor<NEW_TO_NEW>::Move(page);
- }
- }
- evacuation_job.AddItem(new PageEvacuationItem(page));
- }
- if (evacuation_job.NumberOfItems() == 0) return;
-
- YoungGenerationMigrationObserver observer(heap(),
- heap()->mark_compact_collector());
- YoungGenerationRecordMigratedSlotVisitor record_visitor(
- heap()->mark_compact_collector());
- CreateAndExecuteEvacuationTasks<YoungGenerationEvacuator>(
- this, &evacuation_job, &record_visitor, &observer, live_bytes);
-}
-
class EvacuationWeakObjectRetainer : public WeakObjectRetainer {
public:
virtual Object* RetainAs(Object* object) {
@@ -3495,7 +2661,7 @@ void MarkCompactCollector::Evacuate() {
{
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_REBALANCE);
if (!heap()->new_space()->Rebalance()) {
- FatalProcessOutOfMemory("NewSpace::Rebalance");
+ heap()->FatalProcessOutOfMemory("NewSpace::Rebalance");
}
}
@@ -3604,7 +2770,7 @@ class ToSpaceUpdatingItem : public UpdatingItem {
HeapObject* object = HeapObject::FromAddress(cur);
Map* map = object->map();
int size = object->SizeFromMap(map);
- object->IterateBody(map->instance_type(), size, &visitor);
+ object->IterateBodyFast(map, size, &visitor);
cur += size;
}
}
@@ -3649,28 +2815,34 @@ class RememberedSetUpdatingItem : public UpdatingItem {
private:
inline SlotCallbackResult CheckAndUpdateOldToNewSlot(Address slot_address) {
- Object** slot = reinterpret_cast<Object**>(slot_address);
- if (heap_->InFromSpace(*slot)) {
- HeapObject* heap_object = reinterpret_cast<HeapObject*>(*slot);
- DCHECK(heap_object->IsHeapObject());
+ MaybeObject** slot = reinterpret_cast<MaybeObject**>(slot_address);
+ HeapObject* heap_object;
+ if (!(*slot)->ToStrongOrWeakHeapObject(&heap_object)) {
+ return REMOVE_SLOT;
+ }
+ if (heap_->InFromSpace(heap_object)) {
MapWord map_word = heap_object->map_word();
if (map_word.IsForwardingAddress()) {
- *slot = map_word.ToForwardingAddress();
+ HeapObjectReference::Update(
+ reinterpret_cast<HeapObjectReference**>(slot),
+ map_word.ToForwardingAddress());
}
+ bool success = (*slot)->ToStrongOrWeakHeapObject(&heap_object);
+ USE(success);
+ DCHECK(success);
// If the object was in from space before and is after executing the
// callback in to space, the object is still live.
// Unfortunately, we do not know about the slot. It could be in a
// just freed free space object.
- if (heap_->InToSpace(*slot)) {
+ if (heap_->InToSpace(heap_object)) {
return KEEP_SLOT;
}
- } else if (heap_->InToSpace(*slot)) {
+ } else if (heap_->InToSpace(heap_object)) {
// Slots can point to "to" space if the page has been moved, or if the
// slot has been recorded multiple times in the remembered set, or
// if the slot was already updated during old->old updating.
// In case the page has been moved, check markbits to determine liveness
// of the slot. In the other case, the slot can just be kept.
- HeapObject* heap_object = reinterpret_cast<HeapObject*>(*slot);
if (Page::FromAddress(heap_object->address())
->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION)) {
// IsBlackOrGrey is required because objects are marked as grey for
@@ -3684,7 +2856,7 @@ class RememberedSetUpdatingItem : public UpdatingItem {
}
return KEEP_SLOT;
} else {
- DCHECK(!heap_->InNewSpace(*slot));
+ DCHECK(!heap_->InNewSpace(heap_object));
}
return REMOVE_SLOT;
}
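
CheckAndUpdateOldToNewSlot above first repairs the slot through any forwarding pointer and then decides whether the remembered-set entry is still worth keeping. A simplified sketch of that keep-or-remove decision, assuming illustrative stand-ins for the space predicate and the result enum:

enum SlotCheckResult { kKeepSlot, kRemoveSlot };

struct YoungObject {
  YoungObject* forwarding_address = nullptr;
  bool in_new_space = true;  // stand-in for Heap::InNewSpace()
};

SlotCheckResult CheckAndUpdateOldToNewSketch(YoungObject** slot) {
  YoungObject* target = *slot;
  if (target == nullptr) return kRemoveSlot;  // not a heap object
  if (target->forwarding_address != nullptr) {
    *slot = target->forwarding_address;  // evacuated: fix the slot first
    target = *slot;
  }
  // Keep the remembered-set entry only while the target is still young;
  // once the value has been promoted, the old-to-new set no longer needs it.
  return target->in_new_space ? kKeepSlot : kRemoveSlot;
}
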
@@ -3704,7 +2876,7 @@ class RememberedSetUpdatingItem : public UpdatingItem {
[&filter](Address slot) {
if (!filter.IsValid(slot)) return REMOVE_SLOT;
return UpdateSlot<AccessMode::NON_ATOMIC>(
- reinterpret_cast<Object**>(slot));
+ reinterpret_cast<MaybeObject**>(slot));
},
SlotSet::PREFREE_EMPTY_BUCKETS);
}
@@ -3732,7 +2904,7 @@ class RememberedSetUpdatingItem : public UpdatingItem {
chunk_,
[isolate, this](SlotType slot_type, Address host_addr, Address slot) {
return UpdateTypedSlotHelper::UpdateTypedSlot(
- isolate, slot_type, slot, [this](Object** slot) {
+ isolate, slot_type, slot, [this](MaybeObject** slot) {
return CheckAndUpdateOldToNewSlot(
reinterpret_cast<Address>(slot));
});
@@ -3745,8 +2917,11 @@ class RememberedSetUpdatingItem : public UpdatingItem {
RememberedSet<OLD_TO_OLD>::IterateTyped(
chunk_,
[isolate](SlotType slot_type, Address host_addr, Address slot) {
+ // Using UpdateStrongSlot is OK here, because there are no weak
+ // typed slots.
return UpdateTypedSlotHelper::UpdateTypedSlot(
- isolate, slot_type, slot, UpdateSlot<AccessMode::NON_ATOMIC>);
+ isolate, slot_type, slot,
+ UpdateStrongSlot<AccessMode::NON_ATOMIC>);
});
}
}
@@ -3757,24 +2932,12 @@ class RememberedSetUpdatingItem : public UpdatingItem {
RememberedSetUpdatingMode updating_mode_;
};
-UpdatingItem* MinorMarkCompactCollector::CreateToSpaceUpdatingItem(
- MemoryChunk* chunk, Address start, Address end) {
- return new ToSpaceUpdatingItem<NonAtomicMarkingState>(
- chunk, start, end, non_atomic_marking_state());
-}
-
UpdatingItem* MarkCompactCollector::CreateToSpaceUpdatingItem(
MemoryChunk* chunk, Address start, Address end) {
return new ToSpaceUpdatingItem<NonAtomicMarkingState>(
chunk, start, end, non_atomic_marking_state());
}
-UpdatingItem* MinorMarkCompactCollector::CreateRememberedSetUpdatingItem(
- MemoryChunk* chunk, RememberedSetUpdatingMode updating_mode) {
- return new RememberedSetUpdatingItem<NonAtomicMarkingState>(
- heap(), non_atomic_marking_state(), chunk, updating_mode);
-}
-
UpdatingItem* MarkCompactCollector::CreateRememberedSetUpdatingItem(
MemoryChunk* chunk, RememberedSetUpdatingMode updating_mode) {
return new RememberedSetUpdatingItem<NonAtomicMarkingState>(
@@ -3878,21 +3041,6 @@ int MarkCompactCollectorBase::CollectRememberedSetUpdatingItems(
return pages;
}
-int MinorMarkCompactCollector::CollectNewSpaceArrayBufferTrackerItems(
- ItemParallelJob* job) {
- int pages = 0;
- for (Page* p : new_space_evacuation_pages_) {
- if (Evacuator::ComputeEvacuationMode(p) == Evacuator::kObjectsNewToOld) {
- if (p->local_tracker() == nullptr) continue;
-
- pages++;
- job->AddItem(new ArrayBufferTrackerUpdatingItem(
- p, ArrayBufferTrackerUpdatingItem::kRegular));
- }
- }
- return pages;
-}
-
int MarkCompactCollector::CollectNewSpaceArrayBufferTrackerItems(
ItemParallelJob* job) {
int pages = 0;
@@ -4017,69 +3165,6 @@ void MarkCompactCollector::UpdatePointersAfterEvacuation() {
}
}
-void MinorMarkCompactCollector::UpdatePointersAfterEvacuation() {
- TRACE_GC(heap()->tracer(),
- GCTracer::Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS);
-
- PointersUpdatingVisitor updating_visitor;
- ItemParallelJob updating_job(isolate()->cancelable_task_manager(),
- &page_parallel_job_semaphore_);
-
- CollectNewSpaceArrayBufferTrackerItems(&updating_job);
- // Create batches of global handles.
- SeedGlobalHandles<GlobalHandlesUpdatingItem>(isolate()->global_handles(),
- &updating_job);
- const int to_space_tasks = CollectToSpaceUpdatingItems(&updating_job);
- int remembered_set_pages = 0;
- remembered_set_pages += CollectRememberedSetUpdatingItems(
- &updating_job, heap()->old_space(),
- RememberedSetUpdatingMode::OLD_TO_NEW_ONLY);
- remembered_set_pages += CollectRememberedSetUpdatingItems(
- &updating_job, heap()->code_space(),
- RememberedSetUpdatingMode::OLD_TO_NEW_ONLY);
- remembered_set_pages += CollectRememberedSetUpdatingItems(
- &updating_job, heap()->map_space(),
- RememberedSetUpdatingMode::OLD_TO_NEW_ONLY);
- remembered_set_pages += CollectRememberedSetUpdatingItems(
- &updating_job, heap()->lo_space(),
- RememberedSetUpdatingMode::OLD_TO_NEW_ONLY);
- const int remembered_set_tasks =
- remembered_set_pages == 0 ? 0
- : NumberOfParallelPointerUpdateTasks(
- remembered_set_pages, old_to_new_slots_);
- const int num_tasks = Max(to_space_tasks, remembered_set_tasks);
- for (int i = 0; i < num_tasks; i++) {
- updating_job.AddTask(new PointersUpdatingTask(
- isolate(), GCTracer::BackgroundScope::
- MINOR_MC_BACKGROUND_EVACUATE_UPDATE_POINTERS));
- }
-
- {
- TRACE_GC(heap()->tracer(),
- GCTracer::Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS_TO_NEW_ROOTS);
- heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_MINOR_MC_UPDATE);
- }
- {
- TRACE_GC(heap()->tracer(),
- GCTracer::Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS_SLOTS);
- updating_job.Run(isolate()->async_counters());
- heap()->array_buffer_collector()->FreeAllocationsOnBackgroundThread();
- }
-
- {
- TRACE_GC(heap()->tracer(),
- GCTracer::Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS_WEAK);
-
- EvacuationWeakObjectRetainer evacuation_object_retainer;
- heap()->ProcessWeakListRoots(&evacuation_object_retainer);
-
- // Update pointers from external string table.
- heap()->UpdateNewSpaceReferencesInExternalStringTable(
- &UpdateReferenceInExternalStringTableEntry);
- heap()->IterateEncounteredWeakCollections(&updating_visitor);
- }
-}
-
void MarkCompactCollector::ReportAbortedEvacuationCandidate(
HeapObject* failed_object, Page* page) {
base::LockGuard<base::Mutex> guard(&mutex_);
@@ -4225,5 +3310,1062 @@ void MarkCompactCollector::StartSweepSpaces() {
}
}
+#ifdef ENABLE_MINOR_MC
+
+namespace {
+
+#ifdef VERIFY_HEAP
+
+class YoungGenerationMarkingVerifier : public MarkingVerifier {
+ public:
+ explicit YoungGenerationMarkingVerifier(Heap* heap)
+ : MarkingVerifier(heap),
+ marking_state_(
+ heap->minor_mark_compact_collector()->non_atomic_marking_state()) {}
+
+ Bitmap* bitmap(const MemoryChunk* chunk) override {
+ return marking_state_->bitmap(chunk);
+ }
+
+ bool IsMarked(HeapObject* object) override {
+ return marking_state_->IsGrey(object);
+ }
+
+ bool IsBlackOrGrey(HeapObject* object) override {
+ return marking_state_->IsBlackOrGrey(object);
+ }
+
+ void Run() override {
+ VerifyRoots(VISIT_ALL_IN_SCAVENGE);
+ VerifyMarking(heap_->new_space());
+ }
+
+ void VerifyPointers(Object** start, Object** end) override {
+ for (Object** current = start; current < end; current++) {
+ DCHECK(!HasWeakHeapObjectTag(*current));
+ if ((*current)->IsHeapObject()) {
+ HeapObject* object = HeapObject::cast(*current);
+ if (!heap_->InNewSpace(object)) return;
+ CHECK(IsMarked(object));
+ }
+ }
+ }
+
+ void VerifyPointers(MaybeObject** start, MaybeObject** end) override {
+ for (MaybeObject** current = start; current < end; current++) {
+ HeapObject* object;
+ // Minor MC treats weak references as strong.
+ if ((*current)->ToStrongOrWeakHeapObject(&object)) {
+ if (!heap_->InNewSpace(object)) {
+ continue;
+ }
+ CHECK(IsMarked(object));
+ }
+ }
+ }
+
+ private:
+ MinorMarkCompactCollector::NonAtomicMarkingState* marking_state_;
+};
+
+class YoungGenerationEvacuationVerifier : public EvacuationVerifier {
+ public:
+ explicit YoungGenerationEvacuationVerifier(Heap* heap)
+ : EvacuationVerifier(heap) {}
+
+ void Run() override {
+ VerifyRoots(VISIT_ALL_IN_SCAVENGE);
+ VerifyEvacuation(heap_->new_space());
+ VerifyEvacuation(heap_->old_space());
+ VerifyEvacuation(heap_->code_space());
+ VerifyEvacuation(heap_->map_space());
+ }
+
+ protected:
+ void VerifyPointers(Object** start, Object** end) override {
+ for (Object** current = start; current < end; current++) {
+ if ((*current)->IsHeapObject()) {
+ HeapObject* object = HeapObject::cast(*current);
+ CHECK_IMPLIES(heap()->InNewSpace(object), heap()->InToSpace(object));
+ }
+ }
+ }
+ void VerifyPointers(MaybeObject** start, MaybeObject** end) override {
+ for (MaybeObject** current = start; current < end; current++) {
+ HeapObject* object;
+ if ((*current)->ToStrongOrWeakHeapObject(&object)) {
+ CHECK_IMPLIES(heap()->InNewSpace(object), heap()->InToSpace(object));
+ }
+ }
+ }
+};
+
+#endif // VERIFY_HEAP
+
+template <class ParallelItem>
+void SeedGlobalHandles(GlobalHandles* global_handles, ItemParallelJob* job) {
+ // Create batches of global handles.
+ const size_t kGlobalHandlesBufferSize = 1000;
+ const size_t new_space_nodes = global_handles->NumberOfNewSpaceNodes();
+ for (size_t start = 0; start < new_space_nodes;
+ start += kGlobalHandlesBufferSize) {
+ size_t end = start + kGlobalHandlesBufferSize;
+ if (end > new_space_nodes) end = new_space_nodes;
+ job->AddItem(new ParallelItem(global_handles, start, end));
+ }
+}
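
SeedGlobalHandles above simply chops the new-space handle nodes into fixed-size ranges so each range becomes one parallel work item. A generic sketch of the same batching, with the callback standing in for job->AddItem():

#include <algorithm>
#include <cstddef>
#include <functional>

void SeedHandleRanges(std::size_t node_count,
                      const std::function<void(std::size_t, std::size_t)>& add_item) {
  const std::size_t kBufferSize = 1000;  // matches kGlobalHandlesBufferSize above
  for (std::size_t start = 0; start < node_count; start += kBufferSize) {
    std::size_t end = std::min(start + kBufferSize, node_count);
    add_item(start, end);  // one parallel item per [start, end) range
  }
}
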
+
+bool IsUnmarkedObjectForYoungGeneration(Heap* heap, Object** p) {
+ DCHECK_IMPLIES(heap->InNewSpace(*p), heap->InToSpace(*p));
+ return heap->InNewSpace(*p) && !heap->minor_mark_compact_collector()
+ ->non_atomic_marking_state()
+ ->IsGrey(HeapObject::cast(*p));
+}
+
+} // namespace
+
+class YoungGenerationMarkingVisitor final
+ : public NewSpaceVisitor<YoungGenerationMarkingVisitor> {
+ public:
+ YoungGenerationMarkingVisitor(
+ Heap* heap, MinorMarkCompactCollector::MarkingState* marking_state,
+ MinorMarkCompactCollector::MarkingWorklist* global_worklist, int task_id)
+ : heap_(heap),
+ worklist_(global_worklist, task_id),
+ marking_state_(marking_state) {}
+
+ V8_INLINE void VisitPointers(HeapObject* host, Object** start,
+ Object** end) final {
+ for (Object** p = start; p < end; p++) {
+ VisitPointer(host, p);
+ }
+ }
+
+ V8_INLINE void VisitPointers(HeapObject* host, MaybeObject** start,
+ MaybeObject** end) final {
+ for (MaybeObject** p = start; p < end; p++) {
+ VisitPointer(host, p);
+ }
+ }
+
+ V8_INLINE void VisitPointer(HeapObject* host, Object** slot) final {
+ Object* target = *slot;
+ DCHECK(!HasWeakHeapObjectTag(target));
+ if (heap_->InNewSpace(target)) {
+ HeapObject* target_object = HeapObject::cast(target);
+ MarkObjectViaMarkingWorklist(target_object);
+ }
+ }
+
+ V8_INLINE void VisitPointer(HeapObject* host, MaybeObject** slot) final {
+ MaybeObject* target = *slot;
+ if (heap_->InNewSpace(target)) {
+ HeapObject* target_object;
+ // Treat weak references as strong. TODO(marja): Proper weakness handling
+ // for minor-mcs.
+ if (target->ToStrongOrWeakHeapObject(&target_object)) {
+ MarkObjectViaMarkingWorklist(target_object);
+ }
+ }
+ }
+
+ private:
+ inline void MarkObjectViaMarkingWorklist(HeapObject* object) {
+ if (marking_state_->WhiteToGrey(object)) {
+ // Marking deque overflow is unsupported for the young generation.
+ CHECK(worklist_.Push(object));
+ }
+ }
+
+ Heap* heap_;
+ MinorMarkCompactCollector::MarkingWorklist::View worklist_;
+ MinorMarkCompactCollector::MarkingState* marking_state_;
+};
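
The marking visitor above treats weak and strong slots identically and pushes each new-space object onto the worklist exactly once, at its white-to-grey transition. A minimal sketch of that step, with MarkColor and a std::deque worklist as illustrative stand-ins:

#include <deque>

enum class MarkColor { kWhite, kGrey, kBlack };
struct YoungHeapObject { MarkColor color = MarkColor::kWhite; };

void MarkViaWorklist(YoungHeapObject* object,
                     std::deque<YoungHeapObject*>* worklist) {
  if (object->color == MarkColor::kWhite) {
    object->color = MarkColor::kGrey;  // the white-to-grey step happens once
    worklist->push_back(object);       // worklist overflow is not supported
  }
}
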
+
+void MinorMarkCompactCollector::SetUp() {}
+
+void MinorMarkCompactCollector::TearDown() {}
+
+MinorMarkCompactCollector::MinorMarkCompactCollector(Heap* heap)
+ : MarkCompactCollectorBase(heap),
+ worklist_(new MinorMarkCompactCollector::MarkingWorklist()),
+ main_marking_visitor_(new YoungGenerationMarkingVisitor(
+ heap, marking_state(), worklist_, kMainMarker)),
+ page_parallel_job_semaphore_(0) {
+ static_assert(
+ kNumMarkers <= MinorMarkCompactCollector::MarkingWorklist::kMaxNumTasks,
+ "more marker tasks than marking deque can handle");
+}
+
+MinorMarkCompactCollector::~MinorMarkCompactCollector() {
+ delete worklist_;
+ delete main_marking_visitor_;
+}
+
+int MinorMarkCompactCollector::NumberOfParallelMarkingTasks(int pages) {
+ DCHECK_GT(pages, 0);
+ if (!FLAG_minor_mc_parallel_marking) return 1;
+ // Pages are not private to markers but we can still use them to estimate the
+ // amount of marking that is required.
+ const int kPagesPerTask = 2;
+ const int wanted_tasks = Max(1, pages / kPagesPerTask);
+ return Min(NumberOfAvailableCores(), Min(wanted_tasks, kNumMarkers));
+}
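
NumberOfParallelMarkingTasks above budgets roughly one marking task per two new-space pages, clamped by the available cores and by kNumMarkers. A standalone sketch of the same heuristic; for example, 8 pages on 4 cores with 8 marker slots yields 4 tasks:

#include <algorithm>

int ParallelMarkingTasksSketch(int pages, int available_cores, int max_markers,
                               bool parallel_marking_enabled) {
  if (!parallel_marking_enabled) return 1;
  const int kPagesPerTask = 2;
  const int wanted_tasks = std::max(1, pages / kPagesPerTask);
  return std::min(available_cores, std::min(wanted_tasks, max_markers));
}
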
+
+void MinorMarkCompactCollector::CleanupSweepToIteratePages() {
+ for (Page* p : sweep_to_iterate_pages_) {
+ if (p->IsFlagSet(Page::SWEEP_TO_ITERATE)) {
+ p->ClearFlag(Page::SWEEP_TO_ITERATE);
+ non_atomic_marking_state()->ClearLiveness(p);
+ }
+ }
+ sweep_to_iterate_pages_.clear();
+}
+
+class YoungGenerationMigrationObserver final : public MigrationObserver {
+ public:
+ YoungGenerationMigrationObserver(Heap* heap,
+ MarkCompactCollector* mark_compact_collector)
+ : MigrationObserver(heap),
+ mark_compact_collector_(mark_compact_collector) {}
+
+ inline void Move(AllocationSpace dest, HeapObject* src, HeapObject* dst,
+ int size) final {
+ // Migrate color to old generation marking in case the object survived young
+ // generation garbage collection.
+ if (heap_->incremental_marking()->IsMarking()) {
+ DCHECK(
+ heap_->incremental_marking()->atomic_marking_state()->IsWhite(dst));
+ heap_->incremental_marking()->TransferColor(src, dst);
+ }
+ }
+
+ protected:
+ base::Mutex mutex_;
+ MarkCompactCollector* mark_compact_collector_;
+};
+
+class YoungGenerationRecordMigratedSlotVisitor final
+ : public RecordMigratedSlotVisitor {
+ public:
+ explicit YoungGenerationRecordMigratedSlotVisitor(
+ MarkCompactCollector* collector)
+ : RecordMigratedSlotVisitor(collector) {}
+
+ void VisitCodeTarget(Code* host, RelocInfo* rinfo) final { UNREACHABLE(); }
+ void VisitEmbeddedPointer(Code* host, RelocInfo* rinfo) final {
+ UNREACHABLE();
+ }
+
+ private:
+  // Only record slots for host objects that are considered live by the full
+  // collector.
+ inline bool IsLive(HeapObject* object) {
+ return collector_->non_atomic_marking_state()->IsBlack(object);
+ }
+
+ inline void RecordMigratedSlot(HeapObject* host, MaybeObject* value,
+ Address slot) final {
+ if (value->IsStrongOrWeakHeapObject()) {
+ Page* p = Page::FromAddress(reinterpret_cast<Address>(value));
+ if (p->InNewSpace()) {
+ DCHECK_IMPLIES(p->InToSpace(),
+ p->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION));
+ RememberedSet<OLD_TO_NEW>::Insert<AccessMode::NON_ATOMIC>(
+ Page::FromAddress(slot), slot);
+ } else if (p->IsEvacuationCandidate() && IsLive(host)) {
+ RememberedSet<OLD_TO_OLD>::Insert<AccessMode::NON_ATOMIC>(
+ Page::FromAddress(slot), slot);
+ }
+ }
+ }
+};
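// Illustrative sketch of the slot-recording decision implemented by
// RecordMigratedSlot() above, with V8's page queries reduced to booleans. The
// enum and parameter names are stand-ins, not V8 API: a migrated slot is
// remembered in OLD_TO_NEW while it points into new space, and in OLD_TO_OLD
// only when it points at an evacuation candidate and the host is live for the
// full collector.
enum class RecordedSet { kNone, kOldToNew, kOldToOld };

RecordedSet ClassifyMigratedSlot(bool value_is_heap_object,
                                 bool value_in_new_space,
                                 bool value_on_evacuation_candidate,
                                 bool host_is_live_for_full_gc) {
  if (!value_is_heap_object) return RecordedSet::kNone;  // Smi or cleared ref
  if (value_in_new_space) return RecordedSet::kOldToNew;
  if (value_on_evacuation_candidate && host_is_live_for_full_gc)
    return RecordedSet::kOldToOld;
  return RecordedSet::kNone;
}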
+
+void MinorMarkCompactCollector::UpdatePointersAfterEvacuation() {
+ TRACE_GC(heap()->tracer(),
+ GCTracer::Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS);
+
+ PointersUpdatingVisitor updating_visitor;
+ ItemParallelJob updating_job(isolate()->cancelable_task_manager(),
+ &page_parallel_job_semaphore_);
+
+ CollectNewSpaceArrayBufferTrackerItems(&updating_job);
+ // Create batches of global handles.
+ SeedGlobalHandles<GlobalHandlesUpdatingItem>(isolate()->global_handles(),
+ &updating_job);
+ const int to_space_tasks = CollectToSpaceUpdatingItems(&updating_job);
+ int remembered_set_pages = 0;
+ remembered_set_pages += CollectRememberedSetUpdatingItems(
+ &updating_job, heap()->old_space(),
+ RememberedSetUpdatingMode::OLD_TO_NEW_ONLY);
+ remembered_set_pages += CollectRememberedSetUpdatingItems(
+ &updating_job, heap()->code_space(),
+ RememberedSetUpdatingMode::OLD_TO_NEW_ONLY);
+ remembered_set_pages += CollectRememberedSetUpdatingItems(
+ &updating_job, heap()->map_space(),
+ RememberedSetUpdatingMode::OLD_TO_NEW_ONLY);
+ remembered_set_pages += CollectRememberedSetUpdatingItems(
+ &updating_job, heap()->lo_space(),
+ RememberedSetUpdatingMode::OLD_TO_NEW_ONLY);
+ const int remembered_set_tasks =
+ remembered_set_pages == 0 ? 0
+ : NumberOfParallelPointerUpdateTasks(
+ remembered_set_pages, old_to_new_slots_);
+ const int num_tasks = Max(to_space_tasks, remembered_set_tasks);
+ for (int i = 0; i < num_tasks; i++) {
+ updating_job.AddTask(new PointersUpdatingTask(
+ isolate(), GCTracer::BackgroundScope::
+ MINOR_MC_BACKGROUND_EVACUATE_UPDATE_POINTERS));
+ }
+
+ {
+ TRACE_GC(heap()->tracer(),
+ GCTracer::Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS_TO_NEW_ROOTS);
+ heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_MINOR_MC_UPDATE);
+ }
+ {
+ TRACE_GC(heap()->tracer(),
+ GCTracer::Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS_SLOTS);
+ updating_job.Run(isolate()->async_counters());
+ heap()->array_buffer_collector()->FreeAllocationsOnBackgroundThread();
+ }
+
+ {
+ TRACE_GC(heap()->tracer(),
+ GCTracer::Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS_WEAK);
+
+ EvacuationWeakObjectRetainer evacuation_object_retainer;
+ heap()->ProcessWeakListRoots(&evacuation_object_retainer);
+
+ // Update pointers from external string table.
+ heap()->UpdateNewSpaceReferencesInExternalStringTable(
+ &UpdateReferenceInExternalStringTableEntry);
+ heap()->IterateEncounteredWeakCollections(&updating_visitor);
+ }
+}
+
+class MinorMarkCompactCollector::RootMarkingVisitor : public RootVisitor {
+ public:
+ explicit RootMarkingVisitor(MinorMarkCompactCollector* collector)
+ : collector_(collector),
+ marking_state_(collector_->non_atomic_marking_state()) {}
+
+ void VisitRootPointer(Root root, const char* description,
+ Object** p) override {
+ MarkObjectByPointer(p);
+ }
+
+ void VisitRootPointers(Root root, const char* description, Object** start,
+ Object** end) override {
+ for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
+ }
+
+ private:
+ void MarkObjectByPointer(Object** p) {
+ if (!(*p)->IsHeapObject()) return;
+
+ HeapObject* object = HeapObject::cast(*p);
+
+ if (!collector_->heap()->InNewSpace(object)) return;
+
+ if (marking_state_->WhiteToGrey(object)) {
+ collector_->main_marking_visitor()->Visit(object);
+ collector_->ProcessMarkingWorklist();
+ }
+ }
+
+ MinorMarkCompactCollector* collector_;
+ MinorMarkCompactCollector::NonAtomicMarkingState* marking_state_;
+};
+
+void MinorMarkCompactCollector::CollectGarbage() {
+ {
+ TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_SWEEPING);
+ heap()->mark_compact_collector()->sweeper()->EnsureIterabilityCompleted();
+ CleanupSweepToIteratePages();
+ }
+
+ MarkLiveObjects();
+ ClearNonLiveReferences();
+#ifdef VERIFY_HEAP
+ if (FLAG_verify_heap) {
+ YoungGenerationMarkingVerifier verifier(heap());
+ verifier.Run();
+ }
+#endif // VERIFY_HEAP
+
+ Evacuate();
+#ifdef VERIFY_HEAP
+ if (FLAG_verify_heap) {
+ YoungGenerationEvacuationVerifier verifier(heap());
+ verifier.Run();
+ }
+#endif // VERIFY_HEAP
+
+ {
+ TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARKING_DEQUE);
+ heap()->incremental_marking()->UpdateMarkingWorklistAfterScavenge();
+ }
+
+ {
+ TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_RESET_LIVENESS);
+ for (Page* p : PageRange(heap()->new_space()->FromSpaceStart(),
+ heap()->new_space()->FromSpaceEnd())) {
+ DCHECK(!p->IsFlagSet(Page::SWEEP_TO_ITERATE));
+ non_atomic_marking_state()->ClearLiveness(p);
+ if (FLAG_concurrent_marking) {
+ // Ensure that concurrent marker does not track pages that are
+ // going to be unmapped.
+ heap()->concurrent_marking()->ClearLiveness(p);
+ }
+ }
+ }
+
+ RememberedSet<OLD_TO_NEW>::IterateMemoryChunks(
+ heap(), [](MemoryChunk* chunk) {
+ if (chunk->SweepingDone()) {
+ RememberedSet<OLD_TO_NEW>::FreeEmptyBuckets(chunk);
+ } else {
+ RememberedSet<OLD_TO_NEW>::PreFreeEmptyBuckets(chunk);
+ }
+ });
+
+ heap()->account_external_memory_concurrently_freed();
+}
+
+void MinorMarkCompactCollector::MakeIterable(
+ Page* p, MarkingTreatmentMode marking_mode,
+ FreeSpaceTreatmentMode free_space_mode) {
+  // We have to clear the full collector's mark bits for the areas that we
+  // remove here.
+ MarkCompactCollector* full_collector = heap()->mark_compact_collector();
+ Address free_start = p->area_start();
+ DCHECK_EQ(0, reinterpret_cast<intptr_t>(free_start) % (32 * kPointerSize));
+
+ for (auto object_and_size :
+ LiveObjectRange<kGreyObjects>(p, marking_state()->bitmap(p))) {
+ HeapObject* const object = object_and_size.first;
+ DCHECK(non_atomic_marking_state()->IsGrey(object));
+ Address free_end = object->address();
+ if (free_end != free_start) {
+ CHECK_GT(free_end, free_start);
+ size_t size = static_cast<size_t>(free_end - free_start);
+ full_collector->non_atomic_marking_state()->bitmap(p)->ClearRange(
+ p->AddressToMarkbitIndex(free_start),
+ p->AddressToMarkbitIndex(free_end));
+ if (free_space_mode == ZAP_FREE_SPACE) {
+ memset(free_start, 0xCC, size);
+ }
+ p->heap()->CreateFillerObjectAt(free_start, static_cast<int>(size),
+ ClearRecordedSlots::kNo);
+ }
+ Map* map = object->synchronized_map();
+ int size = object->SizeFromMap(map);
+ free_start = free_end + size;
+ }
+
+ if (free_start != p->area_end()) {
+ CHECK_GT(p->area_end(), free_start);
+ size_t size = static_cast<size_t>(p->area_end() - free_start);
+ full_collector->non_atomic_marking_state()->bitmap(p)->ClearRange(
+ p->AddressToMarkbitIndex(free_start),
+ p->AddressToMarkbitIndex(p->area_end()));
+ if (free_space_mode == ZAP_FREE_SPACE) {
+ memset(free_start, 0xCC, size);
+ }
+ p->heap()->CreateFillerObjectAt(free_start, static_cast<int>(size),
+ ClearRecordedSlots::kNo);
+ }
+
+ if (marking_mode == MarkingTreatmentMode::CLEAR) {
+ non_atomic_marking_state()->ClearLiveness(p);
+ p->ClearFlag(Page::SWEEP_TO_ITERATE);
+ }
+}
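// Illustrative sketch of the gap-walking pattern in MakeIterable() above: walk
// the live objects of a page in address order and turn every range between
// them (plus the tail up to area_end) into a free range that becomes a filler,
// optionally zapped. Addresses are plain integers here; names are stand-ins.
#include <cstdint>
#include <utility>
#include <vector>

using LiveObject = std::pair<uintptr_t, size_t>;  // (address, size), sorted
using FreeRange = std::pair<uintptr_t, size_t>;   // (start, size)

std::vector<FreeRange> ComputeFreeRanges(uintptr_t area_start,
                                         uintptr_t area_end,
                                         const std::vector<LiveObject>& live) {
  std::vector<FreeRange> free_ranges;
  uintptr_t free_start = area_start;
  for (const LiveObject& object : live) {
    uintptr_t free_end = object.first;
    if (free_end != free_start)
      free_ranges.emplace_back(free_start, free_end - free_start);
    free_start = free_end + object.second;  // skip over the live object
  }
  if (free_start != area_end)
    free_ranges.emplace_back(free_start, area_end - free_start);
  return free_ranges;
}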
+
+namespace {
+
+// Helper class for pruning the string table.
+class YoungGenerationExternalStringTableCleaner : public RootVisitor {
+ public:
+ YoungGenerationExternalStringTableCleaner(
+ MinorMarkCompactCollector* collector)
+ : heap_(collector->heap()),
+ marking_state_(collector->non_atomic_marking_state()) {}
+
+ void VisitRootPointers(Root root, const char* description, Object** start,
+ Object** end) override {
+ DCHECK_EQ(static_cast<int>(root),
+ static_cast<int>(Root::kExternalStringsTable));
+ // Visit all HeapObject pointers in [start, end).
+ for (Object** p = start; p < end; p++) {
+ Object* o = *p;
+ if (o->IsHeapObject()) {
+ HeapObject* heap_object = HeapObject::cast(o);
+ if (marking_state_->IsWhite(heap_object)) {
+ if (o->IsExternalString()) {
+ heap_->FinalizeExternalString(String::cast(*p));
+ } else {
+ // The original external string may have been internalized.
+ DCHECK(o->IsThinString());
+ }
+ // Set the entry to the_hole_value (as deleted).
+ *p = heap_->the_hole_value();
+ }
+ }
+ }
+ }
+
+ private:
+ Heap* heap_;
+ MinorMarkCompactCollector::NonAtomicMarkingState* marking_state_;
+};
+
+// Marked young generation objects and all old generation objects will be
+// retained.
+class MinorMarkCompactWeakObjectRetainer : public WeakObjectRetainer {
+ public:
+ explicit MinorMarkCompactWeakObjectRetainer(
+ MinorMarkCompactCollector* collector)
+ : heap_(collector->heap()),
+ marking_state_(collector->non_atomic_marking_state()) {}
+
+ virtual Object* RetainAs(Object* object) {
+ HeapObject* heap_object = HeapObject::cast(object);
+ if (!heap_->InNewSpace(heap_object)) return object;
+
+ // Young generation marking only marks to grey instead of black.
+ DCHECK(!marking_state_->IsBlack(heap_object));
+ if (marking_state_->IsGrey(heap_object)) {
+ return object;
+ }
+ return nullptr;
+ }
+
+ private:
+ Heap* heap_;
+ MinorMarkCompactCollector::NonAtomicMarkingState* marking_state_;
+};
+
+} // namespace
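// Illustrative sketch of the retention rule shared by the two helpers above:
// a minor GC never reclaims old-space objects, and keeps a new-space object
// only if the young-generation marker reached it (grey). Booleans stand in
// for the heap and marking-state queries.
bool RetainAfterMinorGC(bool in_new_space, bool marked_grey) {
  if (!in_new_space) return true;  // old generation is untouched by minor GC
  return marked_grey;              // white new-space objects are dead
}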
+
+void MinorMarkCompactCollector::ClearNonLiveReferences() {
+ TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_CLEAR);
+
+ {
+ TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_CLEAR_STRING_TABLE);
+ // Internalized strings are always stored in old space, so there is no need
+ // to clean them here.
+ YoungGenerationExternalStringTableCleaner external_visitor(this);
+ heap()->external_string_table_.IterateNewSpaceStrings(&external_visitor);
+ heap()->external_string_table_.CleanUpNewSpaceStrings();
+ }
+
+ {
+ TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_CLEAR_WEAK_LISTS);
+ // Process the weak references.
+ MinorMarkCompactWeakObjectRetainer retainer(this);
+ heap()->ProcessYoungWeakReferences(&retainer);
+ }
+}
+
+void MinorMarkCompactCollector::EvacuatePrologue() {
+ NewSpace* new_space = heap()->new_space();
+ // Append the list of new space pages to be processed.
+ for (Page* p : PageRange(new_space->bottom(), new_space->top())) {
+ new_space_evacuation_pages_.push_back(p);
+ }
+ new_space->Flip();
+ new_space->ResetLinearAllocationArea();
+}
+
+void MinorMarkCompactCollector::EvacuateEpilogue() {
+ heap()->new_space()->set_age_mark(heap()->new_space()->top());
+ // Give pages that are queued to be freed back to the OS.
+ heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
+}
+
+UpdatingItem* MinorMarkCompactCollector::CreateToSpaceUpdatingItem(
+ MemoryChunk* chunk, Address start, Address end) {
+ return new ToSpaceUpdatingItem<NonAtomicMarkingState>(
+ chunk, start, end, non_atomic_marking_state());
+}
+
+UpdatingItem* MinorMarkCompactCollector::CreateRememberedSetUpdatingItem(
+ MemoryChunk* chunk, RememberedSetUpdatingMode updating_mode) {
+ return new RememberedSetUpdatingItem<NonAtomicMarkingState>(
+ heap(), non_atomic_marking_state(), chunk, updating_mode);
+}
+
+class MarkingItem;
+class GlobalHandlesMarkingItem;
+class PageMarkingItem;
+class RootMarkingItem;
+class YoungGenerationMarkingTask;
+
+class MarkingItem : public ItemParallelJob::Item {
+ public:
+ virtual ~MarkingItem() {}
+ virtual void Process(YoungGenerationMarkingTask* task) = 0;
+};
+
+class YoungGenerationMarkingTask : public ItemParallelJob::Task {
+ public:
+ YoungGenerationMarkingTask(
+ Isolate* isolate, MinorMarkCompactCollector* collector,
+ MinorMarkCompactCollector::MarkingWorklist* global_worklist, int task_id)
+ : ItemParallelJob::Task(isolate),
+ collector_(collector),
+ marking_worklist_(global_worklist, task_id),
+ marking_state_(collector->marking_state()),
+ visitor_(isolate->heap(), marking_state_, global_worklist, task_id) {
+ local_live_bytes_.reserve(isolate->heap()->new_space()->Capacity() /
+ Page::kPageSize);
+ }
+
+ void RunInParallel() override {
+ TRACE_BACKGROUND_GC(collector_->heap()->tracer(),
+ GCTracer::BackgroundScope::MINOR_MC_BACKGROUND_MARKING);
+ double marking_time = 0.0;
+ {
+ TimedScope scope(&marking_time);
+ MarkingItem* item = nullptr;
+ while ((item = GetItem<MarkingItem>()) != nullptr) {
+ item->Process(this);
+ item->MarkFinished();
+ EmptyLocalMarkingWorklist();
+ }
+ EmptyMarkingWorklist();
+ DCHECK(marking_worklist_.IsLocalEmpty());
+ FlushLiveBytes();
+ }
+ if (FLAG_trace_minor_mc_parallel_marking) {
+ PrintIsolate(collector_->isolate(), "marking[%p]: time=%f\n",
+ static_cast<void*>(this), marking_time);
+ }
+ };
+
+ void MarkObject(Object* object) {
+ if (!collector_->heap()->InNewSpace(object)) return;
+ HeapObject* heap_object = HeapObject::cast(object);
+ if (marking_state_->WhiteToGrey(heap_object)) {
+ const int size = visitor_.Visit(heap_object);
+ IncrementLiveBytes(heap_object, size);
+ }
+ }
+
+ private:
+ void EmptyLocalMarkingWorklist() {
+ HeapObject* object = nullptr;
+ while (marking_worklist_.Pop(&object)) {
+ const int size = visitor_.Visit(object);
+ IncrementLiveBytes(object, size);
+ }
+ }
+
+ void EmptyMarkingWorklist() {
+ HeapObject* object = nullptr;
+ while (marking_worklist_.Pop(&object)) {
+ const int size = visitor_.Visit(object);
+ IncrementLiveBytes(object, size);
+ }
+ }
+
+ void IncrementLiveBytes(HeapObject* object, intptr_t bytes) {
+ local_live_bytes_[Page::FromAddress(reinterpret_cast<Address>(object))] +=
+ bytes;
+ }
+
+ void FlushLiveBytes() {
+ for (auto pair : local_live_bytes_) {
+ marking_state_->IncrementLiveBytes(pair.first, pair.second);
+ }
+ }
+
+ MinorMarkCompactCollector* collector_;
+ MinorMarkCompactCollector::MarkingWorklist::View marking_worklist_;
+ MinorMarkCompactCollector::MarkingState* marking_state_;
+ YoungGenerationMarkingVisitor visitor_;
+ std::unordered_map<Page*, intptr_t, Page::Hasher> local_live_bytes_;
+};
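// Illustrative sketch of the per-task live-bytes buffering used by
// YoungGenerationMarkingTask above: byte counts are accumulated in a
// task-local map keyed by page and published to the shared marking state once,
// in FlushLiveBytes(), instead of one contended update per object. Names and
// the MarkingState interface are stand-ins, not V8 API.
#include <cstdint>
#include <unordered_map>

struct LocalLiveBytes {
  std::unordered_map<int, intptr_t> per_page;  // page id -> bytes (task-local)

  void Increment(int page_id, intptr_t bytes) { per_page[page_id] += bytes; }

  template <typename MarkingState>
  void FlushTo(MarkingState* shared) {  // one shared update per touched page
    for (const auto& entry : per_page)
      shared->IncrementLiveBytes(entry.first, entry.second);
    per_page.clear();
  }
};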
+
+class BatchedRootMarkingItem : public MarkingItem {
+ public:
+ explicit BatchedRootMarkingItem(std::vector<Object*>&& objects)
+ : objects_(objects) {}
+ virtual ~BatchedRootMarkingItem() {}
+
+ void Process(YoungGenerationMarkingTask* task) override {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
+ "BatchedRootMarkingItem::Process");
+ for (Object* object : objects_) {
+ task->MarkObject(object);
+ }
+ }
+
+ private:
+ std::vector<Object*> objects_;
+};
+
+class PageMarkingItem : public MarkingItem {
+ public:
+ explicit PageMarkingItem(MemoryChunk* chunk,
+ base::AtomicNumber<intptr_t>* global_slots)
+ : chunk_(chunk), global_slots_(global_slots), slots_(0) {}
+ virtual ~PageMarkingItem() { global_slots_->Increment(slots_); }
+
+ void Process(YoungGenerationMarkingTask* task) override {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
+ "PageMarkingItem::Process");
+ base::LockGuard<base::Mutex> guard(chunk_->mutex());
+ MarkUntypedPointers(task);
+ MarkTypedPointers(task);
+ }
+
+ private:
+ inline Heap* heap() { return chunk_->heap(); }
+
+ void MarkUntypedPointers(YoungGenerationMarkingTask* task) {
+ RememberedSet<OLD_TO_NEW>::Iterate(
+ chunk_,
+ [this, task](Address slot) { return CheckAndMarkObject(task, slot); },
+ SlotSet::PREFREE_EMPTY_BUCKETS);
+ }
+
+ void MarkTypedPointers(YoungGenerationMarkingTask* task) {
+ Isolate* isolate = heap()->isolate();
+ RememberedSet<OLD_TO_NEW>::IterateTyped(
+ chunk_, [this, isolate, task](SlotType slot_type, Address host_addr,
+ Address slot) {
+ return UpdateTypedSlotHelper::UpdateTypedSlot(
+ isolate, slot_type, slot, [this, task](MaybeObject** slot) {
+ return CheckAndMarkObject(task,
+ reinterpret_cast<Address>(slot));
+ });
+ });
+ }
+
+ SlotCallbackResult CheckAndMarkObject(YoungGenerationMarkingTask* task,
+ Address slot_address) {
+ MaybeObject* object = *reinterpret_cast<MaybeObject**>(slot_address);
+ if (heap()->InNewSpace(object)) {
+ // Marking happens before flipping the young generation, so the object
+ // has to be in ToSpace.
+ DCHECK(heap()->InToSpace(object));
+ HeapObject* heap_object;
+ bool success = object->ToStrongOrWeakHeapObject(&heap_object);
+ USE(success);
+ DCHECK(success);
+ task->MarkObject(heap_object);
+ slots_++;
+ return KEEP_SLOT;
+ }
+ return REMOVE_SLOT;
+ }
+
+ MemoryChunk* chunk_;
+ base::AtomicNumber<intptr_t>* global_slots_;
+ intptr_t slots_;
+};
+
+class GlobalHandlesMarkingItem : public MarkingItem {
+ public:
+ GlobalHandlesMarkingItem(GlobalHandles* global_handles, size_t start,
+ size_t end)
+ : global_handles_(global_handles), start_(start), end_(end) {}
+ virtual ~GlobalHandlesMarkingItem() {}
+
+ void Process(YoungGenerationMarkingTask* task) override {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
+ "GlobalHandlesMarkingItem::Process");
+ GlobalHandlesRootMarkingVisitor visitor(task);
+ global_handles_
+ ->IterateNewSpaceStrongAndDependentRootsAndIdentifyUnmodified(
+ &visitor, start_, end_);
+ }
+
+ private:
+ class GlobalHandlesRootMarkingVisitor : public RootVisitor {
+ public:
+ explicit GlobalHandlesRootMarkingVisitor(YoungGenerationMarkingTask* task)
+ : task_(task) {}
+
+ void VisitRootPointer(Root root, const char* description,
+ Object** p) override {
+ DCHECK_EQ(Root::kGlobalHandles, root);
+ task_->MarkObject(*p);
+ }
+
+ void VisitRootPointers(Root root, const char* description, Object** start,
+ Object** end) override {
+ DCHECK_EQ(Root::kGlobalHandles, root);
+ for (Object** p = start; p < end; p++) {
+ task_->MarkObject(*p);
+ }
+ }
+
+ private:
+ YoungGenerationMarkingTask* task_;
+ };
+
+ GlobalHandles* global_handles_;
+ size_t start_;
+ size_t end_;
+};
+
+void MinorMarkCompactCollector::MarkRootSetInParallel() {
+ base::AtomicNumber<intptr_t> slots;
+ {
+ ItemParallelJob job(isolate()->cancelable_task_manager(),
+ &page_parallel_job_semaphore_);
+
+ // Seed the root set (roots + old->new set).
+ {
+ TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK_SEED);
+ // Create batches of roots.
+ RootMarkingVisitorSeedOnly<BatchedRootMarkingItem> root_seed_visitor(
+ &job);
+ heap()->IterateRoots(&root_seed_visitor, VISIT_ALL_IN_MINOR_MC_MARK);
+ // Create batches of global handles.
+ SeedGlobalHandles<GlobalHandlesMarkingItem>(isolate()->global_handles(),
+ &job);
+ // Create items for each page.
+ RememberedSet<OLD_TO_NEW>::IterateMemoryChunks(
+ heap(), [&job, &slots](MemoryChunk* chunk) {
+ job.AddItem(new PageMarkingItem(chunk, &slots));
+ });
+ // Flush any remaining objects in the seeding visitor.
+ root_seed_visitor.FlushObjects();
+ }
+
+ // Add tasks and run in parallel.
+ {
+ TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK_ROOTS);
+ const int new_space_pages =
+ static_cast<int>(heap()->new_space()->Capacity()) / Page::kPageSize;
+ const int num_tasks = NumberOfParallelMarkingTasks(new_space_pages);
+ for (int i = 0; i < num_tasks; i++) {
+ job.AddTask(
+ new YoungGenerationMarkingTask(isolate(), this, worklist(), i));
+ }
+ job.Run(isolate()->async_counters());
+ DCHECK(worklist()->IsGlobalEmpty());
+ }
+ }
+ old_to_new_slots_ = static_cast<int>(slots.Value());
+}
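// Illustrative toy model of the seed-then-drain structure in
// MarkRootSetInParallel() above: work items (root batches, global-handle
// ranges, pages) are seeded into a shared queue, then a fixed number of tasks
// pop items until the queue is empty. This models only the item queue, not the
// marking worklist or GC semantics; all names are stand-ins, not V8 API.
#include <functional>
#include <mutex>
#include <queue>
#include <thread>
#include <vector>

class ItemQueue {
 public:
  void Add(std::function<void()> item) {
    std::lock_guard<std::mutex> lock(mutex_);
    items_.push(std::move(item));
  }
  bool Pop(std::function<void()>* item) {
    std::lock_guard<std::mutex> lock(mutex_);
    if (items_.empty()) return false;
    *item = std::move(items_.front());
    items_.pop();
    return true;
  }

 private:
  std::mutex mutex_;
  std::queue<std::function<void()>> items_;
};

void RunItemsInParallel(ItemQueue* queue, int num_tasks) {
  std::vector<std::thread> tasks;
  for (int i = 0; i < num_tasks; i++) {
    tasks.emplace_back([queue] {
      std::function<void()> item;
      while (queue->Pop(&item)) item();  // drain until the queue is empty
    });
  }
  for (auto& task : tasks) task.join();
}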
+
+void MinorMarkCompactCollector::MarkLiveObjects() {
+ TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK);
+
+ PostponeInterruptsScope postpone(isolate());
+
+ RootMarkingVisitor root_visitor(this);
+
+ MarkRootSetInParallel();
+
+  // Mark the rest on the main thread.
+ {
+ TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK_WEAK);
+ heap()->IterateEncounteredWeakCollections(&root_visitor);
+ ProcessMarkingWorklist();
+ }
+
+ {
+ TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK_GLOBAL_HANDLES);
+ isolate()->global_handles()->MarkNewSpaceWeakUnmodifiedObjectsPending(
+ &IsUnmarkedObjectForYoungGeneration);
+ isolate()
+ ->global_handles()
+ ->IterateNewSpaceWeakUnmodifiedRootsForFinalizers(&root_visitor);
+ isolate()
+ ->global_handles()
+ ->IterateNewSpaceWeakUnmodifiedRootsForPhantomHandles(
+ &root_visitor, &IsUnmarkedObjectForYoungGeneration);
+ ProcessMarkingWorklist();
+ }
+}
+
+void MinorMarkCompactCollector::ProcessMarkingWorklist() {
+ MarkingWorklist::View marking_worklist(worklist(), kMainMarker);
+ HeapObject* object = nullptr;
+ while (marking_worklist.Pop(&object)) {
+ DCHECK(!object->IsFiller());
+ DCHECK(object->IsHeapObject());
+ DCHECK(heap()->Contains(object));
+ DCHECK(non_atomic_marking_state()->IsGrey(object));
+ main_marking_visitor()->Visit(object);
+ }
+ DCHECK(marking_worklist.IsLocalEmpty());
+}
+
+void MinorMarkCompactCollector::Evacuate() {
+ TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_EVACUATE);
+ base::LockGuard<base::Mutex> guard(heap()->relocation_mutex());
+
+ {
+ TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_EVACUATE_PROLOGUE);
+ EvacuatePrologue();
+ }
+
+ {
+ TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_EVACUATE_COPY);
+ EvacuatePagesInParallel();
+ }
+
+ UpdatePointersAfterEvacuation();
+
+ {
+ TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_EVACUATE_REBALANCE);
+ if (!heap()->new_space()->Rebalance()) {
+ heap()->FatalProcessOutOfMemory("NewSpace::Rebalance");
+ }
+ }
+
+ {
+ TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_EVACUATE_CLEAN_UP);
+ for (Page* p : new_space_evacuation_pages_) {
+ if (p->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION) ||
+ p->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION)) {
+ p->ClearFlag(Page::PAGE_NEW_NEW_PROMOTION);
+ p->ClearFlag(Page::PAGE_NEW_OLD_PROMOTION);
+ p->SetFlag(Page::SWEEP_TO_ITERATE);
+ sweep_to_iterate_pages_.push_back(p);
+ }
+ }
+ new_space_evacuation_pages_.clear();
+ }
+
+ {
+ TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_EVACUATE_EPILOGUE);
+ EvacuateEpilogue();
+ }
+}
+
+namespace {
+
+class YoungGenerationEvacuator : public Evacuator {
+ public:
+ YoungGenerationEvacuator(MinorMarkCompactCollector* collector,
+ RecordMigratedSlotVisitor* record_visitor)
+ : Evacuator(collector->heap(), record_visitor), collector_(collector) {}
+
+ GCTracer::BackgroundScope::ScopeId GetBackgroundTracingScope() override {
+ return GCTracer::BackgroundScope::MINOR_MC_BACKGROUND_EVACUATE_COPY;
+ }
+
+ protected:
+ void RawEvacuatePage(Page* page, intptr_t* live_bytes) override;
+
+ MinorMarkCompactCollector* collector_;
+};
+
+void YoungGenerationEvacuator::RawEvacuatePage(Page* page,
+ intptr_t* live_bytes) {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
+ "YoungGenerationEvacuator::RawEvacuatePage");
+ MinorMarkCompactCollector::NonAtomicMarkingState* marking_state =
+ collector_->non_atomic_marking_state();
+ *live_bytes = marking_state->live_bytes(page);
+ switch (ComputeEvacuationMode(page)) {
+ case kObjectsNewToOld:
+ LiveObjectVisitor::VisitGreyObjectsNoFail(
+ page, marking_state, &new_space_visitor_,
+ LiveObjectVisitor::kClearMarkbits);
+ // ArrayBufferTracker will be updated during pointers updating.
+ break;
+ case kPageNewToOld:
+ LiveObjectVisitor::VisitGreyObjectsNoFail(
+ page, marking_state, &new_to_old_page_visitor_,
+ LiveObjectVisitor::kKeepMarking);
+ new_to_old_page_visitor_.account_moved_bytes(
+ marking_state->live_bytes(page));
+ // TODO(mlippautz): If cleaning array buffers is too slow here we can
+ // delay it until the next GC.
+ ArrayBufferTracker::FreeDead(page, marking_state);
+ if (heap()->ShouldZapGarbage()) {
+ collector_->MakeIterable(page, MarkingTreatmentMode::KEEP,
+ ZAP_FREE_SPACE);
+ } else if (heap()->incremental_marking()->IsMarking()) {
+ // When incremental marking is on, we need to clear the mark bits of
+ // the full collector. We cannot yet discard the young generation mark
+ // bits as they are still relevant for pointers updating.
+ collector_->MakeIterable(page, MarkingTreatmentMode::KEEP,
+ IGNORE_FREE_SPACE);
+ }
+ break;
+ case kPageNewToNew:
+ LiveObjectVisitor::VisitGreyObjectsNoFail(
+ page, marking_state, &new_to_new_page_visitor_,
+ LiveObjectVisitor::kKeepMarking);
+ new_to_new_page_visitor_.account_moved_bytes(
+ marking_state->live_bytes(page));
+ // TODO(mlippautz): If cleaning array buffers is too slow here we can
+ // delay it until the next GC.
+ ArrayBufferTracker::FreeDead(page, marking_state);
+ if (heap()->ShouldZapGarbage()) {
+ collector_->MakeIterable(page, MarkingTreatmentMode::KEEP,
+ ZAP_FREE_SPACE);
+ } else if (heap()->incremental_marking()->IsMarking()) {
+ // When incremental marking is on, we need to clear the mark bits of
+ // the full collector. We cannot yet discard the young generation mark
+ // bits as they are still relevant for pointers updating.
+ collector_->MakeIterable(page, MarkingTreatmentMode::KEEP,
+ IGNORE_FREE_SPACE);
+ }
+ break;
+ case kObjectsOldToOld:
+ UNREACHABLE();
+ break;
+ }
+}
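// Illustrative sketch of the evacuation-mode dispatch in RawEvacuatePage()
// above, with the page flags reduced to booleans; the enum mirrors the three
// new-space cases handled there and is a stand-in, not V8 API.
enum class EvacModeSketch { kObjectsNewToOld, kPageNewToOld, kPageNewToNew };

EvacModeSketch ClassifyNewSpacePage(bool whole_page_promoted_to_old,
                                    bool whole_page_kept_in_new) {
  if (whole_page_promoted_to_old) return EvacModeSketch::kPageNewToOld;
  if (whole_page_kept_in_new) return EvacModeSketch::kPageNewToNew;
  return EvacModeSketch::kObjectsNewToOld;  // copy surviving objects one by one
}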
+
+} // namespace
+
+void MinorMarkCompactCollector::EvacuatePagesInParallel() {
+ ItemParallelJob evacuation_job(isolate()->cancelable_task_manager(),
+ &page_parallel_job_semaphore_);
+ intptr_t live_bytes = 0;
+
+ for (Page* page : new_space_evacuation_pages_) {
+ intptr_t live_bytes_on_page = non_atomic_marking_state()->live_bytes(page);
+ if (live_bytes_on_page == 0 && !page->contains_array_buffers()) continue;
+ live_bytes += live_bytes_on_page;
+ if (ShouldMovePage(page, live_bytes_on_page)) {
+ if (page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK)) {
+ EvacuateNewSpacePageVisitor<NEW_TO_OLD>::Move(page);
+ } else {
+ EvacuateNewSpacePageVisitor<NEW_TO_NEW>::Move(page);
+ }
+ }
+ evacuation_job.AddItem(new PageEvacuationItem(page));
+ }
+ if (evacuation_job.NumberOfItems() == 0) return;
+
+ YoungGenerationMigrationObserver observer(heap(),
+ heap()->mark_compact_collector());
+ YoungGenerationRecordMigratedSlotVisitor record_visitor(
+ heap()->mark_compact_collector());
+ CreateAndExecuteEvacuationTasks<YoungGenerationEvacuator>(
+ this, &evacuation_job, &record_visitor, &observer, live_bytes);
+}
+
+int MinorMarkCompactCollector::CollectNewSpaceArrayBufferTrackerItems(
+ ItemParallelJob* job) {
+ int pages = 0;
+ for (Page* p : new_space_evacuation_pages_) {
+ if (Evacuator::ComputeEvacuationMode(p) == Evacuator::kObjectsNewToOld) {
+ if (p->local_tracker() == nullptr) continue;
+
+ pages++;
+ job->AddItem(new ArrayBufferTrackerUpdatingItem(
+ p, ArrayBufferTrackerUpdatingItem::kRegular));
+ }
+ }
+ return pages;
+}
+
+#endif // ENABLE_MINOR_MC
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/heap/mark-compact.h b/deps/v8/src/heap/mark-compact.h
index 755f0eb4eb..944b139a59 100644
--- a/deps/v8/src/heap/mark-compact.h
+++ b/deps/v8/src/heap/mark-compact.h
@@ -117,7 +117,7 @@ class MarkBitCellIterator {
return cell_base_;
}
- MUST_USE_RESULT inline bool Advance() {
+ V8_WARN_UNUSED_RESULT inline bool Advance() {
cell_base_ += Bitmap::kBitsPerCell * kPointerSize;
return ++cell_index_ != last_cell_index_;
}
@@ -354,76 +354,6 @@ class MinorNonAtomicMarkingState final
}
};
-// Collector for young-generation only.
-class MinorMarkCompactCollector final : public MarkCompactCollectorBase {
- public:
- using MarkingState = MinorMarkingState;
- using NonAtomicMarkingState = MinorNonAtomicMarkingState;
-
- explicit MinorMarkCompactCollector(Heap* heap);
- ~MinorMarkCompactCollector();
-
- MarkingState* marking_state() { return &marking_state_; }
-
- NonAtomicMarkingState* non_atomic_marking_state() {
- return &non_atomic_marking_state_;
- }
-
- void SetUp() override;
- void TearDown() override;
- void CollectGarbage() override;
-
- void MakeIterable(Page* page, MarkingTreatmentMode marking_mode,
- FreeSpaceTreatmentMode free_space_mode);
- void CleanupSweepToIteratePages();
-
- private:
- using MarkingWorklist = Worklist<HeapObject*, 64 /* segment size */>;
- class RootMarkingVisitor;
-
- static const int kNumMarkers = 8;
- static const int kMainMarker = 0;
-
- inline MarkingWorklist* worklist() { return worklist_; }
-
- inline YoungGenerationMarkingVisitor* main_marking_visitor() {
- return main_marking_visitor_;
- }
-
- void MarkLiveObjects() override;
- void MarkRootSetInParallel();
- void ProcessMarkingWorklist() override;
- void ClearNonLiveReferences() override;
-
- void EvacuatePrologue() override;
- void EvacuateEpilogue() override;
- void Evacuate() override;
- void EvacuatePagesInParallel() override;
- void UpdatePointersAfterEvacuation() override;
-
- UpdatingItem* CreateToSpaceUpdatingItem(MemoryChunk* chunk, Address start,
- Address end) override;
- UpdatingItem* CreateRememberedSetUpdatingItem(
- MemoryChunk* chunk, RememberedSetUpdatingMode updating_mode) override;
-
- int CollectNewSpaceArrayBufferTrackerItems(ItemParallelJob* job);
-
- int NumberOfParallelMarkingTasks(int pages);
-
- MarkingWorklist* worklist_;
-
- YoungGenerationMarkingVisitor* main_marking_visitor_;
- base::Semaphore page_parallel_job_semaphore_;
- std::vector<Page*> new_space_evacuation_pages_;
- std::vector<Page*> sweep_to_iterate_pages_;
-
- MarkingState marking_state_;
- NonAtomicMarkingState non_atomic_marking_state_;
-
- friend class YoungGenerationMarkingTask;
- friend class YoungGenerationMarkingVisitor;
-};
-
// This marking state is used when concurrent marking is running.
class IncrementalMarkingState final
: public MarkingStateBase<IncrementalMarkingState, AccessMode::ATOMIC> {
@@ -495,6 +425,10 @@ class MajorNonAtomicMarkingState final
struct WeakObjects {
Worklist<WeakCell*, 64> weak_cells;
Worklist<TransitionArray*, 64> transition_arrays;
+ // TODO(marja): For old space, we only need the slot, not the host
+ // object. Optimize this by adding a different storage for old space.
+ Worklist<std::pair<HeapObject*, HeapObjectReference**>, 64> weak_references;
+ Worklist<std::pair<HeapObject*, Code*>, 64> weak_objects_in_code;
};
// Collector for young and old generation.
@@ -658,7 +592,12 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
void AbortCompaction();
- static inline bool IsOnEvacuationCandidate(HeapObject* obj) {
+ static inline bool IsOnEvacuationCandidate(Object* obj) {
+ return Page::FromAddress(reinterpret_cast<Address>(obj))
+ ->IsEvacuationCandidate();
+ }
+
+ static inline bool IsOnEvacuationCandidate(MaybeObject* obj) {
return Page::FromAddress(reinterpret_cast<Address>(obj))
->IsEvacuationCandidate();
}
@@ -666,6 +605,8 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
void RecordRelocSlot(Code* host, RelocInfo* rinfo, Object* target);
V8_INLINE static void RecordSlot(HeapObject* object, Object** slot,
Object* target);
+ V8_INLINE static void RecordSlot(HeapObject* object,
+ HeapObjectReference** slot, Object* target);
void RecordLiveSlotsOnPage(Page* page);
void UpdateSlots(SlotsBuffer* buffer);
@@ -699,6 +640,15 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
weak_objects_.transition_arrays.Push(kMainThread, array);
}
+ void AddWeakReference(HeapObject* host, HeapObjectReference** slot) {
+ weak_objects_.weak_references.Push(kMainThread, std::make_pair(host, slot));
+ }
+
+ void AddWeakObjectInCode(HeapObject* object, Code* code) {
+ weak_objects_.weak_objects_in_code.Push(kMainThread,
+ std::make_pair(object, code));
+ }
+
Sweeper* sweeper() { return sweeper_; }
#ifdef DEBUG
@@ -711,9 +661,9 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
#ifdef VERIFY_HEAP
void VerifyValidStoreAndSlotsBufferEntries();
void VerifyMarkbitsAreClean();
+ void VerifyMarkbitsAreDirty(PagedSpace* space);
void VerifyMarkbitsAreClean(PagedSpace* space);
void VerifyMarkbitsAreClean(NewSpace* space);
- void VerifyWeakEmbeddedObjectsInCode();
#endif
private:
@@ -774,13 +724,12 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
// Clear non-live references in weak cells, transition and descriptor arrays,
// and deoptimize dependent code of non-live maps.
void ClearNonLiveReferences() override;
- void MarkDependentCodeForDeoptimization(DependentCode* list);
+ void MarkDependentCodeForDeoptimization();
// Checks if the given weak cell is a simple transition from the parent map
// of the given dead target. If so it clears the transition and trims
// the descriptor array of the parent if needed.
- void ClearSimpleMapTransition(WeakCell* potential_transition,
- Map* dead_target);
- void ClearSimpleMapTransition(Map* map, Map* dead_target);
+ void ClearPotentialSimpleMapTransition(Map* dead_target);
+ void ClearPotentialSimpleMapTransition(Map* map, Map* dead_target);
// Compact every array in the global list of transition arrays and
// trim the corresponding descriptor array if a transition target is non-live.
void ClearFullMapTransitions();
@@ -807,8 +756,8 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
// dead values. If the value is a dead map and the parent map transitions to
// the dead map via weak cell, then this function also clears the map
// transition.
- void ClearWeakCellsAndSimpleMapTransitions(
- DependentCode** dependent_code_list);
+ void ClearWeakCells();
+ void ClearWeakReferences();
void AbortWeakObjects();
// Starts sweeping of spaces by contributing on the main thread and setting
@@ -917,8 +866,11 @@ class MarkingVisitor final
// ObjectVisitor implementation.
V8_INLINE void VisitPointer(HeapObject* host, Object** p) final;
+ V8_INLINE void VisitPointer(HeapObject* host, MaybeObject** p) final;
V8_INLINE void VisitPointers(HeapObject* host, Object** start,
Object** end) final;
+ V8_INLINE void VisitPointers(HeapObject* host, MaybeObject** start,
+ MaybeObject** end) final;
V8_INLINE void VisitEmbeddedPointer(Code* host, RelocInfo* rinfo) final;
V8_INLINE void VisitCodeTarget(Code* host, RelocInfo* rinfo) final;
@@ -962,6 +914,80 @@ class EvacuationScope {
MarkCompactCollector* collector_;
};
+#ifdef ENABLE_MINOR_MC
+
+// Collector for young-generation only.
+class MinorMarkCompactCollector final : public MarkCompactCollectorBase {
+ public:
+ using MarkingState = MinorMarkingState;
+ using NonAtomicMarkingState = MinorNonAtomicMarkingState;
+
+ explicit MinorMarkCompactCollector(Heap* heap);
+ ~MinorMarkCompactCollector();
+
+ MarkingState* marking_state() { return &marking_state_; }
+
+ NonAtomicMarkingState* non_atomic_marking_state() {
+ return &non_atomic_marking_state_;
+ }
+
+ void SetUp() override;
+ void TearDown() override;
+ void CollectGarbage() override;
+
+ void MakeIterable(Page* page, MarkingTreatmentMode marking_mode,
+ FreeSpaceTreatmentMode free_space_mode);
+ void CleanupSweepToIteratePages();
+
+ private:
+ using MarkingWorklist = Worklist<HeapObject*, 64 /* segment size */>;
+ class RootMarkingVisitor;
+
+ static const int kNumMarkers = 8;
+ static const int kMainMarker = 0;
+
+ inline MarkingWorklist* worklist() { return worklist_; }
+
+ inline YoungGenerationMarkingVisitor* main_marking_visitor() {
+ return main_marking_visitor_;
+ }
+
+ void MarkLiveObjects() override;
+ void MarkRootSetInParallel();
+ void ProcessMarkingWorklist() override;
+ void ClearNonLiveReferences() override;
+
+ void EvacuatePrologue() override;
+ void EvacuateEpilogue() override;
+ void Evacuate() override;
+ void EvacuatePagesInParallel() override;
+ void UpdatePointersAfterEvacuation() override;
+
+ UpdatingItem* CreateToSpaceUpdatingItem(MemoryChunk* chunk, Address start,
+ Address end) override;
+ UpdatingItem* CreateRememberedSetUpdatingItem(
+ MemoryChunk* chunk, RememberedSetUpdatingMode updating_mode) override;
+
+ int CollectNewSpaceArrayBufferTrackerItems(ItemParallelJob* job);
+
+ int NumberOfParallelMarkingTasks(int pages);
+
+ MarkingWorklist* worklist_;
+
+ YoungGenerationMarkingVisitor* main_marking_visitor_;
+ base::Semaphore page_parallel_job_semaphore_;
+ std::vector<Page*> new_space_evacuation_pages_;
+ std::vector<Page*> sweep_to_iterate_pages_;
+
+ MarkingState marking_state_;
+ NonAtomicMarkingState non_atomic_marking_state_;
+
+ friend class YoungGenerationMarkingTask;
+ friend class YoungGenerationMarkingVisitor;
+};
+
+#endif // ENABLE_MINOR_MC
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/heap/marking.cc b/deps/v8/src/heap/marking.cc
index 5e631187d2..23fbdd3465 100644
--- a/deps/v8/src/heap/marking.cc
+++ b/deps/v8/src/heap/marking.cc
@@ -17,6 +17,16 @@ void Bitmap::Clear() {
base::SeqCst_MemoryFence();
}
+void Bitmap::MarkAllBits() {
+ base::Atomic32* cell_base = reinterpret_cast<base::Atomic32*>(cells());
+ for (int i = 0; i < CellsCount(); i++) {
+ base::Relaxed_Store(cell_base + i, 0xffffffff);
+ }
+ // This fence prevents re-ordering of publishing stores with the mark-bit
+  // setting stores.
+ base::SeqCst_MemoryFence();
+}
+
void Bitmap::SetRange(uint32_t start_index, uint32_t end_index) {
unsigned int start_cell_index = start_index >> Bitmap::kBitsPerCellLog2;
MarkBit::CellType start_index_mask = 1u << Bitmap::IndexInCell(start_index);
diff --git a/deps/v8/src/heap/marking.h b/deps/v8/src/heap/marking.h
index 58630c52f0..bfa813091e 100644
--- a/deps/v8/src/heap/marking.h
+++ b/deps/v8/src/heap/marking.h
@@ -142,6 +142,8 @@ class V8_EXPORT_PRIVATE Bitmap {
void Clear();
+ void MarkAllBits();
+
// Clears bits in the given cell. The mask specifies bits to clear: if a
// bit is set in the mask then the corresponding bit is cleared in the cell.
template <AccessMode mode = AccessMode::NON_ATOMIC>
diff --git a/deps/v8/src/heap/memory-reducer.cc b/deps/v8/src/heap/memory-reducer.cc
index 77317a7b8a..baa4d6c00b 100644
--- a/deps/v8/src/heap/memory-reducer.cc
+++ b/deps/v8/src/heap/memory-reducer.cc
@@ -201,8 +201,8 @@ MemoryReducer::State MemoryReducer::Step(const State& state,
void MemoryReducer::ScheduleTimer(double time_ms, double delay_ms) {
- if (!heap()->use_tasks()) return;
DCHECK_LT(0, delay_ms);
+ if (heap()->IsTearingDown()) return;
// Leave some room for precision error in task scheduler.
const double kSlackMs = 100;
v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(heap()->isolate());
diff --git a/deps/v8/src/heap/object-stats.cc b/deps/v8/src/heap/object-stats.cc
index b854dabb2c..008ba7879d 100644
--- a/deps/v8/src/heap/object-stats.cc
+++ b/deps/v8/src/heap/object-stats.cc
@@ -277,7 +277,7 @@ ObjectStatsCollectorImpl::ObjectStatsCollectorImpl(Heap* heap,
bool ObjectStatsCollectorImpl::ShouldRecordObject(HeapObject* obj,
CowMode check_cow_array) {
- if (obj->IsFixedArray()) {
+ if (obj->IsFixedArrayExact()) {
FixedArray* fixed_array = FixedArray::cast(obj);
bool cow_check = check_cow_array == kIgnoreCow || !IsCowArray(fixed_array);
return CanRecordFixedArray(fixed_array) && cow_check;
@@ -480,7 +480,7 @@ void ObjectStatsCollectorImpl::RecordVirtualFeedbackVectorDetails(
Object* raw_object = vector->get(slot.ToInt() + i);
if (!raw_object->IsHeapObject()) continue;
HeapObject* object = HeapObject::cast(raw_object);
- if (object->IsCell() || object->IsFixedArray()) {
+ if (object->IsCell() || object->IsFixedArrayExact()) {
RecordSimpleVirtualObjectStats(
vector, object, ObjectStats::FEEDBACK_VECTOR_ENTRY_TYPE);
}
@@ -531,7 +531,7 @@ void ObjectStatsCollectorImpl::CollectStatistics(HeapObject* obj, Phase phase) {
RecordVirtualContext(Context::cast(obj));
} else if (obj->IsScript()) {
RecordVirtualScriptDetails(Script::cast(obj));
- } else if (obj->IsFixedArray()) {
+ } else if (obj->IsFixedArrayExact()) {
// Has to go last as it triggers too eagerly.
RecordVirtualFixedArrayDetails(FixedArray::cast(obj));
}
@@ -552,9 +552,6 @@ void ObjectStatsCollectorImpl::CollectGlobalStatistics() {
}
// FixedArray.
- RecordSimpleVirtualObjectStats(
- nullptr, heap_->weak_new_space_object_to_code_list(),
- ObjectStats::WEAK_NEW_SPACE_OBJECT_TO_CODE_TYPE);
RecordSimpleVirtualObjectStats(nullptr, heap_->serialized_objects(),
ObjectStats::SERIALIZED_OBJECTS_TYPE);
RecordSimpleVirtualObjectStats(nullptr, heap_->number_string_cache(),
@@ -569,23 +566,20 @@ void ObjectStatsCollectorImpl::CollectGlobalStatistics() {
RecordSimpleVirtualObjectStats(nullptr, heap_->retained_maps(),
ObjectStats::RETAINED_MAPS_TYPE);
- // WeakFixedArray.
+ // FixedArrayOfWeakCells.
RecordSimpleVirtualObjectStats(
- nullptr, WeakFixedArray::cast(heap_->noscript_shared_function_infos()),
+ nullptr,
+ FixedArrayOfWeakCells::cast(heap_->noscript_shared_function_infos()),
ObjectStats::NOSCRIPT_SHARED_FUNCTION_INFOS_TYPE);
- RecordSimpleVirtualObjectStats(nullptr,
- WeakFixedArray::cast(heap_->script_list()),
- ObjectStats::SCRIPT_LIST_TYPE);
+ RecordSimpleVirtualObjectStats(
+ nullptr, FixedArrayOfWeakCells::cast(heap_->script_list()),
+ ObjectStats::SCRIPT_LIST_TYPE);
// HashTable.
RecordHashTableVirtualObjectStats(nullptr, heap_->string_table(),
ObjectStats::STRING_TABLE_TYPE);
RecordHashTableVirtualObjectStats(nullptr, heap_->code_stubs(),
ObjectStats::CODE_STUBS_TABLE_TYPE);
-
- // WeakHashTable.
- RecordHashTableVirtualObjectStats(nullptr, heap_->weak_object_to_code_table(),
- ObjectStats::OBJECT_TO_CODE_TYPE);
}
void ObjectStatsCollectorImpl::RecordObjectStats(HeapObject* obj,
@@ -630,8 +624,8 @@ void ObjectStatsCollectorImpl::RecordVirtualMapDetails(Map* map) {
if (map->prototype_info()->IsPrototypeInfo()) {
PrototypeInfo* info = PrototypeInfo::cast(map->prototype_info());
Object* users = info->prototype_users();
- if (users->IsWeakFixedArray()) {
- RecordSimpleVirtualObjectStats(map, WeakFixedArray::cast(users),
+ if (users->IsFixedArrayOfWeakCells()) {
+ RecordSimpleVirtualObjectStats(map, FixedArrayOfWeakCells::cast(users),
ObjectStats::PROTOTYPE_USERS_TYPE);
}
}
@@ -639,18 +633,9 @@ void ObjectStatsCollectorImpl::RecordVirtualMapDetails(Map* map) {
}
void ObjectStatsCollectorImpl::RecordVirtualScriptDetails(Script* script) {
- FixedArray* infos = script->shared_function_infos();
RecordSimpleVirtualObjectStats(
script, script->shared_function_infos(),
ObjectStats::SCRIPT_SHARED_FUNCTION_INFOS_TYPE);
- // Split off weak cells from the regular weak cell type.
- for (int i = 0; i < infos->length(); i++) {
- if (infos->get(i)->IsWeakCell()) {
- RecordSimpleVirtualObjectStats(
- infos, WeakCell::cast(infos->get(i)),
- ObjectStats::SCRIPT_SHARED_FUNCTION_INFOS_TYPE);
- }
- }
// Log the size of external source code.
Object* source = script->source();
@@ -681,11 +666,6 @@ void ObjectStatsCollectorImpl::RecordVirtualSharedFunctionInfoDetails(
RecordSimpleVirtualObjectStats(
nullptr, info, ObjectStats::UNCOMPILED_SHARED_FUNCTION_INFO_TYPE);
}
- // SharedFunctonInfo::feedback_metadata() is a COW array.
- FeedbackMetadata* fm = FeedbackMetadata::cast(info->feedback_metadata());
- RecordVirtualObjectStats(info, fm, ObjectStats::FEEDBACK_METADATA_TYPE,
- fm->Size(), ObjectStats::kNoOverAllocation,
- kIgnoreCow);
}
void ObjectStatsCollectorImpl::RecordVirtualJSFunctionDetails(
@@ -702,7 +682,7 @@ namespace {
bool MatchesConstantElementsPair(Object* object) {
if (!object->IsTuple2()) return false;
Tuple2* tuple = Tuple2::cast(object);
- return tuple->value1()->IsSmi() && tuple->value2()->IsFixedArray();
+ return tuple->value1()->IsSmi() && tuple->value2()->IsFixedArrayExact();
}
} // namespace
@@ -711,20 +691,19 @@ void ObjectStatsCollectorImpl::
RecordVirtualObjectsForConstantPoolOrEmbeddedObjects(
HeapObject* parent, HeapObject* object,
ObjectStats::VirtualInstanceType type) {
- if (RecordSimpleVirtualObjectStats(parent, object, type)) {
- if (object->IsFixedArray()) {
- FixedArray* array = FixedArray::cast(object);
- for (int i = 0; i < array->length(); i++) {
- Object* entry = array->get(i);
- if (!entry->IsHeapObject()) continue;
- RecordVirtualObjectsForConstantPoolOrEmbeddedObjects(
- array, HeapObject::cast(entry), type);
- }
- } else if (MatchesConstantElementsPair(object)) {
- Tuple2* tuple = Tuple2::cast(object);
+ if (!RecordSimpleVirtualObjectStats(parent, object, type)) return;
+ if (object->IsFixedArrayExact()) {
+ FixedArray* array = FixedArray::cast(object);
+ for (int i = 0; i < array->length(); i++) {
+ Object* entry = array->get(i);
+ if (!entry->IsHeapObject()) continue;
RecordVirtualObjectsForConstantPoolOrEmbeddedObjects(
- tuple, HeapObject::cast(tuple->value2()), type);
+ array, HeapObject::cast(entry), type);
}
+ } else if (MatchesConstantElementsPair(object)) {
+ Tuple2* tuple = Tuple2::cast(object);
+ RecordVirtualObjectsForConstantPoolOrEmbeddedObjects(
+ tuple, HeapObject::cast(tuple->value2()), type);
}
}
@@ -738,7 +717,7 @@ void ObjectStatsCollectorImpl::RecordVirtualBytecodeArrayDetails(
FixedArray* constant_pool = FixedArray::cast(bytecode->constant_pool());
for (int i = 0; i < constant_pool->length(); i++) {
Object* entry = constant_pool->get(i);
- if (entry->IsFixedArray() || MatchesConstantElementsPair(entry)) {
+ if (entry->IsFixedArrayExact() || MatchesConstantElementsPair(entry)) {
RecordVirtualObjectsForConstantPoolOrEmbeddedObjects(
constant_pool, HeapObject::cast(entry),
ObjectStats::EMBEDDED_OBJECT_TYPE);
@@ -786,7 +765,7 @@ void ObjectStatsCollectorImpl::RecordVirtualCodeDetails(Code* code) {
RelocInfo::Mode mode = it.rinfo()->rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
Object* target = it.rinfo()->target_object();
- if (target->IsFixedArray() || MatchesConstantElementsPair(target)) {
+ if (target->IsFixedArrayExact() || MatchesConstantElementsPair(target)) {
RecordVirtualObjectsForConstantPoolOrEmbeddedObjects(
code, HeapObject::cast(target), ObjectStats::EMBEDDED_OBJECT_TYPE);
}
@@ -796,11 +775,9 @@ void ObjectStatsCollectorImpl::RecordVirtualCodeDetails(Code* code) {
void ObjectStatsCollectorImpl::RecordVirtualContext(Context* context) {
if (context->IsNativeContext()) {
- RecordSimpleVirtualObjectStats(nullptr, context,
- ObjectStats::NATIVE_CONTEXT_TYPE);
+ RecordObjectStats(context, NATIVE_CONTEXT_TYPE, context->Size());
} else if (context->IsFunctionContext()) {
- RecordSimpleVirtualObjectStats(nullptr, context,
- ObjectStats::FUNCTION_CONTEXT_TYPE);
+ RecordObjectStats(context, FUNCTION_CONTEXT_TYPE, context->Size());
} else {
RecordSimpleVirtualObjectStats(nullptr, context,
ObjectStats::OTHER_CONTEXT_TYPE);
diff --git a/deps/v8/src/heap/object-stats.h b/deps/v8/src/heap/object-stats.h
index 723ae53fd5..3648b9985b 100644
--- a/deps/v8/src/heap/object-stats.h
+++ b/deps/v8/src/heap/object-stats.h
@@ -29,7 +29,6 @@
V(EMBEDDED_OBJECT_TYPE) \
V(ENUM_CACHE_TYPE) \
V(ENUM_INDICES_CACHE_TYPE) \
- V(FEEDBACK_METADATA_TYPE) \
V(FEEDBACK_VECTOR_ENTRY_TYPE) \
V(FEEDBACK_VECTOR_HEADER_TYPE) \
V(FEEDBACK_VECTOR_SLOT_CALL_TYPE) \
@@ -40,14 +39,12 @@
V(FEEDBACK_VECTOR_SLOT_OTHER_TYPE) \
V(FEEDBACK_VECTOR_SLOT_STORE_TYPE) \
V(FEEDBACK_VECTOR_SLOT_STORE_UNUSED_TYPE) \
- V(FUNCTION_CONTEXT_TYPE) \
V(FUNCTION_TEMPLATE_INFO_ENTRIES_TYPE) \
V(GLOBAL_ELEMENTS_TYPE) \
V(GLOBAL_PROPERTIES_TYPE) \
V(JS_ARRAY_BOILERPLATE_TYPE) \
V(JS_COLLETION_TABLE_TYPE) \
V(JS_OBJECT_BOILERPLATE_TYPE) \
- V(NATIVE_CONTEXT_TYPE) \
V(NOSCRIPT_SHARED_FUNCTION_INFOS_TYPE) \
V(NUMBER_STRING_CACHE_TYPE) \
V(OBJECT_PROPERTY_DICTIONARY_TYPE) \
diff --git a/deps/v8/src/heap/objects-visiting-inl.h b/deps/v8/src/heap/objects-visiting-inl.h
index 8384cead02..b47dba7830 100644
--- a/deps/v8/src/heap/objects-visiting-inl.h
+++ b/deps/v8/src/heap/objects-visiting-inl.h
@@ -71,17 +71,22 @@ void HeapVisitor<ResultType, ConcreteVisitor>::VisitMapPointer(
host, reinterpret_cast<Object**>(map));
}
-#define VISIT(type) \
- template <typename ResultType, typename ConcreteVisitor> \
- ResultType HeapVisitor<ResultType, ConcreteVisitor>::Visit##type( \
- Map* map, type* object) { \
- ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this); \
- if (!visitor->ShouldVisit(object)) return ResultType(); \
- int size = type::BodyDescriptor::SizeOf(map, object); \
- if (visitor->ShouldVisitMapPointer()) \
- visitor->VisitMapPointer(object, object->map_slot()); \
- type::BodyDescriptor::IterateBody(object, size, visitor); \
- return static_cast<ResultType>(size); \
+#define VISIT(type) \
+ template <typename ResultType, typename ConcreteVisitor> \
+ ResultType HeapVisitor<ResultType, ConcreteVisitor>::Visit##type( \
+ Map* map, type* object) { \
+ ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this); \
+ if (!visitor->ShouldVisit(object)) return ResultType(); \
+ if (!visitor->AllowDefaultJSObjectVisit()) { \
+ DCHECK_WITH_MSG(!map->IsJSObjectMap(), \
+ "Implement custom visitor for new JSObject subclass in " \
+ "concurrent marker"); \
+ } \
+ int size = type::BodyDescriptor::SizeOf(map, object); \
+ if (visitor->ShouldVisitMapPointer()) \
+ visitor->VisitMapPointer(object, object->map_slot()); \
+ type::BodyDescriptor::IterateBody(map, object, size, visitor); \
+ return static_cast<ResultType>(size); \
}
TYPED_VISITOR_ID_LIST(VISIT)
#undef VISIT
@@ -100,7 +105,7 @@ ResultType HeapVisitor<ResultType, ConcreteVisitor>::VisitNativeContext(
int size = Context::BodyDescriptor::SizeOf(map, object);
if (visitor->ShouldVisitMapPointer())
visitor->VisitMapPointer(object, object->map_slot());
- Context::BodyDescriptor::IterateBody(object, size, visitor);
+ Context::BodyDescriptor::IterateBody(map, object, size, visitor);
return static_cast<ResultType>(size);
}
@@ -123,7 +128,7 @@ ResultType HeapVisitor<ResultType, ConcreteVisitor>::VisitJSObjectFast(
int size = JSObject::FastBodyDescriptor::SizeOf(map, object);
if (visitor->ShouldVisitMapPointer())
visitor->VisitMapPointer(object, object->map_slot());
- JSObject::FastBodyDescriptor::IterateBody(object, size, visitor);
+ JSObject::FastBodyDescriptor::IterateBody(map, object, size, visitor);
return static_cast<ResultType>(size);
}
@@ -135,7 +140,7 @@ ResultType HeapVisitor<ResultType, ConcreteVisitor>::VisitJSApiObject(
int size = JSObject::BodyDescriptor::SizeOf(map, object);
if (visitor->ShouldVisitMapPointer())
visitor->VisitMapPointer(object, object->map_slot());
- JSObject::BodyDescriptor::IterateBody(object, size, visitor);
+ JSObject::BodyDescriptor::IterateBody(map, object, size, visitor);
return static_cast<ResultType>(size);
}
@@ -147,7 +152,7 @@ ResultType HeapVisitor<ResultType, ConcreteVisitor>::VisitStruct(
int size = map->instance_size();
if (visitor->ShouldVisitMapPointer())
visitor->VisitMapPointer(object, object->map_slot());
- StructBodyDescriptor::IterateBody(object, size, visitor);
+ StructBodyDescriptor::IterateBody(map, object, size, visitor);
return static_cast<ResultType>(size);
}
@@ -166,7 +171,7 @@ int NewSpaceVisitor<ConcreteVisitor>::VisitJSFunction(Map* map,
JSFunction* object) {
ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this);
int size = JSFunction::BodyDescriptorWeak::SizeOf(map, object);
- JSFunction::BodyDescriptorWeak::IterateBody(object, size, visitor);
+ JSFunction::BodyDescriptorWeak::IterateBody(map, object, size, visitor);
return size;
}
@@ -175,7 +180,7 @@ int NewSpaceVisitor<ConcreteVisitor>::VisitNativeContext(Map* map,
Context* object) {
ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this);
int size = Context::BodyDescriptor::SizeOf(map, object);
- Context::BodyDescriptor::IterateBody(object, size, visitor);
+ Context::BodyDescriptor::IterateBody(map, object, size, visitor);
return size;
}
diff --git a/deps/v8/src/heap/objects-visiting.h b/deps/v8/src/heap/objects-visiting.h
index 7746c91c71..88e38dcb0c 100644
--- a/deps/v8/src/heap/objects-visiting.h
+++ b/deps/v8/src/heap/objects-visiting.h
@@ -40,7 +40,6 @@ class JSWeakCollection;
V(JSArrayBuffer) \
V(JSFunction) \
V(JSObject) \
- V(JSRegExp) \
V(JSWeakCollection) \
V(Map) \
V(Oddball) \
@@ -55,7 +54,9 @@ class JSWeakCollection;
V(Symbol) \
V(ThinString) \
V(TransitionArray) \
- V(WeakCell)
+ V(WasmInstanceObject) \
+ V(WeakCell) \
+ V(WeakFixedArray)
// The base class for visitors that need to dispatch on object type. The default
// behavior of all visit functions is to iterate body of the given object using
@@ -83,6 +84,9 @@ class HeapVisitor : public ObjectVisitor {
V8_INLINE bool ShouldVisitMapPointer() { return true; }
// A callback for visiting the map pointer in the object header.
V8_INLINE void VisitMapPointer(HeapObject* host, HeapObject** map);
+  // If this predicate returns false, then the heap visitor will fail in the
+  // default Visit implementation for subclasses of JSObject.
+ V8_INLINE bool AllowDefaultJSObjectVisit() { return true; }
#define VISIT(type) V8_INLINE ResultType Visit##type(Map* map, type* object);
TYPED_VISITOR_ID_LIST(VISIT)
diff --git a/deps/v8/src/heap/remembered-set.h b/deps/v8/src/heap/remembered-set.h
index 4e0f259c00..45a6422204 100644
--- a/deps/v8/src/heap/remembered-set.h
+++ b/deps/v8/src/heap/remembered-set.h
@@ -280,7 +280,9 @@ class UpdateTypedSlotHelper {
Callback callback) {
Object* code = Code::GetObjectFromEntryAddress(entry_address);
Object* old_code = code;
- SlotCallbackResult result = callback(&code);
+ SlotCallbackResult result =
+ callback(reinterpret_cast<MaybeObject**>(&code));
+ DCHECK(!HasWeakHeapObjectTag(code));
if (code != old_code) {
Memory::Address_at(entry_address) =
reinterpret_cast<Code*>(code)->entry();
@@ -296,9 +298,12 @@ class UpdateTypedSlotHelper {
DCHECK(RelocInfo::IsCodeTarget(rinfo->rmode()));
Code* old_target = Code::GetCodeFromTargetAddress(rinfo->target_address());
Object* new_target = old_target;
- SlotCallbackResult result = callback(&new_target);
+ SlotCallbackResult result =
+ callback(reinterpret_cast<MaybeObject**>(&new_target));
+ DCHECK(!HasWeakHeapObjectTag(new_target));
if (new_target != old_target) {
- rinfo->set_target_address(Code::cast(new_target)->instruction_start());
+ rinfo->set_target_address(
+ Code::cast(new_target)->raw_instruction_start());
}
return result;
}
@@ -311,7 +316,9 @@ class UpdateTypedSlotHelper {
DCHECK(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
HeapObject* old_target = rinfo->target_object();
Object* new_target = old_target;
- SlotCallbackResult result = callback(&new_target);
+ SlotCallbackResult result =
+ callback(reinterpret_cast<MaybeObject**>(&new_target));
+ DCHECK(!HasWeakHeapObjectTag(new_target));
if (new_target != old_target) {
rinfo->set_target_object(HeapObject::cast(new_target));
}
@@ -319,7 +326,7 @@ class UpdateTypedSlotHelper {
}
// Updates a typed slot using an untyped slot callback.
- // The callback accepts Object** and returns SlotCallbackResult.
+ // The callback accepts MaybeObject** and returns SlotCallbackResult.
template <typename Callback>
static SlotCallbackResult UpdateTypedSlot(Isolate* isolate,
SlotType slot_type, Address addr,
@@ -337,7 +344,7 @@ class UpdateTypedSlotHelper {
return UpdateEmbeddedPointer(&rinfo, callback);
}
case OBJECT_SLOT: {
- return callback(reinterpret_cast<Object**>(addr));
+ return callback(reinterpret_cast<MaybeObject**>(addr));
}
case CLEARED_SLOT:
break;
diff --git a/deps/v8/src/heap/scavenge-job.cc b/deps/v8/src/heap/scavenge-job.cc
index b649c010ae..9feebbf4d5 100644
--- a/deps/v8/src/heap/scavenge-job.cc
+++ b/deps/v8/src/heap/scavenge-job.cc
@@ -103,7 +103,7 @@ void ScavengeJob::ScheduleIdleTaskIfNeeded(Heap* heap, int bytes_allocated) {
void ScavengeJob::ScheduleIdleTask(Heap* heap) {
- if (!idle_task_pending_ && heap->use_tasks()) {
+ if (!idle_task_pending_ && !heap->IsTearingDown()) {
v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(heap->isolate());
if (V8::GetCurrentPlatform()->IdleTasksEnabled(isolate)) {
idle_task_pending_ = true;
diff --git a/deps/v8/src/heap/scavenger-inl.h b/deps/v8/src/heap/scavenger-inl.h
index 2971db98cc..4b07f16d11 100644
--- a/deps/v8/src/heap/scavenger-inl.h
+++ b/deps/v8/src/heap/scavenger-inl.h
@@ -6,6 +6,7 @@
#define V8_HEAP_SCAVENGER_INL_H_
#include "src/heap/scavenger.h"
+#include "src/objects-inl.h"
#include "src/objects/map.h"
namespace v8 {
@@ -30,13 +31,13 @@ bool Scavenger::ContainsOnlyData(VisitorId visitor_id) {
return false;
}
-void Scavenger::PageMemoryFence(Object* object) {
+void Scavenger::PageMemoryFence(MaybeObject* object) {
#ifdef THREAD_SANITIZER
// Perform a dummy acquire load to tell TSAN that there is no data race
// with page initialization.
- if (object->IsHeapObject()) {
- MemoryChunk* chunk =
- MemoryChunk::FromAddress(HeapObject::cast(object)->address());
+ HeapObject* heap_object;
+ if (object->ToStrongOrWeakHeapObject(&heap_object)) {
+ MemoryChunk* chunk = MemoryChunk::FromAddress(heap_object->address());
CHECK_NOT_NULL(chunk->synchronized_heap());
}
#endif
@@ -68,7 +69,7 @@ bool Scavenger::MigrateObject(Map* map, HeapObject* source, HeapObject* target,
return true;
}
-bool Scavenger::SemiSpaceCopyObject(Map* map, HeapObject** slot,
+bool Scavenger::SemiSpaceCopyObject(Map* map, HeapObjectReference** slot,
HeapObject* object, int object_size) {
DCHECK(heap()->AllowedToBeMigrated(object, NEW_SPACE));
AllocationAlignment alignment = HeapObject::RequiredAlignment(map);
@@ -83,10 +84,10 @@ bool Scavenger::SemiSpaceCopyObject(Map* map, HeapObject** slot,
if (!self_success) {
allocator_.FreeLast(NEW_SPACE, target, object_size);
MapWord map_word = object->map_word();
- *slot = map_word.ToForwardingAddress();
+ HeapObjectReference::Update(slot, map_word.ToForwardingAddress());
return true;
}
- *slot = target;
+ HeapObjectReference::Update(slot, target);
copied_list_.Push(ObjectAndSize(target, object_size));
copied_size_ += object_size;
@@ -95,8 +96,8 @@ bool Scavenger::SemiSpaceCopyObject(Map* map, HeapObject** slot,
return false;
}
-bool Scavenger::PromoteObject(Map* map, HeapObject** slot, HeapObject* object,
- int object_size) {
+bool Scavenger::PromoteObject(Map* map, HeapObjectReference** slot,
+ HeapObject* object, int object_size) {
AllocationAlignment alignment = HeapObject::RequiredAlignment(map);
AllocationResult allocation =
allocator_.Allocate(OLD_SPACE, object_size, alignment);
@@ -109,11 +110,10 @@ bool Scavenger::PromoteObject(Map* map, HeapObject** slot, HeapObject* object,
if (!self_success) {
allocator_.FreeLast(OLD_SPACE, target, object_size);
MapWord map_word = object->map_word();
- *slot = map_word.ToForwardingAddress();
+ HeapObjectReference::Update(slot, map_word.ToForwardingAddress());
return true;
}
- *slot = target;
-
+ HeapObjectReference::Update(slot, target);
if (!ContainsOnlyData(map->visitor_id())) {
promotion_list_.Push(ObjectAndSize(target, object_size));
}
@@ -123,7 +123,7 @@ bool Scavenger::PromoteObject(Map* map, HeapObject** slot, HeapObject* object,
return false;
}
-void Scavenger::EvacuateObjectDefault(Map* map, HeapObject** slot,
+void Scavenger::EvacuateObjectDefault(Map* map, HeapObjectReference** slot,
HeapObject* object, int object_size) {
SLOW_DCHECK(object_size <= Page::kAllocatableMemory);
SLOW_DCHECK(object->SizeFromMap(map) == object_size);
@@ -139,7 +139,7 @@ void Scavenger::EvacuateObjectDefault(Map* map, HeapObject** slot,
// If promotion failed, we try to copy the object to the other semi-space
if (SemiSpaceCopyObject(map, slot, object, object_size)) return;
- FatalProcessOutOfMemory("Scavenger: semi-space copy\n");
+ heap()->FatalProcessOutOfMemory("Scavenger: semi-space copy");
}
void Scavenger::EvacuateThinString(Map* map, HeapObject** slot,
@@ -157,7 +157,8 @@ void Scavenger::EvacuateThinString(Map* map, HeapObject** slot,
return;
}
- EvacuateObjectDefault(map, slot, object, object_size);
+ EvacuateObjectDefault(map, reinterpret_cast<HeapObjectReference**>(slot),
+ object, object_size);
}
void Scavenger::EvacuateShortcutCandidate(Map* map, HeapObject** slot,
@@ -187,17 +188,19 @@ void Scavenger::EvacuateShortcutCandidate(Map* map, HeapObject** slot,
return;
}
Map* map = first_word.ToMap();
- EvacuateObjectDefault(map, slot, first, first->SizeFromMap(map));
+ EvacuateObjectDefault(map, reinterpret_cast<HeapObjectReference**>(slot),
+ first, first->SizeFromMap(map));
base::AsAtomicPointer::Relaxed_Store(
reinterpret_cast<Map**>(object->address()),
MapWord::FromForwardingAddress(*slot).ToMap());
return;
}
- EvacuateObjectDefault(map, slot, object, object_size);
+ EvacuateObjectDefault(map, reinterpret_cast<HeapObjectReference**>(slot),
+ object, object_size);
}
-void Scavenger::EvacuateObject(HeapObject** slot, Map* map,
+void Scavenger::EvacuateObject(HeapObjectReference** slot, Map* map,
HeapObject* source) {
SLOW_DCHECK(heap_->InFromSpace(source));
SLOW_DCHECK(!MapWord::FromMap(map).IsForwardingAddress());
@@ -206,11 +209,15 @@ void Scavenger::EvacuateObject(HeapObject** slot, Map* map,
// that require re-reading the map.
switch (map->visitor_id()) {
case kVisitThinString:
- EvacuateThinString(map, slot, reinterpret_cast<ThinString*>(source),
- size);
+ // At the moment we don't allow weak pointers to thin strings.
+ DCHECK(!(*slot)->IsWeakHeapObject());
+ EvacuateThinString(map, reinterpret_cast<HeapObject**>(slot),
+ reinterpret_cast<ThinString*>(source), size);
break;
case kVisitShortcutCandidate:
- EvacuateShortcutCandidate(map, slot,
+ DCHECK(!(*slot)->IsWeakHeapObject());
+ // At the moment we don't allow weak pointers to cons strings.
+ EvacuateShortcutCandidate(map, reinterpret_cast<HeapObject**>(slot),
reinterpret_cast<ConsString*>(source), size);
break;
default:
@@ -219,7 +226,7 @@ void Scavenger::EvacuateObject(HeapObject** slot, Map* map,
}
}
-void Scavenger::ScavengeObject(HeapObject** p, HeapObject* object) {
+void Scavenger::ScavengeObject(HeapObjectReference** p, HeapObject* object) {
DCHECK(heap()->InFromSpace(object));
// Synchronized load that consumes the publishing CAS of MigrateObject.
@@ -228,8 +235,14 @@ void Scavenger::ScavengeObject(HeapObject** p, HeapObject* object) {
// If the first word is a forwarding address, the object has already been
// copied.
if (first_word.IsForwardingAddress()) {
+ HeapObject* dest = first_word.ToForwardingAddress();
DCHECK(heap()->InFromSpace(*p));
- *p = first_word.ToForwardingAddress();
+ if ((*p)->IsWeakHeapObject()) {
+ *p = HeapObjectReference::Weak(dest);
+ } else {
+ DCHECK((*p)->IsStrongHeapObject());
+ *p = HeapObjectReference::Strong(dest);
+ }
return;
}
@@ -242,13 +255,16 @@ void Scavenger::ScavengeObject(HeapObject** p, HeapObject* object) {
SlotCallbackResult Scavenger::CheckAndScavengeObject(Heap* heap,
Address slot_address) {
- Object** slot = reinterpret_cast<Object**>(slot_address);
- Object* object = *slot;
+ MaybeObject** slot = reinterpret_cast<MaybeObject**>(slot_address);
+ MaybeObject* object = *slot;
if (heap->InFromSpace(object)) {
- HeapObject* heap_object = reinterpret_cast<HeapObject*>(object);
+ HeapObject* heap_object;
+ bool success = object->ToStrongOrWeakHeapObject(&heap_object);
+ USE(success);
+ DCHECK(success);
DCHECK(heap_object->IsHeapObject());
- ScavengeObject(reinterpret_cast<HeapObject**>(slot), heap_object);
+ ScavengeObject(reinterpret_cast<HeapObjectReference**>(slot), heap_object);
object = *slot;
// If the object was in from space before and is after executing the
@@ -274,11 +290,27 @@ void ScavengeVisitor::VisitPointers(HeapObject* host, Object** start,
for (Object** p = start; p < end; p++) {
Object* object = *p;
if (!heap_->InNewSpace(object)) continue;
- scavenger_->ScavengeObject(reinterpret_cast<HeapObject**>(p),
+ scavenger_->ScavengeObject(reinterpret_cast<HeapObjectReference**>(p),
reinterpret_cast<HeapObject*>(object));
}
}
+void ScavengeVisitor::VisitPointers(HeapObject* host, MaybeObject** start,
+ MaybeObject** end) {
+ for (MaybeObject** p = start; p < end; p++) {
+ MaybeObject* object = *p;
+ if (!heap_->InNewSpace(object)) continue;
+ // Treat the weak reference as strong.
+ HeapObject* heap_object;
+ if (object->ToStrongOrWeakHeapObject(&heap_object)) {
+ scavenger_->ScavengeObject(reinterpret_cast<HeapObjectReference**>(p),
+ heap_object);
+ } else {
+ UNREACHABLE();
+ }
+ }
+}
+
} // namespace internal
} // namespace v8
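
Aside: the scavenger-inl.h hunks above switch slots from HeapObject** to HeapObjectReference** so that a forwarded slot keeps its weak or strong flavor. The following standalone sketch (hypothetical FakeObject/UpdateSlot names, an assumed low tag bit, not V8's real MaybeObject API) shows the general idea of encoding weakness in a pointer tag and preserving it when the slot is rewritten, which is what HeapObjectReference::Weak/Strong do in the diff.

    // Minimal sketch of tag-bit weak references; all names here are invented.
    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    struct FakeObject { int payload; };

    constexpr std::uintptr_t kWeakTag = 0x1;  // assumed tag bit for this sketch only

    std::uintptr_t MakeStrong(FakeObject* o) { return reinterpret_cast<std::uintptr_t>(o); }
    std::uintptr_t MakeWeak(FakeObject* o) { return reinterpret_cast<std::uintptr_t>(o) | kWeakTag; }
    bool IsWeak(std::uintptr_t ref) { return (ref & kWeakTag) != 0; }
    FakeObject* ToObject(std::uintptr_t ref) {
      return reinterpret_cast<FakeObject*>(ref & ~kWeakTag);
    }

    // Rewrites |*slot| to point at |forwarded| while keeping the weak bit intact.
    void UpdateSlot(std::uintptr_t* slot, FakeObject* forwarded) {
      *slot = IsWeak(*slot) ? MakeWeak(forwarded) : MakeStrong(forwarded);
    }

    int main() {
      FakeObject from{1}, to{2};
      std::uintptr_t weak_slot = MakeWeak(&from);
      UpdateSlot(&weak_slot, &to);
      assert(IsWeak(weak_slot) && ToObject(weak_slot) == &to);
      std::printf("weak slot now points at payload %d\n", ToObject(weak_slot)->payload);
      return 0;
    }
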
diff --git a/deps/v8/src/heap/scavenger.cc b/deps/v8/src/heap/scavenger.cc
index 3baba9521b..cd6c534704 100644
--- a/deps/v8/src/heap/scavenger.cc
+++ b/deps/v8/src/heap/scavenger.cc
@@ -23,34 +23,53 @@ class IterateAndScavengePromotedObjectsVisitor final : public ObjectVisitor {
inline void VisitPointers(HeapObject* host, Object** start,
Object** end) final {
- for (Address slot_address = reinterpret_cast<Address>(start);
- slot_address < reinterpret_cast<Address>(end);
- slot_address += kPointerSize) {
- Object** slot = reinterpret_cast<Object**>(slot_address);
+ for (Object** slot = start; slot < end; ++slot) {
Object* target = *slot;
- scavenger_->PageMemoryFence(target);
-
+ DCHECK(!HasWeakHeapObjectTag(target));
if (target->IsHeapObject()) {
- if (heap_->InFromSpace(target)) {
- scavenger_->ScavengeObject(reinterpret_cast<HeapObject**>(slot),
- HeapObject::cast(target));
- target = *slot;
- scavenger_->PageMemoryFence(target);
-
- if (heap_->InNewSpace(target)) {
- SLOW_DCHECK(target->IsHeapObject());
- SLOW_DCHECK(heap_->InToSpace(target));
- RememberedSet<OLD_TO_NEW>::Insert(Page::FromAddress(slot_address),
- slot_address);
- }
- SLOW_DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(
- HeapObject::cast(target)));
- } else if (record_slots_ &&
- MarkCompactCollector::IsOnEvacuationCandidate(
- HeapObject::cast(target))) {
- heap_->mark_compact_collector()->RecordSlot(host, slot, target);
- }
+ HandleSlot(host, reinterpret_cast<Address>(slot),
+ HeapObject::cast(target));
+ }
+ }
+ }
+
+ inline void VisitPointers(HeapObject* host, MaybeObject** start,
+ MaybeObject** end) final {
+ // Treat weak references as strong. TODO(marja): Proper weakness handling in
+ // the young generation.
+ for (MaybeObject** slot = start; slot < end; ++slot) {
+ MaybeObject* target = *slot;
+ HeapObject* heap_object;
+ if (target->ToStrongOrWeakHeapObject(&heap_object)) {
+ HandleSlot(host, reinterpret_cast<Address>(slot), heap_object);
+ }
+ }
+ }
+
+ inline void HandleSlot(HeapObject* host, Address slot_address,
+ HeapObject* target) {
+ HeapObjectReference** slot =
+ reinterpret_cast<HeapObjectReference**>(slot_address);
+ scavenger_->PageMemoryFence(reinterpret_cast<MaybeObject*>(target));
+
+ if (heap_->InFromSpace(target)) {
+ scavenger_->ScavengeObject(slot, target);
+ bool success = (*slot)->ToStrongOrWeakHeapObject(&target);
+ USE(success);
+ DCHECK(success);
+ scavenger_->PageMemoryFence(reinterpret_cast<MaybeObject*>(target));
+
+ if (heap_->InNewSpace(target)) {
+ SLOW_DCHECK(target->IsHeapObject());
+ SLOW_DCHECK(heap_->InToSpace(target));
+ RememberedSet<OLD_TO_NEW>::Insert(Page::FromAddress(slot_address),
+ slot_address);
}
+ SLOW_DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(
+ HeapObject::cast(target)));
+ } else if (record_slots_ && MarkCompactCollector::IsOnEvacuationCandidate(
+ HeapObject::cast(target))) {
+ heap_->mark_compact_collector()->RecordSlot(host, slot, target);
}
}
@@ -84,7 +103,7 @@ void Scavenger::IterateAndScavengePromotedObject(HeapObject* target, int size) {
is_compacting_ &&
heap()->incremental_marking()->atomic_marking_state()->IsBlack(target);
IterateAndScavengePromotedObjectsVisitor visitor(heap(), this, record_slots);
- target->IterateBody(target->map()->instance_type(), size, &visitor);
+ target->IterateBodyFast(target->map(), size, &visitor);
}
void Scavenger::AddPageToSweeperIfNecessary(MemoryChunk* page) {
@@ -106,7 +125,7 @@ void Scavenger::ScavengePage(MemoryChunk* page) {
RememberedSet<OLD_TO_NEW>::IterateTyped(
page, [this](SlotType type, Address host_addr, Address addr) {
return UpdateTypedSlotHelper::UpdateTypedSlot(
- heap_->isolate(), type, addr, [this](Object** addr) {
+ heap_->isolate(), type, addr, [this](MaybeObject** addr) {
return CheckAndScavengeObject(heap(),
reinterpret_cast<Address>(addr));
});
@@ -164,6 +183,7 @@ void Scavenger::Finalize() {
void RootScavengeVisitor::VisitRootPointer(Root root, const char* description,
Object** p) {
+ DCHECK(!HasWeakHeapObjectTag(*p));
ScavengePointer(p);
}
@@ -175,9 +195,10 @@ void RootScavengeVisitor::VisitRootPointers(Root root, const char* description,
void RootScavengeVisitor::ScavengePointer(Object** p) {
Object* object = *p;
+ DCHECK(!HasWeakHeapObjectTag(object));
if (!heap_->InNewSpace(object)) return;
- scavenger_->ScavengeObject(reinterpret_cast<HeapObject**>(p),
+ scavenger_->ScavengeObject(reinterpret_cast<HeapObjectReference**>(p),
reinterpret_cast<HeapObject*>(object));
}
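
Aside: the scavenger.cc hunks above funnel both strong and weak slots through HandleSlot, which either follows an existing forwarding address or copies the object and records one. The toy program below (invented ToyObject type, a flat array standing in for the to-space, no real semispaces) illustrates only that forwarding-pointer protocol: the first visit copies, later visits just reuse the published copy.

    // Toy forwarding-address sketch; not the Scavenger's actual data structures.
    #include <cstdio>

    struct ToyObject {
      int value;
      ToyObject* forwarding;  // set once the object has been copied
    };

    ToyObject* Scavenge(ToyObject* obj, ToyObject* to_space, int* to_top) {
      if (obj->forwarding != nullptr) return obj->forwarding;  // already copied
      ToyObject* target = &to_space[(*to_top)++];
      target->value = obj->value;  // copy payload
      obj->forwarding = target;    // publish forwarding address
      return target;
    }

    int main() {
      ToyObject from_space[2] = {{41, nullptr}, {42, nullptr}};
      ToyObject to_space[2] = {};
      int to_top = 0;
      ToyObject* a = Scavenge(&from_space[0], to_space, &to_top);
      ToyObject* b = Scavenge(&from_space[0], to_space, &to_top);  // reuses the copy
      std::printf("copied value %d, second visit reused copy: %d\n", a->value, a == b);
      return 0;
    }
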
diff --git a/deps/v8/src/heap/scavenger.h b/deps/v8/src/heap/scavenger.h
index e0008ae694..de2f49f0e2 100644
--- a/deps/v8/src/heap/scavenger.h
+++ b/deps/v8/src/heap/scavenger.h
@@ -50,7 +50,7 @@ class Scavenger {
inline Heap* heap() { return heap_; }
- inline void PageMemoryFence(Object* object);
+ inline void PageMemoryFence(MaybeObject* object);
void AddPageToSweeperIfNecessary(MemoryChunk* page);
@@ -61,24 +61,24 @@ class Scavenger {
// Scavenges an object |object| referenced from slot |p|. |object| is required
// to be in from space.
- inline void ScavengeObject(HeapObject** p, HeapObject* object);
+ inline void ScavengeObject(HeapObjectReference** p, HeapObject* object);
// Copies |source| to |target| and sets the forwarding pointer in |source|.
V8_INLINE bool MigrateObject(Map* map, HeapObject* source, HeapObject* target,
int size);
- V8_INLINE bool SemiSpaceCopyObject(Map* map, HeapObject** slot,
+ V8_INLINE bool SemiSpaceCopyObject(Map* map, HeapObjectReference** slot,
HeapObject* object, int object_size);
- V8_INLINE bool PromoteObject(Map* map, HeapObject** slot, HeapObject* object,
- int object_size);
+ V8_INLINE bool PromoteObject(Map* map, HeapObjectReference** slot,
+ HeapObject* object, int object_size);
- V8_INLINE void EvacuateObject(HeapObject** slot, Map* map,
+ V8_INLINE void EvacuateObject(HeapObjectReference** slot, Map* map,
HeapObject* source);
// Different cases for object evacuation.
- V8_INLINE void EvacuateObjectDefault(Map* map, HeapObject** slot,
+ V8_INLINE void EvacuateObjectDefault(Map* map, HeapObjectReference** slot,
HeapObject* object, int object_size);
V8_INLINE void EvacuateJSFunction(Map* map, HeapObject** slot,
@@ -135,6 +135,8 @@ class ScavengeVisitor final : public NewSpaceVisitor<ScavengeVisitor> {
V8_INLINE void VisitPointers(HeapObject* host, Object** start,
Object** end) final;
+ V8_INLINE void VisitPointers(HeapObject* host, MaybeObject** start,
+ MaybeObject** end) final;
private:
Heap* const heap_;
diff --git a/deps/v8/src/heap/setup-heap-internal.cc b/deps/v8/src/heap/setup-heap-internal.cc
index 8a7aca1694..ca6ce64e2f 100644
--- a/deps/v8/src/heap/setup-heap-internal.cc
+++ b/deps/v8/src/heap/setup-heap-internal.cc
@@ -8,8 +8,8 @@
#include "src/ast/context-slot-cache.h"
#include "src/compilation-cache.h"
#include "src/contexts.h"
-#include "src/factory.h"
#include "src/heap-symbols.h"
+#include "src/heap/factory.h"
#include "src/heap/heap.h"
#include "src/interpreter/interpreter.h"
#include "src/isolate.h"
@@ -27,6 +27,7 @@
#include "src/objects/shared-function-info.h"
#include "src/objects/string.h"
#include "src/regexp/jsregexp.h"
+#include "src/wasm/wasm-objects.h"
namespace v8 {
namespace internal {
@@ -77,11 +78,63 @@ const Heap::StructTable Heap::struct_table[] = {
#undef DATA_HANDLER_ELEMENT
};
+AllocationResult Heap::AllocateMap(InstanceType instance_type,
+ int instance_size,
+ ElementsKind elements_kind,
+ int inobject_properties) {
+ STATIC_ASSERT(LAST_JS_OBJECT_TYPE == LAST_TYPE);
+ DCHECK_IMPLIES(instance_type >= FIRST_JS_OBJECT_TYPE &&
+ !Map::CanHaveFastTransitionableElementsKind(instance_type),
+ IsDictionaryElementsKind(elements_kind) ||
+ IsTerminalElementsKind(elements_kind));
+ HeapObject* result = nullptr;
+ AllocationResult allocation = AllocateRaw(Map::kSize, MAP_SPACE);
+ if (!allocation.To(&result)) return allocation;
+
+ result->set_map_after_allocation(meta_map(), SKIP_WRITE_BARRIER);
+ return isolate()->factory()->InitializeMap(Map::cast(result), instance_type,
+ instance_size, elements_kind,
+ inobject_properties);
+}
+
+AllocationResult Heap::AllocatePartialMap(InstanceType instance_type,
+ int instance_size) {
+ Object* result = nullptr;
+ AllocationResult allocation = AllocateRaw(Map::kSize, MAP_SPACE);
+ if (!allocation.To(&result)) return allocation;
+ // Map::cast cannot be used due to uninitialized map field.
+ Map* map = reinterpret_cast<Map*>(result);
+ map->set_map_after_allocation(reinterpret_cast<Map*>(root(kMetaMapRootIndex)),
+ SKIP_WRITE_BARRIER);
+ map->set_instance_type(instance_type);
+ map->set_instance_size(instance_size);
+ // Initialize to only containing tagged fields.
+ if (FLAG_unbox_double_fields) {
+ map->set_layout_descriptor(LayoutDescriptor::FastPointerLayout());
+ }
+ // GetVisitorId requires a properly initialized LayoutDescriptor.
+ map->set_visitor_id(Map::GetVisitorId(map));
+ map->set_inobject_properties_start_or_constructor_function_index(0);
+ DCHECK(!map->IsJSObjectMap());
+ map->set_prototype_validity_cell(Smi::FromInt(Map::kPrototypeChainValid));
+ map->SetInObjectUnusedPropertyFields(0);
+ map->set_bit_field(0);
+ map->set_bit_field2(0);
+ DCHECK(!map->is_in_retained_map_list());
+ int bit_field3 = Map::EnumLengthBits::encode(kInvalidEnumCacheSentinel) |
+ Map::OwnsDescriptorsBit::encode(true) |
+ Map::ConstructionCounterBits::encode(Map::kNoSlackTracking);
+ map->set_bit_field3(bit_field3);
+ map->set_weak_cell_cache(Smi::kZero);
+ map->set_elements_kind(TERMINAL_FAST_ELEMENTS_KIND);
+ return map;
+}
+
namespace {
void FinalizePartialMap(Heap* heap, Map* map) {
map->set_dependent_code(DependentCode::cast(heap->empty_fixed_array()));
- map->set_raw_transitions(Smi::kZero);
+ map->set_raw_transitions(MaybeObject::FromSmi(Smi::kZero));
map->set_instance_descriptors(heap->empty_descriptor_array());
if (FLAG_unbox_double_fields) {
map->set_layout_descriptor(LayoutDescriptor::FastPointerLayout());
@@ -92,6 +145,41 @@ void FinalizePartialMap(Heap* heap, Map* map) {
} // namespace
+AllocationResult Heap::Allocate(Map* map, AllocationSpace space) {
+ DCHECK(map->instance_type() != MAP_TYPE);
+ int size = map->instance_size();
+ HeapObject* result = nullptr;
+ AllocationResult allocation = AllocateRaw(size, space);
+ if (!allocation.To(&result)) return allocation;
+ // New space objects are allocated white.
+ WriteBarrierMode write_barrier_mode =
+ space == NEW_SPACE ? SKIP_WRITE_BARRIER : UPDATE_WRITE_BARRIER;
+ result->set_map_after_allocation(map, write_barrier_mode);
+ return result;
+}
+
+AllocationResult Heap::AllocateEmptyFixedTypedArray(
+ ExternalArrayType array_type) {
+ int size = OBJECT_POINTER_ALIGN(FixedTypedArrayBase::kDataOffset);
+
+ HeapObject* object = nullptr;
+ AllocationResult allocation = AllocateRaw(
+ size, OLD_SPACE,
+ array_type == kExternalFloat64Array ? kDoubleAligned : kWordAligned);
+ if (!allocation.To(&object)) return allocation;
+
+ object->set_map_after_allocation(MapForFixedTypedArray(array_type),
+ SKIP_WRITE_BARRIER);
+ FixedTypedArrayBase* elements = FixedTypedArrayBase::cast(object);
+ elements->set_base_pointer(elements, SKIP_WRITE_BARRIER);
+ elements->set_external_pointer(
+ ExternalReference::fixed_typed_array_base_data_offset(isolate())
+ .address(),
+ SKIP_WRITE_BARRIER);
+ elements->set_length(0);
+ return elements;
+}
+
bool Heap::CreateInitialMaps() {
HeapObject* obj = nullptr;
{
@@ -112,6 +200,8 @@ bool Heap::CreateInitialMaps() {
}
ALLOCATE_PARTIAL_MAP(FIXED_ARRAY_TYPE, kVariableSizeSentinel, fixed_array);
+ ALLOCATE_PARTIAL_MAP(WEAK_FIXED_ARRAY_TYPE, kVariableSizeSentinel,
+ weak_fixed_array);
ALLOCATE_PARTIAL_MAP(FIXED_ARRAY_TYPE, kVariableSizeSentinel,
fixed_cow_array)
DCHECK_NE(fixed_array_map(), fixed_cow_array_map());
@@ -128,12 +218,22 @@ bool Heap::CreateInitialMaps() {
// Allocate the empty array.
{
- AllocationResult allocation = AllocateEmptyFixedArray();
- if (!allocation.To(&obj)) return false;
+ AllocationResult alloc = AllocateRaw(FixedArray::SizeFor(0), OLD_SPACE);
+ if (!alloc.To(&obj)) return false;
+ obj->set_map_after_allocation(fixed_array_map(), SKIP_WRITE_BARRIER);
+ FixedArray::cast(obj)->set_length(0);
}
set_empty_fixed_array(FixedArray::cast(obj));
{
+ AllocationResult alloc = AllocateRaw(WeakFixedArray::SizeFor(0), OLD_SPACE);
+ if (!alloc.To(&obj)) return false;
+ obj->set_map_after_allocation(weak_fixed_array_map(), SKIP_WRITE_BARRIER);
+ WeakFixedArray::cast(obj)->set_length(0);
+ }
+ set_empty_weak_fixed_array(WeakFixedArray::cast(obj));
+
+ {
AllocationResult allocation = Allocate(null_map(), OLD_SPACE);
if (!allocation.To(&obj)) return false;
}
@@ -177,11 +277,12 @@ bool Heap::CreateInitialMaps() {
// Allocate the empty descriptor array.
{
STATIC_ASSERT(DescriptorArray::kFirstIndex != 0);
- AllocationResult allocation =
- AllocateUninitializedFixedArray(DescriptorArray::kFirstIndex, TENURED);
- if (!allocation.To(&obj)) return false;
+ int length = DescriptorArray::kFirstIndex;
+ int size = FixedArray::SizeFor(length);
+ if (!AllocateRaw(size, OLD_SPACE).To(&obj)) return false;
+ obj->set_map_after_allocation(descriptor_array_map(), SKIP_WRITE_BARRIER);
+ DescriptorArray::cast(obj)->set_length(length);
}
- obj->set_map_no_write_barrier(descriptor_array_map());
set_empty_descriptor_array(DescriptorArray::cast(obj));
DescriptorArray::cast(obj)->set(DescriptorArray::kDescriptorLengthIndex,
Smi::kZero);
@@ -191,6 +292,7 @@ bool Heap::CreateInitialMaps() {
// Fix the instance_descriptors for the existing maps.
FinalizePartialMap(this, meta_map());
FinalizePartialMap(this, fixed_array_map());
+ FinalizePartialMap(this, weak_fixed_array_map());
FinalizePartialMap(this, fixed_cow_array_map());
FinalizePartialMap(this, descriptor_array_map());
FinalizePartialMap(this, undefined_map());
@@ -269,6 +371,7 @@ bool Heap::CreateInitialMaps() {
ALLOCATE_VARSIZE_MAP(FIXED_DOUBLE_ARRAY_TYPE, fixed_double_array)
fixed_double_array_map()->set_elements_kind(HOLEY_DOUBLE_ELEMENTS);
+ ALLOCATE_VARSIZE_MAP(FEEDBACK_METADATA_TYPE, feedback_metadata)
ALLOCATE_VARSIZE_MAP(BYTE_ARRAY_TYPE, byte_array)
ALLOCATE_VARSIZE_MAP(BYTECODE_ARRAY_TYPE, bytecode_array)
ALLOCATE_VARSIZE_MAP(FREE_SPACE_TYPE, free_space)
@@ -286,7 +389,17 @@ bool Heap::CreateInitialMaps() {
ALLOCATE_VARSIZE_MAP(CODE_TYPE, code)
- ALLOCATE_MAP(CELL_TYPE, Cell::kSize, cell)
+ ALLOCATE_MAP(CELL_TYPE, Cell::kSize, cell);
+ {
+ // The invalid_prototype_validity_cell is needed for JSObject maps.
+ Smi* value = Smi::FromInt(Map::kPrototypeChainInvalid);
+ AllocationResult alloc = AllocateRaw(Cell::kSize, OLD_SPACE);
+ if (!alloc.To(&obj)) return false;
+ obj->set_map_after_allocation(cell_map(), SKIP_WRITE_BARRIER);
+ Cell::cast(obj)->set_value(value);
+ set_invalid_prototype_validity_cell(Cell::cast(obj));
+ }
+
ALLOCATE_MAP(PROPERTY_CELL_TYPE, PropertyCell::kSize, global_property_cell)
ALLOCATE_MAP(WEAK_CELL_TYPE, WeakCell::kSize, weak_cell)
ALLOCATE_MAP(FILLER_TYPE, kPointerSize, one_pointer_filler)
@@ -310,23 +423,29 @@ bool Heap::CreateInitialMaps() {
ALLOCATE_VARSIZE_MAP(HASH_TABLE_TYPE, number_dictionary)
ALLOCATE_VARSIZE_MAP(HASH_TABLE_TYPE, simple_number_dictionary)
ALLOCATE_VARSIZE_MAP(HASH_TABLE_TYPE, string_table)
- ALLOCATE_VARSIZE_MAP(HASH_TABLE_TYPE, weak_hash_table)
ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, array_list)
- ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, function_context)
- ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, catch_context)
- ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, with_context)
- ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, debug_evaluate_context)
- ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, block_context)
- ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, module_context)
- ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, eval_context)
- ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, script_context)
+ ALLOCATE_VARSIZE_MAP(FUNCTION_CONTEXT_TYPE, function_context)
+ ALLOCATE_VARSIZE_MAP(CATCH_CONTEXT_TYPE, catch_context)
+ ALLOCATE_VARSIZE_MAP(WITH_CONTEXT_TYPE, with_context)
+ ALLOCATE_VARSIZE_MAP(DEBUG_EVALUATE_CONTEXT_TYPE, debug_evaluate_context)
+ ALLOCATE_VARSIZE_MAP(BLOCK_CONTEXT_TYPE, block_context)
+ ALLOCATE_VARSIZE_MAP(MODULE_CONTEXT_TYPE, module_context)
+ ALLOCATE_VARSIZE_MAP(EVAL_CONTEXT_TYPE, eval_context)
+ ALLOCATE_VARSIZE_MAP(SCRIPT_CONTEXT_TYPE, script_context)
ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, script_context_table)
- ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, native_context)
+ ALLOCATE_VARSIZE_MAP(BOILERPLATE_DESCRIPTION_TYPE, boilerplate_description)
+
+ ALLOCATE_VARSIZE_MAP(NATIVE_CONTEXT_TYPE, native_context)
native_context_map()->set_visitor_id(kVisitNativeContext);
+ ALLOCATE_MAP(CALL_HANDLER_INFO_TYPE, CallHandlerInfo::kSize,
+ side_effect_call_handler_info)
+ ALLOCATE_MAP(CALL_HANDLER_INFO_TYPE, CallHandlerInfo::kSize,
+ side_effect_free_call_handler_info)
+
ALLOCATE_MAP(SHARED_FUNCTION_INFO_TYPE, SharedFunctionInfo::kAlignedSize,
shared_function_info)
@@ -342,11 +461,22 @@ bool Heap::CreateInitialMaps() {
}
{
- AllocationResult allocation = AllocateEmptyScopeInfo();
- if (!allocation.To(&obj)) return false;
+ AllocationResult alloc = AllocateRaw(FixedArray::SizeFor(0), OLD_SPACE);
+ if (!alloc.To(&obj)) return false;
+ obj->set_map_after_allocation(scope_info_map(), SKIP_WRITE_BARRIER);
+ FixedArray::cast(obj)->set_length(0);
}
-
set_empty_scope_info(ScopeInfo::cast(obj));
+
+ {
+ AllocationResult alloc = AllocateRaw(FixedArray::SizeFor(0), OLD_SPACE);
+ if (!alloc.To(&obj)) return false;
+ obj->set_map_after_allocation(boilerplate_description_map(),
+ SKIP_WRITE_BARRIER);
+ FixedArray::cast(obj)->set_length(0);
+ }
+ set_empty_boilerplate_description(BoilerplateDescription::cast(obj));
+
{
AllocationResult allocation = Allocate(boolean_map(), OLD_SPACE);
if (!allocation.To(&obj)) return false;
@@ -361,30 +491,34 @@ bool Heap::CreateInitialMaps() {
set_false_value(Oddball::cast(obj));
Oddball::cast(obj)->set_kind(Oddball::kFalse);
- { // Empty arrays
- {
- ByteArray * byte_array;
- if (!AllocateByteArray(0, TENURED).To(&byte_array)) return false;
- set_empty_byte_array(byte_array);
- }
+ // Empty arrays.
+ {
+ if (!AllocateRaw(ByteArray::SizeFor(0), OLD_SPACE).To(&obj)) return false;
+ obj->set_map_after_allocation(byte_array_map(), SKIP_WRITE_BARRIER);
+ ByteArray::cast(obj)->set_length(0);
+ set_empty_byte_array(ByteArray::cast(obj));
+ }
- {
- PropertyArray* property_array;
- if (!AllocatePropertyArray(0, TENURED).To(&property_array)) return false;
- set_empty_property_array(property_array);
+ {
+ if (!AllocateRaw(FixedArray::SizeFor(0), OLD_SPACE).To(&obj)) {
+ return false;
}
+ obj->set_map_after_allocation(property_array_map(), SKIP_WRITE_BARRIER);
+ PropertyArray::cast(obj)->initialize_length(0);
+ set_empty_property_array(PropertyArray::cast(obj));
+ }
-#define ALLOCATE_EMPTY_FIXED_TYPED_ARRAY(Type, type, TYPE, ctype, size) \
- { \
- FixedTypedArrayBase* obj; \
- if (!AllocateEmptyFixedTypedArray(kExternal##Type##Array).To(&obj)) \
- return false; \
- set_empty_fixed_##type##_array(obj); \
- }
+#define ALLOCATE_EMPTY_FIXED_TYPED_ARRAY(Type, type, TYPE, ctype, size) \
+ { \
+ FixedTypedArrayBase* obj; \
+ if (!AllocateEmptyFixedTypedArray(kExternal##Type##Array).To(&obj)) \
+ return false; \
+ set_empty_fixed_##type##_array(obj); \
+ }
- TYPED_ARRAYS(ALLOCATE_EMPTY_FIXED_TYPED_ARRAY)
+ TYPED_ARRAYS(ALLOCATE_EMPTY_FIXED_TYPED_ARRAY)
#undef ALLOCATE_EMPTY_FIXED_TYPED_ARRAY
- }
+
DCHECK(!InNewSpace(empty_fixed_array()));
return true;
}
@@ -566,10 +700,6 @@ void Heap::CreateInitialObjects() {
set_retained_maps(ArrayList::cast(empty_fixed_array()));
set_retaining_path_targets(undefined_value());
- set_weak_object_to_code_table(*WeakHashTable::New(isolate(), 16, TENURED));
-
- set_weak_new_space_object_to_code_list(*ArrayList::New(isolate(), 16));
-
set_feedback_vectors_for_profiling_tools(undefined_value());
set_script_list(Smi::kZero);
@@ -584,6 +714,7 @@ void Heap::CreateInitialObjects() {
// Handling of script id generation is in Heap::NextScriptId().
set_last_script_id(Smi::FromInt(v8::UnboundScript::kNoScriptId));
+ set_last_debugging_id(Smi::FromInt(SharedFunctionInfo::kNoDebuggingId));
set_next_template_serial_number(Smi::kZero);
// Allocate the empty OrderedHashMap.
@@ -606,6 +737,11 @@ void Heap::CreateInitialObjects() {
}
set_empty_ordered_hash_set(*empty_ordered_hash_set);
+ // Allocate the empty FeedbackMetadata.
+ Handle<FeedbackMetadata> empty_feedback_metadata =
+ factory->NewFeedbackMetadata(0);
+ set_empty_feedback_metadata(*empty_feedback_metadata);
+
// Allocate the empty script.
Handle<Script> script = factory->NewScript(factory->empty_string());
script->set_type(Script::TYPE_NATIVE);
@@ -633,16 +769,20 @@ void Heap::CreateInitialObjects() {
cell = factory->NewPropertyCell(factory->empty_string());
cell->set_value(Smi::FromInt(Isolate::kProtectorValid));
- set_species_protector(*cell);
+ set_array_species_protector(*cell);
+
+ cell = factory->NewPropertyCell(factory->empty_string());
+ cell->set_value(Smi::FromInt(Isolate::kProtectorValid));
+ set_typed_array_species_protector(*cell);
+
+ cell = factory->NewPropertyCell(factory->empty_string());
+ cell->set_value(Smi::FromInt(Isolate::kProtectorValid));
+ set_promise_species_protector(*cell);
Handle<Cell> string_length_overflow_cell = factory->NewCell(
handle(Smi::FromInt(Isolate::kProtectorValid), isolate()));
set_string_length_protector(*string_length_overflow_cell);
- Handle<Cell> fast_array_iteration_cell = factory->NewCell(
- handle(Smi::FromInt(Isolate::kProtectorValid), isolate()));
- set_fast_array_iteration_protector(*fast_array_iteration_cell);
-
cell = factory->NewPropertyCell(factory->empty_string());
cell->set_value(Smi::FromInt(Isolate::kProtectorValid));
set_array_buffer_neutering_protector(*cell);
@@ -651,6 +791,10 @@ void Heap::CreateInitialObjects() {
cell->set_value(Smi::FromInt(Isolate::kProtectorValid));
set_promise_hook_protector(*cell);
+ Handle<Cell> promise_resolve_cell = factory->NewCell(
+ handle(Smi::FromInt(Isolate::kProtectorValid), isolate()));
+ set_promise_resolve_protector(*promise_resolve_cell);
+
cell = factory->NewPropertyCell(factory->empty_string());
cell->set_value(Smi::FromInt(Isolate::kProtectorValid));
set_promise_then_protector(*cell);
@@ -690,6 +834,12 @@ void Heap::CreateInternalAccessorInfoObjects() {
roots_[k##AccessorName##AccessorRootIndex] = *acessor_info;
ACCESSOR_INFO_LIST(INIT_ACCESSOR_INFO)
#undef INIT_ACCESSOR_INFO
+
+#define INIT_SIDE_EFFECT_FLAG(AccessorName) \
+ AccessorInfo::cast(roots_[k##AccessorName##AccessorRootIndex]) \
+ ->set_has_no_side_effect(true);
+ SIDE_EFFECT_FREE_ACCESSOR_INFO_LIST(INIT_SIDE_EFFECT_FLAG)
+#undef INIT_SIDE_EFFECT_FLAG
}
} // namespace internal
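
Aside: the new Heap::AllocateMap, Heap::AllocatePartialMap and Heap::Allocate helpers above all follow the same pattern: try a raw allocation, and if it fails, return the failure object unchanged so the caller can retry after a GC. The sketch below (invented Obj/AllocResult/AllocateRaw names, a single static backing object, not the real AllocationResult class) is only meant to show that control-flow shape.

    // Hedged sketch of the "allocate, bail out on failure" pattern.
    #include <cstdio>

    struct Obj { int size; };

    class AllocResult {
     public:
      static AllocResult Success(Obj* o) { return AllocResult(o); }
      static AllocResult RetryAfterGC() { return AllocResult(nullptr); }
      bool To(Obj** out) const {
        if (object_ == nullptr) return false;
        *out = object_;
        return true;
      }
     private:
      explicit AllocResult(Obj* o) : object_(o) {}
      Obj* object_;
    };

    AllocResult AllocateRaw(int size, bool space_full) {
      static Obj storage;
      if (space_full) return AllocResult::RetryAfterGC();
      storage.size = size;
      return AllocResult::Success(&storage);
    }

    AllocResult AllocateMapLike(int size, bool space_full) {
      Obj* result = nullptr;
      AllocResult allocation = AllocateRaw(size, space_full);
      if (!allocation.To(&result)) return allocation;  // propagate the failure upward
      // ... initialization of |result| would happen here ...
      return AllocResult::Success(result);
    }

    int main() {
      Obj* obj = nullptr;
      std::printf("full heap succeeds? %d\n", AllocateMapLike(88, true).To(&obj));
      std::printf("normal alloc ok?    %d\n", AllocateMapLike(88, false).To(&obj));
      return 0;
    }
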
diff --git a/deps/v8/src/heap/spaces-inl.h b/deps/v8/src/heap/spaces-inl.h
index 498c34bd54..c9cd68d5c6 100644
--- a/deps/v8/src/heap/spaces-inl.h
+++ b/deps/v8/src/heap/spaces-inl.h
@@ -281,8 +281,9 @@ AllocationResult LocalAllocationBuffer::AllocateRawAligned(
}
bool PagedSpace::EnsureLinearAllocationArea(int size_in_bytes) {
- if (allocation_info_.top() + size_in_bytes <= allocation_info_.limit())
+ if (allocation_info_.top() + size_in_bytes <= allocation_info_.limit()) {
return true;
+ }
return SlowRefillLinearAllocationArea(size_in_bytes);
}
@@ -458,8 +459,7 @@ AllocationResult NewSpace::AllocateRaw(int size_in_bytes,
#endif
}
-
-MUST_USE_RESULT inline AllocationResult NewSpace::AllocateRawSynchronized(
+V8_WARN_UNUSED_RESULT inline AllocationResult NewSpace::AllocateRawSynchronized(
int size_in_bytes, AllocationAlignment alignment) {
base::LockGuard<base::Mutex> guard(&mutex_);
return AllocateRaw(size_in_bytes, alignment);
diff --git a/deps/v8/src/heap/spaces.cc b/deps/v8/src/heap/spaces.cc
index d90cac90f2..5a94e1c3b9 100644
--- a/deps/v8/src/heap/spaces.cc
+++ b/deps/v8/src/heap/spaces.cc
@@ -9,6 +9,7 @@
#include "src/base/bits.h"
#include "src/base/macros.h"
#include "src/base/platform/semaphore.h"
+#include "src/base/template-utils.h"
#include "src/counters.h"
#include "src/heap/array-buffer-tracker.h"
#include "src/heap/concurrent-marking.h"
@@ -46,7 +47,8 @@ HeapObjectIterator::HeapObjectIterator(Page* page)
Space* owner = page->owner();
DCHECK(owner == page->heap()->old_space() ||
owner == page->heap()->map_space() ||
- owner == page->heap()->code_space());
+ owner == page->heap()->code_space() ||
+ owner == page->heap()->read_only_space());
#endif // DEBUG
}
@@ -59,10 +61,14 @@ bool HeapObjectIterator::AdvanceToNextPage() {
Heap* heap = space_->heap();
heap->mark_compact_collector()->sweeper()->EnsurePageIsIterable(cur_page);
+#ifdef ENABLE_MINOR_MC
if (cur_page->IsFlagSet(Page::SWEEP_TO_ITERATE))
heap->minor_mark_compact_collector()->MakeIterable(
cur_page, MarkingTreatmentMode::CLEAR,
FreeSpaceTreatmentMode::IGNORE_FREE_SPACE);
+#else
+ DCHECK(!cur_page->IsFlagSet(Page::SWEEP_TO_ITERATE));
+#endif // ENABLE_MINOR_MC
cur_addr_ = cur_page->area_start();
cur_end_ = cur_page->area_end();
DCHECK(cur_page->SweepingDone());
@@ -338,7 +344,7 @@ class MemoryAllocator::Unmapper::UnmapFreeMemoryTask : public CancelableTask {
};
void MemoryAllocator::Unmapper::FreeQueuedChunks() {
- if (heap_->use_tasks() && FLAG_concurrent_sweeping) {
+ if (!heap_->IsTearingDown() && FLAG_concurrent_sweeping) {
if (!MakeRoomForNewTasks()) {
// kMaxUnmapperTasks are already running. Avoid creating any more.
if (FLAG_trace_unmapper) {
@@ -348,7 +354,7 @@ void MemoryAllocator::Unmapper::FreeQueuedChunks() {
}
return;
}
- UnmapFreeMemoryTask* task = new UnmapFreeMemoryTask(heap_->isolate(), this);
+ auto task = base::make_unique<UnmapFreeMemoryTask>(heap_->isolate(), this);
if (FLAG_trace_unmapper) {
PrintIsolate(heap_->isolate(),
"Unmapper::FreeQueuedChunks: new task id=%" PRIu64 "\n",
@@ -359,8 +365,7 @@ void MemoryAllocator::Unmapper::FreeQueuedChunks() {
DCHECK_GE(active_unmapping_tasks_.Value(), 0);
active_unmapping_tasks_.Increment(1);
task_ids_[pending_unmapping_tasks_++] = task->id();
- V8::GetCurrentPlatform()->CallOnBackgroundThread(
- task, v8::Platform::kShortRunningTask);
+ V8::GetCurrentPlatform()->CallOnWorkerThread(std::move(task));
} else {
PerformFreeMemoryOnQueuedChunks<FreeMode::kUncommitPooled>();
}
@@ -631,7 +636,15 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
chunk->categories_[i] = nullptr;
}
- heap->incremental_marking()->non_atomic_marking_state()->ClearLiveness(chunk);
+ if (owner->identity() == RO_SPACE) {
+ heap->incremental_marking()
+ ->non_atomic_marking_state()
+ ->bitmap(chunk)
+ ->MarkAllBits();
+ } else {
+ heap->incremental_marking()->non_atomic_marking_state()->ClearLiveness(
+ chunk);
+ }
DCHECK_EQ(kFlagsOffset, OFFSET_OF(MemoryChunk, flags_));
@@ -678,6 +691,7 @@ Page* SemiSpace::InitializePage(MemoryChunk* chunk, Executability executable) {
Page* page = static_cast<Page*>(chunk);
heap()->incremental_marking()->SetNewSpacePageFlags(page);
page->AllocateLocalTracker();
+#ifdef ENABLE_MINOR_MC
if (FLAG_minor_mc) {
page->AllocateYoungGenerationBitmap();
heap()
@@ -685,6 +699,7 @@ Page* SemiSpace::InitializePage(MemoryChunk* chunk, Executability executable) {
->non_atomic_marking_state()
->ClearLiveness(page);
}
+#endif // ENABLE_MINOR_MC
page->InitializationMemoryFence();
return page;
}
@@ -1402,15 +1417,6 @@ void MemoryChunk::ReleaseYoungGenerationBitmap() {
// -----------------------------------------------------------------------------
// PagedSpace implementation
-STATIC_ASSERT(static_cast<ObjectSpace>(1 << AllocationSpace::NEW_SPACE) ==
- ObjectSpace::kObjectSpaceNewSpace);
-STATIC_ASSERT(static_cast<ObjectSpace>(1 << AllocationSpace::OLD_SPACE) ==
- ObjectSpace::kObjectSpaceOldSpace);
-STATIC_ASSERT(static_cast<ObjectSpace>(1 << AllocationSpace::CODE_SPACE) ==
- ObjectSpace::kObjectSpaceCodeSpace);
-STATIC_ASSERT(static_cast<ObjectSpace>(1 << AllocationSpace::MAP_SPACE) ==
- ObjectSpace::kObjectSpaceMapSpace);
-
void Space::AddAllocationObserver(AllocationObserver* observer) {
allocation_observers_.push_back(observer);
StartNextInlineAllocationStep();
@@ -1472,7 +1478,6 @@ bool PagedSpace::HasBeenSetUp() { return true; }
void PagedSpace::TearDown() {
for (auto it = begin(); it != end();) {
Page* page = *(it++); // Will be erased.
- ArrayBufferTracker::FreeAll(page);
heap()->memory_allocator()->Free<MemoryAllocator::kFull>(page);
}
anchor_.set_next_page(&anchor_);
@@ -1484,7 +1489,7 @@ void PagedSpace::RefillFreeList() {
// Any PagedSpace might invoke RefillFreeList. We filter all but our old
// generation spaces out.
if (identity() != OLD_SPACE && identity() != CODE_SPACE &&
- identity() != MAP_SPACE) {
+ identity() != MAP_SPACE && identity() != RO_SPACE) {
return;
}
MarkCompactCollector* collector = heap()->mark_compact_collector();
@@ -1784,6 +1789,13 @@ void PagedSpace::FreeLinearAllocationArea() {
InlineAllocationStep(current_top, nullptr, nullptr, 0);
SetTopAndLimit(nullptr, nullptr);
DCHECK_GE(current_limit, current_top);
+
+ // The code page of the linear allocation area needs to be unprotected
+ // because we are going to write a filler into that memory area below.
+ if (identity() == CODE_SPACE) {
+ heap_->UnprotectAndRegisterMemoryChunk(
+ MemoryChunk::FromAddress(current_top));
+ }
Free(current_top, current_limit - current_top,
SpaceAccountingMode::kSpaceAccounted);
}
@@ -1850,7 +1862,8 @@ bool PagedSpace::RefillLinearAllocationAreaFromFreeList(size_t size_in_bytes) {
if (!is_local()) {
heap()->StartIncrementalMarkingIfAllocationLimitIsReached(
- Heap::kNoGCFlags, kGCCallbackScheduleIdleGarbageCollection);
+ heap()->GCFlagsForIncrementalMarking(),
+ kGCCallbackScheduleIdleGarbageCollection);
}
size_t new_node_size = 0;
@@ -1859,13 +1872,6 @@ bool PagedSpace::RefillLinearAllocationAreaFromFreeList(size_t size_in_bytes) {
DCHECK_GE(new_node_size, size_in_bytes);
-#ifdef DEBUG
- for (size_t i = 0; i < size_in_bytes / kPointerSize; i++) {
- reinterpret_cast<Object**>(new_node->address())[i] =
- Smi::FromInt(kCodeZapValue);
- }
-#endif
-
// The old-space-step might have finished sweeping and restarted marking.
// Verify that it did not turn the page of the new node into an evacuation
// candidate.
@@ -1873,7 +1879,8 @@ bool PagedSpace::RefillLinearAllocationAreaFromFreeList(size_t size_in_bytes) {
// Memory in the linear allocation area is counted as allocated. We may free
// a little of this again immediately - see below.
- IncreaseAllocatedBytes(new_node_size, Page::FromAddress(new_node->address()));
+ Page* page = Page::FromAddress(new_node->address());
+ IncreaseAllocatedBytes(new_node_size, page);
Address start = new_node->address();
Address end = new_node->address() + new_node_size;
@@ -1881,6 +1888,9 @@ bool PagedSpace::RefillLinearAllocationAreaFromFreeList(size_t size_in_bytes) {
DCHECK_LE(limit, end);
DCHECK_LE(size_in_bytes, limit - start);
if (limit != end) {
+ if (identity() == CODE_SPACE) {
+ heap_->UnprotectAndRegisterMemoryChunk(page);
+ }
Free(limit, end - limit, SpaceAccountingMode::kSpaceAccounted);
}
SetLinearAllocationArea(start, limit);
@@ -1927,7 +1937,7 @@ void PagedSpace::Verify(ObjectVisitor* visitor) {
// All the interior pointers should be contained in the heap.
int size = object->Size();
- object->IterateBody(map->instance_type(), size, visitor);
+ object->IterateBody(map, size, visitor);
CHECK(object->address() + size <= top);
end_of_previous_object = object->address() + size;
}
@@ -2379,7 +2389,7 @@ void NewSpace::Verify() {
// All the interior pointers should be contained in the heap.
VerifyPointersVisitor visitor;
int size = object->Size();
- object->IterateBody(map->instance_type(), size, &visitor);
+ object->IterateBody(map, size, &visitor);
current += size;
} else {
@@ -2414,9 +2424,6 @@ void SemiSpace::SetUp(size_t initial_capacity, size_t maximum_capacity) {
void SemiSpace::TearDown() {
// Properly uncommit memory to keep the allocator counters in sync.
if (is_committed()) {
- for (Page* p : *this) {
- ArrayBufferTracker::FreeAll(p);
- }
Uncommit();
}
current_capacity_ = maximum_capacity_ = 0;
@@ -2714,25 +2721,17 @@ void FreeListCategory::Reset() {
available_ = 0;
}
-FreeSpace* FreeListCategory::PickNodeFromList(size_t* node_size) {
+FreeSpace* FreeListCategory::PickNodeFromList(size_t minimum_size,
+ size_t* node_size) {
DCHECK(page()->CanAllocate());
FreeSpace* node = top();
- if (node == nullptr) return nullptr;
- set_top(node->next());
- *node_size = node->Size();
- available_ -= *node_size;
- return node;
-}
-
-FreeSpace* FreeListCategory::TryPickNodeFromList(size_t minimum_size,
- size_t* node_size) {
- DCHECK(page()->CanAllocate());
- FreeSpace* node = PickNodeFromList(node_size);
- if ((node != nullptr) && (*node_size < minimum_size)) {
- Free(node->address(), *node_size, kLinkCategory);
+ if (node == nullptr || static_cast<size_t>(node->Size()) < minimum_size) {
*node_size = 0;
return nullptr;
}
+ set_top(node->next());
+ *node_size = node->Size();
+ available_ -= *node_size;
return node;
}
@@ -2750,6 +2749,11 @@ FreeSpace* FreeListCategory::SearchForNodeInList(size_t minimum_size,
set_top(cur_node->next());
}
if (prev_non_evac_node != nullptr) {
+ MemoryChunk* chunk =
+ MemoryChunk::FromAddress(prev_non_evac_node->address());
+ if (chunk->owner()->identity() == CODE_SPACE) {
+ chunk->heap()->UnprotectAndRegisterMemoryChunk(chunk);
+ }
prev_non_evac_node->set_next(cur_node->next());
}
*node_size = size;
@@ -2829,12 +2833,13 @@ size_t FreeList::Free(Address start, size_t size_in_bytes, FreeMode mode) {
return 0;
}
-FreeSpace* FreeList::FindNodeIn(FreeListCategoryType type, size_t* node_size) {
+FreeSpace* FreeList::FindNodeIn(FreeListCategoryType type, size_t minimum_size,
+ size_t* node_size) {
FreeListCategoryIterator it(this, type);
FreeSpace* node = nullptr;
while (it.HasNext()) {
FreeListCategory* current = it.Next();
- node = current->PickNodeFromList(node_size);
+ node = current->PickNodeFromList(minimum_size, node_size);
if (node != nullptr) {
DCHECK(IsVeryLong() || Available() == SumFreeLists());
return node;
@@ -2844,11 +2849,11 @@ FreeSpace* FreeList::FindNodeIn(FreeListCategoryType type, size_t* node_size) {
return node;
}
-FreeSpace* FreeList::TryFindNodeIn(FreeListCategoryType type, size_t* node_size,
- size_t minimum_size) {
+FreeSpace* FreeList::TryFindNodeIn(FreeListCategoryType type,
+ size_t minimum_size, size_t* node_size) {
if (categories_[type] == nullptr) return nullptr;
FreeSpace* node =
- categories_[type]->TryPickNodeFromList(minimum_size, node_size);
+ categories_[type]->PickNodeFromList(minimum_size, node_size);
if (node != nullptr) {
DCHECK(IsVeryLong() || Available() == SumFreeLists());
}
@@ -2882,7 +2887,8 @@ FreeSpace* FreeList::Allocate(size_t size_in_bytes, size_t* node_size) {
FreeListCategoryType type =
SelectFastAllocationFreeListCategoryType(size_in_bytes);
for (int i = type; i < kHuge && node == nullptr; i++) {
- node = FindNodeIn(static_cast<FreeListCategoryType>(i), node_size);
+ node = FindNodeIn(static_cast<FreeListCategoryType>(i), size_in_bytes,
+ node_size);
}
if (node == nullptr) {
@@ -2895,7 +2901,7 @@ FreeSpace* FreeList::Allocate(size_t size_in_bytes, size_t* node_size) {
// We didn't find anything in the huge list. Now search the best fitting
// free list for a node that has at least the requested size.
type = SelectFreeListCategoryType(size_in_bytes);
- node = TryFindNodeIn(type, node_size, size_in_bytes);
+ node = TryFindNodeIn(type, size_in_bytes, node_size);
}
if (node != nullptr) {
@@ -3276,7 +3282,8 @@ AllocationResult LargeObjectSpace::AllocateRaw(int object_size,
}
heap()->StartIncrementalMarkingIfAllocationLimitIsReached(
- Heap::kNoGCFlags, kGCCallbackScheduleIdleGarbageCollection);
+ heap()->GCFlagsForIncrementalMarking(),
+ kGCCallbackScheduleIdleGarbageCollection);
heap()->CreateFillerObjectAt(object->address(), object_size,
ClearRecordedSlots::kNo);
if (heap()->incremental_marking()->black_allocation()) {
@@ -3471,7 +3478,7 @@ void LargeObjectSpace::Verify() {
// Byte arrays and strings don't have interior pointers.
if (object->IsAbstractCode()) {
VerifyPointersVisitor code_visitor;
- object->IterateBody(map->instance_type(), object->Size(), &code_visitor);
+ object->IterateBody(map, object->Size(), &code_visitor);
} else if (object->IsFixedArray()) {
FixedArray* array = FixedArray::cast(object);
for (int j = 0; j < array->length(); j++) {
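
Aside: the spaces.cc hunks above merge PickNodeFromList and TryPickNodeFromList so the head node is handed out only when it meets the requested minimum size; a too-small head is simply left on the list instead of being freed and re-linked. The small sketch below (invented FreeNode type, one flat list rather than per-page categories) mirrors just that behaviour.

    // Simplified free-list node picking with a minimum size; toy types only.
    #include <cstddef>
    #include <cstdio>

    struct FreeNode {
      std::size_t size;
      FreeNode* next;
    };

    FreeNode* PickNodeFromList(FreeNode** top, std::size_t minimum_size,
                               std::size_t* node_size) {
      FreeNode* node = *top;
      if (node == nullptr || node->size < minimum_size) {
        *node_size = 0;
        return nullptr;  // empty or too small: leave the list untouched
      }
      *top = node->next;        // unlink the head
      *node_size = node->size;  // actual size may exceed the request
      return node;
    }

    int main() {
      FreeNode small_node{32, nullptr};
      FreeNode big_node{256, &small_node};
      FreeNode* top = &big_node;
      std::size_t got = 0;
      FreeNode* picked = PickNodeFromList(&top, 128, &got);
      std::printf("picked %zu bytes, new head holds %zu bytes\n", got, top->size);
      (void)picked;
      return 0;
    }
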
diff --git a/deps/v8/src/heap/spaces.h b/deps/v8/src/heap/spaces.h
index 1c8bad8dc5..e5377b0336 100644
--- a/deps/v8/src/heap/spaces.h
+++ b/deps/v8/src/heap/spaces.h
@@ -179,14 +179,10 @@ class FreeListCategory {
void Free(Address address, size_t size_in_bytes, FreeMode mode);
- // Picks a node from the list and stores its size in |node_size|. Returns
- // nullptr if the category is empty.
- FreeSpace* PickNodeFromList(size_t* node_size);
-
// Performs a single try to pick a node of at least |minimum_size| from the
// category. Stores the actual size in |node_size|. Returns nullptr if no
// node is found.
- FreeSpace* TryPickNodeFromList(size_t minimum_size, size_t* node_size);
+ FreeSpace* PickNodeFromList(size_t minimum_size, size_t* node_size);
// Picks a node of at least |minimum_size| from the category. Stores the
// actual size in |node_size|. Returns nullptr if no node is found.
@@ -1052,9 +1048,9 @@ class CodeRange {
// Allocates a chunk of memory from the large-object portion of
// the code range. On platforms with no separate code range, should
// not be called.
- MUST_USE_RESULT Address AllocateRawMemory(const size_t requested_size,
- const size_t commit_size,
- size_t* allocated);
+ V8_WARN_UNUSED_RESULT Address AllocateRawMemory(const size_t requested_size,
+ const size_t commit_size,
+ size_t* allocated);
bool CommitRawMemory(Address start, size_t length);
bool UncommitRawMemory(Address start, size_t length);
void FreeRawMemory(Address buf, size_t length);
@@ -1389,9 +1385,10 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
// filling it up with a recognizable non-nullptr bit pattern.
void ZapBlock(Address start, size_t size);
- MUST_USE_RESULT bool CommitExecutableMemory(VirtualMemory* vm, Address start,
- size_t commit_size,
- size_t reserved_size);
+ V8_WARN_UNUSED_RESULT bool CommitExecutableMemory(VirtualMemory* vm,
+ Address start,
+ size_t commit_size,
+ size_t reserved_size);
CodeRange* code_range() { return code_range_; }
Unmapper* unmapper() { return &unmapper_; }
@@ -1438,6 +1435,7 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
void UnregisterExecutableMemoryChunk(MemoryChunk* chunk) {
DCHECK_NE(executable_memory_.find(chunk), executable_memory_.end());
executable_memory_.erase(chunk);
+ chunk->heap()->UnregisterUnprotectedMemoryChunk(chunk);
}
Isolate* isolate_;
@@ -1779,7 +1777,8 @@ class V8_EXPORT_PRIVATE FreeList {
// bytes. Returns the actual node size in node_size which can be bigger than
// size_in_bytes. This method returns null if the allocation request cannot be
// handled by the free list.
- MUST_USE_RESULT FreeSpace* Allocate(size_t size_in_bytes, size_t* node_size);
+ V8_WARN_UNUSED_RESULT FreeSpace* Allocate(size_t size_in_bytes,
+ size_t* node_size);
// Clear the free list.
void Reset();
@@ -1879,12 +1878,14 @@ class V8_EXPORT_PRIVATE FreeList {
// Walks all available categories for a given |type| and tries to retrieve
// a node. Returns nullptr if the category is empty.
- FreeSpace* FindNodeIn(FreeListCategoryType type, size_t* node_size);
+ FreeSpace* FindNodeIn(FreeListCategoryType type, size_t minimum_size,
+ size_t* node_size);
// Tries to retrieve a node from the first category in a given |type|.
- // Returns nullptr if the category is empty.
- FreeSpace* TryFindNodeIn(FreeListCategoryType type, size_t* node_size,
- size_t minimum_size);
+ // Returns nullptr if the category is empty or the top entry is smaller
+ // than minimum_size.
+ FreeSpace* TryFindNodeIn(FreeListCategoryType type, size_t minimum_size,
+ size_t* node_size);
// Searches a given |type| for a node of at least |minimum_size|.
FreeSpace* SearchForNodeInList(FreeListCategoryType type, size_t* node_size,
@@ -1948,7 +1949,7 @@ class LocalAllocationBuffer {
LocalAllocationBuffer(const LocalAllocationBuffer& other);
LocalAllocationBuffer& operator=(const LocalAllocationBuffer& other);
- MUST_USE_RESULT inline AllocationResult AllocateRawAligned(
+ V8_WARN_UNUSED_RESULT inline AllocationResult AllocateRawAligned(
int size_in_bytes, AllocationAlignment alignment);
inline bool IsValid() { return allocation_info_.top() != nullptr; }
@@ -2103,17 +2104,17 @@ class V8_EXPORT_PRIVATE PagedSpace
// Allocate the requested number of bytes in the space if possible, return a
// failure object if not. Only use IGNORE_SKIP_LIST if the skip list is going
// to be manually updated later.
- MUST_USE_RESULT inline AllocationResult AllocateRawUnaligned(
+ V8_WARN_UNUSED_RESULT inline AllocationResult AllocateRawUnaligned(
int size_in_bytes, UpdateSkipList update_skip_list = UPDATE_SKIP_LIST);
// Allocate the requested number of bytes in the space double aligned if
// possible, return a failure object if not.
- MUST_USE_RESULT inline AllocationResult AllocateRawAligned(
+ V8_WARN_UNUSED_RESULT inline AllocationResult AllocateRawAligned(
int size_in_bytes, AllocationAlignment alignment);
// Allocate the requested number of bytes in the space and consider allocation
// alignment if needed.
- MUST_USE_RESULT inline AllocationResult AllocateRaw(
+ V8_WARN_UNUSED_RESULT inline AllocationResult AllocateRaw(
int size_in_bytes, AllocationAlignment alignment);
size_t Free(Address start, size_t size_in_bytes, SpaceAccountingMode mode) {
@@ -2293,24 +2294,25 @@ class V8_EXPORT_PRIVATE PagedSpace
inline HeapObject* TryAllocateLinearlyAligned(int* size_in_bytes,
AllocationAlignment alignment);
- MUST_USE_RESULT bool RefillLinearAllocationAreaFromFreeList(
+ V8_WARN_UNUSED_RESULT bool RefillLinearAllocationAreaFromFreeList(
size_t size_in_bytes);
// If sweeping is still in progress try to sweep unswept pages. If that is
// not successful, wait for the sweeper threads and retry free-list
// allocation. Returns false if there is not enough space and the caller
// has to retry after collecting garbage.
- MUST_USE_RESULT virtual bool SweepAndRetryAllocation(int size_in_bytes);
+ V8_WARN_UNUSED_RESULT virtual bool SweepAndRetryAllocation(int size_in_bytes);
// Slow path of AllocateRaw. This function is space-dependent. Returns false
// if there is not enough space and the caller has to retry after
// collecting garbage.
- MUST_USE_RESULT virtual bool SlowRefillLinearAllocationArea(
+ V8_WARN_UNUSED_RESULT virtual bool SlowRefillLinearAllocationArea(
int size_in_bytes);
// Implementation of SlowAllocateRaw. Returns false if there is not enough
// space and the caller has to retry after collecting garbage.
- MUST_USE_RESULT bool RawSlowRefillLinearAllocationArea(int size_in_bytes);
+ V8_WARN_UNUSED_RESULT bool RawSlowRefillLinearAllocationArea(
+ int size_in_bytes);
size_t area_size_;
@@ -2681,16 +2683,16 @@ class NewSpace : public SpaceWithLinearArea {
// Set the age mark in the active semispace.
void set_age_mark(Address mark) { to_space_.set_age_mark(mark); }
- MUST_USE_RESULT INLINE(AllocationResult AllocateRawAligned(
+ V8_WARN_UNUSED_RESULT INLINE(AllocationResult AllocateRawAligned(
int size_in_bytes, AllocationAlignment alignment));
- MUST_USE_RESULT INLINE(
+ V8_WARN_UNUSED_RESULT INLINE(
AllocationResult AllocateRawUnaligned(int size_in_bytes));
- MUST_USE_RESULT INLINE(AllocationResult AllocateRaw(
+ V8_WARN_UNUSED_RESULT INLINE(AllocationResult AllocateRaw(
int size_in_bytes, AllocationAlignment alignment));
- MUST_USE_RESULT inline AllocationResult AllocateRawSynchronized(
+ V8_WARN_UNUSED_RESULT inline AllocationResult AllocateRawSynchronized(
int size_in_bytes, AllocationAlignment alignment);
// Reset the allocation pointer to the beginning of the active semispace.
@@ -2806,9 +2808,10 @@ class V8_EXPORT_PRIVATE CompactionSpace : public PagedSpace {
// The space is temporary and not included in any snapshots.
bool snapshotable() override { return false; }
- MUST_USE_RESULT bool SweepAndRetryAllocation(int size_in_bytes) override;
+ V8_WARN_UNUSED_RESULT bool SweepAndRetryAllocation(
+ int size_in_bytes) override;
- MUST_USE_RESULT bool SlowRefillLinearAllocationArea(
+ V8_WARN_UNUSED_RESULT bool SlowRefillLinearAllocationArea(
int size_in_bytes) override;
};
@@ -2880,6 +2883,14 @@ class MapSpace : public PagedSpace {
#endif
};
+// -----------------------------------------------------------------------------
+// Read Only space for all Immortal Immovable and Immutable objects
+
+class ReadOnlySpace : public PagedSpace {
+ public:
+ ReadOnlySpace(Heap* heap, AllocationSpace id, Executability executable)
+ : PagedSpace(heap, id, executable) {}
+};
// -----------------------------------------------------------------------------
// Large objects ( > kMaxRegularHeapObjectSize ) are allocated and
@@ -2908,8 +2919,8 @@ class LargeObjectSpace : public Space {
// Shared implementation of AllocateRaw, AllocateRawCode and
// AllocateRawFixedArray.
- MUST_USE_RESULT AllocationResult
- AllocateRaw(int object_size, Executability executable);
+ V8_WARN_UNUSED_RESULT AllocationResult AllocateRaw(int object_size,
+ Executability executable);
// Available bytes for objects in this space.
inline size_t Available() override;
diff --git a/deps/v8/src/heap/store-buffer.cc b/deps/v8/src/heap/store-buffer.cc
index 724edf5721..3df5a5a53f 100644
--- a/deps/v8/src/heap/store-buffer.cc
+++ b/deps/v8/src/heap/store-buffer.cc
@@ -7,6 +7,7 @@
#include <algorithm>
#include "src/base/macros.h"
+#include "src/base/template-utils.h"
#include "src/counters.h"
#include "src/heap/incremental-marking.h"
#include "src/isolate.h"
@@ -35,7 +36,7 @@ void StoreBuffer::SetUp() {
VirtualMemory reservation;
if (!AllocVirtualMemory(kStoreBufferSize * 3, heap_->GetRandomMmapAddr(),
&reservation)) {
- V8::FatalProcessOutOfMemory("StoreBuffer::SetUp");
+ heap_->FatalProcessOutOfMemory("StoreBuffer::SetUp");
}
uintptr_t start_as_int = reinterpret_cast<uintptr_t>(reservation.address());
start_[0] =
@@ -59,7 +60,7 @@ void StoreBuffer::SetUp() {
if (!reservation.SetPermissions(reinterpret_cast<Address>(start_[0]),
kStoreBufferSize * kStoreBuffers,
PageAllocator::kReadWrite)) {
- V8::FatalProcessOutOfMemory("StoreBuffer::SetUp");
+ heap_->FatalProcessOutOfMemory("StoreBuffer::SetUp");
}
current_ = 0;
top_ = start_[current_];
@@ -94,9 +95,8 @@ void StoreBuffer::FlipStoreBuffers() {
if (!task_running_ && FLAG_concurrent_store_buffer) {
task_running_ = true;
- Task* task = new Task(heap_->isolate(), this);
- V8::GetCurrentPlatform()->CallOnBackgroundThread(
- task, v8::Platform::kShortRunningTask);
+ V8::GetCurrentPlatform()->CallOnWorkerThread(
+ base::make_unique<Task>(heap_->isolate(), this));
}
}
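
Aside: the store-buffer.cc hunk above (like the unmapper and sweeper hunks elsewhere in this commit) replaces CallOnBackgroundThread, which took a raw task pointer plus a priority hint, with CallOnWorkerThread taking a std::unique_ptr, so ownership of the task is transferred explicitly. The sketch below uses an invented MiniPlatform, not v8::Platform, to show that ownership-transfer shape.

    // Hypothetical platform illustrating unique_ptr task hand-off.
    #include <cstdio>
    #include <memory>
    #include <utility>
    #include <vector>

    class Task {
     public:
      virtual ~Task() = default;
      virtual void Run() = 0;
    };

    class MiniPlatform {
     public:
      void CallOnWorkerThread(std::unique_ptr<Task> task) {
        queue_.push_back(std::move(task));  // the platform now owns the task
      }
      void RunAll() {
        for (auto& t : queue_) t->Run();
        queue_.clear();  // tasks are destroyed here, exactly once
      }
     private:
      std::vector<std::unique_ptr<Task>> queue_;
    };

    class PrintTask final : public Task {
     public:
      explicit PrintTask(int id) : id_(id) {}
      void Run() override { std::printf("task %d ran\n", id_); }
     private:
      int id_;
    };

    int main() {
      MiniPlatform platform;
      platform.CallOnWorkerThread(std::make_unique<PrintTask>(1));
      platform.CallOnWorkerThread(std::make_unique<PrintTask>(2));
      platform.RunAll();
      return 0;
    }
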
diff --git a/deps/v8/src/heap/sweeper.cc b/deps/v8/src/heap/sweeper.cc
index 2072e407e9..f72f041c78 100644
--- a/deps/v8/src/heap/sweeper.cc
+++ b/deps/v8/src/heap/sweeper.cc
@@ -4,6 +4,7 @@
#include "src/heap/sweeper.h"
+#include "src/base/template-utils.h"
#include "src/heap/array-buffer-tracker-inl.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/mark-compact-inl.h"
@@ -47,15 +48,18 @@ Sweeper::FilterSweepingPagesScope::FilterSweepingPagesScope(
USE(pause_or_complete_scope_);
if (!sweeping_in_progress_) return;
- old_space_sweeping_list_ = std::move(sweeper_->sweeping_list_[OLD_SPACE]);
- sweeper_->sweeping_list_[OLD_SPACE].clear();
+ int old_space_index = GetSweepSpaceIndex(OLD_SPACE);
+ old_space_sweeping_list_ =
+ std::move(sweeper_->sweeping_list_[old_space_index]);
+ sweeper_->sweeping_list_[old_space_index].clear();
}
Sweeper::FilterSweepingPagesScope::~FilterSweepingPagesScope() {
DCHECK_EQ(sweeping_in_progress_, sweeper_->sweeping_in_progress());
if (!sweeping_in_progress_) return;
- sweeper_->sweeping_list_[OLD_SPACE] = std::move(old_space_sweeping_list_);
+ sweeper_->sweeping_list_[GetSweepSpaceIndex(OLD_SPACE)] =
+ std::move(old_space_sweeping_list_);
// old_space_sweeping_list_ does not need to be cleared as we don't use it.
}
@@ -78,17 +82,16 @@ class Sweeper::SweeperTask final : public CancelableTask {
void RunInternal() final {
TRACE_BACKGROUND_GC(tracer_,
GCTracer::BackgroundScope::MC_BACKGROUND_SWEEPING);
- DCHECK_GE(space_to_start_, FIRST_PAGED_SPACE);
- DCHECK_LE(space_to_start_, LAST_PAGED_SPACE);
- const int offset = space_to_start_ - FIRST_PAGED_SPACE;
- const int num_spaces = LAST_PAGED_SPACE - FIRST_PAGED_SPACE + 1;
- for (int i = 0; i < num_spaces; i++) {
- const int space_id = FIRST_PAGED_SPACE + ((i + offset) % num_spaces);
+ DCHECK(IsValidSweepingSpace(space_to_start_));
+ const int offset = space_to_start_ - FIRST_GROWABLE_PAGED_SPACE;
+ for (int i = 0; i < kNumberOfSweepingSpaces; i++) {
+ const AllocationSpace space_id = static_cast<AllocationSpace>(
+ FIRST_GROWABLE_PAGED_SPACE +
+ ((i + offset) % kNumberOfSweepingSpaces));
// Do not sweep code space concurrently.
- if (static_cast<AllocationSpace>(space_id) == CODE_SPACE) continue;
- DCHECK_GE(space_id, FIRST_PAGED_SPACE);
- DCHECK_LE(space_id, LAST_PAGED_SPACE);
- sweeper_->SweepSpaceFromTask(static_cast<AllocationSpace>(space_id));
+ if (space_id == CODE_SPACE) continue;
+ DCHECK(IsValidSweepingSpace(space_id));
+ sweeper_->SweepSpaceFromTask(space_id);
}
num_sweeping_tasks_->Decrement(1);
pending_sweeper_tasks_->Signal();
@@ -136,7 +139,9 @@ void Sweeper::StartSweeping() {
MajorNonAtomicMarkingState* marking_state =
heap_->mark_compact_collector()->non_atomic_marking_state();
ForAllSweepingSpaces([this, marking_state](AllocationSpace space) {
- std::sort(sweeping_list_[space].begin(), sweeping_list_[space].end(),
+ int space_index = GetSweepSpaceIndex(space);
+ std::sort(sweeping_list_[space_index].begin(),
+ sweeping_list_[space_index].end(),
[marking_state](Page* a, Page* b) {
return marking_state->live_bytes(a) <
marking_state->live_bytes(b);
@@ -152,13 +157,12 @@ void Sweeper::StartSweeperTasks() {
ForAllSweepingSpaces([this](AllocationSpace space) {
DCHECK(IsValidSweepingSpace(space));
num_sweeping_tasks_.Increment(1);
- SweeperTask* task = new SweeperTask(heap_->isolate(), this,
- &pending_sweeper_tasks_semaphore_,
- &num_sweeping_tasks_, space);
+ auto task = base::make_unique<SweeperTask>(
+ heap_->isolate(), this, &pending_sweeper_tasks_semaphore_,
+ &num_sweeping_tasks_, space);
DCHECK_LT(num_tasks_, kMaxSweeperTasks);
task_ids_[num_tasks_++] = task->id();
- V8::GetCurrentPlatform()->CallOnBackgroundThread(
- task, v8::Platform::kShortRunningTask);
+ V8::GetCurrentPlatform()->CallOnWorkerThread(std::move(task));
});
ScheduleIncrementalSweepingTask();
}
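// Illustrative sketch (not from the diff): the sweeper task is now built with
// base::make_unique and handed to the platform through CallOnWorkerThread, which
// accepts a std::unique_ptr instead of a raw pointer plus an expected-runtime
// hint. A simplified model with a hypothetical Task/Platform pair showing the
// ownership transfer:
#include <memory>
#include <utility>

struct Task { virtual ~Task() = default; virtual void Run() = 0; };

struct Platform {
  // The platform takes ownership; the caller can no longer leak or double-free.
  void CallOnWorkerThread(std::unique_ptr<Task> task) { task->Run(); }
};

struct SweepTask final : Task { void Run() override {} };

void Schedule(Platform* platform) {
  auto task = std::make_unique<SweepTask>();
  platform->CallOnWorkerThread(std::move(task));  // ownership moves to the platform
}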
@@ -178,7 +182,7 @@ void Sweeper::SweepOrWaitUntilSweepingCompleted(Page* page) {
Page* Sweeper::GetSweptPageSafe(PagedSpace* space) {
base::LockGuard<base::Mutex> guard(&mutex_);
- SweptList& list = swept_list_[space->identity()];
+ SweptList& list = swept_list_[GetSweepSpaceIndex(space->identity())];
if (!list.empty()) {
auto last_page = list.back();
list.pop_back();
@@ -215,8 +219,9 @@ void Sweeper::EnsureCompleted() {
AbortAndWaitForTasks();
- ForAllSweepingSpaces(
- [this](AllocationSpace space) { CHECK(sweeping_list_[space].empty()); });
+ ForAllSweepingSpaces([this](AllocationSpace space) {
+ CHECK(sweeping_list_[GetSweepSpaceIndex(space)].empty());
+ });
sweeping_in_progress_ = false;
}
@@ -283,8 +288,9 @@ int Sweeper::RawSweep(Page* p, FreeListRebuildingMode free_list_mode,
free_start, size, SpaceAccountingMode::kSpaceUnaccounted);
max_freed_bytes = Max(freed_bytes, max_freed_bytes);
} else {
- p->heap()->CreateFillerObjectAt(free_start, static_cast<int>(size),
- ClearRecordedSlots::kNo);
+ p->heap()->CreateFillerObjectAt(
+ free_start, static_cast<int>(size), ClearRecordedSlots::kNo,
+ ClearFreedMemoryMode::kClearFreedMemory);
}
RememberedSet<OLD_TO_NEW>::RemoveRange(p, free_start, free_end,
SlotSet::KEEP_EMPTY_BUCKETS);
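// Illustrative sketch (not from the diff): RawSweep now passes an explicit
// ClearFreedMemoryMode, so the filler written over dead objects can also scrub
// the freed byte range. A simplified, standalone model of that flag; the real
// filler additionally installs a map word, which is omitted here:
#include <cstdint>
#include <cstring>

enum class ClearFreedMemoryMode { kDontClearFreedMemory, kClearFreedMemory };

void CreateFillerAt(uint8_t* free_start, std::size_t size,
                    ClearFreedMemoryMode mode) {
  if (mode == ClearFreedMemoryMode::kClearFreedMemory) {
    std::memset(free_start, 0, size);  // zap the freed range
  }
}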
@@ -323,7 +329,8 @@ int Sweeper::RawSweep(Page* p, FreeListRebuildingMode free_list_mode,
max_freed_bytes = Max(freed_bytes, max_freed_bytes);
} else {
p->heap()->CreateFillerObjectAt(free_start, static_cast<int>(size),
- ClearRecordedSlots::kNo);
+ ClearRecordedSlots::kNo,
+ ClearFreedMemoryMode::kClearFreedMemory);
}
RememberedSet<OLD_TO_NEW>::RemoveRange(p, free_start, p->area_end(),
@@ -378,7 +385,7 @@ bool Sweeper::SweepSpaceIncrementallyFromTask(AllocationSpace identity) {
if (Page* page = GetSweepingPageSafe(identity)) {
ParallelSweepPage(page, identity);
}
- return sweeping_list_[identity].empty();
+ return sweeping_list_[GetSweepSpaceIndex(identity)].empty();
}
int Sweeper::ParallelSweepSpace(AllocationSpace identity,
@@ -435,7 +442,7 @@ int Sweeper::ParallelSweepPage(Page* page, AllocationSpace identity) {
{
base::LockGuard<base::Mutex> guard(&mutex_);
- swept_list_[identity].push_back(page);
+ swept_list_[GetSweepSpaceIndex(identity)].push_back(page);
}
return max_freed;
}
@@ -463,7 +470,7 @@ void Sweeper::AddPage(AllocationSpace space, Page* page,
DCHECK_EQ(Sweeper::READD_TEMPORARY_REMOVED_PAGE, mode);
}
DCHECK_EQ(Page::kSweepingPending, page->concurrent_sweeping_state().Value());
- sweeping_list_[space].push_back(page);
+ sweeping_list_[GetSweepSpaceIndex(space)].push_back(page);
}
void Sweeper::PrepareToBeSweptPage(AllocationSpace space, Page* page) {
@@ -480,10 +487,11 @@ void Sweeper::PrepareToBeSweptPage(AllocationSpace space, Page* page) {
Page* Sweeper::GetSweepingPageSafe(AllocationSpace space) {
base::LockGuard<base::Mutex> guard(&mutex_);
DCHECK(IsValidSweepingSpace(space));
+ int space_index = GetSweepSpaceIndex(space);
Page* page = nullptr;
- if (!sweeping_list_[space].empty()) {
- page = sweeping_list_[space].front();
- sweeping_list_[space].pop_front();
+ if (!sweeping_list_[space_index].empty()) {
+ page = sweeping_list_[space_index].front();
+ sweeping_list_[space_index].pop_front();
}
return page;
}
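// Illustrative sketch (not from the diff): GetSweepingPageSafe pops the next
// page under the sweeper mutex so concurrent tasks never receive the same page
// twice. A simplified, standalone model using std::mutex in place of
// base::Mutex and a hypothetical Page type:
#include <deque>
#include <mutex>

struct Page {};

class SweepQueue {
 public:
  Page* PopSafe() {
    std::lock_guard<std::mutex> guard(mutex_);
    if (pages_.empty()) return nullptr;
    Page* page = pages_.front();
    pages_.pop_front();
    return page;
  }

 private:
  std::mutex mutex_;
  std::deque<Page*> pages_;
};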
@@ -550,12 +558,11 @@ void Sweeper::StartIterabilityTasks() {
DCHECK(!iterability_task_started_);
if (FLAG_concurrent_sweeping && !iterability_list_.empty()) {
- IterabilityTask* task = new IterabilityTask(heap_->isolate(), this,
- &iterability_task_semaphore_);
+ auto task = base::make_unique<IterabilityTask>(
+ heap_->isolate(), this, &iterability_task_semaphore_);
iterability_task_id_ = task->id();
iterability_task_started_ = true;
- V8::GetCurrentPlatform()->CallOnBackgroundThread(
- task, v8::Platform::kShortRunningTask);
+ V8::GetCurrentPlatform()->CallOnWorkerThread(std::move(task));
}
}
diff --git a/deps/v8/src/heap/sweeper.h b/deps/v8/src/heap/sweeper.h
index 6eee902bcc..ecf1f8d4d2 100644
--- a/deps/v8/src/heap/sweeper.h
+++ b/deps/v8/src/heap/sweeper.h
@@ -51,7 +51,8 @@ class Sweeper {
void FilterOldSpaceSweepingPages(Callback callback) {
if (!sweeping_in_progress_) return;
- SweepingList* sweeper_list = &sweeper_->sweeping_list_[OLD_SPACE];
+ SweepingList* sweeper_list =
+ &sweeper_->sweeping_list_[GetSweepSpaceIndex(OLD_SPACE)];
// Iteration here is from most free space to least free space.
for (auto it = old_space_sweeping_list_.begin();
it != old_space_sweeping_list_.end(); it++) {
@@ -123,7 +124,8 @@ class Sweeper {
class IterabilityTask;
class SweeperTask;
- static const int kNumberOfSweepingSpaces = LAST_PAGED_SPACE + 1;
+ static const int kNumberOfSweepingSpaces =
+ LAST_GROWABLE_PAGED_SPACE - FIRST_GROWABLE_PAGED_SPACE + 1;
static const int kMaxSweeperTasks = 3;
template <typename Callback>
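// Illustrative sketch (not from the diff): the per-space arrays are now sized
// by the growable paged spaces only, and every lookup maps the AllocationSpace
// enum onto a dense [0, kNumberOfSweepingSpaces) index. A simplified,
// standalone model with hypothetical enum values standing in for V8's:
#include <cassert>

enum AllocationSpace { RO_SPACE, NEW_SPACE, OLD_SPACE, CODE_SPACE, MAP_SPACE };
constexpr AllocationSpace kFirstGrowablePagedSpace = OLD_SPACE;
constexpr AllocationSpace kLastGrowablePagedSpace = MAP_SPACE;
constexpr int kNumberOfSweepingSpaces =
    kLastGrowablePagedSpace - kFirstGrowablePagedSpace + 1;

constexpr bool IsValidSweepingSpace(AllocationSpace space) {
  return space >= kFirstGrowablePagedSpace && space <= kLastGrowablePagedSpace;
}

inline int GetSweepSpaceIndex(AllocationSpace space) {
  assert(IsValidSweepingSpace(space));
  return space - kFirstGrowablePagedSpace;  // dense index into fixed-size arrays
}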
@@ -137,7 +139,7 @@ class Sweeper {
bool IsDoneSweeping() const {
bool is_done = true;
ForAllSweepingSpaces([this, &is_done](AllocationSpace space) {
- if (!sweeping_list_[space].empty()) is_done = false;
+ if (!sweeping_list_[GetSweepSpaceIndex(space)].empty()) is_done = false;
});
return is_done;
}
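// Illustrative sketch (not from the diff): IsDoneSweeping folds a predicate over
// every sweeping space via the ForAllSweepingSpaces template-callback helper. A
// simplified, standalone model in which the callback receives a plain index
// rather than an AllocationSpace:
#include <array>
#include <deque>

struct Page {};
constexpr int kNumberOfSweepingSpaces = 3;

class SweepLists {
 public:
  template <typename Callback>
  void ForAllSweepingSpaces(Callback callback) const {
    for (int i = 0; i < kNumberOfSweepingSpaces; i++) callback(i);
  }

  bool IsDoneSweeping() const {
    bool is_done = true;
    ForAllSweepingSpaces([this, &is_done](int space_index) {
      if (!sweeping_list_[space_index].empty()) is_done = false;
    });
    return is_done;
  }

 private:
  std::array<std::deque<Page*>, kNumberOfSweepingSpaces> sweeping_list_;
};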
@@ -159,11 +161,17 @@ class Sweeper {
void MakeIterable(Page* page);
bool IsValidIterabilitySpace(AllocationSpace space) {
- return space == NEW_SPACE;
+ return space == NEW_SPACE || space == RO_SPACE;
}
- bool IsValidSweepingSpace(AllocationSpace space) {
- return space >= FIRST_PAGED_SPACE && space <= LAST_PAGED_SPACE;
+ static bool IsValidSweepingSpace(AllocationSpace space) {
+ return space >= FIRST_GROWABLE_PAGED_SPACE &&
+ space <= LAST_GROWABLE_PAGED_SPACE;
+ }
+
+ static int GetSweepSpaceIndex(AllocationSpace space) {
+ DCHECK(IsValidSweepingSpace(space));
+ return space - FIRST_GROWABLE_PAGED_SPACE;
}
Heap* const heap_;