Diffstat (limited to 'deps/v8/src/heap')
-rw-r--r--  deps/v8/src/heap/array-buffer-collector.cc |  25
-rw-r--r--  deps/v8/src/heap/array-buffer-collector.h |   4
-rw-r--r--  deps/v8/src/heap/array-buffer-tracker-inl.h | 105
-rw-r--r--  deps/v8/src/heap/array-buffer-tracker.cc |  33
-rw-r--r--  deps/v8/src/heap/array-buffer-tracker.h |  25
-rw-r--r--  deps/v8/src/heap/basic-memory-chunk.h |  22
-rw-r--r--  deps/v8/src/heap/concurrent-marking.cc |  49
-rw-r--r--  deps/v8/src/heap/concurrent-marking.h |   6
-rw-r--r--  deps/v8/src/heap/embedder-tracing.h |   7
-rw-r--r--  deps/v8/src/heap/factory-inl.h |  27
-rw-r--r--  deps/v8/src/heap/factory.cc | 349
-rw-r--r--  deps/v8/src/heap/factory.h | 141
-rw-r--r--  deps/v8/src/heap/heap-inl.h |  45
-rw-r--r--  deps/v8/src/heap/heap-write-barrier-inl.h |   4
-rw-r--r--  deps/v8/src/heap/heap.cc | 235
-rw-r--r--  deps/v8/src/heap/heap.h |  92
-rw-r--r--  deps/v8/src/heap/incremental-marking-job.cc |   8
-rw-r--r--  deps/v8/src/heap/invalidated-slots-inl.h |  78
-rw-r--r--  deps/v8/src/heap/invalidated-slots.cc |  49
-rw-r--r--  deps/v8/src/heap/invalidated-slots.h |  17
-rw-r--r--  deps/v8/src/heap/mark-compact-inl.h |   6
-rw-r--r--  deps/v8/src/heap/mark-compact.cc |  86
-rw-r--r--  deps/v8/src/heap/memory-measurement.cc |  80
-rw-r--r--  deps/v8/src/heap/memory-measurement.h |  29
-rw-r--r--  deps/v8/src/heap/memory-reducer.cc |   5
-rw-r--r--  deps/v8/src/heap/object-stats.cc |  16
-rw-r--r--  deps/v8/src/heap/object-stats.h |   1
-rw-r--r--  deps/v8/src/heap/objects-visiting-inl.h |   6
-rw-r--r--  deps/v8/src/heap/objects-visiting.h | 109
-rw-r--r--  deps/v8/src/heap/remembered-set.h | 188
-rw-r--r--  deps/v8/src/heap/scavenge-job.cc |   2
-rw-r--r--  deps/v8/src/heap/scavenger.cc |  64
-rw-r--r--  deps/v8/src/heap/setup-heap-internal.cc | 168
-rw-r--r--  deps/v8/src/heap/slot-set.cc |  32
-rw-r--r--  deps/v8/src/heap/slot-set.h | 133
-rw-r--r--  deps/v8/src/heap/spaces.cc | 163
-rw-r--r--  deps/v8/src/heap/spaces.h |  32
-rw-r--r--  deps/v8/src/heap/store-buffer-inl.h |  26
-rw-r--r--  deps/v8/src/heap/store-buffer.cc | 174
-rw-r--r--  deps/v8/src/heap/store-buffer.h | 153
-rw-r--r--  deps/v8/src/heap/sweeper.cc |  32
41 files changed, 1387 insertions(+), 1439 deletions(-)
diff --git a/deps/v8/src/heap/array-buffer-collector.cc b/deps/v8/src/heap/array-buffer-collector.cc
index b6d7df8191..672d5e68f0 100644
--- a/deps/v8/src/heap/array-buffer-collector.cc
+++ b/deps/v8/src/heap/array-buffer-collector.cc
@@ -14,33 +14,22 @@
namespace v8 {
namespace internal {
-namespace {
-
-void FreeAllocationsHelper(
- Heap* heap, const std::vector<JSArrayBuffer::Allocation>& allocations) {
- for (JSArrayBuffer::Allocation alloc : allocations) {
- JSArrayBuffer::FreeBackingStore(heap->isolate(), alloc);
- }
-}
-
-} // namespace
-
void ArrayBufferCollector::QueueOrFreeGarbageAllocations(
- std::vector<JSArrayBuffer::Allocation> allocations) {
+ std::vector<std::shared_ptr<BackingStore>> backing_stores) {
if (heap_->ShouldReduceMemory()) {
- FreeAllocationsHelper(heap_, allocations);
+ // Destruct the vector, which destructs the std::shared_ptrs, freeing
+ // the backing stores.
+ backing_stores.clear();
} else {
base::MutexGuard guard(&allocations_mutex_);
- allocations_.push_back(std::move(allocations));
+ allocations_.push_back(std::move(backing_stores));
}
}
void ArrayBufferCollector::PerformFreeAllocations() {
base::MutexGuard guard(&allocations_mutex_);
- for (const std::vector<JSArrayBuffer::Allocation>& allocations :
- allocations_) {
- FreeAllocationsHelper(heap_, allocations);
- }
+ // Destruct the vector, which destructs the vectors of std::shared_ptrs,
+ // freeing the backing stores if their refcount drops to zero.
allocations_.clear();
}
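The collector above no longer calls an explicit free helper; simply dropping the std::shared_ptr references is what releases the memory. A minimal standalone sketch of that refcount behavior, using an illustrative stand-in for BackingStore rather than V8's real class:

#include <cstdio>
#include <memory>
#include <vector>

// Illustrative stand-in for v8::internal::BackingStore.
struct BackingStore {
  explicit BackingStore(size_t n) : byte_length(n) {}
  ~BackingStore() { std::printf("freed %zu bytes\n", byte_length); }
  size_t byte_length;
};

int main() {
  std::vector<std::shared_ptr<BackingStore>> queued;
  auto bs = std::make_shared<BackingStore>(4096);
  queued.push_back(bs);  // use_count() == 2: local handle + queued copy
  queued.clear();        // drops to 1: nothing is freed yet
  bs.reset();            // last reference gone: destructor runs here
  return 0;
}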
diff --git a/deps/v8/src/heap/array-buffer-collector.h b/deps/v8/src/heap/array-buffer-collector.h
index 784092e936..2d060cc595 100644
--- a/deps/v8/src/heap/array-buffer-collector.h
+++ b/deps/v8/src/heap/array-buffer-collector.h
@@ -31,7 +31,7 @@ class ArrayBufferCollector {
//
// FreeAllocations() potentially triggers a background task for processing.
void QueueOrFreeGarbageAllocations(
- std::vector<JSArrayBuffer::Allocation> allocations);
+ std::vector<std::shared_ptr<BackingStore>> allocations);
// Calls FreeAllocations() on a background thread.
void FreeAllocations();
@@ -45,7 +45,7 @@ class ArrayBufferCollector {
Heap* const heap_;
base::Mutex allocations_mutex_;
- std::vector<std::vector<JSArrayBuffer::Allocation>> allocations_;
+ std::vector<std::vector<std::shared_ptr<BackingStore>>> allocations_;
};
} // namespace internal
diff --git a/deps/v8/src/heap/array-buffer-tracker-inl.h b/deps/v8/src/heap/array-buffer-tracker-inl.h
index 763300cffe..21106cee4b 100644
--- a/deps/v8/src/heap/array-buffer-tracker-inl.h
+++ b/deps/v8/src/heap/array-buffer-tracker-inl.h
@@ -12,16 +12,31 @@
#include "src/objects/js-array-buffer-inl.h"
#include "src/objects/objects.h"
+#define TRACE_BS(...) \
+ do { \
+ if (FLAG_trace_backing_store) PrintF(__VA_ARGS__); \
+ } while (false)
+
namespace v8 {
namespace internal {
-void ArrayBufferTracker::RegisterNew(Heap* heap, JSArrayBuffer buffer) {
- if (buffer.backing_store() == nullptr) return;
+inline size_t PerIsolateAccountingLength(JSArrayBuffer buffer) {
+ // TODO(titzer): SharedArrayBuffers and shared WasmMemorys cause problems with
+ // accounting for per-isolate external memory. In particular, sharing the same
+ // array buffer or memory multiple times, which happens in stress tests, can
+ // cause overcounting, leading to GC thrashing. Fix with global accounting?
+ return buffer.is_shared() ? 0 : buffer.byte_length();
+}
+
+void ArrayBufferTracker::RegisterNew(
+ Heap* heap, JSArrayBuffer buffer,
+ std::shared_ptr<BackingStore> backing_store) {
+ if (!backing_store) return;
// ArrayBuffer tracking works only for small objects.
DCHECK(!heap->IsLargeObject(buffer));
+ DCHECK_EQ(backing_store->buffer_start(), buffer.backing_store());
- const size_t length = buffer.byte_length();
Page* page = Page::FromHeapObject(buffer);
{
base::MutexGuard guard(page->mutex());
@@ -31,44 +46,63 @@ void ArrayBufferTracker::RegisterNew(Heap* heap, JSArrayBuffer buffer) {
tracker = page->local_tracker();
}
DCHECK_NOT_NULL(tracker);
- tracker->Add(buffer, length);
+ TRACE_BS("ABT:reg bs=%p mem=%p (length=%zu) cnt=%ld\n",
+ backing_store.get(), backing_store->buffer_start(),
+ backing_store->byte_length(), backing_store.use_count());
+ tracker->Add(buffer, std::move(backing_store));
}
// TODO(wez): Remove backing-store from external memory accounting.
// We may go over the limit of externally allocated memory here. We call the
// api function to trigger a GC in this case.
+ const size_t length = PerIsolateAccountingLength(buffer);
reinterpret_cast<v8::Isolate*>(heap->isolate())
->AdjustAmountOfExternalAllocatedMemory(length);
}
-void ArrayBufferTracker::Unregister(Heap* heap, JSArrayBuffer buffer) {
- if (buffer.backing_store() == nullptr) return;
+std::shared_ptr<BackingStore> ArrayBufferTracker::Unregister(
+ Heap* heap, JSArrayBuffer buffer) {
+ std::shared_ptr<BackingStore> backing_store;
+ const size_t length = PerIsolateAccountingLength(buffer);
Page* page = Page::FromHeapObject(buffer);
- const size_t length = buffer.byte_length();
{
base::MutexGuard guard(page->mutex());
LocalArrayBufferTracker* tracker = page->local_tracker();
DCHECK_NOT_NULL(tracker);
- tracker->Remove(buffer, length);
+ backing_store = tracker->Remove(buffer);
}
// TODO(wez): Remove backing-store from external memory accounting.
heap->update_external_memory(-static_cast<intptr_t>(length));
+ return backing_store;
+}
+
+std::shared_ptr<BackingStore> ArrayBufferTracker::Lookup(Heap* heap,
+ JSArrayBuffer buffer) {
+ if (buffer.backing_store() == nullptr) return {};
+
+ Page* page = Page::FromHeapObject(buffer);
+ base::MutexGuard guard(page->mutex());
+ LocalArrayBufferTracker* tracker = page->local_tracker();
+ DCHECK_NOT_NULL(tracker);
+ return tracker->Lookup(buffer);
}
template <typename Callback>
void LocalArrayBufferTracker::Free(Callback should_free) {
size_t freed_memory = 0;
- Isolate* isolate = page_->heap()->isolate();
for (TrackingData::iterator it = array_buffers_.begin();
it != array_buffers_.end();) {
// Unchecked cast because the map might already be dead at this point.
JSArrayBuffer buffer = JSArrayBuffer::unchecked_cast(it->first);
- const size_t length = it->second.length;
+ const size_t length = PerIsolateAccountingLength(buffer);
if (should_free(buffer)) {
- JSArrayBuffer::FreeBackingStore(isolate, it->second);
+ // Destroy the shared pointer, (perhaps) freeing the backing store.
+ TRACE_BS("ABT:die bs=%p mem=%p (length=%zu) cnt=%ld\n",
+ it->second.get(), it->second->buffer_start(),
+ it->second->byte_length(), it->second.use_count());
it = array_buffers_.erase(it);
freed_memory += length;
} else {
@@ -97,35 +131,60 @@ void ArrayBufferTracker::FreeDead(Page* page, MarkingState* marking_state) {
}
}
-void LocalArrayBufferTracker::Add(JSArrayBuffer buffer, size_t length) {
+void LocalArrayBufferTracker::Add(JSArrayBuffer buffer,
+ std::shared_ptr<BackingStore> backing_store) {
+ auto length = PerIsolateAccountingLength(buffer);
page_->IncrementExternalBackingStoreBytes(
ExternalBackingStoreType::kArrayBuffer, length);
- AddInternal(buffer, length);
+ AddInternal(buffer, std::move(backing_store));
}
-void LocalArrayBufferTracker::AddInternal(JSArrayBuffer buffer, size_t length) {
- auto ret = array_buffers_.insert(
- {buffer,
- {buffer.backing_store(), length, buffer.backing_store(),
- buffer.is_wasm_memory()}});
+void LocalArrayBufferTracker::AddInternal(
+ JSArrayBuffer buffer, std::shared_ptr<BackingStore> backing_store) {
+ auto ret = array_buffers_.insert({buffer, std::move(backing_store)});
USE(ret);
// Check that we indeed inserted a new value and did not overwrite an existing
// one (which would be a bug).
DCHECK(ret.second);
}
-void LocalArrayBufferTracker::Remove(JSArrayBuffer buffer, size_t length) {
- page_->DecrementExternalBackingStoreBytes(
- ExternalBackingStoreType::kArrayBuffer, length);
-
+std::shared_ptr<BackingStore> LocalArrayBufferTracker::Remove(
+ JSArrayBuffer buffer) {
TrackingData::iterator it = array_buffers_.find(buffer);
+
// Check that we indeed find a key to remove.
DCHECK(it != array_buffers_.end());
- DCHECK_EQ(length, it->second.length);
+
+ // Steal the underlying shared pointer before erasing the entry.
+ std::shared_ptr<BackingStore> backing_store = std::move(it->second);
+
+ TRACE_BS("ABT:rm bs=%p mem=%p (length=%zu) cnt=%ld\n", backing_store.get(),
+ backing_store->buffer_start(), backing_store->byte_length(),
+ backing_store.use_count());
+
+ // Erase the entry.
array_buffers_.erase(it);
+
+ // Update accounting.
+ auto length = PerIsolateAccountingLength(buffer);
+ page_->DecrementExternalBackingStoreBytes(
+ ExternalBackingStoreType::kArrayBuffer, length);
+
+ return backing_store;
+}
+
+std::shared_ptr<BackingStore> LocalArrayBufferTracker::Lookup(
+ JSArrayBuffer buffer) {
+ TrackingData::iterator it = array_buffers_.find(buffer);
+ if (it != array_buffers_.end()) {
+ return it->second;
+ }
+ return {};
}
+#undef TRACE_BS
+
} // namespace internal
} // namespace v8
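The Add/Remove/Lookup trio above boils down to an unordered_map of shared_ptrs in which Remove moves the pointer out before erasing and Lookup hands back a copy. A rough sketch of that shape with placeholder types (BufferId and BackingStore here are illustrative, not V8's):

#include <cassert>
#include <cstdint>
#include <memory>
#include <unordered_map>

struct BackingStore { size_t byte_length = 0; };  // illustrative stand-in
using BufferId = uintptr_t;                       // stands in for JSArrayBuffer

class TrackerSketch {
 public:
  void Add(BufferId buffer, std::shared_ptr<BackingStore> bs) {
    auto ret = array_buffers_.insert({buffer, std::move(bs)});
    assert(ret.second);  // inserting the same buffer twice would be a bug
    (void)ret;
  }
  std::shared_ptr<BackingStore> Remove(BufferId buffer) {
    auto it = array_buffers_.find(buffer);
    assert(it != array_buffers_.end());
    // Steal the shared_ptr before erasing so the caller keeps the store alive.
    std::shared_ptr<BackingStore> bs = std::move(it->second);
    array_buffers_.erase(it);
    return bs;
  }
  std::shared_ptr<BackingStore> Lookup(BufferId buffer) const {
    auto it = array_buffers_.find(buffer);
    if (it == array_buffers_.end()) return {};
    return it->second;  // copying the shared_ptr bumps the refcount
  }
 private:
  std::unordered_map<BufferId, std::shared_ptr<BackingStore>> array_buffers_;
};

int main() {
  TrackerSketch tracker;
  tracker.Add(0x1000, std::make_shared<BackingStore>());
  auto kept = tracker.Remove(0x1000);  // store stays alive as long as |kept| does
  return kept ? 0 : 1;
}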
diff --git a/deps/v8/src/heap/array-buffer-tracker.cc b/deps/v8/src/heap/array-buffer-tracker.cc
index fdca6e8df2..b284a65f66 100644
--- a/deps/v8/src/heap/array-buffer-tracker.cc
+++ b/deps/v8/src/heap/array-buffer-tracker.cc
@@ -11,6 +11,11 @@
#include "src/heap/heap.h"
#include "src/heap/spaces.h"
+#define TRACE_BS(...) \
+ do { \
+ if (FLAG_trace_backing_store) PrintF(__VA_ARGS__); \
+ } while (false)
+
namespace v8 {
namespace internal {
@@ -20,7 +25,7 @@ LocalArrayBufferTracker::~LocalArrayBufferTracker() {
template <typename Callback>
void LocalArrayBufferTracker::Process(Callback callback) {
- std::vector<JSArrayBuffer::Allocation> backing_stores_to_free;
+ std::vector<std::shared_ptr<BackingStore>> backing_stores_to_free;
TrackingData kept_array_buffers;
JSArrayBuffer new_buffer;
@@ -32,8 +37,9 @@ void LocalArrayBufferTracker::Process(Callback callback) {
DCHECK_EQ(page_, Page::FromHeapObject(old_buffer));
const CallbackResult result = callback(old_buffer, &new_buffer);
if (result == kKeepEntry) {
- kept_array_buffers.insert(*it);
+ kept_array_buffers.insert(std::move(*it));
} else if (result == kUpdateEntry) {
+ DCHECK_EQ(old_buffer.byte_length(), new_buffer.byte_length());
DCHECK(!new_buffer.is_null());
Page* target_page = Page::FromHeapObject(new_buffer);
{
@@ -44,22 +50,28 @@ void LocalArrayBufferTracker::Process(Callback callback) {
tracker = target_page->local_tracker();
}
DCHECK_NOT_NULL(tracker);
- const size_t length = it->second.length;
+ const size_t length = PerIsolateAccountingLength(old_buffer);
// We should decrement before adding to avoid potential overflows in
// the external memory counters.
- DCHECK_EQ(it->first.is_wasm_memory(), it->second.is_wasm_memory);
- tracker->AddInternal(new_buffer, length);
+ tracker->AddInternal(new_buffer, std::move(it->second));
MemoryChunk::MoveExternalBackingStoreBytes(
ExternalBackingStoreType::kArrayBuffer,
static_cast<MemoryChunk*>(page_),
static_cast<MemoryChunk*>(target_page), length);
}
} else if (result == kRemoveEntry) {
- freed_memory += it->second.length;
- // We pass backing_store() and stored length to the collector for freeing
- // the backing store. Wasm allocations will go through their own tracker
- // based on the backing store.
- backing_stores_to_free.push_back(it->second);
+ freed_memory += PerIsolateAccountingLength(old_buffer);
+ auto backing_store = std::move(it->second);
+ TRACE_BS("ABT:queue bs=%p mem=%p (length=%zu) cnt=%ld\n",
+ backing_store.get(), backing_store->buffer_start(),
+ backing_store->byte_length(), backing_store.use_count());
+ if (!backing_store->is_shared()) {
+ // Only retain non-shared backing stores. For shared backing stores,
+ // drop the shared_ptr right away; this should be cheap, as it usually
+ // just decrements a refcount, and only the last reference (which is
+ // rare) actually destructs the backing store.
+ backing_stores_to_free.push_back(backing_store);
+ }
} else {
UNREACHABLE();
}
@@ -147,3 +159,4 @@ void ArrayBufferTracker::TearDown(Heap* heap) {
} // namespace internal
} // namespace v8
+#undef TRACE_BS
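The kRemoveEntry branch above defers freeing only for non-shared backing stores; shared ones simply drop their reference on the spot. A small sketch of that triage, again with an illustrative BackingStore stand-in:

#include <memory>
#include <vector>

struct BackingStore {  // illustrative stand-in
  bool is_shared = false;
};

// Shared stores: releasing the reference is a cheap refcount decrement, so do
// it immediately. Non-shared stores: batch them so the real free can happen
// later (for example, on a background thread).
void QueueOrDrop(std::shared_ptr<BackingStore> bs,
                 std::vector<std::shared_ptr<BackingStore>>* to_free) {
  if (!bs->is_shared) {
    to_free->push_back(std::move(bs));
  }
  // else: |bs| goes out of scope here and just drops its reference.
}

int main() {
  std::vector<std::shared_ptr<BackingStore>> to_free;
  QueueOrDrop(std::make_shared<BackingStore>(), &to_free);  // queued

  auto shared_store = std::make_shared<BackingStore>();
  shared_store->is_shared = true;
  QueueOrDrop(std::move(shared_store), &to_free);           // dropped immediately

  return to_free.size() == 1 ? 0 : 1;
}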
diff --git a/deps/v8/src/heap/array-buffer-tracker.h b/deps/v8/src/heap/array-buffer-tracker.h
index b7950c2506..156c226406 100644
--- a/deps/v8/src/heap/array-buffer-tracker.h
+++ b/deps/v8/src/heap/array-buffer-tracker.h
@@ -9,6 +9,7 @@
#include "src/base/platform/mutex.h"
#include "src/common/globals.h"
+#include "src/objects/backing-store.h"
#include "src/objects/js-array-buffer.h"
#include "src/utils/allocation.h"
@@ -31,8 +32,12 @@ class ArrayBufferTracker : public AllStatic {
// Register/unregister a new JSArrayBuffer |buffer| for tracking. Guards all
// access to the tracker by taking the page lock for the corresponding page.
- inline static void RegisterNew(Heap* heap, JSArrayBuffer buffer);
- inline static void Unregister(Heap* heap, JSArrayBuffer buffer);
+ inline static void RegisterNew(Heap* heap, JSArrayBuffer buffer,
+ std::shared_ptr<BackingStore>);
+ inline static std::shared_ptr<BackingStore> Unregister(Heap* heap,
+ JSArrayBuffer buffer);
+ inline static std::shared_ptr<BackingStore> Lookup(Heap* heap,
+ JSArrayBuffer buffer);
// Identifies all backing store pointers for dead JSArrayBuffers in new space.
// Does not take any locks and can only be called during Scavenge.
@@ -70,8 +75,10 @@ class LocalArrayBufferTracker {
explicit LocalArrayBufferTracker(Page* page) : page_(page) {}
~LocalArrayBufferTracker();
- inline void Add(JSArrayBuffer buffer, size_t length);
- inline void Remove(JSArrayBuffer buffer, size_t length);
+ inline void Add(JSArrayBuffer buffer,
+ std::shared_ptr<BackingStore> backing_store);
+ inline std::shared_ptr<BackingStore> Remove(JSArrayBuffer buffer);
+ inline std::shared_ptr<BackingStore> Lookup(JSArrayBuffer buffer);
// Frees up array buffers.
//
@@ -105,17 +112,13 @@ class LocalArrayBufferTracker {
}
};
- // Keep track of the backing store and the corresponding length at time of
- // registering. The length is accessed from JavaScript and can be a
- // HeapNumber. The reason for tracking the length is that in the case of
- // length being a HeapNumber, the buffer and its length may be stored on
- // different memory pages, making it impossible to guarantee order of freeing.
using TrackingData =
- std::unordered_map<JSArrayBuffer, JSArrayBuffer::Allocation, Hasher>;
+ std::unordered_map<JSArrayBuffer, std::shared_ptr<BackingStore>, Hasher>;
// Internal version of add that does not update counters. Requires separate
// logic for updating external memory counters.
- inline void AddInternal(JSArrayBuffer buffer, size_t length);
+ inline void AddInternal(JSArrayBuffer buffer,
+ std::shared_ptr<BackingStore> backing_store);
Page* page_;
// The set contains raw heap pointers which are removed by the GC upon
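TrackingData above keys an unordered_map directly on JSArrayBuffer values via a custom Hasher. A sketch of the same shape with placeholder key and hasher types (not V8's):

#include <cstdint>
#include <memory>
#include <unordered_map>

struct BackingStore {};  // illustrative stand-in

struct BufferKey {       // stands in for JSArrayBuffer
  uintptr_t ptr;
  bool operator==(const BufferKey& other) const { return ptr == other.ptr; }
};

struct BufferHasher {
  size_t operator()(const BufferKey& key) const {
    // Drop the low alignment bits so nearby objects spread across buckets.
    return static_cast<size_t>(key.ptr >> 3);
  }
};

using TrackingData =
    std::unordered_map<BufferKey, std::shared_ptr<BackingStore>, BufferHasher>;

int main() {
  TrackingData data;
  data[BufferKey{0x1000}] = std::make_shared<BackingStore>();
  return data.count(BufferKey{0x1000}) == 1 ? 0 : 1;
}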
diff --git a/deps/v8/src/heap/basic-memory-chunk.h b/deps/v8/src/heap/basic-memory-chunk.h
index 65fc072bd2..c0d4ade522 100644
--- a/deps/v8/src/heap/basic-memory-chunk.h
+++ b/deps/v8/src/heap/basic-memory-chunk.h
@@ -10,12 +10,19 @@
#include "src/base/atomic-utils.h"
#include "src/common/globals.h"
#include "src/heap/marking.h"
+#include "src/heap/slot-set.h"
namespace v8 {
namespace internal {
class MemoryChunk;
+enum RememberedSetType {
+ OLD_TO_NEW,
+ OLD_TO_OLD,
+ NUMBER_OF_REMEMBERED_SET_TYPES
+};
+
class BasicMemoryChunk {
public:
enum Flag {
@@ -170,6 +177,11 @@ class BasicMemoryChunk {
static const intptr_t kHeapOffset = kMarkBitmapOffset + kSystemPointerSize;
static const intptr_t kHeaderSentinelOffset =
kHeapOffset + kSystemPointerSize;
+ static const intptr_t kAreaStartOffset =
+ kHeaderSentinelOffset + kSystemPointerSize;
+ static const intptr_t kAreaEndOffset = kAreaStartOffset + kSystemPointerSize;
+ static const intptr_t kOldToNewSlotSetOffset =
+ kAreaEndOffset + kSystemPointerSize;
static const size_t kHeaderSize =
kSizeOffset + kSizetSize // size_t size
@@ -178,7 +190,8 @@ class BasicMemoryChunk {
+ kSystemPointerSize // Heap* heap_
+ kSystemPointerSize // Address header_sentinel_
+ kSystemPointerSize // Address area_start_
- + kSystemPointerSize; // Address area_end_
+ + kSystemPointerSize // Address area_end_
+ + kSystemPointerSize * NUMBER_OF_REMEMBERED_SET_TYPES; // SlotSet* array
protected:
// Overall size of the chunk, including the header and guards.
@@ -204,6 +217,11 @@ class BasicMemoryChunk {
Address area_start_;
Address area_end_;
+ // A single slot set for small pages (of size kPageSize) or an array of slot
+ // set for large pages. In the latter case the number of entries in the array
+ // is ceil(size() / kPageSize).
+ SlotSet* slot_set_[NUMBER_OF_REMEMBERED_SET_TYPES];
+
friend class BasicMemoryChunkValidator;
};
@@ -221,6 +239,8 @@ class BasicMemoryChunkValidator {
offsetof(BasicMemoryChunk, heap_));
STATIC_ASSERT(BasicMemoryChunk::kHeaderSentinelOffset ==
offsetof(BasicMemoryChunk, header_sentinel_));
+ STATIC_ASSERT(BasicMemoryChunk::kOldToNewSlotSetOffset ==
+ offsetof(BasicMemoryChunk, slot_set_));
};
} // namespace internal
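The new kOldToNewSlotSetOffset constant follows the existing pattern: header offsets are spelled out by hand and then pinned to the real field layout with STATIC_ASSERT. A condensed sketch of that idiom, with the field set reduced for illustration:

#include <cstddef>
#include <cstdint>

constexpr int kRememberedSetTypes = 2;  // OLD_TO_NEW, OLD_TO_OLD

struct ChunkSketch {                    // illustrative, not BasicMemoryChunk
  size_t size;
  void* heap;
  void* slot_set[kRememberedSetTypes];  // one SlotSet* per remembered-set type
};

// Offsets written out as constants so other code can refer to the layout
// without the struct definition...
constexpr size_t kSizeOffset = 0;
constexpr size_t kHeapOffset = kSizeOffset + sizeof(size_t);
constexpr size_t kSlotSetOffset = kHeapOffset + sizeof(void*);

// ...and pinned to the actual layout at compile time, so a field reorder
// breaks the build instead of silently corrupting the header.
static_assert(kSizeOffset == offsetof(ChunkSketch, size), "layout drift");
static_assert(kHeapOffset == offsetof(ChunkSketch, heap), "layout drift");
static_assert(kSlotSetOffset == offsetof(ChunkSketch, slot_set), "layout drift");

int main() { return 0; }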
diff --git a/deps/v8/src/heap/concurrent-marking.cc b/deps/v8/src/heap/concurrent-marking.cc
index 12bb28f1c8..6a155c78ea 100644
--- a/deps/v8/src/heap/concurrent-marking.cc
+++ b/deps/v8/src/heap/concurrent-marking.cc
@@ -8,7 +8,6 @@
#include <unordered_map>
#include "include/v8config.h"
-#include "src/base/template-utils.h"
#include "src/execution/isolate.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/heap-inl.h"
@@ -225,6 +224,9 @@ class ConcurrentMarkingVisitor final
}
if (weak_ref.target().IsHeapObject()) {
HeapObject target = HeapObject::cast(weak_ref.target());
+#ifdef THREAD_SANITIZER
+ MemoryChunk::FromHeapObject(target)->SynchronizedHeapLoad();
+#endif
if (marking_state_.IsBlackOrGrey(target)) {
// Record the slot inside the JSWeakRef, since the
// VisitJSObjectSubclass above didn't visit it.
@@ -247,6 +249,9 @@ class ConcurrentMarkingVisitor final
WeakCell::BodyDescriptor::IterateBody(map, weak_cell, size, this);
if (weak_cell.target().IsHeapObject()) {
HeapObject target = HeapObject::cast(weak_cell.target());
+#ifdef THREAD_SANITIZER
+ MemoryChunk::FromHeapObject(target)->SynchronizedHeapLoad();
+#endif
if (marking_state_.IsBlackOrGrey(target)) {
// Record the slot inside the WeakCell, since the IterateBody above
// didn't visit it.
@@ -478,6 +483,9 @@ class ConcurrentMarkingVisitor final
ObjectSlot key_slot =
table.RawFieldOfElementAt(EphemeronHashTable::EntryToIndex(i));
HeapObject key = HeapObject::cast(table.KeyAt(i));
+#ifdef THREAD_SANITIZER
+ MemoryChunk::FromHeapObject(key)->SynchronizedHeapLoad();
+#endif
MarkCompactCollector::RecordSlot(table, key_slot, key);
ObjectSlot value_slot =
@@ -491,6 +499,9 @@ class ConcurrentMarkingVisitor final
if (value_obj.IsHeapObject()) {
HeapObject value = HeapObject::cast(value_obj);
+#ifdef THREAD_SANITIZER
+ MemoryChunk::FromHeapObject(value)->SynchronizedHeapLoad();
+#endif
MarkCompactCollector::RecordSlot(table, value_slot, value);
// Revisit ephemerons with both key and value unreachable at end
@@ -864,8 +875,7 @@ void ConcurrentMarking::ScheduleTasks() {
DCHECK(FLAG_parallel_marking || FLAG_concurrent_marking);
DCHECK(!heap_->IsTearingDown());
base::MutexGuard guard(&pending_lock_);
- DCHECK_EQ(0, pending_task_count_);
- if (task_count_ == 0) {
+ if (total_task_count_ == 0) {
static const int num_cores =
V8::GetCurrentPlatform()->NumberOfWorkerThreads() + 1;
#if defined(V8_OS_MACOSX)
@@ -873,15 +883,18 @@ void ConcurrentMarking::ScheduleTasks() {
// marking on competing hyper-threads (regresses Octane/Splay). As such,
// only use num_cores/2, leaving one of those for the main thread.
// TODO(ulan): Use all cores on Mac 10.12+.
- task_count_ = Max(1, Min(kMaxTasks, (num_cores / 2) - 1));
+ total_task_count_ = Max(1, Min(kMaxTasks, (num_cores / 2) - 1));
#else // defined(OS_MACOSX)
// On other platforms use all logical cores, leaving one for the main
// thread.
- task_count_ = Max(1, Min(kMaxTasks, num_cores - 1));
+ total_task_count_ = Max(1, Min(kMaxTasks, num_cores - 1));
#endif // defined(OS_MACOSX)
+ DCHECK_LE(total_task_count_, kMaxTasks);
+ // One task is for the main thread.
+ STATIC_ASSERT(kMaxTasks + 1 <= MarkingWorklist::kMaxNumTasks);
}
// Task id 0 is for the main thread.
- for (int i = 1; i <= task_count_; i++) {
+ for (int i = 1; i <= total_task_count_; i++) {
if (!is_pending_[i]) {
if (FLAG_trace_concurrent_marking) {
heap_->isolate()->PrintWithTimestamp(
@@ -894,12 +907,12 @@ void ConcurrentMarking::ScheduleTasks() {
is_pending_[i] = true;
++pending_task_count_;
auto task =
- base::make_unique<Task>(heap_->isolate(), this, &task_state_[i], i);
+ std::make_unique<Task>(heap_->isolate(), this, &task_state_[i], i);
cancelable_id_[i] = task->id();
V8::GetCurrentPlatform()->CallOnWorkerThread(std::move(task));
}
}
- DCHECK_EQ(task_count_, pending_task_count_);
+ DCHECK_EQ(total_task_count_, pending_task_count_);
}
void ConcurrentMarking::RescheduleTasksIfNeeded() {
@@ -907,11 +920,15 @@ void ConcurrentMarking::RescheduleTasksIfNeeded() {
if (heap_->IsTearingDown()) return;
{
base::MutexGuard guard(&pending_lock_);
- if (pending_task_count_ > 0) return;
+ // The total task count is initialized in ScheduleTasks from
+ // NumberOfWorkerThreads of the platform.
+ if (total_task_count_ > 0 && pending_task_count_ == total_task_count_) {
+ return;
+ }
}
if (!shared_->IsGlobalPoolEmpty() ||
- !weak_objects_->current_ephemerons.IsEmpty() ||
- !weak_objects_->discovered_ephemerons.IsEmpty()) {
+ !weak_objects_->current_ephemerons.IsGlobalPoolEmpty() ||
+ !weak_objects_->discovered_ephemerons.IsGlobalPoolEmpty()) {
ScheduleTasks();
}
}
@@ -925,7 +942,7 @@ bool ConcurrentMarking::Stop(StopRequest stop_request) {
if (stop_request != StopRequest::COMPLETE_TASKS_FOR_TESTING) {
CancelableTaskManager* task_manager =
heap_->isolate()->cancelable_task_manager();
- for (int i = 1; i <= task_count_; i++) {
+ for (int i = 1; i <= total_task_count_; i++) {
if (is_pending_[i]) {
if (task_manager->TryAbort(cancelable_id_[i]) ==
TryAbortResult::kTaskAborted) {
@@ -940,7 +957,7 @@ bool ConcurrentMarking::Stop(StopRequest stop_request) {
while (pending_task_count_ > 0) {
pending_condition_.Wait(&pending_lock_);
}
- for (int i = 1; i <= task_count_; i++) {
+ for (int i = 1; i <= total_task_count_; i++) {
DCHECK(!is_pending_[i]);
}
return true;
@@ -956,7 +973,7 @@ bool ConcurrentMarking::IsStopped() {
void ConcurrentMarking::FlushMemoryChunkData(
MajorNonAtomicMarkingState* marking_state) {
DCHECK_EQ(pending_task_count_, 0);
- for (int i = 1; i <= task_count_; i++) {
+ for (int i = 1; i <= total_task_count_; i++) {
MemoryChunkDataMap& memory_chunk_data = task_state_[i].memory_chunk_data;
for (auto& pair : memory_chunk_data) {
// ClearLiveness sets the live bytes to zero.
@@ -978,7 +995,7 @@ void ConcurrentMarking::FlushMemoryChunkData(
}
void ConcurrentMarking::ClearMemoryChunkData(MemoryChunk* chunk) {
- for (int i = 1; i <= task_count_; i++) {
+ for (int i = 1; i <= total_task_count_; i++) {
auto it = task_state_[i].memory_chunk_data.find(chunk);
if (it != task_state_[i].memory_chunk_data.end()) {
it->second.live_bytes = 0;
@@ -989,7 +1006,7 @@ void ConcurrentMarking::ClearMemoryChunkData(MemoryChunk* chunk) {
size_t ConcurrentMarking::TotalMarkedBytes() {
size_t result = 0;
- for (int i = 1; i <= task_count_; i++) {
+ for (int i = 1; i <= total_task_count_; i++) {
result +=
base::AsAtomicWord::Relaxed_Load<size_t>(&task_state_[i].marked_bytes);
}
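The total_task_count_ initialization above encodes a simple heuristic: one core is reserved for the main thread, macOS uses only half its logical cores, and the result is clamped to [1, kMaxTasks]. A sketch of that calculation (the kMaxTasks value passed in is illustrative):

#include <algorithm>
#include <cstdio>
#include <thread>

int TotalMarkingTasks(bool is_macos, int max_tasks /* e.g. 7 */) {
  // +1 mirrors NumberOfWorkerThreads() + 1 in the diff above.
  const int num_cores =
      static_cast<int>(std::thread::hardware_concurrency()) + 1;
  // macOS: use half the cores to avoid hyper-thread contention; elsewhere use
  // all of them. Either way, leave one core for the main thread.
  const int candidate = is_macos ? (num_cores / 2) - 1 : num_cores - 1;
  return std::max(1, std::min(max_tasks, candidate));
}

int main() {
  std::printf("marking tasks on this machine: %d\n",
              TotalMarkingTasks(false, 7));
  return 0;
}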
diff --git a/deps/v8/src/heap/concurrent-marking.h b/deps/v8/src/heap/concurrent-marking.h
index be2fc03d46..c08a9c47b0 100644
--- a/deps/v8/src/heap/concurrent-marking.h
+++ b/deps/v8/src/heap/concurrent-marking.h
@@ -5,6 +5,8 @@
#ifndef V8_HEAP_CONCURRENT_MARKING_H_
#define V8_HEAP_CONCURRENT_MARKING_H_
+#include <memory>
+
#include "include/v8-platform.h"
#include "src/base/atomic-utils.h"
#include "src/base/platform/condition-variable.h"
@@ -86,8 +88,6 @@ class V8_EXPORT_PRIVATE ConcurrentMarking {
// scavenge and is going to be re-used.
void ClearMemoryChunkData(MemoryChunk* chunk);
- int TaskCount() { return task_count_; }
-
// Checks if all threads are stopped.
bool IsStopped();
@@ -124,7 +124,7 @@ class V8_EXPORT_PRIVATE ConcurrentMarking {
int pending_task_count_ = 0;
bool is_pending_[kMaxTasks + 1] = {};
CancelableTaskManager::Id cancelable_id_[kMaxTasks + 1] = {};
- int task_count_ = 0;
+ int total_task_count_ = 0;
};
} // namespace internal
diff --git a/deps/v8/src/heap/embedder-tracing.h b/deps/v8/src/heap/embedder-tracing.h
index 7c67ccfab7..a150f2c26a 100644
--- a/deps/v8/src/heap/embedder-tracing.h
+++ b/deps/v8/src/heap/embedder-tracing.h
@@ -57,7 +57,12 @@ class V8_EXPORT_PRIVATE LocalEmbedderHeapTracer final {
bool IsRootForNonTracingGC(const v8::TracedGlobal<v8::Value>& handle) {
return !InUse() || remote_tracer_->IsRootForNonTracingGC(handle);
}
- void ResetHandleInNonTracingGC(const v8::TracedGlobal<v8::Value>& handle) {
+
+ bool IsRootForNonTracingGC(const v8::TracedReference<v8::Value>& handle) {
+ return !InUse() || remote_tracer_->IsRootForNonTracingGC(handle);
+ }
+
+ void ResetHandleInNonTracingGC(const v8::TracedReference<v8::Value>& handle) {
// Resetting is only called when IsRootForNonTracingGC returns false, which
// can only happen when the EmbedderHeapTracer is set on the API level.
DCHECK(InUse());
diff --git a/deps/v8/src/heap/factory-inl.h b/deps/v8/src/heap/factory-inl.h
index bcad5d2714..f0f61bbb2c 100644
--- a/deps/v8/src/heap/factory-inl.h
+++ b/deps/v8/src/heap/factory-inl.h
@@ -43,44 +43,41 @@ Handle<String> Factory::NewSubString(Handle<String> str, int begin, int end) {
return NewProperSubString(str, begin, end);
}
-Handle<Object> Factory::NewNumberFromSize(size_t value,
- AllocationType allocation) {
+Handle<Object> Factory::NewNumberFromSize(size_t value) {
// We can't use Smi::IsValid() here because that operates on a signed
// intptr_t, and casting from size_t could create a bogus sign bit.
if (value <= static_cast<size_t>(Smi::kMaxValue)) {
return Handle<Object>(Smi::FromIntptr(static_cast<intptr_t>(value)),
isolate());
}
- return NewNumber(static_cast<double>(value), allocation);
+ return NewNumber(static_cast<double>(value));
}
-Handle<Object> Factory::NewNumberFromInt64(int64_t value,
- AllocationType allocation) {
+Handle<Object> Factory::NewNumberFromInt64(int64_t value) {
if (value <= std::numeric_limits<int32_t>::max() &&
value >= std::numeric_limits<int32_t>::min() &&
Smi::IsValid(static_cast<int32_t>(value))) {
return Handle<Object>(Smi::FromInt(static_cast<int32_t>(value)), isolate());
}
- return NewNumber(static_cast<double>(value), allocation);
+ return NewNumber(static_cast<double>(value));
}
-Handle<HeapNumber> Factory::NewHeapNumber(double value,
- AllocationType allocation) {
- Handle<HeapNumber> heap_number = NewHeapNumber(allocation);
+template <AllocationType allocation>
+Handle<HeapNumber> Factory::NewHeapNumber(double value) {
+ Handle<HeapNumber> heap_number = NewHeapNumber<allocation>();
heap_number->set_value(value);
return heap_number;
}
-Handle<HeapNumber> Factory::NewHeapNumberFromBits(uint64_t bits,
- AllocationType allocation) {
- Handle<HeapNumber> heap_number = NewHeapNumber(allocation);
+template <AllocationType allocation>
+Handle<HeapNumber> Factory::NewHeapNumberFromBits(uint64_t bits) {
+ Handle<HeapNumber> heap_number = NewHeapNumber<allocation>();
heap_number->set_value_as_bits(bits);
return heap_number;
}
-Handle<HeapNumber> Factory::NewHeapNumberWithHoleNaN(
- AllocationType allocation) {
- return NewHeapNumberFromBits(kHoleNanInt64, allocation);
+Handle<HeapNumber> Factory::NewHeapNumberWithHoleNaN() {
+ return NewHeapNumberFromBits(kHoleNanInt64);
}
Handle<JSArray> Factory::NewJSArrayWithElements(Handle<FixedArrayBase> elements,
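NewHeapNumber and friends above trade a runtime AllocationType argument for a template parameter, with the handful of needed instantiations spelled out explicitly in factory.cc. A generic sketch of that pattern (the function body here is illustrative, not V8's allocator):

#include <memory>

enum class AllocationType { kYoung, kOld, kReadOnly };

// The allocation choice is now a compile-time constant, so each instantiation
// can branch (or be specialized) without any runtime dispatch.
template <AllocationType allocation>
std::unique_ptr<double> NewHeapNumberSketch(double value) {
  // A real implementation would pick an allocator based on |allocation|.
  return std::make_unique<double>(value);
}

// Explicit instantiations keep the definition in one translation unit while
// headers declare only the template, mirroring the factory.cc change.
template std::unique_ptr<double>
NewHeapNumberSketch<AllocationType::kYoung>(double);
template std::unique_ptr<double>
NewHeapNumberSketch<AllocationType::kOld>(double);
template std::unique_ptr<double>
NewHeapNumberSketch<AllocationType::kReadOnly>(double);

int main() {
  auto n = NewHeapNumberSketch<AllocationType::kOld>(1.5);
  return *n == 1.5 ? 0 : 1;
}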
diff --git a/deps/v8/src/heap/factory.cc b/deps/v8/src/heap/factory.cc
index 9bf46be6e8..721682f00f 100644
--- a/deps/v8/src/heap/factory.cc
+++ b/deps/v8/src/heap/factory.cc
@@ -11,6 +11,7 @@
#include "src/builtins/constants-table-builder.h"
#include "src/codegen/compiler.h"
#include "src/execution/isolate-inl.h"
+#include "src/execution/protectors-inl.h"
#include "src/heap/heap-inl.h"
#include "src/heap/incremental-marking.h"
#include "src/heap/mark-compact-inl.h"
@@ -117,11 +118,11 @@ MaybeHandle<Code> Factory::CodeBuilder::BuildInternal(
CodePageCollectionMemoryModificationScope code_allocation(heap);
HeapObject result;
if (retry_allocation_or_fail) {
- result =
- heap->AllocateRawWithRetryOrFail(object_size, AllocationType::kCode);
+ result = heap->AllocateRawWith<Heap::kRetryOrFail>(object_size,
+ AllocationType::kCode);
} else {
- result =
- heap->AllocateRawWithLightRetry(object_size, AllocationType::kCode);
+ result = heap->AllocateRawWith<Heap::kLightRetry>(object_size,
+ AllocationType::kCode);
// Return an empty handle if we cannot allocate the code object.
if (result.is_null()) return MaybeHandle<Code>();
}
@@ -209,8 +210,8 @@ HeapObject Factory::AllocateRawWithImmortalMap(int size,
AllocationType allocation,
Map map,
AllocationAlignment alignment) {
- HeapObject result = isolate()->heap()->AllocateRawWithRetryOrFail(
- size, allocation, alignment);
+ HeapObject result = isolate()->heap()->AllocateRawWith<Heap::kRetryOrFail>(
+ size, allocation, AllocationOrigin::kRuntime, alignment);
result.set_map_after_allocation(map, SKIP_WRITE_BARRIER);
return result;
}
@@ -222,7 +223,7 @@ HeapObject Factory::AllocateRawWithAllocationSite(
int size = map->instance_size();
if (!allocation_site.is_null()) size += AllocationMemento::kSize;
HeapObject result =
- isolate()->heap()->AllocateRawWithRetryOrFail(size, allocation);
+ isolate()->heap()->AllocateRawWith<Heap::kRetryOrFail>(size, allocation);
WriteBarrierMode write_barrier_mode = allocation == AllocationType::kYoung
? SKIP_WRITE_BARRIER
: UPDATE_WRITE_BARRIER;
@@ -247,7 +248,7 @@ void Factory::InitializeAllocationMemento(AllocationMemento memento,
HeapObject Factory::AllocateRawArray(int size, AllocationType allocation) {
HeapObject result =
- isolate()->heap()->AllocateRawWithRetryOrFail(size, allocation);
+ isolate()->heap()->AllocateRawWith<Heap::kRetryOrFail>(size, allocation);
if (size > kMaxRegularHeapObjectSize && FLAG_use_marking_progress_bar) {
MemoryChunk* chunk = MemoryChunk::FromHeapObject(result);
chunk->SetFlag<AccessMode::ATOMIC>(MemoryChunk::HAS_PROGRESS_BAR);
@@ -275,7 +276,7 @@ HeapObject Factory::New(Handle<Map> map, AllocationType allocation) {
DCHECK(map->instance_type() != MAP_TYPE);
int size = map->instance_size();
HeapObject result =
- isolate()->heap()->AllocateRawWithRetryOrFail(size, allocation);
+ isolate()->heap()->AllocateRawWith<Heap::kRetryOrFail>(size, allocation);
// New space objects are allocated white.
WriteBarrierMode write_barrier_mode = allocation == AllocationType::kYoung
? SKIP_WRITE_BARRIER
@@ -289,8 +290,8 @@ Handle<HeapObject> Factory::NewFillerObject(int size, bool double_align,
AllocationOrigin origin) {
AllocationAlignment alignment = double_align ? kDoubleAligned : kWordAligned;
Heap* heap = isolate()->heap();
- HeapObject result =
- heap->AllocateRawWithRetryOrFail(size, allocation, origin, alignment);
+ HeapObject result = heap->AllocateRawWith<Heap::kRetryOrFail>(
+ size, allocation, origin, alignment);
heap->CreateFillerObjectAt(result.address(), size, ClearRecordedSlots::kNo);
return Handle<HeapObject>(result, isolate());
}
@@ -323,17 +324,6 @@ Handle<Tuple2> Factory::NewTuple2(Handle<Object> value1, Handle<Object> value2,
return result;
}
-Handle<Tuple3> Factory::NewTuple3(Handle<Object> value1, Handle<Object> value2,
- Handle<Object> value3,
- AllocationType allocation) {
- Handle<Tuple3> result =
- Handle<Tuple3>::cast(NewStruct(TUPLE3_TYPE, allocation));
- result->set_value1(*value1);
- result->set_value2(*value2);
- result->set_value3(*value3);
- return result;
-}
-
Handle<ArrayBoilerplateDescription> Factory::NewArrayBoilerplateDescription(
ElementsKind elements_kind, Handle<FixedArrayBase> constant_values) {
Handle<ArrayBoilerplateDescription> result =
@@ -358,24 +348,23 @@ Handle<TemplateObjectDescription> Factory::NewTemplateObjectDescription(
Handle<Oddball> Factory::NewOddball(Handle<Map> map, const char* to_string,
Handle<Object> to_number,
- const char* type_of, byte kind,
- AllocationType allocation) {
- Handle<Oddball> oddball(Oddball::cast(New(map, allocation)), isolate());
+ const char* type_of, byte kind) {
+ Handle<Oddball> oddball(Oddball::cast(New(map, AllocationType::kReadOnly)),
+ isolate());
Oddball::Initialize(isolate(), oddball, to_string, to_number, type_of, kind);
return oddball;
}
-Handle<Oddball> Factory::NewSelfReferenceMarker(AllocationType allocation) {
+Handle<Oddball> Factory::NewSelfReferenceMarker() {
return NewOddball(self_reference_marker_map(), "self_reference_marker",
handle(Smi::FromInt(-1), isolate()), "undefined",
- Oddball::kSelfReferenceMarker, allocation);
+ Oddball::kSelfReferenceMarker);
}
-Handle<PropertyArray> Factory::NewPropertyArray(int length,
- AllocationType allocation) {
+Handle<PropertyArray> Factory::NewPropertyArray(int length) {
DCHECK_LE(0, length);
if (length == 0) return empty_property_array();
- HeapObject result = AllocateRawFixedArray(length, allocation);
+ HeapObject result = AllocateRawFixedArray(length, AllocationType::kYoung);
result.set_map_after_allocation(*property_array_map(), SKIP_WRITE_BARRIER);
Handle<PropertyArray> array(PropertyArray::cast(result), isolate());
array->initialize_length(length);
@@ -419,7 +408,7 @@ Handle<T> Factory::NewWeakFixedArrayWithMap(RootIndex map_root_index,
DCHECK_LT(0, length);
HeapObject result =
- AllocateRawArray(WeakFixedArray::SizeFor(length), allocation);
+ AllocateRawArray(WeakFixedArray::SizeFor(length), AllocationType::kOld);
Map map = Map::cast(isolate()->root(map_root_index));
result.set_map_after_allocation(map, SKIP_WRITE_BARRIER);
@@ -485,8 +474,7 @@ Handle<FixedArray> Factory::NewFixedArrayWithHoles(int length,
*the_hole_value(), allocation);
}
-Handle<FixedArray> Factory::NewUninitializedFixedArray(
- int length, AllocationType allocation) {
+Handle<FixedArray> Factory::NewUninitializedFixedArray(int length) {
DCHECK_LE(0, length);
if (length == 0) return empty_fixed_array();
@@ -494,30 +482,30 @@ Handle<FixedArray> Factory::NewUninitializedFixedArray(
// array. After getting canary/performance coverage, either remove the
// function or revert to returning uninitialized array.
return NewFixedArrayWithFiller(RootIndex::kFixedArrayMap, length,
- *undefined_value(), allocation);
+ *undefined_value(), AllocationType::kYoung);
}
Handle<ClosureFeedbackCellArray> Factory::NewClosureFeedbackCellArray(
- int length, AllocationType allocation) {
+ int length) {
if (length == 0) return empty_closure_feedback_cell_array();
Handle<ClosureFeedbackCellArray> feedback_cell_array =
NewFixedArrayWithMap<ClosureFeedbackCellArray>(
- RootIndex::kClosureFeedbackCellArrayMap, length, allocation);
+ RootIndex::kClosureFeedbackCellArrayMap, length,
+ AllocationType::kYoung);
return feedback_cell_array;
}
Handle<FeedbackVector> Factory::NewFeedbackVector(
Handle<SharedFunctionInfo> shared,
- Handle<ClosureFeedbackCellArray> closure_feedback_cell_array,
- AllocationType allocation) {
+ Handle<ClosureFeedbackCellArray> closure_feedback_cell_array) {
int length = shared->feedback_metadata().slot_count();
DCHECK_LE(0, length);
int size = FeedbackVector::SizeFor(length);
- HeapObject result =
- AllocateRawWithImmortalMap(size, allocation, *feedback_vector_map());
+ HeapObject result = AllocateRawWithImmortalMap(size, AllocationType::kOld,
+ *feedback_vector_map());
Handle<FeedbackVector> vector(FeedbackVector::cast(result), isolate());
vector->set_shared_function_info(*shared);
vector->set_optimized_code_weak_or_smi(MaybeObject::FromSmi(Smi::FromEnum(
@@ -534,13 +522,12 @@ Handle<FeedbackVector> Factory::NewFeedbackVector(
return vector;
}
-Handle<EmbedderDataArray> Factory::NewEmbedderDataArray(
- int length, AllocationType allocation) {
+Handle<EmbedderDataArray> Factory::NewEmbedderDataArray(int length) {
DCHECK_LE(0, length);
int size = EmbedderDataArray::SizeFor(length);
- HeapObject result =
- AllocateRawWithImmortalMap(size, allocation, *embedder_data_array_map());
+ HeapObject result = AllocateRawWithImmortalMap(size, AllocationType::kYoung,
+ *embedder_data_array_map());
Handle<EmbedderDataArray> array(EmbedderDataArray::cast(result), isolate());
array->set_length(length);
@@ -589,25 +576,23 @@ Handle<ObjectBoilerplateDescription> Factory::NewObjectBoilerplateDescription(
return description;
}
-Handle<FixedArrayBase> Factory::NewFixedDoubleArray(int length,
- AllocationType allocation) {
+Handle<FixedArrayBase> Factory::NewFixedDoubleArray(int length) {
if (length == 0) return empty_fixed_array();
if (length < 0 || length > FixedDoubleArray::kMaxLength) {
isolate()->heap()->FatalProcessOutOfMemory("invalid array length");
}
int size = FixedDoubleArray::SizeFor(length);
Map map = *fixed_double_array_map();
- HeapObject result =
- AllocateRawWithImmortalMap(size, allocation, map, kDoubleAligned);
+ HeapObject result = AllocateRawWithImmortalMap(size, AllocationType::kYoung,
+ map, kDoubleAligned);
Handle<FixedDoubleArray> array(FixedDoubleArray::cast(result), isolate());
array->set_length(length);
return array;
}
-Handle<FixedArrayBase> Factory::NewFixedDoubleArrayWithHoles(
- int length, AllocationType allocation) {
+Handle<FixedArrayBase> Factory::NewFixedDoubleArrayWithHoles(int length) {
DCHECK_LE(0, length);
- Handle<FixedArrayBase> array = NewFixedDoubleArray(length, allocation);
+ Handle<FixedArrayBase> array = NewFixedDoubleArray(length);
if (length > 0) {
Handle<FixedDoubleArray>::cast(array)->FillWithHoles(0, length);
}
@@ -633,11 +618,10 @@ Handle<FeedbackMetadata> Factory::NewFeedbackMetadata(
return data;
}
-Handle<FrameArray> Factory::NewFrameArray(int number_of_frames,
- AllocationType allocation) {
+Handle<FrameArray> Factory::NewFrameArray(int number_of_frames) {
DCHECK_LE(0, number_of_frames);
- Handle<FixedArray> result = NewFixedArrayWithHoles(
- FrameArray::LengthFor(number_of_frames), allocation);
+ Handle<FixedArray> result =
+ NewFixedArrayWithHoles(FrameArray::LengthFor(number_of_frames));
result->set(FrameArray::kFrameCountIndex, Smi::kZero);
return Handle<FrameArray>::cast(result);
}
@@ -1438,7 +1422,7 @@ Handle<Context> Factory::NewContext(RootIndex map_root_index, int size,
Map map = Map::cast(isolate()->root(map_root_index));
HeapObject result = AllocateRawWithImmortalMap(size, allocation, map);
Handle<Context> context(Context::cast(result), isolate());
- context->set_length(variadic_part_length);
+ context->initialize_length_and_extension_bit(variadic_part_length);
DCHECK_EQ(context->SizeFromMap(map), size);
if (size > Context::kTodoHeaderSize) {
ObjectSlot start = context->RawField(Context::kTodoHeaderSize);
@@ -1461,6 +1445,7 @@ Handle<NativeContext> Factory::NewNativeContext() {
context->set_math_random_index(Smi::zero());
context->set_serialized_objects(*empty_fixed_array());
context->set_microtask_queue(nullptr);
+ context->set_osr_code_cache(*empty_weak_fixed_array());
return context;
}
@@ -1549,8 +1534,8 @@ Handle<Context> Factory::NewDebugEvaluateContext(Handle<Context> previous,
Handle<ScopeInfo> scope_info,
Handle<JSReceiver> extension,
Handle<Context> wrapped,
- Handle<StringSet> whitelist) {
- STATIC_ASSERT(Context::WHITE_LIST_INDEX == Context::MIN_CONTEXT_SLOTS + 1);
+ Handle<StringSet> blacklist) {
+ STATIC_ASSERT(Context::BLACK_LIST_INDEX == Context::MIN_CONTEXT_SLOTS + 1);
DCHECK(scope_info->IsDebugEvaluateScope());
Handle<HeapObject> ext = extension.is_null()
? Handle<HeapObject>::cast(the_hole_value())
@@ -1565,7 +1550,7 @@ Handle<Context> Factory::NewDebugEvaluateContext(Handle<Context> previous,
c->set_native_context(previous->native_context());
c->set_extension(*ext);
if (!wrapped.is_null()) c->set(Context::WRAPPED_CONTEXT_INDEX, *wrapped);
- if (!whitelist.is_null()) c->set(Context::WHITE_LIST_INDEX, *whitelist);
+ if (!blacklist.is_null()) c->set(Context::BLACK_LIST_INDEX, *blacklist);
return c;
}
@@ -1648,20 +1633,16 @@ Handle<AccessorInfo> Factory::NewAccessorInfo() {
return info;
}
-Handle<Script> Factory::NewScript(Handle<String> source,
- AllocationType allocation) {
- return NewScriptWithId(source, isolate()->heap()->NextScriptId(), allocation);
+Handle<Script> Factory::NewScript(Handle<String> source) {
+ return NewScriptWithId(source, isolate()->heap()->NextScriptId());
}
-Handle<Script> Factory::NewScriptWithId(Handle<String> source, int script_id,
- AllocationType allocation) {
- DCHECK(allocation == AllocationType::kOld ||
- allocation == AllocationType::kReadOnly);
+Handle<Script> Factory::NewScriptWithId(Handle<String> source, int script_id) {
// Create and initialize script object.
Heap* heap = isolate()->heap();
ReadOnlyRoots roots(heap);
Handle<Script> script =
- Handle<Script>::cast(NewStruct(SCRIPT_TYPE, allocation));
+ Handle<Script>::cast(NewStruct(SCRIPT_TYPE, AllocationType::kOld));
script->set_source(*source);
script->set_name(roots.undefined_value());
script->set_id(script_id);
@@ -1748,20 +1729,19 @@ Handle<PromiseResolveThenableJobTask> Factory::NewPromiseResolveThenableJobTask(
return microtask;
}
-Handle<Foreign> Factory::NewForeign(Address addr, AllocationType allocation) {
+Handle<Foreign> Factory::NewForeign(Address addr) {
// Statically ensure that it is safe to allocate foreigns in paged spaces.
STATIC_ASSERT(Foreign::kSize <= kMaxRegularHeapObjectSize);
Map map = *foreign_map();
- HeapObject result =
- AllocateRawWithImmortalMap(map.instance_size(), allocation, map);
+ HeapObject result = AllocateRawWithImmortalMap(map.instance_size(),
+ AllocationType::kYoung, map);
Handle<Foreign> foreign(Foreign::cast(result), isolate());
foreign->set_foreign_address(addr);
return foreign;
}
Handle<ByteArray> Factory::NewByteArray(int length, AllocationType allocation) {
- DCHECK_LE(0, length);
- if (length > ByteArray::kMaxLength) {
+ if (length < 0 || length > ByteArray::kMaxLength) {
isolate()->heap()->FatalProcessOutOfMemory("invalid array length");
}
int size = ByteArray::SizeFor(length);
@@ -1776,8 +1756,7 @@ Handle<ByteArray> Factory::NewByteArray(int length, AllocationType allocation) {
Handle<BytecodeArray> Factory::NewBytecodeArray(
int length, const byte* raw_bytecodes, int frame_size, int parameter_count,
Handle<FixedArray> constant_pool) {
- DCHECK_LE(0, length);
- if (length > BytecodeArray::kMaxLength) {
+ if (length < 0 || length > BytecodeArray::kMaxLength) {
isolate()->heap()->FatalProcessOutOfMemory("invalid array length");
}
// Bytecode array is AllocationType::kOld, so constant pool array should be
@@ -1806,7 +1785,6 @@ Handle<BytecodeArray> Factory::NewBytecodeArray(
}
Handle<Cell> Factory::NewCell(Handle<Object> value) {
- AllowDeferredHandleDereference convert_to_cell;
STATIC_ASSERT(Cell::kSize <= kMaxRegularHeapObjectSize);
HeapObject result = AllocateRawWithImmortalMap(
Cell::kSize, AllocationType::kOld, *cell_map());
@@ -1816,7 +1794,6 @@ Handle<Cell> Factory::NewCell(Handle<Object> value) {
}
Handle<FeedbackCell> Factory::NewNoClosuresCell(Handle<HeapObject> value) {
- AllowDeferredHandleDereference convert_to_cell;
HeapObject result = AllocateRawWithImmortalMap(FeedbackCell::kAlignedSize,
AllocationType::kOld, *no_closures_cell_map());
Handle<FeedbackCell> cell(FeedbackCell::cast(result), isolate());
@@ -1827,7 +1804,6 @@ Handle<FeedbackCell> Factory::NewNoClosuresCell(Handle<HeapObject> value) {
}
Handle<FeedbackCell> Factory::NewOneClosureCell(Handle<HeapObject> value) {
- AllowDeferredHandleDereference convert_to_cell;
HeapObject result = AllocateRawWithImmortalMap(FeedbackCell::kAlignedSize,
AllocationType::kOld, *one_closure_cell_map());
Handle<FeedbackCell> cell(FeedbackCell::cast(result), isolate());
@@ -1838,7 +1814,6 @@ Handle<FeedbackCell> Factory::NewOneClosureCell(Handle<HeapObject> value) {
}
Handle<FeedbackCell> Factory::NewManyClosuresCell(Handle<HeapObject> value) {
- AllowDeferredHandleDereference convert_to_cell;
HeapObject result = AllocateRawWithImmortalMap(FeedbackCell::kAlignedSize,
AllocationType::kOld, *many_closures_cell_map());
Handle<FeedbackCell> cell(FeedbackCell::cast(result), isolate());
@@ -1864,15 +1839,13 @@ Handle<PropertyCell> Factory::NewPropertyCell(Handle<Name> name,
}
Handle<DescriptorArray> Factory::NewDescriptorArray(int number_of_descriptors,
- int slack,
- AllocationType allocation) {
- DCHECK(Heap::IsRegularObjectAllocation(allocation));
+ int slack) {
int number_of_all_descriptors = number_of_descriptors + slack;
// Zero-length case must be handled outside.
DCHECK_LT(0, number_of_all_descriptors);
int size = DescriptorArray::SizeFor(number_of_all_descriptors);
- HeapObject obj =
- isolate()->heap()->AllocateRawWithRetryOrFail(size, allocation);
+ HeapObject obj = isolate()->heap()->AllocateRawWith<Heap::kRetryOrFail>(
+ size, AllocationType::kYoung);
obj.set_map_after_allocation(*descriptor_array_map(), SKIP_WRITE_BARRIER);
DescriptorArray array = DescriptorArray::cast(obj);
array.Initialize(*empty_enum_cache(), *undefined_value(),
@@ -1923,7 +1896,7 @@ Handle<Map> Factory::NewMap(InstanceType type, int instance_size,
!Map::CanHaveFastTransitionableElementsKind(type),
IsDictionaryElementsKind(elements_kind) ||
IsTerminalElementsKind(elements_kind));
- HeapObject result = isolate()->heap()->AllocateRawWithRetryOrFail(
+ HeapObject result = isolate()->heap()->AllocateRawWith<Heap::kRetryOrFail>(
Map::kSize, AllocationType::kMap);
result.set_map_after_allocation(*meta_map(), SKIP_WRITE_BARRIER);
return handle(InitializeMap(Map::cast(result), type, instance_size,
@@ -1985,23 +1958,23 @@ Handle<JSObject> Factory::CopyJSObjectWithAllocationSite(
// We can only clone regexps, normal objects, api objects, errors or arrays.
// Copying anything else will break invariants.
- CHECK(map->instance_type() == JS_REGEXP_TYPE ||
+ CHECK(map->instance_type() == JS_REG_EXP_TYPE ||
map->instance_type() == JS_OBJECT_TYPE ||
map->instance_type() == JS_ERROR_TYPE ||
map->instance_type() == JS_ARRAY_TYPE ||
map->instance_type() == JS_API_OBJECT_TYPE ||
- map->instance_type() == WASM_GLOBAL_TYPE ||
- map->instance_type() == WASM_INSTANCE_TYPE ||
- map->instance_type() == WASM_MEMORY_TYPE ||
- map->instance_type() == WASM_MODULE_TYPE ||
- map->instance_type() == WASM_TABLE_TYPE ||
+ map->instance_type() == WASM_GLOBAL_OBJECT_TYPE ||
+ map->instance_type() == WASM_INSTANCE_OBJECT_TYPE ||
+ map->instance_type() == WASM_MEMORY_OBJECT_TYPE ||
+ map->instance_type() == WASM_MODULE_OBJECT_TYPE ||
+ map->instance_type() == WASM_TABLE_OBJECT_TYPE ||
map->instance_type() == JS_SPECIAL_API_OBJECT_TYPE);
DCHECK(site.is_null() || AllocationSite::CanTrack(map->instance_type()));
int object_size = map->instance_size();
int adjusted_object_size =
site.is_null() ? object_size : object_size + AllocationMemento::kSize;
- HeapObject raw_clone = isolate()->heap()->AllocateRawWithRetryOrFail(
+ HeapObject raw_clone = isolate()->heap()->AllocateRawWith<Heap::kRetryOrFail>(
adjusted_object_size, AllocationType::kYoung);
DCHECK(Heap::InYoungGeneration(raw_clone) || FLAG_single_generation);
@@ -2062,6 +2035,13 @@ void initialize_length<PropertyArray>(Handle<PropertyArray> array, int length) {
array->initialize_length(length);
}
+inline void ZeroEmbedderFields(i::Handle<i::JSObject> obj) {
+ auto count = obj->GetEmbedderFieldCount();
+ for (int i = 0; i < count; i++) {
+ obj->SetEmbedderField(i, Smi::kZero);
+ }
+}
+
} // namespace
template <typename T>
@@ -2107,15 +2087,14 @@ Handle<FixedArray> Factory::CopyFixedArrayWithMap(Handle<FixedArray> array,
}
Handle<FixedArray> Factory::CopyFixedArrayAndGrow(Handle<FixedArray> array,
- int grow_by,
- AllocationType allocation) {
- return CopyArrayAndGrow(array, grow_by, allocation);
+ int grow_by) {
+ return CopyArrayAndGrow(array, grow_by, AllocationType::kYoung);
}
Handle<WeakFixedArray> Factory::CopyWeakFixedArrayAndGrow(
- Handle<WeakFixedArray> src, int grow_by, AllocationType allocation) {
+ Handle<WeakFixedArray> src, int grow_by) {
DCHECK(!src->IsTransitionArray()); // Compacted by GC, this code doesn't work
- return CopyArrayAndGrow(src, grow_by, allocation);
+ return CopyArrayAndGrow(src, grow_by, AllocationType::kOld);
}
Handle<WeakArrayList> Factory::CopyWeakArrayListAndGrow(
@@ -2142,8 +2121,8 @@ Handle<WeakArrayList> Factory::CopyWeakArrayListAndGrow(
}
Handle<PropertyArray> Factory::CopyPropertyArrayAndGrow(
- Handle<PropertyArray> array, int grow_by, AllocationType allocation) {
- return CopyArrayAndGrow(array, grow_by, allocation);
+ Handle<PropertyArray> array, int grow_by) {
+ return CopyArrayAndGrow(array, grow_by, AllocationType::kYoung);
}
Handle<FixedArray> Factory::CopyFixedArrayUpTo(Handle<FixedArray> array,
@@ -2187,8 +2166,8 @@ Handle<FixedDoubleArray> Factory::CopyFixedDoubleArray(
Handle<FixedDoubleArray> array) {
int len = array->length();
if (len == 0) return array;
- Handle<FixedDoubleArray> result = Handle<FixedDoubleArray>::cast(
- NewFixedDoubleArray(len, AllocationType::kYoung));
+ Handle<FixedDoubleArray> result =
+ Handle<FixedDoubleArray>::cast(NewFixedDoubleArray(len));
Heap::CopyBlock(
result->address() + FixedDoubleArray::kLengthOffset,
array->address() + FixedDoubleArray::kLengthOffset,
@@ -2196,32 +2175,39 @@ Handle<FixedDoubleArray> Factory::CopyFixedDoubleArray(
return result;
}
-Handle<Object> Factory::NewNumber(double value, AllocationType allocation) {
+template <AllocationType allocation>
+Handle<Object> Factory::NewNumber(double value) {
// Materialize as a SMI if possible.
int32_t int_value;
if (DoubleToSmiInteger(value, &int_value)) {
return handle(Smi::FromInt(int_value), isolate());
}
- return NewHeapNumber(value, allocation);
+ return NewHeapNumber<allocation>(value);
}
-Handle<Object> Factory::NewNumberFromInt(int32_t value,
- AllocationType allocation) {
+template Handle<Object> V8_EXPORT_PRIVATE
+Factory::NewNumber<AllocationType::kYoung>(double);
+template Handle<Object> V8_EXPORT_PRIVATE
+Factory::NewNumber<AllocationType::kOld>(double);
+template Handle<Object> V8_EXPORT_PRIVATE
+Factory::NewNumber<AllocationType::kReadOnly>(double);
+
+Handle<Object> Factory::NewNumberFromInt(int32_t value) {
if (Smi::IsValid(value)) return handle(Smi::FromInt(value), isolate());
// Bypass NewNumber to avoid various redundant checks.
- return NewHeapNumber(FastI2D(value), allocation);
+ return NewHeapNumber(FastI2D(value));
}
-Handle<Object> Factory::NewNumberFromUint(uint32_t value,
- AllocationType allocation) {
+Handle<Object> Factory::NewNumberFromUint(uint32_t value) {
int32_t int32v = static_cast<int32_t>(value);
if (int32v >= 0 && Smi::IsValid(int32v)) {
return handle(Smi::FromInt(int32v), isolate());
}
- return NewHeapNumber(FastUI2D(value), allocation);
+ return NewHeapNumber(FastUI2D(value));
}
-Handle<HeapNumber> Factory::NewHeapNumber(AllocationType allocation) {
+template <AllocationType allocation>
+Handle<HeapNumber> Factory::NewHeapNumber() {
STATIC_ASSERT(HeapNumber::kSize <= kMaxRegularHeapObjectSize);
Map map = *heap_number_map();
HeapObject result = AllocateRawWithImmortalMap(HeapNumber::kSize, allocation,
@@ -2229,10 +2215,17 @@ Handle<HeapNumber> Factory::NewHeapNumber(AllocationType allocation) {
return handle(HeapNumber::cast(result), isolate());
}
+template Handle<HeapNumber> V8_EXPORT_PRIVATE
+Factory::NewHeapNumber<AllocationType::kYoung>();
+template Handle<HeapNumber> V8_EXPORT_PRIVATE
+Factory::NewHeapNumber<AllocationType::kOld>();
+template Handle<HeapNumber> V8_EXPORT_PRIVATE
+Factory::NewHeapNumber<AllocationType::kReadOnly>();
+
Handle<HeapNumber> Factory::NewHeapNumberForCodeAssembler(double value) {
- return NewHeapNumber(value, isolate()->heap()->CanAllocateInReadOnlySpace()
- ? AllocationType::kReadOnly
- : AllocationType::kOld);
+ return isolate()->heap()->CanAllocateInReadOnlySpace()
+ ? NewHeapNumber<AllocationType::kReadOnly>(value)
+ : NewHeapNumber<AllocationType::kOld>(value);
}
Handle<FreshlyAllocatedBigInt> Factory::NewBigInt(int length,
@@ -2299,8 +2292,8 @@ Handle<Object> Factory::NewInvalidStringLengthError() {
FATAL("Aborting on invalid string length");
}
// Invalidate the "string length" protector.
- if (isolate()->IsStringLengthOverflowIntact()) {
- isolate()->InvalidateStringLengthOverflowProtector();
+ if (Protectors::IsStringLengthOverflowLookupChainIntact(isolate())) {
+ Protectors::InvalidateStringLengthOverflowLookupChain(isolate());
}
return NewRangeError(MessageTemplate::kInvalidStringLength);
}
@@ -2412,7 +2405,7 @@ Handle<JSFunction> Factory::NewFunction(const NewFunctionArgs& args) {
case JS_ARRAY_TYPE:
elements_kind = PACKED_SMI_ELEMENTS;
break;
- case JS_ARGUMENTS_TYPE:
+ case JS_ARGUMENTS_OBJECT_TYPE:
elements_kind = PACKED_ELEMENTS;
break;
default:
@@ -2679,8 +2672,8 @@ Handle<Code> Factory::CopyCode(Handle<Code> code) {
{
int obj_size = code->Size();
CodePageCollectionMemoryModificationScope code_allocation(heap);
- HeapObject result =
- heap->AllocateRawWithRetryOrFail(obj_size, AllocationType::kCode);
+ HeapObject result = heap->AllocateRawWith<Heap::kRetryOrFail>(
+ obj_size, AllocationType::kCode);
// Copy code object.
Address old_addr = code->address();
@@ -2696,7 +2689,9 @@ Handle<Code> Factory::CopyCode(Handle<Code> code) {
// allocation is on.
heap->incremental_marking()->ProcessBlackAllocatedObject(*new_code);
// Record all references to embedded objects in the new code object.
+#ifndef V8_DISABLE_WRITE_BARRIERS
WriteBarrierForCode(*new_code);
+#endif
}
#ifdef VERIFY_HEAP
@@ -2737,9 +2732,8 @@ Handle<JSObject> Factory::NewJSObject(Handle<JSFunction> constructor,
return NewJSObjectFromMap(map, allocation);
}
-Handle<JSObject> Factory::NewJSObjectWithNullProto(AllocationType allocation) {
- Handle<JSObject> result =
- NewJSObject(isolate()->object_function(), allocation);
+Handle<JSObject> Factory::NewJSObjectWithNullProto() {
+ Handle<JSObject> result = NewJSObject(isolate()->object_function());
Handle<Map> new_map = Map::Copy(
isolate(), Handle<Map>(result->map(), isolate()), "ObjectWithNullProto");
Map::SetPrototype(isolate(), new_map, null_value());
@@ -2776,7 +2770,7 @@ Handle<JSGlobalObject> Factory::NewJSGlobalObject(
// The global object might be created from an object template with accessors.
// Fill these accessors into the dictionary.
Handle<DescriptorArray> descs(map->instance_descriptors(), isolate());
- for (int i = 0; i < map->NumberOfOwnDescriptors(); i++) {
+ for (InternalIndex i : InternalIndex::Range(map->NumberOfOwnDescriptors())) {
PropertyDetails details = descs->GetDetails(i);
// Only accessors are expected.
DCHECK_EQ(kAccessor, details.kind());
@@ -2888,13 +2882,14 @@ Handle<JSObject> Factory::NewSlowJSObjectFromMap(
Handle<JSObject> Factory::NewSlowJSObjectWithPropertiesAndElements(
Handle<HeapObject> prototype, Handle<NameDictionary> properties,
- Handle<FixedArrayBase> elements, AllocationType allocation) {
+ Handle<FixedArrayBase> elements) {
Handle<Map> object_map = isolate()->slow_object_with_object_prototype_map();
if (object_map->prototype() != *prototype) {
object_map = Map::TransitionToPrototype(isolate(), object_map, prototype);
}
DCHECK(object_map->is_dictionary_map());
- Handle<JSObject> object = NewJSObjectFromMap(object_map, allocation);
+ Handle<JSObject> object =
+ NewJSObjectFromMap(object_map, AllocationType::kYoung);
object->set_raw_properties_or_hash(*properties);
if (*elements != ReadOnlyRoots(isolate()).empty_fixed_array()) {
DCHECK(elements->IsNumberDictionary());
@@ -3010,7 +3005,7 @@ Handle<JSModuleNamespace> Factory::NewJSModuleNamespace() {
Handle<JSModuleNamespace> module_namespace(
Handle<JSModuleNamespace>::cast(NewJSObjectFromMap(map)));
FieldIndex index = FieldIndex::ForDescriptor(
- *map, JSModuleNamespace::kToStringTagFieldIndex);
+ *map, InternalIndex(JSModuleNamespace::kToStringTagFieldIndex));
module_namespace->FastPropertyAtPut(index,
ReadOnlyRoots(isolate()).Module_string());
return module_namespace;
@@ -3042,6 +3037,7 @@ Handle<SourceTextModule> Factory::NewSourceTextModule(
Handle<FixedArray> requested_modules =
requested_modules_length > 0 ? NewFixedArray(requested_modules_length)
: empty_fixed_array();
+ Handle<ArrayList> async_parent_modules = ArrayList::New(isolate(), 0);
ReadOnlyRoots roots(isolate());
Handle<SourceTextModule> module(
@@ -3061,6 +3057,12 @@ Handle<SourceTextModule> Factory::NewSourceTextModule(
module->set_import_meta(roots.the_hole_value());
module->set_dfs_index(-1);
module->set_dfs_ancestor_index(-1);
+ module->set_top_level_capability(roots.undefined_value());
+ module->set_flags(0);
+ module->set_async(IsAsyncModule(code->kind()));
+ module->set_async_evaluating(false);
+ module->set_async_parent_modules(*async_parent_modules);
+ module->set_pending_async_dependencies(0);
return module;
}
@@ -3086,15 +3088,43 @@ Handle<SyntheticModule> Factory::NewSyntheticModule(
return module;
}
-Handle<JSArrayBuffer> Factory::NewJSArrayBuffer(SharedFlag shared,
- AllocationType allocation) {
- Handle<JSFunction> array_buffer_fun(
- shared == SharedFlag::kShared
- ? isolate()->native_context()->shared_array_buffer_fun()
- : isolate()->native_context()->array_buffer_fun(),
+Handle<JSArrayBuffer> Factory::NewJSArrayBuffer(
+ std::shared_ptr<BackingStore> backing_store, AllocationType allocation) {
+ Handle<Map> map(isolate()->native_context()->array_buffer_fun().initial_map(),
+ isolate());
+ auto result =
+ Handle<JSArrayBuffer>::cast(NewJSObjectFromMap(map, allocation));
+ result->Setup(SharedFlag::kNotShared, std::move(backing_store));
+ return result;
+}
+
+MaybeHandle<JSArrayBuffer> Factory::NewJSArrayBufferAndBackingStore(
+ size_t byte_length, InitializedFlag initialized,
+ AllocationType allocation) {
+ std::unique_ptr<BackingStore> backing_store = nullptr;
+
+ if (byte_length > 0) {
+ backing_store = BackingStore::Allocate(isolate(), byte_length,
+ SharedFlag::kNotShared, initialized);
+ if (!backing_store) return MaybeHandle<JSArrayBuffer>();
+ }
+ Handle<Map> map(isolate()->native_context()->array_buffer_fun().initial_map(),
+ isolate());
+ auto array_buffer =
+ Handle<JSArrayBuffer>::cast(NewJSObjectFromMap(map, allocation));
+ array_buffer->Setup(SharedFlag::kNotShared, std::move(backing_store));
+ return array_buffer;
+}
+
+Handle<JSArrayBuffer> Factory::NewJSSharedArrayBuffer(
+ std::shared_ptr<BackingStore> backing_store) {
+ Handle<Map> map(
+ isolate()->native_context()->shared_array_buffer_fun().initial_map(),
isolate());
- Handle<Map> map(array_buffer_fun->initial_map(), isolate());
- return Handle<JSArrayBuffer>::cast(NewJSObjectFromMap(map, allocation));
+ auto result = Handle<JSArrayBuffer>::cast(
+ NewJSObjectFromMap(map, AllocationType::kYoung));
+ result->Setup(SharedFlag::kShared, std::move(backing_store));
+ return result;
}
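For illustration, a hedged sketch of the new allocate-then-wrap flow introduced above (isolate is assumed to be in scope, and InitializedFlag::kZeroInitialized is assumed to be one of the enum's values; error handling is elided):

  std::unique_ptr<BackingStore> store =
      BackingStore::Allocate(isolate, 1024, SharedFlag::kNotShared,
                             InitializedFlag::kZeroInitialized);
  if (store) {
    // The unique_ptr converts into the std::shared_ptr parameter on the move.
    Handle<JSArrayBuffer> buffer =
        isolate->factory()->NewJSArrayBuffer(std::move(store));
  }
  // Or let the factory do both steps; an empty MaybeHandle signals OOM.
  MaybeHandle<JSArrayBuffer> maybe =
      isolate->factory()->NewJSArrayBufferAndBackingStore(
          1024, InitializedFlag::kZeroInitialized);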
Handle<JSIteratorResult> Factory::NewJSIteratorResult(Handle<Object> value,
@@ -3172,20 +3202,17 @@ void ForFixedTypedArray(ExternalArrayType array_type, size_t* element_size,
Handle<JSArrayBufferView> Factory::NewJSArrayBufferView(
Handle<Map> map, Handle<FixedArrayBase> elements,
- Handle<JSArrayBuffer> buffer, size_t byte_offset, size_t byte_length,
- AllocationType allocation) {
+ Handle<JSArrayBuffer> buffer, size_t byte_offset, size_t byte_length) {
CHECK_LE(byte_length, buffer->byte_length());
CHECK_LE(byte_offset, buffer->byte_length());
CHECK_LE(byte_offset + byte_length, buffer->byte_length());
- Handle<JSArrayBufferView> array_buffer_view =
- Handle<JSArrayBufferView>::cast(NewJSObjectFromMap(map, allocation));
+ Handle<JSArrayBufferView> array_buffer_view = Handle<JSArrayBufferView>::cast(
+ NewJSObjectFromMap(map, AllocationType::kYoung));
array_buffer_view->set_elements(*elements);
array_buffer_view->set_buffer(*buffer);
array_buffer_view->set_byte_offset(byte_offset);
array_buffer_view->set_byte_length(byte_length);
- for (int i = 0; i < v8::ArrayBufferView::kEmbedderFieldCount; i++) {
- array_buffer_view->SetEmbedderField(i, Smi::kZero);
- }
+ ZeroEmbedderFields(array_buffer_view);
DCHECK_EQ(array_buffer_view->GetEmbedderFieldCount(),
v8::ArrayBufferView::kEmbedderFieldCount);
return array_buffer_view;
@@ -3193,8 +3220,8 @@ Handle<JSArrayBufferView> Factory::NewJSArrayBufferView(
Handle<JSTypedArray> Factory::NewJSTypedArray(ExternalArrayType type,
Handle<JSArrayBuffer> buffer,
- size_t byte_offset, size_t length,
- AllocationType allocation) {
+ size_t byte_offset,
+ size_t length) {
size_t element_size;
ElementsKind elements_kind;
ForFixedTypedArray(type, &element_size, &elements_kind);
@@ -3219,24 +3246,21 @@ Handle<JSTypedArray> Factory::NewJSTypedArray(ExternalArrayType type,
default:
UNREACHABLE();
}
- Handle<JSTypedArray> typed_array = Handle<JSTypedArray>::cast(
- NewJSArrayBufferView(map, empty_byte_array(), buffer, byte_offset,
- byte_length, allocation));
+ Handle<JSTypedArray> typed_array =
+ Handle<JSTypedArray>::cast(NewJSArrayBufferView(
+ map, empty_byte_array(), buffer, byte_offset, byte_length));
typed_array->set_length(length);
- typed_array->set_external_pointer(
- reinterpret_cast<byte*>(buffer->backing_store()) + byte_offset);
- typed_array->set_base_pointer(Smi::kZero);
+ typed_array->SetOffHeapDataPtr(buffer->backing_store(), byte_offset);
return typed_array;
}
Handle<JSDataView> Factory::NewJSDataView(Handle<JSArrayBuffer> buffer,
size_t byte_offset,
- size_t byte_length,
- AllocationType allocation) {
+ size_t byte_length) {
Handle<Map> map(isolate()->native_context()->data_view_fun().initial_map(),
isolate());
Handle<JSDataView> obj = Handle<JSDataView>::cast(NewJSArrayBufferView(
- map, empty_fixed_array(), buffer, byte_offset, byte_length, allocation));
+ map, empty_fixed_array(), buffer, byte_offset, byte_length));
obj->set_data_pointer(static_cast<uint8_t*>(buffer->backing_store()) +
byte_offset);
return obj;
@@ -3499,11 +3523,6 @@ Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfo(
share->clear_padding();
}
- // Link into the list.
- Handle<WeakArrayList> noscript_list = noscript_shared_function_infos();
- noscript_list = WeakArrayList::AddToEnd(isolate(), noscript_list,
- MaybeObjectHandle::Weak(share));
- isolate()->heap()->set_noscript_shared_function_infos(*noscript_list);
#ifdef VERIFY_HEAP
share->SharedFunctionInfoVerify(isolate());
@@ -3894,6 +3913,9 @@ void Factory::SetRegExpIrregexpData(Handle<JSRegExp> regexp,
JSRegExp::Flags flags, int capture_count) {
Handle<FixedArray> store = NewFixedArray(JSRegExp::kIrregexpDataSize);
Smi uninitialized = Smi::FromInt(JSRegExp::kUninitializedValue);
+ Smi ticks_until_tier_up = FLAG_regexp_tier_up
+ ? Smi::FromInt(FLAG_regexp_tier_up_ticks)
+ : uninitialized;
store->set(JSRegExp::kTagIndex, Smi::FromInt(type));
store->set(JSRegExp::kSourceIndex, *source);
store->set(JSRegExp::kFlagsIndex, Smi::FromInt(flags));
@@ -3904,7 +3926,7 @@ void Factory::SetRegExpIrregexpData(Handle<JSRegExp> regexp,
store->set(JSRegExp::kIrregexpMaxRegisterCountIndex, Smi::kZero);
store->set(JSRegExp::kIrregexpCaptureCountIndex, Smi::FromInt(capture_count));
store->set(JSRegExp::kIrregexpCaptureNameMapIndex, uninitialized);
- store->set(JSRegExp::kIrregexpTierUpTicksIndex, Smi::kZero);
+ store->set(JSRegExp::kIrregexpTicksUntilTierUpIndex, ticks_until_tier_up);
regexp->set_data(*store);
}
@@ -4141,19 +4163,18 @@ Handle<Map> Factory::CreateClassFunctionMap(Handle<JSFunction> empty_function) {
return map;
}
-Handle<JSPromise> Factory::NewJSPromiseWithoutHook(AllocationType allocation) {
- Handle<JSPromise> promise = Handle<JSPromise>::cast(
- NewJSObject(isolate()->promise_function(), allocation));
+Handle<JSPromise> Factory::NewJSPromiseWithoutHook() {
+ Handle<JSPromise> promise =
+ Handle<JSPromise>::cast(NewJSObject(isolate()->promise_function()));
promise->set_reactions_or_result(Smi::kZero);
promise->set_flags(0);
- for (int i = 0; i < v8::Promise::kEmbedderFieldCount; i++) {
- promise->SetEmbedderField(i, Smi::kZero);
- }
+ ZeroEmbedderFields(promise);
+ DCHECK_EQ(promise->GetEmbedderFieldCount(), v8::Promise::kEmbedderFieldCount);
return promise;
}
-Handle<JSPromise> Factory::NewJSPromise(AllocationType allocation) {
- Handle<JSPromise> promise = NewJSPromiseWithoutHook(allocation);
+Handle<JSPromise> Factory::NewJSPromise() {
+ Handle<JSPromise> promise = NewJSPromiseWithoutHook();
isolate()->RunPromiseHook(PromiseHookType::kInit, promise, undefined_value());
return promise;
}
diff --git a/deps/v8/src/heap/factory.h b/deps/v8/src/heap/factory.h
index 1e47926e8e..35de6425c9 100644
--- a/deps/v8/src/heap/factory.h
+++ b/deps/v8/src/heap/factory.h
@@ -74,7 +74,8 @@ class WeakCell;
struct SourceRange;
template <typename T>
class ZoneVector;
-enum class SharedFlag : uint32_t;
+enum class SharedFlag : uint8_t;
+enum class InitializedFlag : uint8_t;
enum FunctionMode {
kWithNameBit = 1 << 0,
@@ -107,14 +108,12 @@ enum FunctionMode {
// Interface for handle based allocation.
class V8_EXPORT_PRIVATE Factory {
public:
- Handle<Oddball> NewOddball(
- Handle<Map> map, const char* to_string, Handle<Object> to_number,
- const char* type_of, byte kind,
- AllocationType allocation = AllocationType::kReadOnly);
+ Handle<Oddball> NewOddball(Handle<Map> map, const char* to_string,
+ Handle<Object> to_number, const char* type_of,
+ byte kind);
// Marks self references within code generation.
- Handle<Oddball> NewSelfReferenceMarker(
- AllocationType allocation = AllocationType::kOld);
+ Handle<Oddball> NewSelfReferenceMarker();
// Allocates a fixed array-like object with given map and initialized with
// undefined values.
@@ -140,8 +139,7 @@ class V8_EXPORT_PRIVATE Factory {
int length, AllocationType allocation = AllocationType::kYoung);
// Allocates a property array initialized with undefined values.
- Handle<PropertyArray> NewPropertyArray(
- int length, AllocationType allocation = AllocationType::kYoung);
+ Handle<PropertyArray> NewPropertyArray(int length);
// Tries allocating a fixed array initialized with undefined values.
// In case of an allocation failure (OOM) an empty handle is returned.
// The caller has to manually signal an
@@ -156,24 +154,20 @@ class V8_EXPORT_PRIVATE Factory {
int length, AllocationType allocation = AllocationType::kYoung);
// Allocates an uninitialized fixed array. It must be filled by the caller.
- Handle<FixedArray> NewUninitializedFixedArray(
- int length, AllocationType allocation = AllocationType::kYoung);
+ Handle<FixedArray> NewUninitializedFixedArray(int length);
// Allocates a closure feedback cell array whose feedback cells are
// initialized with undefined values.
- Handle<ClosureFeedbackCellArray> NewClosureFeedbackCellArray(
- int num_slots, AllocationType allocation = AllocationType::kYoung);
+ Handle<ClosureFeedbackCellArray> NewClosureFeedbackCellArray(int num_slots);
// Allocates a feedback vector whose slots are initialized with undefined
// values.
Handle<FeedbackVector> NewFeedbackVector(
Handle<SharedFunctionInfo> shared,
- Handle<ClosureFeedbackCellArray> closure_feedback_cell_array,
- AllocationType allocation = AllocationType::kYoung);
+ Handle<ClosureFeedbackCellArray> closure_feedback_cell_array);
// Allocates a clean embedder data array with given capacity.
- Handle<EmbedderDataArray> NewEmbedderDataArray(
- int length, AllocationType allocation = AllocationType::kYoung);
+ Handle<EmbedderDataArray> NewEmbedderDataArray(int length);
// Allocates a fixed array for name-value pairs of boilerplate properties and
// calculates the number of properties we need to store in the backing store.
@@ -183,20 +177,17 @@ class V8_EXPORT_PRIVATE Factory {
// Allocate a new uninitialized fixed double array.
// The function returns a pre-allocated empty fixed array for length = 0,
// so the return type must be the general fixed array class.
- Handle<FixedArrayBase> NewFixedDoubleArray(
- int length, AllocationType allocation = AllocationType::kYoung);
+ Handle<FixedArrayBase> NewFixedDoubleArray(int length);
// Allocate a new fixed double array with hole values.
- Handle<FixedArrayBase> NewFixedDoubleArrayWithHoles(
- int size, AllocationType allocation = AllocationType::kYoung);
+ Handle<FixedArrayBase> NewFixedDoubleArrayWithHoles(int size);
  // Allocates a FeedbackMetadata object and zeroes the data section.
Handle<FeedbackMetadata> NewFeedbackMetadata(
int slot_count, int feedback_cell_count,
AllocationType allocation = AllocationType::kOld);
- Handle<FrameArray> NewFrameArray(
- int number_of_frames, AllocationType allocation = AllocationType::kYoung);
+ Handle<FrameArray> NewFrameArray(int number_of_frames);
Handle<OrderedHashSet> NewOrderedHashSet();
Handle<OrderedHashMap> NewOrderedHashMap();
@@ -223,10 +214,6 @@ class V8_EXPORT_PRIVATE Factory {
Handle<Tuple2> NewTuple2(Handle<Object> value1, Handle<Object> value2,
AllocationType allocation);
- // Create a new Tuple3 struct.
- Handle<Tuple3> NewTuple3(Handle<Object> value1, Handle<Object> value2,
- Handle<Object> value3, AllocationType allocation);
-
// Create a new ArrayBoilerplateDescription struct.
Handle<ArrayBoilerplateDescription> NewArrayBoilerplateDescription(
ElementsKind elements_kind, Handle<FixedArrayBase> constant_values);
@@ -451,11 +438,8 @@ class V8_EXPORT_PRIVATE Factory {
Handle<AccessorInfo> NewAccessorInfo();
- Handle<Script> NewScript(Handle<String> source,
- AllocationType allocation = AllocationType::kOld);
- Handle<Script> NewScriptWithId(
- Handle<String> source, int script_id,
- AllocationType allocation = AllocationType::kOld);
+ Handle<Script> NewScript(Handle<String> source);
+ Handle<Script> NewScriptWithId(Handle<String> source, int script_id);
Handle<Script> CloneScript(Handle<Script> script);
Handle<BreakPointInfo> NewBreakPointInfo(int source_position);
@@ -479,8 +463,7 @@ class V8_EXPORT_PRIVATE Factory {
Handle<JSReceiver> thenable, Handle<Context> context);
// Foreign objects are pretenured when allocated by the bootstrapper.
- Handle<Foreign> NewForeign(
- Address addr, AllocationType allocation = AllocationType::kYoung);
+ Handle<Foreign> NewForeign(Address addr);
Handle<ByteArray> NewByteArray(
int length, AllocationType allocation = AllocationType::kYoung);
@@ -498,9 +481,8 @@ class V8_EXPORT_PRIVATE Factory {
Handle<FeedbackCell> NewOneClosureCell(Handle<HeapObject> value);
Handle<FeedbackCell> NewManyClosuresCell(Handle<HeapObject> value);
- Handle<DescriptorArray> NewDescriptorArray(
- int number_of_entries, int slack = 0,
- AllocationType allocation = AllocationType::kYoung);
+ Handle<DescriptorArray> NewDescriptorArray(int number_of_entries,
+ int slack = 0);
Handle<TransitionArray> NewTransitionArray(int number_of_transitions,
int slack = 0);
@@ -537,21 +519,18 @@ class V8_EXPORT_PRIVATE Factory {
Handle<FixedArray> CopyFixedArrayWithMap(Handle<FixedArray> array,
Handle<Map> map);
- Handle<FixedArray> CopyFixedArrayAndGrow(
- Handle<FixedArray> array, int grow_by,
- AllocationType allocation = AllocationType::kYoung);
+ Handle<FixedArray> CopyFixedArrayAndGrow(Handle<FixedArray> array,
+ int grow_by);
- Handle<WeakFixedArray> CopyWeakFixedArrayAndGrow(
- Handle<WeakFixedArray> array, int grow_by,
- AllocationType allocation = AllocationType::kYoung);
+ Handle<WeakFixedArray> CopyWeakFixedArrayAndGrow(Handle<WeakFixedArray> array,
+ int grow_by);
Handle<WeakArrayList> CopyWeakArrayListAndGrow(
Handle<WeakArrayList> array, int grow_by,
AllocationType allocation = AllocationType::kYoung);
- Handle<PropertyArray> CopyPropertyArrayAndGrow(
- Handle<PropertyArray> array, int grow_by,
- AllocationType allocation = AllocationType::kYoung);
+ Handle<PropertyArray> CopyPropertyArrayAndGrow(Handle<PropertyArray> array,
+ int grow_by);
Handle<FixedArray> CopyFixedArrayUpTo(
Handle<FixedArray> array, int new_len,
@@ -567,32 +546,28 @@ class V8_EXPORT_PRIVATE Factory {
// Numbers (e.g. literals) are pretenured by the parser.
// The return value may be a smi or a heap number.
- Handle<Object> NewNumber(double value,
- AllocationType allocation = AllocationType::kYoung);
-
- Handle<Object> NewNumberFromInt(
- int32_t value, AllocationType allocation = AllocationType::kYoung);
- Handle<Object> NewNumberFromUint(
- uint32_t value, AllocationType allocation = AllocationType::kYoung);
- inline Handle<Object> NewNumberFromSize(
- size_t value, AllocationType allocation = AllocationType::kYoung);
- inline Handle<Object> NewNumberFromInt64(
- int64_t value, AllocationType allocation = AllocationType::kYoung);
- inline Handle<HeapNumber> NewHeapNumber(
- double value, AllocationType allocation = AllocationType::kYoung);
- inline Handle<HeapNumber> NewHeapNumberFromBits(
- uint64_t bits, AllocationType allocation = AllocationType::kYoung);
+ template <AllocationType allocation = AllocationType::kYoung>
+ EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
+ Handle<Object> NewNumber(double value);
+ Handle<Object> NewNumberFromInt(int32_t value);
+ Handle<Object> NewNumberFromUint(uint32_t value);
+ inline Handle<Object> NewNumberFromSize(size_t value);
+ inline Handle<Object> NewNumberFromInt64(int64_t value);
+ template <AllocationType allocation = AllocationType::kYoung>
+ inline Handle<HeapNumber> NewHeapNumber(double value);
+ template <AllocationType allocation = AllocationType::kYoung>
+ inline Handle<HeapNumber> NewHeapNumberFromBits(uint64_t bits);
// Creates heap number object with not yet set value field.
- Handle<HeapNumber> NewHeapNumber(
- AllocationType allocation = AllocationType::kYoung);
+ template <AllocationType allocation = AllocationType::kYoung>
+ EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
+ Handle<HeapNumber> NewHeapNumber();
// Creates a new HeapNumber in read-only space if possible otherwise old
// space.
Handle<HeapNumber> NewHeapNumberForCodeAssembler(double value);
- inline Handle<HeapNumber> NewHeapNumberWithHoleNaN(
- AllocationType allocation = AllocationType::kYoung);
+ inline Handle<HeapNumber> NewHeapNumberWithHoleNaN();
// Allocates a new BigInt with {length} digits. Only to be used by
// MutableBigInt::New*.
@@ -609,8 +584,7 @@ class V8_EXPORT_PRIVATE Factory {
Handle<JSFunction> constructor,
AllocationType allocation = AllocationType::kYoung);
// JSObject without a prototype.
- Handle<JSObject> NewJSObjectWithNullProto(
- AllocationType allocation = AllocationType::kYoung);
+ Handle<JSObject> NewJSObjectWithNullProto();
// Global objects are pretenured and initialized based on a constructor.
Handle<JSGlobalObject> NewJSGlobalObject(Handle<JSFunction> constructor);
@@ -644,8 +618,7 @@ class V8_EXPORT_PRIVATE Factory {
// object will have dictionary elements.
Handle<JSObject> NewSlowJSObjectWithPropertiesAndElements(
Handle<HeapObject> prototype, Handle<NameDictionary> properties,
- Handle<FixedArrayBase> elements,
- AllocationType allocation = AllocationType::kYoung);
+ Handle<FixedArrayBase> elements);
// JS arrays are pretenured when allocated by the parser.
@@ -692,20 +665,27 @@ class V8_EXPORT_PRIVATE Factory {
v8::Module::SyntheticModuleEvaluationSteps evaluation_steps);
Handle<JSArrayBuffer> NewJSArrayBuffer(
- SharedFlag shared, AllocationType allocation = AllocationType::kYoung);
+ std::shared_ptr<BackingStore> backing_store,
+ AllocationType allocation = AllocationType::kYoung);
+
+ MaybeHandle<JSArrayBuffer> NewJSArrayBufferAndBackingStore(
+ size_t byte_length, InitializedFlag initialized,
+ AllocationType allocation = AllocationType::kYoung);
+
+ Handle<JSArrayBuffer> NewJSSharedArrayBuffer(
+ std::shared_ptr<BackingStore> backing_store);
static void TypeAndSizeForElementsKind(ElementsKind kind,
ExternalArrayType* array_type,
size_t* element_size);
// Creates a new JSTypedArray with the specified buffer.
- Handle<JSTypedArray> NewJSTypedArray(
- ExternalArrayType type, Handle<JSArrayBuffer> buffer, size_t byte_offset,
- size_t length, AllocationType allocation = AllocationType::kYoung);
+ Handle<JSTypedArray> NewJSTypedArray(ExternalArrayType type,
+ Handle<JSArrayBuffer> buffer,
+ size_t byte_offset, size_t length);
- Handle<JSDataView> NewJSDataView(
- Handle<JSArrayBuffer> buffer, size_t byte_offset, size_t byte_length,
- AllocationType allocation = AllocationType::kYoung);
+ Handle<JSDataView> NewJSDataView(Handle<JSArrayBuffer> buffer,
+ size_t byte_offset, size_t byte_length);
Handle<JSIteratorResult> NewJSIteratorResult(Handle<Object> value, bool done);
Handle<JSAsyncFromSyncIterator> NewJSAsyncFromSyncIterator(
@@ -922,10 +902,8 @@ class V8_EXPORT_PRIVATE Factory {
  // Converts the given ToPrimitive hint to its string representation.
Handle<String> ToPrimitiveHintString(ToPrimitiveHint hint);
- Handle<JSPromise> NewJSPromiseWithoutHook(
- AllocationType allocation = AllocationType::kYoung);
- Handle<JSPromise> NewJSPromise(
- AllocationType allocation = AllocationType::kYoung);
+ Handle<JSPromise> NewJSPromiseWithoutHook();
+ Handle<JSPromise> NewJSPromise();
Handle<CallHandlerInfo> NewCallHandlerInfo(bool has_no_side_effect = false);
@@ -1034,8 +1012,7 @@ class V8_EXPORT_PRIVATE Factory {
Handle<JSArrayBufferView> NewJSArrayBufferView(
Handle<Map> map, Handle<FixedArrayBase> elements,
- Handle<JSArrayBuffer> buffer, size_t byte_offset, size_t byte_length,
- AllocationType allocation);
+ Handle<JSArrayBuffer> buffer, size_t byte_offset, size_t byte_length);
// Allocate memory for an uninitialized array (e.g., a FixedArray or similar).
HeapObject AllocateRawArray(int size, AllocationType allocation);
diff --git a/deps/v8/src/heap/heap-inl.h b/deps/v8/src/heap/heap-inl.h
index da803f3339..56f3590b8a 100644
--- a/deps/v8/src/heap/heap-inl.h
+++ b/deps/v8/src/heap/heap-inl.h
@@ -111,10 +111,6 @@ void Heap::SetRootStringTable(StringTable value) {
roots_table()[RootIndex::kStringTable] = value.ptr();
}
-void Heap::SetRootNoScriptSharedFunctionInfos(Object value) {
- roots_table()[RootIndex::kNoScriptSharedFunctionInfos] = value.ptr();
-}
-
void Heap::SetMessageListeners(TemplateList value) {
roots_table()[RootIndex::kMessageListeners] = value.ptr();
}
@@ -163,7 +159,7 @@ AllocationResult Heap::AllocateRaw(int size_in_bytes, AllocationType type,
AllocationAlignment alignment) {
DCHECK(AllowHandleAllocation::IsAllowed());
DCHECK(AllowHeapAllocation::IsAllowed());
- DCHECK(gc_state_ == NOT_IN_GC);
+ DCHECK_EQ(gc_state_, NOT_IN_GC);
#ifdef V8_ENABLE_ALLOCATION_TIMEOUT
if (FLAG_random_gc_interval > 0 || FLAG_gc_interval >= 0) {
if (!always_allocate() && Heap::allocation_timeout_-- <= 0) {
@@ -180,8 +176,9 @@ AllocationResult Heap::AllocateRaw(int size_in_bytes, AllocationType type,
HeapObject object;
AllocationResult allocation;
- if (FLAG_single_generation && type == AllocationType::kYoung)
+ if (FLAG_single_generation && type == AllocationType::kYoung) {
type = AllocationType::kOld;
+ }
if (AllocationType::kYoung == type) {
if (large_object) {
@@ -212,9 +209,7 @@ AllocationResult Heap::AllocateRaw(int size_in_bytes, AllocationType type,
} else if (AllocationType::kMap == type) {
allocation = map_space_->AllocateRawUnaligned(size_in_bytes);
} else if (AllocationType::kReadOnly == type) {
-#ifdef V8_USE_SNAPSHOT
DCHECK(isolate_->serializer_enabled());
-#endif
DCHECK(!large_object);
DCHECK(CanAllocateInReadOnlySpace());
DCHECK_EQ(AllocationOrigin::kRuntime, origin);
@@ -242,6 +237,40 @@ AllocationResult Heap::AllocateRaw(int size_in_bytes, AllocationType type,
return allocation;
}
+template <Heap::AllocationRetryMode mode>
+HeapObject Heap::AllocateRawWith(int size, AllocationType allocation,
+ AllocationOrigin origin,
+ AllocationAlignment alignment) {
+ DCHECK(AllowHandleAllocation::IsAllowed());
+ DCHECK(AllowHeapAllocation::IsAllowed());
+ DCHECK_EQ(gc_state_, NOT_IN_GC);
+ Heap* heap = isolate()->heap();
+ Address* top = heap->NewSpaceAllocationTopAddress();
+ Address* limit = heap->NewSpaceAllocationLimitAddress();
+ if (allocation == AllocationType::kYoung &&
+ alignment == AllocationAlignment::kWordAligned &&
+ size <= kMaxRegularHeapObjectSize &&
+ (*limit - *top >= static_cast<unsigned>(size)) &&
+ V8_LIKELY(!FLAG_single_generation && FLAG_inline_new &&
+ FLAG_gc_interval == 0)) {
+ DCHECK(IsAligned(size, kTaggedSize));
+ HeapObject obj = HeapObject::FromAddress(*top);
+ *top += size;
+ heap->CreateFillerObjectAt(obj.address(), size, ClearRecordedSlots::kNo);
+ MSAN_ALLOCATED_UNINITIALIZED_MEMORY(obj.address(), size);
+ return obj;
+ }
+ switch (mode) {
+ case kLightRetry:
+ return AllocateRawWithLightRetrySlowPath(size, allocation, origin,
+ alignment);
+ case kRetryOrFail:
+ return AllocateRawWithRetryOrFailSlowPath(size, allocation, origin,
+ alignment);
+ }
+ UNREACHABLE();
+}
+
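For illustration, both retry modes of the new inline helper, following the CopyCode caller updated earlier in this patch (heap and size are assumed to be in scope):

  // kRetryOrFail aborts the process if memory cannot be obtained.
  HeapObject code_obj =
      heap->AllocateRawWith<Heap::kRetryOrFail>(size, AllocationType::kCode);
  // kLightRetry returns a null HeapObject instead; the caller must check.
  HeapObject maybe_obj =
      heap->AllocateRawWith<Heap::kLightRetry>(size, AllocationType::kYoung);
  if (maybe_obj.is_null()) {
    // Fall back to a slower path or report the failure.
  }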
void Heap::OnAllocationEvent(HeapObject object, int size_in_bytes) {
for (auto& tracker : allocation_trackers_) {
tracker->AllocationEvent(object.address(), size_in_bytes);
diff --git a/deps/v8/src/heap/heap-write-barrier-inl.h b/deps/v8/src/heap/heap-write-barrier-inl.h
index 5687284b1e..a0d9902006 100644
--- a/deps/v8/src/heap/heap-write-barrier-inl.h
+++ b/deps/v8/src/heap/heap-write-barrier-inl.h
@@ -212,6 +212,7 @@ inline void MarkingBarrierForDescriptorArray(Heap* heap, HeapObject host,
inline WriteBarrierMode GetWriteBarrierModeForObject(
HeapObject object, const DisallowHeapAllocation* promise) {
+ if (FLAG_disable_write_barriers) return SKIP_WRITE_BARRIER;
DCHECK(Heap_PageFlagsAreConsistent(object));
heap_internals::MemoryChunk* chunk =
heap_internals::MemoryChunk::FromHeapObject(object);
@@ -221,6 +222,9 @@ inline WriteBarrierMode GetWriteBarrierModeForObject(
}
inline bool ObjectInYoungGeneration(Object object) {
+ // TODO(rong): Fix caller of this function when we deploy
+ // v8_use_third_party_heap.
+ if (FLAG_single_generation) return false;
if (object.IsSmi()) return false;
return heap_internals::MemoryChunk::FromHeapObject(HeapObject::cast(object))
->InYoungGeneration();
diff --git a/deps/v8/src/heap/heap.cc b/deps/v8/src/heap/heap.cc
index ff3b34cfb4..45b2273c50 100644
--- a/deps/v8/src/heap/heap.cc
+++ b/deps/v8/src/heap/heap.cc
@@ -39,6 +39,7 @@
#include "src/heap/incremental-marking.h"
#include "src/heap/mark-compact-inl.h"
#include "src/heap/mark-compact.h"
+#include "src/heap/memory-measurement.h"
#include "src/heap/memory-reducer.h"
#include "src/heap/object-stats.h"
#include "src/heap/objects-visiting-inl.h"
@@ -47,7 +48,6 @@
#include "src/heap/remembered-set.h"
#include "src/heap/scavenge-job.h"
#include "src/heap/scavenger-inl.h"
-#include "src/heap/store-buffer.h"
#include "src/heap/stress-marking-observer.h"
#include "src/heap/stress-scavenge-observer.h"
#include "src/heap/sweeper.h"
@@ -913,23 +913,6 @@ void Heap::RemoveAllocationObserversFromAllSpaces(
}
}
-class Heap::SkipStoreBufferScope {
- public:
- explicit SkipStoreBufferScope(StoreBuffer* store_buffer)
- : store_buffer_(store_buffer) {
- store_buffer_->MoveAllEntriesToRememberedSet();
- store_buffer_->SetMode(StoreBuffer::IN_GC);
- }
-
- ~SkipStoreBufferScope() {
- DCHECK(store_buffer_->Empty());
- store_buffer_->SetMode(StoreBuffer::NOT_IN_GC);
- }
-
- private:
- StoreBuffer* store_buffer_;
-};
-
namespace {
inline bool MakePretenureDecision(
AllocationSite site, AllocationSite::PretenureDecision current_decision,
@@ -1965,44 +1948,40 @@ bool Heap::PerformGarbageCollection(
size_t start_young_generation_size =
Heap::new_space()->Size() + new_lo_space()->SizeOfObjects();
- {
- Heap::SkipStoreBufferScope skip_store_buffer_scope(store_buffer_.get());
-
- switch (collector) {
- case MARK_COMPACTOR:
- UpdateOldGenerationAllocationCounter();
- // Perform mark-sweep with optional compaction.
- MarkCompact();
- old_generation_size_configured_ = true;
- // This should be updated before PostGarbageCollectionProcessing, which
- // can cause another GC. Take into account the objects promoted during
- // GC.
- old_generation_allocation_counter_at_last_gc_ +=
- static_cast<size_t>(promoted_objects_size_);
- old_generation_size_at_last_gc_ = OldGenerationSizeOfObjects();
- break;
- case MINOR_MARK_COMPACTOR:
- MinorMarkCompact();
- break;
- case SCAVENGER:
- if ((fast_promotion_mode_ &&
- CanExpandOldGeneration(new_space()->Size() +
- new_lo_space()->Size()))) {
- tracer()->NotifyYoungGenerationHandling(
- YoungGenerationHandling::kFastPromotionDuringScavenge);
- EvacuateYoungGeneration();
- } else {
- tracer()->NotifyYoungGenerationHandling(
- YoungGenerationHandling::kRegularScavenge);
-
- Scavenge();
- }
- break;
- }
+ switch (collector) {
+ case MARK_COMPACTOR:
+ UpdateOldGenerationAllocationCounter();
+ // Perform mark-sweep with optional compaction.
+ MarkCompact();
+ old_generation_size_configured_ = true;
+ // This should be updated before PostGarbageCollectionProcessing, which
+ // can cause another GC. Take into account the objects promoted during
+ // GC.
+ old_generation_allocation_counter_at_last_gc_ +=
+ static_cast<size_t>(promoted_objects_size_);
+ old_generation_size_at_last_gc_ = OldGenerationSizeOfObjects();
+ break;
+ case MINOR_MARK_COMPACTOR:
+ MinorMarkCompact();
+ break;
+ case SCAVENGER:
+ if ((fast_promotion_mode_ &&
+ CanExpandOldGeneration(new_space()->Size() +
+ new_lo_space()->Size()))) {
+ tracer()->NotifyYoungGenerationHandling(
+ YoungGenerationHandling::kFastPromotionDuringScavenge);
+ EvacuateYoungGeneration();
+ } else {
+ tracer()->NotifyYoungGenerationHandling(
+ YoungGenerationHandling::kRegularScavenge);
- ProcessPretenuringFeedback();
+ Scavenge();
+ }
+ break;
}
+ ProcessPretenuringFeedback();
+
UpdateSurvivalStatistics(static_cast<int>(start_young_generation_size));
ConfigureInitialOldGenerationSize();
@@ -2780,12 +2759,34 @@ HeapObject Heap::AlignWithFiller(HeapObject object, int object_size,
return object;
}
-void Heap::RegisterNewArrayBuffer(JSArrayBuffer buffer) {
- ArrayBufferTracker::RegisterNew(this, buffer);
+void* Heap::AllocateExternalBackingStore(
+ const std::function<void*(size_t)>& allocate, size_t byte_length) {
+ // TODO(ulan): Perform GCs proactively based on the byte_length and
+ // the current external backing store counters.
+ void* result = allocate(byte_length);
+ if (result) return result;
+ for (int i = 0; i < 2; i++) {
+ CollectGarbage(OLD_SPACE, GarbageCollectionReason::kExternalMemoryPressure);
+ result = allocate(byte_length);
+ if (result) return result;
+ }
+ isolate()->counters()->gc_last_resort_from_handles()->Increment();
+ CollectAllAvailableGarbage(GarbageCollectionReason::kExternalMemoryPressure);
+ return allocate(byte_length);
+}
+
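For illustration, a hedged sketch of calling the retry helper with a plain malloc-based callback (heap and byte_length are assumed to be in scope; <cstdlib> malloc is purely a stand-in for the real array-buffer or external-string allocators):

  void* memory = heap->AllocateExternalBackingStore(
      [](size_t length) { return malloc(length); }, byte_length);
  if (memory == nullptr) {
    // Even the last-resort full GC could not free enough external memory.
  }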
+void Heap::RegisterBackingStore(JSArrayBuffer buffer,
+ std::shared_ptr<BackingStore> backing_store) {
+ ArrayBufferTracker::RegisterNew(this, buffer, std::move(backing_store));
}
-void Heap::UnregisterArrayBuffer(JSArrayBuffer buffer) {
- ArrayBufferTracker::Unregister(this, buffer);
+std::shared_ptr<BackingStore> Heap::UnregisterBackingStore(
+ JSArrayBuffer buffer) {
+ return ArrayBufferTracker::Unregister(this, buffer);
+}
+
+std::shared_ptr<BackingStore> Heap::LookupBackingStore(JSArrayBuffer buffer) {
+ return ArrayBufferTracker::Lookup(this, buffer);
}
void Heap::ConfigureInitialOldGenerationSize() {
@@ -3387,16 +3388,23 @@ void Heap::RegisterDeserializedObjectsForBlackAllocation(
}
}
-void Heap::NotifyObjectLayoutChange(HeapObject object, int size,
- const DisallowHeapAllocation&) {
+void Heap::NotifyObjectLayoutChange(
+ HeapObject object, const DisallowHeapAllocation&,
+ InvalidateRecordedSlots invalidate_recorded_slots) {
if (incremental_marking()->IsMarking()) {
incremental_marking()->MarkBlackAndVisitObjectDueToLayoutChange(object);
if (incremental_marking()->IsCompacting() &&
+ invalidate_recorded_slots == InvalidateRecordedSlots::kYes &&
MayContainRecordedSlots(object)) {
MemoryChunk::FromHeapObject(object)
- ->RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(object, size);
+ ->RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(object);
}
}
+ if (invalidate_recorded_slots == InvalidateRecordedSlots::kYes &&
+ MayContainRecordedSlots(object)) {
+ MemoryChunk::FromHeapObject(object)
+ ->RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(object);
+ }
#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
DCHECK(pending_layout_change_object_.is_null());
@@ -3684,8 +3692,7 @@ void Heap::MemoryPressureNotification(MemoryPressureLevel level,
isolate()->stack_guard()->RequestGC();
auto taskrunner = V8::GetCurrentPlatform()->GetForegroundTaskRunner(
reinterpret_cast<v8::Isolate*>(isolate()));
- taskrunner->PostTask(
- base::make_unique<MemoryPressureInterruptTask>(this));
+ taskrunner->PostTask(std::make_unique<MemoryPressureInterruptTask>(this));
}
}
}
@@ -3748,6 +3755,11 @@ bool Heap::InvokeNearHeapLimitCallback() {
return false;
}
+Handle<JSPromise> Heap::MeasureMemory(Handle<NativeContext> context,
+ v8::MeasureMemoryMode mode) {
+ return memory_measurement_->EnqueueRequest(context, mode);
+}
+
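For illustration, a hedged sketch of enqueueing a measurement request (v8::MeasureMemoryMode::kSummary is assumed to be one of the public enum values, and native_context is assumed to be a Handle<NativeContext> already in scope):

  Handle<JSPromise> promise =
      heap->MeasureMemory(native_context, v8::MeasureMemoryMode::kSummary);
  // The request is only queued here; MemoryMeasurement resolves the promise
  // later, once the estimate is available.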
void Heap::CollectCodeStatistics() {
TRACE_EVENT0("v8", "Heap::CollectCodeStatistics");
CodeStatistics::ResetCodeAndMetadataStatistics(isolate());
@@ -4096,7 +4108,19 @@ void CollectSlots(MemoryChunk* chunk, Address start, Address end,
}
return KEEP_SLOT;
},
- SlotSet::PREFREE_EMPTY_BUCKETS);
+ SlotSet::FREE_EMPTY_BUCKETS);
+ if (direction == OLD_TO_NEW) {
+ CHECK(chunk->SweepingDone());
+ RememberedSetSweeping::Iterate(
+ chunk,
+ [start, end, untyped](MaybeObjectSlot slot) {
+ if (start <= slot.address() && slot.address() < end) {
+ untyped->insert(slot.address());
+ }
+ return KEEP_SLOT;
+ },
+ SlotSet::FREE_EMPTY_BUCKETS);
+ }
RememberedSet<direction>::IterateTyped(
chunk, [=](SlotType type, Address slot) {
if (start <= slot && slot < end) {
@@ -4117,7 +4141,6 @@ void Heap::VerifyRememberedSetFor(HeapObject object) {
std::set<Address> old_to_new;
std::set<std::pair<SlotType, Address> > typed_old_to_new;
if (!InYoungGeneration(object)) {
- store_buffer()->MoveAllEntriesToRememberedSet();
CollectSlots<OLD_TO_NEW>(chunk, start, end, &old_to_new, &typed_old_to_new);
OldToNewSlotVerifyingVisitor visitor(&old_to_new, &typed_old_to_new,
&this->ephemeron_remembered_set_);
@@ -4288,6 +4311,7 @@ void Heap::IterateStrongRoots(RootVisitor* v, VisitMode mode) {
FixStaleLeftTrimmedHandlesVisitor left_trim_visitor(this);
isolate_->handle_scope_implementer()->Iterate(&left_trim_visitor);
isolate_->handle_scope_implementer()->Iterate(v);
+ isolate_->IterateDeferredHandles(&left_trim_visitor);
isolate_->IterateDeferredHandles(v);
v->Synchronize(VisitorSynchronization::kHandleScope);
@@ -4879,9 +4903,9 @@ HeapObject Heap::EnsureImmovableCode(HeapObject heap_object, int object_size) {
return heap_object;
}
-HeapObject Heap::AllocateRawWithLightRetry(int size, AllocationType allocation,
- AllocationOrigin origin,
- AllocationAlignment alignment) {
+HeapObject Heap::AllocateRawWithLightRetrySlowPath(
+ int size, AllocationType allocation, AllocationOrigin origin,
+ AllocationAlignment alignment) {
HeapObject result;
AllocationResult alloc = AllocateRaw(size, allocation, origin, alignment);
if (alloc.To(&result)) {
@@ -4901,12 +4925,12 @@ HeapObject Heap::AllocateRawWithLightRetry(int size, AllocationType allocation,
return HeapObject();
}
-HeapObject Heap::AllocateRawWithRetryOrFail(int size, AllocationType allocation,
- AllocationOrigin origin,
- AllocationAlignment alignment) {
+HeapObject Heap::AllocateRawWithRetryOrFailSlowPath(
+ int size, AllocationType allocation, AllocationOrigin origin,
+ AllocationAlignment alignment) {
AllocationResult alloc;
HeapObject result =
- AllocateRawWithLightRetry(size, allocation, origin, alignment);
+ AllocateRawWithLightRetrySlowPath(size, allocation, origin, alignment);
if (!result.is_null()) return result;
isolate()->counters()->gc_last_resort_from_handles()->Increment();
@@ -4979,8 +5003,6 @@ void Heap::SetUp() {
memory_allocator_.reset(
new MemoryAllocator(isolate_, MaxReserved(), code_range_size_));
- store_buffer_.reset(new StoreBuffer(this));
-
mark_compact_collector_.reset(new MarkCompactCollector(this));
scavenger_collector_.reset(new ScavengerCollector(this));
@@ -5039,6 +5061,7 @@ void Heap::SetUpSpaces() {
#endif // ENABLE_MINOR_MC
array_buffer_collector_.reset(new ArrayBufferCollector(this));
gc_idle_time_handler_.reset(new GCIdleTimeHandler());
+ memory_measurement_.reset(new MemoryMeasurement(isolate()));
memory_reducer_.reset(new MemoryReducer(this));
if (V8_UNLIKELY(TracingFlags::is_gc_stats_enabled())) {
live_object_stats_.reset(new ObjectStats(this));
@@ -5049,8 +5072,6 @@ void Heap::SetUpSpaces() {
LOG(isolate_, IntPtrTEvent("heap-capacity", Capacity()));
LOG(isolate_, IntPtrTEvent("heap-available", Available()));
- store_buffer()->SetUp();
-
mark_compact_collector()->SetUp();
#ifdef ENABLE_MINOR_MC
if (minor_mark_compact_collector() != nullptr) {
@@ -5282,8 +5303,6 @@ void Heap::TearDown() {
space_[i] = nullptr;
}
- store_buffer()->TearDown();
-
memory_allocator()->TearDown();
StrongRootsList* next = nullptr;
@@ -5293,7 +5312,6 @@ void Heap::TearDown() {
}
strong_roots_list_ = nullptr;
- store_buffer_.reset();
memory_allocator_.reset();
}
@@ -5404,13 +5422,6 @@ void Heap::CompactWeakArrayLists(AllocationType allocation) {
DCHECK_IMPLIES(allocation == AllocationType::kOld, InOldSpace(*scripts));
scripts = CompactWeakArrayList(this, scripts, allocation);
set_script_list(*scripts);
-
- Handle<WeakArrayList> no_script_list(noscript_shared_function_infos(),
- isolate());
- DCHECK_IMPLIES(allocation == AllocationType::kOld,
- InOldSpace(*no_script_list));
- no_script_list = CompactWeakArrayList(this, no_script_list, allocation);
- set_noscript_shared_function_infos(*no_script_list);
}
void Heap::AddRetainedMap(Handle<Map> map) {
@@ -5511,53 +5522,55 @@ void Heap::CheckHandleCount() {
isolate_->handle_scope_implementer()->Iterate(&v);
}
-Address* Heap::store_buffer_top_address() {
- return store_buffer()->top_address();
-}
-
-// static
-intptr_t Heap::store_buffer_mask_constant() {
- return StoreBuffer::kStoreBufferMask;
-}
-
-// static
-Address Heap::store_buffer_overflow_function_address() {
- return FUNCTION_ADDR(StoreBuffer::StoreBufferOverflow);
-}
-
void Heap::ClearRecordedSlot(HeapObject object, ObjectSlot slot) {
+#ifndef V8_DISABLE_WRITE_BARRIERS
DCHECK(!IsLargeObject(object));
Page* page = Page::FromAddress(slot.address());
if (!page->InYoungGeneration()) {
DCHECK_EQ(page->owner_identity(), OLD_SPACE);
- store_buffer()->MoveAllEntriesToRememberedSet();
- RememberedSet<OLD_TO_NEW>::Remove(page, slot.address());
+
+ if (!page->SweepingDone()) {
+ RememberedSet<OLD_TO_NEW>::Remove(page, slot.address());
+ }
}
+#endif
+}
+
+// static
+int Heap::InsertIntoRememberedSetFromCode(MemoryChunk* chunk, Address slot) {
+ RememberedSet<OLD_TO_NEW>::Insert<AccessMode::NON_ATOMIC>(chunk, slot);
+ return 0;
}
#ifdef DEBUG
void Heap::VerifyClearedSlot(HeapObject object, ObjectSlot slot) {
+#ifndef V8_DISABLE_WRITE_BARRIERS
DCHECK(!IsLargeObject(object));
if (InYoungGeneration(object)) return;
Page* page = Page::FromAddress(slot.address());
DCHECK_EQ(page->owner_identity(), OLD_SPACE);
- store_buffer()->MoveAllEntriesToRememberedSet();
- CHECK(!RememberedSet<OLD_TO_NEW>::Contains(page, slot.address()));
- // Old to old slots are filtered with invalidated slots.
+ // Slots are filtered with invalidated slots.
+ CHECK_IMPLIES(RememberedSet<OLD_TO_NEW>::Contains(page, slot.address()),
+ page->RegisteredObjectWithInvalidatedSlots<OLD_TO_NEW>(object));
CHECK_IMPLIES(RememberedSet<OLD_TO_OLD>::Contains(page, slot.address()),
page->RegisteredObjectWithInvalidatedSlots<OLD_TO_OLD>(object));
+#endif
}
#endif
void Heap::ClearRecordedSlotRange(Address start, Address end) {
+#ifndef V8_DISABLE_WRITE_BARRIERS
Page* page = Page::FromAddress(start);
DCHECK(!page->IsLargePage());
if (!page->InYoungGeneration()) {
DCHECK_EQ(page->owner_identity(), OLD_SPACE);
- store_buffer()->MoveAllEntriesToRememberedSet();
- RememberedSet<OLD_TO_NEW>::RemoveRange(page, start, end,
- SlotSet::KEEP_EMPTY_BUCKETS);
+
+ if (!page->SweepingDone()) {
+ RememberedSet<OLD_TO_NEW>::RemoveRange(page, start, end,
+ SlotSet::KEEP_EMPTY_BUCKETS);
+ }
}
+#endif
}
PagedSpace* PagedSpaceIterator::Next() {
@@ -6164,8 +6177,8 @@ void Heap::WriteBarrierForCodeSlow(Code code) {
void Heap::GenerationalBarrierSlow(HeapObject object, Address slot,
HeapObject value) {
- Heap* heap = Heap::FromWritableHeapObject(object);
- heap->store_buffer()->InsertEntry(slot);
+ MemoryChunk* chunk = MemoryChunk::FromHeapObject(object);
+ RememberedSet<OLD_TO_NEW>::Insert<AccessMode::NON_ATOMIC>(chunk, slot);
}
void Heap::RecordEphemeronKeyWrite(EphemeronHashTable table, Address slot) {
@@ -6207,7 +6220,6 @@ void Heap::WriteBarrierForRangeImpl(MemoryChunk* source_page, HeapObject object,
STATIC_ASSERT(!(kModeMask & kDoEvacuationSlotRecording) ||
(kModeMask & kDoMarking));
- StoreBuffer* store_buffer = this->store_buffer();
IncrementalMarking* incremental_marking = this->incremental_marking();
MarkCompactCollector* collector = this->mark_compact_collector();
@@ -6218,7 +6230,8 @@ void Heap::WriteBarrierForRangeImpl(MemoryChunk* source_page, HeapObject object,
if ((kModeMask & kDoGenerational) &&
Heap::InYoungGeneration(value_heap_object)) {
- store_buffer->InsertEntry(slot.address());
+ RememberedSet<OLD_TO_NEW>::Insert<AccessMode::NON_ATOMIC>(source_page,
+ slot.address());
}
if ((kModeMask & kDoMarking) &&
diff --git a/deps/v8/src/heap/heap.h b/deps/v8/src/heap/heap.h
index 2b8b963a79..182096f29c 100644
--- a/deps/v8/src/heap/heap.h
+++ b/deps/v8/src/heap/heap.h
@@ -7,6 +7,7 @@
#include <cmath>
#include <map>
+#include <memory>
#include <unordered_map>
#include <unordered_set>
#include <vector>
@@ -45,7 +46,11 @@ class TestMemoryAllocatorScope;
} // namespace heap
class IncrementalMarking;
+class BackingStore;
class JSArrayBuffer;
+class JSPromise;
+class NativeContext;
+
using v8::MemoryPressureLevel;
class AllocationObserver;
@@ -62,6 +67,7 @@ class Isolate;
class JSFinalizationGroup;
class LocalEmbedderHeapTracer;
class MemoryAllocator;
+class MemoryMeasurement;
class MemoryReducer;
class MinorMarkCompactCollector;
class ObjectIterator;
@@ -74,7 +80,6 @@ class ScavengeJob;
class Scavenger;
class ScavengerCollector;
class Space;
-class StoreBuffer;
class StressScavengeObserver;
class TimedHistogram;
class WeakObjectRetainer;
@@ -86,6 +91,8 @@ enum ArrayStorageAllocationMode {
enum class ClearRecordedSlots { kYes, kNo };
+enum class InvalidateRecordedSlots { kYes, kNo };
+
enum class ClearFreedMemoryMode { kClearFreedMemory, kDontClearFreedMemory };
enum ExternalBackingStoreType { kArrayBuffer, kExternalString, kNumTypes };
@@ -560,6 +567,9 @@ class Heap {
void RecordStats(HeapStats* stats, bool take_snapshot = false);
+ Handle<JSPromise> MeasureMemory(Handle<NativeContext> context,
+ v8::MeasureMemoryMode mode);
+
// Check new space expansion criteria and expand semispaces if it was hit.
void CheckNewSpaceExpansionCriteria();
@@ -839,12 +849,13 @@ class Heap {
void SetIsMarkingFlag(uint8_t flag) { is_marking_flag_ = flag; }
- Address* store_buffer_top_address();
+ V8_EXPORT_PRIVATE Address* store_buffer_top_address();
static intptr_t store_buffer_mask_constant();
static Address store_buffer_overflow_function_address();
void ClearRecordedSlot(HeapObject object, ObjectSlot slot);
void ClearRecordedSlotRange(Address start, Address end);
+ static int InsertIntoRememberedSetFromCode(MemoryChunk* chunk, Address slot);
#ifdef DEBUG
void VerifyClearedSlot(HeapObject object, ObjectSlot slot);
@@ -896,8 +907,13 @@ class Heap {
// The runtime uses this function to notify potentially unsafe object layout
// changes that require special synchronization with the concurrent marker.
// The old size is the size of the object before layout change.
- void NotifyObjectLayoutChange(HeapObject object, int old_size,
- const DisallowHeapAllocation&);
+  // By default, recorded slots in the object are invalidated. Pass
+  // InvalidateRecordedSlots::kNo if this is not necessary or if the caller
+  // performs the invalidation manually.
+ void NotifyObjectLayoutChange(
+ HeapObject object, const DisallowHeapAllocation&,
+ InvalidateRecordedSlots invalidate_recorded_slots =
+ InvalidateRecordedSlots::kYes);
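For illustration, the two call shapes the new parameter allows (heap and object are assumed to be in scope):

  DisallowHeapAllocation no_gc;
  // Default: recorded slots inside the object are invalidated for the caller.
  heap->NotifyObjectLayoutChange(object, no_gc);
  // The caller handles (or does not need) slot invalidation itself.
  heap->NotifyObjectLayoutChange(object, no_gc, InvalidateRecordedSlots::kNo);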
#ifdef VERIFY_HEAP
// This function checks that either
@@ -1214,16 +1230,24 @@ class Heap {
AlignWithFiller(HeapObject object, int object_size, int allocation_size,
AllocationAlignment alignment);
+ // Allocate an external backing store with the given allocation callback.
+ // If the callback fails (indicated by a nullptr result) then this function
+ // will re-try the allocation after performing GCs. This is useful for
+ // external backing stores that may be retained by (unreachable) V8 objects
+ // such as ArrayBuffers, ExternalStrings, etc.
+ //
+  // The function may also proactively trigger GCs, even if the allocation
+  // callback does not fail, in order to keep memory usage low.
+ V8_EXPORT_PRIVATE void* AllocateExternalBackingStore(
+ const std::function<void*(size_t)>& allocate, size_t byte_length);
+
// ===========================================================================
// ArrayBuffer tracking. =====================================================
// ===========================================================================
-
- // TODO(gc): API usability: encapsulate mutation of JSArrayBuffer::is_external
- // in the registration/unregistration APIs. Consider dropping the "New" from
- // "RegisterNewArrayBuffer" because one can re-register a previously
- // unregistered buffer, too, and the name is confusing.
- void RegisterNewArrayBuffer(JSArrayBuffer buffer);
- void UnregisterArrayBuffer(JSArrayBuffer buffer);
+ void RegisterBackingStore(JSArrayBuffer buffer,
+ std::shared_ptr<BackingStore> backing_store);
+ std::shared_ptr<BackingStore> UnregisterBackingStore(JSArrayBuffer buffer);
+ std::shared_ptr<BackingStore> LookupBackingStore(JSArrayBuffer buffer);
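For illustration, a minimal sketch of how these replace the old RegisterNewArrayBuffer/UnregisterArrayBuffer pair (buffer and backing_store are assumed to be in scope):

  heap->RegisterBackingStore(buffer, backing_store);
  // Lookup returns the currently registered store without removing it.
  std::shared_ptr<BackingStore> current = heap->LookupBackingStore(buffer);
  // On detach, ownership is handed back to the caller via the shared_ptr.
  std::shared_ptr<BackingStore> detached = heap->UnregisterBackingStore(buffer);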
// ===========================================================================
// Allocation site tracking. =================================================
@@ -1332,9 +1356,7 @@ class Heap {
// per call to mmap(). The page is only reclaimed when the process is
// killed. Confine the hint to a 32-bit section of the virtual address
// space. See crbug.com/700928.
- uintptr_t offset =
- reinterpret_cast<uintptr_t>(v8::internal::GetRandomMmapAddr()) &
- kMmapRegionMask;
+ uintptr_t offset = reinterpret_cast<uintptr_t>(result) & kMmapRegionMask;
result = reinterpret_cast<void*>(mmap_region_base_ + offset);
#endif // V8_OS_MACOSX
#endif // V8_TARGET_ARCH_X64
@@ -1348,8 +1370,6 @@ class Heap {
inline int MaxNumberToStringCacheSize() const;
private:
- class SkipStoreBufferScope;
-
using ExternalStringTableUpdaterCallback = String (*)(Heap* heap,
FullObjectSlot pointer);
@@ -1462,11 +1482,7 @@ class Heap {
ROOT_LIST(ROOT_ACCESSOR)
#undef ROOT_ACCESSOR
- StoreBuffer* store_buffer() { return store_buffer_.get(); }
-
- void set_current_gc_flags(int flags) {
- current_gc_flags_ = flags;
- }
+ void set_current_gc_flags(int flags) { current_gc_flags_ = flags; }
inline bool ShouldReduceMemory() const {
return (current_gc_flags_ & kReduceMemoryFootprintMask) != 0;
@@ -1732,20 +1748,23 @@ class Heap {
AllocationOrigin origin = AllocationOrigin::kRuntime,
AllocationAlignment alignment = kWordAligned);
+  // This method tries the quick inline allocation path first (only taken for
+  // AllocationType::kYoung); otherwise it falls back to the slower path
+  // selected by the retry mode.
+ enum AllocationRetryMode { kLightRetry, kRetryOrFail };
+ template <AllocationRetryMode mode>
+ V8_WARN_UNUSED_RESULT inline HeapObject AllocateRawWith(
+ int size, AllocationType allocation,
+ AllocationOrigin origin = AllocationOrigin::kRuntime,
+ AllocationAlignment alignment = kWordAligned);
+
// This method will try to perform an allocation of a given size of a given
// AllocationType. If the allocation fails, a regular full garbage collection
// is triggered and the allocation is retried. This is performed multiple
// times. If after that retry procedure the allocation still fails nullptr is
// returned.
- HeapObject AllocateRawWithLightRetry(
+ V8_WARN_UNUSED_RESULT HeapObject AllocateRawWithLightRetrySlowPath(
int size, AllocationType allocation, AllocationOrigin origin,
AllocationAlignment alignment = kWordAligned);
- HeapObject AllocateRawWithLightRetry(
- int size, AllocationType allocation,
- AllocationAlignment alignment = kWordAligned) {
- return AllocateRawWithLightRetry(size, allocation,
- AllocationOrigin::kRuntime, alignment);
- }
// This method will try to perform an allocation of a given size of a given
// AllocationType. If the allocation fails, a regular full garbage collection
@@ -1753,17 +1772,11 @@ class Heap {
// times. If after that retry procedure the allocation still fails a "hammer"
// garbage collection is triggered which tries to significantly reduce memory.
// If the allocation still fails after that a fatal error is thrown.
- HeapObject AllocateRawWithRetryOrFail(
+ V8_WARN_UNUSED_RESULT HeapObject AllocateRawWithRetryOrFailSlowPath(
int size, AllocationType allocation, AllocationOrigin origin,
AllocationAlignment alignment = kWordAligned);
- HeapObject AllocateRawWithRetryOrFail(
- int size, AllocationType allocation,
- AllocationAlignment alignment = kWordAligned) {
- return AllocateRawWithRetryOrFail(size, allocation,
- AllocationOrigin::kRuntime, alignment);
- }
- HeapObject AllocateRawCodeInLargeObjectSpace(int size);
+ V8_WARN_UNUSED_RESULT HeapObject AllocateRawCodeInLargeObjectSpace(int size);
// Allocates a heap object based on the map.
V8_WARN_UNUSED_RESULT AllocationResult Allocate(Map map,
@@ -1980,10 +1993,10 @@ class Heap {
std::unique_ptr<ScavengerCollector> scavenger_collector_;
std::unique_ptr<ArrayBufferCollector> array_buffer_collector_;
std::unique_ptr<MemoryAllocator> memory_allocator_;
- std::unique_ptr<StoreBuffer> store_buffer_;
std::unique_ptr<IncrementalMarking> incremental_marking_;
std::unique_ptr<ConcurrentMarking> concurrent_marking_;
std::unique_ptr<GCIdleTimeHandler> gc_idle_time_handler_;
+ std::unique_ptr<MemoryMeasurement> memory_measurement_;
std::unique_ptr<MemoryReducer> memory_reducer_;
std::unique_ptr<ObjectStats> live_object_stats_;
std::unique_ptr<ObjectStats> dead_object_stats_;
@@ -2101,7 +2114,6 @@ class Heap {
friend class Scavenger;
friend class ScavengerCollector;
friend class Space;
- friend class StoreBuffer;
friend class Sweeper;
friend class heap::TestMemoryAllocatorScope;
@@ -2152,7 +2164,6 @@ class HeapStats {
intptr_t* end_marker; // 27
};
-
class AlwaysAllocateScope {
public:
explicit inline AlwaysAllocateScope(Heap* heap);
@@ -2232,7 +2243,6 @@ class VerifyPointersVisitor : public ObjectVisitor, public RootVisitor {
Heap* heap_;
};
-
// Verify that all objects are Smis.
class VerifySmisVisitor : public RootVisitor {
public:
@@ -2263,7 +2273,7 @@ class V8_EXPORT_PRIVATE SpaceIterator : public Malloced {
private:
Heap* heap_;
- int current_space_; // from enum AllocationSpace.
+ int current_space_; // from enum AllocationSpace.
};
// A HeapObjectIterator provides iteration over the entire non-read-only heap.
diff --git a/deps/v8/src/heap/incremental-marking-job.cc b/deps/v8/src/heap/incremental-marking-job.cc
index c6e607c3ea..1f924ff139 100644
--- a/deps/v8/src/heap/incremental-marking-job.cc
+++ b/deps/v8/src/heap/incremental-marking-job.cc
@@ -54,24 +54,24 @@ void IncrementalMarkingJob::ScheduleTask(Heap* heap, TaskType task_type) {
V8::GetCurrentPlatform()->GetForegroundTaskRunner(isolate);
if (task_type == TaskType::kNormal) {
if (taskrunner->NonNestableTasksEnabled()) {
- taskrunner->PostNonNestableTask(base::make_unique<Task>(
+ taskrunner->PostNonNestableTask(std::make_unique<Task>(
heap->isolate(), this,
EmbedderHeapTracer::EmbedderStackState::kEmpty, task_type));
} else {
- taskrunner->PostTask(base::make_unique<Task>(
+ taskrunner->PostTask(std::make_unique<Task>(
heap->isolate(), this,
EmbedderHeapTracer::EmbedderStackState::kUnknown, task_type));
}
} else {
if (taskrunner->NonNestableDelayedTasksEnabled()) {
taskrunner->PostNonNestableDelayedTask(
- base::make_unique<Task>(
+ std::make_unique<Task>(
heap->isolate(), this,
EmbedderHeapTracer::EmbedderStackState::kEmpty, task_type),
kDelayInSeconds);
} else {
taskrunner->PostDelayedTask(
- base::make_unique<Task>(
+ std::make_unique<Task>(
heap->isolate(), this,
EmbedderHeapTracer::EmbedderStackState::kUnknown, task_type),
kDelayInSeconds);
diff --git a/deps/v8/src/heap/invalidated-slots-inl.h b/deps/v8/src/heap/invalidated-slots-inl.h
index 35a08108f6..546667b2b2 100644
--- a/deps/v8/src/heap/invalidated-slots-inl.h
+++ b/deps/v8/src/heap/invalidated-slots-inl.h
@@ -24,42 +24,40 @@ bool InvalidatedSlotsFilter::IsValid(Address slot) {
DCHECK_LE(last_slot_, slot);
last_slot_ = slot;
#endif
- while (slot >= invalidated_end_) {
- ++iterator_;
- if (iterator_ != iterator_end_) {
- // Invalidated ranges must not overlap.
- DCHECK_LE(invalidated_end_, iterator_->first.address());
- invalidated_start_ = iterator_->first.address();
- invalidated_end_ = invalidated_start_ + iterator_->second;
- invalidated_object_ = HeapObject();
- invalidated_object_size_ = 0;
- } else {
- invalidated_start_ = sentinel_;
- invalidated_end_ = sentinel_;
- }
- }
- // Now the invalidated region ends after the slot.
if (slot < invalidated_start_) {
- // The invalidated region starts after the slot.
return true;
}
- // The invalidated region includes the slot.
- // Ask the object if the slot is valid.
- if (invalidated_object_.is_null()) {
- invalidated_object_ = HeapObject::FromAddress(invalidated_start_);
- DCHECK(!invalidated_object_.IsFiller());
- invalidated_object_size_ =
- invalidated_object_.SizeFromMap(invalidated_object_.map());
+
+ while (slot >= next_invalidated_start_) {
+ NextInvalidatedObject();
+ }
+
+ HeapObject invalidated_object = HeapObject::FromAddress(invalidated_start_);
+
+ if (invalidated_size_ == 0) {
+ DCHECK(invalidated_object.map().IsMap());
+ invalidated_size_ = invalidated_object.Size();
}
+
int offset = static_cast<int>(slot - invalidated_start_);
DCHECK_GT(offset, 0);
- DCHECK_LE(invalidated_object_size_,
- static_cast<int>(invalidated_end_ - invalidated_start_));
+ if (offset < invalidated_size_)
+ return invalidated_object.IsValidSlot(invalidated_object.map(), offset);
+
+ NextInvalidatedObject();
+ return true;
+}
+
+void InvalidatedSlotsFilter::NextInvalidatedObject() {
+ invalidated_start_ = next_invalidated_start_;
+ invalidated_size_ = 0;
- if (offset >= invalidated_object_size_) {
- return slots_in_free_space_are_valid_;
+ if (iterator_ == iterator_end_) {
+ next_invalidated_start_ = sentinel_;
+ } else {
+ next_invalidated_start_ = iterator_->address();
+ iterator_++;
}
- return invalidated_object_.IsValidSlot(invalidated_object_.map(), offset);
}
void InvalidatedSlotsCleanup::Free(Address free_start, Address free_end) {
@@ -72,35 +70,25 @@ void InvalidatedSlotsCleanup::Free(Address free_start, Address free_end) {
if (iterator_ == iterator_end_) return;
- // Ignore invalidated objects before free region
- while (free_start >= invalidated_end_) {
+ // Ignore invalidated objects that start before free region
+ while (invalidated_start_ < free_start) {
++iterator_;
NextInvalidatedObject();
}
- // Loop here: Free region might contain multiple invalidated objects
- while (free_end > invalidated_start_) {
- // Case: Free region starts before current invalidated object
- if (free_start <= invalidated_start_) {
- iterator_ = invalidated_slots_->erase(iterator_);
-
- } else {
- // Case: Free region starts within current invalidated object
- // (Can happen for right-trimmed objects)
- iterator_++;
- }
-
+ // Remove all invalidated objects that start within
+ // free region.
+ while (invalidated_start_ < free_end) {
+ iterator_ = invalidated_slots_->erase(iterator_);
NextInvalidatedObject();
}
}
void InvalidatedSlotsCleanup::NextInvalidatedObject() {
if (iterator_ != iterator_end_) {
- invalidated_start_ = iterator_->first.address();
- invalidated_end_ = invalidated_start_ + iterator_->second;
+ invalidated_start_ = iterator_->address();
} else {
invalidated_start_ = sentinel_;
- invalidated_end_ = sentinel_;
}
}
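For illustration, a hedged sketch of how the filter is typically consumed while walking a remembered set (chunk is assumed to be in scope; slots must be queried in ascending address order, as the last_slot_ DCHECK above enforces):

  InvalidatedSlotsFilter filter = InvalidatedSlotsFilter::OldToNew(chunk);
  RememberedSet<OLD_TO_NEW>::Iterate(
      chunk,
      [&filter](MaybeObjectSlot slot) {
        // Keep only slots that are still valid inside objects whose layout
        // changed; drop the rest.
        return filter.IsValid(slot.address()) ? KEEP_SLOT : REMOVE_SLOT;
      },
      SlotSet::FREE_EMPTY_BUCKETS);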
diff --git a/deps/v8/src/heap/invalidated-slots.cc b/deps/v8/src/heap/invalidated-slots.cc
index 8fa1518d68..9f29af218b 100644
--- a/deps/v8/src/heap/invalidated-slots.cc
+++ b/deps/v8/src/heap/invalidated-slots.cc
@@ -3,52 +3,35 @@
// found in the LICENSE file.
#include "src/heap/invalidated-slots.h"
+#include "src/heap/invalidated-slots-inl.h"
#include "src/heap/spaces.h"
+#include "src/objects/objects-inl.h"
namespace v8 {
namespace internal {
InvalidatedSlotsFilter InvalidatedSlotsFilter::OldToOld(MemoryChunk* chunk) {
- // The sweeper removes invalid slots and makes free space available for
- // allocation. Slots for new objects can be recorded in the free space.
- // Note that we cannot simply check for SweepingDone because pages in large
- // object space are not swept but have SweepingDone() == true.
- bool slots_in_free_space_are_valid =
- chunk->SweepingDone() && chunk->InOldSpace();
- return InvalidatedSlotsFilter(chunk, chunk->invalidated_slots<OLD_TO_OLD>(),
- slots_in_free_space_are_valid);
+ return InvalidatedSlotsFilter(chunk, chunk->invalidated_slots<OLD_TO_OLD>());
}
InvalidatedSlotsFilter InvalidatedSlotsFilter::OldToNew(MemoryChunk* chunk) {
- // Always treat these slots as valid for old-to-new for now. Invalid
- // old-to-new slots are always cleared.
- bool slots_in_free_space_are_valid = true;
- return InvalidatedSlotsFilter(chunk, chunk->invalidated_slots<OLD_TO_NEW>(),
- slots_in_free_space_are_valid);
+ return InvalidatedSlotsFilter(chunk, chunk->invalidated_slots<OLD_TO_NEW>());
}
InvalidatedSlotsFilter::InvalidatedSlotsFilter(
- MemoryChunk* chunk, InvalidatedSlots* invalidated_slots,
- bool slots_in_free_space_are_valid) {
- // Adjust slots_in_free_space_are_valid_ if more spaces are added.
- DCHECK_IMPLIES(invalidated_slots != nullptr,
- chunk->InOldSpace() || chunk->InLargeObjectSpace());
-
- slots_in_free_space_are_valid_ = slots_in_free_space_are_valid;
+ MemoryChunk* chunk, InvalidatedSlots* invalidated_slots) {
invalidated_slots = invalidated_slots ? invalidated_slots : &empty_;
iterator_ = invalidated_slots->begin();
iterator_end_ = invalidated_slots->end();
sentinel_ = chunk->area_end();
- if (iterator_ != iterator_end_) {
- invalidated_start_ = iterator_->first.address();
- invalidated_end_ = invalidated_start_ + iterator_->second;
- } else {
- invalidated_start_ = sentinel_;
- invalidated_end_ = sentinel_;
- }
- // These values will be lazily set when needed.
- invalidated_object_size_ = 0;
+
+ // Invoke NextInvalidatedObject twice, to initialize
+ // invalidated_start_ to the first invalidated object and
+ // next_invalidated_start_ to the second one.
+ NextInvalidatedObject();
+ NextInvalidatedObject();
+
#ifdef DEBUG
last_slot_ = chunk->area_start();
#endif
@@ -69,13 +52,7 @@ InvalidatedSlotsCleanup::InvalidatedSlotsCleanup(
iterator_end_ = invalidated_slots_->end();
sentinel_ = chunk->area_end();
- if (iterator_ != iterator_end_) {
- invalidated_start_ = iterator_->first.address();
- invalidated_end_ = invalidated_start_ + iterator_->second;
- } else {
- invalidated_start_ = sentinel_;
- invalidated_end_ = sentinel_;
- }
+ NextInvalidatedObject();
#ifdef DEBUG
last_free_ = chunk->area_start();
diff --git a/deps/v8/src/heap/invalidated-slots.h b/deps/v8/src/heap/invalidated-slots.h
index 4a72271910..15be3ce44c 100644
--- a/deps/v8/src/heap/invalidated-slots.h
+++ b/deps/v8/src/heap/invalidated-slots.h
@@ -5,7 +5,7 @@
#ifndef V8_HEAP_INVALIDATED_SLOTS_H_
#define V8_HEAP_INVALIDATED_SLOTS_H_
-#include <map>
+#include <set>
#include <stack>
#include "src/base/atomic-utils.h"
@@ -20,7 +20,7 @@ namespace internal {
 // that potentially invalidates slots recorded concurrently.
-using InvalidatedSlots = std::map<HeapObject, int, Object::Comparer>;
+using InvalidatedSlots = std::set<HeapObject, Object::Comparer>;
// This class provides IsValid predicate that takes into account the set
// of invalidated objects in the given memory chunk.
@@ -34,8 +34,7 @@ class V8_EXPORT_PRIVATE InvalidatedSlotsFilter {
static InvalidatedSlotsFilter OldToNew(MemoryChunk* chunk);
explicit InvalidatedSlotsFilter(MemoryChunk* chunk,
- InvalidatedSlots* invalidated_slots,
- bool slots_in_free_space_are_valid);
+ InvalidatedSlots* invalidated_slots);
inline bool IsValid(Address slot);
private:
@@ -43,14 +42,15 @@ class V8_EXPORT_PRIVATE InvalidatedSlotsFilter {
InvalidatedSlots::const_iterator iterator_end_;
Address sentinel_;
Address invalidated_start_;
- Address invalidated_end_;
- HeapObject invalidated_object_;
- int invalidated_object_size_;
- bool slots_in_free_space_are_valid_;
+ Address next_invalidated_start_;
+ int invalidated_size_;
InvalidatedSlots empty_;
#ifdef DEBUG
Address last_slot_;
#endif
+
+ private:
+ inline void NextInvalidatedObject();
};
class V8_EXPORT_PRIVATE InvalidatedSlotsCleanup {
@@ -71,7 +71,6 @@ class V8_EXPORT_PRIVATE InvalidatedSlotsCleanup {
Address sentinel_;
Address invalidated_start_;
- Address invalidated_end_;
inline void NextInvalidatedObject();
#ifdef DEBUG
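
With the container switched from std::map to std::set, registering an invalidated object no longer requires knowing its size up front; only the object itself is recorded, and the size is derived later from its map. Below is a minimal before/after illustration using plain addresses as stand-ins (the real alias is keyed by HeapObject with Object::Comparer).

    #include <cstdint>
    #include <map>
    #include <set>

    // Before: invalidated object start -> object size before the layout change.
    using OldStyleInvalidatedSlots = std::map<uintptr_t, int>;
    // After: just the set of invalidated object starts; sizes are derived lazily.
    using NewStyleInvalidatedSlots = std::set<uintptr_t>;

    int main() {
      OldStyleInvalidatedSlots before;
      before.emplace(0x1000, 128);  // size had to be captured at registration time
      NewStyleInvalidatedSlots after;
      after.insert(0x1000);         // size comes from the object's map when needed
      return 0;
    }
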
diff --git a/deps/v8/src/heap/mark-compact-inl.h b/deps/v8/src/heap/mark-compact-inl.h
index cf6d96cef8..ed7e251f44 100644
--- a/deps/v8/src/heap/mark-compact-inl.h
+++ b/deps/v8/src/heap/mark-compact-inl.h
@@ -485,7 +485,8 @@ void MarkCompactCollector::RecordSlot(HeapObject object, HeapObjectSlot slot,
MemoryChunk* source_page = MemoryChunk::FromHeapObject(object);
if (target_page->IsEvacuationCandidate<AccessMode::ATOMIC>() &&
!source_page->ShouldSkipEvacuationSlotRecording<AccessMode::ATOMIC>()) {
- RememberedSet<OLD_TO_OLD>::Insert(source_page, slot.address());
+ RememberedSet<OLD_TO_OLD>::Insert<AccessMode::ATOMIC>(source_page,
+ slot.address());
}
}
@@ -493,7 +494,8 @@ void MarkCompactCollector::RecordSlot(MemoryChunk* source_page,
HeapObjectSlot slot, HeapObject target) {
MemoryChunk* target_page = MemoryChunk::FromHeapObject(target);
if (target_page->IsEvacuationCandidate<AccessMode::ATOMIC>()) {
- RememberedSet<OLD_TO_OLD>::Insert(source_page, slot.address());
+ RememberedSet<OLD_TO_OLD>::Insert<AccessMode::ATOMIC>(source_page,
+ slot.address());
}
}
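
The Insert calls above now spell out AccessMode::ATOMIC because the template's default argument is dropped in the remembered-set.h hunk further down, so every call site must state whether it may race with concurrent inserters. The following toy, self-contained version of that calling convention uses made-up types (ToySlotSet is not V8's SlotSet) purely to show the shape of the call sites.

    #include <atomic>
    #include <cstdint>

    enum class AccessMode { ATOMIC, NON_ATOMIC };

    // Toy slot recorder mirroring the new convention: the access mode is an
    // explicit template argument at every call site instead of a default.
    struct ToySlotSet {
      std::atomic<uint32_t> bits{0};

      template <AccessMode mode>
      void Insert(int bit) {
        if (mode == AccessMode::ATOMIC) {
          bits.fetch_or(1u << bit, std::memory_order_relaxed);
        } else {
          // Safe only when no other thread touches this set concurrently.
          bits.store(bits.load(std::memory_order_relaxed) | (1u << bit),
                     std::memory_order_relaxed);
        }
      }
    };

    int main() {
      ToySlotSet set;
      set.Insert<AccessMode::ATOMIC>(3);      // e.g. while marking runs concurrently
      set.Insert<AccessMode::NON_ATOMIC>(7);  // e.g. inside the atomic GC pause
      return 0;
    }
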
diff --git a/deps/v8/src/heap/mark-compact.cc b/deps/v8/src/heap/mark-compact.cc
index f7067a60ea..c18b2652d7 100644
--- a/deps/v8/src/heap/mark-compact.cc
+++ b/deps/v8/src/heap/mark-compact.cc
@@ -2080,12 +2080,13 @@ void MarkCompactCollector::FlushBytecodeFromSFI(
MemoryChunk* chunk = MemoryChunk::FromAddress(compiled_data_start);
// Clear any recorded slots for the compiled data as being invalid.
+ DCHECK_NULL(chunk->sweeping_slot_set());
RememberedSet<OLD_TO_NEW>::RemoveRange(
chunk, compiled_data_start, compiled_data_start + compiled_data_size,
- SlotSet::PREFREE_EMPTY_BUCKETS);
+ SlotSet::FREE_EMPTY_BUCKETS);
RememberedSet<OLD_TO_OLD>::RemoveRange(
chunk, compiled_data_start, compiled_data_start + compiled_data_size,
- SlotSet::PREFREE_EMPTY_BUCKETS);
+ SlotSet::FREE_EMPTY_BUCKETS);
// Swap the map, using set_map_after_allocation to avoid verify heap checks
// which are not necessary since we are doing this during the GC atomic pause.
@@ -2233,12 +2234,12 @@ void MarkCompactCollector::RightTrimDescriptorArray(DescriptorArray array,
DCHECK_LE(0, new_nof_all_descriptors);
Address start = array.GetDescriptorSlot(new_nof_all_descriptors).address();
Address end = array.GetDescriptorSlot(old_nof_all_descriptors).address();
- RememberedSet<OLD_TO_NEW>::RemoveRange(MemoryChunk::FromHeapObject(array),
- start, end,
- SlotSet::PREFREE_EMPTY_BUCKETS);
- RememberedSet<OLD_TO_OLD>::RemoveRange(MemoryChunk::FromHeapObject(array),
- start, end,
- SlotSet::PREFREE_EMPTY_BUCKETS);
+ MemoryChunk* chunk = MemoryChunk::FromHeapObject(array);
+ DCHECK_NULL(chunk->sweeping_slot_set());
+ RememberedSet<OLD_TO_NEW>::RemoveRange(chunk, start, end,
+ SlotSet::FREE_EMPTY_BUCKETS);
+ RememberedSet<OLD_TO_OLD>::RemoveRange(chunk, start, end,
+ SlotSet::FREE_EMPTY_BUCKETS);
heap()->CreateFillerObjectAt(start, static_cast<int>(end - start),
ClearRecordedSlots::kNo);
array.set_number_of_all_descriptors(new_nof_all_descriptors);
@@ -3411,15 +3412,32 @@ class RememberedSetUpdatingItem : public UpdatingItem {
void UpdateUntypedPointers() {
if (chunk_->slot_set<OLD_TO_NEW, AccessMode::NON_ATOMIC>() != nullptr) {
+ InvalidatedSlotsFilter filter = InvalidatedSlotsFilter::OldToNew(chunk_);
RememberedSet<OLD_TO_NEW>::Iterate(
chunk_,
- [this](MaybeObjectSlot slot) {
+ [this, &filter](MaybeObjectSlot slot) {
+ if (!filter.IsValid(slot.address())) return REMOVE_SLOT;
return CheckAndUpdateOldToNewSlot(slot);
},
- SlotSet::PREFREE_EMPTY_BUCKETS);
+ SlotSet::FREE_EMPTY_BUCKETS);
}
- DCHECK_NULL(chunk_->invalidated_slots<OLD_TO_NEW>());
+ if (chunk_->sweeping_slot_set<AccessMode::NON_ATOMIC>()) {
+ InvalidatedSlotsFilter filter = InvalidatedSlotsFilter::OldToNew(chunk_);
+ RememberedSetSweeping::Iterate(
+ chunk_,
+ [this, &filter](MaybeObjectSlot slot) {
+ if (!filter.IsValid(slot.address())) return REMOVE_SLOT;
+ return CheckAndUpdateOldToNewSlot(slot);
+ },
+ SlotSet::FREE_EMPTY_BUCKETS);
+ }
+
+ if (chunk_->invalidated_slots<OLD_TO_NEW>() != nullptr) {
+ // The invalidated slots are not needed after old-to-new slots were
+ // processed.
+ chunk_->ReleaseInvalidatedSlots<OLD_TO_NEW>();
+ }
if ((updating_mode_ == RememberedSetUpdatingMode::ALL) &&
(chunk_->slot_set<OLD_TO_OLD, AccessMode::NON_ATOMIC>() != nullptr)) {
@@ -3430,17 +3448,11 @@ class RememberedSetUpdatingItem : public UpdatingItem {
if (!filter.IsValid(slot.address())) return REMOVE_SLOT;
return UpdateSlot<AccessMode::NON_ATOMIC>(slot);
},
- SlotSet::PREFREE_EMPTY_BUCKETS);
+ SlotSet::FREE_EMPTY_BUCKETS);
+ chunk_->ReleaseSlotSet<OLD_TO_OLD>();
}
if ((updating_mode_ == RememberedSetUpdatingMode::ALL) &&
chunk_->invalidated_slots<OLD_TO_OLD>() != nullptr) {
-#ifdef DEBUG
- for (auto object_size : *chunk_->invalidated_slots<OLD_TO_OLD>()) {
- HeapObject object = object_size.first;
- int size = object_size.second;
- DCHECK_LE(object.SizeFromMap(object.map()), size);
- }
-#endif
// The invalidated slots are not needed after old-to-old slots were
      // processed.
chunk_->ReleaseInvalidatedSlots<OLD_TO_OLD>();
@@ -3557,15 +3569,18 @@ int MarkCompactCollectorBase::CollectRememberedSetUpdatingItems(
const bool contains_old_to_new_slots =
chunk->slot_set<OLD_TO_NEW>() != nullptr ||
chunk->typed_slot_set<OLD_TO_NEW>() != nullptr;
+ const bool contains_old_to_new_sweeping_slots =
+ chunk->sweeping_slot_set() != nullptr;
const bool contains_old_to_old_invalidated_slots =
chunk->invalidated_slots<OLD_TO_OLD>() != nullptr;
const bool contains_old_to_new_invalidated_slots =
chunk->invalidated_slots<OLD_TO_NEW>() != nullptr;
- if (!contains_old_to_new_slots && !contains_old_to_old_slots &&
- !contains_old_to_old_invalidated_slots &&
+ if (!contains_old_to_new_slots && !contains_old_to_new_sweeping_slots &&
+ !contains_old_to_old_slots && !contains_old_to_old_invalidated_slots &&
!contains_old_to_new_invalidated_slots)
continue;
if (mode == RememberedSetUpdatingMode::ALL || contains_old_to_new_slots ||
+ contains_old_to_new_sweeping_slots ||
contains_old_to_old_invalidated_slots ||
contains_old_to_new_invalidated_slots) {
job->AddItem(CreateRememberedSetUpdatingItem(chunk, mode));
@@ -3773,11 +3788,22 @@ void MarkCompactCollector::PostProcessEvacuationCandidates() {
 // might not have recorded them in the first place.
// Remove outdated slots.
+ RememberedSetSweeping::RemoveRange(page, page->address(),
+ failed_object.address(),
+ SlotSet::FREE_EMPTY_BUCKETS);
RememberedSet<OLD_TO_NEW>::RemoveRange(page, page->address(),
failed_object.address(),
- SlotSet::PREFREE_EMPTY_BUCKETS);
+ SlotSet::FREE_EMPTY_BUCKETS);
RememberedSet<OLD_TO_NEW>::RemoveRangeTyped(page, page->address(),
failed_object.address());
+
+ // Remove invalidated slots.
+ if (failed_object.address() > page->area_start()) {
+ InvalidatedSlotsCleanup old_to_new_cleanup =
+ InvalidatedSlotsCleanup::OldToNew(page);
+ old_to_new_cleanup.Free(page->area_start(), failed_object.address());
+ }
+
// Recompute live bytes.
LiveObjectVisitor::RecomputeLiveBytes(page, non_atomic_marking_state());
// Re-record slots.
@@ -4350,11 +4376,7 @@ void MinorMarkCompactCollector::CollectGarbage() {
RememberedSet<OLD_TO_NEW>::IterateMemoryChunks(
heap(), [](MemoryChunk* chunk) {
- if (chunk->SweepingDone()) {
- RememberedSet<OLD_TO_NEW>::FreeEmptyBuckets(chunk);
- } else {
- RememberedSet<OLD_TO_NEW>::PreFreeEmptyBuckets(chunk);
- }
+ RememberedSet<OLD_TO_NEW>::FreeEmptyBuckets(chunk);
});
heap()->account_external_memory_concurrently_freed();
@@ -4651,7 +4673,15 @@ class PageMarkingItem : public MarkingItem {
if (!filter.IsValid(slot.address())) return REMOVE_SLOT;
return CheckAndMarkObject(task, slot);
},
- SlotSet::PREFREE_EMPTY_BUCKETS);
+ SlotSet::FREE_EMPTY_BUCKETS);
+ filter = InvalidatedSlotsFilter::OldToNew(chunk_);
+ RememberedSetSweeping::Iterate(
+ chunk_,
+ [this, task, &filter](MaybeObjectSlot slot) {
+ if (!filter.IsValid(slot.address())) return REMOVE_SLOT;
+ return CheckAndMarkObject(task, slot);
+ },
+ SlotSet::FREE_EMPTY_BUCKETS);
}
void MarkTypedPointers(YoungGenerationMarkingTask* task) {
diff --git a/deps/v8/src/heap/memory-measurement.cc b/deps/v8/src/heap/memory-measurement.cc
new file mode 100644
index 0000000000..62cd5dadb9
--- /dev/null
+++ b/deps/v8/src/heap/memory-measurement.cc
@@ -0,0 +1,80 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/memory-measurement.h"
+
+#include "src/execution/isolate-inl.h"
+#include "src/execution/isolate.h"
+#include "src/heap/factory-inl.h"
+#include "src/heap/factory.h"
+#include "src/objects/js-promise.h"
+
+namespace v8 {
+namespace internal {
+
+MemoryMeasurement::MemoryMeasurement(Isolate* isolate) : isolate_(isolate) {}
+
+namespace {
+
+class MemoryMeasurementResultBuilder {
+ public:
+ MemoryMeasurementResultBuilder(Isolate* isolate, Factory* factory)
+ : isolate_(isolate), factory_(factory) {
+ result_ = NewJSObject();
+ }
+
+ void AddTotals(size_t estimate, size_t lower_bound, size_t upper_bound) {
+ Handle<JSObject> total = NewJSObject();
+ Handle<Object> estimate_obj = NewNumber(estimate);
+ AddProperty(total, factory_->jsMemoryEstimate_string(), estimate_obj);
+ Handle<Object> range = NewRange(lower_bound, upper_bound);
+ AddProperty(total, factory_->jsMemoryRange_string(), range);
+ AddProperty(result_, factory_->total_string(), total);
+ }
+
+ Handle<JSObject> Build() { return result_; }
+
+ private:
+ Handle<Object> NewNumber(size_t value) {
+ return factory_->NewNumberFromSize(value);
+ }
+
+ Handle<JSObject> NewJSObject() {
+ return factory_->NewJSObject(isolate_->object_function());
+ }
+
+ Handle<JSArray> NewRange(size_t lower_bound, size_t upper_bound) {
+ Handle<Object> lower = NewNumber(lower_bound);
+ Handle<Object> upper = NewNumber(upper_bound);
+ Handle<FixedArray> elements = factory_->NewFixedArray(2);
+ elements->set(0, *lower);
+ elements->set(1, *upper);
+ return factory_->NewJSArrayWithElements(elements);
+ }
+
+ void AddProperty(Handle<JSObject> object, Handle<String> name,
+ Handle<Object> value) {
+ JSObject::AddProperty(isolate_, object, name, value, NONE);
+ }
+
+ Isolate* isolate_;
+ Factory* factory_;
+ Handle<JSObject> result_;
+};
+
+} // anonymous namespace
+
+Handle<JSPromise> MemoryMeasurement::EnqueueRequest(
+ Handle<NativeContext> context, v8::MeasureMemoryMode mode) {
+ Handle<JSPromise> promise = isolate_->factory()->NewJSPromise();
+ MemoryMeasurementResultBuilder result_builder(isolate_, isolate_->factory());
+ result_builder.AddTotals(isolate_->heap()->SizeOfObjects(), 0,
+ isolate_->heap()->SizeOfObjects());
+ Handle<JSObject> result = result_builder.Build();
+ JSPromise::Resolve(promise, result).ToHandleChecked();
+ return promise;
+}
+
+} // namespace internal
+} // namespace v8
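
The builder above produces an object of the shape { total: { estimate, range } }, and this first placeholder implementation resolves the promise immediately with estimate == upper bound == Heap::SizeOfObjects() and lower bound == 0, ignoring the context and mode arguments for now. A plain-struct sketch of the observable result follows; the field names are assumed from the *_string roots used above, and the struct is illustrative only, not a V8 type.

    #include <cstddef>
    #include <utility>

    // Rough shape of the object built by MemoryMeasurementResultBuilder:
    //   { total: { jsMemoryEstimate: <n>, jsMemoryRange: [lower, upper] } }
    struct ToyMemoryMeasurementResult {
      struct Total {
        std::size_t js_memory_estimate;
        std::pair<std::size_t, std::size_t> js_memory_range;  // [lower, upper]
      } total;
    };

    // Mirrors the placeholder behaviour of EnqueueRequest: estimate and upper
    // bound are the current heap size, lower bound is zero. heap_size stands in
    // for Heap::SizeOfObjects().
    ToyMemoryMeasurementResult BuildPlaceholderResult(std::size_t heap_size) {
      return {{heap_size, {0, heap_size}}};
    }
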
diff --git a/deps/v8/src/heap/memory-measurement.h b/deps/v8/src/heap/memory-measurement.h
new file mode 100644
index 0000000000..6de7c8c970
--- /dev/null
+++ b/deps/v8/src/heap/memory-measurement.h
@@ -0,0 +1,29 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_MEMORY_MEASUREMENT_H_
+#define V8_HEAP_MEMORY_MEASUREMENT_H_
+
+#include "src/common/globals.h"
+#include "src/objects/objects.h"
+
+namespace v8 {
+namespace internal {
+
+class Heap;
+
+class V8_EXPORT_PRIVATE MemoryMeasurement {
+ public:
+ explicit MemoryMeasurement(Isolate* isolate);
+ Handle<JSPromise> EnqueueRequest(Handle<NativeContext> context,
+ v8::MeasureMemoryMode mode);
+
+ private:
+ Isolate* isolate_;
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_HEAP_MEMORY_MEASUREMENT_H_
diff --git a/deps/v8/src/heap/memory-reducer.cc b/deps/v8/src/heap/memory-reducer.cc
index 704e656796..37dca5b99c 100644
--- a/deps/v8/src/heap/memory-reducer.cc
+++ b/deps/v8/src/heap/memory-reducer.cc
@@ -214,9 +214,8 @@ void MemoryReducer::ScheduleTimer(double delay_ms) {
if (heap()->IsTearingDown()) return;
// Leave some room for precision error in task scheduler.
const double kSlackMs = 100;
- taskrunner_->PostDelayedTask(
- base::make_unique<MemoryReducer::TimerTask>(this),
- (delay_ms + kSlackMs) / 1000.0);
+ taskrunner_->PostDelayedTask(std::make_unique<MemoryReducer::TimerTask>(this),
+ (delay_ms + kSlackMs) / 1000.0);
}
void MemoryReducer::TearDown() { state_ = State(kDone, 0, 0, 0.0, 0); }
diff --git a/deps/v8/src/heap/object-stats.cc b/deps/v8/src/heap/object-stats.cc
index 2ee88361c9..44798a3928 100644
--- a/deps/v8/src/heap/object-stats.cc
+++ b/deps/v8/src/heap/object-stats.cc
@@ -150,9 +150,8 @@ FieldStatsCollector::GetInobjectFieldStats(Map map) {
JSObjectFieldStats stats;
stats.embedded_fields_count_ = JSObject::GetEmbedderFieldCount(map);
if (!map.is_dictionary_map()) {
- int nof = map.NumberOfOwnDescriptors();
DescriptorArray descriptors = map.instance_descriptors();
- for (int descriptor = 0; descriptor < nof; descriptor++) {
+ for (InternalIndex descriptor : map.IterateOwnDescriptors()) {
PropertyDetails details = descriptors.GetDetails(descriptor);
if (details.location() == kField) {
FieldIndex index = FieldIndex::ForDescriptor(map, descriptor);
@@ -658,8 +657,7 @@ static ObjectStats::VirtualInstanceType GetFeedbackSlotType(
Object obj = maybe_obj->GetHeapObjectOrSmi();
switch (kind) {
case FeedbackSlotKind::kCall:
- if (obj == *isolate->factory()->uninitialized_symbol() ||
- obj == *isolate->factory()->premonomorphic_symbol()) {
+ if (obj == *isolate->factory()->uninitialized_symbol()) {
return ObjectStats::FEEDBACK_VECTOR_SLOT_CALL_UNUSED_TYPE;
}
return ObjectStats::FEEDBACK_VECTOR_SLOT_CALL_TYPE;
@@ -669,8 +667,7 @@ static ObjectStats::VirtualInstanceType GetFeedbackSlotType(
case FeedbackSlotKind::kLoadGlobalNotInsideTypeof:
case FeedbackSlotKind::kLoadKeyed:
case FeedbackSlotKind::kHasKeyed:
- if (obj == *isolate->factory()->uninitialized_symbol() ||
- obj == *isolate->factory()->premonomorphic_symbol()) {
+ if (obj == *isolate->factory()->uninitialized_symbol()) {
return ObjectStats::FEEDBACK_VECTOR_SLOT_LOAD_UNUSED_TYPE;
}
return ObjectStats::FEEDBACK_VECTOR_SLOT_LOAD_TYPE;
@@ -682,8 +679,7 @@ static ObjectStats::VirtualInstanceType GetFeedbackSlotType(
case FeedbackSlotKind::kStoreGlobalStrict:
case FeedbackSlotKind::kStoreKeyedSloppy:
case FeedbackSlotKind::kStoreKeyedStrict:
- if (obj == *isolate->factory()->uninitialized_symbol() ||
- obj == *isolate->factory()->premonomorphic_symbol()) {
+ if (obj == *isolate->factory()->uninitialized_symbol()) {
return ObjectStats::FEEDBACK_VECTOR_SLOT_STORE_UNUSED_TYPE;
}
return ObjectStats::FEEDBACK_VECTOR_SLOT_STORE_TYPE;
@@ -829,10 +825,6 @@ void ObjectStatsCollectorImpl::CollectGlobalStatistics() {
ObjectStats::RETAINED_MAPS_TYPE);
// WeakArrayList.
- RecordSimpleVirtualObjectStats(
- HeapObject(),
- WeakArrayList::cast(heap_->noscript_shared_function_infos()),
- ObjectStats::NOSCRIPT_SHARED_FUNCTION_INFOS_TYPE);
RecordSimpleVirtualObjectStats(HeapObject(),
WeakArrayList::cast(heap_->script_list()),
ObjectStats::SCRIPT_LIST_TYPE);
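
The descriptor loop earlier in this file switches from counting with NumberOfOwnDescriptors() to a range-based for over map.IterateOwnDescriptors(). The sketch below shows the general pattern with a toy range type; V8's InternalIndex is a small wrapper type rather than the bare int used here.

    #include <cstdio>

    // Minimal range object: "for (int d : OwnDescriptors(n))" replaces
    // "int nof = n; for (int d = 0; d < nof; d++)".
    class OwnDescriptors {
     public:
      explicit OwnDescriptors(int count) : count_(count) {}
      struct Iterator {
        int value;
        int operator*() const { return value; }
        Iterator& operator++() { ++value; return *this; }
        bool operator!=(const Iterator& other) const { return value != other.value; }
      };
      Iterator begin() const { return Iterator{0}; }
      Iterator end() const { return Iterator{count_}; }
     private:
      int count_;
    };

    int main() {
      for (int descriptor : OwnDescriptors(3)) {
        std::printf("descriptor %d\n", descriptor);
      }
      return 0;
    }
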
diff --git a/deps/v8/src/heap/object-stats.h b/deps/v8/src/heap/object-stats.h
index 2a9b9675ef..28ef967c5c 100644
--- a/deps/v8/src/heap/object-stats.h
+++ b/deps/v8/src/heap/object-stats.h
@@ -54,7 +54,6 @@
V(MAP_PROTOTYPE_DICTIONARY_TYPE) \
V(MAP_PROTOTYPE_TYPE) \
V(MAP_STABLE_TYPE) \
- V(NOSCRIPT_SHARED_FUNCTION_INFOS_TYPE) \
V(NUMBER_STRING_CACHE_TYPE) \
V(OBJECT_DICTIONARY_ELEMENTS_TYPE) \
V(OBJECT_ELEMENTS_TYPE) \
diff --git a/deps/v8/src/heap/objects-visiting-inl.h b/deps/v8/src/heap/objects-visiting-inl.h
index ba0bfa2415..d4d6d9375c 100644
--- a/deps/v8/src/heap/objects-visiting-inl.h
+++ b/deps/v8/src/heap/objects-visiting-inl.h
@@ -38,7 +38,7 @@ ResultType HeapVisitor<ResultType, ConcreteVisitor>::Visit(Map map,
HeapObject object) {
ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this);
switch (map.visitor_id()) {
-#define CASE(TypeName, Type) \
+#define CASE(TypeName) \
case kVisit##TypeName: \
return visitor->Visit##TypeName( \
map, ConcreteVisitor::template Cast<TypeName>(object));
@@ -77,10 +77,10 @@ void HeapVisitor<ResultType, ConcreteVisitor>::VisitMapPointer(
static_cast<ConcreteVisitor*>(this)->VisitPointer(host, host.map_slot());
}
-#define VISIT(TypeName, Type) \
+#define VISIT(TypeName) \
template <typename ResultType, typename ConcreteVisitor> \
ResultType HeapVisitor<ResultType, ConcreteVisitor>::Visit##TypeName( \
- Map map, Type object) { \
+ Map map, TypeName object) { \
ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this); \
if (!visitor->ShouldVisit(object)) return ResultType(); \
if (!visitor->AllowDefaultJSObjectVisit()) { \
diff --git a/deps/v8/src/heap/objects-visiting.h b/deps/v8/src/heap/objects-visiting.h
index a5c291458f..0f972737d2 100644
--- a/deps/v8/src/heap/objects-visiting.h
+++ b/deps/v8/src/heap/objects-visiting.h
@@ -13,59 +13,58 @@
namespace v8 {
namespace internal {
-// TODO(jkummerow): Drop the duplication: V(x, x) -> V(x).
-#define TYPED_VISITOR_ID_LIST(V) \
- V(AllocationSite, AllocationSite) \
- V(BigInt, BigInt) \
- V(ByteArray, ByteArray) \
- V(BytecodeArray, BytecodeArray) \
- V(Cell, Cell) \
- V(Code, Code) \
- V(CodeDataContainer, CodeDataContainer) \
- V(ConsString, ConsString) \
- V(Context, Context) \
- V(DataHandler, DataHandler) \
- V(DescriptorArray, DescriptorArray) \
- V(EmbedderDataArray, EmbedderDataArray) \
- V(EphemeronHashTable, EphemeronHashTable) \
- V(FeedbackCell, FeedbackCell) \
- V(FeedbackVector, FeedbackVector) \
- V(FixedArray, FixedArray) \
- V(FixedDoubleArray, FixedDoubleArray) \
- V(JSArrayBuffer, JSArrayBuffer) \
- V(JSDataView, JSDataView) \
- V(JSFunction, JSFunction) \
- V(JSObject, JSObject) \
- V(JSTypedArray, JSTypedArray) \
- V(WeakCell, WeakCell) \
- V(JSWeakCollection, JSWeakCollection) \
- V(JSWeakRef, JSWeakRef) \
- V(Map, Map) \
- V(NativeContext, NativeContext) \
- V(Oddball, Oddball) \
- V(PreparseData, PreparseData) \
- V(PropertyArray, PropertyArray) \
- V(PropertyCell, PropertyCell) \
- V(PrototypeInfo, PrototypeInfo) \
- V(SeqOneByteString, SeqOneByteString) \
- V(SeqTwoByteString, SeqTwoByteString) \
- V(SharedFunctionInfo, SharedFunctionInfo) \
- V(SlicedString, SlicedString) \
- V(SmallOrderedHashMap, SmallOrderedHashMap) \
- V(SmallOrderedHashSet, SmallOrderedHashSet) \
- V(SmallOrderedNameDictionary, SmallOrderedNameDictionary) \
- V(SourceTextModule, SourceTextModule) \
- V(Symbol, Symbol) \
- V(SyntheticModule, SyntheticModule) \
- V(ThinString, ThinString) \
- V(TransitionArray, TransitionArray) \
- V(UncompiledDataWithoutPreparseData, UncompiledDataWithoutPreparseData) \
- V(UncompiledDataWithPreparseData, UncompiledDataWithPreparseData) \
- V(WasmCapiFunctionData, WasmCapiFunctionData) \
- V(WasmIndirectFunctionTable, WasmIndirectFunctionTable) \
- V(WasmInstanceObject, WasmInstanceObject)
-
-#define FORWARD_DECLARE(TypeName, Type) class Type;
+#define TYPED_VISITOR_ID_LIST(V) \
+ V(AllocationSite) \
+ V(BigInt) \
+ V(ByteArray) \
+ V(BytecodeArray) \
+ V(Cell) \
+ V(Code) \
+ V(CodeDataContainer) \
+ V(ConsString) \
+ V(Context) \
+ V(DataHandler) \
+ V(DescriptorArray) \
+ V(EmbedderDataArray) \
+ V(EphemeronHashTable) \
+ V(FeedbackCell) \
+ V(FeedbackVector) \
+ V(FixedArray) \
+ V(FixedDoubleArray) \
+ V(JSArrayBuffer) \
+ V(JSDataView) \
+ V(JSFunction) \
+ V(JSObject) \
+ V(JSTypedArray) \
+ V(WeakCell) \
+ V(JSWeakCollection) \
+ V(JSWeakRef) \
+ V(Map) \
+ V(NativeContext) \
+ V(Oddball) \
+ V(PreparseData) \
+ V(PropertyArray) \
+ V(PropertyCell) \
+ V(PrototypeInfo) \
+ V(SeqOneByteString) \
+ V(SeqTwoByteString) \
+ V(SharedFunctionInfo) \
+ V(SlicedString) \
+ V(SmallOrderedHashMap) \
+ V(SmallOrderedHashSet) \
+ V(SmallOrderedNameDictionary) \
+ V(SourceTextModule) \
+ V(Symbol) \
+ V(SyntheticModule) \
+ V(ThinString) \
+ V(TransitionArray) \
+ V(UncompiledDataWithoutPreparseData) \
+ V(UncompiledDataWithPreparseData) \
+ V(WasmCapiFunctionData) \
+ V(WasmIndirectFunctionTable) \
+ V(WasmInstanceObject)
+
+#define FORWARD_DECLARE(TypeName) class TypeName;
TYPED_VISITOR_ID_LIST(FORWARD_DECLARE)
#undef FORWARD_DECLARE
@@ -99,8 +98,8 @@ class HeapVisitor : public ObjectVisitor {
 // in default Visit implementation for subclasses of JSObject.
V8_INLINE bool AllowDefaultJSObjectVisit() { return true; }
-#define VISIT(TypeName, Type) \
- V8_INLINE ResultType Visit##TypeName(Map map, Type object);
+#define VISIT(TypeName) \
+ V8_INLINE ResultType Visit##TypeName(Map map, TypeName object);
TYPED_VISITOR_ID_LIST(VISIT)
#undef VISIT
V8_INLINE ResultType VisitShortcutCandidate(Map map, ConsString object);
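
This hunk resolves the TODO in the deleted lines: each list entry now carries a single name that serves both as the Visit* method suffix and as the parameter type, so the forward declarations and VISIT expansions no longer need a second argument. A self-contained illustration with a two-entry toy list (ToyA/ToyB are placeholders, not V8 classes):

    // Two placeholder heap object classes.
    class ToyA {};
    class ToyB {};

    // Single-argument list: V(ToyA) is enough because the name doubles as the type.
    #define TOY_VISITOR_ID_LIST(V) \
      V(ToyA)                      \
      V(ToyB)

    #define DECLARE_VISIT(TypeName) void Visit##TypeName(TypeName object);

    class ToyVisitor {
     public:
      TOY_VISITOR_ID_LIST(DECLARE_VISIT)  // declares VisitToyA(ToyA) and VisitToyB(ToyB)
    };

    #undef DECLARE_VISIT
    #undef TOY_VISITOR_ID_LIST
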
diff --git a/deps/v8/src/heap/remembered-set.h b/deps/v8/src/heap/remembered-set.h
index eefc565e00..3c8984c83a 100644
--- a/deps/v8/src/heap/remembered-set.h
+++ b/deps/v8/src/heap/remembered-set.h
@@ -5,6 +5,8 @@
#ifndef V8_HEAP_REMEMBERED_SET_H_
#define V8_HEAP_REMEMBERED_SET_H_
+#include <memory>
+
#include "src/base/memory.h"
#include "src/codegen/reloc-info.h"
#include "src/heap/heap.h"
@@ -16,54 +18,39 @@ namespace internal {
enum RememberedSetIterationMode { SYNCHRONIZED, NON_SYNCHRONIZED };
-// TODO(ulan): Investigate performance of de-templatizing this class.
-template <RememberedSetType type>
-class RememberedSet : public AllStatic {
+class RememberedSetOperations {
public:
// Given a page and a slot in that page, this function adds the slot to the
// remembered set.
- template <AccessMode access_mode = AccessMode::ATOMIC>
- static void Insert(MemoryChunk* chunk, Address slot_addr) {
+ template <AccessMode access_mode>
+ static void Insert(SlotSet* slot_set, MemoryChunk* chunk, Address slot_addr) {
DCHECK(chunk->Contains(slot_addr));
- SlotSet* slot_set = chunk->slot_set<type, access_mode>();
- if (slot_set == nullptr) {
- slot_set = chunk->AllocateSlotSet<type>();
- }
uintptr_t offset = slot_addr - chunk->address();
slot_set[offset / Page::kPageSize].Insert<access_mode>(offset %
Page::kPageSize);
}
- // Given a page and a slot in that page, this function returns true if
- // the remembered set contains the slot.
- static bool Contains(MemoryChunk* chunk, Address slot_addr) {
- DCHECK(chunk->Contains(slot_addr));
- SlotSet* slot_set = chunk->slot_set<type>();
- if (slot_set == nullptr) {
- return false;
+ template <typename Callback>
+ static void Iterate(SlotSet* slots, MemoryChunk* chunk, Callback callback,
+ SlotSet::EmptyBucketMode mode) {
+ if (slots != nullptr) {
+ size_t pages = (chunk->size() + Page::kPageSize - 1) / Page::kPageSize;
+ for (size_t page = 0; page < pages; page++) {
+ slots[page].Iterate(chunk->address() + page * Page::kPageSize, callback,
+ mode);
+ }
}
- uintptr_t offset = slot_addr - chunk->address();
- return slot_set[offset / Page::kPageSize].Contains(offset %
- Page::kPageSize);
}
- // Given a page and a slot in that page, this function removes the slot from
- // the remembered set.
- // If the slot was never added, then the function does nothing.
- static void Remove(MemoryChunk* chunk, Address slot_addr) {
- DCHECK(chunk->Contains(slot_addr));
- SlotSet* slot_set = chunk->slot_set<type>();
+ static void Remove(SlotSet* slot_set, MemoryChunk* chunk, Address slot_addr) {
if (slot_set != nullptr) {
uintptr_t offset = slot_addr - chunk->address();
slot_set[offset / Page::kPageSize].Remove(offset % Page::kPageSize);
}
}
- // Given a page and a range of slots in that page, this function removes the
- // slots from the remembered set.
- static void RemoveRange(MemoryChunk* chunk, Address start, Address end,
- SlotSet::EmptyBucketMode mode) {
- SlotSet* slot_set = chunk->slot_set<type>();
+ static void RemoveRange(SlotSet* slot_set, MemoryChunk* chunk, Address start,
+ Address end, SlotSet::EmptyBucketMode mode) {
if (slot_set != nullptr) {
uintptr_t start_offset = start - chunk->address();
uintptr_t end_offset = end - chunk->address();
@@ -99,6 +86,53 @@ class RememberedSet : public AllStatic {
}
}
}
+};
+
+// TODO(ulan): Investigate performance of de-templatizing this class.
+template <RememberedSetType type>
+class RememberedSet : public AllStatic {
+ public:
+ // Given a page and a slot in that page, this function adds the slot to the
+ // remembered set.
+ template <AccessMode access_mode>
+ static void Insert(MemoryChunk* chunk, Address slot_addr) {
+ DCHECK(chunk->Contains(slot_addr));
+ SlotSet* slot_set = chunk->slot_set<type, access_mode>();
+ if (slot_set == nullptr) {
+ slot_set = chunk->AllocateSlotSet<type>();
+ }
+ RememberedSetOperations::Insert<access_mode>(slot_set, chunk, slot_addr);
+ }
+
+ // Given a page and a slot in that page, this function returns true if
+ // the remembered set contains the slot.
+ static bool Contains(MemoryChunk* chunk, Address slot_addr) {
+ DCHECK(chunk->Contains(slot_addr));
+ SlotSet* slot_set = chunk->slot_set<type>();
+ if (slot_set == nullptr) {
+ return false;
+ }
+ uintptr_t offset = slot_addr - chunk->address();
+ return slot_set[offset / Page::kPageSize].Contains(offset %
+ Page::kPageSize);
+ }
+
+ // Given a page and a slot in that page, this function removes the slot from
+ // the remembered set.
+ // If the slot was never added, then the function does nothing.
+ static void Remove(MemoryChunk* chunk, Address slot_addr) {
+ DCHECK(chunk->Contains(slot_addr));
+ SlotSet* slot_set = chunk->slot_set<type>();
+ RememberedSetOperations::Remove(slot_set, chunk, slot_addr);
+ }
+
+ // Given a page and a range of slots in that page, this function removes the
+ // slots from the remembered set.
+ static void RemoveRange(MemoryChunk* chunk, Address start, Address end,
+ SlotSet::EmptyBucketMode mode) {
+ SlotSet* slot_set = chunk->slot_set<type>();
+ RememberedSetOperations::RemoveRange(slot_set, chunk, start, end, mode);
+ }
// Iterates and filters the remembered set with the given callback.
// The callback should take (Address slot) and return SlotCallbackResult.
@@ -120,8 +154,11 @@ class RememberedSet : public AllStatic {
MemoryChunk* chunk;
while ((chunk = it.next()) != nullptr) {
SlotSet* slots = chunk->slot_set<type>();
+ SlotSet* sweeping_slots =
+ type == OLD_TO_NEW ? chunk->sweeping_slot_set() : nullptr;
TypedSlotSet* typed_slots = chunk->typed_slot_set<type>();
- if (slots != nullptr || typed_slots != nullptr ||
+ if (slots != nullptr || sweeping_slots != nullptr ||
+ typed_slots != nullptr ||
chunk->invalidated_slots<type>() != nullptr) {
callback(chunk);
}
@@ -138,42 +175,7 @@ class RememberedSet : public AllStatic {
static void Iterate(MemoryChunk* chunk, Callback callback,
SlotSet::EmptyBucketMode mode) {
SlotSet* slots = chunk->slot_set<type>();
- if (slots != nullptr) {
- size_t pages = (chunk->size() + Page::kPageSize - 1) / Page::kPageSize;
- int new_count = 0;
- for (size_t page = 0; page < pages; page++) {
- new_count += slots[page].Iterate(callback, mode);
- }
- // Only old-to-old slot sets are released eagerly. Old-new-slot sets are
- // released by the sweeper threads.
- if (type == OLD_TO_OLD && new_count == 0) {
- chunk->ReleaseSlotSet<OLD_TO_OLD>();
- }
- }
- }
-
- static int NumberOfPreFreedEmptyBuckets(MemoryChunk* chunk) {
- DCHECK(type == OLD_TO_NEW);
- int result = 0;
- SlotSet* slots = chunk->slot_set<type>();
- if (slots != nullptr) {
- size_t pages = (chunk->size() + Page::kPageSize - 1) / Page::kPageSize;
- for (size_t page = 0; page < pages; page++) {
- result += slots[page].NumberOfPreFreedEmptyBuckets();
- }
- }
- return result;
- }
-
- static void PreFreeEmptyBuckets(MemoryChunk* chunk) {
- DCHECK(type == OLD_TO_NEW);
- SlotSet* slots = chunk->slot_set<type>();
- if (slots != nullptr) {
- size_t pages = (chunk->size() + Page::kPageSize - 1) / Page::kPageSize;
- for (size_t page = 0; page < pages; page++) {
- slots[page].PreFreeEmptyBuckets();
- }
- }
+ RememberedSetOperations::Iterate(slots, chunk, callback, mode);
}
static void FreeEmptyBuckets(MemoryChunk* chunk) {
@@ -183,7 +185,6 @@ class RememberedSet : public AllStatic {
size_t pages = (chunk->size() + Page::kPageSize - 1) / Page::kPageSize;
for (size_t page = 0; page < pages; page++) {
slots[page].FreeEmptyBuckets();
- slots[page].FreeToBeFreedBuckets();
}
}
}
@@ -217,7 +218,7 @@ class RememberedSet : public AllStatic {
return start <= slot_addr && slot_addr < end ? REMOVE_SLOT
: KEEP_SLOT;
},
- TypedSlotSet::PREFREE_EMPTY_CHUNKS);
+ TypedSlotSet::FREE_EMPTY_CHUNKS);
}
}
@@ -234,9 +235,9 @@ class RememberedSet : public AllStatic {
});
}
- // Iterates and filters typed old to old pointers in the given memory chunk
- // with the given callback. The callback should take (SlotType slot_type,
- // Address addr) and return SlotCallbackResult.
+ // Iterates and filters typed pointers in the given memory chunk with the
+ // given callback. The callback should take (SlotType slot_type, Address addr)
+ // and return SlotCallbackResult.
template <typename Callback>
static void IterateTyped(MemoryChunk* chunk, Callback callback) {
TypedSlotSet* slots = chunk->typed_slot_set<type>();
@@ -259,9 +260,6 @@ class RememberedSet : public AllStatic {
chunk->ReleaseInvalidatedSlots<OLD_TO_OLD>();
}
}
-
- private:
- static bool IsValidSlot(Heap* heap, MemoryChunk* chunk, ObjectSlot slot);
};
class UpdateTypedSlotHelper {
@@ -347,6 +345,46 @@ class UpdateTypedSlotHelper {
}
};
+class RememberedSetSweeping {
+ public:
+ template <AccessMode access_mode>
+ static void Insert(MemoryChunk* chunk, Address slot_addr) {
+ DCHECK(chunk->Contains(slot_addr));
+ SlotSet* slot_set = chunk->sweeping_slot_set<access_mode>();
+ if (slot_set == nullptr) {
+ slot_set = chunk->AllocateSweepingSlotSet();
+ }
+ RememberedSetOperations::Insert<access_mode>(slot_set, chunk, slot_addr);
+ }
+
+ static void Remove(MemoryChunk* chunk, Address slot_addr) {
+ DCHECK(chunk->Contains(slot_addr));
+ SlotSet* slot_set = chunk->sweeping_slot_set<AccessMode::ATOMIC>();
+ RememberedSetOperations::Remove(slot_set, chunk, slot_addr);
+ }
+
+ // Given a page and a range of slots in that page, this function removes the
+ // slots from the remembered set.
+ static void RemoveRange(MemoryChunk* chunk, Address start, Address end,
+ SlotSet::EmptyBucketMode mode) {
+ SlotSet* slot_set = chunk->sweeping_slot_set();
+ RememberedSetOperations::RemoveRange(slot_set, chunk, start, end, mode);
+ }
+
+ // Iterates and filters the remembered set in the given memory chunk with
+ // the given callback. The callback should take (Address slot) and return
+ // SlotCallbackResult.
+ //
+  // Notice that |mode| can only be FREE_EMPTY_BUCKETS if there are no other
+ // threads concurrently inserting slots.
+ template <typename Callback>
+ static void Iterate(MemoryChunk* chunk, Callback callback,
+ SlotSet::EmptyBucketMode mode) {
+ SlotSet* slots = chunk->sweeping_slot_set();
+ RememberedSetOperations::Iterate(slots, chunk, callback, mode);
+ }
+};
+
inline SlotType SlotTypeForRelocInfoMode(RelocInfo::Mode rmode) {
if (RelocInfo::IsCodeTargetMode(rmode)) {
return CODE_TARGET_SLOT;
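
The restructuring above factors the raw bucket manipulation into RememberedSetOperations so that both the typed RememberedSet<type> front end and the new RememberedSetSweeping (which records old-to-new slots in a separate, sweeper-owned slot set) can share it. The toy model below sketches that layering under simplified assumptions; std::set stands in for the real bitmap-based SlotSet and all type names are invented.

    #include <cstdint>
    #include <set>

    using Address = uintptr_t;
    using ToySlotSet = std::set<Address>;  // stand-in for the bitmap-based SlotSet

    // Toy chunk with two independent slot sets, mirroring slot_set<OLD_TO_NEW>()
    // and the new sweeping_slot_set().
    struct ToyChunk {
      ToySlotSet old_to_new;
      ToySlotSet sweeping;
    };

    // Shared implementation layer: operates on whichever slot set it is handed,
    // in the spirit of RememberedSetOperations.
    struct ToyRememberedSetOperations {
      static void Insert(ToySlotSet* slots, Address slot) { slots->insert(slot); }
      static void Remove(ToySlotSet* slots, Address slot) { slots->erase(slot); }
    };

    // Thin front ends that differ only in which slot set they fetch off the chunk.
    struct ToyRememberedSet {
      static void Insert(ToyChunk* chunk, Address slot) {
        ToyRememberedSetOperations::Insert(&chunk->old_to_new, slot);
      }
    };
    struct ToyRememberedSetSweeping {
      static void Insert(ToyChunk* chunk, Address slot) {
        ToyRememberedSetOperations::Insert(&chunk->sweeping, slot);
      }
    };
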
diff --git a/deps/v8/src/heap/scavenge-job.cc b/deps/v8/src/heap/scavenge-job.cc
index 273866d5e4..3730bfeecb 100644
--- a/deps/v8/src/heap/scavenge-job.cc
+++ b/deps/v8/src/heap/scavenge-job.cc
@@ -108,7 +108,7 @@ void ScavengeJob::ScheduleIdleTask(Heap* heap) {
v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(heap->isolate());
if (V8::GetCurrentPlatform()->IdleTasksEnabled(isolate)) {
idle_task_pending_ = true;
- auto task = base::make_unique<IdleTask>(heap->isolate(), this);
+ auto task = std::make_unique<IdleTask>(heap->isolate(), this);
V8::GetCurrentPlatform()->GetForegroundTaskRunner(isolate)->PostIdleTask(
std::move(task));
}
diff --git a/deps/v8/src/heap/scavenger.cc b/deps/v8/src/heap/scavenger.cc
index 7d56882953..47c19d4fcc 100644
--- a/deps/v8/src/heap/scavenger.cc
+++ b/deps/v8/src/heap/scavenger.cc
@@ -153,8 +153,17 @@ class IterateAndScavengePromotedObjectsVisitor final : public ObjectVisitor {
if (result == KEEP_SLOT) {
SLOW_DCHECK(target.IsHeapObject());
- RememberedSet<OLD_TO_NEW>::Insert(MemoryChunk::FromHeapObject(host),
- slot.address());
+ MemoryChunk* chunk = MemoryChunk::FromHeapObject(host);
+
+ // Sweeper is stopped during scavenge, so we can directly
+ // insert into its remembered set here.
+ if (chunk->sweeping_slot_set()) {
+ RememberedSetSweeping::Insert<AccessMode::ATOMIC>(chunk,
+ slot.address());
+ } else {
+ RememberedSet<OLD_TO_NEW>::Insert<AccessMode::ATOMIC>(chunk,
+ slot.address());
+ }
}
SLOW_DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(
HeapObject::cast(target)));
@@ -165,8 +174,8 @@ class IterateAndScavengePromotedObjectsVisitor final : public ObjectVisitor {
// We cannot call MarkCompactCollector::RecordSlot because that checks
// that the host page is not in young generation, which does not hold
// for pending large pages.
- RememberedSet<OLD_TO_OLD>::Insert(MemoryChunk::FromHeapObject(host),
- slot.address());
+ RememberedSet<OLD_TO_OLD>::Insert<AccessMode::ATOMIC>(
+ MemoryChunk::FromHeapObject(host), slot.address());
}
}
@@ -239,8 +248,10 @@ void ScavengerCollector::CollectGarbage() {
// access to the slots of a page and can completely avoid any locks on
// the page itself.
Sweeper::FilterSweepingPagesScope filter_scope(sweeper, pause_scope);
- filter_scope.FilterOldSpaceSweepingPages(
- [](Page* page) { return !page->ContainsSlots<OLD_TO_NEW>(); });
+ filter_scope.FilterOldSpaceSweepingPages([](Page* page) {
+ return !page->ContainsSlots<OLD_TO_NEW>() && !page->sweeping_slot_set();
+ });
+
RememberedSet<OLD_TO_NEW>::IterateMemoryChunks(
heap_, [&job](MemoryChunk* chunk) {
job.AddItem(new PageScavengingItem(chunk));
@@ -335,11 +346,7 @@ void ScavengerCollector::CollectGarbage() {
heap_->new_lo_space()->FreeDeadObjects([](HeapObject) { return true; });
RememberedSet<OLD_TO_NEW>::IterateMemoryChunks(heap_, [](MemoryChunk* chunk) {
- if (chunk->SweepingDone()) {
- RememberedSet<OLD_TO_NEW>::FreeEmptyBuckets(chunk);
- } else {
- RememberedSet<OLD_TO_NEW>::PreFreeEmptyBuckets(chunk);
- }
+ RememberedSet<OLD_TO_NEW>::FreeEmptyBuckets(chunk);
});
// Update how much has survived scavenge.
@@ -430,16 +437,45 @@ void Scavenger::AddPageToSweeperIfNecessary(MemoryChunk* page) {
}
}
+// Remove this crashkey after chromium:1010312 is fixed.
+class ScopedFullHeapCrashKey {
+ public:
+ explicit ScopedFullHeapCrashKey(Isolate* isolate) : isolate_(isolate) {
+ isolate_->AddCrashKey(v8::CrashKeyId::kDumpType, "heap");
+ }
+ ~ScopedFullHeapCrashKey() {
+ isolate_->AddCrashKey(v8::CrashKeyId::kDumpType, "");
+ }
+
+ private:
+ Isolate* isolate_ = nullptr;
+};
+
void Scavenger::ScavengePage(MemoryChunk* page) {
+ ScopedFullHeapCrashKey collect_full_heap_dump_if_crash(heap_->isolate());
CodePageMemoryModificationScope memory_modification_scope(page);
+ InvalidatedSlotsFilter filter = InvalidatedSlotsFilter::OldToNew(page);
RememberedSet<OLD_TO_NEW>::Iterate(
page,
- [this](MaybeObjectSlot addr) {
- return CheckAndScavengeObject(heap_, addr);
+ [this, &filter](MaybeObjectSlot slot) {
+ if (!filter.IsValid(slot.address())) return REMOVE_SLOT;
+ return CheckAndScavengeObject(heap_, slot);
+ },
+ SlotSet::KEEP_EMPTY_BUCKETS);
+ filter = InvalidatedSlotsFilter::OldToNew(page);
+ RememberedSetSweeping::Iterate(
+ page,
+ [this, &filter](MaybeObjectSlot slot) {
+ if (!filter.IsValid(slot.address())) return REMOVE_SLOT;
+ return CheckAndScavengeObject(heap_, slot);
},
SlotSet::KEEP_EMPTY_BUCKETS);
- DCHECK_NULL(page->invalidated_slots<OLD_TO_NEW>());
+ if (page->invalidated_slots<OLD_TO_NEW>() != nullptr) {
+ // The invalidated slots are not needed after old-to-new slots were
+ // processed.
+ page->ReleaseInvalidatedSlots<OLD_TO_NEW>();
+ }
RememberedSet<OLD_TO_NEW>::IterateTyped(
page, [=](SlotType type, Address addr) {
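
During scavenge the sweeper is stopped, so the promoted-object visitor above can pick the destination set directly: if the page still has a sweeper-owned sweeping slot set, new old-to-new slots go there, otherwise into the regular OLD_TO_NEW set. A toy version of that branch, using the same kind of invented stand-in types as the remembered-set sketch earlier:

    #include <cstdint>
    #include <set>

    using Address = uintptr_t;
    using ToySlotSet = std::set<Address>;

    // Toy page: the sweeping slot set exists only while the sweeper still owns
    // the page's old-to-new slots.
    struct ToyPage {
      ToySlotSet old_to_new;
      ToySlotSet* sweeping = nullptr;  // non-null while sweeping is pending
    };

    // Mirrors the scavenger's choice when recording a promoted object's slot.
    void RecordPromotedSlot(ToyPage* page, Address slot) {
      if (page->sweeping != nullptr) {
        page->sweeping->insert(slot);
      } else {
        page->old_to_new.insert(slot);
      }
    }
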
diff --git a/deps/v8/src/heap/setup-heap-internal.cc b/deps/v8/src/heap/setup-heap-internal.cc
index 15ca6d7930..9f94029af3 100644
--- a/deps/v8/src/heap/setup-heap-internal.cc
+++ b/deps/v8/src/heap/setup-heap-internal.cc
@@ -7,6 +7,7 @@
#include "src/builtins/accessors.h"
#include "src/codegen/compilation-cache.h"
#include "src/execution/isolate.h"
+#include "src/execution/protectors.h"
#include "src/heap/factory.h"
#include "src/heap/heap-inl.h"
#include "src/ic/handler-configuration.h"
@@ -616,17 +617,17 @@ void Heap::CreateInitialObjects() {
// The -0 value must be set before NewNumber works.
set_minus_zero_value(
- *factory->NewHeapNumber(-0.0, AllocationType::kReadOnly));
+ *factory->NewHeapNumber<AllocationType::kReadOnly>(-0.0));
DCHECK(std::signbit(roots.minus_zero_value().Number()));
- set_nan_value(*factory->NewHeapNumber(
- std::numeric_limits<double>::quiet_NaN(), AllocationType::kReadOnly));
- set_hole_nan_value(*factory->NewHeapNumberFromBits(
- kHoleNanInt64, AllocationType::kReadOnly));
+ set_nan_value(*factory->NewHeapNumber<AllocationType::kReadOnly>(
+ std::numeric_limits<double>::quiet_NaN()));
+ set_hole_nan_value(*factory->NewHeapNumberFromBits<AllocationType::kReadOnly>(
+ kHoleNanInt64));
set_infinity_value(
- *factory->NewHeapNumber(V8_INFINITY, AllocationType::kReadOnly));
+ *factory->NewHeapNumber<AllocationType::kReadOnly>(V8_INFINITY));
set_minus_infinity_value(
- *factory->NewHeapNumber(-V8_INFINITY, AllocationType::kReadOnly));
+ *factory->NewHeapNumber<AllocationType::kReadOnly>(-V8_INFINITY));
set_hash_seed(*factory->NewByteArray(kInt64Size, AllocationType::kReadOnly));
InitializeHashSeed();
@@ -704,8 +705,7 @@ void Heap::CreateInitialObjects() {
Oddball::kStaleRegister));
// Initialize the self-reference marker.
- set_self_reference_marker(
- *factory->NewSelfReferenceMarker(AllocationType::kReadOnly));
+ set_self_reference_marker(*factory->NewSelfReferenceMarker());
set_interpreter_entry_trampoline_for_profiling(roots.undefined_value());
@@ -781,13 +781,13 @@ void Heap::CreateInitialObjects() {
set_feedback_vectors_for_profiling_tools(roots.undefined_value());
set_pending_optimize_for_test_bytecode(roots.undefined_value());
+ set_shared_wasm_memories(roots.empty_weak_array_list());
set_script_list(roots.empty_weak_array_list());
Handle<NumberDictionary> slow_element_dictionary = NumberDictionary::New(
isolate(), 1, AllocationType::kReadOnly, USE_CUSTOM_MINIMUM_CAPACITY);
DCHECK(!slow_element_dictionary->HasSufficientCapacityToAdd(1));
- slow_element_dictionary->set_requires_slow_elements();
set_empty_slow_element_dictionary(*slow_element_dictionary);
set_materialized_objects(*factory->NewFixedArray(0, AllocationType::kOld));
@@ -839,76 +839,122 @@ void Heap::CreateInitialObjects() {
script->set_origin_options(ScriptOriginOptions(true, false));
set_empty_script(*script);
- Handle<Cell> array_constructor_cell = factory->NewCell(
- handle(Smi::FromInt(Isolate::kProtectorValid), isolate()));
- set_array_constructor_protector(*array_constructor_cell);
+ {
+ Handle<PropertyCell> cell = factory->NewPropertyCell(
+ factory->empty_string(), AllocationType::kReadOnly);
+ cell->set_value(roots.the_hole_value());
+ set_empty_property_cell(*cell);
+ }
- Handle<PropertyCell> cell = factory->NewPropertyCell(factory->empty_string());
- cell->set_value(Smi::FromInt(Isolate::kProtectorValid));
- set_no_elements_protector(*cell);
+ // Protectors
+ {
+ Handle<PropertyCell> cell =
+ factory->NewPropertyCell(factory->empty_string());
+ cell->set_value(Smi::FromInt(Protectors::kProtectorValid));
+ set_array_constructor_protector(*cell);
+ }
- cell = factory->NewPropertyCell(factory->empty_string(),
- AllocationType::kReadOnly);
- cell->set_value(roots.the_hole_value());
- set_empty_property_cell(*cell);
+ {
+ Handle<PropertyCell> cell =
+ factory->NewPropertyCell(factory->empty_string());
+ cell->set_value(Smi::FromInt(Protectors::kProtectorValid));
+ set_no_elements_protector(*cell);
+ }
- cell = factory->NewPropertyCell(factory->empty_string());
- cell->set_value(Smi::FromInt(Isolate::kProtectorValid));
- set_array_iterator_protector(*cell);
+ {
+ Handle<PropertyCell> cell =
+ factory->NewPropertyCell(factory->empty_string());
+ cell->set_value(Smi::FromInt(Protectors::kProtectorValid));
+ set_array_iterator_protector(*cell);
+ }
- cell = factory->NewPropertyCell(factory->empty_string());
- cell->set_value(Smi::FromInt(Isolate::kProtectorValid));
- set_map_iterator_protector(*cell);
+ {
+ Handle<PropertyCell> cell =
+ factory->NewPropertyCell(factory->empty_string());
+ cell->set_value(Smi::FromInt(Protectors::kProtectorValid));
+ set_map_iterator_protector(*cell);
+ }
- cell = factory->NewPropertyCell(factory->empty_string());
- cell->set_value(Smi::FromInt(Isolate::kProtectorValid));
- set_set_iterator_protector(*cell);
+ {
+ Handle<PropertyCell> cell =
+ factory->NewPropertyCell(factory->empty_string());
+ cell->set_value(Smi::FromInt(Protectors::kProtectorValid));
+ set_set_iterator_protector(*cell);
+ }
- Handle<Cell> is_concat_spreadable_cell = factory->NewCell(
- handle(Smi::FromInt(Isolate::kProtectorValid), isolate()));
- set_is_concat_spreadable_protector(*is_concat_spreadable_cell);
+ {
+ Handle<PropertyCell> cell =
+ factory->NewPropertyCell(factory->empty_string());
+ cell->set_value(Smi::FromInt(Protectors::kProtectorValid));
+ set_is_concat_spreadable_protector(*cell);
+ }
- cell = factory->NewPropertyCell(factory->empty_string());
- cell->set_value(Smi::FromInt(Isolate::kProtectorValid));
- set_array_species_protector(*cell);
+ {
+ Handle<PropertyCell> cell =
+ factory->NewPropertyCell(factory->empty_string());
+ cell->set_value(Smi::FromInt(Protectors::kProtectorValid));
+ set_array_species_protector(*cell);
+ }
- cell = factory->NewPropertyCell(factory->empty_string());
- cell->set_value(Smi::FromInt(Isolate::kProtectorValid));
- set_typed_array_species_protector(*cell);
+ {
+ Handle<PropertyCell> cell =
+ factory->NewPropertyCell(factory->empty_string());
+ cell->set_value(Smi::FromInt(Protectors::kProtectorValid));
+ set_typed_array_species_protector(*cell);
+ }
- cell = factory->NewPropertyCell(factory->empty_string());
- cell->set_value(Smi::FromInt(Isolate::kProtectorValid));
- set_promise_species_protector(*cell);
+ {
+ Handle<PropertyCell> cell =
+ factory->NewPropertyCell(factory->empty_string());
+ cell->set_value(Smi::FromInt(Protectors::kProtectorValid));
+ set_promise_species_protector(*cell);
+ }
- cell = factory->NewPropertyCell(factory->empty_string());
- cell->set_value(Smi::FromInt(Isolate::kProtectorValid));
- set_string_iterator_protector(*cell);
+ {
+ Handle<PropertyCell> cell =
+ factory->NewPropertyCell(factory->empty_string());
+ cell->set_value(Smi::FromInt(Protectors::kProtectorValid));
+ set_string_iterator_protector(*cell);
+ }
- Handle<Cell> string_length_overflow_cell = factory->NewCell(
- handle(Smi::FromInt(Isolate::kProtectorValid), isolate()));
- set_string_length_protector(*string_length_overflow_cell);
+ {
+ Handle<PropertyCell> cell =
+ factory->NewPropertyCell(factory->empty_string());
+ cell->set_value(Smi::FromInt(Protectors::kProtectorValid));
+ set_string_length_protector(*cell);
+ }
- cell = factory->NewPropertyCell(factory->empty_string());
- cell->set_value(Smi::FromInt(Isolate::kProtectorValid));
- set_array_buffer_detaching_protector(*cell);
+ {
+ Handle<PropertyCell> cell =
+ factory->NewPropertyCell(factory->empty_string());
+ cell->set_value(Smi::FromInt(Protectors::kProtectorValid));
+ set_array_buffer_detaching_protector(*cell);
+ }
- cell = factory->NewPropertyCell(factory->empty_string());
- cell->set_value(Smi::FromInt(Isolate::kProtectorValid));
- set_promise_hook_protector(*cell);
+ {
+ Handle<PropertyCell> cell =
+ factory->NewPropertyCell(factory->empty_string());
+ cell->set_value(Smi::FromInt(Protectors::kProtectorValid));
+ set_promise_hook_protector(*cell);
+ }
- Handle<Cell> promise_resolve_cell = factory->NewCell(
- handle(Smi::FromInt(Isolate::kProtectorValid), isolate()));
- set_promise_resolve_protector(*promise_resolve_cell);
+ {
+ Handle<PropertyCell> cell =
+ factory->NewPropertyCell(factory->empty_string());
+ cell->set_value(Smi::FromInt(Protectors::kProtectorValid));
+ set_promise_resolve_protector(*cell);
+ }
- cell = factory->NewPropertyCell(factory->empty_string());
- cell->set_value(Smi::FromInt(Isolate::kProtectorValid));
- set_promise_then_protector(*cell);
+ {
+ Handle<PropertyCell> cell =
+ factory->NewPropertyCell(factory->empty_string());
+ cell->set_value(Smi::FromInt(Protectors::kProtectorValid));
+ set_promise_then_protector(*cell);
+ }
set_serialized_objects(roots.empty_fixed_array());
set_serialized_global_proxy_sizes(roots.empty_fixed_array());
- set_noscript_shared_function_infos(roots.empty_weak_array_list());
-
/* Canonical off-heap trampoline data */
set_off_heap_trampoline_relocation_info(
*Builtins::GenerateOffHeapTrampolineRelocInfo(isolate_));
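
The protector hunks above all repeat one shape: allocate a PropertyCell, set it to Protectors::kProtectorValid (previously Isolate::kProtectorValid, and in a few cases a plain Cell), and install it on the roots. The sketch below models only that repeated shape with toy types; the map, the string keys, and ToyRoots are illustrative and have no counterpart in V8.

    #include <string>
    #include <unordered_map>

    // Every protector is a cell initialized to the same "valid" sentinel.
    constexpr int kProtectorValid = 1;

    struct ToyPropertyCell {
      int value = 0;
    };

    class ToyRoots {
     public:
      void InitializeProtector(const std::string& name) {
        cells_[name].value = kProtectorValid;  // mirrors cell->set_value(...)
      }
      bool IsIntact(const std::string& name) const {
        auto it = cells_.find(name);
        return it != cells_.end() && it->second.value == kProtectorValid;
      }

     private:
      std::unordered_map<std::string, ToyPropertyCell> cells_;
    };

    int main() {
      ToyRoots roots;
      roots.InitializeProtector("array_species_protector");
      roots.InitializeProtector("promise_then_protector");
      return roots.IsIntact("promise_then_protector") ? 0 : 1;
    }
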
diff --git a/deps/v8/src/heap/slot-set.cc b/deps/v8/src/heap/slot-set.cc
index 12cf6bab5a..92540574a0 100644
--- a/deps/v8/src/heap/slot-set.cc
+++ b/deps/v8/src/heap/slot-set.cc
@@ -11,7 +11,6 @@ TypedSlots::~TypedSlots() {
Chunk* chunk = head_;
while (chunk != nullptr) {
Chunk* next = chunk->next;
- delete[] chunk->buffer;
delete chunk;
chunk = next;
}
@@ -22,9 +21,8 @@ TypedSlots::~TypedSlots() {
void TypedSlots::Insert(SlotType type, uint32_t offset) {
TypedSlot slot = {TypeField::encode(type) | OffsetField::encode(offset)};
Chunk* chunk = EnsureChunk();
- DCHECK_LT(chunk->count, chunk->capacity);
- chunk->buffer[chunk->count] = slot;
- ++chunk->count;
+ DCHECK_LT(chunk->buffer.size(), chunk->buffer.capacity());
+ chunk->buffer.push_back(slot);
}
void TypedSlots::Merge(TypedSlots* other) {
@@ -46,37 +44,25 @@ TypedSlots::Chunk* TypedSlots::EnsureChunk() {
if (!head_) {
head_ = tail_ = NewChunk(nullptr, kInitialBufferSize);
}
- if (head_->count == head_->capacity) {
- head_ = NewChunk(head_, NextCapacity(head_->capacity));
+ if (head_->buffer.size() == head_->buffer.capacity()) {
+ head_ = NewChunk(head_, NextCapacity(head_->buffer.capacity()));
}
return head_;
}
-TypedSlots::Chunk* TypedSlots::NewChunk(Chunk* next, int capacity) {
+TypedSlots::Chunk* TypedSlots::NewChunk(Chunk* next, size_t capacity) {
Chunk* chunk = new Chunk;
chunk->next = next;
- chunk->buffer = new TypedSlot[capacity];
- chunk->capacity = capacity;
- chunk->count = 0;
+ chunk->buffer.reserve(capacity);
+ DCHECK_EQ(chunk->buffer.capacity(), capacity);
return chunk;
}
-TypedSlotSet::~TypedSlotSet() { FreeToBeFreedChunks(); }
-
-void TypedSlotSet::FreeToBeFreedChunks() {
- base::MutexGuard guard(&to_be_freed_chunks_mutex_);
- std::stack<std::unique_ptr<Chunk>> empty;
- to_be_freed_chunks_.swap(empty);
-}
-
void TypedSlotSet::ClearInvalidSlots(
const std::map<uint32_t, uint32_t>& invalid_ranges) {
Chunk* chunk = LoadHead();
while (chunk != nullptr) {
- TypedSlot* buffer = chunk->buffer;
- int count = chunk->count;
- for (int i = 0; i < count; i++) {
- TypedSlot slot = LoadTypedSlot(buffer + i);
+ for (TypedSlot& slot : chunk->buffer) {
SlotType type = TypeField::decode(slot.type_and_offset);
if (type == CLEARED_SLOT) continue;
uint32_t offset = OffsetField::decode(slot.type_and_offset);
@@ -88,7 +74,7 @@ void TypedSlotSet::ClearInvalidSlots(
upper_bound--;
DCHECK_LE(upper_bound->first, offset);
if (upper_bound->second > offset) {
- ClearTypedSlot(buffer + i);
+ slot = ClearedTypedSlot();
}
}
chunk = LoadNext(chunk);
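
The chunk changes above replace the manually managed (buffer, capacity, count) triple with a std::vector whose capacity is reserved up front, so Insert becomes a push_back and the destructor no longer needs a delete[]. A toy version of the new chunk allocation, with invented Toy* names rather than V8's types:

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    struct ToyTypedSlot {
      uint32_t type_and_offset;
    };

    // Toy chunk after the change: the vector owns its storage, so no explicit
    // capacity/count fields and no delete[] in the destructor.
    struct ToyChunk {
      ToyChunk* next = nullptr;
      std::vector<ToyTypedSlot> buffer;
    };

    ToyChunk* NewToyChunk(ToyChunk* next, std::size_t capacity) {
      ToyChunk* chunk = new ToyChunk;
      chunk->next = next;
      chunk->buffer.reserve(capacity);  // replaces "new TypedSlot[capacity]"
      return chunk;
    }

    void InsertIntoChunk(ToyChunk* chunk, ToyTypedSlot slot) {
      // The reserve above means no reallocation until capacity is exhausted,
      // matching the DCHECK on size() < capacity() in TypedSlots::Insert.
      chunk->buffer.push_back(slot);
    }
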
diff --git a/deps/v8/src/heap/slot-set.h b/deps/v8/src/heap/slot-set.h
index c71192bfdc..b1321b6fca 100644
--- a/deps/v8/src/heap/slot-set.h
+++ b/deps/v8/src/heap/slot-set.h
@@ -6,6 +6,7 @@
#define V8_HEAP_SLOT_SET_H_
#include <map>
+#include <memory>
#include <stack>
#include "src/base/atomic-utils.h"
@@ -21,19 +22,15 @@ namespace internal {
enum SlotCallbackResult { KEEP_SLOT, REMOVE_SLOT };
// Data structure for maintaining a set of slots in a standard (non-large)
-// page. The base address of the page must be set with SetPageStart before any
-// operation.
+// page.
// The data structure assumes that the slots are pointer size aligned and
// splits the valid slot offset range into kBuckets buckets.
// Each bucket is a bitmap with a bit corresponding to a single slot offset.
class SlotSet : public Malloced {
public:
enum EmptyBucketMode {
- FREE_EMPTY_BUCKETS, // An empty bucket will be deallocated immediately.
- PREFREE_EMPTY_BUCKETS, // An empty bucket will be unlinked from the slot
- // set, but deallocated on demand by a sweeper
- // thread.
- KEEP_EMPTY_BUCKETS // An empty bucket will be kept.
+ FREE_EMPTY_BUCKETS, // An empty bucket will be deallocated immediately.
+ KEEP_EMPTY_BUCKETS // An empty bucket will be kept.
};
SlotSet() {
@@ -46,15 +43,12 @@ class SlotSet : public Malloced {
for (int i = 0; i < kBuckets; i++) {
ReleaseBucket(i);
}
- FreeToBeFreedBuckets();
}
- void SetPageStart(Address page_start) { page_start_ = page_start; }
-
// The slot offset specifies a slot at address page_start_ + slot_offset.
// AccessMode defines whether there can be concurrent access on the buckets
// or not.
- template <AccessMode access_mode = AccessMode::ATOMIC>
+ template <AccessMode access_mode>
void Insert(int slot_offset) {
int bucket_index, cell_index, bit_index;
SlotToIndices(slot_offset, &bucket_index, &cell_index, &bit_index);
@@ -138,9 +132,7 @@ class SlotSet : public Malloced {
DCHECK(current_bucket == end_bucket ||
(current_bucket < end_bucket && current_cell == 0));
while (current_bucket < end_bucket) {
- if (mode == PREFREE_EMPTY_BUCKETS) {
- PreFreeEmptyBucket(current_bucket);
- } else if (mode == FREE_EMPTY_BUCKETS) {
+ if (mode == FREE_EMPTY_BUCKETS) {
ReleaseBucket(current_bucket);
} else {
DCHECK(mode == KEEP_EMPTY_BUCKETS);
@@ -152,11 +144,11 @@ class SlotSet : public Malloced {
current_bucket++;
}
// All buckets between start_bucket and end_bucket are cleared.
+ DCHECK(current_bucket == end_bucket);
+ if (current_bucket == kBuckets) return;
bucket = LoadBucket(&buckets_[current_bucket]);
- DCHECK(current_bucket == end_bucket && current_cell <= end_cell);
- if (current_bucket == kBuckets || bucket == nullptr) {
- return;
- }
+ DCHECK(current_cell <= end_cell);
+ if (bucket == nullptr) return;
while (current_cell < end_cell) {
StoreCell(&bucket[current_cell], 0);
current_cell++;
@@ -189,7 +181,7 @@ class SlotSet : public Malloced {
// else return REMOVE_SLOT;
// });
template <typename Callback>
- int Iterate(Callback callback, EmptyBucketMode mode) {
+ int Iterate(Address page_start, Callback callback, EmptyBucketMode mode) {
int new_count = 0;
for (int bucket_index = 0; bucket_index < kBuckets; bucket_index++) {
Bucket bucket = LoadBucket(&buckets_[bucket_index]);
@@ -205,7 +197,7 @@ class SlotSet : public Malloced {
int bit_offset = base::bits::CountTrailingZeros(cell);
uint32_t bit_mask = 1u << bit_offset;
uint32_t slot = (cell_offset + bit_offset) << kTaggedSizeLog2;
- if (callback(MaybeObjectSlot(page_start_ + slot)) == KEEP_SLOT) {
+ if (callback(MaybeObjectSlot(page_start + slot)) == KEEP_SLOT) {
++in_bucket_count;
} else {
mask |= bit_mask;
@@ -218,31 +210,12 @@ class SlotSet : public Malloced {
}
}
}
- if (mode == PREFREE_EMPTY_BUCKETS && in_bucket_count == 0) {
- PreFreeEmptyBucket(bucket_index);
- }
new_count += in_bucket_count;
}
}
return new_count;
}
- int NumberOfPreFreedEmptyBuckets() {
- base::MutexGuard guard(&to_be_freed_buckets_mutex_);
- return static_cast<int>(to_be_freed_buckets_.size());
- }
-
- void PreFreeEmptyBuckets() {
- for (int bucket_index = 0; bucket_index < kBuckets; bucket_index++) {
- Bucket bucket = LoadBucket(&buckets_[bucket_index]);
- if (bucket != nullptr) {
- if (IsEmptyBucket(bucket)) {
- PreFreeEmptyBucket(bucket_index);
- }
- }
- }
- }
-
void FreeEmptyBuckets() {
for (int bucket_index = 0; bucket_index < kBuckets; bucket_index++) {
Bucket bucket = LoadBucket(&buckets_[bucket_index]);
@@ -254,27 +227,22 @@ class SlotSet : public Malloced {
}
}
- void FreeToBeFreedBuckets() {
- base::MutexGuard guard(&to_be_freed_buckets_mutex_);
- while (!to_be_freed_buckets_.empty()) {
- Bucket top = to_be_freed_buckets_.top();
- to_be_freed_buckets_.pop();
- DeleteArray<uint32_t>(top);
- }
- DCHECK_EQ(0u, to_be_freed_buckets_.size());
- }
-
- private:
- using Bucket = uint32_t*;
static const int kMaxSlots = (1 << kPageSizeBits) / kTaggedSize;
static const int kCellsPerBucket = 32;
static const int kCellsPerBucketLog2 = 5;
+ static const int kCellSizeBytesLog2 = 2;
+ static const int kCellSizeBytes = 1 << kCellSizeBytesLog2;
static const int kBitsPerCell = 32;
static const int kBitsPerCellLog2 = 5;
static const int kBitsPerBucket = kCellsPerBucket * kBitsPerCell;
static const int kBitsPerBucketLog2 = kCellsPerBucketLog2 + kBitsPerCellLog2;
static const int kBuckets = kMaxSlots / kCellsPerBucket / kBitsPerCell;
+ static const int kSize = kBuckets * kSystemPointerSize;
+
+ using Bucket = uint32_t*;
+
+ private:
Bucket AllocateBucket() {
Bucket result = NewArray<uint32_t>(kCellsPerBucket);
for (int i = 0; i < kCellsPerBucket; i++) {
@@ -293,15 +261,6 @@ class SlotSet : public Malloced {
}
}
- void PreFreeEmptyBucket(int bucket_index) {
- Bucket bucket = LoadBucket(&buckets_[bucket_index]);
- if (bucket != nullptr) {
- base::MutexGuard guard(&to_be_freed_buckets_mutex_);
- to_be_freed_buckets_.push(bucket);
- StoreBucket(&buckets_[bucket_index], nullptr);
- }
- }
-
void ReleaseBucket(int bucket_index) {
Bucket bucket = LoadBucket(&buckets_[bucket_index]);
StoreBucket(&buckets_[bucket_index], nullptr);
@@ -381,11 +340,11 @@ class SlotSet : public Malloced {
}
Bucket buckets_[kBuckets];
- Address page_start_;
- base::Mutex to_be_freed_buckets_mutex_;
- std::stack<uint32_t*> to_be_freed_buckets_;
};
+STATIC_ASSERT(std::is_standard_layout<SlotSet>::value);
+STATIC_ASSERT(sizeof(SlotSet) == SlotSet::kSize);
+
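The constants above (kCellsPerBucket = 32, kBitsPerCell = 32, tagged-size granularity) imply a fixed bucket/cell/bit decomposition of a slot offset. The standalone sketch below mirrors that indexing arithmetic; the helper name SlotToIndices and the value kTaggedSizeLog2 = 3 are assumptions for illustration, not the exact definitions in this header.

#include <cassert>

// Illustrative constants mirroring the SlotSet layout above; kTaggedSizeLog2
// is assumed to be 3 (8-byte tagged slots on a 64-bit build).
constexpr int kTaggedSizeLog2 = 3;
constexpr int kBitsPerCellLog2 = 5;     // 32 bits per cell
constexpr int kCellsPerBucketLog2 = 5;  // 32 cells per bucket
constexpr int kBitsPerBucketLog2 = kBitsPerCellLog2 + kCellsPerBucketLog2;

// Maps a byte offset within a page onto (bucket, cell, bit) coordinates.
void SlotToIndices(int slot_offset, int* bucket, int* cell, int* bit) {
  int slot = slot_offset >> kTaggedSizeLog2;  // slot index within the page
  *bucket = slot >> kBitsPerBucketLog2;       // 1024 slots per bucket
  *cell = (slot >> kBitsPerCellLog2) & ((1 << kCellsPerBucketLog2) - 1);
  *bit = slot & ((1 << kBitsPerCellLog2) - 1);
}

int main() {
  int bucket, cell, bit;
  SlotToIndices(0x2048, &bucket, &cell, &bit);  // some tagged-aligned offset
  assert(bucket == 1 && cell == 0 && bit == 9);
  return 0;
}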
enum SlotType {
FULL_EMBEDDED_OBJECT_SLOT,
COMPRESSED_EMBEDDED_OBJECT_SLOT,
@@ -396,9 +355,9 @@ enum SlotType {
};
// Data structure for maintaining a list of typed slots in a page.
-// Typed slots can only appear in Code and JSFunction objects, so
+// Typed slots can only appear in Code objects, so
// the maximum possible offset is limited by the LargePage::kMaxCodePageSize.
-// The implementation is a chain of chunks, where each chunks is an array of
+// The implementation is a chain of chunks, where each chunk is an array of
// encoded (slot type, slot offset) pairs.
// There is no duplicate detection and we do not expect many duplicates because
// typed slots contain V8 internal pointers that are not directly exposed to JS.
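Each entry in such a chunk packs (slot type, slot offset) into a single 32-bit word. A minimal sketch of that kind of bit-field encoding, with made-up field widths rather than V8's actual TypeField/OffsetField layout:

#include <cassert>
#include <cstdint>
#include <vector>

// Toy encoding of (type, offset) into one uint32_t; the field widths here are
// assumptions, not the real TypeField/OffsetField definitions.
constexpr uint32_t kTypeBits = 3;
constexpr uint32_t kTypeMask = (1u << kTypeBits) - 1;

uint32_t Encode(uint32_t type, uint32_t offset) {
  return (offset << kTypeBits) | (type & kTypeMask);
}
uint32_t DecodeType(uint32_t encoded) { return encoded & kTypeMask; }
uint32_t DecodeOffset(uint32_t encoded) { return encoded >> kTypeBits; }

int main() {
  // A chunk is simply a growable array of encoded pairs, chained per page.
  std::vector<uint32_t> chunk;
  chunk.push_back(Encode(/*type=*/1, /*offset=*/0x40));
  assert(DecodeType(chunk[0]) == 1 && DecodeOffset(chunk[0]) == 0x40);
  return 0;
}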
@@ -418,17 +377,15 @@ class V8_EXPORT_PRIVATE TypedSlots {
};
struct Chunk {
Chunk* next;
- TypedSlot* buffer;
- int32_t capacity;
- int32_t count;
+ std::vector<TypedSlot> buffer;
};
- static const int kInitialBufferSize = 100;
- static const int kMaxBufferSize = 16 * KB;
- static int NextCapacity(int capacity) {
+ static const size_t kInitialBufferSize = 100;
+ static const size_t kMaxBufferSize = 16 * KB;
+ static size_t NextCapacity(size_t capacity) {
return Min(kMaxBufferSize, capacity * 2);
}
Chunk* EnsureChunk();
- Chunk* NewChunk(Chunk* next, int capacity);
+ Chunk* NewChunk(Chunk* next, size_t capacity);
Chunk* head_ = nullptr;
Chunk* tail_ = nullptr;
};
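NextCapacity above doubles a chunk's buffer size but clamps it at kMaxBufferSize. A tiny standalone sketch of the resulting growth sequence, reusing the values from the declarations above (everything else is illustrative):

#include <algorithm>
#include <cstddef>
#include <cstdio>

int main() {
  const size_t kInitialBufferSize = 100;
  const size_t kMaxBufferSize = 16 * 1024;
  // Prints 100, 200, 400, ..., 12800, 16384: doubling clamped at the maximum.
  size_t capacity = kInitialBufferSize;
  while (true) {
    std::printf("%zu\n", capacity);
    if (capacity == kMaxBufferSize) break;
    capacity = std::min(kMaxBufferSize, capacity * 2);
  }
  return 0;
}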
@@ -437,15 +394,10 @@ class V8_EXPORT_PRIVATE TypedSlots {
// clearing of invalid slots.
class V8_EXPORT_PRIVATE TypedSlotSet : public TypedSlots {
public:
- // The PREFREE_EMPTY_CHUNKS indicates that chunks detected as empty
- // during the iteration are queued in to_be_freed_chunks_, which are
- // then freed in FreeToBeFreedChunks.
- enum IterationMode { PREFREE_EMPTY_CHUNKS, KEEP_EMPTY_CHUNKS };
+ enum IterationMode { FREE_EMPTY_CHUNKS, KEEP_EMPTY_CHUNKS };
explicit TypedSlotSet(Address page_start) : page_start_(page_start) {}
- ~TypedSlotSet() override;
-
// Iterate over all slots in the set and for each slot invoke the callback.
// If the callback returns REMOVE_SLOT then the slot is removed from the set.
// Returns the new number of slots.
@@ -463,11 +415,8 @@ class V8_EXPORT_PRIVATE TypedSlotSet : public TypedSlots {
Chunk* previous = nullptr;
int new_count = 0;
while (chunk != nullptr) {
- TypedSlot* buffer = chunk->buffer;
- int count = chunk->count;
bool empty = true;
- for (int i = 0; i < count; i++) {
- TypedSlot slot = LoadTypedSlot(buffer + i);
+ for (TypedSlot& slot : chunk->buffer) {
SlotType type = TypeField::decode(slot.type_and_offset);
if (type != CLEARED_SLOT) {
uint32_t offset = OffsetField::decode(slot.type_and_offset);
@@ -476,12 +425,12 @@ class V8_EXPORT_PRIVATE TypedSlotSet : public TypedSlots {
new_count++;
empty = false;
} else {
- ClearTypedSlot(buffer + i);
+ slot = ClearedTypedSlot();
}
}
}
Chunk* next = chunk->next;
- if (mode == PREFREE_EMPTY_CHUNKS && empty) {
+ if (mode == FREE_EMPTY_CHUNKS && empty) {
// We remove the chunk from the list but let it still point to its next
// chunk to allow concurrent iteration.
if (previous) {
@@ -489,8 +438,8 @@ class V8_EXPORT_PRIVATE TypedSlotSet : public TypedSlots {
} else {
StoreHead(next);
}
- base::MutexGuard guard(&to_be_freed_chunks_mutex_);
- to_be_freed_chunks_.push(std::unique_ptr<Chunk>(chunk));
+
+ delete chunk;
} else {
previous = chunk;
}
@@ -518,19 +467,11 @@ class V8_EXPORT_PRIVATE TypedSlotSet : public TypedSlots {
void StoreHead(Chunk* chunk) {
base::AsAtomicPointer::Relaxed_Store(&head_, chunk);
}
- TypedSlot LoadTypedSlot(TypedSlot* slot) {
- return TypedSlot{base::AsAtomic32::Relaxed_Load(&slot->type_and_offset)};
- }
- void ClearTypedSlot(TypedSlot* slot) {
- // Order is important here and should match that of LoadTypedSlot.
- base::AsAtomic32::Relaxed_Store(
- &slot->type_and_offset,
- TypeField::encode(CLEARED_SLOT) | OffsetField::encode(0));
+ static TypedSlot ClearedTypedSlot() {
+ return TypedSlot{TypeField::encode(CLEARED_SLOT) | OffsetField::encode(0)};
}
Address page_start_;
- base::Mutex to_be_freed_chunks_mutex_;
- std::stack<std::unique_ptr<Chunk>> to_be_freed_chunks_;
};
} // namespace internal
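The FREE_EMPTY_CHUNKS path above unlinks an empty chunk from the list while leaving the chunk's own next pointer intact, so a reader that is already standing on that chunk can still advance. A self-contained model of that unlink step on a toy singly linked list (Node and Unlink are illustrative names; a real concurrent reader would additionally need deferred reclamation before the chunk may be freed):

#include <atomic>
#include <cassert>

// Toy chunk list; Unlink removes 'chunk' from the chain but leaves the
// chunk's own next pointer untouched, so a reader currently on 'chunk' can
// still advance to the rest of the list.
struct Node {
  std::atomic<Node*> next{nullptr};
};

void Unlink(std::atomic<Node*>* head, Node* prev, Node* chunk) {
  Node* next = chunk->next.load(std::memory_order_relaxed);
  if (prev) {
    prev->next.store(next, std::memory_order_relaxed);
  } else {
    head->store(next, std::memory_order_relaxed);
  }
  // Freeing 'chunk' is only safe once no reader can still hold it.
}

int main() {
  Node a, b, c;
  std::atomic<Node*> head{&a};
  a.next = &b;
  b.next = &c;
  Unlink(&head, &a, &b);                   // drop the empty middle chunk
  assert(head.load()->next.load() == &c);  // a now links straight to c
  assert(b.next.load() == &c);             // b still points at its old next
  return 0;
}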
diff --git a/deps/v8/src/heap/spaces.cc b/deps/v8/src/heap/spaces.cc
index dd8ba30101..2c5d5c298d 100644
--- a/deps/v8/src/heap/spaces.cc
+++ b/deps/v8/src/heap/spaces.cc
@@ -11,14 +11,14 @@
#include "src/base/lsan.h"
#include "src/base/macros.h"
#include "src/base/platform/semaphore.h"
-#include "src/base/template-utils.h"
#include "src/execution/vm-state-inl.h"
-#include "src/heap/array-buffer-tracker.h"
+#include "src/heap/array-buffer-tracker-inl.h"
#include "src/heap/combined-heap.h"
#include "src/heap/concurrent-marking.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/heap-controller.h"
#include "src/heap/incremental-marking-inl.h"
+#include "src/heap/invalidated-slots-inl.h"
#include "src/heap/mark-compact.h"
#include "src/heap/read-only-heap.h"
#include "src/heap/remembered-set.h"
@@ -220,7 +220,7 @@ void MemoryAllocator::InitializeCodePageAllocator(
requested));
heap_reservation_ = std::move(reservation);
- code_page_allocator_instance_ = base::make_unique<base::BoundedPageAllocator>(
+ code_page_allocator_instance_ = std::make_unique<base::BoundedPageAllocator>(
page_allocator, aligned_base, size,
static_cast<size_t>(MemoryChunk::kAlignment));
code_page_allocator_ = code_page_allocator_instance_.get();
@@ -286,7 +286,7 @@ void MemoryAllocator::Unmapper::FreeQueuedChunks() {
}
return;
}
- auto task = base::make_unique<UnmapFreeMemoryTask>(heap_->isolate(), this);
+ auto task = std::make_unique<UnmapFreeMemoryTask>(heap_->isolate(), this);
if (FLAG_trace_unmapper) {
PrintIsolate(heap_->isolate(),
"Unmapper::FreeQueuedChunks: new task id=%" PRIu64 "\n",
@@ -699,6 +699,7 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
chunk->InitializeReservedMemory();
base::AsAtomicPointer::Release_Store(&chunk->slot_set_[OLD_TO_NEW], nullptr);
base::AsAtomicPointer::Release_Store(&chunk->slot_set_[OLD_TO_OLD], nullptr);
+ base::AsAtomicPointer::Release_Store(&chunk->sweeping_slot_set_, nullptr);
base::AsAtomicPointer::Release_Store(&chunk->typed_slot_set_[OLD_TO_NEW],
nullptr);
base::AsAtomicPointer::Release_Store(&chunk->typed_slot_set_[OLD_TO_OLD],
@@ -856,6 +857,33 @@ Page* Page::ConvertNewToOld(Page* old_page) {
return new_page;
}
+void Page::MoveOldToNewRememberedSetForSweeping() {
+ CHECK_NULL(sweeping_slot_set_);
+ sweeping_slot_set_ = slot_set_[OLD_TO_NEW];
+ slot_set_[OLD_TO_NEW] = nullptr;
+}
+
+void Page::MergeOldToNewRememberedSets() {
+ if (sweeping_slot_set_ == nullptr) return;
+
+ RememberedSet<OLD_TO_NEW>::Iterate(
+ this,
+ [this](MaybeObjectSlot slot) {
+ Address address = slot.address();
+ RememberedSetSweeping::Insert<AccessMode::NON_ATOMIC>(this, address);
+ return KEEP_SLOT;
+ },
+ SlotSet::KEEP_EMPTY_BUCKETS);
+
+ if (slot_set_[OLD_TO_NEW]) {
+ ReleaseSlotSet<OLD_TO_NEW>();
+ }
+
+ CHECK_NULL(slot_set_[OLD_TO_NEW]);
+ slot_set_[OLD_TO_NEW] = sweeping_slot_set_;
+ sweeping_slot_set_ = nullptr;
+}
+
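Conceptually, MoveOldToNewRememberedSetForSweeping parks the page's old-to-new set while the sweeper trims it, and MergeOldToNewRememberedSets later folds any slots recorded in the meantime back into the parked set before reinstating it. A minimal model of that move/merge dance, with std::set<int> standing in for SlotSet and all names purely illustrative:

#include <cassert>
#include <memory>
#include <set>

// Toy page with a regular old-to-new set and a parked "sweeping" set.
struct ToyPage {
  std::unique_ptr<std::set<int>> old_to_new;
  std::unique_ptr<std::set<int>> sweeping;

  void MoveForSweeping() {
    assert(!sweeping);
    sweeping = std::move(old_to_new);  // park the current set
  }

  void Merge() {
    if (!sweeping) return;
    if (old_to_new) {
      // Slots recorded while sweeping ran are folded into the parked set.
      sweeping->insert(old_to_new->begin(), old_to_new->end());
      old_to_new.reset();
    }
    old_to_new = std::move(sweeping);  // reinstate the merged set
  }
};

int main() {
  ToyPage page;
  page.old_to_new = std::make_unique<std::set<int>>(std::set<int>{8, 16});
  page.MoveForSweeping();
  page.old_to_new = std::make_unique<std::set<int>>(std::set<int>{24});
  page.Merge();
  assert(page.old_to_new->count(8) && page.old_to_new->count(24));
  return 0;
}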
size_t MemoryChunk::CommittedPhysicalMemory() {
if (!base::OS::HasLazyCommits() || owner_identity() == LO_SPACE)
return size();
@@ -1376,6 +1404,7 @@ void MemoryChunk::ReleaseAllocatedMemoryNeededForWritableChunk() {
}
ReleaseSlotSet<OLD_TO_NEW>();
+ ReleaseSlotSet(&sweeping_slot_set_);
ReleaseSlotSet<OLD_TO_OLD>();
ReleaseTypedSlotSet<OLD_TO_NEW>();
ReleaseTypedSlotSet<OLD_TO_OLD>();
@@ -1399,11 +1428,7 @@ void MemoryChunk::ReleaseAllAllocatedMemory() {
static SlotSet* AllocateAndInitializeSlotSet(size_t size, Address page_start) {
size_t pages = (size + Page::kPageSize - 1) / Page::kPageSize;
DCHECK_LT(0, pages);
- SlotSet* slot_set = new SlotSet[pages];
- for (size_t i = 0; i < pages; i++) {
- slot_set[i].SetPageStart(page_start + i * Page::kPageSize);
- }
- return slot_set;
+ return new SlotSet[pages];
}
template V8_EXPORT_PRIVATE SlotSet* MemoryChunk::AllocateSlotSet<OLD_TO_NEW>();
@@ -1411,15 +1436,23 @@ template V8_EXPORT_PRIVATE SlotSet* MemoryChunk::AllocateSlotSet<OLD_TO_OLD>();
template <RememberedSetType type>
SlotSet* MemoryChunk::AllocateSlotSet() {
- SlotSet* slot_set = AllocateAndInitializeSlotSet(size(), address());
+ return AllocateSlotSet(&slot_set_[type]);
+}
+
+SlotSet* MemoryChunk::AllocateSweepingSlotSet() {
+ return AllocateSlotSet(&sweeping_slot_set_);
+}
+
+SlotSet* MemoryChunk::AllocateSlotSet(SlotSet** slot_set) {
+ SlotSet* new_slot_set = AllocateAndInitializeSlotSet(size(), address());
SlotSet* old_slot_set = base::AsAtomicPointer::Release_CompareAndSwap(
- &slot_set_[type], nullptr, slot_set);
+ slot_set, nullptr, new_slot_set);
if (old_slot_set != nullptr) {
- delete[] slot_set;
- slot_set = old_slot_set;
+ delete[] new_slot_set;
+ new_slot_set = old_slot_set;
}
- DCHECK(slot_set);
- return slot_set;
+ DCHECK(new_slot_set);
+ return new_slot_set;
}
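The function above tolerates racing callers by allocating eagerly and publishing with a release compare-and-swap; the loser deletes its copy and adopts the winner's pointer. A small standalone version of the same pattern using std::atomic (the type and function names are placeholders):

#include <atomic>
#include <cassert>

struct Buckets { int data[32] = {}; };

// Allocate eagerly, publish with compare_exchange; on losing the race, free
// our copy and return the pointer that is already installed.
Buckets* AllocateOnce(std::atomic<Buckets*>* slot) {
  Buckets* fresh = new Buckets();
  Buckets* expected = nullptr;
  if (!slot->compare_exchange_strong(expected, fresh,
                                     std::memory_order_release,
                                     std::memory_order_relaxed)) {
    delete fresh;      // another thread installed its set first
    fresh = expected;  // 'expected' now holds the installed pointer
  }
  return fresh;
}

int main() {
  std::atomic<Buckets*> slot{nullptr};
  Buckets* a = AllocateOnce(&slot);
  Buckets* b = AllocateOnce(&slot);  // second call adopts the existing set
  assert(a == b && slot.load() == a);
  delete a;
  return 0;
}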
template void MemoryChunk::ReleaseSlotSet<OLD_TO_NEW>();
@@ -1427,10 +1460,13 @@ template void MemoryChunk::ReleaseSlotSet<OLD_TO_OLD>();
template <RememberedSetType type>
void MemoryChunk::ReleaseSlotSet() {
- SlotSet* slot_set = slot_set_[type];
- if (slot_set) {
- slot_set_[type] = nullptr;
- delete[] slot_set;
+ ReleaseSlotSet(&slot_set_[type]);
+}
+
+void MemoryChunk::ReleaseSlotSet(SlotSet** slot_set) {
+ if (*slot_set) {
+ delete[] * slot_set;
+ *slot_set = nullptr;
}
}
@@ -1484,15 +1520,12 @@ void MemoryChunk::ReleaseInvalidatedSlots() {
}
template V8_EXPORT_PRIVATE void
-MemoryChunk::RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(HeapObject object,
- int size);
+MemoryChunk::RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(HeapObject object);
template V8_EXPORT_PRIVATE void
-MemoryChunk::RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(HeapObject object,
- int size);
+MemoryChunk::RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(HeapObject object);
template <RememberedSetType type>
-void MemoryChunk::RegisterObjectWithInvalidatedSlots(HeapObject object,
- int size) {
+void MemoryChunk::RegisterObjectWithInvalidatedSlots(HeapObject object) {
bool skip_slot_recording;
if (type == OLD_TO_NEW) {
@@ -1509,27 +1542,17 @@ void MemoryChunk::RegisterObjectWithInvalidatedSlots(HeapObject object,
AllocateInvalidatedSlots<type>();
}
- InvalidatedSlots* invalidated_slots = this->invalidated_slots<type>();
- InvalidatedSlots::iterator it = invalidated_slots->lower_bound(object);
+ invalidated_slots<type>()->insert(object);
+}
- if (it != invalidated_slots->end() && it->first == object) {
- // object was already inserted
- CHECK_LE(size, it->second);
- return;
+void MemoryChunk::InvalidateRecordedSlots(HeapObject object) {
+ if (heap()->incremental_marking()->IsCompacting()) {
+ // We cannot check slot_set_[OLD_TO_OLD] here, since the
+ // concurrent markers might insert slots concurrently.
+ RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(object);
}
- it = invalidated_slots->insert(it, std::make_pair(object, size));
-
- // prevent overlapping invalidated objects for old-to-new.
- if (type == OLD_TO_NEW && it != invalidated_slots->begin()) {
- HeapObject pred = (--it)->first;
- int pred_size = it->second;
- DCHECK_LT(pred.address(), object.address());
-
- if (pred.address() + pred_size > object.address()) {
- it->second = static_cast<int>(object.address() - pred.address());
- }
- }
+ RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(object);
}
template bool MemoryChunk::RegisteredObjectWithInvalidatedSlots<OLD_TO_NEW>(
@@ -1546,27 +1569,6 @@ bool MemoryChunk::RegisteredObjectWithInvalidatedSlots(HeapObject object) {
invalidated_slots<type>()->end();
}
-template void MemoryChunk::MoveObjectWithInvalidatedSlots<OLD_TO_OLD>(
- HeapObject old_start, HeapObject new_start);
-
-template <RememberedSetType type>
-void MemoryChunk::MoveObjectWithInvalidatedSlots(HeapObject old_start,
- HeapObject new_start) {
- DCHECK_LT(old_start, new_start);
- DCHECK_EQ(MemoryChunk::FromHeapObject(old_start),
- MemoryChunk::FromHeapObject(new_start));
- static_assert(type == OLD_TO_OLD, "only use this for old-to-old slots");
- if (!ShouldSkipEvacuationSlotRecording() && invalidated_slots<type>()) {
- auto it = invalidated_slots<type>()->find(old_start);
- if (it != invalidated_slots<type>()->end()) {
- int old_size = it->second;
- int delta = static_cast<int>(new_start.address() - old_start.address());
- invalidated_slots<type>()->erase(it);
- (*invalidated_slots<type>())[new_start] = old_size - delta;
- }
- }
-}
-
void MemoryChunk::ReleaseLocalTracker() {
DCHECK_NOT_NULL(local_tracker_);
delete local_tracker_;
@@ -1657,6 +1659,7 @@ void PagedSpace::RefillFreeList() {
DCHECK(!IsDetached());
MarkCompactCollector* collector = heap()->mark_compact_collector();
size_t added = 0;
+
{
Page* p = nullptr;
while ((p = collector->sweeper()->GetSweptPageSafe(this)) != nullptr) {
@@ -1667,6 +1670,15 @@ void PagedSpace::RefillFreeList() {
category->Reset(free_list());
});
}
+
+ // Also merge old-to-new remembered sets outside of collections.
+ // Do not do this during GC, because of races during scavenges:
+ // one thread might iterate a remembered set while another thread
+ // merges it.
+ if (!is_local()) {
+ p->MergeOldToNewRememberedSets();
+ }
+
// Only during compaction pages can actually change ownership. This is
// safe because there exists no other competing action on the page links
// during compaction.
@@ -1709,6 +1721,9 @@ void PagedSpace::MergeCompactionSpace(CompactionSpace* other) {
// Move over pages.
for (auto it = other->begin(); it != other->end();) {
Page* p = *(it++);
+
+ p->MergeOldToNewRememberedSets();
+
// Relinking requires the category to be unlinked.
other->RemovePage(p);
AddPage(p);
@@ -1883,19 +1898,8 @@ Address SpaceWithLinearArea::ComputeLimit(Address start, Address end,
// Generated code may allocate inline from the linear allocation area.
// To make sure we can observe these allocations, we use a lower limit.
size_t step = GetNextInlineAllocationStepSize();
-
- // TODO(ofrobots): there is subtle difference between old space and new
- // space here. Any way to avoid it? `step - 1` makes more sense as we would
- // like to sample the object that straddles the `start + step` boundary.
- // Rounding down further would introduce a small statistical error in
- // sampling. However, presently PagedSpace requires limit to be aligned.
- size_t rounded_step;
- if (identity() == NEW_SPACE) {
- DCHECK_GE(step, 1);
- rounded_step = step - 1;
- } else {
- rounded_step = RoundSizeDownToObjectAlignment(static_cast<int>(step));
- }
+ size_t rounded_step =
+ RoundSizeDownToObjectAlignment(static_cast<int>(step - 1));
return Min(static_cast<Address>(start + min_size + rounded_step), end);
} else {
// The entire node can be used as the linear allocation area.
@@ -2139,7 +2143,7 @@ void PagedSpace::Verify(Isolate* isolate, ObjectVisitor* visitor) {
} else if (object.IsJSArrayBuffer()) {
JSArrayBuffer array_buffer = JSArrayBuffer::cast(object);
if (ArrayBufferTracker::IsTracked(array_buffer)) {
- size_t size = array_buffer.byte_length();
+ size_t size = PerIsolateAccountingLength(array_buffer);
external_page_bytes[ExternalBackingStoreType::kArrayBuffer] += size;
}
}
@@ -2628,7 +2632,7 @@ void NewSpace::Verify(Isolate* isolate) {
} else if (object.IsJSArrayBuffer()) {
JSArrayBuffer array_buffer = JSArrayBuffer::cast(object);
if (ArrayBufferTracker::IsTracked(array_buffer)) {
- size_t size = array_buffer.byte_length();
+ size_t size = PerIsolateAccountingLength(array_buffer);
external_space_bytes[ExternalBackingStoreType::kArrayBuffer] += size;
}
}
@@ -3942,6 +3946,7 @@ Address LargePage::GetAddressToShrink(Address object_address,
}
void LargePage::ClearOutOfLiveRangeSlots(Address free_start) {
+ DCHECK_NULL(this->sweeping_slot_set());
RememberedSet<OLD_TO_NEW>::RemoveRange(this, free_start, area_end(),
SlotSet::FREE_EMPTY_BUCKETS);
RememberedSet<OLD_TO_OLD>::RemoveRange(this, free_start, area_end(),
diff --git a/deps/v8/src/heap/spaces.h b/deps/v8/src/heap/spaces.h
index ebb6876cbe..5652042d20 100644
--- a/deps/v8/src/heap/spaces.h
+++ b/deps/v8/src/heap/spaces.h
@@ -130,12 +130,6 @@ enum FreeMode { kLinkCategory, kDoNotLinkCategory };
enum class SpaceAccountingMode { kSpaceAccounted, kSpaceUnaccounted };
-enum RememberedSetType {
- OLD_TO_NEW,
- OLD_TO_OLD,
- NUMBER_OF_REMEMBERED_SET_TYPES = OLD_TO_OLD + 1
-};
-
// A free list category maintains a linked list of free memory blocks.
class FreeListCategory {
public:
@@ -606,7 +600,7 @@ class MemoryChunk : public BasicMemoryChunk {
+ kSystemPointerSize // Address owner_
+ kSizetSize // size_t progress_bar_
+ kIntptrSize // intptr_t live_byte_count_
- + kSystemPointerSize * NUMBER_OF_REMEMBERED_SET_TYPES // SlotSet* array
+ + kSystemPointerSize // SlotSet* sweeping_slot_set_
+ kSystemPointerSize *
NUMBER_OF_REMEMBERED_SET_TYPES // TypedSlotSet* array
+ kSystemPointerSize *
@@ -706,6 +700,13 @@ class MemoryChunk : public BasicMemoryChunk {
return slot_set_[type];
}
+ template <AccessMode access_mode = AccessMode::ATOMIC>
+ SlotSet* sweeping_slot_set() {
+ if (access_mode == AccessMode::ATOMIC)
+ return base::AsAtomicPointer::Acquire_Load(&sweeping_slot_set_);
+ return sweeping_slot_set_;
+ }
+
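The access_mode template parameter above selects between an acquire load and a plain load of the same field. A compact standalone analogue using std::atomic, where the non-atomic variant is modelled as a relaxed load (the AccessMode enumerators mirror the ones used here; everything else is illustrative):

#include <atomic>

enum class AccessMode { ATOMIC, NON_ATOMIC };

struct Holder {
  std::atomic<int*> ptr{nullptr};

  template <AccessMode mode = AccessMode::ATOMIC>
  int* get() {
    if (mode == AccessMode::ATOMIC)
      return ptr.load(std::memory_order_acquire);  // pairs with a release store
    return ptr.load(std::memory_order_relaxed);    // caller rules out races
  }
};

int main() {
  Holder h;
  int value = 42;
  h.ptr.store(&value, std::memory_order_release);
  bool same = *h.get<AccessMode::ATOMIC>() == *h.get<AccessMode::NON_ATOMIC>();
  return same ? 0 : 1;
}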
template <RememberedSetType type, AccessMode access_mode = AccessMode::ATOMIC>
TypedSlotSet* typed_slot_set() {
if (access_mode == AccessMode::ATOMIC)
@@ -715,9 +716,13 @@ class MemoryChunk : public BasicMemoryChunk {
template <RememberedSetType type>
V8_EXPORT_PRIVATE SlotSet* AllocateSlotSet();
+ SlotSet* AllocateSweepingSlotSet();
+ SlotSet* AllocateSlotSet(SlotSet** slot_set);
+
// Not safe to be called concurrently.
template <RememberedSetType type>
void ReleaseSlotSet();
+ void ReleaseSlotSet(SlotSet** slot_set);
template <RememberedSetType type>
TypedSlotSet* AllocateTypedSlotSet();
// Not safe to be called concurrently.
@@ -729,12 +734,8 @@ class MemoryChunk : public BasicMemoryChunk {
template <RememberedSetType type>
void ReleaseInvalidatedSlots();
template <RememberedSetType type>
- V8_EXPORT_PRIVATE void RegisterObjectWithInvalidatedSlots(HeapObject object,
- int size);
- // Updates invalidated_slots after array left-trimming.
- template <RememberedSetType type>
- void MoveObjectWithInvalidatedSlots(HeapObject old_start,
- HeapObject new_start);
+ V8_EXPORT_PRIVATE void RegisterObjectWithInvalidatedSlots(HeapObject object);
+ void InvalidateRecordedSlots(HeapObject object);
template <RememberedSetType type>
bool RegisteredObjectWithInvalidatedSlots(HeapObject object);
template <RememberedSetType type>
@@ -914,7 +915,7 @@ class MemoryChunk : public BasicMemoryChunk {
// A single slot set for small pages (of size kPageSize) or an array of slot
// sets for large pages. In the latter case the number of entries in the array
// is ceil(size() / kPageSize).
- SlotSet* slot_set_[NUMBER_OF_REMEMBERED_SET_TYPES];
+ SlotSet* sweeping_slot_set_;
TypedSlotSet* typed_slot_set_[NUMBER_OF_REMEMBERED_SET_TYPES];
InvalidatedSlots* invalidated_slots_[NUMBER_OF_REMEMBERED_SET_TYPES];
@@ -1097,6 +1098,9 @@ class Page : public MemoryChunk {
void AllocateFreeListCategories();
void ReleaseFreeListCategories();
+ void MoveOldToNewRememberedSetForSweeping();
+ void MergeOldToNewRememberedSets();
+
#ifdef DEBUG
void Print();
#endif // DEBUG
diff --git a/deps/v8/src/heap/store-buffer-inl.h b/deps/v8/src/heap/store-buffer-inl.h
deleted file mode 100644
index b43098bf57..0000000000
--- a/deps/v8/src/heap/store-buffer-inl.h
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_HEAP_STORE_BUFFER_INL_H_
-#define V8_HEAP_STORE_BUFFER_INL_H_
-
-#include "src/heap/store-buffer.h"
-
-#include "src/heap/heap-inl.h"
-
-namespace v8 {
-namespace internal {
-
-void StoreBuffer::InsertIntoStoreBuffer(Address slot) {
- if (top_ + sizeof(Address) > limit_[current_]) {
- StoreBufferOverflow(heap_->isolate());
- }
- *top_ = slot;
- top_++;
-}
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_HEAP_STORE_BUFFER_INL_H_
diff --git a/deps/v8/src/heap/store-buffer.cc b/deps/v8/src/heap/store-buffer.cc
deleted file mode 100644
index 349e787740..0000000000
--- a/deps/v8/src/heap/store-buffer.cc
+++ /dev/null
@@ -1,174 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/heap/store-buffer.h"
-
-#include <algorithm>
-
-#include "src/base/bits.h"
-#include "src/base/macros.h"
-#include "src/base/template-utils.h"
-#include "src/execution/isolate.h"
-#include "src/heap/incremental-marking.h"
-#include "src/heap/store-buffer-inl.h"
-#include "src/init/v8.h"
-#include "src/logging/counters.h"
-#include "src/objects/objects-inl.h"
-
-namespace v8 {
-namespace internal {
-
-StoreBuffer::StoreBuffer(Heap* heap)
- : heap_(heap), top_(nullptr), current_(0), mode_(NOT_IN_GC) {
- for (int i = 0; i < kStoreBuffers; i++) {
- start_[i] = nullptr;
- limit_[i] = nullptr;
- lazy_top_[i] = nullptr;
- }
- task_running_ = false;
- insertion_callback = &InsertDuringRuntime;
-}
-
-void StoreBuffer::SetUp() {
- v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
- // Round up the requested size in order to fulfill the VirtualMemory's
- // requirements on the requested size alignment. This may cause a bit of
- // memory wastage if the actual CommitPageSize() will be bigger than the
- // kMinExpectedOSPageSize value but this is a trade-off for keeping the
- // store buffer overflow check in write barriers cheap.
- const size_t requested_size = RoundUp(kStoreBufferSize * kStoreBuffers,
- page_allocator->CommitPageSize());
- // Allocate buffer memory aligned at least to kStoreBufferSize. This lets us
- // use a bit test to detect the ends of the buffers.
- STATIC_ASSERT(base::bits::IsPowerOfTwo(kStoreBufferSize));
- const size_t alignment =
- std::max<size_t>(kStoreBufferSize, page_allocator->AllocatePageSize());
- void* hint = AlignedAddress(heap_->GetRandomMmapAddr(), alignment);
- VirtualMemory reservation(page_allocator, requested_size, hint, alignment);
- if (!reservation.IsReserved()) {
- heap_->FatalProcessOutOfMemory("StoreBuffer::SetUp");
- }
-
- Address start = reservation.address();
- const size_t allocated_size = reservation.size();
-
- start_[0] = reinterpret_cast<Address*>(start);
- limit_[0] = start_[0] + (kStoreBufferSize / kSystemPointerSize);
- start_[1] = limit_[0];
- limit_[1] = start_[1] + (kStoreBufferSize / kSystemPointerSize);
-
- // Sanity check the buffers.
- Address* vm_limit = reinterpret_cast<Address*>(start + allocated_size);
- USE(vm_limit);
- for (int i = 0; i < kStoreBuffers; i++) {
- DCHECK(reinterpret_cast<Address>(start_[i]) >= reservation.address());
- DCHECK(reinterpret_cast<Address>(limit_[i]) >= reservation.address());
- DCHECK(start_[i] <= vm_limit);
- DCHECK(limit_[i] <= vm_limit);
- DCHECK_EQ(0, reinterpret_cast<Address>(limit_[i]) & kStoreBufferMask);
- }
-
- // Set RW permissions only on the pages we use.
- const size_t used_size = RoundUp(requested_size, CommitPageSize());
- if (!reservation.SetPermissions(start, used_size,
- PageAllocator::kReadWrite)) {
- heap_->FatalProcessOutOfMemory("StoreBuffer::SetUp");
- }
- current_ = 0;
- top_ = start_[current_];
- virtual_memory_ = std::move(reservation);
-}
-
-void StoreBuffer::TearDown() {
- if (virtual_memory_.IsReserved()) virtual_memory_.Free();
- top_ = nullptr;
- for (int i = 0; i < kStoreBuffers; i++) {
- start_[i] = nullptr;
- limit_[i] = nullptr;
- lazy_top_[i] = nullptr;
- }
-}
-
-void StoreBuffer::InsertDuringRuntime(StoreBuffer* store_buffer, Address slot) {
- DCHECK(store_buffer->mode() == StoreBuffer::NOT_IN_GC);
- store_buffer->InsertIntoStoreBuffer(slot);
-}
-
-void StoreBuffer::InsertDuringGarbageCollection(StoreBuffer* store_buffer,
- Address slot) {
- DCHECK(store_buffer->mode() != StoreBuffer::NOT_IN_GC);
- RememberedSet<OLD_TO_NEW>::Insert(Page::FromAddress(slot), slot);
-}
-
-void StoreBuffer::SetMode(StoreBufferMode mode) {
- mode_ = mode;
- if (mode == NOT_IN_GC) {
- insertion_callback = &InsertDuringRuntime;
- } else {
- insertion_callback = &InsertDuringGarbageCollection;
- }
-}
-
-int StoreBuffer::StoreBufferOverflow(Isolate* isolate) {
- isolate->heap()->store_buffer()->FlipStoreBuffers();
- isolate->counters()->store_buffer_overflows()->Increment();
- // Called by RecordWriteCodeStubAssembler, which doesn't accept void type
- return 0;
-}
-
-void StoreBuffer::FlipStoreBuffers() {
- base::MutexGuard guard(&mutex_);
- int other = (current_ + 1) % kStoreBuffers;
- MoveEntriesToRememberedSet(other);
- lazy_top_[current_] = top_;
- current_ = other;
- top_ = start_[current_];
-
- if (!task_running_ && FLAG_concurrent_store_buffer) {
- task_running_ = true;
- V8::GetCurrentPlatform()->CallOnWorkerThread(
- base::make_unique<Task>(heap_->isolate(), this));
- }
-}
-
-void StoreBuffer::MoveEntriesToRememberedSet(int index) {
- if (!lazy_top_[index]) return;
- DCHECK_GE(index, 0);
- DCHECK_LT(index, kStoreBuffers);
- Address last_inserted_addr = kNullAddress;
- MemoryChunk* chunk = nullptr;
-
- for (Address* current = start_[index]; current < lazy_top_[index];
- current++) {
- Address addr = *current;
- if (chunk == nullptr ||
- MemoryChunk::BaseAddress(addr) != chunk->address()) {
- chunk = MemoryChunk::FromAnyPointerAddress(addr);
- }
- if (addr != last_inserted_addr) {
- RememberedSet<OLD_TO_NEW>::Insert(chunk, addr);
- last_inserted_addr = addr;
- }
- }
- lazy_top_[index] = nullptr;
-}
-
-void StoreBuffer::MoveAllEntriesToRememberedSet() {
- base::MutexGuard guard(&mutex_);
- int other = (current_ + 1) % kStoreBuffers;
- MoveEntriesToRememberedSet(other);
- lazy_top_[current_] = top_;
- MoveEntriesToRememberedSet(current_);
- top_ = start_[current_];
-}
-
-void StoreBuffer::ConcurrentlyProcessStoreBuffer() {
- base::MutexGuard guard(&mutex_);
- int other = (current_ + 1) % kStoreBuffers;
- MoveEntriesToRememberedSet(other);
- task_running_ = false;
-}
-
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/heap/store-buffer.h b/deps/v8/src/heap/store-buffer.h
deleted file mode 100644
index 025bb6a060..0000000000
--- a/deps/v8/src/heap/store-buffer.h
+++ /dev/null
@@ -1,153 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_HEAP_STORE_BUFFER_H_
-#define V8_HEAP_STORE_BUFFER_H_
-
-#include "src/base/logging.h"
-#include "src/base/platform/platform.h"
-#include "src/common/globals.h"
-#include "src/heap/gc-tracer.h"
-#include "src/heap/remembered-set.h"
-#include "src/heap/slot-set.h"
-#include "src/tasks/cancelable-task.h"
-#include "src/utils/allocation.h"
-
-namespace v8 {
-namespace internal {
-
-// Intermediate buffer that accumulates old-to-new stores from the generated
-// code. Moreover, it stores invalid old-to-new slots with two entries.
-// The first is a tagged address of the start of the invalid range, the second
-// one is the end address of the invalid range or null if there is just one slot
-// that needs to be removed from the remembered set. On buffer overflow the
-// slots are moved to the remembered set.
-// Store buffer entries are always full pointers.
-class StoreBuffer {
- public:
- enum StoreBufferMode { IN_GC, NOT_IN_GC };
-
- static const int kStoreBuffers = 2;
- static const int kStoreBufferSize =
- Max(static_cast<int>(kMinExpectedOSPageSize / kStoreBuffers),
- 1 << (11 + kSystemPointerSizeLog2));
- static const int kStoreBufferMask = kStoreBufferSize - 1;
-
- V8_EXPORT_PRIVATE static int StoreBufferOverflow(Isolate* isolate);
-
- static void InsertDuringGarbageCollection(StoreBuffer* store_buffer,
- Address slot);
- static void InsertDuringRuntime(StoreBuffer* store_buffer, Address slot);
-
- explicit StoreBuffer(Heap* heap);
- void SetUp();
- void TearDown();
-
- // Used to add entries from generated code.
- inline Address* top_address() { return reinterpret_cast<Address*>(&top_); }
-
- // Moves entries from a specific store buffer to the remembered set. This
- // method takes a lock.
- void MoveEntriesToRememberedSet(int index);
-
- // This method ensures that all used store buffer entries are transferred to
- // the remembered set.
- void MoveAllEntriesToRememberedSet();
-
- inline void InsertIntoStoreBuffer(Address slot);
-
- void InsertEntry(Address slot) {
- // Insertions coming from the GC are directly inserted into the remembered
- // set. Insertions coming from the runtime are added to the store buffer to
- // allow concurrent processing.
- insertion_callback(this, slot);
- }
-
- void SetMode(StoreBufferMode mode);
-
- // Used by the concurrent processing thread to transfer entries from the
- // store buffer to the remembered set.
- void ConcurrentlyProcessStoreBuffer();
-
- bool Empty() {
- for (int i = 0; i < kStoreBuffers; i++) {
- if (lazy_top_[i]) {
- return false;
- }
- }
- return top_ == start_[current_];
- }
-
- Heap* heap() { return heap_; }
-
- private:
- // There are two store buffers. If one store buffer fills up, the main thread
- // publishes the top pointer of the store buffer that needs processing in its
- // global lazy_top_ field. After that it starts the concurrent processing
- // thread. The concurrent processing thread uses the pointer in lazy_top_.
- // It will grab the given mutex and transfer its entries to the remembered
- // set. If the concurrent thread does not make progress, the main thread will
- // perform the work.
- // Important: there is an ordering constraint. The store buffer with the
- // older entries has to be processed first.
- class Task : public CancelableTask {
- public:
- Task(Isolate* isolate, StoreBuffer* store_buffer)
- : CancelableTask(isolate),
- store_buffer_(store_buffer),
- tracer_(isolate->heap()->tracer()) {}
- ~Task() override = default;
-
- private:
- void RunInternal() override {
- TRACE_BACKGROUND_GC(tracer_,
- GCTracer::BackgroundScope::BACKGROUND_STORE_BUFFER);
- store_buffer_->ConcurrentlyProcessStoreBuffer();
- }
- StoreBuffer* store_buffer_;
- GCTracer* tracer_;
- DISALLOW_COPY_AND_ASSIGN(Task);
- };
-
- StoreBufferMode mode() const { return mode_; }
-
- void FlipStoreBuffers();
-
- Heap* heap_;
-
- Address* top_;
-
- // The start and the limit of the buffer that contains store slots
- // added from the generated code. We have two chunks of store buffers.
- // Whenever one fills up, we notify a concurrent processing thread and
- // use the other empty one in the meantime.
- Address* start_[kStoreBuffers];
- Address* limit_[kStoreBuffers];
-
- // At most one lazy_top_ pointer is set at any time.
- Address* lazy_top_[kStoreBuffers];
- base::Mutex mutex_;
-
- // We only want to have at most one concurrent processing task running.
- bool task_running_;
-
- // Points to the current buffer in use.
- int current_;
-
- // During GC, entries are directly added to the remembered set without
- // going through the store buffer. This is signaled by a special
- // IN_GC mode.
- StoreBufferMode mode_;
-
- VirtualMemory virtual_memory_;
-
- // Callbacks are more efficient than reading out the gc state for every
- // store buffer operation.
- void (*insertion_callback)(StoreBuffer*, Address);
-};
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_HEAP_STORE_BUFFER_H_
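For reference, the double-buffering scheme the deleted StoreBuffer implemented (fill one buffer from the write barrier, flip, and drain the retired buffer into the remembered set, normally on a background task) can be sketched independently of V8 as follows; the class, the inline draining, and the std::mutex/std::vector machinery are illustrative only:

#include <cassert>
#include <cstdint>
#include <mutex>
#include <set>
#include <vector>

// Toy double-buffered store buffer: Insert fills the current buffer; Flip
// switches buffers and drains the retired one into 'remembered_' under lock
// (the real code handed the retired buffer to a background task instead).
class ToyStoreBuffer {
 public:
  void Insert(uintptr_t slot) { buffers_[current_].push_back(slot); }

  void Flip() {
    std::lock_guard<std::mutex> guard(mutex_);
    current_ ^= 1;
    Drain(current_ ^ 1);  // drain the buffer we just switched away from
  }

  void DrainAll() {
    std::lock_guard<std::mutex> guard(mutex_);
    Drain(0);
    Drain(1);
  }

  const std::set<uintptr_t>& remembered() const { return remembered_; }

 private:
  void Drain(int index) {
    for (uintptr_t slot : buffers_[index]) remembered_.insert(slot);
    buffers_[index].clear();
  }

  std::vector<uintptr_t> buffers_[2];
  std::set<uintptr_t> remembered_;
  std::mutex mutex_;
  int current_ = 0;
};

int main() {
  ToyStoreBuffer sb;
  sb.Insert(0x1000);
  sb.Flip();  // 0x1000 moves to the remembered set
  sb.Insert(0x2000);
  sb.DrainAll();
  assert(sb.remembered().count(0x1000) && sb.remembered().count(0x2000));
  return 0;
}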
diff --git a/deps/v8/src/heap/sweeper.cc b/deps/v8/src/heap/sweeper.cc
index c3c6b58835..11be775485 100644
--- a/deps/v8/src/heap/sweeper.cc
+++ b/deps/v8/src/heap/sweeper.cc
@@ -4,7 +4,6 @@
#include "src/heap/sweeper.h"
-#include "src/base/template-utils.h"
#include "src/execution/vm-state-inl.h"
#include "src/heap/array-buffer-tracker-inl.h"
#include "src/heap/gc-tracer.h"
@@ -181,7 +180,7 @@ void Sweeper::StartSweeperTasks() {
ForAllSweepingSpaces([this](AllocationSpace space) {
DCHECK(IsValidSweepingSpace(space));
num_sweeping_tasks_++;
- auto task = base::make_unique<SweeperTask>(
+ auto task = std::make_unique<SweeperTask>(
heap_->isolate(), this, &pending_sweeper_tasks_semaphore_,
&num_sweeping_tasks_, space);
DCHECK_LT(num_tasks_, kMaxSweeperTasks);
@@ -321,8 +320,8 @@ int Sweeper::RawSweep(
ClearFreedMemoryMode::kClearFreedMemory);
}
if (should_reduce_memory_) p->DiscardUnusedMemory(free_start, size);
- RememberedSet<OLD_TO_NEW>::RemoveRange(p, free_start, free_end,
- SlotSet::KEEP_EMPTY_BUCKETS);
+ RememberedSetSweeping::RemoveRange(p, free_start, free_end,
+ SlotSet::KEEP_EMPTY_BUCKETS);
RememberedSet<OLD_TO_OLD>::RemoveRange(p, free_start, free_end,
SlotSet::KEEP_EMPTY_BUCKETS);
if (non_empty_typed_slots) {
@@ -355,8 +354,8 @@ int Sweeper::RawSweep(
ClearFreedMemoryMode::kClearFreedMemory);
}
if (should_reduce_memory_) p->DiscardUnusedMemory(free_start, size);
- RememberedSet<OLD_TO_NEW>::RemoveRange(p, free_start, p->area_end(),
- SlotSet::KEEP_EMPTY_BUCKETS);
+ RememberedSetSweeping::RemoveRange(p, free_start, p->area_end(),
+ SlotSet::KEEP_EMPTY_BUCKETS);
RememberedSet<OLD_TO_OLD>::RemoveRange(p, free_start, p->area_end(),
SlotSet::KEEP_EMPTY_BUCKETS);
if (non_empty_typed_slots) {
@@ -404,6 +403,10 @@ void Sweeper::SweepSpaceFromTask(AllocationSpace identity) {
Page* page = nullptr;
while (!stop_sweeper_tasks_ &&
((page = GetSweepingPageSafe(identity)) != nullptr)) {
+ // Typed slot sets are only recorded on code pages. Code pages
+ // are not swept concurrently with the application to ensure W^X.
+ DCHECK(!page->typed_slot_set<OLD_TO_NEW>() &&
+ !page->typed_slot_set<OLD_TO_OLD>());
ParallelSweepPage(page, identity);
}
}
@@ -462,16 +465,6 @@ int Sweeper::ParallelSweepPage(
max_freed = RawSweep(page, REBUILD_FREE_LIST, free_space_mode,
invalidated_slots_in_free_space);
DCHECK(page->SweepingDone());
-
- // After finishing sweeping of a page we clean up its remembered set.
- TypedSlotSet* typed_slot_set = page->typed_slot_set<OLD_TO_NEW>();
- if (typed_slot_set) {
- typed_slot_set->FreeToBeFreedChunks();
- }
- SlotSet* slot_set = page->slot_set<OLD_TO_NEW>();
- if (slot_set) {
- slot_set->FreeToBeFreedBuckets();
- }
}
{
@@ -488,7 +481,7 @@ void Sweeper::ScheduleIncrementalSweepingTask() {
auto taskrunner =
V8::GetCurrentPlatform()->GetForegroundTaskRunner(isolate);
taskrunner->PostTask(
- base::make_unique<IncrementalSweeperTask>(heap_->isolate(), this));
+ std::make_unique<IncrementalSweeperTask>(heap_->isolate(), this));
}
}
@@ -517,6 +510,7 @@ void Sweeper::PrepareToBeSweptPage(AllocationSpace space, Page* page) {
DCHECK(!category->is_linked(page->owner()->free_list()));
});
#endif // DEBUG
+ page->MoveOldToNewRememberedSetForSweeping();
page->set_concurrent_sweeping_state(Page::kSweepingPending);
heap_->paged_space(space)->IncreaseAllocatedBytes(
marking_state_->live_bytes(page), page);
@@ -596,8 +590,8 @@ void Sweeper::StartIterabilityTasks() {
DCHECK(!iterability_task_started_);
if (FLAG_concurrent_sweeping && !iterability_list_.empty()) {
- auto task = base::make_unique<IterabilityTask>(
- heap_->isolate(), this, &iterability_task_semaphore_);
+ auto task = std::make_unique<IterabilityTask>(heap_->isolate(), this,
+ &iterability_task_semaphore_);
iterability_task_id_ = task->id();
iterability_task_started_ = true;
V8::GetCurrentPlatform()->CallOnWorkerThread(std::move(task));