path: root/deps/v8/src/heap
author     Michaël Zasso <targos@protonmail.com>  2017-09-12 11:34:59 +0200
committer  Anna Henningsen <anna@addaleax.net>    2017-09-13 16:15:18 +0200
commit     d82e1075dbc2cec2d6598ade10c1f43805f690fd (patch)
tree       ccd242b9b491dfc341d1099fe11b0ef528839877 /deps/v8/src/heap
parent     b4b7ac6ae811b2b5a3082468115dfb5a5246fe3f (diff)
deps: update V8 to 6.1.534.36
PR-URL: https://github.com/nodejs/node/pull/14730
Reviewed-By: Ben Noordhuis <info@bnoordhuis.nl>
Reviewed-By: Ali Ijaz Sheikh <ofrobots@google.com>
Reviewed-By: Colin Ihrig <cjihrig@gmail.com>
Reviewed-By: Matteo Collina <matteo.collina@gmail.com>
Diffstat (limited to 'deps/v8/src/heap')
-rw-r--r--  deps/v8/src/heap/OWNERS | 3
-rw-r--r--  deps/v8/src/heap/array-buffer-tracker-inl.h | 20
-rw-r--r--  deps/v8/src/heap/array-buffer-tracker.cc | 54
-rw-r--r--  deps/v8/src/heap/array-buffer-tracker.h | 29
-rw-r--r--  deps/v8/src/heap/code-stats.h | 11
-rw-r--r--  deps/v8/src/heap/concurrent-marking-deque.h | 175
-rw-r--r--  deps/v8/src/heap/concurrent-marking.cc | 273
-rw-r--r--  deps/v8/src/heap/concurrent-marking.h | 45
-rw-r--r--  deps/v8/src/heap/embedder-tracing.cc | 2
-rw-r--r--  deps/v8/src/heap/embedder-tracing.h | 10
-rw-r--r--  deps/v8/src/heap/gc-idle-time-handler.cc | 15
-rw-r--r--  deps/v8/src/heap/gc-idle-time-handler.h | 5
-rw-r--r--  deps/v8/src/heap/gc-tracer.cc | 27
-rw-r--r--  deps/v8/src/heap/gc-tracer.h | 153
-rw-r--r--  deps/v8/src/heap/heap-inl.h | 181
-rw-r--r--  deps/v8/src/heap/heap.cc | 1029
-rw-r--r--  deps/v8/src/heap/heap.h | 281
-rw-r--r--  deps/v8/src/heap/incremental-marking-inl.h | 8
-rw-r--r--  deps/v8/src/heap/incremental-marking-job.cc | 10
-rw-r--r--  deps/v8/src/heap/incremental-marking-job.h | 7
-rw-r--r--  deps/v8/src/heap/incremental-marking.cc | 293
-rw-r--r--  deps/v8/src/heap/incremental-marking.h | 35
-rw-r--r--  deps/v8/src/heap/item-parallel-job.h | 6
-rw-r--r--  deps/v8/src/heap/local-allocator.h | 99
-rw-r--r--  deps/v8/src/heap/mark-compact-inl.h | 169
-rw-r--r--  deps/v8/src/heap/mark-compact.cc | 1966
-rw-r--r--  deps/v8/src/heap/mark-compact.h | 427
-rw-r--r--  deps/v8/src/heap/marking.cc | 201
-rw-r--r--  deps/v8/src/heap/marking.h | 318
-rw-r--r--  deps/v8/src/heap/memory-reducer.cc | 1
-rw-r--r--  deps/v8/src/heap/object-stats.cc | 36
-rw-r--r--  deps/v8/src/heap/objects-visiting-inl.h | 951
-rw-r--r--  deps/v8/src/heap/objects-visiting.cc | 195
-rw-r--r--  deps/v8/src/heap/objects-visiting.h | 438
-rw-r--r--  deps/v8/src/heap/page-parallel-job.h | 180
-rw-r--r--  deps/v8/src/heap/remembered-set.h | 28
-rw-r--r--  deps/v8/src/heap/scavenge-job.h | 5
-rw-r--r--  deps/v8/src/heap/scavenger-inl.h | 200
-rw-r--r--  deps/v8/src/heap/scavenger.cc | 531
-rw-r--r--  deps/v8/src/heap/scavenger.h | 174
-rw-r--r--  deps/v8/src/heap/sequential-marking-deque.cc | 6
-rw-r--r--  deps/v8/src/heap/sequential-marking-deque.h | 19
-rw-r--r--  deps/v8/src/heap/slot-set.h | 375
-rw-r--r--  deps/v8/src/heap/spaces-inl.h | 110
-rw-r--r--  deps/v8/src/heap/spaces.cc | 348
-rw-r--r--  deps/v8/src/heap/spaces.h | 147
-rw-r--r--  deps/v8/src/heap/store-buffer.cc | 3
-rw-r--r--  deps/v8/src/heap/worklist.h | 354
-rw-r--r--  deps/v8/src/heap/workstealing-marking-deque.h | 167
49 files changed, 4735 insertions, 5385 deletions
diff --git a/deps/v8/src/heap/OWNERS b/deps/v8/src/heap/OWNERS
index 32da1ecead..79eea3aaab 100644
--- a/deps/v8/src/heap/OWNERS
+++ b/deps/v8/src/heap/OWNERS
@@ -1,7 +1,8 @@
set noparent
hpayer@chromium.org
-jochen@chromium.org
mlippautz@chromium.org
mstarzinger@chromium.org
ulan@chromium.org
+
+# COMPONENT: Blink>JavaScript>GC
diff --git a/deps/v8/src/heap/array-buffer-tracker-inl.h b/deps/v8/src/heap/array-buffer-tracker-inl.h
index d20f128002..0688a29f3a 100644
--- a/deps/v8/src/heap/array-buffer-tracker-inl.h
+++ b/deps/v8/src/heap/array-buffer-tracker-inl.h
@@ -14,7 +14,7 @@ void ArrayBufferTracker::RegisterNew(Heap* heap, JSArrayBuffer* buffer) {
void* data = buffer->backing_store();
if (!data) return;
- size_t length = NumberToSize(buffer->byte_length());
+ size_t length = buffer->allocation_length();
Page* page = Page::FromAddress(buffer->address());
{
base::LockGuard<base::RecursiveMutex> guard(page->mutex());
@@ -37,31 +37,33 @@ void ArrayBufferTracker::Unregister(Heap* heap, JSArrayBuffer* buffer) {
if (!data) return;
Page* page = Page::FromAddress(buffer->address());
- size_t length = 0;
+ size_t length = buffer->allocation_length();
{
base::LockGuard<base::RecursiveMutex> guard(page->mutex());
LocalArrayBufferTracker* tracker = page->local_tracker();
DCHECK_NOT_NULL(tracker);
- length = tracker->Remove(buffer);
+ tracker->Remove(buffer, length);
}
heap->update_external_memory(-static_cast<intptr_t>(length));
}
-void LocalArrayBufferTracker::Add(Key key, const Value& value) {
- auto ret = array_buffers_.insert(std::make_pair(key, value));
+void LocalArrayBufferTracker::Add(JSArrayBuffer* buffer, size_t length) {
+ DCHECK_GE(retained_size_ + length, retained_size_);
+ retained_size_ += length;
+ auto ret = array_buffers_.insert(buffer);
USE(ret);
// Check that we indeed inserted a new value and did not overwrite an existing
// one (which would be a bug).
DCHECK(ret.second);
}
-LocalArrayBufferTracker::Value LocalArrayBufferTracker::Remove(Key key) {
- TrackingData::iterator it = array_buffers_.find(key);
+void LocalArrayBufferTracker::Remove(JSArrayBuffer* buffer, size_t length) {
+ DCHECK_GE(retained_size_, retained_size_ - length);
+ retained_size_ -= length;
+ TrackingData::iterator it = array_buffers_.find(buffer);
// Check that we indeed find a key to remove.
DCHECK(it != array_buffers_.end());
- Value value = it->second;
array_buffers_.erase(it);
- return value;
}
} // namespace internal
diff --git a/deps/v8/src/heap/array-buffer-tracker.cc b/deps/v8/src/heap/array-buffer-tracker.cc
index b4b4757808..08b5750752 100644
--- a/deps/v8/src/heap/array-buffer-tracker.cc
+++ b/deps/v8/src/heap/array-buffer-tracker.cc
@@ -17,19 +17,21 @@ LocalArrayBufferTracker::~LocalArrayBufferTracker() {
template <typename Callback>
void LocalArrayBufferTracker::Free(Callback should_free) {
size_t freed_memory = 0;
+ size_t retained_size = 0;
for (TrackingData::iterator it = array_buffers_.begin();
it != array_buffers_.end();) {
- JSArrayBuffer* buffer = reinterpret_cast<JSArrayBuffer*>(it->first);
+ JSArrayBuffer* buffer = reinterpret_cast<JSArrayBuffer*>(*it);
+ const size_t length = buffer->allocation_length();
if (should_free(buffer)) {
- const size_t len = it->second;
+ freed_memory += length;
buffer->FreeBackingStore();
-
- freed_memory += len;
it = array_buffers_.erase(it);
} else {
+ retained_size += length;
++it;
}
}
+ retained_size_ = retained_size;
if (freed_memory > 0) {
heap_->update_external_memory_concurrently_freed(
static_cast<intptr_t>(freed_memory));
@@ -39,36 +41,41 @@ void LocalArrayBufferTracker::Free(Callback should_free) {
template <typename Callback>
void LocalArrayBufferTracker::Process(Callback callback) {
JSArrayBuffer* new_buffer = nullptr;
+ JSArrayBuffer* old_buffer = nullptr;
size_t freed_memory = 0;
+ size_t retained_size = 0;
for (TrackingData::iterator it = array_buffers_.begin();
it != array_buffers_.end();) {
- const CallbackResult result = callback(it->first, &new_buffer);
+ old_buffer = reinterpret_cast<JSArrayBuffer*>(*it);
+ const size_t length = old_buffer->allocation_length();
+ const CallbackResult result = callback(old_buffer, &new_buffer);
if (result == kKeepEntry) {
+ retained_size += length;
++it;
} else if (result == kUpdateEntry) {
DCHECK_NOT_NULL(new_buffer);
Page* target_page = Page::FromAddress(new_buffer->address());
- // We need to lock the target page because we cannot guarantee
- // exclusive access to new space pages.
- if (target_page->InNewSpace()) target_page->mutex()->Lock();
- LocalArrayBufferTracker* tracker = target_page->local_tracker();
- if (tracker == nullptr) {
- target_page->AllocateLocalTracker();
- tracker = target_page->local_tracker();
+ {
+ base::LockGuard<base::RecursiveMutex> guard(target_page->mutex());
+ LocalArrayBufferTracker* tracker = target_page->local_tracker();
+ if (tracker == nullptr) {
+ target_page->AllocateLocalTracker();
+ tracker = target_page->local_tracker();
+ }
+ DCHECK_NOT_NULL(tracker);
+ DCHECK_EQ(length, new_buffer->allocation_length());
+ tracker->Add(new_buffer, length);
}
- DCHECK_NOT_NULL(tracker);
- tracker->Add(new_buffer, it->second);
- if (target_page->InNewSpace()) target_page->mutex()->Unlock();
it = array_buffers_.erase(it);
} else if (result == kRemoveEntry) {
- const size_t len = it->second;
- it->first->FreeBackingStore();
- freed_memory += len;
+ freed_memory += length;
+ old_buffer->FreeBackingStore();
it = array_buffers_.erase(it);
} else {
UNREACHABLE();
}
}
+ retained_size_ = retained_size;
if (freed_memory > 0) {
heap_->update_external_memory_concurrently_freed(
static_cast<intptr_t>(freed_memory));
@@ -85,6 +92,17 @@ void ArrayBufferTracker::FreeDeadInNewSpace(Heap* heap) {
heap->account_external_memory_concurrently_freed();
}
+size_t ArrayBufferTracker::RetainedInNewSpace(Heap* heap) {
+ size_t retained_size = 0;
+ for (Page* page : PageRange(heap->new_space()->ToSpaceStart(),
+ heap->new_space()->ToSpaceEnd())) {
+ LocalArrayBufferTracker* tracker = page->local_tracker();
+ if (tracker == nullptr) continue;
+ retained_size += tracker->retained_size();
+ }
+ return retained_size;
+}
+
void ArrayBufferTracker::FreeDead(Page* page,
const MarkingState& marking_state) {
// Callers need to ensure having the page lock.
diff --git a/deps/v8/src/heap/array-buffer-tracker.h b/deps/v8/src/heap/array-buffer-tracker.h
index 56f042780e..e1b4dc4e4d 100644
--- a/deps/v8/src/heap/array-buffer-tracker.h
+++ b/deps/v8/src/heap/array-buffer-tracker.h
@@ -5,7 +5,7 @@
#ifndef V8_HEAP_ARRAY_BUFFER_TRACKER_H_
#define V8_HEAP_ARRAY_BUFFER_TRACKER_H_
-#include <unordered_map>
+#include <unordered_set>
#include "src/allocation.h"
#include "src/base/platform/mutex.h"
@@ -38,6 +38,9 @@ class ArrayBufferTracker : public AllStatic {
// Does not take any locks and can only be called during Scavenge.
static void FreeDeadInNewSpace(Heap* heap);
+ // Number of array buffer bytes retained from new space.
+ static size_t RetainedInNewSpace(Heap* heap);
+
// Frees all backing store pointers for dead JSArrayBuffer on a given page.
// Requires marking information to be present. Requires the page lock to be
// taken by the caller.
@@ -60,17 +63,15 @@ class ArrayBufferTracker : public AllStatic {
// Never use directly but instead always call through |ArrayBufferTracker|.
class LocalArrayBufferTracker {
public:
- typedef JSArrayBuffer* Key;
- typedef size_t Value;
-
enum CallbackResult { kKeepEntry, kUpdateEntry, kRemoveEntry };
enum FreeMode { kFreeDead, kFreeAll };
- explicit LocalArrayBufferTracker(Heap* heap) : heap_(heap) {}
+ explicit LocalArrayBufferTracker(Heap* heap)
+ : heap_(heap), retained_size_(0) {}
~LocalArrayBufferTracker();
- inline void Add(Key key, const Value& value);
- inline Value Remove(Key key);
+ inline void Add(JSArrayBuffer* buffer, size_t length);
+ inline void Remove(JSArrayBuffer* buffer, size_t length);
// Frees up array buffers.
//
@@ -90,17 +91,23 @@ class LocalArrayBufferTracker {
template <typename Callback>
void Process(Callback callback);
- bool IsEmpty() { return array_buffers_.empty(); }
+ bool IsEmpty() const { return array_buffers_.empty(); }
- bool IsTracked(Key key) {
- return array_buffers_.find(key) != array_buffers_.end();
+ bool IsTracked(JSArrayBuffer* buffer) const {
+ return array_buffers_.find(buffer) != array_buffers_.end();
}
+ size_t retained_size() const { return retained_size_; }
+
private:
- typedef std::unordered_map<Key, Value> TrackingData;
+ typedef std::unordered_set<JSArrayBuffer*> TrackingData;
Heap* heap_;
+ // The set contains raw heap pointers which are removed by the GC upon
+ // processing the tracker through its owning page.
TrackingData array_buffers_;
+ // Retained size of array buffers for this tracker in bytes.
+ size_t retained_size_;
};
} // namespace internal
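
Note on the data-structure change above: the tracker drops the unordered_map from buffer pointer to byte length and instead keeps an unordered_set of pointers plus a retained_size_ counter that Add/Remove keep in sync, so a page can answer "how many array-buffer bytes do I retain" in constant time. Below is a minimal standalone sketch of that bookkeeping pattern; Buffer and BufferSetTracker are stand-in names, not V8's JSArrayBuffer and LocalArrayBufferTracker, and the page mutex that protects the real tracker is omitted.

#include <cassert>
#include <cstddef>
#include <unordered_set>

struct Buffer {
  size_t allocation_length;  // stand-in for JSArrayBuffer::allocation_length()
};

class BufferSetTracker {
 public:
  void Add(Buffer* buffer, size_t length) {
    retained_size_ += length;
    bool inserted = buffers_.insert(buffer).second;
    assert(inserted && "must not overwrite an existing entry");
    (void)inserted;
  }
  void Remove(Buffer* buffer, size_t length) {
    assert(retained_size_ >= length && "retained size cannot go negative");
    retained_size_ -= length;
    size_t erased = buffers_.erase(buffer);
    assert(erased == 1 && "entry to remove must be present");
    (void)erased;
  }
  // Constant-time answer used by RetainedInNewSpace-style aggregation.
  size_t retained_size() const { return retained_size_; }

 private:
  std::unordered_set<Buffer*> buffers_;  // raw pointers, pruned during GC
  size_t retained_size_ = 0;             // bytes retained by tracked buffers
};

The counter and the set must always be updated together (in the real code, under the same page mutex); keeping them in one class makes that invariant easy to hold.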
diff --git a/deps/v8/src/heap/code-stats.h b/deps/v8/src/heap/code-stats.h
index 499c9fa5ac..fa106d6435 100644
--- a/deps/v8/src/heap/code-stats.h
+++ b/deps/v8/src/heap/code-stats.h
@@ -2,14 +2,15 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/assembler.h"
-#include "src/heap/spaces.h"
-#include "src/isolate.h"
-#include "src/objects.h"
-
namespace v8 {
namespace internal {
+class Isolate;
+class HeapObject;
+class LargeObjectSpace;
+class PagedSpace;
+class RelocIterator;
+
class CodeStatistics {
public:
// Collect statistics related to code size.
diff --git a/deps/v8/src/heap/concurrent-marking-deque.h b/deps/v8/src/heap/concurrent-marking-deque.h
deleted file mode 100644
index 1490923a2f..0000000000
--- a/deps/v8/src/heap/concurrent-marking-deque.h
+++ /dev/null
@@ -1,175 +0,0 @@
-// Copyright 2017 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_HEAP_CONCURRENT_MARKING_DEQUE_
-#define V8_HEAP_CONCURRENT_MARKING_DEQUE_
-
-#include <deque>
-
-#include "src/base/platform/mutex.h"
-
-namespace v8 {
-namespace internal {
-
-class Heap;
-class Isolate;
-class HeapObject;
-
-enum class MarkingThread { kMain, kConcurrent };
-
-enum class TargetDeque { kShared, kBailout };
-
-// The concurrent marking deque supports deque operations for two threads:
-// main and concurrent. It is implemented using two deques: shared and bailout.
-//
-// The concurrent thread can use the push and pop operations with the
-// MarkingThread::kConcurrent argument. All other operations are intended
-// to be used by the main thread only.
-//
-// The interface of the concurrent marking deque for the main thread matches
-// that of the sequential marking deque, so they can be easily switched
-// at compile time without updating the main thread call-sites.
-//
-// The shared deque is shared between the main thread and the concurrent
-// thread, so both threads can push to and pop from the shared deque.
-// The bailout deque stores objects that cannot be processed by the concurrent
-// thread. Only the concurrent thread can push to it and only the main thread
-// can pop from it.
-class ConcurrentMarkingDeque {
- public:
- // The heap parameter is needed to match the interface
- // of the sequential marking deque.
- explicit ConcurrentMarkingDeque(Heap* heap) {}
-
- // Pushes the object into the specified deque assuming that the function is
- // called on the specified thread. The main thread can push only to the shared
- // deque. The concurrent thread can push to both deques.
- bool Push(HeapObject* object, MarkingThread thread = MarkingThread::kMain,
- TargetDeque target = TargetDeque::kShared) {
- switch (target) {
- case TargetDeque::kShared:
- shared_deque_.Push(object);
- break;
- case TargetDeque::kBailout:
- bailout_deque_.Push(object);
- break;
- }
- return true;
- }
-
- // Pops an object from the bailout or shared deque assuming that the function
- // is called on the specified thread. The main thread first tries to pop the
- // bailout deque. If the deque is empty then it tries the shared deque.
- // If the shared deque is also empty, then the function returns nullptr.
- // The concurrent thread pops only from the shared deque.
- HeapObject* Pop(MarkingThread thread = MarkingThread::kMain) {
- if (thread == MarkingThread::kMain) {
- HeapObject* result = bailout_deque_.Pop();
- if (result != nullptr) return result;
- }
- return shared_deque_.Pop();
- }
-
- // All the following operations can be used only by the main thread.
- void Clear() {
- bailout_deque_.Clear();
- shared_deque_.Clear();
- }
-
- bool IsFull() { return false; }
-
- bool IsEmpty() { return bailout_deque_.IsEmpty() && shared_deque_.IsEmpty(); }
-
- int Size() { return bailout_deque_.Size() + shared_deque_.Size(); }
-
- // This is used for a large array with a progress bar.
- // For simplicity, unshift to the bailout deque so that the concurrent thread
- // does not see such objects.
- bool Unshift(HeapObject* object) {
- bailout_deque_.Unshift(object);
- return true;
- }
-
- // Calls the specified callback on each element of the deques and replaces
- // the element with the result of the callback. If the callback returns
- // nullptr then the element is removed from the deque.
- // The callback must accept HeapObject* and return HeapObject*.
- template <typename Callback>
- void Update(Callback callback) {
- bailout_deque_.Update(callback);
- shared_deque_.Update(callback);
- }
-
- // These empty functions are needed to match the interface
- // of the sequential marking deque.
- void SetUp() {}
- void TearDown() {}
- void StartUsing() {}
- void StopUsing() {}
- void ClearOverflowed() {}
- void SetOverflowed() {}
- bool overflowed() const { return false; }
-
- private:
- // Simple, slow, and thread-safe deque that forwards all operations to
- // a lock-protected std::deque.
- class Deque {
- public:
- Deque() { cache_padding_[0] = 0; }
- void Clear() {
- base::LockGuard<base::Mutex> guard(&mutex_);
- return deque_.clear();
- }
- bool IsEmpty() {
- base::LockGuard<base::Mutex> guard(&mutex_);
- return deque_.empty();
- }
- int Size() {
- base::LockGuard<base::Mutex> guard(&mutex_);
- return static_cast<int>(deque_.size());
- }
- void Push(HeapObject* object) {
- base::LockGuard<base::Mutex> guard(&mutex_);
- deque_.push_back(object);
- }
- HeapObject* Pop() {
- base::LockGuard<base::Mutex> guard(&mutex_);
- if (deque_.empty()) return nullptr;
- HeapObject* result = deque_.back();
- deque_.pop_back();
- return result;
- }
- void Unshift(HeapObject* object) {
- base::LockGuard<base::Mutex> guard(&mutex_);
- deque_.push_front(object);
- }
- template <typename Callback>
- void Update(Callback callback) {
- base::LockGuard<base::Mutex> guard(&mutex_);
- std::deque<HeapObject*> new_deque;
- for (auto object : deque_) {
- HeapObject* new_object = callback(object);
- if (new_object) {
- new_deque.push_back(new_object);
- }
- }
- deque_.swap(new_deque);
- }
-
- private:
- base::Mutex mutex_;
- std::deque<HeapObject*> deque_;
- // Ensure that two deques do not share the same cache line.
- static int const kCachePadding = 64;
- char cache_padding_[kCachePadding];
- };
- Deque bailout_deque_;
- Deque shared_deque_;
- DISALLOW_COPY_AND_ASSIGN(ConcurrentMarkingDeque);
-};
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_CONCURRENT_MARKING_DEQUE_
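
The shared/bailout protocol documented in the deleted header above survives this update: the new Worklist-based marker (see concurrent-marking.cc below) still keeps a shared list both sides drain and a bailout list that only the concurrent side fills and only the main thread empties. The following is a deliberately single-threaded, lock-free toy restatement of that protocol; the types and the NeedsMainThread predicate are illustrative, not V8 API.

#include <deque>
#include <string>

// Toy stand-in: a "heap object" is just a tagged kind here.
struct HeapObjectStub { std::string kind; };

// Objects the concurrent thread may not process safely (Code, Map, etc. in the
// real implementation) are deferred to the main thread via the bailout list.
bool NeedsMainThread(const HeapObjectStub& object) {
  return object.kind == "Code" || object.kind == "Map";
}

// Concurrent side: drain the shared list, deferring unsupported objects.
void ConcurrentMark(std::deque<HeapObjectStub>* shared,
                    std::deque<HeapObjectStub>* bailout) {
  while (!shared->empty()) {
    HeapObjectStub object = shared->front();
    shared->pop_front();
    if (NeedsMainThread(object)) {
      bailout->push_back(object);  // only the concurrent side pushes here
    } else {
      // visit the object's fields; discovered children go back onto *shared
    }
  }
}

// Main-thread side: always empty the bailout list before helping with the
// shared one, so deferred objects are not left behind.
void MainThreadMark(std::deque<HeapObjectStub>* shared,
                    std::deque<HeapObjectStub>* bailout) {
  while (!bailout->empty()) {
    // visit bailout->front() with full main-thread capabilities
    bailout->pop_front();
  }
  while (!shared->empty()) {
    // visit shared->front()
    shared->pop_front();
  }
}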
diff --git a/deps/v8/src/heap/concurrent-marking.cc b/deps/v8/src/heap/concurrent-marking.cc
index f541828e29..d8b1a0895f 100644
--- a/deps/v8/src/heap/concurrent-marking.cc
+++ b/deps/v8/src/heap/concurrent-marking.cc
@@ -7,12 +7,12 @@
#include <stack>
#include <unordered_map>
-#include "src/heap/concurrent-marking-deque.h"
#include "src/heap/heap-inl.h"
#include "src/heap/heap.h"
#include "src/heap/marking.h"
#include "src/heap/objects-visiting-inl.h"
#include "src/heap/objects-visiting.h"
+#include "src/heap/worklist.h"
#include "src/isolate.h"
#include "src/locked-queue-inl.h"
#include "src/utils-inl.h"
@@ -48,18 +48,20 @@ class ConcurrentMarkingVisitor final
public:
using BaseClass = HeapVisitor<int, ConcurrentMarkingVisitor>;
- explicit ConcurrentMarkingVisitor(ConcurrentMarkingDeque* deque)
- : deque_(deque) {}
+ explicit ConcurrentMarkingVisitor(ConcurrentMarking::MarkingWorklist* shared,
+ ConcurrentMarking::MarkingWorklist* bailout,
+ int task_id)
+ : shared_(shared, task_id), bailout_(bailout, task_id) {}
- bool ShouldVisit(HeapObject* object) override {
- return ObjectMarking::GreyToBlack<MarkBit::AccessMode::ATOMIC>(
+ bool ShouldVisit(HeapObject* object) {
+ return ObjectMarking::GreyToBlack<AccessMode::ATOMIC>(
object, marking_state(object));
}
void VisitPointers(HeapObject* host, Object** start, Object** end) override {
for (Object** p = start; p < end; p++) {
Object* object = reinterpret_cast<Object*>(
- base::NoBarrier_Load(reinterpret_cast<const base::AtomicWord*>(p)));
+ base::Relaxed_Load(reinterpret_cast<const base::AtomicWord*>(p)));
if (!object->IsHeapObject()) continue;
MarkObject(HeapObject::cast(object));
}
@@ -73,11 +75,18 @@ class ConcurrentMarkingVisitor final
}
}
+ void VisitCodeEntry(JSFunction* host, Address entry_address) override {
+ Address code_entry = base::AsAtomicWord::Relaxed_Load(
+ reinterpret_cast<Address*>(entry_address));
+ Object* code = Code::GetObjectFromCodeEntry(code_entry);
+ VisitPointer(host, &code);
+ }
+
// ===========================================================================
// JS object =================================================================
// ===========================================================================
- int VisitJSObject(Map* map, JSObject* object) override {
+ int VisitJSObject(Map* map, JSObject* object) {
int size = JSObject::BodyDescriptor::SizeOf(map, object);
const SlotSnapshot& snapshot = MakeSlotSnapshot(map, object, size);
if (!ShouldVisit(object)) return 0;
@@ -85,29 +94,44 @@ class ConcurrentMarkingVisitor final
return size;
}
- int VisitJSObjectFast(Map* map, JSObject* object) override {
+ int VisitJSObjectFast(Map* map, JSObject* object) {
return VisitJSObject(map, object);
}
- int VisitJSApiObject(Map* map, JSObject* object) override {
- return VisitJSObject(map, object);
+ int VisitJSApiObject(Map* map, JSObject* object) {
+ if (ObjectMarking::IsGrey<AccessMode::ATOMIC>(object,
+ marking_state(object))) {
+ int size = JSObject::BodyDescriptor::SizeOf(map, object);
+ VisitMapPointer(object, object->map_slot());
+ // It is OK to iterate body of JS API object here because they do not have
+ // unboxed double fields.
+ DCHECK(map->HasFastPointerLayout());
+ JSObject::BodyDescriptor::IterateBody(object, size, this);
+ // The main thread will do wrapper tracing in Blink.
+ bailout_.Push(object);
+ }
+ return 0;
}
// ===========================================================================
// Fixed array object ========================================================
// ===========================================================================
- int VisitFixedArray(Map* map, FixedArray* object) override {
- // TODO(ulan): implement iteration with prefetched length.
- return BaseClass::VisitFixedArray(map, object);
+ int VisitFixedArray(Map* map, FixedArray* object) {
+ int length = object->synchronized_length();
+ int size = FixedArray::SizeFor(length);
+ if (!ShouldVisit(object)) return 0;
+ VisitMapPointer(object, object->map_slot());
+ FixedArray::BodyDescriptor::IterateBody(object, size, this);
+ return size;
}
// ===========================================================================
// Code object ===============================================================
// ===========================================================================
- int VisitCode(Map* map, Code* object) override {
- deque_->Push(object, MarkingThread::kConcurrent, TargetDeque::kBailout);
+ int VisitCode(Map* map, Code* object) {
+ bailout_.Push(object);
return 0;
}
@@ -115,58 +139,94 @@ class ConcurrentMarkingVisitor final
// Objects with weak fields and/or side-effectiful visitation.
// ===========================================================================
- int VisitBytecodeArray(Map* map, BytecodeArray* object) override {
- // TODO(ulan): implement iteration of strong fields.
- deque_->Push(object, MarkingThread::kConcurrent, TargetDeque::kBailout);
+ int VisitBytecodeArray(Map* map, BytecodeArray* object) {
+ if (ObjectMarking::IsGrey<AccessMode::ATOMIC>(object,
+ marking_state(object))) {
+ int size = BytecodeArray::BodyDescriptorWeak::SizeOf(map, object);
+ VisitMapPointer(object, object->map_slot());
+ BytecodeArray::BodyDescriptorWeak::IterateBody(object, size, this);
+ // Aging of bytecode arrays is done on the main thread.
+ bailout_.Push(object);
+ }
return 0;
}
- int VisitJSFunction(Map* map, JSFunction* object) override {
- // TODO(ulan): implement iteration of strong fields.
- deque_->Push(object, MarkingThread::kConcurrent, TargetDeque::kBailout);
- return 0;
+ int VisitAllocationSite(Map* map, AllocationSite* object) {
+ if (!ShouldVisit(object)) return 0;
+ int size = AllocationSite::BodyDescriptorWeak::SizeOf(map, object);
+ VisitMapPointer(object, object->map_slot());
+ AllocationSite::BodyDescriptorWeak::IterateBody(object, size, this);
+ return size;
+ }
+
+ int VisitJSFunction(Map* map, JSFunction* object) {
+ if (!ShouldVisit(object)) return 0;
+ int size = JSFunction::BodyDescriptorWeak::SizeOf(map, object);
+ VisitMapPointer(object, object->map_slot());
+ JSFunction::BodyDescriptorWeak::IterateBody(object, size, this);
+ return size;
}
- int VisitMap(Map* map, Map* object) override {
+ int VisitMap(Map* map, Map* object) {
// TODO(ulan): implement iteration of strong fields.
- deque_->Push(object, MarkingThread::kConcurrent, TargetDeque::kBailout);
+ bailout_.Push(object);
return 0;
}
- int VisitNativeContext(Map* map, Context* object) override {
- // TODO(ulan): implement iteration of strong fields.
- deque_->Push(object, MarkingThread::kConcurrent, TargetDeque::kBailout);
+ int VisitNativeContext(Map* map, Context* object) {
+ if (ObjectMarking::IsGrey<AccessMode::ATOMIC>(object,
+ marking_state(object))) {
+ int size = Context::BodyDescriptorWeak::SizeOf(map, object);
+ VisitMapPointer(object, object->map_slot());
+ Context::BodyDescriptorWeak::IterateBody(object, size, this);
+ // TODO(ulan): implement proper weakness for normalized map cache
+ // and remove this bailout.
+ bailout_.Push(object);
+ }
return 0;
}
- int VisitSharedFunctionInfo(Map* map, SharedFunctionInfo* object) override {
- // TODO(ulan): implement iteration of strong fields.
- deque_->Push(object, MarkingThread::kConcurrent, TargetDeque::kBailout);
+ int VisitSharedFunctionInfo(Map* map, SharedFunctionInfo* object) {
+ if (ObjectMarking::IsGrey<AccessMode::ATOMIC>(object,
+ marking_state(object))) {
+ int size = SharedFunctionInfo::BodyDescriptorWeak::SizeOf(map, object);
+ VisitMapPointer(object, object->map_slot());
+ SharedFunctionInfo::BodyDescriptorWeak::IterateBody(object, size, this);
+ // Resetting of IC age counter is done on the main thread.
+ bailout_.Push(object);
+ }
return 0;
}
- int VisitTransitionArray(Map* map, TransitionArray* object) override {
+ int VisitTransitionArray(Map* map, TransitionArray* object) {
// TODO(ulan): implement iteration of strong fields.
- deque_->Push(object, MarkingThread::kConcurrent, TargetDeque::kBailout);
+ bailout_.Push(object);
return 0;
}
- int VisitWeakCell(Map* map, WeakCell* object) override {
+ int VisitWeakCell(Map* map, WeakCell* object) {
// TODO(ulan): implement iteration of strong fields.
- deque_->Push(object, MarkingThread::kConcurrent, TargetDeque::kBailout);
+ bailout_.Push(object);
return 0;
}
- int VisitJSWeakCollection(Map* map, JSWeakCollection* object) override {
+ int VisitJSWeakCollection(Map* map, JSWeakCollection* object) {
// TODO(ulan): implement iteration of strong fields.
- deque_->Push(object, MarkingThread::kConcurrent, TargetDeque::kBailout);
+ bailout_.Push(object);
return 0;
}
void MarkObject(HeapObject* object) {
- if (ObjectMarking::WhiteToGrey<MarkBit::AccessMode::ATOMIC>(
- object, marking_state(object))) {
- deque_->Push(object, MarkingThread::kConcurrent, TargetDeque::kShared);
+#ifdef THREAD_SANITIZER
+ // Perform a dummy acquire load to tell TSAN that there is no data race
+ // in mark-bit initialization. See MemoryChunk::Initialize for the
+ // corresponding release store.
+ MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
+ CHECK_NOT_NULL(chunk->synchronized_heap());
+#endif
+ if (ObjectMarking::WhiteToGrey<AccessMode::ATOMIC>(object,
+ marking_state(object))) {
+ shared_.Push(object);
}
}
@@ -183,7 +243,7 @@ class ConcurrentMarkingVisitor final
Object** end) override {
for (Object** p = start; p < end; p++) {
Object* object = reinterpret_cast<Object*>(
- base::NoBarrier_Load(reinterpret_cast<const base::AtomicWord*>(p)));
+ base::Relaxed_Load(reinterpret_cast<const base::AtomicWord*>(p)));
slot_snapshot_->add(p, object);
}
}
@@ -206,82 +266,145 @@ class ConcurrentMarkingVisitor final
return MarkingState::Internal(object);
}
- ConcurrentMarkingDeque* deque_;
+ ConcurrentMarking::MarkingWorklist::View shared_;
+ ConcurrentMarking::MarkingWorklist::View bailout_;
SlotSnapshot slot_snapshot_;
};
class ConcurrentMarking::Task : public CancelableTask {
public:
Task(Isolate* isolate, ConcurrentMarking* concurrent_marking,
- base::Semaphore* on_finish)
+ base::Mutex* lock, int task_id)
: CancelableTask(isolate),
concurrent_marking_(concurrent_marking),
- on_finish_(on_finish) {}
+ lock_(lock),
+ task_id_(task_id) {}
virtual ~Task() {}
private:
// v8::internal::CancelableTask overrides.
void RunInternal() override {
- concurrent_marking_->Run();
- on_finish_->Signal();
+ concurrent_marking_->Run(task_id_, lock_);
}
ConcurrentMarking* concurrent_marking_;
- base::Semaphore* on_finish_;
+ base::Mutex* lock_;
+ int task_id_;
DISALLOW_COPY_AND_ASSIGN(Task);
};
-ConcurrentMarking::ConcurrentMarking(Heap* heap, ConcurrentMarkingDeque* deque)
+ConcurrentMarking::ConcurrentMarking(Heap* heap, MarkingWorklist* shared,
+ MarkingWorklist* bailout)
: heap_(heap),
- pending_task_semaphore_(0),
- deque_(deque),
- visitor_(new ConcurrentMarkingVisitor(deque_)),
- is_task_pending_(false) {
- // The runtime flag should be set only if the compile time flag was set.
+ shared_(shared),
+ bailout_(bailout),
+ pending_task_count_(0) {
+// The runtime flag should be set only if the compile time flag was set.
#ifndef V8_CONCURRENT_MARKING
CHECK(!FLAG_concurrent_marking);
#endif
+ for (int i = 0; i <= kTasks; i++) {
+ is_pending_[i] = false;
+ }
}
-ConcurrentMarking::~ConcurrentMarking() { delete visitor_; }
-
-void ConcurrentMarking::Run() {
- double time_ms = heap_->MonotonicallyIncreasingTimeInMs();
+void ConcurrentMarking::Run(int task_id, base::Mutex* lock) {
+ ConcurrentMarkingVisitor visitor(shared_, bailout_, task_id);
+ double time_ms;
size_t bytes_marked = 0;
- base::Mutex* relocation_mutex = heap_->relocation_mutex();
+ if (FLAG_trace_concurrent_marking) {
+ heap_->isolate()->PrintWithTimestamp(
+ "Starting concurrent marking task %d\n", task_id);
+ }
{
TimedScope scope(&time_ms);
- HeapObject* object;
- while ((object = deque_->Pop(MarkingThread::kConcurrent)) != nullptr) {
- base::LockGuard<base::Mutex> guard(relocation_mutex);
- bytes_marked += visitor_->Visit(object);
+ while (true) {
+ base::LockGuard<base::Mutex> guard(lock);
+ HeapObject* object;
+ if (!shared_->Pop(task_id, &object)) break;
+ Address new_space_top = heap_->new_space()->original_top();
+ Address new_space_limit = heap_->new_space()->original_limit();
+ Address addr = object->address();
+ if (new_space_top <= addr && addr < new_space_limit) {
+ bailout_->Push(task_id, object);
+ } else {
+ Map* map = object->synchronized_map();
+ bytes_marked += visitor.Visit(map, object);
+ }
+ }
+ {
+ // Take the lock to synchronize with worklist update after
+ // young generation GC.
+ base::LockGuard<base::Mutex> guard(lock);
+ bailout_->FlushToGlobal(task_id);
+ }
+ {
+ base::LockGuard<base::Mutex> guard(&pending_lock_);
+ is_pending_[task_id] = false;
+ --pending_task_count_;
+ pending_condition_.NotifyAll();
}
}
if (FLAG_trace_concurrent_marking) {
- heap_->isolate()->PrintWithTimestamp("concurrently marked %dKB in %.2fms\n",
- static_cast<int>(bytes_marked / KB),
- time_ms);
+ heap_->isolate()->PrintWithTimestamp(
+ "Task %d concurrently marked %dKB in %.2fms\n", task_id,
+ static_cast<int>(bytes_marked / KB), time_ms);
+ }
+}
+
+void ConcurrentMarking::ScheduleTasks() {
+ if (!FLAG_concurrent_marking) return;
+ base::LockGuard<base::Mutex> guard(&pending_lock_);
+ if (pending_task_count_ < kTasks) {
+ // Task id 0 is for the main thread.
+ for (int i = 1; i <= kTasks; i++) {
+ if (!is_pending_[i]) {
+ if (FLAG_trace_concurrent_marking) {
+ heap_->isolate()->PrintWithTimestamp(
+ "Scheduling concurrent marking task %d\n", i);
+ }
+ is_pending_[i] = true;
+ ++pending_task_count_;
+ V8::GetCurrentPlatform()->CallOnBackgroundThread(
+ new Task(heap_->isolate(), this, &task_lock_[i].lock, i),
+ v8::Platform::kShortRunningTask);
+ }
+ }
}
}
-void ConcurrentMarking::StartTask() {
+void ConcurrentMarking::RescheduleTasksIfNeeded() {
if (!FLAG_concurrent_marking) return;
- is_task_pending_ = true;
- V8::GetCurrentPlatform()->CallOnBackgroundThread(
- new Task(heap_->isolate(), this, &pending_task_semaphore_),
- v8::Platform::kShortRunningTask);
+ {
+ base::LockGuard<base::Mutex> guard(&pending_lock_);
+ if (pending_task_count_ > 0) return;
+ }
+ if (!shared_->IsGlobalPoolEmpty()) {
+ ScheduleTasks();
+ }
}
-void ConcurrentMarking::WaitForTaskToComplete() {
+void ConcurrentMarking::EnsureCompleted() {
if (!FLAG_concurrent_marking) return;
- pending_task_semaphore_.Wait();
- is_task_pending_ = false;
+ base::LockGuard<base::Mutex> guard(&pending_lock_);
+ while (pending_task_count_ > 0) {
+ pending_condition_.Wait(&pending_lock_);
+ }
}
-void ConcurrentMarking::EnsureTaskCompleted() {
- if (IsTaskPending()) {
- WaitForTaskToComplete();
+ConcurrentMarking::PauseScope::PauseScope(ConcurrentMarking* concurrent_marking)
+ : concurrent_marking_(concurrent_marking) {
+ if (!FLAG_concurrent_marking) return;
+ for (int i = 1; i <= kTasks; i++) {
+ concurrent_marking_->task_lock_[i].lock.Lock();
+ }
+}
+
+ConcurrentMarking::PauseScope::~PauseScope() {
+ if (!FLAG_concurrent_marking) return;
+ for (int i = kTasks; i >= 1; i--) {
+ concurrent_marking_->task_lock_[i].lock.Unlock();
}
}
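
The task bookkeeping introduced above (pending_task_count_ guarded by pending_lock_ plus pending_condition_) replaces the old single semaphore, so the main thread can wait for all background tasks at once and tasks can be rescheduled individually. A simplified standalone sketch of that completion protocol using standard-library primitives (V8 uses its own base::Mutex and base::ConditionVariable wrappers); the TaskTracker name is illustrative.

#include <condition_variable>
#include <mutex>
#include <thread>
#include <vector>

class TaskTracker {
 public:
  // Called by the scheduler before spawning a background task.
  void OnScheduled() {
    std::lock_guard<std::mutex> guard(mutex_);
    ++pending_task_count_;
  }
  // Called by each background task as its last action.
  void OnFinished() {
    std::lock_guard<std::mutex> guard(mutex_);
    --pending_task_count_;
    condition_.notify_all();
  }
  // Called by the main thread; blocks until every scheduled task has finished.
  void EnsureCompleted() {
    std::unique_lock<std::mutex> guard(mutex_);
    condition_.wait(guard, [this] { return pending_task_count_ == 0; });
  }

 private:
  std::mutex mutex_;
  std::condition_variable condition_;
  int pending_task_count_ = 0;
};

int main() {
  TaskTracker tracker;
  std::vector<std::thread> tasks;
  for (int i = 0; i < 4; i++) {
    tracker.OnScheduled();
    tasks.emplace_back([&tracker] { /* marking work */ tracker.OnFinished(); });
  }
  tracker.EnsureCompleted();  // returns once pending_task_count_ reaches zero
  for (std::thread& t : tasks) t.join();
}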
diff --git a/deps/v8/src/heap/concurrent-marking.h b/deps/v8/src/heap/concurrent-marking.h
index 134fa38f64..5179fc812d 100644
--- a/deps/v8/src/heap/concurrent-marking.h
+++ b/deps/v8/src/heap/concurrent-marking.h
@@ -7,35 +7,54 @@
#include "src/allocation.h"
#include "src/cancelable-task.h"
+#include "src/heap/worklist.h"
#include "src/utils.h"
#include "src/v8.h"
namespace v8 {
namespace internal {
-class ConcurrentMarkingDeque;
-class ConcurrentMarkingVisitor;
class Heap;
class Isolate;
class ConcurrentMarking {
public:
- ConcurrentMarking(Heap* heap, ConcurrentMarkingDeque* deque_);
- ~ConcurrentMarking();
+ // When the scope is entered, the concurrent marking tasks
+ // are paused and are not looking at the heap objects.
+ class PauseScope {
+ public:
+ explicit PauseScope(ConcurrentMarking* concurrent_marking);
+ ~PauseScope();
- void StartTask();
- void WaitForTaskToComplete();
- bool IsTaskPending() { return is_task_pending_; }
- void EnsureTaskCompleted();
+ private:
+ ConcurrentMarking* concurrent_marking_;
+ };
+
+ static const int kTasks = 4;
+ using MarkingWorklist = Worklist<HeapObject*, 64 /* segment size */>;
+
+ ConcurrentMarking(Heap* heap, MarkingWorklist* shared_,
+ MarkingWorklist* bailout_);
+
+ void ScheduleTasks();
+ void EnsureCompleted();
+ void RescheduleTasksIfNeeded();
private:
+ struct TaskLock {
+ base::Mutex lock;
+ char cache_line_padding[64];
+ };
class Task;
- void Run();
+ void Run(int task_id, base::Mutex* lock);
Heap* heap_;
- base::Semaphore pending_task_semaphore_;
- ConcurrentMarkingDeque* deque_;
- ConcurrentMarkingVisitor* visitor_;
- bool is_task_pending_;
+ MarkingWorklist* shared_;
+ MarkingWorklist* bailout_;
+ TaskLock task_lock_[kTasks + 1];
+ base::Mutex pending_lock_;
+ base::ConditionVariable pending_condition_;
+ int pending_task_count_;
+ bool is_pending_[kTasks + 1];
};
} // namespace internal
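
MarkingWorklist above is Worklist<HeapObject*, 64>: each task works on a private 64-entry segment and only touches a shared global pool when its segment fills up or runs dry, which keeps synchronization off the per-object fast path. The template below is a heavily simplified sketch of that idea (vectors as segments, one mutex around the global pool); it is not the V8 type and omits its segment recycling and stealing refinements.

#include <cstddef>
#include <mutex>
#include <utility>
#include <vector>

template <typename T, size_t kSegmentSize, int kMaxTasks>
class TinyWorklist {
 public:
  // Push onto the task's private segment; spill to the global pool when full.
  bool Push(int task_id, T value) {
    std::vector<T>& local = local_[task_id];
    local.push_back(value);
    if (local.size() == kSegmentSize) FlushToGlobal(task_id);
    return true;
  }

  // Pop from the task's private segment; refill from the global pool if empty.
  bool Pop(int task_id, T* value) {
    std::vector<T>& local = local_[task_id];
    if (local.empty() && !TakeFromGlobal(task_id)) return false;
    *value = local.back();
    local.pop_back();
    return true;
  }

  // Hand any locally buffered work to other tasks (used when a task finishes).
  void FlushToGlobal(int task_id) {
    std::vector<T>& local = local_[task_id];
    if (local.empty()) return;
    std::lock_guard<std::mutex> guard(mutex_);
    global_.push_back(std::move(local));
    local.clear();
  }

  bool IsGlobalPoolEmpty() {
    std::lock_guard<std::mutex> guard(mutex_);
    return global_.empty();
  }

 private:
  bool TakeFromGlobal(int task_id) {
    std::lock_guard<std::mutex> guard(mutex_);
    if (global_.empty()) return false;
    local_[task_id] = std::move(global_.back());
    global_.pop_back();
    return true;
  }

  std::mutex mutex_;
  std::vector<std::vector<T>> global_;  // full segments shared between tasks
  std::vector<T> local_[kMaxTasks];     // one private segment per task
};

A marking task would call Push(task_id, object) for every discovered object and Pop(task_id, &object) in its drain loop, paying for the mutex roughly once per segment rather than once per object.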
diff --git a/deps/v8/src/heap/embedder-tracing.cc b/deps/v8/src/heap/embedder-tracing.cc
index 2d11724181..1d20918ef3 100644
--- a/deps/v8/src/heap/embedder-tracing.cc
+++ b/deps/v8/src/heap/embedder-tracing.cc
@@ -13,7 +13,7 @@ void LocalEmbedderHeapTracer::TracePrologue() {
if (!InUse()) return;
CHECK(cached_wrappers_to_trace_.empty());
- num_v8_marking_deque_was_empty_ = 0;
+ num_v8_marking_worklist_was_empty_ = 0;
remote_tracer_->TracePrologue();
}
diff --git a/deps/v8/src/heap/embedder-tracing.h b/deps/v8/src/heap/embedder-tracing.h
index 5e10d6e2e8..8146a1281c 100644
--- a/deps/v8/src/heap/embedder-tracing.h
+++ b/deps/v8/src/heap/embedder-tracing.h
@@ -19,7 +19,7 @@ class V8_EXPORT_PRIVATE LocalEmbedderHeapTracer final {
typedef std::pair<void*, void*> WrapperInfo;
LocalEmbedderHeapTracer()
- : remote_tracer_(nullptr), num_v8_marking_deque_was_empty_(0) {}
+ : remote_tracer_(nullptr), num_v8_marking_worklist_was_empty_(0) {}
void SetRemoteTracer(EmbedderHeapTracer* tracer) { remote_tracer_ = tracer; }
bool InUse() { return remote_tracer_ != nullptr; }
@@ -45,12 +45,14 @@ class V8_EXPORT_PRIVATE LocalEmbedderHeapTracer final {
// are too many of them.
bool RequiresImmediateWrapperProcessing();
- void NotifyV8MarkingDequeWasEmpty() { num_v8_marking_deque_was_empty_++; }
+ void NotifyV8MarkingWorklistWasEmpty() {
+ num_v8_marking_worklist_was_empty_++;
+ }
bool ShouldFinalizeIncrementalMarking() {
static const size_t kMaxIncrementalFixpointRounds = 3;
return !FLAG_incremental_marking_wrappers || !InUse() ||
NumberOfWrappersToTrace() == 0 ||
- num_v8_marking_deque_was_empty_ > kMaxIncrementalFixpointRounds;
+ num_v8_marking_worklist_was_empty_ > kMaxIncrementalFixpointRounds;
}
private:
@@ -58,7 +60,7 @@ class V8_EXPORT_PRIVATE LocalEmbedderHeapTracer final {
EmbedderHeapTracer* remote_tracer_;
WrapperCache cached_wrappers_to_trace_;
- size_t num_v8_marking_deque_was_empty_;
+ size_t num_v8_marking_worklist_was_empty_;
};
} // namespace internal
diff --git a/deps/v8/src/heap/gc-idle-time-handler.cc b/deps/v8/src/heap/gc-idle-time-handler.cc
index 905514c4bf..6142a0c8e4 100644
--- a/deps/v8/src/heap/gc-idle-time-handler.cc
+++ b/deps/v8/src/heap/gc-idle-time-handler.cc
@@ -73,9 +73,11 @@ double GCIdleTimeHandler::EstimateFinalIncrementalMarkCompactTime(
}
bool GCIdleTimeHandler::ShouldDoContextDisposalMarkCompact(
- int contexts_disposed, double contexts_disposal_rate) {
+ int contexts_disposed, double contexts_disposal_rate,
+ size_t size_of_objects) {
return contexts_disposed > 0 && contexts_disposal_rate > 0 &&
- contexts_disposal_rate < kHighContextDisposalRate;
+ contexts_disposal_rate < kHighContextDisposalRate &&
+ size_of_objects <= kMaxHeapSizeForContextDisposalMarkCompact;
}
bool GCIdleTimeHandler::ShouldDoFinalIncrementalMarkCompact(
@@ -123,9 +125,9 @@ GCIdleTimeAction GCIdleTimeHandler::Compute(double idle_time_in_ms,
GCIdleTimeHeapState heap_state) {
if (static_cast<int>(idle_time_in_ms) <= 0) {
if (heap_state.incremental_marking_stopped) {
- if (ShouldDoContextDisposalMarkCompact(
- heap_state.contexts_disposed,
- heap_state.contexts_disposal_rate)) {
+ if (ShouldDoContextDisposalMarkCompact(heap_state.contexts_disposed,
+ heap_state.contexts_disposal_rate,
+ heap_state.size_of_objects)) {
return GCIdleTimeAction::FullGC();
}
}
@@ -135,7 +137,8 @@ GCIdleTimeAction GCIdleTimeHandler::Compute(double idle_time_in_ms,
// We are in a context disposal GC scenario. Don't do anything if we do not
// get the right idle signal.
if (ShouldDoContextDisposalMarkCompact(heap_state.contexts_disposed,
- heap_state.contexts_disposal_rate)) {
+ heap_state.contexts_disposal_rate,
+ heap_state.size_of_objects)) {
return NothingOrDone(idle_time_in_ms);
}
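
The heuristic change above adds a heap-size cap: an idle-time full GC for context disposal is now only taken when the heap is small, in addition to the existing disposal-rate conditions. Restated as a standalone predicate for illustration; the value used for kHighContextDisposalRate here is only a placeholder (its real value is defined elsewhere in gc-idle-time-handler.cc), while the 100 MB cap mirrors the header change that follows.

#include <cstddef>

constexpr double kHighContextDisposalRate = 100;  // placeholder threshold
constexpr size_t kMaxHeapSizeForContextDisposalMarkCompact =
    100u * 1024 * 1024;  // 100 MB, matching the new constant in the header

bool ShouldDoContextDisposalMarkCompact(int contexts_disposed,
                                        double contexts_disposal_rate,
                                        size_t size_of_objects) {
  return contexts_disposed > 0 && contexts_disposal_rate > 0 &&
         contexts_disposal_rate < kHighContextDisposalRate &&
         size_of_objects <= kMaxHeapSizeForContextDisposalMarkCompact;
}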
diff --git a/deps/v8/src/heap/gc-idle-time-handler.h b/deps/v8/src/heap/gc-idle-time-handler.h
index b730a7bbba..722710e11a 100644
--- a/deps/v8/src/heap/gc-idle-time-handler.h
+++ b/deps/v8/src/heap/gc-idle-time-handler.h
@@ -107,6 +107,8 @@ class V8_EXPORT_PRIVATE GCIdleTimeHandler {
// considered low
static const size_t kLowAllocationThroughput = 1000;
+ static const size_t kMaxHeapSizeForContextDisposalMarkCompact = 100 * MB;
+
// If contexts are disposed at a higher rate a full gc is triggered.
static const double kHighContextDisposalRate;
@@ -136,7 +138,8 @@ class V8_EXPORT_PRIVATE GCIdleTimeHandler {
size_t size_of_objects, double mark_compact_speed_in_bytes_per_ms);
static bool ShouldDoContextDisposalMarkCompact(int context_disposed,
- double contexts_disposal_rate);
+ double contexts_disposal_rate,
+ size_t size_of_objects);
static bool ShouldDoFinalIncrementalMarkCompact(
double idle_time_in_ms, size_t size_of_objects,
diff --git a/deps/v8/src/heap/gc-tracer.cc b/deps/v8/src/heap/gc-tracer.cc
index 46d5bb66ee..d675492a3a 100644
--- a/deps/v8/src/heap/gc-tracer.cc
+++ b/deps/v8/src/heap/gc-tracer.cc
@@ -464,7 +464,6 @@ void GCTracer::PrintNVP() const {
"old_new=%.2f "
"weak=%.2f "
"roots=%.2f "
- "code=%.2f "
"semispace=%.2f "
"steps_count=%d "
"steps_took=%.1f "
@@ -504,7 +503,6 @@ void GCTracer::PrintNVP() const {
current_.scopes[Scope::SCAVENGER_OLD_TO_NEW_POINTERS],
current_.scopes[Scope::SCAVENGER_WEAK],
current_.scopes[Scope::SCAVENGER_ROOTS],
- current_.scopes[Scope::SCAVENGER_CODE_FLUSH_CANDIDATES],
current_.scopes[Scope::SCAVENGER_SEMISPACE],
current_.incremental_marking_scopes[GCTracer::Scope::MC_INCREMENTAL]
.steps,
@@ -530,7 +528,6 @@ void GCTracer::PrintNVP() const {
"minor_mc=%.2f "
"finish_sweeping=%.2f "
"mark=%.2f "
- "mark.identify_global_handles=%.2f "
"mark.seed=%.2f "
"mark.roots=%.2f "
"mark.weak=%.2f "
@@ -541,17 +538,14 @@ void GCTracer::PrintNVP() const {
"evacuate=%.2f "
"evacuate.copy=%.2f "
"evacuate.update_pointers=%.2f "
- "evacuate.update_pointers.to_new=%.2f "
- "evacuate.update_pointers.to_new.tospace=%.2f "
- "evacuate.update_pointers.to_new.roots=%.2f "
- "evacuate.update_pointers.to_new.old=%.2f "
+ "evacuate.update_pointers.to_new_roots=%.2f "
+ "evacuate.update_pointers.slots=%.2f "
"update_marking_deque=%.2f "
"reset_liveness=%.2f\n",
duration, spent_in_mutator, "mmc", current_.reduce_memory,
current_.scopes[Scope::MINOR_MC],
current_.scopes[Scope::MINOR_MC_SWEEPING],
current_.scopes[Scope::MINOR_MC_MARK],
- current_.scopes[Scope::MINOR_MC_MARK_IDENTIFY_GLOBAL_HANDLES],
current_.scopes[Scope::MINOR_MC_MARK_SEED],
current_.scopes[Scope::MINOR_MC_MARK_ROOTS],
current_.scopes[Scope::MINOR_MC_MARK_WEAK],
@@ -562,12 +556,9 @@ void GCTracer::PrintNVP() const {
current_.scopes[Scope::MINOR_MC_EVACUATE],
current_.scopes[Scope::MINOR_MC_EVACUATE_COPY],
current_.scopes[Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS],
- current_.scopes[Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS_TO_NEW],
- current_
- .scopes[Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS_TO_NEW_TOSPACE],
current_
.scopes[Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS_TO_NEW_ROOTS],
- current_.scopes[Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS_TO_NEW_OLD],
+ current_.scopes[Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS_SLOTS],
current_.scopes[Scope::MINOR_MC_MARKING_DEQUE],
current_.scopes[Scope::MINOR_MC_RESET_LIVENESS]);
break;
@@ -585,7 +576,6 @@ void GCTracer::PrintNVP() const {
"heap.external.epilogue=%.1f "
"heap.external.weak_global_handles=%.1f "
"clear=%1.f "
- "clear.code_flush=%.1f "
"clear.dependent_code=%.1f "
"clear.maps=%.1f "
"clear.slots_buffer=%.1f "
@@ -603,13 +593,12 @@ void GCTracer::PrintNVP() const {
"evacuate.epilogue=%.1f "
"evacuate.rebalance=%.1f "
"evacuate.update_pointers=%.1f "
- "evacuate.update_pointers.to_evacuated=%.1f "
- "evacuate.update_pointers.to_new=%.1f "
+ "evacuate.update_pointers.to_new_roots=%.1f "
+ "evacuate.update_pointers.slots=%.1f "
"evacuate.update_pointers.weak=%.1f "
"finish=%.1f "
"mark=%.1f "
"mark.finish_incremental=%.1f "
- "mark.prepare_code_flush=%.1f "
"mark.roots=%.1f "
"mark.weak_closure=%.1f "
"mark.weak_closure.ephemeral=%.1f "
@@ -671,7 +660,6 @@ void GCTracer::PrintNVP() const {
current_.scopes[Scope::HEAP_EXTERNAL_EPILOGUE],
current_.scopes[Scope::HEAP_EXTERNAL_WEAK_GLOBAL_HANDLES],
current_.scopes[Scope::MC_CLEAR],
- current_.scopes[Scope::MC_CLEAR_CODE_FLUSH],
current_.scopes[Scope::MC_CLEAR_DEPENDENT_CODE],
current_.scopes[Scope::MC_CLEAR_MAPS],
current_.scopes[Scope::MC_CLEAR_SLOTS_BUFFER],
@@ -689,12 +677,11 @@ void GCTracer::PrintNVP() const {
current_.scopes[Scope::MC_EVACUATE_EPILOGUE],
current_.scopes[Scope::MC_EVACUATE_REBALANCE],
current_.scopes[Scope::MC_EVACUATE_UPDATE_POINTERS],
- current_.scopes[Scope::MC_EVACUATE_UPDATE_POINTERS_TO_EVACUATED],
- current_.scopes[Scope::MC_EVACUATE_UPDATE_POINTERS_TO_NEW],
+ current_.scopes[Scope::MC_EVACUATE_UPDATE_POINTERS_TO_NEW_ROOTS],
+ current_.scopes[Scope::MC_EVACUATE_UPDATE_POINTERS_SLOTS],
current_.scopes[Scope::MC_EVACUATE_UPDATE_POINTERS_WEAK],
current_.scopes[Scope::MC_FINISH], current_.scopes[Scope::MC_MARK],
current_.scopes[Scope::MC_MARK_FINISH_INCREMENTAL],
- current_.scopes[Scope::MC_MARK_PREPARE_CODE_FLUSH],
current_.scopes[Scope::MC_MARK_ROOTS],
current_.scopes[Scope::MC_MARK_WEAK_CLOSURE],
current_.scopes[Scope::MC_MARK_WEAK_CLOSURE_EPHEMERAL],
diff --git a/deps/v8/src/heap/gc-tracer.h b/deps/v8/src/heap/gc-tracer.h
index 96b21c6712..6e3e875b94 100644
--- a/deps/v8/src/heap/gc-tracer.h
+++ b/deps/v8/src/heap/gc-tracer.h
@@ -10,6 +10,7 @@
#include "src/base/ring-buffer.h"
#include "src/counters.h"
#include "src/globals.h"
+#include "src/heap/heap.h"
#include "testing/gtest/include/gtest/gtest_prod.h" // nogncheck
namespace v8 {
@@ -34,85 +35,79 @@ enum ScavengeSpeedMode { kForAllObjects, kForSurvivedObjects };
F(MC_INCREMENTAL_EXTERNAL_EPILOGUE) \
F(MC_INCREMENTAL_EXTERNAL_PROLOGUE)
-#define TRACER_SCOPES(F) \
- INCREMENTAL_SCOPES(F) \
- F(HEAP_EPILOGUE) \
- F(HEAP_EPILOGUE_REDUCE_NEW_SPACE) \
- F(HEAP_EXTERNAL_EPILOGUE) \
- F(HEAP_EXTERNAL_PROLOGUE) \
- F(HEAP_EXTERNAL_WEAK_GLOBAL_HANDLES) \
- F(HEAP_PROLOGUE) \
- F(MC_CLEAR) \
- F(MC_CLEAR_CODE_FLUSH) \
- F(MC_CLEAR_DEPENDENT_CODE) \
- F(MC_CLEAR_MAPS) \
- F(MC_CLEAR_SLOTS_BUFFER) \
- F(MC_CLEAR_STORE_BUFFER) \
- F(MC_CLEAR_STRING_TABLE) \
- F(MC_CLEAR_WEAK_CELLS) \
- F(MC_CLEAR_WEAK_COLLECTIONS) \
- F(MC_CLEAR_WEAK_LISTS) \
- F(MC_EPILOGUE) \
- F(MC_EVACUATE) \
- F(MC_EVACUATE_CANDIDATES) \
- F(MC_EVACUATE_CLEAN_UP) \
- F(MC_EVACUATE_COPY) \
- F(MC_EVACUATE_EPILOGUE) \
- F(MC_EVACUATE_PROLOGUE) \
- F(MC_EVACUATE_REBALANCE) \
- F(MC_EVACUATE_UPDATE_POINTERS) \
- F(MC_EVACUATE_UPDATE_POINTERS_TO_EVACUATED) \
- F(MC_EVACUATE_UPDATE_POINTERS_TO_NEW) \
- F(MC_EVACUATE_UPDATE_POINTERS_WEAK) \
- F(MC_FINISH) \
- F(MC_MARK) \
- F(MC_MARK_FINISH_INCREMENTAL) \
- F(MC_MARK_PREPARE_CODE_FLUSH) \
- F(MC_MARK_ROOTS) \
- F(MC_MARK_WEAK_CLOSURE) \
- F(MC_MARK_WEAK_CLOSURE_EPHEMERAL) \
- F(MC_MARK_WEAK_CLOSURE_WEAK_HANDLES) \
- F(MC_MARK_WEAK_CLOSURE_WEAK_ROOTS) \
- F(MC_MARK_WEAK_CLOSURE_HARMONY) \
- F(MC_MARK_WRAPPER_EPILOGUE) \
- F(MC_MARK_WRAPPER_PROLOGUE) \
- F(MC_MARK_WRAPPER_TRACING) \
- F(MC_PROLOGUE) \
- F(MC_SWEEP) \
- F(MC_SWEEP_CODE) \
- F(MC_SWEEP_MAP) \
- F(MC_SWEEP_OLD) \
- F(MINOR_MC) \
- F(MINOR_MC_CLEAR) \
- F(MINOR_MC_CLEAR_STRING_TABLE) \
- F(MINOR_MC_CLEAR_WEAK_LISTS) \
- F(MINOR_MC_EVACUATE) \
- F(MINOR_MC_EVACUATE_CLEAN_UP) \
- F(MINOR_MC_EVACUATE_COPY) \
- F(MINOR_MC_EVACUATE_EPILOGUE) \
- F(MINOR_MC_EVACUATE_PROLOGUE) \
- F(MINOR_MC_EVACUATE_REBALANCE) \
- F(MINOR_MC_EVACUATE_UPDATE_POINTERS) \
- F(MINOR_MC_EVACUATE_UPDATE_POINTERS_TO_NEW) \
- F(MINOR_MC_EVACUATE_UPDATE_POINTERS_TO_NEW_ROOTS) \
- F(MINOR_MC_EVACUATE_UPDATE_POINTERS_TO_NEW_TOSPACE) \
- F(MINOR_MC_EVACUATE_UPDATE_POINTERS_TO_NEW_OLD) \
- F(MINOR_MC_EVACUATE_UPDATE_POINTERS_WEAK) \
- F(MINOR_MC_MARK) \
- F(MINOR_MC_MARK_GLOBAL_HANDLES) \
- F(MINOR_MC_MARK_IDENTIFY_GLOBAL_HANDLES) \
- F(MINOR_MC_MARK_SEED) \
- F(MINOR_MC_MARK_ROOTS) \
- F(MINOR_MC_MARK_WEAK) \
- F(MINOR_MC_MARKING_DEQUE) \
- F(MINOR_MC_RESET_LIVENESS) \
- F(MINOR_MC_SWEEPING) \
- F(SCAVENGER_CODE_FLUSH_CANDIDATES) \
- F(SCAVENGER_EVACUATE) \
- F(SCAVENGER_OLD_TO_NEW_POINTERS) \
- F(SCAVENGER_ROOTS) \
- F(SCAVENGER_SCAVENGE) \
- F(SCAVENGER_SEMISPACE) \
+#define TRACER_SCOPES(F) \
+ INCREMENTAL_SCOPES(F) \
+ F(HEAP_EPILOGUE) \
+ F(HEAP_EPILOGUE_REDUCE_NEW_SPACE) \
+ F(HEAP_EXTERNAL_EPILOGUE) \
+ F(HEAP_EXTERNAL_PROLOGUE) \
+ F(HEAP_EXTERNAL_WEAK_GLOBAL_HANDLES) \
+ F(HEAP_PROLOGUE) \
+ F(MC_CLEAR) \
+ F(MC_CLEAR_DEPENDENT_CODE) \
+ F(MC_CLEAR_MAPS) \
+ F(MC_CLEAR_SLOTS_BUFFER) \
+ F(MC_CLEAR_STORE_BUFFER) \
+ F(MC_CLEAR_STRING_TABLE) \
+ F(MC_CLEAR_WEAK_CELLS) \
+ F(MC_CLEAR_WEAK_COLLECTIONS) \
+ F(MC_CLEAR_WEAK_LISTS) \
+ F(MC_EPILOGUE) \
+ F(MC_EVACUATE) \
+ F(MC_EVACUATE_CANDIDATES) \
+ F(MC_EVACUATE_CLEAN_UP) \
+ F(MC_EVACUATE_COPY) \
+ F(MC_EVACUATE_EPILOGUE) \
+ F(MC_EVACUATE_PROLOGUE) \
+ F(MC_EVACUATE_REBALANCE) \
+ F(MC_EVACUATE_UPDATE_POINTERS) \
+ F(MC_EVACUATE_UPDATE_POINTERS_SLOTS) \
+ F(MC_EVACUATE_UPDATE_POINTERS_TO_NEW_ROOTS) \
+ F(MC_EVACUATE_UPDATE_POINTERS_WEAK) \
+ F(MC_FINISH) \
+ F(MC_MARK) \
+ F(MC_MARK_FINISH_INCREMENTAL) \
+ F(MC_MARK_ROOTS) \
+ F(MC_MARK_WEAK_CLOSURE) \
+ F(MC_MARK_WEAK_CLOSURE_EPHEMERAL) \
+ F(MC_MARK_WEAK_CLOSURE_WEAK_HANDLES) \
+ F(MC_MARK_WEAK_CLOSURE_WEAK_ROOTS) \
+ F(MC_MARK_WEAK_CLOSURE_HARMONY) \
+ F(MC_MARK_WRAPPER_EPILOGUE) \
+ F(MC_MARK_WRAPPER_PROLOGUE) \
+ F(MC_MARK_WRAPPER_TRACING) \
+ F(MC_PROLOGUE) \
+ F(MC_SWEEP) \
+ F(MC_SWEEP_CODE) \
+ F(MC_SWEEP_MAP) \
+ F(MC_SWEEP_OLD) \
+ F(MINOR_MC) \
+ F(MINOR_MC_CLEAR) \
+ F(MINOR_MC_CLEAR_STRING_TABLE) \
+ F(MINOR_MC_CLEAR_WEAK_LISTS) \
+ F(MINOR_MC_EVACUATE) \
+ F(MINOR_MC_EVACUATE_CLEAN_UP) \
+ F(MINOR_MC_EVACUATE_COPY) \
+ F(MINOR_MC_EVACUATE_EPILOGUE) \
+ F(MINOR_MC_EVACUATE_PROLOGUE) \
+ F(MINOR_MC_EVACUATE_REBALANCE) \
+ F(MINOR_MC_EVACUATE_UPDATE_POINTERS) \
+ F(MINOR_MC_EVACUATE_UPDATE_POINTERS_SLOTS) \
+ F(MINOR_MC_EVACUATE_UPDATE_POINTERS_TO_NEW_ROOTS) \
+ F(MINOR_MC_EVACUATE_UPDATE_POINTERS_WEAK) \
+ F(MINOR_MC_MARK) \
+ F(MINOR_MC_MARK_GLOBAL_HANDLES) \
+ F(MINOR_MC_MARK_SEED) \
+ F(MINOR_MC_MARK_ROOTS) \
+ F(MINOR_MC_MARK_WEAK) \
+ F(MINOR_MC_MARKING_DEQUE) \
+ F(MINOR_MC_RESET_LIVENESS) \
+ F(MINOR_MC_SWEEPING) \
+ F(SCAVENGER_EVACUATE) \
+ F(SCAVENGER_OLD_TO_NEW_POINTERS) \
+ F(SCAVENGER_ROOTS) \
+ F(SCAVENGER_SCAVENGE) \
+ F(SCAVENGER_SEMISPACE) \
F(SCAVENGER_WEAK)
#define TRACE_GC(tracer, scope_id) \
diff --git a/deps/v8/src/heap/heap-inl.h b/deps/v8/src/heap/heap-inl.h
index 87aac8731d..33e31b02b8 100644
--- a/deps/v8/src/heap/heap-inl.h
+++ b/deps/v8/src/heap/heap-inl.h
@@ -23,6 +23,8 @@
#include "src/msan.h"
#include "src/objects-inl.h"
#include "src/objects/scope-info.h"
+#include "src/objects/script-inl.h"
+#include "src/profiler/heap-profiler.h"
#include "src/string-hasher.h"
namespace v8 {
@@ -30,7 +32,7 @@ namespace internal {
AllocationSpace AllocationResult::RetrySpace() {
DCHECK(IsRetry());
- return static_cast<AllocationSpace>(Smi::cast(object_)->value());
+ return static_cast<AllocationSpace>(Smi::ToInt(object_));
}
HeapObject* AllocationResult::ToObjectChecked() {
@@ -38,83 +40,6 @@ HeapObject* AllocationResult::ToObjectChecked() {
return HeapObject::cast(object_);
}
-void PromotionQueue::insert(HeapObject* target, int32_t size) {
- if (emergency_stack_ != NULL) {
- emergency_stack_->Add(Entry(target, size));
- return;
- }
-
- if ((rear_ - 1) < limit_) {
- RelocateQueueHead();
- emergency_stack_->Add(Entry(target, size));
- return;
- }
-
- struct Entry* entry = reinterpret_cast<struct Entry*>(--rear_);
- entry->obj_ = target;
- entry->size_ = size;
-
-// Assert no overflow into live objects.
-#ifdef DEBUG
- SemiSpace::AssertValidRange(target->GetIsolate()->heap()->new_space()->top(),
- reinterpret_cast<Address>(rear_));
-#endif
-}
-
-void PromotionQueue::remove(HeapObject** target, int32_t* size) {
- DCHECK(!is_empty());
- if (front_ == rear_) {
- Entry e = emergency_stack_->RemoveLast();
- *target = e.obj_;
- *size = e.size_;
- return;
- }
-
- struct Entry* entry = reinterpret_cast<struct Entry*>(--front_);
- *target = entry->obj_;
- *size = entry->size_;
-
- // Assert no underflow.
- SemiSpace::AssertValidRange(reinterpret_cast<Address>(rear_),
- reinterpret_cast<Address>(front_));
-}
-
-Page* PromotionQueue::GetHeadPage() {
- return Page::FromAllocationAreaAddress(reinterpret_cast<Address>(rear_));
-}
-
-void PromotionQueue::SetNewLimit(Address limit) {
- // If we are already using an emergency stack, we can ignore it.
- if (emergency_stack_) return;
-
- // If the limit is not on the same page, we can ignore it.
- if (Page::FromAllocationAreaAddress(limit) != GetHeadPage()) return;
-
- limit_ = reinterpret_cast<struct Entry*>(limit);
-
- if (limit_ <= rear_) {
- return;
- }
-
- RelocateQueueHead();
-}
-
-bool PromotionQueue::IsBelowPromotionQueue(Address to_space_top) {
- // If an emergency stack is used, the to-space address cannot interfere
- // with the promotion queue.
- if (emergency_stack_) return true;
-
- // If the given to-space top pointer and the head of the promotion queue
- // are not on the same page, then the to-space objects are below the
- // promotion queue.
- if (GetHeadPage() != Page::FromAddress(to_space_top)) {
- return true;
- }
- // If the to space top pointer is smaller or equal than the promotion
- // queue head, then the to-space objects are below the promotion queue.
- return reinterpret_cast<struct Entry*>(to_space_top) <= rear_;
-}
-
#define ROOT_ACCESSOR(type, name, camel_name) \
type* Heap::name() { return type::cast(roots_[k##camel_name##RootIndex]); }
ROOT_LIST(ROOT_ACCESSOR)
@@ -292,6 +217,9 @@ AllocationResult Heap::CopyFixedDoubleArray(FixedDoubleArray* src) {
return CopyFixedDoubleArrayWithMap(src, src->map());
}
+AllocationResult Heap::AllocateFixedArray(int length, PretenureFlag pretenure) {
+ return AllocateFixedArrayWithFiller(length, pretenure, undefined_value());
+}
AllocationResult Heap::AllocateRaw(int size_in_bytes, AllocationSpace space,
AllocationAlignment alignment) {
@@ -367,7 +295,7 @@ void Heap::OnAllocationEvent(HeapObject* object, int size_in_bytes) {
UpdateAllocationsHash(size_in_bytes);
if (allocations_count_ % FLAG_dump_allocations_digest_at_alloc == 0) {
- PrintAlloctionsHash();
+ PrintAllocationsHash();
}
}
@@ -402,7 +330,7 @@ void Heap::OnMoveEvent(HeapObject* target, HeapObject* source,
UpdateAllocationsHash(size_in_bytes);
if (allocations_count_ % FLAG_dump_allocations_digest_at_alloc == 0) {
- PrintAlloctionsHash();
+ PrintAllocationsHash();
}
}
}
@@ -491,7 +419,7 @@ bool Heap::InOldSpaceSlow(Address address) {
return old_space_->ContainsSlow(address);
}
-bool Heap::ShouldBePromoted(Address old_address, int object_size) {
+bool Heap::ShouldBePromoted(Address old_address) {
Page* page = Page::FromAddress(old_address);
Address age_mark = new_space_->age_mark();
return page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK) &&
@@ -524,48 +452,15 @@ Address* Heap::store_buffer_top_address() {
return store_buffer()->top_address();
}
-bool Heap::AllowedToBeMigrated(HeapObject* obj, AllocationSpace dst) {
- // Object migration is governed by the following rules:
- //
- // 1) Objects in new-space can be migrated to the old space
- // that matches their target space or they stay in new-space.
- // 2) Objects in old-space stay in the same space when migrating.
- // 3) Fillers (two or more words) can migrate due to left-trimming of
- // fixed arrays in new-space or old space.
- // 4) Fillers (one word) can never migrate, they are skipped by
- // incremental marking explicitly to prevent invalid pattern.
- //
- // Since this function is used for debugging only, we do not place
- // asserts here, but check everything explicitly.
- if (obj->map() == one_pointer_filler_map()) return false;
- InstanceType type = obj->map()->instance_type();
- MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
- AllocationSpace src = chunk->owner()->identity();
- switch (src) {
- case NEW_SPACE:
- return dst == src || dst == OLD_SPACE;
- case OLD_SPACE:
- return dst == src &&
- (dst == OLD_SPACE || obj->IsFiller() || obj->IsExternalString());
- case CODE_SPACE:
- return dst == src && type == CODE_TYPE;
- case MAP_SPACE:
- case LO_SPACE:
- return false;
- }
- UNREACHABLE();
- return false;
-}
-
void Heap::CopyBlock(Address dst, Address src, int byte_size) {
CopyWords(reinterpret_cast<Object**>(dst), reinterpret_cast<Object**>(src),
static_cast<size_t>(byte_size / kPointerSize));
}
template <Heap::FindMementoMode mode>
-AllocationMemento* Heap::FindAllocationMemento(HeapObject* object) {
+AllocationMemento* Heap::FindAllocationMemento(Map* map, HeapObject* object) {
Address object_address = object->address();
- Address memento_address = object_address + object->Size();
+ Address memento_address = object_address + object->SizeFromMap(map);
Address last_memento_word_address = memento_address + kPointerSize;
// If the memento would be on another page, bail out immediately.
if (!Page::OnSamePage(object_address, last_memento_word_address)) {
@@ -621,11 +516,10 @@ AllocationMemento* Heap::FindAllocationMemento(HeapObject* object) {
UNREACHABLE();
}
UNREACHABLE();
- return nullptr;
}
template <Heap::UpdateAllocationSiteMode mode>
-void Heap::UpdateAllocationSite(HeapObject* object,
+void Heap::UpdateAllocationSite(Map* map, HeapObject* object,
base::HashMap* pretenuring_feedback) {
DCHECK(InFromSpace(object) ||
(InToSpace(object) &&
@@ -635,9 +529,10 @@ void Heap::UpdateAllocationSite(HeapObject* object,
Page::FromAddress(object->address())
->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION)));
if (!FLAG_allocation_site_pretenuring ||
- !AllocationSite::CanTrack(object->map()->instance_type()))
+ !AllocationSite::CanTrack(map->instance_type()))
return;
- AllocationMemento* memento_candidate = FindAllocationMemento<kForGC>(object);
+ AllocationMemento* memento_candidate =
+ FindAllocationMemento<kForGC>(map, object);
if (memento_candidate == nullptr) return;
if (mode == kGlobal) {
@@ -673,15 +568,6 @@ void Heap::RemoveAllocationSitePretenuringFeedback(AllocationSite* site) {
site, static_cast<uint32_t>(bit_cast<uintptr_t>(site)));
}
-bool Heap::CollectGarbage(AllocationSpace space,
- GarbageCollectionReason gc_reason,
- const v8::GCCallbackFlags callbackFlags) {
- const char* collector_reason = NULL;
- GarbageCollector collector = SelectGarbageCollector(space, &collector_reason);
- return CollectGarbage(collector, gc_reason, collector_reason, callbackFlags);
-}
-
-
Isolate* Heap::isolate() {
return reinterpret_cast<Isolate*>(
reinterpret_cast<intptr_t>(this) -
@@ -754,19 +640,10 @@ void Heap::ExternalStringTable::ShrinkNewStrings(int position) {
#endif
}
-void Heap::ClearInstanceofCache() { set_instanceof_cache_function(Smi::kZero); }
-
Oddball* Heap::ToBoolean(bool condition) {
return condition ? true_value() : false_value();
}
-
-void Heap::CompletelyClearInstanceofCache() {
- set_instanceof_cache_map(Smi::kZero);
- set_instanceof_cache_function(Smi::kZero);
-}
-
-
uint32_t Heap::HashSeed() {
uint32_t seed = static_cast<uint32_t>(hash_seed()->value());
DCHECK(FLAG_randomize_hashes || seed == 0);
@@ -858,34 +735,6 @@ AlwaysAllocateScope::~AlwaysAllocateScope() {
heap_->always_allocate_scope_count_.Decrement(1);
}
-void VerifyPointersVisitor::VisitPointers(HeapObject* host, Object** start,
- Object** end) {
- VerifyPointers(start, end);
-}
-
-void VerifyPointersVisitor::VisitRootPointers(Root root, Object** start,
- Object** end) {
- VerifyPointers(start, end);
-}
-
-void VerifyPointersVisitor::VerifyPointers(Object** start, Object** end) {
- for (Object** current = start; current < end; current++) {
- if ((*current)->IsHeapObject()) {
- HeapObject* object = HeapObject::cast(*current);
- CHECK(object->GetIsolate()->heap()->Contains(object));
- CHECK(object->map()->IsMap());
- } else {
- CHECK((*current)->IsSmi());
- }
- }
-}
-
-void VerifySmisVisitor::VisitRootPointers(Root root, Object** start,
- Object** end) {
- for (Object** current = start; current < end; current++) {
- CHECK((*current)->IsSmi());
- }
-}
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/heap/heap.cc b/deps/v8/src/heap/heap.cc
index fa47dc825b..20b20024c3 100644
--- a/deps/v8/src/heap/heap.cc
+++ b/deps/v8/src/heap/heap.cc
@@ -4,6 +4,9 @@
#include "src/heap/heap.h"
+#include <unordered_map>
+#include <unordered_set>
+
#include "src/accessors.h"
#include "src/api.h"
#include "src/assembler-inl.h"
@@ -38,6 +41,8 @@
#include "src/heap/scavenger-inl.h"
#include "src/heap/store-buffer.h"
#include "src/interpreter/interpreter.h"
+#include "src/objects/object-macros.h"
+#include "src/objects/shared-function-info.h"
#include "src/regexp/jsregexp.h"
#include "src/runtime-profiler.h"
#include "src/snapshot/natives.h"
@@ -52,7 +57,6 @@
namespace v8 {
namespace internal {
-
struct Heap::StrongRootsList {
Object** start;
Object** end;
@@ -81,7 +85,7 @@ Heap::Heap()
// semispace_size_ should be a power of 2 and old_generation_size_ should
// be a multiple of Page::kPageSize.
max_semi_space_size_(8 * (kPointerSize / 4) * MB),
- initial_semispace_size_(MB),
+ initial_semispace_size_(kMinSemiSpaceSizeInKB * KB),
max_old_generation_size_(700ul * (kPointerSize / 4) * MB),
initial_max_old_generation_size_(max_old_generation_size_),
initial_old_generation_size_(max_old_generation_size_ /
@@ -112,6 +116,7 @@ Heap::Heap()
raw_allocations_hash_(0),
ms_count_(0),
gc_count_(0),
+ mmap_region_base_(0),
remembered_unmapped_pages_index_(0),
#ifdef DEBUG
allocation_timeout_(0),
@@ -130,7 +135,6 @@ Heap::Heap()
maximum_size_scavenges_(0),
last_idle_notification_time_(0.0),
last_gc_time_(0.0),
- scavenge_collector_(nullptr),
mark_compact_collector_(nullptr),
minor_mark_compact_collector_(nullptr),
memory_allocator_(nullptr),
@@ -146,11 +150,9 @@ Heap::Heap()
new_space_allocation_counter_(0),
old_generation_allocation_counter_at_last_gc_(0),
old_generation_size_at_last_gc_(0),
- gcs_since_last_deopt_(0),
global_pretenuring_feedback_(nullptr),
ring_buffer_full_(false),
ring_buffer_end_(0),
- promotion_queue_(this),
configured_(false),
current_gc_flags_(Heap::kNoGCFlags),
current_gc_callback_flags_(GCCallbackFlags::kNoGCCallbackFlags),
@@ -161,17 +163,9 @@ Heap::Heap()
heap_iterator_depth_(0),
local_embedder_heap_tracer_(nullptr),
fast_promotion_mode_(false),
- use_tasks_(true),
force_oom_(false),
delay_sweeper_tasks_for_testing_(false),
pending_layout_change_object_(nullptr) {
-// Allow build-time customization of the max semispace size. Building
-// V8 with snapshots and a non-default max semispace size is much
-// easier if you can define it as part of the build environment.
-#if defined(V8_MAX_SEMISPACE_SIZE)
- max_semi_space_size_ = reserved_semispace_size_ = V8_MAX_SEMISPACE_SIZE;
-#endif
-
// Ensure old_generation_size_ is a multiple of kPageSize.
DCHECK((max_old_generation_size_ & (Page::kPageSize - 1)) == 0);
@@ -378,6 +372,8 @@ void Heap::PrintShortHeapStatistics() {
this->CommittedMemory() / KB);
PrintIsolate(isolate_, "External memory reported: %6" PRId64 " KB\n",
external_memory_ / KB);
+ PrintIsolate(isolate_, "External memory global %zu KB\n",
+ external_memory_callback_() / KB);
PrintIsolate(isolate_, "Total time spent in GC : %.1f ms\n",
total_gc_time_ms_);
}
@@ -556,6 +552,65 @@ class Heap::PretenuringScope {
Heap* heap_;
};
+namespace {
+inline bool MakePretenureDecision(
+ AllocationSite* site, AllocationSite::PretenureDecision current_decision,
+ double ratio, bool maximum_size_scavenge) {
+ // Here we just allow state transitions from undecided or maybe tenure
+ // to don't tenure, maybe tenure, or tenure.
+ if ((current_decision == AllocationSite::kUndecided ||
+ current_decision == AllocationSite::kMaybeTenure)) {
+ if (ratio >= AllocationSite::kPretenureRatio) {
+ // We just transition into tenure state when the semi-space was at
+ // maximum capacity.
+ if (maximum_size_scavenge) {
+ site->set_deopt_dependent_code(true);
+ site->set_pretenure_decision(AllocationSite::kTenure);
+ // Currently we just need to deopt when we make a state transition to
+ // tenure.
+ return true;
+ }
+ site->set_pretenure_decision(AllocationSite::kMaybeTenure);
+ } else {
+ site->set_pretenure_decision(AllocationSite::kDontTenure);
+ }
+ }
+ return false;
+}
+
+inline bool DigestPretenuringFeedback(Isolate* isolate, AllocationSite* site,
+ bool maximum_size_scavenge) {
+ bool deopt = false;
+ int create_count = site->memento_create_count();
+ int found_count = site->memento_found_count();
+ bool minimum_mementos_created =
+ create_count >= AllocationSite::kPretenureMinimumCreated;
+ double ratio = minimum_mementos_created || FLAG_trace_pretenuring_statistics
+ ? static_cast<double>(found_count) / create_count
+ : 0.0;
+ AllocationSite::PretenureDecision current_decision =
+ site->pretenure_decision();
+
+ if (minimum_mementos_created) {
+ deopt = MakePretenureDecision(site, current_decision, ratio,
+ maximum_size_scavenge);
+ }
+
+ if (FLAG_trace_pretenuring_statistics) {
+ PrintIsolate(isolate,
+ "pretenuring: AllocationSite(%p): (created, found, ratio) "
+ "(%d, %d, %f) %s => %s\n",
+ static_cast<void*>(site), create_count, found_count, ratio,
+ site->PretenureDecisionName(current_decision),
+ site->PretenureDecisionName(site->pretenure_decision()));
+ }
+
+ // Clear feedback calculation fields until the next gc.
+ site->set_memento_found_count(0);
+ site->set_memento_create_count(0);
+ return deopt;
+}
+} // namespace
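
A compact way to read the two helpers added above: the found/created memento ratio is compared against AllocationSite::kPretenureRatio, and the site only jumps straight to kTenure (and requests a deopt) when the triggering scavenge ran at maximum semi-space capacity. The sketch below mirrors that state machine with assumed stand-in constants (0.85 ratio, 100 minimum mementos) and free functions instead of AllocationSite methods:

    #include <cstdio>

    enum class PretenureDecision { kUndecided, kDontTenure, kMaybeTenure, kTenure };

    // Assumed stand-ins for AllocationSite::kPretenureRatio and
    // AllocationSite::kPretenureMinimumCreated.
    constexpr double kPretenureRatio = 0.85;
    constexpr int kPretenureMinimumCreated = 100;

    // Returns true when dependent code should be deoptimized, i.e. on the
    // transition into the tenure state, as in the helpers above.
    bool Digest(PretenureDecision* decision, int created, int found,
                bool maximum_size_scavenge) {
      if (created < kPretenureMinimumCreated) return false;
      if (*decision != PretenureDecision::kUndecided &&
          *decision != PretenureDecision::kMaybeTenure) {
        return false;  // kDontTenure / kTenure are not revisited here.
      }
      double ratio = static_cast<double>(found) / created;
      if (ratio < kPretenureRatio) {
        *decision = PretenureDecision::kDontTenure;
        return false;
      }
      if (maximum_size_scavenge) {
        *decision = PretenureDecision::kTenure;
        return true;  // state transition to tenure triggers the deopt
      }
      *decision = PretenureDecision::kMaybeTenure;
      return false;
    }

    int main() {
      PretenureDecision d = PretenureDecision::kUndecided;
      bool deopt = Digest(&d, /*created=*/200, /*found=*/190,
                          /*maximum_size_scavenge=*/true);
      std::printf("deopt=%d decision=%d\n", deopt, static_cast<int>(d));
    }
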
void Heap::ProcessPretenuringFeedback() {
bool trigger_deoptimization = false;
@@ -582,7 +637,7 @@ void Heap::ProcessPretenuringFeedback() {
DCHECK(site->IsAllocationSite());
active_allocation_sites++;
allocation_mementos_found += found_count;
- if (site->DigestPretenuringFeedback(maximum_size_scavenge)) {
+ if (DigestPretenuringFeedback(isolate_, site, maximum_size_scavenge)) {
trigger_deoptimization = true;
}
if (site->GetPretenureMode() == TENURED) {
@@ -667,15 +722,6 @@ void Heap::GarbageCollectionEpilogue() {
if (FLAG_code_stats) ReportCodeStatistics("After GC");
if (FLAG_check_handle_count) CheckHandleCount();
#endif
- if (FLAG_deopt_every_n_garbage_collections > 0) {
- // TODO(jkummerow/ulan/jarin): This is not safe! We can't assume that
- // the topmost optimized frame can be deoptimized safely, because it
- // might not have a lazy bailout point right after its current PC.
- if (++gcs_since_last_deopt_ == FLAG_deopt_every_n_garbage_collections) {
- Deoptimizer::DeoptimizeAll(isolate());
- gcs_since_last_deopt_ = 0;
- }
- }
UpdateMaximumCommitted();
@@ -770,7 +816,7 @@ void Heap::PreprocessStackTraces() {
// a stack trace that has already been preprocessed. Guard against this.
if (!maybe_code->IsAbstractCode()) break;
AbstractCode* abstract_code = AbstractCode::cast(maybe_code);
- int offset = Smi::cast(elements->get(j + 3))->value();
+ int offset = Smi::ToInt(elements->get(j + 3));
int pos = abstract_code->SourcePosition(offset);
elements->set(j + 2, Smi::FromInt(pos));
}
@@ -911,7 +957,7 @@ void Heap::CollectAllAvailableGarbage(GarbageCollectionReason gc_reason) {
const int kMaxNumberOfAttempts = 7;
const int kMinNumberOfAttempts = 2;
for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) {
- if (!CollectGarbage(MARK_COMPACTOR, gc_reason, NULL,
+ if (!CollectGarbage(OLD_SPACE, gc_reason,
v8::kGCCallbackFlagCollectAllAvailableGarbage) &&
attempt + 1 >= kMinNumberOfAttempts) {
break;
@@ -923,45 +969,47 @@ void Heap::CollectAllAvailableGarbage(GarbageCollectionReason gc_reason) {
}
void Heap::ReportExternalMemoryPressure() {
+ const GCCallbackFlags kGCCallbackFlagsForExternalMemory =
+ static_cast<GCCallbackFlags>(
+ kGCCallbackFlagSynchronousPhantomCallbackProcessing |
+ kGCCallbackFlagCollectAllExternalMemory);
if (external_memory_ >
(external_memory_at_last_mark_compact_ + external_memory_hard_limit())) {
CollectAllGarbage(
kReduceMemoryFootprintMask | kFinalizeIncrementalMarkingMask,
GarbageCollectionReason::kExternalMemoryPressure,
static_cast<GCCallbackFlags>(kGCCallbackFlagCollectAllAvailableGarbage |
- kGCCallbackFlagCollectAllExternalMemory));
+ kGCCallbackFlagsForExternalMemory));
return;
}
if (incremental_marking()->IsStopped()) {
if (incremental_marking()->CanBeActivated()) {
- StartIncrementalMarking(
- i::Heap::kNoGCFlags, GarbageCollectionReason::kExternalMemoryPressure,
- static_cast<GCCallbackFlags>(
- kGCCallbackFlagSynchronousPhantomCallbackProcessing |
- kGCCallbackFlagCollectAllExternalMemory));
+ StartIncrementalMarking(i::Heap::kNoGCFlags,
+ GarbageCollectionReason::kExternalMemoryPressure,
+ kGCCallbackFlagsForExternalMemory);
} else {
CollectAllGarbage(i::Heap::kNoGCFlags,
GarbageCollectionReason::kExternalMemoryPressure,
- kGCCallbackFlagSynchronousPhantomCallbackProcessing);
+ kGCCallbackFlagsForExternalMemory);
}
} else {
// Incremental marking is turned on and has already been started.
- const double pressure =
- static_cast<double>(external_memory_ -
- external_memory_at_last_mark_compact_ -
- kExternalAllocationSoftLimit) /
- external_memory_hard_limit();
- DCHECK_GE(1, pressure);
- const double kMaxStepSizeOnExternalLimit = 25;
- const double deadline = MonotonicallyIncreasingTimeInMs() +
- pressure * kMaxStepSizeOnExternalLimit;
+ const double kMinStepSize = 5;
+ const double kMaxStepSize = 10;
+ const double ms_step =
+ Min(kMaxStepSize,
+ Max(kMinStepSize, static_cast<double>(external_memory_) /
+ external_memory_limit_ * kMinStepSize));
+ const double deadline = MonotonicallyIncreasingTimeInMs() + ms_step;
+ // Extend the gc callback flags with external memory flags.
+ current_gc_callback_flags_ = static_cast<GCCallbackFlags>(
+ current_gc_callback_flags_ | kGCCallbackFlagsForExternalMemory);
incremental_marking()->AdvanceIncrementalMarking(
deadline, IncrementalMarking::GC_VIA_STACK_GUARD,
IncrementalMarking::FORCE_COMPLETION, StepOrigin::kV8);
}
}
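
The replacement step-size computation above is just a clamp: the marking step grows linearly with the ratio of external memory to the external memory limit, scaled by the 5 ms minimum, and is capped at 10 ms. A standalone sketch of that formula with a couple of worked values (the 5/10 ms bounds are taken from the hunk above):

    #include <algorithm>
    #include <cstdint>
    #include <cstdio>

    // Clamp an incremental-marking step between 5 ms and 10 ms, scaled by how
    // much external memory has been allocated relative to the current limit.
    double ExternalMemoryStepMs(int64_t external_memory,
                                int64_t external_memory_limit) {
      const double kMinStepSize = 5;
      const double kMaxStepSize = 10;
      return std::min(kMaxStepSize,
                      std::max(kMinStepSize,
                               static_cast<double>(external_memory) /
                                   external_memory_limit * kMinStepSize));
    }

    int main() {
      // Below and at the limit the step stays at the 5 ms floor; well past the
      // limit it saturates at 10 ms. Prints "5.0 5.0 10.0".
      std::printf("%.1f %.1f %.1f\n", ExternalMemoryStepMs(64, 128),
                  ExternalMemoryStepMs(128, 128), ExternalMemoryStepMs(512, 128));
    }
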
-
void Heap::EnsureFillerObjectAtTop() {
// There may be an allocation memento behind objects in new space. Upon
// evacuation of a non-full new space (or if we are on the last page) there
@@ -975,14 +1023,16 @@ void Heap::EnsureFillerObjectAtTop() {
}
}
-bool Heap::CollectGarbage(GarbageCollector collector,
+bool Heap::CollectGarbage(AllocationSpace space,
GarbageCollectionReason gc_reason,
- const char* collector_reason,
const v8::GCCallbackFlags gc_callback_flags) {
// The VM is in the GC state until exiting this function.
VMState<GC> state(isolate());
RuntimeCallTimerScope runtime_timer(isolate(), &RuntimeCallStats::GC);
+ const char* collector_reason = NULL;
+ GarbageCollector collector = SelectGarbageCollector(space, &collector_reason);
+
#ifdef DEBUG
// Reset the allocation timeout to the GC interval, but make sure to
// allow at least a few allocations after a collection. The reason
@@ -1065,8 +1115,8 @@ bool Heap::CollectGarbage(GarbageCollector collector,
// causes another mark-compact.
if (IsYoungGenerationCollector(collector) &&
!ShouldAbortIncrementalMarking()) {
- StartIncrementalMarkingIfAllocationLimitIsReached(kNoGCFlags,
- kNoGCCallbackFlags);
+ StartIncrementalMarkingIfAllocationLimitIsReached(
+ kNoGCFlags, kGCCallbackScheduleIdleGarbageCollection);
}
return next_gc_likely_to_collect_more;
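
With this change CollectGarbage receives the AllocationSpace directly and picks the collector itself; the inline wrapper that previously did the selection was dropped from heap-inl.h earlier in this diff. A rough sketch of the selection it delegates to, under the simplifying assumption that only the space matters; the real SelectGarbageCollector also consults flags, incremental-marking state, and old-generation limits:

    enum AllocationSpace { NEW_SPACE, OLD_SPACE, CODE_SPACE, MAP_SPACE, LO_SPACE };
    enum GarbageCollector { SCAVENGER, MARK_COMPACTOR };

    // Simplified: a new-space request can be served by a young-generation
    // collection, anything else needs a full mark-compact.
    GarbageCollector SelectCollectorFor(AllocationSpace space) {
      return space == NEW_SPACE ? SCAVENGER : MARK_COMPACTOR;
    }
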
@@ -1130,8 +1180,23 @@ void Heap::MoveElements(FixedArray* array, int dst_index, int src_index,
if (len == 0) return;
DCHECK(array->map() != fixed_cow_array_map());
- Object** dst_objects = array->data_start() + dst_index;
- MemMove(dst_objects, array->data_start() + src_index, len * kPointerSize);
+ Object** dst = array->data_start() + dst_index;
+ Object** src = array->data_start() + src_index;
+ if (FLAG_concurrent_marking && incremental_marking()->IsMarking()) {
+ if (dst < src) {
+ for (int i = 0; i < len; i++) {
+ base::AsAtomicWord::Relaxed_Store(
+ dst + i, base::AsAtomicWord::Relaxed_Load(src + i));
+ }
+ } else {
+ for (int i = len - 1; i >= 0; i--) {
+ base::AsAtomicWord::Relaxed_Store(
+ dst + i, base::AsAtomicWord::Relaxed_Load(src + i));
+ }
+ }
+ } else {
+ MemMove(dst, src, len * kPointerSize);
+ }
FIXED_ARRAY_ELEMENTS_WRITE_BARRIER(this, array, dst_index, len);
}
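
When concurrent marking is active, the overlapping copy in MoveElements above has to move one tagged word at a time with relaxed atomics, front-to-back when the destination lies below the source and back-to-front otherwise, so the concurrent marker never observes a torn or already-clobbered slot. A standalone sketch of that direction-aware relaxed copy, using std::atomic rather than V8's base::AsAtomicWord helpers:

    #include <atomic>
    #include <cstddef>

    // Copies len tagged words between possibly overlapping ranges in the same
    // backing store using relaxed atomic accesses. The copy direction is chosen
    // so that source words are read before they are overwritten (the same trick
    // memmove uses); the atomics only guarantee whole-word transfers.
    void RelaxedMoveWords(std::atomic<void*>* dst, std::atomic<void*>* src,
                          size_t len) {
      if (dst < src) {
        for (size_t i = 0; i < len; i++) {
          dst[i].store(src[i].load(std::memory_order_relaxed),
                       std::memory_order_relaxed);
        }
      } else {
        for (size_t i = len; i > 0; i--) {
          dst[i - 1].store(src[i - 1].load(std::memory_order_relaxed),
                           std::memory_order_relaxed);
        }
      }
    }
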
@@ -1524,7 +1589,7 @@ void Heap::MarkCompactEpilogue() {
PreprocessStackTraces();
DCHECK(incremental_marking()->IsStopped());
- mark_compact_collector()->marking_deque()->StopUsing();
+ mark_compact_collector()->marking_worklist()->StopUsing();
}
@@ -1537,8 +1602,6 @@ void Heap::MarkCompactPrologue() {
isolate_->compilation_cache()->MarkCompactPrologue();
- CompletelyClearInstanceofCache();
-
FlushNumberStringCache();
ClearNormalizedMapCaches();
}
@@ -1562,55 +1625,11 @@ void Heap::CheckNewSpaceExpansionCriteria() {
}
}
-
static bool IsUnscavengedHeapObject(Heap* heap, Object** p) {
return heap->InNewSpace(*p) &&
!HeapObject::cast(*p)->map_word().IsForwardingAddress();
}
-void PromotionQueue::Initialize() {
- // The last to-space page may be used for promotion queue. On promotion
- // conflict, we use the emergency stack.
- DCHECK((Page::kPageSize - MemoryChunk::kBodyOffset) % (2 * kPointerSize) ==
- 0);
- front_ = rear_ =
- reinterpret_cast<struct Entry*>(heap_->new_space()->ToSpaceEnd());
- limit_ = reinterpret_cast<struct Entry*>(
- Page::FromAllocationAreaAddress(reinterpret_cast<Address>(rear_))
- ->area_start());
- emergency_stack_ = NULL;
-}
-
-void PromotionQueue::Destroy() {
- DCHECK(is_empty());
- delete emergency_stack_;
- emergency_stack_ = NULL;
-}
-
-void PromotionQueue::RelocateQueueHead() {
- DCHECK(emergency_stack_ == NULL);
-
- Page* p = Page::FromAllocationAreaAddress(reinterpret_cast<Address>(rear_));
- struct Entry* head_start = rear_;
- struct Entry* head_end =
- Min(front_, reinterpret_cast<struct Entry*>(p->area_end()));
-
- int entries_count =
- static_cast<int>(head_end - head_start) / sizeof(struct Entry);
-
- emergency_stack_ = new List<Entry>(2 * entries_count);
-
- while (head_start != head_end) {
- struct Entry* entry = head_start++;
- // New space allocation in SemiSpaceCopyObject marked the region
- // overlapping with promotion queue as uninitialized.
- MSAN_MEMORY_IS_INITIALIZED(entry, sizeof(struct Entry));
- emergency_stack_->Add(*entry);
- }
- rear_ = head_end;
-}
-
-
class ScavengeWeakObjectRetainer : public WeakObjectRetainer {
public:
explicit ScavengeWeakObjectRetainer(Heap* heap) : heap_(heap) {}
@@ -1634,6 +1653,7 @@ class ScavengeWeakObjectRetainer : public WeakObjectRetainer {
void Heap::EvacuateYoungGeneration() {
TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_EVACUATE);
base::LockGuard<base::Mutex> guard(relocation_mutex());
+ ConcurrentMarking::PauseScope pause_scope(concurrent_marking());
if (!FLAG_concurrent_marking) {
DCHECK(fast_promotion_mode_);
DCHECK(CanExpandOldGeneration(new_space()->Size()));
@@ -1673,9 +1693,17 @@ void Heap::EvacuateYoungGeneration() {
SetGCState(NOT_IN_GC);
}
+static bool IsLogging(Isolate* isolate) {
+ return FLAG_verify_predictable || isolate->logger()->is_logging() ||
+ isolate->is_profiling() ||
+ (isolate->heap_profiler() != nullptr &&
+ isolate->heap_profiler()->is_tracking_object_moves());
+}
+
void Heap::Scavenge() {
TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_SCAVENGE);
base::LockGuard<base::Mutex> guard(relocation_mutex());
+ ConcurrentMarking::PauseScope pause_scope(concurrent_marking());
// There are soft limits in the allocation code, designed to trigger a mark
// sweep collection by failing allocations. There is no sense in trying to
// trigger one during scavenge: scavenges allocation should always succeed.
@@ -1698,37 +1726,26 @@ void Heap::Scavenge() {
// Used for updating survived_since_last_expansion_ at function end.
size_t survived_watermark = PromotedSpaceSizeOfObjects();
- scavenge_collector_->SelectScavengingVisitorsTable();
-
// Flip the semispaces. After flipping, to space is empty, from space has
// live objects.
new_space_->Flip();
new_space_->ResetAllocationInfo();
- // We need to sweep newly copied objects which can be either in the
- // to space or promoted to the old generation. For to-space
- // objects, we treat the bottom of the to space as a queue. Newly
- // copied and unswept objects lie between a 'front' mark and the
- // allocation pointer.
- //
- // Promoted objects can go into various old-generation spaces, and
- // can be allocated internally in the spaces (from the free list).
- // We treat the top of the to space as a queue of addresses of
- // promoted objects. The addresses of newly promoted and unswept
- // objects lie between a 'front' mark and a 'rear' mark that is
- // updated as a side effect of promoting an object.
- //
- // There is guaranteed to be enough room at the top of the to space
- // for the addresses of promoted objects: every object promoted
- // frees up its size in bytes from the top of the new space, and
- // objects are at least one pointer in size.
- Address new_space_front = new_space_->ToSpaceStart();
- promotion_queue_.Initialize();
-
- RootScavengeVisitor root_scavenge_visitor(this);
+ const int kScavengerTasks = 1;
+ const int kMainThreadId = 0;
+ CopiedList copied_list(kScavengerTasks);
+ PromotionList promotion_list(kScavengerTasks);
+ Scavenger scavenger(this, IsLogging(isolate()),
+ incremental_marking()->IsMarking(), &copied_list,
+ &promotion_list, kMainThreadId);
+ RootScavengeVisitor root_scavenge_visitor(this, &scavenger);
isolate()->global_handles()->IdentifyWeakUnmodifiedObjects(
- &IsUnmodifiedHeapObject);
+ &JSObject::IsUnmodifiedApiObject);
+
+ std::vector<MemoryChunk*> pages;
+ RememberedSet<OLD_TO_NEW>::IterateMemoryChunks(
+ this, [&pages](MemoryChunk* chunk) { pages.push_back(chunk); });
{
// Copy roots.
@@ -1739,23 +1756,29 @@ void Heap::Scavenge() {
{
// Copy objects reachable from the old generation.
TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_OLD_TO_NEW_POINTERS);
- RememberedSet<OLD_TO_NEW>::Iterate(
- this, SYNCHRONIZED, [this](Address addr) {
- return Scavenger::CheckAndScavengeObject(this, addr);
- });
-
- RememberedSet<OLD_TO_NEW>::IterateTyped(
- this, SYNCHRONIZED,
- [this](SlotType type, Address host_addr, Address addr) {
- return UpdateTypedSlotHelper::UpdateTypedSlot(
- isolate(), type, addr, [this](Object** addr) {
- // We expect that objects referenced by code are long living.
- // If we do not force promotion, then we need to clear
- // old_to_new slots in dead code objects after mark-compact.
- return Scavenger::CheckAndScavengeObject(
- this, reinterpret_cast<Address>(addr));
- });
- });
+
+ for (MemoryChunk* chunk : pages) {
+ base::LockGuard<base::RecursiveMutex> guard(chunk->mutex());
+ RememberedSet<OLD_TO_NEW>::Iterate(
+ chunk,
+ [this, &scavenger](Address addr) {
+ return scavenger.CheckAndScavengeObject(this, addr);
+ },
+ SlotSet::KEEP_EMPTY_BUCKETS);
+ RememberedSet<OLD_TO_NEW>::IterateTyped(
+ chunk,
+ [this, &scavenger](SlotType type, Address host_addr, Address addr) {
+ return UpdateTypedSlotHelper::UpdateTypedSlot(
+ isolate(), type, addr, [this, &scavenger](Object** addr) {
+ // We expect that objects referenced by code are long
+ // living. If we do not force promotion, then we need to
+ // clear old_to_new slots in dead code objects after
+ // mark-compact.
+ return scavenger.CheckAndScavengeObject(
+ this, reinterpret_cast<Address>(addr));
+ });
+ });
+ }
}
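
The new scavenge path above first snapshots the pages that have OLD_TO_NEW entries, then walks each page's remembered set under its mutex and lets a callback decide whether each recorded slot is kept. A simplified standalone sketch of that shape, with a std::set per chunk standing in for V8's SlotSet and a plain callback instead of CheckAndScavengeObject:

    #include <cstdint>
    #include <functional>
    #include <iterator>
    #include <mutex>
    #include <set>
    #include <vector>

    enum SlotCallbackResult { KEEP_SLOT, REMOVE_SLOT };

    // Simplified stand-ins for MemoryChunk and its OLD_TO_NEW slot set.
    struct Chunk {
      std::recursive_mutex mutex;
      std::set<uintptr_t> old_to_new_slots;
    };

    // Walks every recorded slot of every snapshotted page under the page's
    // mutex, dropping slots whose callback returns REMOVE_SLOT, the same shape
    // as the per-chunk RememberedSet<OLD_TO_NEW>::Iterate calls above.
    void IterateOldToNew(
        const std::vector<Chunk*>& pages,
        const std::function<SlotCallbackResult(uintptr_t)>& callback) {
      for (Chunk* chunk : pages) {
        std::lock_guard<std::recursive_mutex> guard(chunk->mutex);
        auto& slots = chunk->old_to_new_slots;
        for (auto it = slots.begin(); it != slots.end();) {
          it = (callback(*it) == REMOVE_SLOT) ? slots.erase(it) : std::next(it);
        }
      }
    }
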
{
@@ -1764,19 +1787,8 @@ void Heap::Scavenge() {
}
{
- // Copy objects reachable from the code flushing candidates list.
- TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_CODE_FLUSH_CANDIDATES);
- MarkCompactCollector* collector = mark_compact_collector();
- if (collector->is_code_flushing_enabled()) {
- collector->code_flusher()->VisitListHeads(&root_scavenge_visitor);
- collector->code_flusher()
- ->IteratePointersToFromSpace<StaticScavengeVisitor>();
- }
- }
-
- {
TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_SEMISPACE);
- new_space_front = DoScavenge(new_space_front);
+ scavenger.Process();
}
isolate()->global_handles()->MarkNewSpaceWeakUnmodifiedObjectsPending(
@@ -1784,25 +1796,27 @@ void Heap::Scavenge() {
isolate()->global_handles()->IterateNewSpaceWeakUnmodifiedRoots(
&root_scavenge_visitor);
- new_space_front = DoScavenge(new_space_front);
+ scavenger.Process();
+
+ scavenger.Finalize();
UpdateNewSpaceReferencesInExternalStringTable(
&UpdateNewSpaceReferenceInExternalStringTableEntry);
- promotion_queue_.Destroy();
-
- incremental_marking()->UpdateMarkingDequeAfterScavenge();
+ incremental_marking()->UpdateMarkingWorklistAfterScavenge();
ScavengeWeakObjectRetainer weak_object_retainer(this);
ProcessYoungWeakReferences(&weak_object_retainer);
- DCHECK(new_space_front == new_space_->top());
-
// Set age mark.
new_space_->set_age_mark(new_space_->top());
ArrayBufferTracker::FreeDeadInNewSpace(this);
+ RememberedSet<OLD_TO_NEW>::IterateMemoryChunks(this, [](MemoryChunk* chunk) {
+ RememberedSet<OLD_TO_NEW>::PreFreeEmptyBuckets(chunk);
+ });
+
// Update how much has survived scavenge.
DCHECK_GE(PromotedSpaceSizeOfObjects(), survived_watermark);
IncrementYoungSurvivorsCounter(PromotedSpaceSizeOfObjects() +
@@ -1992,49 +2006,6 @@ void Heap::VisitExternalResources(v8::ExternalResourceVisitor* visitor) {
external_string_table_.IterateAll(&external_string_table_visitor);
}
-Address Heap::DoScavenge(Address new_space_front) {
- do {
- SemiSpace::AssertValidRange(new_space_front, new_space_->top());
- // The addresses new_space_front and new_space_.top() define a
- // queue of unprocessed copied objects. Process them until the
- // queue is empty.
- while (new_space_front != new_space_->top()) {
- if (!Page::IsAlignedToPageSize(new_space_front)) {
- HeapObject* object = HeapObject::FromAddress(new_space_front);
- new_space_front +=
- StaticScavengeVisitor::IterateBody(object->map(), object);
- } else {
- new_space_front = Page::FromAllocationAreaAddress(new_space_front)
- ->next_page()
- ->area_start();
- }
- }
-
- // Promote and process all the to-be-promoted objects.
- {
- while (!promotion_queue()->is_empty()) {
- HeapObject* target;
- int32_t size;
- promotion_queue()->remove(&target, &size);
-
- // Promoted object might be already partially visited
- // during old space pointer iteration. Thus we search specifically
- // for pointers to from semispace instead of looking for pointers
- // to new space.
- DCHECK(!target->IsMap());
-
- IterateAndScavengePromotedObject(target, static_cast<int>(size));
- }
- }
-
- // Take another spin if there are now unswept objects in new space
- // (there are currently no more unswept promoted objects).
- } while (new_space_front != new_space_->top());
-
- return new_space_front;
-}
-
-
STATIC_ASSERT((FixedDoubleArray::kHeaderSize & kDoubleAlignmentMask) ==
0); // NOLINT
STATIC_ASSERT((FixedTypedArrayBase::kDataOffset & kDoubleAlignmentMask) ==
@@ -2122,29 +2093,28 @@ AllocationResult Heap::AllocatePartialMap(InstanceType instance_type,
AllocationResult allocation = AllocateRaw(Map::kSize, MAP_SPACE);
if (!allocation.To(&result)) return allocation;
// Map::cast cannot be used due to uninitialized map field.
- reinterpret_cast<Map*>(result)->set_map_after_allocation(
- reinterpret_cast<Map*>(root(kMetaMapRootIndex)), SKIP_WRITE_BARRIER);
- reinterpret_cast<Map*>(result)->set_instance_type(instance_type);
- reinterpret_cast<Map*>(result)->set_instance_size(instance_size);
+ Map* map = reinterpret_cast<Map*>(result);
+ map->set_map_after_allocation(reinterpret_cast<Map*>(root(kMetaMapRootIndex)),
+ SKIP_WRITE_BARRIER);
+ map->set_instance_type(instance_type);
+ map->set_instance_size(instance_size);
// Initialize to only containing tagged fields.
- reinterpret_cast<Map*>(result)->set_visitor_id(
- StaticVisitorBase::GetVisitorId(instance_type, instance_size, false));
if (FLAG_unbox_double_fields) {
- reinterpret_cast<Map*>(result)
- ->set_layout_descriptor(LayoutDescriptor::FastPointerLayout());
- }
- reinterpret_cast<Map*>(result)->clear_unused();
- reinterpret_cast<Map*>(result)
- ->set_inobject_properties_or_constructor_function_index(0);
- reinterpret_cast<Map*>(result)->set_unused_property_fields(0);
- reinterpret_cast<Map*>(result)->set_bit_field(0);
- reinterpret_cast<Map*>(result)->set_bit_field2(0);
+ map->set_layout_descriptor(LayoutDescriptor::FastPointerLayout());
+ }
+ // GetVisitorId requires a properly initialized LayoutDescriptor.
+ map->set_visitor_id(Map::GetVisitorId(map));
+ map->clear_unused();
+ map->set_inobject_properties_or_constructor_function_index(0);
+ map->set_unused_property_fields(0);
+ map->set_bit_field(0);
+ map->set_bit_field2(0);
int bit_field3 = Map::EnumLengthBits::encode(kInvalidEnumCacheSentinel) |
Map::OwnsDescriptors::encode(true) |
Map::ConstructionCounter::encode(Map::kNoSlackTracking);
- reinterpret_cast<Map*>(result)->set_bit_field3(bit_field3);
- reinterpret_cast<Map*>(result)->set_weak_cell_cache(Smi::kZero);
- return result;
+ map->set_bit_field3(bit_field3);
+ map->set_weak_cell_cache(Smi::kZero);
+ return map;
}
@@ -2176,7 +2146,7 @@ AllocationResult Heap::AllocateMap(InstanceType instance_type,
}
// Must be called only after |instance_type|, |instance_size| and
// |layout_descriptor| are set.
- map->set_visitor_id(Heap::GetStaticVisitorIdForMap(map));
+ map->set_visitor_id(Map::GetVisitorId(map));
map->set_bit_field(0);
map->set_bit_field2(1 << Map::kIsExtensible);
int bit_field3 = Map::EnumLengthBits::encode(kInvalidEnumCacheSentinel) |
@@ -2270,7 +2240,7 @@ bool Heap::CreateInitialMaps() {
}
ALLOCATE_PARTIAL_MAP(FIXED_ARRAY_TYPE, kVariableSizeSentinel, fixed_array);
- fixed_array_map()->set_elements_kind(FAST_HOLEY_ELEMENTS);
+ fixed_array_map()->set_elements_kind(HOLEY_ELEMENTS);
ALLOCATE_PARTIAL_MAP(ODDBALL_TYPE, Oddball::kSize, undefined);
ALLOCATE_PARTIAL_MAP(ODDBALL_TYPE, Oddball::kSize, null);
ALLOCATE_PARTIAL_MAP(ODDBALL_TYPE, Oddball::kSize, the_hole);
@@ -2345,7 +2315,7 @@ bool Heap::CreateInitialMaps() {
}
ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, fixed_cow_array)
- fixed_cow_array_map()->set_elements_kind(FAST_HOLEY_ELEMENTS);
+ fixed_cow_array_map()->set_elements_kind(HOLEY_ELEMENTS);
DCHECK_NE(fixed_array_map(), fixed_cow_array_map());
ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, scope_info)
@@ -2396,10 +2366,13 @@ bool Heap::CreateInitialMaps() {
}
ALLOCATE_VARSIZE_MAP(FIXED_DOUBLE_ARRAY_TYPE, fixed_double_array)
- fixed_double_array_map()->set_elements_kind(FAST_HOLEY_DOUBLE_ELEMENTS);
+ fixed_double_array_map()->set_elements_kind(HOLEY_DOUBLE_ELEMENTS);
ALLOCATE_VARSIZE_MAP(BYTE_ARRAY_TYPE, byte_array)
ALLOCATE_VARSIZE_MAP(BYTECODE_ARRAY_TYPE, bytecode_array)
ALLOCATE_VARSIZE_MAP(FREE_SPACE_TYPE, free_space)
+ ALLOCATE_VARSIZE_MAP(PROPERTY_ARRAY_TYPE, property_array)
+ ALLOCATE_VARSIZE_MAP(SMALL_ORDERED_HASH_MAP_TYPE, small_ordered_hash_map)
+ ALLOCATE_VARSIZE_MAP(SMALL_ORDERED_HASH_SET_TYPE, small_ordered_hash_set)
#define ALLOCATE_FIXED_TYPED_ARRAY_MAP(Type, type, TYPE, ctype, size) \
ALLOCATE_VARSIZE_MAP(FIXED_##TYPE##_ARRAY_TYPE, fixed_##type##_array)
@@ -2485,6 +2458,12 @@ bool Heap::CreateInitialMaps() {
set_empty_byte_array(byte_array);
}
+ {
+ PropertyArray* property_array;
+ if (!AllocatePropertyArray(0, TENURED).To(&property_array)) return false;
+ set_empty_property_array(property_array);
+ }
+
#define ALLOCATE_EMPTY_FIXED_TYPED_ARRAY(Type, type, TYPE, ctype, size) \
{ \
FixedTypedArrayBase* obj; \
@@ -2534,7 +2513,8 @@ AllocationResult Heap::AllocateCell(Object* value) {
return result;
}
-AllocationResult Heap::AllocatePropertyCell() {
+AllocationResult Heap::AllocatePropertyCell(Name* name) {
+ DCHECK(name->IsUniqueName());
int size = PropertyCell::kSize;
STATIC_ASSERT(PropertyCell::kSize <= kMaxRegularHeapObjectSize);
@@ -2548,6 +2528,7 @@ AllocationResult Heap::AllocatePropertyCell() {
cell->set_dependent_code(DependentCode::cast(empty_fixed_array()),
SKIP_WRITE_BARRIER);
cell->set_property_details(PropertyDetails(Smi::kZero));
+ cell->set_name(name);
cell->set_value(the_hole_value());
return result;
}
@@ -2625,6 +2606,9 @@ void Heap::CreateFixedStubs() {
// This eliminates the need for doing dictionary lookup in the
// stub cache for these stubs.
HandleScope scope(isolate());
+ // Canonicalize handles, so that we can share constant pool entries pointing
+ // to code targets without dereferencing their handles.
+ CanonicalHandleScope canonical(isolate());
// Create stubs that should be there, so we don't unexpectedly have to
// create them if we need them during the creation of another stub.
@@ -2731,10 +2715,6 @@ void Heap::CreateInitialObjects() {
// expanding the dictionary during bootstrapping.
set_code_stubs(*UnseededNumberDictionary::New(isolate(), 128));
- set_instanceof_cache_function(Smi::kZero);
- set_instanceof_cache_map(Smi::kZero);
- set_instanceof_cache_answer(Smi::kZero);
-
{
HandleScope scope(isolate());
#define SYMBOL_INIT(name) \
@@ -2768,14 +2748,14 @@ void Heap::CreateInitialObjects() {
#undef SYMBOL_INIT
}
- Handle<NameDictionary> empty_properties_dictionary =
- NameDictionary::NewEmpty(isolate(), TENURED);
- empty_properties_dictionary->SetRequiresCopyOnCapacityChange();
- set_empty_properties_dictionary(*empty_properties_dictionary);
+ Handle<NameDictionary> empty_property_dictionary =
+ NameDictionary::New(isolate(), 1, TENURED, USE_CUSTOM_MINIMUM_CAPACITY);
+ DCHECK(!empty_property_dictionary->HasSufficientCapacityToAdd(1));
+ set_empty_property_dictionary(*empty_property_dictionary);
- set_public_symbol_table(*empty_properties_dictionary);
- set_api_symbol_table(*empty_properties_dictionary);
- set_api_private_symbol_table(*empty_properties_dictionary);
+ set_public_symbol_table(*empty_property_dictionary);
+ set_api_symbol_table(*empty_property_dictionary);
+ set_api_private_symbol_table(*empty_property_dictionary);
set_number_string_cache(
*factory->NewFixedArray(kInitialNumberStringCacheSize * 2, TENURED));
@@ -2813,9 +2793,7 @@ void Heap::CreateInitialObjects() {
set_detached_contexts(empty_fixed_array());
set_retained_maps(ArrayList::cast(empty_fixed_array()));
- set_weak_object_to_code_table(
- *WeakHashTable::New(isolate(), 16, USE_DEFAULT_MINIMUM_CAPACITY,
- TENURED));
+ set_weak_object_to_code_table(*WeakHashTable::New(isolate(), 16, TENURED));
set_weak_new_space_object_to_code_list(
ArrayList::cast(*(factory->NewFixedArray(16, TENURED))));
@@ -2826,7 +2804,9 @@ void Heap::CreateInitialObjects() {
set_script_list(Smi::kZero);
Handle<SeededNumberDictionary> slow_element_dictionary =
- SeededNumberDictionary::NewEmpty(isolate(), TENURED);
+ SeededNumberDictionary::New(isolate(), 1, TENURED,
+ USE_CUSTOM_MINIMUM_CAPACITY);
+ DCHECK(!slow_element_dictionary->HasSufficientCapacityToAdd(1));
slow_element_dictionary->set_requires_slow_elements();
set_empty_slow_element_dictionary(*slow_element_dictionary);
@@ -2836,20 +2816,30 @@ void Heap::CreateInitialObjects() {
set_last_script_id(Smi::FromInt(v8::UnboundScript::kNoScriptId));
set_next_template_serial_number(Smi::kZero);
+ // Allocate the empty OrderedHashTable.
+ Handle<FixedArray> empty_ordered_hash_table =
+ factory->NewFixedArray(OrderedHashMap::kHashTableStartIndex, TENURED);
+ empty_ordered_hash_table->set_map_no_write_barrier(
+ *factory->ordered_hash_table_map());
+ for (int i = 0; i < empty_ordered_hash_table->length(); ++i) {
+ empty_ordered_hash_table->set(i, Smi::kZero);
+ }
+ set_empty_ordered_hash_table(*empty_ordered_hash_table);
+
// Allocate the empty script.
Handle<Script> script = factory->NewScript(factory->empty_string());
script->set_type(Script::TYPE_NATIVE);
set_empty_script(*script);
- Handle<PropertyCell> cell = factory->NewPropertyCell();
+ Handle<PropertyCell> cell = factory->NewPropertyCell(factory->empty_string());
cell->set_value(Smi::FromInt(Isolate::kProtectorValid));
set_array_protector(*cell);
- cell = factory->NewPropertyCell();
+ cell = factory->NewPropertyCell(factory->empty_string());
cell->set_value(the_hole_value());
set_empty_property_cell(*cell);
- cell = factory->NewPropertyCell();
+ cell = factory->NewPropertyCell(factory->empty_string());
cell->set_value(Smi::FromInt(Isolate::kProtectorValid));
set_array_iterator_protector(*cell);
@@ -2857,11 +2847,11 @@ void Heap::CreateInitialObjects() {
handle(Smi::FromInt(Isolate::kProtectorValid), isolate()));
set_is_concat_spreadable_protector(*is_concat_spreadable_cell);
- Handle<Cell> species_cell = factory->NewCell(
- handle(Smi::FromInt(Isolate::kProtectorValid), isolate()));
- set_species_protector(*species_cell);
+ cell = factory->NewPropertyCell(factory->empty_string());
+ cell->set_value(Smi::FromInt(Isolate::kProtectorValid));
+ set_species_protector(*cell);
- cell = factory->NewPropertyCell();
+ cell = factory->NewPropertyCell(factory->empty_string());
cell->set_value(Smi::FromInt(Isolate::kProtectorValid));
set_string_length_protector(*cell);
@@ -2869,7 +2859,7 @@ void Heap::CreateInitialObjects() {
handle(Smi::FromInt(Isolate::kProtectorValid), isolate()));
set_fast_array_iteration_protector(*fast_array_iteration_cell);
- cell = factory->NewPropertyCell();
+ cell = factory->NewPropertyCell(factory->empty_string());
cell->set_value(Smi::FromInt(Isolate::kProtectorValid));
set_array_buffer_neutering_protector(*cell);
@@ -2929,9 +2919,6 @@ void Heap::CreateInitialObjects() {
bool Heap::RootCanBeWrittenAfterInitialization(Heap::RootListIndex root_index) {
switch (root_index) {
case kNumberStringCacheRootIndex:
- case kInstanceofCacheFunctionRootIndex:
- case kInstanceofCacheMapRootIndex:
- case kInstanceofCacheAnswerRootIndex:
case kCodeStubsRootIndex:
case kScriptListRootIndex:
case kMaterializedObjectsRootIndex:
@@ -2948,6 +2935,7 @@ bool Heap::RootCanBeWrittenAfterInitialization(Heap::RootListIndex root_index) {
case kPublicSymbolTableRootIndex:
case kApiSymbolTableRootIndex:
case kApiPrivateSymbolTableRootIndex:
+ case kMessageListenersRootIndex:
// Smi values
#define SMI_ENTRY(type, name, Name) case k##Name##RootIndex:
SMI_ROOT_LIST(SMI_ENTRY)
@@ -2962,23 +2950,10 @@ bool Heap::RootCanBeWrittenAfterInitialization(Heap::RootListIndex root_index) {
}
bool Heap::RootCanBeTreatedAsConstant(RootListIndex root_index) {
- return !RootCanBeWrittenAfterInitialization(root_index) &&
- !InNewSpace(root(root_index));
-}
-
-bool Heap::IsUnmodifiedHeapObject(Object** p) {
- Object* object = *p;
- if (object->IsSmi()) return false;
- HeapObject* heap_object = HeapObject::cast(object);
- if (!object->IsJSObject()) return false;
- JSObject* js_object = JSObject::cast(object);
- if (!js_object->WasConstructedFromApiFunction()) return false;
- Object* maybe_constructor = js_object->map()->GetConstructor();
- if (!maybe_constructor->IsJSFunction()) return false;
- JSFunction* constructor = JSFunction::cast(maybe_constructor);
- if (js_object->elements()->length() != 0) return false;
-
- return constructor->initial_map() == heap_object->map();
+ bool can_be = !RootCanBeWrittenAfterInitialization(root_index) &&
+ !InNewSpace(root(root_index));
+ DCHECK_IMPLIES(can_be, IsImmovable(HeapObject::cast(root(root_index))));
+ return can_be;
}
int Heap::FullSizeNumberStringCacheLength() {
@@ -3021,7 +2996,6 @@ Heap::RootListIndex Heap::RootIndexForFixedTypedArray(
default:
UNREACHABLE();
- return kUndefinedValueRootIndex;
}
}
@@ -3037,12 +3011,10 @@ Heap::RootListIndex Heap::RootIndexForEmptyFixedTypedArray(
#undef ELEMENT_KIND_TO_ROOT_INDEX
default:
UNREACHABLE();
- return kUndefinedValueRootIndex;
}
}
-
-FixedTypedArrayBase* Heap::EmptyFixedTypedArrayForMap(Map* map) {
+FixedTypedArrayBase* Heap::EmptyFixedTypedArrayForMap(const Map* map) {
return FixedTypedArrayBase::cast(
roots_[RootIndexForEmptyFixedTypedArray(map->elements_kind())]);
}
@@ -3060,6 +3032,45 @@ AllocationResult Heap::AllocateForeign(Address address,
return result;
}
+AllocationResult Heap::AllocateSmallOrderedHashSet(int capacity,
+ PretenureFlag pretenure) {
+ DCHECK_EQ(0, capacity % SmallOrderedHashSet::kLoadFactor);
+ CHECK_GE(SmallOrderedHashSet::kMaxCapacity, capacity);
+
+ int size = SmallOrderedHashSet::Size(capacity);
+ AllocationSpace space = SelectSpace(pretenure);
+ HeapObject* result = nullptr;
+ {
+ AllocationResult allocation = AllocateRaw(size, space);
+ if (!allocation.To(&result)) return allocation;
+ }
+
+ result->set_map_after_allocation(small_ordered_hash_set_map(),
+ SKIP_WRITE_BARRIER);
+ Handle<SmallOrderedHashSet> table(SmallOrderedHashSet::cast(result));
+ table->Initialize(isolate(), capacity);
+ return result;
+}
+
+AllocationResult Heap::AllocateSmallOrderedHashMap(int capacity,
+ PretenureFlag pretenure) {
+ DCHECK_EQ(0, capacity % SmallOrderedHashMap::kLoadFactor);
+ CHECK_GE(SmallOrderedHashMap::kMaxCapacity, capacity);
+
+ int size = SmallOrderedHashMap::Size(capacity);
+ AllocationSpace space = SelectSpace(pretenure);
+ HeapObject* result = nullptr;
+ {
+ AllocationResult allocation = AllocateRaw(size, space);
+ if (!allocation.To(&result)) return allocation;
+ }
+
+ result->set_map_after_allocation(small_ordered_hash_map_map(),
+ SKIP_WRITE_BARRIER);
+ Handle<SmallOrderedHashMap> table(SmallOrderedHashMap::cast(result));
+ table->Initialize(isolate(), capacity);
+ return result;
+}
AllocationResult Heap::AllocateByteArray(int length, PretenureFlag pretenure) {
if (length < 0 || length > ByteArray::kMaxLength) {
@@ -3130,7 +3141,7 @@ HeapObject* Heap::CreateFillerObjectAt(Address addr, int size,
filler->set_map_after_allocation(
reinterpret_cast<Map*>(root(kFreeSpaceMapRootIndex)),
SKIP_WRITE_BARRIER);
- FreeSpace::cast(filler)->nobarrier_set_size(size);
+ FreeSpace::cast(filler)->relaxed_write_size(size);
}
if (mode == ClearRecordedSlots::kYes) {
ClearRecordedSlotRange(addr, addr + size);
@@ -3172,9 +3183,14 @@ void Heap::AdjustLiveBytes(HeapObject* object, int by) {
lo_space()->AdjustLiveBytes(by);
} else if (!in_heap_iterator() &&
!mark_compact_collector()->sweeping_in_progress() &&
- ObjectMarking::IsBlack(object, MarkingState::Internal(object))) {
+ ObjectMarking::IsBlack<IncrementalMarking::kAtomicity>(
+ object, MarkingState::Internal(object))) {
DCHECK(MemoryChunk::FromAddress(object->address())->SweepingDone());
+#ifdef V8_CONCURRENT_MARKING
+ MarkingState::Internal(object).IncrementLiveBytes<AccessMode::ATOMIC>(by);
+#else
MarkingState::Internal(object).IncrementLiveBytes(by);
+#endif
}
}
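
The #ifdef above switches live-byte accounting to an atomic increment when V8 is built with concurrent marking, because the concurrent marker updates the same per-page counter. A tiny sketch of such an AccessMode-parameterized update, with a std::atomic field standing in for the real per-chunk live-bytes slot (used for both paths only to keep the sketch self-contained):

    #include <atomic>
    #include <cstdint>

    enum class AccessMode { NON_ATOMIC, ATOMIC };

    template <AccessMode mode>
    void IncrementLiveBytes(std::atomic<intptr_t>* live_bytes, intptr_t by) {
      if (mode == AccessMode::ATOMIC) {
        // Concurrent marker and main thread may race on this counter.
        live_bytes->fetch_add(by, std::memory_order_relaxed);
      } else {
        // Single-threaded path: an unsynchronized read-modify-write suffices.
        live_bytes->store(live_bytes->load(std::memory_order_relaxed) + by,
                          std::memory_order_relaxed);
      }
    }

    // Usage: IncrementLiveBytes<AccessMode::ATOMIC>(&page_live_bytes, size);
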
@@ -3206,14 +3222,9 @@ FixedArrayBase* Heap::LeftTrimFixedArray(FixedArrayBase* object,
Address old_start = object->address();
Address new_start = old_start + bytes_to_trim;
- // Transfer the mark bits to their new location if the object is not within
- // a black area.
- if (!incremental_marking()->black_allocation() ||
- !Marking::IsBlack(ObjectMarking::MarkBitFrom(
- HeapObject::FromAddress(new_start),
- MarkingState::Internal(HeapObject::FromAddress(new_start))))) {
- incremental_marking()->TransferMark(this, object,
- HeapObject::FromAddress(new_start));
+ if (incremental_marking()->IsMarking()) {
+ incremental_marking()->NotifyLeftTrimming(
+ object, HeapObject::FromAddress(new_start));
}
// Technically in new space this write might be omitted (except for
@@ -3224,10 +3235,9 @@ FixedArrayBase* Heap::LeftTrimFixedArray(FixedArrayBase* object,
// Initialize header of the trimmed array. Since left trimming is only
// performed on pages which are not concurrently swept, creating a filler
// object does not require synchronization.
- Object** former_start = HeapObject::RawField(object, 0);
- int new_start_index = elements_to_trim * (element_size / kPointerSize);
- former_start[new_start_index] = map;
- former_start[new_start_index + 1] = Smi::FromInt(len - elements_to_trim);
+ RELAXED_WRITE_FIELD(object, bytes_to_trim, map);
+ RELAXED_WRITE_FIELD(object, bytes_to_trim + kPointerSize,
+ Smi::FromInt(len - elements_to_trim));
FixedArrayBase* new_object =
FixedArrayBase::cast(HeapObject::FromAddress(new_start));
@@ -3294,7 +3304,8 @@ void Heap::RightTrimFixedArray(FixedArrayBase* object, int elements_to_trim) {
// Clear the mark bits of the black area that belongs now to the filler.
// This is an optimization. The sweeper will release black fillers anyway.
if (incremental_marking()->black_allocation() &&
- ObjectMarking::IsBlackOrGrey(filler, MarkingState::Internal(filler))) {
+ ObjectMarking::IsBlackOrGrey<IncrementalMarking::kAtomicity>(
+ filler, MarkingState::Internal(filler))) {
Page* page = Page::FromAddress(new_end);
MarkingState::Internal(page).bitmap()->ClearRange(
page->AddressToMarkbitIndex(new_end),
@@ -3424,8 +3435,6 @@ AllocationResult Heap::AllocateCode(int object_size, bool immovable) {
DCHECK(!memory_allocator()->code_range()->valid() ||
memory_allocator()->code_range()->contains(code->address()) ||
object_size <= code_space()->AreaSize());
- code->set_gc_metadata(Smi::kZero);
- code->set_ic_age(global_ic_age_);
return code;
}
@@ -3453,7 +3462,7 @@ AllocationResult Heap::CopyCode(Code* code) {
new_code->Relocate(new_addr - old_addr);
// We have to iterate over the object and process its pointers when black
// allocation is on.
- incremental_marking()->IterateBlackObject(new_code);
+ incremental_marking()->ProcessBlackAllocatedObject(new_code);
// Record all references to embedded objects in the new code object.
RecordWritesIntoCode(new_code);
return new_code;
@@ -3505,8 +3514,10 @@ AllocationResult Heap::Allocate(Map* map, AllocationSpace space,
HeapObject* result = nullptr;
AllocationResult allocation = AllocateRaw(size, space);
if (!allocation.To(&result)) return allocation;
- // No need for write barrier since object is white and map is in old space.
- result->set_map_after_allocation(map, SKIP_WRITE_BARRIER);
+ // New space objects are allocated white.
+ WriteBarrierMode write_barrier_mode =
+ space == NEW_SPACE ? SKIP_WRITE_BARRIER : UPDATE_WRITE_BARRIER;
+ result->set_map_after_allocation(map, write_barrier_mode);
if (allocation_site != NULL) {
AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>(
reinterpret_cast<Address>(result) + map->instance_size());
@@ -3515,10 +3526,9 @@ AllocationResult Heap::Allocate(Map* map, AllocationSpace space,
return result;
}
-
-void Heap::InitializeJSObjectFromMap(JSObject* obj, FixedArray* properties,
+void Heap::InitializeJSObjectFromMap(JSObject* obj, Object* properties,
Map* map) {
- obj->set_properties(properties);
+ obj->set_raw_properties_or_hash(properties);
obj->initialize_elements();
// TODO(1240798): Initialize the object's body using valid initial values
// according to the object's initial map. For example, if the map's
@@ -3614,6 +3624,10 @@ AllocationResult Heap::CopyJSObject(JSObject* source, AllocationSite* site) {
map->instance_type() == JS_ERROR_TYPE ||
map->instance_type() == JS_ARRAY_TYPE ||
map->instance_type() == JS_API_OBJECT_TYPE ||
+ map->instance_type() == WASM_INSTANCE_TYPE ||
+ map->instance_type() == WASM_MEMORY_TYPE ||
+ map->instance_type() == WASM_MODULE_TYPE ||
+ map->instance_type() == WASM_TABLE_TYPE ||
map->instance_type() == JS_SPECIAL_API_OBJECT_TYPE);
int object_size = map->instance_size();
@@ -3640,7 +3654,6 @@ AllocationResult Heap::CopyJSObject(JSObject* source, AllocationSite* site) {
SLOW_DCHECK(JSObject::cast(clone)->GetElementsKind() ==
source->GetElementsKind());
FixedArrayBase* elements = FixedArrayBase::cast(source->elements());
- FixedArray* properties = FixedArray::cast(source->properties());
// Update elements if necessary.
if (elements->length() > 0) {
FixedArrayBase* elem = nullptr;
@@ -3648,7 +3661,7 @@ AllocationResult Heap::CopyJSObject(JSObject* source, AllocationSite* site) {
AllocationResult allocation;
if (elements->map() == fixed_cow_array_map()) {
allocation = FixedArray::cast(elements);
- } else if (source->HasFastDoubleElements()) {
+ } else if (source->HasDoubleElements()) {
allocation = CopyFixedDoubleArray(FixedDoubleArray::cast(elements));
} else {
allocation = CopyFixedArray(FixedArray::cast(elements));
@@ -3657,14 +3670,28 @@ AllocationResult Heap::CopyJSObject(JSObject* source, AllocationSite* site) {
}
JSObject::cast(clone)->set_elements(elem, SKIP_WRITE_BARRIER);
}
+
// Update properties if necessary.
- if (properties->length() > 0) {
+ if (source->HasFastProperties()) {
+ if (source->property_array()->length() > 0) {
+ PropertyArray* properties = source->property_array();
+ PropertyArray* prop = nullptr;
+ {
+ // TODO(gsathya): Do not copy hash code.
+ AllocationResult allocation = CopyPropertyArray(properties);
+ if (!allocation.To(&prop)) return allocation;
+ }
+ JSObject::cast(clone)->set_raw_properties_or_hash(prop,
+ SKIP_WRITE_BARRIER);
+ }
+ } else {
+ FixedArray* properties = FixedArray::cast(source->property_dictionary());
FixedArray* prop = nullptr;
{
AllocationResult allocation = CopyFixedArray(properties);
if (!allocation.To(&prop)) return allocation;
}
- JSObject::cast(clone)->set_properties(prop, SKIP_WRITE_BARRIER);
+ JSObject::cast(clone)->set_raw_properties_or_hash(prop, SKIP_WRITE_BARRIER);
}
// Return the new clone.
return clone;
@@ -3877,9 +3904,9 @@ AllocationResult Heap::AllocateEmptyFixedTypedArray(
return AllocateFixedTypedArray(0, array_type, false, TENURED);
}
-
-AllocationResult Heap::CopyFixedArrayAndGrow(FixedArray* src, int grow_by,
- PretenureFlag pretenure) {
+template <typename T>
+AllocationResult Heap::CopyArrayAndGrow(T* src, int grow_by,
+ PretenureFlag pretenure) {
int old_len = src->length();
int new_len = old_len + grow_by;
DCHECK(new_len >= old_len);
@@ -3889,8 +3916,8 @@ AllocationResult Heap::CopyFixedArrayAndGrow(FixedArray* src, int grow_by,
if (!allocation.To(&obj)) return allocation;
}
- obj->set_map_after_allocation(fixed_array_map(), SKIP_WRITE_BARRIER);
- FixedArray* result = FixedArray::cast(obj);
+ obj->set_map_after_allocation(src->map(), SKIP_WRITE_BARRIER);
+ T* result = T::cast(obj);
result->set_length(new_len);
// Copy the content.
@@ -3901,6 +3928,12 @@ AllocationResult Heap::CopyFixedArrayAndGrow(FixedArray* src, int grow_by,
return result;
}
+template AllocationResult Heap::CopyArrayAndGrow(FixedArray* src, int grow_by,
+ PretenureFlag pretenure);
+template AllocationResult Heap::CopyArrayAndGrow(PropertyArray* src,
+ int grow_by,
+ PretenureFlag pretenure);
+
AllocationResult Heap::CopyFixedArrayUpTo(FixedArray* src, int new_len,
PretenureFlag pretenure) {
if (new_len == 0) return empty_fixed_array();
@@ -3924,7 +3957,8 @@ AllocationResult Heap::CopyFixedArrayUpTo(FixedArray* src, int new_len,
return result;
}
-AllocationResult Heap::CopyFixedArrayWithMap(FixedArray* src, Map* map) {
+template <typename T>
+AllocationResult Heap::CopyArrayWithMap(T* src, Map* map) {
int len = src->length();
HeapObject* obj = nullptr;
{
@@ -3933,14 +3967,14 @@ AllocationResult Heap::CopyFixedArrayWithMap(FixedArray* src, Map* map) {
}
obj->set_map_after_allocation(map, SKIP_WRITE_BARRIER);
- FixedArray* result = FixedArray::cast(obj);
+ T* result = T::cast(obj);
DisallowHeapAllocation no_gc;
WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
// Eliminate the write barrier if possible.
if (mode == SKIP_WRITE_BARRIER) {
CopyBlock(obj->address() + kPointerSize, src->address() + kPointerSize,
- FixedArray::SizeFor(len) - kPointerSize);
+ T::SizeFor(len) - kPointerSize);
return obj;
}
@@ -3950,6 +3984,16 @@ AllocationResult Heap::CopyFixedArrayWithMap(FixedArray* src, Map* map) {
return result;
}
+template AllocationResult Heap::CopyArrayWithMap(FixedArray* src, Map* map);
+template AllocationResult Heap::CopyArrayWithMap(PropertyArray* src, Map* map);
+
+AllocationResult Heap::CopyFixedArrayWithMap(FixedArray* src, Map* map) {
+ return CopyArrayWithMap(src, map);
+}
+
+AllocationResult Heap::CopyPropertyArray(PropertyArray* src) {
+ return CopyArrayWithMap(src, property_array_map());
+}
AllocationResult Heap::CopyFixedDoubleArrayWithMap(FixedDoubleArray* src,
Map* map) {
@@ -4007,12 +4051,23 @@ AllocationResult Heap::AllocateFixedArrayWithFiller(int length,
return array;
}
+AllocationResult Heap::AllocatePropertyArray(int length,
+ PretenureFlag pretenure) {
+ DCHECK(length >= 0);
+ DCHECK(!InNewSpace(undefined_value()));
+ HeapObject* result = nullptr;
+ {
+ AllocationResult allocation = AllocateRawFixedArray(length, pretenure);
+ if (!allocation.To(&result)) return allocation;
+ }
-AllocationResult Heap::AllocateFixedArray(int length, PretenureFlag pretenure) {
- return AllocateFixedArrayWithFiller(length, pretenure, undefined_value());
+ result->set_map_after_allocation(property_array_map(), SKIP_WRITE_BARRIER);
+ PropertyArray* array = PropertyArray::cast(result);
+ array->set_length(length);
+ MemsetPointer(array->data_start(), undefined_value(), length);
+ return result;
}
-
AllocationResult Heap::AllocateUninitializedFixedArray(int length) {
if (length == 0) return empty_fixed_array();
@@ -4095,7 +4150,6 @@ AllocationResult Heap::AllocateStruct(InstanceType type) {
#undef MAKE_CASE
default:
UNREACHABLE();
- return exception();
}
int size = map->instance_size();
Struct* result = nullptr;
@@ -4195,7 +4249,7 @@ bool Heap::HasHighFragmentation(size_t used, size_t committed) {
bool Heap::ShouldOptimizeForMemoryUsage() {
return FLAG_optimize_for_size || isolate()->IsIsolateInBackground() ||
- HighMemoryPressure() || IsLowMemoryDevice();
+ HighMemoryPressure();
}
void Heap::ActivateMemoryReducerIfNeeded() {
@@ -4235,11 +4289,11 @@ void Heap::FinalizeIncrementalMarkingIfComplete(
if (incremental_marking()->IsMarking() &&
(incremental_marking()->IsReadyToOverApproximateWeakClosure() ||
(!incremental_marking()->finalize_marking_completed() &&
- mark_compact_collector()->marking_deque()->IsEmpty() &&
+ mark_compact_collector()->marking_worklist()->IsEmpty() &&
local_embedder_heap_tracer()->ShouldFinalizeIncrementalMarking()))) {
FinalizeIncrementalMarking(gc_reason);
} else if (incremental_marking()->IsComplete() ||
- (mark_compact_collector()->marking_deque()->IsEmpty() &&
+ (mark_compact_collector()->marking_worklist()->IsEmpty() &&
local_embedder_heap_tracer()
->ShouldFinalizeIncrementalMarking())) {
CollectAllGarbage(current_gc_flags_, gc_reason, current_gc_callback_flags_);
@@ -4262,11 +4316,11 @@ void Heap::RegisterDeserializedObjectsForBlackAllocation(
Address addr = chunk.start;
while (addr < chunk.end) {
HeapObject* obj = HeapObject::FromAddress(addr);
- // There might be grey objects due to black to grey transitions in
- // incremental marking. E.g. see VisitNativeContextIncremental.
- DCHECK(ObjectMarking::IsBlackOrGrey(obj, MarkingState::Internal(obj)));
- if (ObjectMarking::IsBlack(obj, MarkingState::Internal(obj))) {
- incremental_marking()->IterateBlackObject(obj);
+ // Objects can have any color because incremental marking can
+ // start in the middle of Heap::ReserveSpace().
+ if (ObjectMarking::IsBlack<IncrementalMarking::kAtomicity>(
+ obj, MarkingState::Internal(obj))) {
+ incremental_marking()->ProcessBlackAllocatedObject(obj);
}
addr += obj->Size();
}
@@ -4278,7 +4332,7 @@ void Heap::RegisterDeserializedObjectsForBlackAllocation(
// Large object space doesn't use reservations, so it needs custom handling.
for (HeapObject* object : *large_objects) {
- incremental_marking()->IterateBlackObject(object);
+ incremental_marking()->ProcessBlackAllocatedObject(object);
}
}
@@ -4518,10 +4572,12 @@ void Heap::CheckMemoryPressure() {
GarbageCollectionReason::kMemoryPressure);
}
}
- MemoryReducer::Event event;
- event.type = MemoryReducer::kPossibleGarbage;
- event.time_ms = MonotonicallyIncreasingTimeInMs();
- memory_reducer_->NotifyPossibleGarbage(event);
+ if (memory_reducer_) {
+ MemoryReducer::Event event;
+ event.type = MemoryReducer::kPossibleGarbage;
+ event.time_ms = MonotonicallyIncreasingTimeInMs();
+ memory_reducer_->NotifyPossibleGarbage(event);
+ }
}
void Heap::CollectGarbageOnMemoryPressure() {
@@ -4701,7 +4757,6 @@ const char* Heap::GarbageCollectionReasonToString(
return "unknown";
}
UNREACHABLE();
- return "";
}
bool Heap::Contains(HeapObject* value) {
@@ -4743,7 +4798,6 @@ bool Heap::InSpace(HeapObject* value, AllocationSpace space) {
return lo_space_->Contains(value);
}
UNREACHABLE();
- return false;
}
bool Heap::InSpaceSlow(Address addr, AllocationSpace space) {
@@ -4765,7 +4819,6 @@ bool Heap::InSpaceSlow(Address addr, AllocationSpace space) {
return lo_space_->ContainsSlow(addr);
}
UNREACHABLE();
- return false;
}
@@ -4929,12 +4982,14 @@ template <RememberedSetType direction>
void CollectSlots(MemoryChunk* chunk, Address start, Address end,
std::set<Address>* untyped,
std::set<std::pair<SlotType, Address> >* typed) {
- RememberedSet<direction>::Iterate(chunk, [start, end, untyped](Address slot) {
- if (start <= slot && slot < end) {
- untyped->insert(slot);
- }
- return KEEP_SLOT;
- });
+ RememberedSet<direction>::Iterate(chunk,
+ [start, end, untyped](Address slot) {
+ if (start <= slot && slot < end) {
+ untyped->insert(slot);
+ }
+ return KEEP_SLOT;
+ },
+ SlotSet::PREFREE_EMPTY_BUCKETS);
RememberedSet<direction>::IterateTyped(
chunk, [start, end, typed](SlotType type, Address host, Address slot) {
if (start <= slot && slot < end) {
@@ -4974,91 +5029,19 @@ void Heap::ZapFromSpace() {
}
}
-class IterateAndScavengePromotedObjectsVisitor final : public ObjectVisitor {
- public:
- IterateAndScavengePromotedObjectsVisitor(Heap* heap, bool record_slots)
- : heap_(heap), record_slots_(record_slots) {}
-
- inline void VisitPointers(HeapObject* host, Object** start,
- Object** end) override {
- Address slot_address = reinterpret_cast<Address>(start);
- Page* page = Page::FromAddress(slot_address);
-
- while (slot_address < reinterpret_cast<Address>(end)) {
- Object** slot = reinterpret_cast<Object**>(slot_address);
- Object* target = *slot;
-
- if (target->IsHeapObject()) {
- if (heap_->InFromSpace(target)) {
- Scavenger::ScavengeObject(reinterpret_cast<HeapObject**>(slot),
- HeapObject::cast(target));
- target = *slot;
- if (heap_->InNewSpace(target)) {
- SLOW_DCHECK(heap_->InToSpace(target));
- SLOW_DCHECK(target->IsHeapObject());
- RememberedSet<OLD_TO_NEW>::Insert(page, slot_address);
- }
- SLOW_DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(
- HeapObject::cast(target)));
- } else if (record_slots_ &&
- MarkCompactCollector::IsOnEvacuationCandidate(
- HeapObject::cast(target))) {
- heap_->mark_compact_collector()->RecordSlot(host, slot, target);
- }
- }
-
- slot_address += kPointerSize;
- }
- }
-
- inline void VisitCodeEntry(JSFunction* host,
- Address code_entry_slot) override {
- // Black allocation requires us to process objects referenced by
- // promoted objects.
- if (heap_->incremental_marking()->black_allocation()) {
- Code* code = Code::cast(Code::GetObjectFromEntryAddress(code_entry_slot));
- heap_->incremental_marking()->WhiteToGreyAndPush(code);
- }
- }
-
- private:
- Heap* heap_;
- bool record_slots_;
-};
-
-void Heap::IterateAndScavengePromotedObject(HeapObject* target, int size) {
- // We are not collecting slots on new space objects during mutation
- // thus we have to scan for pointers to evacuation candidates when we
- // promote objects. But we should not record any slots in non-black
- // objects. Grey object's slots would be rescanned.
- // White object might not survive until the end of collection
- // it would be a violation of the invariant to record it's slots.
- bool record_slots = false;
- if (incremental_marking()->IsCompacting()) {
- record_slots =
- ObjectMarking::IsBlack(target, MarkingState::Internal(target));
- }
-
- IterateAndScavengePromotedObjectsVisitor visitor(this, record_slots);
- if (target->IsJSFunction()) {
- // JSFunctions reachable through kNextFunctionLinkOffset are weak. Slots for
- // this links are recorded during processing of weak lists.
- JSFunction::BodyDescriptorWeakCode::IterateBody(target, size, &visitor);
- } else {
- target->IterateBody(target->map()->instance_type(), size, &visitor);
- }
-}
-
void Heap::IterateRoots(RootVisitor* v, VisitMode mode) {
IterateStrongRoots(v, mode);
IterateWeakRoots(v, mode);
}
void Heap::IterateWeakRoots(RootVisitor* v, VisitMode mode) {
+ const bool isMinorGC = mode == VISIT_ALL_IN_SCAVENGE ||
+ mode == VISIT_ALL_IN_MINOR_MC_MARK ||
+ mode == VISIT_ALL_IN_MINOR_MC_UPDATE;
v->VisitRootPointer(Root::kStringTable, reinterpret_cast<Object**>(
&roots_[kStringTableRootIndex]));
v->Synchronize(VisitorSynchronization::kStringTable);
- if (mode != VISIT_ALL_IN_SCAVENGE && mode != VISIT_ALL_IN_SWEEP_NEWSPACE) {
+ if (!isMinorGC && mode != VISIT_ALL_IN_SWEEP_NEWSPACE) {
// Scavenge collections have special processing for this.
external_string_table_.IterateAll(v);
}
@@ -5123,6 +5106,9 @@ class FixStaleLeftTrimmedHandlesVisitor : public RootVisitor {
};
void Heap::IterateStrongRoots(RootVisitor* v, VisitMode mode) {
+ const bool isMinorGC = mode == VISIT_ALL_IN_SCAVENGE ||
+ mode == VISIT_ALL_IN_MINOR_MC_MARK ||
+ mode == VISIT_ALL_IN_MINOR_MC_UPDATE;
v->VisitRootPointers(Root::kStrongRootList, &roots_[0],
&roots_[kStrongRootListLength]);
v->Synchronize(VisitorSynchronization::kStrongRootList);
@@ -5153,7 +5139,7 @@ void Heap::IterateStrongRoots(RootVisitor* v, VisitMode mode) {
// Iterate over the builtin code objects and code stubs in the
// heap. Note that it is not necessary to iterate over code objects
// on scavenge collections.
- if (mode != VISIT_ALL_IN_SCAVENGE && mode != VISIT_ALL_IN_MINOR_MC_UPDATE) {
+ if (!isMinorGC) {
isolate_->builtins()->IterateBuiltins(v);
v->Synchronize(VisitorSynchronization::kBuiltins);
isolate_->interpreter()->IterateDispatchTable(v);
@@ -5173,8 +5159,11 @@ void Heap::IterateStrongRoots(RootVisitor* v, VisitMode mode) {
case VISIT_ALL_IN_SCAVENGE:
isolate_->global_handles()->IterateNewSpaceStrongAndDependentRoots(v);
break;
+ case VISIT_ALL_IN_MINOR_MC_MARK:
+      // Global handles are processed manually by the minor MC.
+ break;
case VISIT_ALL_IN_MINOR_MC_UPDATE:
- isolate_->global_handles()->IterateAllNewSpaceRoots(v);
+      // Global handles are processed manually by the minor MC.
break;
case VISIT_ALL_IN_SWEEP_NEWSPACE:
case VISIT_ALL:
@@ -5184,7 +5173,7 @@ void Heap::IterateStrongRoots(RootVisitor* v, VisitMode mode) {
v->Synchronize(VisitorSynchronization::kGlobalHandles);
// Iterate over eternal handles.
- if (mode == VISIT_ALL_IN_SCAVENGE || mode == VISIT_ALL_IN_MINOR_MC_UPDATE) {
+ if (isMinorGC) {
isolate_->eternal_handles()->IterateNewSpaceRoots(v);
} else {
isolate_->eternal_handles()->IterateAllRoots(v);
@@ -5215,16 +5204,18 @@ void Heap::IterateStrongRoots(RootVisitor* v, VisitMode mode) {
// TODO(1236194): Since the heap size is configurable on the command line
// and through the API, we should gracefully handle the case that the heap
// size is not big enough to fit all the initial objects.
-bool Heap::ConfigureHeap(size_t max_semi_space_size, size_t max_old_space_size,
- size_t code_range_size) {
+bool Heap::ConfigureHeap(size_t max_semi_space_size_in_kb,
+ size_t max_old_generation_size_in_mb,
+ size_t code_range_size_in_mb) {
if (HasBeenSetUp()) return false;
// Overwrite default configuration.
- if (max_semi_space_size != 0) {
- max_semi_space_size_ = max_semi_space_size * MB;
+ if (max_semi_space_size_in_kb != 0) {
+ max_semi_space_size_ =
+ ROUND_UP(max_semi_space_size_in_kb * KB, Page::kPageSize);
}
- if (max_old_space_size != 0) {
- max_old_generation_size_ = max_old_space_size * MB;
+ if (max_old_generation_size_in_mb != 0) {
+ max_old_generation_size_ = max_old_generation_size_in_mb * MB;
}
// If max space size flags are specified overwrite the configuration.
@@ -5252,6 +5243,12 @@ bool Heap::ConfigureHeap(size_t max_semi_space_size, size_t max_old_space_size,
max_semi_space_size_ = base::bits::RoundUpToPowerOfTwo32(
static_cast<uint32_t>(max_semi_space_size_));
+ if (max_semi_space_size_ == kMaxSemiSpaceSizeInKB * KB) {
+ // Start with at least 1*MB semi-space on machines with a lot of memory.
+ initial_semispace_size_ =
+ Max(initial_semispace_size_, static_cast<size_t>(1 * MB));
+ }
+
if (FLAG_min_semi_space_size > 0) {
size_t initial_semispace_size =
static_cast<size_t>(FLAG_min_semi_space_size) * MB;
@@ -5295,7 +5292,7 @@ bool Heap::ConfigureHeap(size_t max_semi_space_size, size_t max_old_space_size,
FixedArray::SizeFor(JSArray::kInitialMaxFastElementArray) +
AllocationMemento::kSize));
- code_range_size_ = code_range_size * MB;
+ code_range_size_ = code_range_size_in_mb * MB;
configured_ = true;
return true;
@@ -5427,8 +5424,11 @@ const double Heap::kTargetMutatorUtilization = 0.97;
// F * (1 - MU / (R * (1 - MU))) = 1
// F * (R * (1 - MU) - MU) / (R * (1 - MU)) = 1
// F = R * (1 - MU) / (R * (1 - MU) - MU)
-double Heap::HeapGrowingFactor(double gc_speed, double mutator_speed) {
- if (gc_speed == 0 || mutator_speed == 0) return kMaxHeapGrowingFactor;
+double Heap::HeapGrowingFactor(double gc_speed, double mutator_speed,
+ double max_factor) {
+ DCHECK(max_factor >= kMinHeapGrowingFactor);
+ DCHECK(max_factor <= kMaxHeapGrowingFactor);
+ if (gc_speed == 0 || mutator_speed == 0) return max_factor;
const double speed_ratio = gc_speed / mutator_speed;
const double mu = kTargetMutatorUtilization;
@@ -5437,13 +5437,39 @@ double Heap::HeapGrowingFactor(double gc_speed, double mutator_speed) {
const double b = speed_ratio * (1 - mu) - mu;
// The factor is a / b, but we need to check for small b first.
- double factor =
- (a < b * kMaxHeapGrowingFactor) ? a / b : kMaxHeapGrowingFactor;
- factor = Min(factor, kMaxHeapGrowingFactor);
+ double factor = (a < b * max_factor) ? a / b : max_factor;
+ factor = Min(factor, max_factor);
factor = Max(factor, kMinHeapGrowingFactor);
return factor;
}
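
As a worked illustration of the formula above, here is a minimal standalone sketch. Only kTargetMutatorUtilization = 0.97 comes from this patch; the clamp bounds (1.1 minimum, 4.0 maximum) are assumptions chosen for the example.

#include <algorithm>
#include <cstdio>

// Sketch of F = R * (1 - MU) / (R * (1 - MU) - MU), clamped to
// [min_factor, max_factor]. MU = 0.97 as in the patch; the clamp values
// passed in main() below are illustrative assumptions.
double GrowingFactor(double gc_speed, double mutator_speed,
                     double max_factor, double min_factor) {
  if (gc_speed == 0 || mutator_speed == 0) return max_factor;
  const double mu = 0.97;
  const double speed_ratio = gc_speed / mutator_speed;
  const double a = speed_ratio * (1 - mu);
  const double b = speed_ratio * (1 - mu) - mu;
  double factor = (a < b * max_factor) ? a / b : max_factor;
  return std::max(std::min(factor, max_factor), min_factor);
}

int main() {
  // If the GC is 100x faster than the mutator allocates, the heap may grow
  // by roughly 3.0 / 2.03 ~= 1.48 before violating the 97% utilization goal.
  std::printf("%f\n", GrowingFactor(100.0, 1.0, 4.0, 1.1));
}
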
+double Heap::MaxHeapGrowingFactor(size_t max_old_generation_size) {
+ const double min_small_factor = 1.3;
+ const double max_small_factor = 2.0;
+ const double high_factor = 4.0;
+
+ size_t max_old_generation_size_in_mb = max_old_generation_size / MB;
+ max_old_generation_size_in_mb =
+ Max(max_old_generation_size_in_mb,
+ static_cast<size_t>(kMinOldGenerationSize));
+
+ // If we are on a device with lots of memory, we allow a high heap
+ // growing factor.
+ if (max_old_generation_size_in_mb >= kMaxOldGenerationSize) {
+ return high_factor;
+ }
+
+ DCHECK_GE(max_old_generation_size_in_mb, kMinOldGenerationSize);
+ DCHECK_LT(max_old_generation_size_in_mb, kMaxOldGenerationSize);
+
+ // On smaller devices we linearly scale the factor: (X-A)/(B-A)*(D-C)+C
+ double factor = (max_old_generation_size_in_mb - kMinOldGenerationSize) *
+ (max_small_factor - min_small_factor) /
+ (kMaxOldGenerationSize - kMinOldGenerationSize) +
+ min_small_factor;
+ return factor;
+}
+
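
A standalone sketch of the linear scaling above, plugging in the 64-bit values implied by heap.h later in this patch (kPointerMultiplier == 2, hence kMinOldGenerationSize == 256 MB and kMaxOldGenerationSize == 2048 MB); treat the concrete numbers as illustrative assumptions.

#include <algorithm>
#include <cstdio>

// Linear interpolation (X-A)/(B-A)*(D-C)+C between the small-device factors,
// jumping to the high factor once the old generation limit is large enough.
double MaxGrowingFactor(size_t max_old_generation_size_in_mb) {
  const double min_small_factor = 1.3;
  const double max_small_factor = 2.0;
  const double high_factor = 4.0;
  const size_t min_size = 256;   // assumed kMinOldGenerationSize in MB
  const size_t max_size = 2048;  // assumed kMaxOldGenerationSize in MB
  size_t size = std::max(max_old_generation_size_in_mb, min_size);
  if (size >= max_size) return high_factor;
  return (size - min_size) * (max_small_factor - min_small_factor) /
             (max_size - min_size) +
         min_small_factor;
}

int main() {
  // A 1 GB old generation lands at (1024-256)/(2048-256)*0.7 + 1.3 = 1.6.
  std::printf("%f\n", MaxGrowingFactor(1024));
}
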
size_t Heap::CalculateOldGenerationAllocationLimit(double factor,
size_t old_gen_size) {
CHECK(factor > 1.0);
@@ -5468,7 +5494,8 @@ size_t Heap::MinimumAllocationLimitGrowingStep() {
void Heap::SetOldGenerationAllocationLimit(size_t old_gen_size, double gc_speed,
double mutator_speed) {
- double factor = HeapGrowingFactor(gc_speed, mutator_speed);
+ double max_factor = MaxHeapGrowingFactor(max_old_generation_size_);
+ double factor = HeapGrowingFactor(gc_speed, mutator_speed, max_factor);
if (FLAG_trace_gc_verbose) {
isolate_->PrintWithTimestamp(
@@ -5478,10 +5505,6 @@ void Heap::SetOldGenerationAllocationLimit(size_t old_gen_size, double gc_speed,
mutator_speed);
}
- if (IsMemoryConstrainedDevice()) {
- factor = Min(factor, kMaxHeapGrowingFactorMemoryConstrained);
- }
-
if (memory_reducer_->ShouldGrowHeapSlowly() ||
ShouldOptimizeForMemoryUsage()) {
factor = Min(factor, kConservativeHeapGrowingFactor);
@@ -5508,7 +5531,8 @@ void Heap::SetOldGenerationAllocationLimit(size_t old_gen_size, double gc_speed,
void Heap::DampenOldGenerationAllocationLimit(size_t old_gen_size,
double gc_speed,
double mutator_speed) {
- double factor = HeapGrowingFactor(gc_speed, mutator_speed);
+ double max_factor = MaxHeapGrowingFactor(max_old_generation_size_);
+ double factor = HeapGrowingFactor(gc_speed, mutator_speed, max_factor);
size_t limit = CalculateOldGenerationAllocationLimit(factor, old_gen_size);
if (limit < old_generation_allocation_limit_) {
if (FLAG_trace_gc_verbose) {
@@ -5563,9 +5587,15 @@ bool Heap::ShouldExpandOldGenerationOnSlowAllocation() {
Heap::IncrementalMarkingLimit Heap::IncrementalMarkingLimitReached() {
// Code using an AlwaysAllocateScope assumes that the GC state does not
// change; that implies that no marking steps must be performed.
- if (!incremental_marking()->CanBeActivated() || always_allocate() ||
- PromotedSpaceSizeOfObjects() <=
- IncrementalMarking::kActivationThreshold) {
+ if (!incremental_marking()->CanBeActivated() || always_allocate()) {
+ // Incremental marking is disabled or it is too early to start.
+ return IncrementalMarkingLimit::kNoLimit;
+ }
+ if (FLAG_stress_incremental_marking) {
+ return IncrementalMarkingLimit::kHardLimit;
+ }
+ if (PromotedSpaceSizeOfObjects() <=
+ IncrementalMarking::kActivationThreshold) {
// Incremental marking is disabled or it is too early to start.
return IncrementalMarkingLimit::kNoLimit;
}
@@ -5615,16 +5645,6 @@ void Heap::DisableInlineAllocation() {
}
}
-
-V8_DECLARE_ONCE(initialize_gc_once);
-
-static void InitializeGCOnce() {
- Scavenger::Initialize();
- StaticScavengeVisitor::Initialize();
- MarkCompactCollector::Initialize();
-}
-
-
bool Heap::SetUp() {
#ifdef DEBUG
allocation_timeout_ = FLAG_gc_interval;
@@ -5642,7 +5662,9 @@ bool Heap::SetUp() {
if (!ConfigureHeapDefault()) return false;
}
- base::CallOnce(&initialize_gc_once, &InitializeGCOnce);
+ mmap_region_base_ =
+ reinterpret_cast<uintptr_t>(base::OS::GetRandomMmapAddr()) &
+ ~kMmapRegionMask;
// Set up memory allocator.
memory_allocator_ = new MemoryAllocator(isolate_);
@@ -5688,15 +5710,16 @@ bool Heap::SetUp() {
}
tracer_ = new GCTracer(this);
- scavenge_collector_ = new Scavenger(this);
mark_compact_collector_ = new MarkCompactCollector(this);
- incremental_marking_->set_marking_deque(
- mark_compact_collector_->marking_deque());
+ incremental_marking_->set_marking_worklist(
+ mark_compact_collector_->marking_worklist());
#ifdef V8_CONCURRENT_MARKING
- concurrent_marking_ =
- new ConcurrentMarking(this, mark_compact_collector_->marking_deque());
+ MarkCompactCollector::MarkingWorklist* marking_worklist =
+ mark_compact_collector_->marking_worklist();
+ concurrent_marking_ = new ConcurrentMarking(this, marking_worklist->shared(),
+ marking_worklist->bailout());
#else
- concurrent_marking_ = new ConcurrentMarking(this, nullptr);
+ concurrent_marking_ = new ConcurrentMarking(this, nullptr, nullptr);
#endif
minor_mark_compact_collector_ = new MinorMarkCompactCollector(this);
gc_idle_time_handler_ = new GCIdleTimeHandler();
@@ -5722,6 +5745,9 @@ bool Heap::SetUp() {
*this, ScavengeJob::kBytesAllocatedBeforeNextIdleTask);
new_space()->AddAllocationObserver(idle_scavenge_observer_);
+ SetGetExternallyAllocatedMemoryInBytesCallback(
+ DefaultGetExternallyAllocatedMemoryInBytesCallback);
+
return true;
}
@@ -5769,7 +5795,7 @@ void Heap::ClearStackLimits() {
roots_[kRealStackLimitRootIndex] = Smi::kZero;
}
-void Heap::PrintAlloctionsHash() {
+void Heap::PrintAllocationsHash() {
uint32_t hash = StringHasher::GetHashCore(raw_allocations_hash_);
PrintF("\n### Allocations = %u, hash = 0x%08x\n", allocations_count(), hash);
}
@@ -5824,7 +5850,6 @@ void Heap::RegisterExternallyReferencedObject(Object** object) {
}
void Heap::TearDown() {
- use_tasks_ = false;
#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
Verify();
@@ -5834,16 +5859,13 @@ void Heap::TearDown() {
UpdateMaximumCommitted();
if (FLAG_verify_predictable) {
- PrintAlloctionsHash();
+ PrintAllocationsHash();
}
new_space()->RemoveAllocationObserver(idle_scavenge_observer_);
delete idle_scavenge_observer_;
idle_scavenge_observer_ = nullptr;
- delete scavenge_collector_;
- scavenge_collector_ = nullptr;
-
if (mark_compact_collector_ != nullptr) {
mark_compact_collector_->TearDown();
delete mark_compact_collector_;
@@ -6256,18 +6278,34 @@ class UnreachableObjectsFilter : public HeapObjectsFilter {
}
~UnreachableObjectsFilter() {
- heap_->mark_compact_collector()->ClearMarkbits();
+ for (auto it : reachable_) {
+ delete it.second;
+ it.second = nullptr;
+ }
}
bool SkipObject(HeapObject* object) {
if (object->IsFiller()) return true;
- return ObjectMarking::IsWhite(object, MarkingState::Internal(object));
+ MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
+ if (reachable_.count(chunk) == 0) return true;
+ return reachable_[chunk]->count(object) == 0;
}
private:
+ bool MarkAsReachable(HeapObject* object) {
+ MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
+ if (reachable_.count(chunk) == 0) {
+ reachable_[chunk] = new std::unordered_set<HeapObject*>();
+ }
+ if (reachable_[chunk]->count(object)) return false;
+ reachable_[chunk]->insert(object);
+ return true;
+ }
+
class MarkingVisitor : public ObjectVisitor, public RootVisitor {
public:
- MarkingVisitor() : marking_stack_(10) {}
+ explicit MarkingVisitor(UnreachableObjectsFilter* filter)
+ : filter_(filter), marking_stack_(10) {}
void VisitPointers(HeapObject* host, Object** start,
Object** end) override {
@@ -6290,27 +6328,26 @@ class UnreachableObjectsFilter : public HeapObjectsFilter {
for (Object** p = start; p < end; p++) {
if (!(*p)->IsHeapObject()) continue;
HeapObject* obj = HeapObject::cast(*p);
- // Use Marking instead of ObjectMarking to avoid adjusting live bytes
- // counter.
- MarkBit mark_bit =
- ObjectMarking::MarkBitFrom(obj, MarkingState::Internal(obj));
- if (Marking::IsWhite(mark_bit)) {
- Marking::WhiteToBlack(mark_bit);
+ if (filter_->MarkAsReachable(obj)) {
marking_stack_.Add(obj);
}
}
}
+ UnreachableObjectsFilter* filter_;
List<HeapObject*> marking_stack_;
};
+ friend class MarkingVisitor;
+
void MarkReachableObjects() {
- MarkingVisitor visitor;
+ MarkingVisitor visitor(this);
heap_->IterateRoots(&visitor, VISIT_ALL);
visitor.TransitiveClosure();
}
Heap* heap_;
DisallowHeapAllocation no_allocation_;
+ std::unordered_map<MemoryChunk*, std::unordered_set<HeapObject*>*> reachable_;
};
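
A simplified standalone sketch of the per-chunk bookkeeping that replaces the mark bits here. It uses value-typed sets and plain pointers as chunk keys instead of MemoryChunk::FromAddress() and heap-allocated sets, so it approximates the new reachable_ structure rather than mirroring it exactly.

#include <cstdio>
#include <unordered_map>
#include <unordered_set>

// One set of reachable objects per memory chunk. MarkAsReachable() returns
// true only the first time an object is recorded; SkipObject() treats
// anything not recorded as unreachable.
struct ReachableFilter {
  std::unordered_map<void*, std::unordered_set<void*>> reachable;

  bool MarkAsReachable(void* chunk, void* object) {
    return reachable[chunk].insert(object).second;
  }

  bool SkipObject(void* chunk, void* object) const {
    auto it = reachable.find(chunk);
    return it == reachable.end() || it->second.count(object) == 0;
  }
};

int main() {
  ReachableFilter filter;
  int chunk, a, b;
  std::printf("%d\n", filter.MarkAsReachable(&chunk, &a));  // 1: newly marked
  std::printf("%d\n", filter.MarkAsReachable(&chunk, &a));  // 0: already seen
  std::printf("%d\n", filter.SkipObject(&chunk, &b));       // 1: unreachable
}
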
HeapIterator::HeapIterator(Heap* heap,
@@ -6472,6 +6509,9 @@ void Heap::RememberUnmappedPage(Address page, bool compacted) {
remembered_unmapped_pages_index_ %= kRememberedUnmappedPages;
}
+void Heap::AgeInlineCaches() {
+ global_ic_age_ = (global_ic_age_ + 1) & SharedFunctionInfo::ICAgeBits::kMax;
+}
void Heap::RegisterStrongRoots(Object** start, Object** end) {
StrongRootsList* list = new StrongRootsList();
@@ -6559,12 +6599,6 @@ bool Heap::GetObjectTypeName(size_t index, const char** object_type,
return false;
}
-
-// static
-int Heap::GetStaticVisitorIdForMap(Map* map) {
- return StaticVisitorBase::GetVisitorId(map);
-}
-
const char* AllocationSpaceName(AllocationSpace space) {
switch (space) {
case NEW_SPACE:
@@ -6583,5 +6617,66 @@ const char* AllocationSpaceName(AllocationSpace space) {
return NULL;
}
+void VerifyPointersVisitor::VisitPointers(HeapObject* host, Object** start,
+ Object** end) {
+ VerifyPointers(start, end);
+}
+
+void VerifyPointersVisitor::VisitRootPointers(Root root, Object** start,
+ Object** end) {
+ VerifyPointers(start, end);
+}
+
+void VerifyPointersVisitor::VerifyPointers(Object** start, Object** end) {
+ for (Object** current = start; current < end; current++) {
+ if ((*current)->IsHeapObject()) {
+ HeapObject* object = HeapObject::cast(*current);
+ CHECK(object->GetIsolate()->heap()->Contains(object));
+ CHECK(object->map()->IsMap());
+ } else {
+ CHECK((*current)->IsSmi());
+ }
+ }
+}
+
+void VerifySmisVisitor::VisitRootPointers(Root root, Object** start,
+ Object** end) {
+ for (Object** current = start; current < end; current++) {
+ CHECK((*current)->IsSmi());
+ }
+}
+
+bool Heap::AllowedToBeMigrated(HeapObject* obj, AllocationSpace dst) {
+ // Object migration is governed by the following rules:
+ //
+ // 1) Objects in new-space can be migrated to the old space
+ // that matches their target space or they stay in new-space.
+ // 2) Objects in old-space stay in the same space when migrating.
+ // 3) Fillers (two or more words) can migrate due to left-trimming of
+ // fixed arrays in new-space or old space.
+ // 4) Fillers (one word) can never migrate, they are skipped by
+ // incremental marking explicitly to prevent invalid pattern.
+ //
+ // Since this function is used for debugging only, we do not place
+ // asserts here, but check everything explicitly.
+ if (obj->map() == one_pointer_filler_map()) return false;
+ InstanceType type = obj->map()->instance_type();
+ MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
+ AllocationSpace src = chunk->owner()->identity();
+ switch (src) {
+ case NEW_SPACE:
+ return dst == src || dst == OLD_SPACE;
+ case OLD_SPACE:
+ return dst == src &&
+ (dst == OLD_SPACE || obj->IsFiller() || obj->IsExternalString());
+ case CODE_SPACE:
+ return dst == src && type == CODE_TYPE;
+ case MAP_SPACE:
+ case LO_SPACE:
+ return false;
+ }
+ UNREACHABLE();
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/heap/heap.h b/deps/v8/src/heap/heap.h
index 7f213eff27..b579c0288a 100644
--- a/deps/v8/src/heap/heap.h
+++ b/deps/v8/src/heap/heap.h
@@ -91,6 +91,8 @@ using v8::MemoryPressureLevel;
V(Map, ordered_hash_table_map, OrderedHashTableMap) \
V(Map, unseeded_number_dictionary_map, UnseededNumberDictionaryMap) \
V(Map, sloppy_arguments_elements_map, SloppyArgumentsElementsMap) \
+ V(Map, small_ordered_hash_map_map, SmallOrderedHashMapMap) \
+ V(Map, small_ordered_hash_set_map, SmallOrderedHashSetMap) \
V(Map, message_object_map, JSMessageObjectMap) \
V(Map, external_map, ExternalMap) \
V(Map, bytecode_array_map, BytecodeArrayMap) \
@@ -98,6 +100,7 @@ using v8::MemoryPressureLevel;
V(Map, no_closures_cell_map, NoClosuresCellMap) \
V(Map, one_closure_cell_map, OneClosureCellMap) \
V(Map, many_closures_cell_map, ManyClosuresCellMap) \
+ V(Map, property_array_map, PropertyArrayMap) \
/* String maps */ \
V(Map, native_source_string_map, NativeSourceStringMap) \
V(Map, string_map, StringMap) \
@@ -149,6 +152,7 @@ using v8::MemoryPressureLevel;
V(Map, optimized_out_map, OptimizedOutMap) \
V(Map, stale_register_map, StaleRegisterMap) \
/* Canonical empty values */ \
+ V(PropertyArray, empty_property_array, EmptyPropertyArray) \
V(ByteArray, empty_byte_array, EmptyByteArray) \
V(FixedTypedArrayBase, empty_fixed_uint8_array, EmptyFixedUint8Array) \
V(FixedTypedArrayBase, empty_fixed_int8_array, EmptyFixedInt8Array) \
@@ -165,13 +169,14 @@ using v8::MemoryPressureLevel;
V(FixedArray, empty_sloppy_arguments_elements, EmptySloppyArgumentsElements) \
V(SeededNumberDictionary, empty_slow_element_dictionary, \
EmptySlowElementDictionary) \
+ V(FixedArray, empty_ordered_hash_table, EmptyOrderedHashTable) \
V(PropertyCell, empty_property_cell, EmptyPropertyCell) \
V(WeakCell, empty_weak_cell, EmptyWeakCell) \
V(InterceptorInfo, noop_interceptor_info, NoOpInterceptorInfo) \
/* Protectors */ \
V(PropertyCell, array_protector, ArrayProtector) \
V(Cell, is_concat_spreadable_protector, IsConcatSpreadableProtector) \
- V(Cell, species_protector, SpeciesProtector) \
+ V(PropertyCell, species_protector, SpeciesProtector) \
V(PropertyCell, string_length_protector, StringLengthProtector) \
V(Cell, fast_array_iteration_protector, FastArrayIterationProtector) \
V(PropertyCell, array_iterator_protector, ArrayIteratorProtector) \
@@ -188,11 +193,8 @@ using v8::MemoryPressureLevel;
V(FixedArray, single_character_string_cache, SingleCharacterStringCache) \
V(FixedArray, string_split_cache, StringSplitCache) \
V(FixedArray, regexp_multiple_cache, RegExpMultipleCache) \
- V(Object, instanceof_cache_function, InstanceofCacheFunction) \
- V(Object, instanceof_cache_map, InstanceofCacheMap) \
- V(Object, instanceof_cache_answer, InstanceofCacheAnswer) \
/* Lists and dictionaries */ \
- V(NameDictionary, empty_properties_dictionary, EmptyPropertiesDictionary) \
+ V(NameDictionary, empty_property_dictionary, EmptyPropertiesDictionary) \
V(NameDictionary, public_symbol_table, PublicSymbolTable) \
V(NameDictionary, api_symbol_table, ApiSymbolTable) \
V(NameDictionary, api_private_symbol_table, ApiPrivateSymbolTable) \
@@ -315,6 +317,9 @@ using v8::MemoryPressureLevel;
V(OnePointerFillerMap) \
V(OptimizedOut) \
V(OrderedHashTableMap) \
+ V(PropertyArrayMap) \
+ V(SmallOrderedHashMapMap) \
+ V(SmallOrderedHashSetMap) \
V(ScopeInfoMap) \
V(ScriptContextMap) \
V(SharedFunctionInfoMap) \
@@ -338,6 +343,12 @@ using v8::MemoryPressureLevel;
V(WithContextMap) \
PRIVATE_SYMBOL_LIST(V)
+#define FIXED_ARRAY_ELEMENTS_WRITE_BARRIER(heap, array, start, length) \
+ do { \
+ heap->RecordFixedArrayElements(array, start, length); \
+ heap->incremental_marking()->RecordWrites(array); \
+ } while (false)
+
// Forward declarations.
class AllocationObserver;
class ArrayBufferTracker;
@@ -414,57 +425,6 @@ enum class YoungGenerationHandling {
// Also update src/tools/metrics/histograms/histograms.xml in chromium.
};
-// A queue of objects promoted during scavenge. Each object is accompanied by
-// its size to avoid dereferencing a map pointer for scanning. The last page in
-// to-space is used for the promotion queue. On conflict during scavenge, the
-// promotion queue is allocated externally and all entries are copied to the
-// external queue.
-class PromotionQueue {
- public:
- explicit PromotionQueue(Heap* heap)
- : front_(nullptr),
- rear_(nullptr),
- limit_(nullptr),
- emergency_stack_(nullptr),
- heap_(heap) {}
-
- void Initialize();
- void Destroy();
-
- inline void SetNewLimit(Address limit);
- inline bool IsBelowPromotionQueue(Address to_space_top);
-
- inline void insert(HeapObject* target, int32_t size);
- inline void remove(HeapObject** target, int32_t* size);
-
- bool is_empty() {
- return (front_ == rear_) &&
- (emergency_stack_ == nullptr || emergency_stack_->length() == 0);
- }
-
- private:
- struct Entry {
- Entry(HeapObject* obj, int32_t size) : obj_(obj), size_(size) {}
-
- HeapObject* obj_;
- int32_t size_;
- };
-
- inline Page* GetHeadPage();
-
- void RelocateQueueHead();
-
- // The front of the queue is higher in the memory page chain than the rear.
- struct Entry* front_;
- struct Entry* rear_;
- struct Entry* limit_;
-
- List<Entry>* emergency_stack_;
- Heap* heap_;
-
- DISALLOW_COPY_AND_ASSIGN(PromotionQueue);
-};
-
class AllocationResult {
public:
static inline AllocationResult Retry(AllocationSpace space = NEW_SPACE) {
@@ -613,19 +573,16 @@ class Heap {
static const int kPointerMultiplier = i::kPointerSize / 4;
#endif
- // The new space size has to be a power of 2. Sizes are in MB.
- static const int kMaxSemiSpaceSizeLowMemoryDevice = 1 * kPointerMultiplier;
- static const int kMaxSemiSpaceSizeMediumMemoryDevice = 4 * kPointerMultiplier;
- static const int kMaxSemiSpaceSizeHighMemoryDevice = 8 * kPointerMultiplier;
- static const int kMaxSemiSpaceSizeHugeMemoryDevice = 8 * kPointerMultiplier;
+ // Semi-space size needs to be a multiple of page size.
+ static const int kMinSemiSpaceSizeInKB =
+ 1 * kPointerMultiplier * ((1 << kPageSizeBits) / KB);
+ static const int kMaxSemiSpaceSizeInKB =
+ 16 * kPointerMultiplier * ((1 << kPageSizeBits) / KB);
// The old space size has to be a multiple of Page::kPageSize.
// Sizes are in MB.
- static const int kMaxOldSpaceSizeLowMemoryDevice = 128 * kPointerMultiplier;
- static const int kMaxOldSpaceSizeMediumMemoryDevice =
- 256 * kPointerMultiplier;
- static const int kMaxOldSpaceSizeHighMemoryDevice = 512 * kPointerMultiplier;
- static const int kMaxOldSpaceSizeHugeMemoryDevice = 1024 * kPointerMultiplier;
+ static const int kMinOldGenerationSize = 128 * kPointerMultiplier;
+ static const int kMaxOldGenerationSize = 1024 * kPointerMultiplier;
static const int kTraceRingBufferSize = 512;
static const int kStacktraceBufferSize = 512;
@@ -683,8 +640,6 @@ class Heap {
// they are in new space.
static bool RootCanBeWrittenAfterInitialization(RootListIndex root_index);
- static bool IsUnmodifiedHeapObject(Object** p);
-
// Zapping is needed for verify heap, and always done in debug builds.
static inline bool ShouldZapGarbage() {
#ifdef DEBUG
@@ -718,17 +673,16 @@ class Heap {
return "Unknown collector";
}
+ V8_EXPORT_PRIVATE static double MaxHeapGrowingFactor(
+ size_t max_old_generation_size);
V8_EXPORT_PRIVATE static double HeapGrowingFactor(double gc_speed,
- double mutator_speed);
+ double mutator_speed,
+ double max_factor);
// Copy block of memory from src to dst. Size of block should be aligned
// by pointer size.
static inline void CopyBlock(Address dst, Address src, int byte_size);
- // Determines a static visitor id based on the given {map} that can then be
- // stored on the map to facilitate fast dispatch for {StaticVisitorBase}.
- static int GetStaticVisitorIdForMap(Map* map);
-
// Notifies the heap that is ok to start marking or other activities that
// should not happen during deserialization.
void NotifyDeserializationComplete();
@@ -738,9 +692,6 @@ class Heap {
inline Address* OldSpaceAllocationTopAddress();
inline Address* OldSpaceAllocationLimitAddress();
- // Clear the Instanceof cache (used when a prototype changes).
- inline void ClearInstanceofCache();
-
// FreeSpace objects have a null map after deserialization. Update the map.
void RepairFreeListsAfterDeserialization();
@@ -813,7 +764,7 @@ class Heap {
// Checks whether the given object is allowed to be migrated from its
// current space into the given destination space. Used for debugging.
- inline bool AllowedToBeMigrated(HeapObject* object, AllocationSpace dest);
+ bool AllowedToBeMigrated(HeapObject* object, AllocationSpace dest);
void CheckHandleCount();
@@ -831,7 +782,7 @@ class Heap {
// If an object has an AllocationMemento trailing it, return it, otherwise
// return NULL;
template <FindMementoMode mode>
- inline AllocationMemento* FindAllocationMemento(HeapObject* object);
+ inline AllocationMemento* FindAllocationMemento(Map* map, HeapObject* object);
// Returns false if not able to reserve.
bool ReserveSpace(Reservation* reservations, List<Address>* maps);
@@ -864,16 +815,12 @@ class Heap {
// An object should be promoted if the object has survived a
// scavenge operation.
- inline bool ShouldBePromoted(Address old_address, int object_size);
+ inline bool ShouldBePromoted(Address old_address);
void ClearNormalizedMapCaches();
void IncrementDeferredCount(v8::Isolate::UseCounterFeature feature);
- // Completely clear the Instanceof cache (to stop it keeping objects alive
- // around a GC).
- inline void CompletelyClearInstanceofCache();
-
inline uint32_t HashSeed();
inline int NextScriptId();
@@ -896,9 +843,7 @@ class Heap {
// disposal. We use it to flush inline caches.
int global_ic_age() { return global_ic_age_; }
- void AgeInlineCaches() {
- global_ic_age_ = (global_ic_age_ + 1) & SharedFunctionInfo::ICAgeBits::kMax;
- }
+ void AgeInlineCaches();
int64_t external_memory_hard_limit() { return MaxOldGenerationSize() / 2; }
@@ -950,14 +895,6 @@ class Heap {
bool ShouldOptimizeForMemoryUsage();
- bool IsLowMemoryDevice() {
- return max_old_generation_size_ <= kMaxOldSpaceSizeLowMemoryDevice;
- }
-
- bool IsMemoryConstrainedDevice() {
- return max_old_generation_size_ <= kMaxOldSpaceSizeMediumMemoryDevice;
- }
-
bool HighMemoryPressure() {
return memory_pressure_level_.Value() != MemoryPressureLevel::kNone;
}
@@ -990,10 +927,14 @@ class Heap {
// Initialization. ===========================================================
// ===========================================================================
- // Configure heap size in MB before setup. Return false if the heap has been
- // set up already.
- bool ConfigureHeap(size_t max_semi_space_size, size_t max_old_space_size,
- size_t code_range_size);
+ // Configure heap sizes
+ // max_semi_space_size_in_kb: maximum semi-space size in KB
+ // max_old_generation_size_in_mb: maximum old generation size in MB
+ // code_range_size_in_mb: code range size in MB
+ // Return false if the heap has been set up already.
+ bool ConfigureHeap(size_t max_semi_space_size_in_kb,
+ size_t max_old_generation_size_in_mb,
+ size_t code_range_size_in_mb);
bool ConfigureHeapDefault();
// Prepares the heap, setting up memory areas that are needed in the isolate
@@ -1016,8 +957,6 @@ class Heap {
// Returns whether SetUp has been called.
bool HasBeenSetUp();
- bool use_tasks() const { return use_tasks_; }
-
// ===========================================================================
// Getters for spaces. =======================================================
// ===========================================================================
@@ -1044,8 +983,6 @@ class Heap {
MemoryAllocator* memory_allocator() { return memory_allocator_; }
- PromotionQueue* promotion_queue() { return &promotion_queue_; }
-
inline Isolate* isolate();
MarkCompactCollector* mark_compact_collector() {
@@ -1138,7 +1075,7 @@ class Heap {
RootListIndex RootIndexForFixedTypedArray(ExternalArrayType array_type);
RootListIndex RootIndexForEmptyFixedTypedArray(ElementsKind kind);
- FixedTypedArrayBase* EmptyFixedTypedArrayForMap(Map* map);
+ FixedTypedArrayBase* EmptyFixedTypedArrayForMap(const Map* map);
void RegisterStrongRoots(Object** start, Object** end);
void UnregisterStrongRoots(Object** start);
@@ -1161,7 +1098,7 @@ class Heap {
// Performs garbage collection operation.
// Returns whether there is a chance that another major GC could
// collect more garbage.
- inline bool CollectGarbage(
+ bool CollectGarbage(
AllocationSpace space, GarbageCollectionReason gc_reason,
const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags);
@@ -1179,6 +1116,14 @@ class Heap {
// completes incremental marking in order to free external resources.
void ReportExternalMemoryPressure();
+ typedef v8::Isolate::GetExternallyAllocatedMemoryInBytesCallback
+ GetExternallyAllocatedMemoryInBytesCallback;
+
+ void SetGetExternallyAllocatedMemoryInBytesCallback(
+ GetExternallyAllocatedMemoryInBytesCallback callback) {
+ external_memory_callback_ = callback;
+ }
+
// Invoked when GC was requested via the stack guard.
void HandleGCRequest();
@@ -1196,9 +1141,6 @@ class Heap {
// Iterates over all the other roots in the heap.
void IterateWeakRoots(RootVisitor* v, VisitMode mode);
- // Iterate pointers of promoted objects.
- void IterateAndScavengePromotedObject(HeapObject* target, int size);
-
// ===========================================================================
// Store buffer API. =========================================================
// ===========================================================================
@@ -1350,6 +1292,30 @@ class Heap {
size_t InitialSemiSpaceSize() { return initial_semispace_size_; }
size_t MaxOldGenerationSize() { return max_old_generation_size_; }
+ static size_t ComputeMaxOldGenerationSize(uint64_t physical_memory) {
+ const int old_space_physical_memory_factor = 4;
+ int computed_size =
+ static_cast<int>(physical_memory / i::MB /
+ old_space_physical_memory_factor * kPointerMultiplier);
+ return Max(Min(computed_size, kMaxOldGenerationSize),
+ kMinOldGenerationSize);
+ }
+
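
A rough sketch of this physical-memory heuristic for a 64-bit build, reusing the assumed 256 MB / 2048 MB old-generation bounds from the earlier example; the constants are illustrative, not quoted from the header.

#include <algorithm>
#include <cstdint>
#include <cstdio>

// physical_memory / MB / old_space_physical_memory_factor * kPointerMultiplier,
// clamped to the old-generation limits (assumed 256 MB and 2048 MB here).
size_t MaxOldGenerationSizeMB(uint64_t physical_memory) {
  const uint64_t MB = 1024 * 1024;
  const int factor = 4;             // old_space_physical_memory_factor
  const int pointer_multiplier = 2; // assumed 64-bit build
  int computed = static_cast<int>(physical_memory / MB / factor *
                                  pointer_multiplier);
  return std::max(std::min(computed, 2048), 256);
}

int main() {
  // 8 GB of RAM: 8192 / 4 * 2 = 4096 MB, capped at the 2048 MB maximum.
  std::printf("%zu\n", MaxOldGenerationSizeMB(8ull * 1024 * 1024 * 1024));
}
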
+ static size_t ComputeMaxSemiSpaceSize(uint64_t physical_memory) {
+ const uint64_t min_physical_memory = 512 * MB;
+ const uint64_t max_physical_memory = 2 * static_cast<uint64_t>(GB);
+
+ uint64_t capped_physical_memory =
+ Max(Min(physical_memory, max_physical_memory), min_physical_memory);
+ // linearly scale max semi-space size: (X-A)/(B-A)*(D-C)+C
+ int semi_space_size_in_kb =
+ static_cast<int>(((capped_physical_memory - min_physical_memory) *
+ (kMaxSemiSpaceSizeInKB - kMinSemiSpaceSizeInKB)) /
+ (max_physical_memory - min_physical_memory) +
+ kMinSemiSpaceSizeInKB);
+ return RoundUp(semi_space_size_in_kb, (1 << kPageSizeBits) / KB);
+ }
+
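
An illustrative sketch of the semi-space interpolation above. The 1024 KB / 16384 KB bounds assume a 64-bit build with 512 KB pages, which is an assumption of the example rather than something stated in this hunk.

#include <algorithm>
#include <cstdint>
#include <cstdio>

// Linear scaling of the semi-space size between 512 MB and 2 GB of physical
// memory, rounded up to a whole number of (assumed 512 KB) pages.
size_t MaxSemiSpaceSizeKB(uint64_t physical_memory) {
  const uint64_t MB = 1024 * 1024;
  const uint64_t min_phys = 512 * MB;
  const uint64_t max_phys = 2048 * MB;
  const uint64_t min_semi_kb = 1024;   // assumed kMinSemiSpaceSizeInKB
  const uint64_t max_semi_kb = 16384;  // assumed kMaxSemiSpaceSizeInKB
  const uint64_t page_kb = 512;        // assumed page size in KB
  uint64_t capped = std::max(std::min(physical_memory, max_phys), min_phys);
  uint64_t semi_kb = (capped - min_phys) * (max_semi_kb - min_semi_kb) /
                         (max_phys - min_phys) +
                     min_semi_kb;
  return static_cast<size_t>((semi_kb + page_kb - 1) / page_kb * page_kb);
}

int main() {
  // 1 GB of RAM sits a third of the way up the range: 1024 + 5120 = 6144 KB.
  std::printf("%zu\n", MaxSemiSpaceSizeKB(1024ull * 1024 * 1024));
}
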
// Returns the capacity of the heap in bytes w/o growing. Heap grows when
// more spaces are needed until it reaches the limit.
size_t Capacity();
@@ -1502,7 +1468,7 @@ class Heap {
// in the hash map is created. Otherwise the entry (including the count
// value) is cached on the local pretenuring feedback.
template <UpdateAllocationSiteMode mode>
- inline void UpdateAllocationSite(HeapObject* object,
+ inline void UpdateAllocationSite(Map* map, HeapObject* object,
base::HashMap* pretenuring_feedback);
// Removes an entry from the global pretenuring storage.
@@ -1532,6 +1498,25 @@ class Heap {
void ReportHeapStatistics(const char* title);
void ReportCodeStatistics(const char* title);
#endif
+ void* GetRandomMmapAddr() {
+ void* result = base::OS::GetRandomMmapAddr();
+#if V8_TARGET_ARCH_X64
+#if V8_OS_MACOSX
+ // The Darwin kernel [as of macOS 10.12.5] does not clean up page
+ // directory entries [PDE] created from mmap or mach_vm_allocate, even
+ // after the region is destroyed. Using a virtual address space that is
+ // too large causes a leak of about 1 wired [can never be paged out] page
+ // per call to mmap(). The page is only reclaimed when the process is
+ // killed. Confine the hint to a 32-bit section of the virtual address
+ // space. See crbug.com/700928.
+ uintptr_t offset =
+ reinterpret_cast<uintptr_t>(base::OS::GetRandomMmapAddr()) &
+ kMmapRegionMask;
+ result = reinterpret_cast<void*>(mmap_region_base_ + offset);
+#endif // V8_OS_MACOSX
+#endif // V8_TARGET_ARCH_X64
+ return result;
+ }
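
A minimal sketch of the confinement scheme above: SetUp() fixes the high bits of one random address (mmap_region_base_), and each later hint only randomizes the low 32 bits, so all hints land in a single 4 GB region. kMmapRegionMask == 0xFFFFFFFF matches the constant added to heap.h below; std::mt19937_64 stands in for base::OS::GetRandomMmapAddr().

#include <cstdint>
#include <cstdio>
#include <random>

const uintptr_t kMmapRegionMask = 0xFFFFFFFFu;

int main() {
  std::mt19937_64 rng(42);  // placeholder randomness source
  // Done once, as in Heap::SetUp(): keep only the high bits.
  uintptr_t mmap_region_base = rng() & ~kMmapRegionMask;
  for (int i = 0; i < 3; i++) {
    // Per-call hint: same base, random 32-bit offset.
    uintptr_t hint = mmap_region_base + (rng() & kMmapRegionMask);
    std::printf("%#zx\n", static_cast<size_t>(hint));
  }
}
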
static const char* GarbageCollectionReasonToString(
GarbageCollectionReason gc_reason);
@@ -1647,6 +1632,10 @@ class Heap {
return (pretenure == TENURED) ? OLD_SPACE : NEW_SPACE;
}
+ static size_t DefaultGetExternallyAllocatedMemoryInBytesCallback() {
+ return 0;
+ }
+
#define ROOT_ACCESSOR(type, name, camel_name) \
inline void set_##name(type* value);
ROOT_LIST(ROOT_ACCESSOR)
@@ -1687,14 +1676,6 @@ class Heap {
// over all objects. May cause a GC.
void MakeHeapIterable();
- // Performs garbage collection operation.
- // Returns whether there is a chance that another major GC could
- // collect more garbage.
- bool CollectGarbage(
- GarbageCollector collector, GarbageCollectionReason gc_reason,
- const char* collector_reason,
- const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags);
-
// Performs garbage collection
// Returns whether there is a chance another major GC could
// collect more garbage.
@@ -1705,8 +1686,7 @@ class Heap {
inline void UpdateOldSpaceLimits();
// Initializes a JSObject based on its map.
- void InitializeJSObjectFromMap(JSObject* obj, FixedArray* properties,
- Map* map);
+ void InitializeJSObjectFromMap(JSObject* obj, Object* properties, Map* map);
// Initializes JSObject body starting at given offset.
void InitializeJSObjectBody(JSObject* obj, Map* map, int start_offset);
@@ -1774,7 +1754,7 @@ class Heap {
inline void UpdateAllocationsHash(HeapObject* object);
inline void UpdateAllocationsHash(uint32_t value);
- void PrintAlloctionsHash();
+ void PrintAllocationsHash();
void AddToRingBuffer(const char* string);
void GetFromRingBuffer(char* buffer);
@@ -1832,8 +1812,6 @@ class Heap {
void Scavenge();
void EvacuateYoungGeneration();
- Address DoScavenge(Address new_space_front);
-
void UpdateNewSpaceReferencesInExternalStringTable(
ExternalStringTableUpdaterCallback updater_func);
@@ -1991,8 +1969,17 @@ class Heap {
CopyBytecodeArray(BytecodeArray* bytecode_array);
// Allocates a fixed array initialized with undefined values
+ MUST_USE_RESULT inline AllocationResult AllocateFixedArray(
+ int length, PretenureFlag pretenure = NOT_TENURED);
+
+ // Allocates a property array initialized with undefined values
MUST_USE_RESULT AllocationResult
- AllocateFixedArray(int length, PretenureFlag pretenure = NOT_TENURED);
+ AllocatePropertyArray(int length, PretenureFlag pretenure = NOT_TENURED);
+
+ MUST_USE_RESULT AllocationResult AllocateSmallOrderedHashSet(
+ int length, PretenureFlag pretenure = NOT_TENURED);
+ MUST_USE_RESULT AllocationResult AllocateSmallOrderedHashMap(
+ int length, PretenureFlag pretenure = NOT_TENURED);
// Allocate an uninitialized object. The memory is non-executable if the
// hardware and OS allow. This is the single choke-point for allocations
@@ -2064,8 +2051,13 @@ class Heap {
MUST_USE_RESULT inline AllocationResult CopyFixedArray(FixedArray* src);
// Make a copy of src, also grow the copy, and return the copy.
- MUST_USE_RESULT AllocationResult
- CopyFixedArrayAndGrow(FixedArray* src, int grow_by, PretenureFlag pretenure);
+ template <typename T>
+ MUST_USE_RESULT AllocationResult CopyArrayAndGrow(T* src, int grow_by,
+ PretenureFlag pretenure);
+
+ // Make a copy of src, also grow the copy, and return the copy.
+ MUST_USE_RESULT AllocationResult CopyPropertyArrayAndGrow(
+ PropertyArray* src, int grow_by, PretenureFlag pretenure);
// Make a copy of src, also grow the copy, and return the copy.
MUST_USE_RESULT AllocationResult CopyFixedArrayUpTo(FixedArray* src,
@@ -2073,8 +2065,15 @@ class Heap {
PretenureFlag pretenure);
// Make a copy of src, set the map, and return the copy.
- MUST_USE_RESULT AllocationResult
- CopyFixedArrayWithMap(FixedArray* src, Map* map);
+ template <typename T>
+ MUST_USE_RESULT AllocationResult CopyArrayWithMap(T* src, Map* map);
+
+ // Make a copy of src, set the map, and return the copy.
+ MUST_USE_RESULT AllocationResult CopyFixedArrayWithMap(FixedArray* src,
+ Map* map);
+
+ // Make a copy of src, set the map, and return the copy.
+ MUST_USE_RESULT AllocationResult CopyPropertyArray(PropertyArray* src);
// Make a copy of src and return it.
MUST_USE_RESULT inline AllocationResult CopyFixedDoubleArray(
@@ -2123,7 +2122,7 @@ class Heap {
MUST_USE_RESULT AllocationResult AllocateCell(Object* value);
// Allocate a tenured JS global property cell initialized with the hole.
- MUST_USE_RESULT AllocationResult AllocatePropertyCell();
+ MUST_USE_RESULT AllocationResult AllocatePropertyCell(Name* name);
MUST_USE_RESULT AllocationResult AllocateWeakCell(HeapObject* value);
@@ -2224,6 +2223,9 @@ class Heap {
// How many gc happened.
unsigned int gc_count_;
+ static const uintptr_t kMmapRegionMask = 0xFFFFFFFFu;
+ uintptr_t mmap_region_base_;
+
// For post mortem debugging.
int remembered_unmapped_pages_index_;
Address remembered_unmapped_pages_[kRememberedUnmappedPages];
@@ -2262,6 +2264,8 @@ class Heap {
List<GCCallbackPair> gc_epilogue_callbacks_;
List<GCCallbackPair> gc_prologue_callbacks_;
+ GetExternallyAllocatedMemoryInBytesCallback external_memory_callback_;
+
int deferred_counters_[v8::Isolate::kUseCounterFeatureCount];
GCTracer* tracer_;
@@ -2291,8 +2295,6 @@ class Heap {
// Last time a garbage collection happened.
double last_gc_time_;
- Scavenger* scavenge_collector_;
-
MarkCompactCollector* mark_compact_collector_;
MinorMarkCompactCollector* minor_mark_compact_collector_;
@@ -2327,11 +2329,6 @@ class Heap {
// The size of objects in old generation after the last MarkCompact GC.
size_t old_generation_size_at_last_gc_;
- // If the --deopt_every_n_garbage_collections flag is set to a positive value,
- // this variable holds the number of garbage collections since the last
- // deoptimization triggered by garbage collection.
- int gcs_since_last_deopt_;
-
// The feedback storage is used to store allocation sites (keys) and how often
// they have been visited (values) by finding a memento behind an object. The
// storage is only alive temporarily during a GC. The invariant is that all
@@ -2346,9 +2343,6 @@ class Heap {
bool ring_buffer_full_;
size_t ring_buffer_end_;
- // Shared state read by the scavenge collector and set by ScavengeObject.
- PromotionQueue promotion_queue_;
-
// Flag is set when the heap has been configured. The heap can be repeatedly
// configured through the API until it is set up.
bool configured_;
@@ -2377,8 +2371,6 @@ class Heap {
bool fast_promotion_mode_;
- bool use_tasks_;
-
// Used for testing purposes.
bool force_oom_;
bool delay_sweeper_tasks_for_testing_;
@@ -2470,21 +2462,18 @@ class AlwaysAllocateScope {
// objects in a heap space but above the allocation pointer.
class VerifyPointersVisitor : public ObjectVisitor, public RootVisitor {
public:
- inline void VisitPointers(HeapObject* host, Object** start,
- Object** end) override;
- inline void VisitRootPointers(Root root, Object** start,
- Object** end) override;
+ void VisitPointers(HeapObject* host, Object** start, Object** end) override;
+ void VisitRootPointers(Root root, Object** start, Object** end) override;
private:
- inline void VerifyPointers(Object** start, Object** end);
+ void VerifyPointers(Object** start, Object** end);
};
// Verify that all objects are Smis.
class VerifySmisVisitor : public RootVisitor {
public:
- inline void VisitRootPointers(Root root, Object** start,
- Object** end) override;
+ void VisitRootPointers(Root root, Object** start, Object** end) override;
};
diff --git a/deps/v8/src/heap/incremental-marking-inl.h b/deps/v8/src/heap/incremental-marking-inl.h
index ee594b2aee..16418bdfcb 100644
--- a/deps/v8/src/heap/incremental-marking-inl.h
+++ b/deps/v8/src/heap/incremental-marking-inl.h
@@ -19,6 +19,14 @@ void IncrementalMarking::RecordWrite(HeapObject* obj, Object** slot,
}
}
+void IncrementalMarking::RecordWrites(HeapObject* obj) {
+ if (IsMarking()) {
+ if (FLAG_concurrent_marking ||
+ ObjectMarking::IsBlack<kAtomicity>(obj, marking_state(obj))) {
+ RevisitObject(obj);
+ }
+ }
+}
void IncrementalMarking::RecordWriteOfCodeEntry(JSFunction* host, Object** slot,
Code* value) {
diff --git a/deps/v8/src/heap/incremental-marking-job.cc b/deps/v8/src/heap/incremental-marking-job.cc
index 47a27faf15..833a40f8a3 100644
--- a/deps/v8/src/heap/incremental-marking-job.cc
+++ b/deps/v8/src/heap/incremental-marking-job.cc
@@ -20,8 +20,6 @@ void IncrementalMarkingJob::Start(Heap* heap) {
ScheduleTask(heap);
}
-void IncrementalMarkingJob::NotifyTask() { task_pending_ = false; }
-
void IncrementalMarkingJob::ScheduleTask(Heap* heap) {
if (!task_pending_) {
v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(heap->isolate());
@@ -48,16 +46,20 @@ void IncrementalMarkingJob::Task::RunInternal() {
isolate(), &RuntimeCallStats::GC_IncrementalMarkingJob);
Heap* heap = isolate()->heap();
- job_->NotifyTask();
IncrementalMarking* incremental_marking = heap->incremental_marking();
if (incremental_marking->IsStopped()) {
if (heap->IncrementalMarkingLimitReached() !=
Heap::IncrementalMarkingLimit::kNoLimit) {
heap->StartIncrementalMarking(Heap::kNoGCFlags,
GarbageCollectionReason::kIdleTask,
- kNoGCCallbackFlags);
+ kGCCallbackScheduleIdleGarbageCollection);
}
}
+
+ // Clear this flag after StartIncrementalMarking call to avoid
+  // scheduling a new task when starting incremental marking.
+ job_->task_pending_ = false;
+
if (!incremental_marking->IsStopped()) {
Step(heap);
if (!incremental_marking->IsStopped()) {
diff --git a/deps/v8/src/heap/incremental-marking-job.h b/deps/v8/src/heap/incremental-marking-job.h
index ccc60c55cb..902989b613 100644
--- a/deps/v8/src/heap/incremental-marking-job.h
+++ b/deps/v8/src/heap/incremental-marking-job.h
@@ -21,12 +21,15 @@ class IncrementalMarkingJob {
class Task : public CancelableTask {
public:
explicit Task(Isolate* isolate, IncrementalMarkingJob* job)
- : CancelableTask(isolate), job_(job) {}
+ : CancelableTask(isolate), isolate_(isolate), job_(job) {}
static void Step(Heap* heap);
// CancelableTask overrides.
void RunInternal() override;
+ Isolate* isolate() { return isolate_; }
+
private:
+ Isolate* isolate_;
IncrementalMarkingJob* job_;
};
@@ -36,8 +39,6 @@ class IncrementalMarkingJob {
void Start(Heap* heap);
- void NotifyTask();
-
void ScheduleTask(Heap* heap);
private:
diff --git a/deps/v8/src/heap/incremental-marking.cc b/deps/v8/src/heap/incremental-marking.cc
index 58731d570b..cdc8881f88 100644
--- a/deps/v8/src/heap/incremental-marking.cc
+++ b/deps/v8/src/heap/incremental-marking.cc
@@ -33,7 +33,7 @@ void IncrementalMarking::Observer::Step(int bytes_allocated, Address, size_t) {
IncrementalMarking::IncrementalMarking(Heap* heap)
: heap_(heap),
- marking_deque_(nullptr),
+ marking_worklist_(nullptr),
initial_old_generation_size_(0),
bytes_marked_ahead_of_schedule_(0),
unscanned_bytes_of_large_object_(0),
@@ -55,13 +55,14 @@ bool IncrementalMarking::BaseRecordWrite(HeapObject* obj, Object* value) {
DCHECK(!ObjectMarking::IsImpossible<kAtomicity>(
value_heap_obj, marking_state(value_heap_obj)));
DCHECK(!ObjectMarking::IsImpossible<kAtomicity>(obj, marking_state(obj)));
- const bool is_black =
+ const bool need_recording =
+ FLAG_concurrent_marking ||
ObjectMarking::IsBlack<kAtomicity>(obj, marking_state(obj));
- if (is_black && WhiteToGreyAndPush(value_heap_obj)) {
+ if (need_recording && WhiteToGreyAndPush(value_heap_obj)) {
RestartIfNotMarking();
}
- return is_compacting_ && is_black;
+ return is_compacting_ && need_recording;
}
@@ -131,7 +132,7 @@ void IncrementalMarking::RecordWriteIntoCodeSlow(Code* host, RelocInfo* rinfo,
bool IncrementalMarking::WhiteToGreyAndPush(HeapObject* obj) {
if (ObjectMarking::WhiteToGrey<kAtomicity>(obj, marking_state(obj))) {
- marking_deque()->Push(obj);
+ marking_worklist()->Push(obj);
return true;
}
return false;
@@ -142,33 +143,47 @@ void IncrementalMarking::MarkBlackAndPush(HeapObject* obj) {
ObjectMarking::WhiteToGrey<kAtomicity>(obj, marking_state(obj));
if (ObjectMarking::GreyToBlack<kAtomicity>(obj, marking_state(obj))) {
#ifdef V8_CONCURRENT_MARKING
- marking_deque()->Push(obj, MarkingThread::kMain, TargetDeque::kBailout);
+ marking_worklist()->PushBailout(obj);
#else
- if (!marking_deque()->Push(obj)) {
+ if (!marking_worklist()->Push(obj)) {
ObjectMarking::BlackToGrey<kAtomicity>(obj, marking_state(obj));
}
#endif
}
}
-void IncrementalMarking::TransferMark(Heap* heap, HeapObject* from,
- HeapObject* to) {
+void IncrementalMarking::NotifyLeftTrimming(HeapObject* from, HeapObject* to) {
+ DCHECK(IsMarking());
DCHECK(MemoryChunk::FromAddress(from->address())->SweepingDone());
- // This is only used when resizing an object.
- DCHECK(MemoryChunk::FromAddress(from->address()) ==
- MemoryChunk::FromAddress(to->address()));
+ DCHECK_EQ(MemoryChunk::FromAddress(from->address()),
+ MemoryChunk::FromAddress(to->address()));
+ DCHECK_NE(from, to);
- if (!IsMarking()) return;
+ MarkBit old_mark_bit = ObjectMarking::MarkBitFrom(from, marking_state(from));
+ MarkBit new_mark_bit = ObjectMarking::MarkBitFrom(to, marking_state(to));
- // If the mark doesn't move, we don't check the color of the object.
- // It doesn't matter whether the object is black, since it hasn't changed
- // size, so the adjustment to the live data count will be zero anyway.
- if (from == to) return;
+ if (black_allocation() && Marking::IsBlack(new_mark_bit)) {
+ // Nothing to do if the object is in black area.
+ return;
+ }
- MarkBit new_mark_bit = ObjectMarking::MarkBitFrom(to, marking_state(to));
- MarkBit old_mark_bit = ObjectMarking::MarkBitFrom(from, marking_state(from));
+ bool marked_black_due_to_left_trimming = false;
+ if (FLAG_concurrent_marking) {
+ // We need to mark the array black before overwriting its map and length
+ // so that the concurrent marker does not observe inconsistent state.
+ Marking::WhiteToGrey<kAtomicity>(old_mark_bit);
+ if (Marking::GreyToBlack<kAtomicity>(old_mark_bit)) {
+ // The concurrent marker will not mark the array. We need to push the
+      // new array start onto the marking deque to ensure that it will be marked.
+ marked_black_due_to_left_trimming = true;
+ }
+ DCHECK(Marking::IsBlack<kAtomicity>(old_mark_bit));
+ }
- if (Marking::IsBlack<kAtomicity>(old_mark_bit)) {
+ if (Marking::IsBlack<kAtomicity>(old_mark_bit) &&
+ !marked_black_due_to_left_trimming) {
+ // The array was black before left trimming or was marked black by the
+ // concurrent marker. Simply transfer the color.
if (from->address() + kPointerSize == to->address()) {
// The old and the new markbits overlap. The |to| object has the
// grey color. To make it black, we need to set the second bit.
@@ -179,12 +194,13 @@ void IncrementalMarking::TransferMark(Heap* heap, HeapObject* from,
DCHECK(success);
USE(success);
}
- } else if (Marking::IsGrey<kAtomicity>(old_mark_bit)) {
+ } else if (Marking::IsGrey<kAtomicity>(old_mark_bit) ||
+ marked_black_due_to_left_trimming) {
+ // The array was already grey or was marked black by this function.
+    // Mark the new array grey and push it onto the marking deque.
if (from->address() + kPointerSize == to->address()) {
- // The old and the new markbits overlap. The |to| object has the
- // white color. To make it grey, we need to set the first bit.
- // Note that Marking::WhiteToGrey does not work here because
- // old_mark_bit.Next() can be set by the concurrent marker at any time.
+ // The old and the new markbits overlap. The |to| object is either white
+ // or grey. Set the first bit to make sure that it is grey.
new_mark_bit.Set();
DCHECK(!new_mark_bit.Next().Get());
} else {
@@ -192,67 +208,72 @@ void IncrementalMarking::TransferMark(Heap* heap, HeapObject* from,
DCHECK(success);
USE(success);
}
- marking_deque()->Push(to);
+ marking_worklist()->Push(to);
RestartIfNotMarking();
}
}
-class IncrementalMarkingMarkingVisitor
- : public StaticMarkingVisitor<IncrementalMarkingMarkingVisitor> {
+class IncrementalMarkingMarkingVisitor final
+ : public MarkingVisitor<IncrementalMarkingMarkingVisitor> {
public:
- static void Initialize() {
- StaticMarkingVisitor<IncrementalMarkingMarkingVisitor>::Initialize();
- table_.Register(kVisitFixedArray, &VisitFixedArrayIncremental);
- table_.Register(kVisitNativeContext, &VisitNativeContextIncremental);
- }
+ typedef MarkingVisitor<IncrementalMarkingMarkingVisitor> Parent;
static const int kProgressBarScanningChunk = 32 * 1024;
- static void VisitFixedArrayIncremental(Map* map, HeapObject* object) {
+ explicit IncrementalMarkingMarkingVisitor(MarkCompactCollector* collector)
+ : MarkingVisitor<IncrementalMarkingMarkingVisitor>(collector->heap(),
+ collector),
+ incremental_marking_(collector->heap()->incremental_marking()) {}
+
+ V8_INLINE int VisitFixedArray(Map* map, FixedArray* object) {
MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
+ int object_size = FixedArray::BodyDescriptor::SizeOf(map, object);
if (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR)) {
DCHECK(!FLAG_use_marking_progress_bar ||
chunk->owner()->identity() == LO_SPACE);
- Heap* heap = map->GetHeap();
// When using a progress bar for large fixed arrays, scan only a chunk of
// the array and try to push it onto the marking deque again until it is
// fully scanned. Fall back to scanning it through to the end in case this
// fails because of a full deque.
- int object_size = FixedArray::BodyDescriptor::SizeOf(map, object);
int start_offset =
Max(FixedArray::BodyDescriptor::kStartOffset, chunk->progress_bar());
- int end_offset =
- Min(object_size, start_offset + kProgressBarScanningChunk);
- int already_scanned_offset = start_offset;
- bool scan_until_end = false;
- do {
- VisitPointers(heap, object, HeapObject::RawField(object, start_offset),
- HeapObject::RawField(object, end_offset));
- start_offset = end_offset;
- end_offset = Min(object_size, end_offset + kProgressBarScanningChunk);
- scan_until_end = heap->incremental_marking()->marking_deque()->IsFull();
- } while (scan_until_end && start_offset < object_size);
- chunk->set_progress_bar(start_offset);
if (start_offset < object_size) {
+#ifdef V8_CONCURRENT_MARKING
+ incremental_marking_->marking_worklist()->PushBailout(object);
+#else
if (ObjectMarking::IsGrey<IncrementalMarking::kAtomicity>(
- object, heap->incremental_marking()->marking_state(object))) {
- heap->incremental_marking()->marking_deque()->Unshift(object);
+ object, incremental_marking_->marking_state(object))) {
+ incremental_marking_->marking_worklist()->Push(object);
} else {
DCHECK(ObjectMarking::IsBlack<IncrementalMarking::kAtomicity>(
- object, heap->incremental_marking()->marking_state(object)));
- heap->mark_compact_collector()->UnshiftBlack(object);
+ object, incremental_marking_->marking_state(object)));
+ collector_->PushBlack(object);
+ }
+#endif
+ int end_offset =
+ Min(object_size, start_offset + kProgressBarScanningChunk);
+ int already_scanned_offset = start_offset;
+ bool scan_until_end = false;
+ do {
+ VisitPointers(object, HeapObject::RawField(object, start_offset),
+ HeapObject::RawField(object, end_offset));
+ start_offset = end_offset;
+ end_offset = Min(object_size, end_offset + kProgressBarScanningChunk);
+ scan_until_end = incremental_marking_->marking_worklist()->IsFull();
+ } while (scan_until_end && start_offset < object_size);
+ chunk->set_progress_bar(start_offset);
+ if (start_offset < object_size) {
+ incremental_marking_->NotifyIncompleteScanOfObject(
+ object_size - (start_offset - already_scanned_offset));
}
- heap->incremental_marking()->NotifyIncompleteScanOfObject(
- object_size - (start_offset - already_scanned_offset));
}
} else {
- FixedArrayVisitor::Visit(map, object);
+ FixedArray::BodyDescriptor::IterateBody(object, object_size, this);
}
+ return object_size;
}
- static void VisitNativeContextIncremental(Map* map, HeapObject* object) {
- Context* context = Context::cast(object);
-
+ V8_INLINE int VisitNativeContext(Map* map, Context* context) {
// We will mark cache black with a separate pass when we finish marking.
// Note that GC can happen when the context is not fully initialized,
// so the cache can be undefined.
@@ -262,62 +283,48 @@ class IncrementalMarkingMarkingVisitor
HeapObject* heap_obj = HeapObject::cast(cache);
// Mark the object grey if it is white, do not enqueue it into the marking
// deque.
- Heap* heap = map->GetHeap();
- bool ignored =
- ObjectMarking::WhiteToGrey<IncrementalMarking::kAtomicity>(
- heap_obj, heap->incremental_marking()->marking_state(heap_obj));
- USE(ignored);
+ ObjectMarking::WhiteToGrey<IncrementalMarking::kAtomicity>(
+ heap_obj, incremental_marking_->marking_state(heap_obj));
}
}
- VisitNativeContext(map, context);
+ return Parent::VisitNativeContext(map, context);
}
- INLINE(static void VisitPointer(Heap* heap, HeapObject* object, Object** p)) {
+ V8_INLINE void VisitPointer(HeapObject* host, Object** p) final {
Object* target = *p;
if (target->IsHeapObject()) {
- heap->mark_compact_collector()->RecordSlot(object, p, target);
- MarkObject(heap, target);
+ collector_->RecordSlot(host, p, target);
+ MarkObject(target);
}
}
- INLINE(static void VisitPointers(Heap* heap, HeapObject* object,
- Object** start, Object** end)) {
+ V8_INLINE void VisitPointers(HeapObject* host, Object** start,
+ Object** end) final {
for (Object** p = start; p < end; p++) {
Object* target = *p;
if (target->IsHeapObject()) {
- heap->mark_compact_collector()->RecordSlot(object, p, target);
- MarkObject(heap, target);
+ collector_->RecordSlot(host, p, target);
+ MarkObject(target);
}
}
}
// Marks the object grey and pushes it on the marking stack.
- INLINE(static void MarkObject(Heap* heap, Object* obj)) {
- heap->incremental_marking()->WhiteToGreyAndPush(HeapObject::cast(obj));
+ V8_INLINE void MarkObject(Object* obj) {
+ incremental_marking_->WhiteToGreyAndPush(HeapObject::cast(obj));
}
// Marks the object black without pushing it on the marking stack.
// Returns true if object needed marking and false otherwise.
- INLINE(static bool MarkObjectWithoutPush(Heap* heap, Object* obj)) {
+ V8_INLINE bool MarkObjectWithoutPush(Object* obj) {
HeapObject* heap_object = HeapObject::cast(obj);
return ObjectMarking::WhiteToBlack<IncrementalMarking::kAtomicity>(
- heap_object, heap->incremental_marking()->marking_state(heap_object));
+ heap_object, incremental_marking_->marking_state(heap_object));
}
-};
-void IncrementalMarking::IterateBlackObject(HeapObject* object) {
- if (IsMarking() &&
- ObjectMarking::IsBlack<kAtomicity>(object, marking_state(object))) {
- Page* page = Page::FromAddress(object->address());
- if ((page->owner() != nullptr) && (page->owner()->identity() == LO_SPACE)) {
- // IterateBlackObject requires us to visit the whole object.
- page->ResetProgressBar();
- }
- Map* map = object->map();
- WhiteToGreyAndPush(map);
- IncrementalMarkingMarkingVisitor::IterateBody(map, object);
- }
-}
+ private:
+ IncrementalMarking* const incremental_marking_;
+};
class IncrementalMarkingRootMarkingVisitor : public RootVisitor {
public:
@@ -344,12 +351,6 @@ class IncrementalMarkingRootMarkingVisitor : public RootVisitor {
Heap* heap_;
};
-
-void IncrementalMarking::Initialize() {
- IncrementalMarkingMarkingVisitor::Initialize();
-}
-
-
void IncrementalMarking::SetOldSpacePageFlags(MemoryChunk* chunk,
bool is_marking,
bool is_compacting) {
@@ -569,7 +570,7 @@ void IncrementalMarking::StartMarking() {
PatchIncrementalMarkingRecordWriteStubs(heap_, mode);
- marking_deque()->StartUsing();
+ marking_worklist()->StartUsing();
ActivateIncrementalWriteBarrier();
@@ -580,16 +581,18 @@ void IncrementalMarking::StartMarking() {
}
#endif
- heap_->CompletelyClearInstanceofCache();
heap_->isolate()->compilation_cache()->MarkCompactPrologue();
+ if (FLAG_concurrent_marking && !black_allocation_) {
+ StartBlackAllocation();
+ }
+
// Mark strong roots grey.
IncrementalMarkingRootMarkingVisitor visitor(this);
heap_->IterateStrongRoots(&visitor, VISIT_ONLY_STRONG);
if (FLAG_concurrent_marking) {
- ConcurrentMarking* concurrent_marking = heap_->concurrent_marking();
- concurrent_marking->StartTask();
+ heap_->concurrent_marking()->ScheduleTasks();
}
// Ready to start incremental marking.
@@ -724,7 +727,7 @@ void IncrementalMarking::RetainMaps() {
DCHECK(retained_maps->Get(i)->IsWeakCell());
WeakCell* cell = WeakCell::cast(retained_maps->Get(i));
if (cell->cleared()) continue;
- int age = Smi::cast(retained_maps->Get(i + 1))->value();
+ int age = Smi::ToInt(retained_maps->Get(i + 1));
int new_age;
Map* map = Map::cast(cell->value());
if (i >= number_of_disposed_maps && !map_retaining_is_disabled &&
@@ -777,7 +780,7 @@ void IncrementalMarking::FinalizeIncrementally() {
ProcessWeakCells();
int marking_progress =
- heap_->mark_compact_collector()->marking_deque()->Size() +
+ heap_->mark_compact_collector()->marking_worklist()->Size() +
static_cast<int>(
heap_->local_embedder_heap_tracer()->NumberOfCachedWrappersToTrace());
@@ -807,13 +810,13 @@ void IncrementalMarking::FinalizeIncrementally() {
}
}
-
-void IncrementalMarking::UpdateMarkingDequeAfterScavenge() {
+void IncrementalMarking::UpdateMarkingWorklistAfterScavenge() {
if (!IsMarking()) return;
Map* filler_map = heap_->one_pointer_filler_map();
- marking_deque()->Update([this, filler_map](HeapObject* obj) -> HeapObject* {
+ marking_worklist()->Update([this, filler_map](HeapObject* obj,
+ HeapObject** out) -> bool {
DCHECK(obj->IsHeapObject());
// Only pointers to from space have to be updated.
if (heap_->InFromSpace(obj)) {
@@ -824,36 +827,44 @@ void IncrementalMarking::UpdateMarkingDequeAfterScavenge() {
      // If these objects are dead at scavenging time, their marking deque
// entries will not point to forwarding addresses. Hence, we can discard
// them.
- return nullptr;
+ return false;
}
HeapObject* dest = map_word.ToForwardingAddress();
DCHECK_IMPLIES(
ObjectMarking::IsWhite<kAtomicity>(obj, marking_state(obj)),
obj->IsFiller());
- return dest;
+ *out = dest;
+ return true;
} else if (heap_->InToSpace(obj)) {
// The object may be on a page that was moved in new space.
DCHECK(
Page::FromAddress(obj->address())->IsFlagSet(Page::SWEEP_TO_ITERATE));
- return ObjectMarking::IsBlack<kAtomicity>(obj,
- MarkingState::External(obj))
- ? obj
- : nullptr;
+ if (ObjectMarking::IsGrey<kAtomicity>(obj, MarkingState::External(obj))) {
+ *out = obj;
+ return true;
+ }
+ return false;
} else {
// The object may be on a page that was moved from new to old space.
if (Page::FromAddress(obj->address())
->IsFlagSet(Page::SWEEP_TO_ITERATE)) {
- return ObjectMarking::IsBlack<kAtomicity>(obj,
- MarkingState::External(obj))
- ? obj
- : nullptr;
+ if (ObjectMarking::IsGrey<kAtomicity>(obj,
+ MarkingState::External(obj))) {
+ *out = obj;
+ return true;
+ }
+ return false;
}
DCHECK_IMPLIES(
ObjectMarking::IsWhite<kAtomicity>(obj, marking_state(obj)),
obj->IsFiller());
// Skip one word filler objects that appear on the
      // stack when we perform an in-place array shift.
- return (obj->map() == filler_map) ? nullptr : obj;
+ if (obj->map() != filler_map) {
+ *out = obj;
+ return true;
+ }
+ return false;
}
});
}
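The Update() callback above replaces the old "return the object or nullptr" protocol with a bool return plus an out parameter. As a point of reference, here is a minimal standalone sketch of that filter-and-rewrite shape; TinyWorklist and Obj are invented for illustration and do not mirror the V8 Worklist API.

// worklist_update_sketch.cc -- hypothetical illustration only.
#include <cstdio>
#include <functional>
#include <vector>

struct Obj {
  int id;
  bool forwarded;
  Obj* target;
};

class TinyWorklist {
 public:
  void Push(Obj* o) { entries_.push_back(o); }
  // Keep an entry only if the callback returns true; the callback may replace
  // the entry (e.g. with a forwarding target) by writing through |out|.
  void Update(const std::function<bool(Obj*, Obj**)>& callback) {
    std::vector<Obj*> kept;
    for (Obj* o : entries_) {
      Obj* out = nullptr;
      if (callback(o, &out)) kept.push_back(out);
    }
    entries_.swap(kept);
  }
  size_t size() const { return entries_.size(); }

 private:
  std::vector<Obj*> entries_;
};

int main() {
  Obj a{1, false, nullptr};
  Obj moved_copy{3, false, nullptr};
  Obj b{2, true, &moved_copy};  // pretend b was evacuated to moved_copy
  TinyWorklist worklist;
  worklist.Push(&a);
  worklist.Push(&b);
  worklist.Update([](Obj* o, Obj** out) -> bool {
    if (o->forwarded) {  // follow the forwarding pointer
      *out = o->target;
      return true;
    }
    if (o->id < 0) return false;  // drop entries we consider dead
    *out = o;                     // keep the entry unchanged
    return true;
  });
  std::printf("entries after update: %zu\n", worklist.size());
  return 0;
}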
@@ -881,16 +892,37 @@ void IncrementalMarking::VisitObject(Map* map, HeapObject* obj, int size) {
}
DCHECK(ObjectMarking::IsBlack<kAtomicity>(obj, marking_state(obj)));
WhiteToGreyAndPush(map);
- IncrementalMarkingMarkingVisitor::IterateBody(map, obj);
+ IncrementalMarkingMarkingVisitor visitor(heap()->mark_compact_collector());
+ visitor.Visit(map, obj);
}
-intptr_t IncrementalMarking::ProcessMarkingDeque(
+void IncrementalMarking::ProcessBlackAllocatedObject(HeapObject* obj) {
+ if (IsMarking() &&
+ ObjectMarking::IsBlack<kAtomicity>(obj, marking_state(obj))) {
+ RevisitObject(obj);
+ }
+}
+
+void IncrementalMarking::RevisitObject(HeapObject* obj) {
+ DCHECK(IsMarking());
+ DCHECK(FLAG_concurrent_marking ||
+ ObjectMarking::IsBlack<kAtomicity>(obj, marking_state(obj)));
+ Page* page = Page::FromAddress(obj->address());
+ if ((page->owner() != nullptr) && (page->owner()->identity() == LO_SPACE)) {
+ page->ResetProgressBar();
+ }
+ Map* map = obj->map();
+ WhiteToGreyAndPush(map);
+ IncrementalMarkingMarkingVisitor visitor(heap()->mark_compact_collector());
+ visitor.Visit(map, obj);
+}
+
+intptr_t IncrementalMarking::ProcessMarkingWorklist(
intptr_t bytes_to_process, ForceCompletionAction completion) {
intptr_t bytes_processed = 0;
- while (!marking_deque()->IsEmpty() && (bytes_processed < bytes_to_process ||
- completion == FORCE_COMPLETION)) {
- HeapObject* obj = marking_deque()->Pop();
-
+ while (bytes_processed < bytes_to_process || completion == FORCE_COMPLETION) {
+ HeapObject* obj = marking_worklist()->Pop();
+ if (obj == nullptr) break;
// Left trimming may result in white, grey, or black filler objects on the
// marking deque. Ignore these objects.
if (obj->IsFiller()) {
@@ -919,7 +951,7 @@ void IncrementalMarking::Hurry() {
// forced e.g. in tests. It should not happen when COMPLETE was set when
// incremental marking finished and a regular GC was triggered after that
// because should_hurry_ will force a full GC.
- if (!marking_deque()->IsEmpty()) {
+ if (!marking_worklist()->IsEmpty()) {
double start = 0.0;
if (FLAG_trace_incremental_marking) {
start = heap_->MonotonicallyIncreasingTimeInMs();
@@ -929,7 +961,7 @@ void IncrementalMarking::Hurry() {
}
// TODO(gc) hurry can mark objects it encounters black as mutator
// was stopped.
- ProcessMarkingDeque(0, FORCE_COMPLETION);
+ ProcessMarkingWorklist(0, FORCE_COMPLETION);
state_ = COMPLETE;
if (FLAG_trace_incremental_marking) {
double end = heap_->MonotonicallyIncreasingTimeInMs();
@@ -1081,7 +1113,7 @@ double IncrementalMarking::AdvanceIncrementalMarking(
remaining_time_in_ms =
deadline_in_ms - heap()->MonotonicallyIncreasingTimeInMs();
} while (remaining_time_in_ms >= kStepSizeInMs && !IsComplete() &&
- !marking_deque()->IsEmpty());
+ !marking_worklist()->IsEmpty());
return remaining_time_in_ms;
}
@@ -1178,12 +1210,16 @@ size_t IncrementalMarking::Step(size_t bytes_to_process,
size_t bytes_processed = 0;
if (state_ == MARKING) {
- bytes_processed = ProcessMarkingDeque(bytes_to_process);
+ if (FLAG_trace_incremental_marking && FLAG_trace_concurrent_marking &&
+ FLAG_trace_gc_verbose) {
+ marking_worklist()->Print();
+ }
+ bytes_processed = ProcessMarkingWorklist(bytes_to_process);
if (step_origin == StepOrigin::kTask) {
bytes_marked_ahead_of_schedule_ += bytes_processed;
}
- if (marking_deque()->IsEmpty()) {
+ if (marking_worklist()->IsEmpty()) {
if (heap_->local_embedder_heap_tracer()
->ShouldFinalizeIncrementalMarking()) {
if (completion == FORCE_COMPLETION ||
@@ -1197,10 +1233,13 @@ size_t IncrementalMarking::Step(size_t bytes_to_process,
IncrementIdleMarkingDelayCounter();
}
} else {
- heap_->local_embedder_heap_tracer()->NotifyV8MarkingDequeWasEmpty();
+ heap_->local_embedder_heap_tracer()->NotifyV8MarkingWorklistWasEmpty();
}
}
}
+ if (FLAG_concurrent_marking) {
+ heap_->concurrent_marking()->RescheduleTasksIfNeeded();
+ }
double end = heap_->MonotonicallyIncreasingTimeInMs();
double duration = (end - start);
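The ProcessMarkingWorklist() change earlier in this file switches the drain loop to "pop until nullptr" under a byte budget, with FORCE_COMPLETION overriding the budget. A compilable toy sketch of that control flow, with all types invented, might look as follows.

// marking_step_sketch.cc -- standalone sketch of the budgeted worklist drain.
#include <cstdio>
#include <deque>

struct FakeObject { int size; };

class FakeWorklist {
 public:
  void Push(FakeObject* o) { objects_.push_back(o); }
  // Pop returns nullptr when empty, matching the "obj == nullptr -> break"
  // pattern in the patch.
  FakeObject* Pop() {
    if (objects_.empty()) return nullptr;
    FakeObject* o = objects_.front();
    objects_.pop_front();
    return o;
  }
 private:
  std::deque<FakeObject*> objects_;
};

enum ForceCompletionAction { DO_NOT_FORCE_COMPLETION, FORCE_COMPLETION };

long ProcessWorklist(FakeWorklist* worklist, long bytes_to_process,
                     ForceCompletionAction completion) {
  long bytes_processed = 0;
  while (bytes_processed < bytes_to_process || completion == FORCE_COMPLETION) {
    FakeObject* obj = worklist->Pop();
    if (obj == nullptr) break;     // worklist drained
    bytes_processed += obj->size;  // stand-in for visiting the object
  }
  return bytes_processed;
}

int main() {
  FakeObject a{100}, b{200}, c{300};
  FakeWorklist worklist;
  worklist.Push(&a);
  worklist.Push(&b);
  worklist.Push(&c);
  std::printf("%ld bytes\n",
              ProcessWorklist(&worklist, 150, DO_NOT_FORCE_COMPLETION));
  std::printf("%ld bytes\n", ProcessWorklist(&worklist, 0, FORCE_COMPLETION));
  return 0;
}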
diff --git a/deps/v8/src/heap/incremental-marking.h b/deps/v8/src/heap/incremental-marking.h
index 4a88ab3fae..6fe5c9768a 100644
--- a/deps/v8/src/heap/incremental-marking.h
+++ b/deps/v8/src/heap/incremental-marking.h
@@ -53,8 +53,6 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
bool paused_;
};
- static void Initialize();
-
explicit IncrementalMarking(Heap* heap);
MarkingState marking_state(HeapObject* object) const {
@@ -65,12 +63,11 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
return MarkingState::Internal(chunk);
}
- // Transfers mark bits without requiring proper object headers.
- void TransferMark(Heap* heap, HeapObject* from, HeapObject* to);
+ void NotifyLeftTrimming(HeapObject* from, HeapObject* to);
// Transfers color including live byte count, requiring properly set up
// objects.
- template <MarkBit::AccessMode access_mode = MarkBit::NON_ATOMIC>
+ template <AccessMode access_mode = AccessMode::NON_ATOMIC>
V8_INLINE void TransferColor(HeapObject* from, HeapObject* to) {
if (ObjectMarking::IsBlack<access_mode>(to, marking_state(to))) {
DCHECK(black_allocation());
@@ -139,7 +136,7 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
void FinalizeIncrementally();
- void UpdateMarkingDequeAfterScavenge();
+ void UpdateMarkingWorklistAfterScavenge();
void Hurry();
@@ -183,9 +180,9 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
#endif
#ifdef V8_CONCURRENT_MARKING
- static const MarkBit::AccessMode kAtomicity = MarkBit::AccessMode::ATOMIC;
+ static const AccessMode kAtomicity = AccessMode::ATOMIC;
#else
- static const MarkBit::AccessMode kAtomicity = MarkBit::AccessMode::NON_ATOMIC;
+ static const AccessMode kAtomicity = AccessMode::NON_ATOMIC;
#endif
void FinalizeSweeping();
@@ -212,6 +209,7 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
INLINE(void RecordWriteIntoCode(Code* host, RelocInfo* rinfo, Object* value));
INLINE(void RecordWriteOfCodeEntry(JSFunction* host, Object** slot,
Code* value));
+ INLINE(void RecordWrites(HeapObject* obj));
void RecordWriteSlow(HeapObject* obj, Object** slot, Object* value);
void RecordWriteIntoCodeSlow(Code* host, RelocInfo* rinfo, Object* value);
@@ -248,7 +246,7 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
bool IsIdleMarkingDelayCounterLimitReached();
- void IterateBlackObject(HeapObject* object);
+ void ProcessBlackAllocatedObject(HeapObject* obj);
Heap* heap() const { return heap_; }
@@ -262,13 +260,14 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
void AbortBlackAllocation();
- MarkingDeque* marking_deque() {
- SLOW_DCHECK(marking_deque_ != nullptr);
- return marking_deque_;
+ MarkCompactCollector::MarkingWorklist* marking_worklist() {
+ SLOW_DCHECK(marking_worklist_ != nullptr);
+ return marking_worklist_;
}
- void set_marking_deque(MarkingDeque* marking_deque) {
- marking_deque_ = marking_deque;
+ void set_marking_worklist(
+ MarkCompactCollector::MarkingWorklist* marking_worklist) {
+ marking_worklist_ = marking_worklist;
}
private:
@@ -311,15 +310,15 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
static void SetNewSpacePageFlags(MemoryChunk* chunk, bool is_marking);
- INLINE(void ProcessMarkingDeque());
-
- INLINE(intptr_t ProcessMarkingDeque(
+ INLINE(intptr_t ProcessMarkingWorklist(
intptr_t bytes_to_process,
ForceCompletionAction completion = DO_NOT_FORCE_COMPLETION));
INLINE(bool IsFixedArrayWithProgressBar(HeapObject* object));
INLINE(void VisitObject(Map* map, HeapObject* obj, int size));
+ void RevisitObject(HeapObject* obj);
+
void IncrementIdleMarkingDelayCounter();
void AdvanceIncrementalMarkingOnAllocation();
@@ -328,7 +327,7 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
size_t StepSizeToMakeProgress();
Heap* heap_;
- MarkingDeque* marking_deque_;
+ MarkCompactCollector::MarkingWorklist* marking_worklist_;
double start_time_ms_;
size_t initial_old_generation_size_;
diff --git a/deps/v8/src/heap/item-parallel-job.h b/deps/v8/src/heap/item-parallel-job.h
index 4c2b37e9e6..432d884bda 100644
--- a/deps/v8/src/heap/item-parallel-job.h
+++ b/deps/v8/src/heap/item-parallel-job.h
@@ -128,13 +128,15 @@ class ItemParallelJob {
// Adds an item to the job. Transfers ownership to the job.
void AddItem(Item* item) { items_.push_back(item); }
+ int NumberOfItems() const { return static_cast<int>(items_.size()); }
+ int NumberOfTasks() const { return static_cast<int>(tasks_.size()); }
+
void Run() {
DCHECK_GE(tasks_.size(), 0);
const size_t num_tasks = tasks_.size();
const size_t num_items = items_.size();
const size_t items_per_task = (num_items + num_tasks - 1) / num_tasks;
- CancelableTaskManager::Id* task_ids =
- new CancelableTaskManager::Id[num_tasks];
+ uint32_t* task_ids = new uint32_t[num_tasks];
size_t start_index = 0;
Task* main_task = nullptr;
Task* task = nullptr;
diff --git a/deps/v8/src/heap/local-allocator.h b/deps/v8/src/heap/local-allocator.h
new file mode 100644
index 0000000000..2d7f95909e
--- /dev/null
+++ b/deps/v8/src/heap/local-allocator.h
@@ -0,0 +1,99 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/globals.h"
+#include "src/heap/heap.h"
+#include "src/heap/spaces.h"
+
+namespace v8 {
+namespace internal {
+
+// Allocator encapsulating thread-local allocation. Assumes that all other
+// allocations also go through LocalAllocator.
+class LocalAllocator {
+ public:
+ static const int kLabSize = 32 * KB;
+ static const int kMaxLabObjectSize = 8 * KB;
+
+ explicit LocalAllocator(Heap* heap)
+ : heap_(heap),
+ new_space_(heap->new_space()),
+ compaction_spaces_(heap),
+ new_space_lab_(LocalAllocationBuffer::InvalidBuffer()) {}
+
+ // Needs to be called from the main thread to finalize this LocalAllocator.
+ void Finalize() {
+ heap_->old_space()->MergeCompactionSpace(compaction_spaces_.Get(OLD_SPACE));
+ // Give back remaining LAB space if this LocalAllocator's new space LAB
+ // sits right next to new space allocation top.
+ const AllocationInfo info = new_space_lab_.Close();
+ const Address top = new_space_->top();
+ if (info.limit() != nullptr && info.limit() == top) {
+ DCHECK_NOT_NULL(info.top());
+ *new_space_->allocation_top_address() = info.top();
+ }
+ }
+
+ template <AllocationSpace space>
+ AllocationResult Allocate(int object_size, AllocationAlignment alignment) {
+ switch (space) {
+ case NEW_SPACE:
+ return AllocateInNewSpace(object_size, alignment);
+ case OLD_SPACE:
+ return compaction_spaces_.Get(OLD_SPACE)->AllocateRaw(object_size,
+ alignment);
+ default:
+ // Only new and old space supported.
+ UNREACHABLE();
+ break;
+ }
+ }
+
+ private:
+ AllocationResult AllocateInNewSpace(int object_size,
+ AllocationAlignment alignment) {
+ if (object_size > kMaxLabObjectSize) {
+ return new_space_->AllocateRawSynchronized(object_size, alignment);
+ }
+ return AllocateInLAB(object_size, alignment);
+ }
+
+ inline bool NewLocalAllocationBuffer() {
+ LocalAllocationBuffer saved_lab_ = new_space_lab_;
+ AllocationResult result =
+ new_space_->AllocateRawSynchronized(kLabSize, kWordAligned);
+ new_space_lab_ = LocalAllocationBuffer::FromResult(heap_, result, kLabSize);
+ if (new_space_lab_.IsValid()) {
+ new_space_lab_.TryMerge(&saved_lab_);
+ return true;
+ }
+ return false;
+ }
+
+ AllocationResult AllocateInLAB(int object_size,
+ AllocationAlignment alignment) {
+ AllocationResult allocation;
+ if (!new_space_lab_.IsValid() && !NewLocalAllocationBuffer()) {
+ return AllocationResult::Retry(OLD_SPACE);
+ }
+ allocation = new_space_lab_.AllocateRawAligned(object_size, alignment);
+ if (allocation.IsRetry()) {
+ if (!NewLocalAllocationBuffer()) {
+ return AllocationResult::Retry(OLD_SPACE);
+ } else {
+ allocation = new_space_lab_.AllocateRawAligned(object_size, alignment);
+ CHECK(!allocation.IsRetry());
+ }
+ }
+ return allocation;
+ }
+
+ Heap* const heap_;
+ NewSpace* const new_space_;
+ CompactionSpaceCollection compaction_spaces_;
+ LocalAllocationBuffer new_space_lab_;
+};
+
+} // namespace internal
+} // namespace v8
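The new-space path of the allocator above services small objects from a local allocation buffer and refills the LAB on demand, falling back to a retry in old space when no buffer can be obtained. A deliberately simplified, standalone model of the bump-pointer-with-refill idea is sketched below; it does not reproduce the V8 LocalAllocationBuffer API and uses malloc in place of the new-space allocator.

// lab_sketch.cc -- hypothetical, simplified model of a local allocation buffer.
#include <cstddef>
#include <cstdio>
#include <cstdlib>

class LabAllocatorSketch {
 public:
  static constexpr size_t kLabSize = 32 * 1024;
  static constexpr size_t kMaxLabObjectSize = 8 * 1024;

  ~LabAllocatorSketch() { std::free(lab_start_); }

  // Large objects bypass the LAB; small objects are bump-allocated from the
  // current buffer, refilling it when exhausted.
  void* Allocate(size_t size) {
    if (size > kMaxLabObjectSize) return std::malloc(size);  // direct path
    if (top_ == nullptr || size > static_cast<size_t>(limit_ - top_)) {
      if (!Refill()) return nullptr;  // analogous to Retry(OLD_SPACE)
    }
    void* result = top_;
    top_ += size;
    return result;
  }

 private:
  bool Refill() {
    std::free(lab_start_);  // a real LAB would merge/return leftover space
    lab_start_ = static_cast<char*>(std::malloc(kLabSize));
    if (lab_start_ == nullptr) return false;
    top_ = lab_start_;
    limit_ = lab_start_ + kLabSize;
    return true;
  }

  char* lab_start_ = nullptr;
  char* top_ = nullptr;
  char* limit_ = nullptr;
};

int main() {
  LabAllocatorSketch allocator;
  void* small = allocator.Allocate(64);         // served from the LAB
  void* large = allocator.Allocate(16 * 1024);  // bypasses the LAB
  std::printf("small=%p large=%p\n", small, large);
  std::free(large);  // only the direct-path allocation is freed here
  return 0;
}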
diff --git a/deps/v8/src/heap/mark-compact-inl.h b/deps/v8/src/heap/mark-compact-inl.h
index b8e4d46fc3..8873d213c2 100644
--- a/deps/v8/src/heap/mark-compact-inl.h
+++ b/deps/v8/src/heap/mark-compact-inl.h
@@ -13,23 +13,16 @@ namespace v8 {
namespace internal {
void MarkCompactCollector::PushBlack(HeapObject* obj) {
- DCHECK((ObjectMarking::IsBlack<MarkBit::NON_ATOMIC>(
+ DCHECK((ObjectMarking::IsBlack<AccessMode::NON_ATOMIC>(
obj, MarkingState::Internal(obj))));
- if (!marking_deque()->Push(obj)) {
- ObjectMarking::BlackToGrey<MarkBit::NON_ATOMIC>(
+ if (!marking_worklist()->Push(obj)) {
+ ObjectMarking::BlackToGrey<AccessMode::NON_ATOMIC>(
obj, MarkingState::Internal(obj));
}
}
-void MarkCompactCollector::UnshiftBlack(HeapObject* obj) {
- DCHECK(ObjectMarking::IsBlack(obj, MarkingState::Internal(obj)));
- if (!marking_deque()->Unshift(obj)) {
- ObjectMarking::BlackToGrey(obj, MarkingState::Internal(obj));
- }
-}
-
void MarkCompactCollector::MarkObject(HeapObject* obj) {
- if (ObjectMarking::WhiteToBlack<MarkBit::NON_ATOMIC>(
+ if (ObjectMarking::WhiteToBlack<AccessMode::NON_ATOMIC>(
obj, MarkingState::Internal(obj))) {
PushBlack(obj);
}
@@ -48,95 +41,45 @@ void MarkCompactCollector::RecordSlot(HeapObject* object, Object** slot,
}
}
-
-void CodeFlusher::AddCandidate(SharedFunctionInfo* shared_info) {
- if (GetNextCandidate(shared_info) == nullptr) {
- SetNextCandidate(shared_info, shared_function_info_candidates_head_);
- shared_function_info_candidates_head_ = shared_info;
+template <LiveObjectIterationMode mode>
+LiveObjectRange<mode>::iterator::iterator(MemoryChunk* chunk,
+ MarkingState state, Address start)
+ : chunk_(chunk),
+ one_word_filler_map_(chunk->heap()->one_pointer_filler_map()),
+ two_word_filler_map_(chunk->heap()->two_pointer_filler_map()),
+ free_space_map_(chunk->heap()->free_space_map()),
+ it_(chunk, state) {
+ it_.Advance(Bitmap::IndexToCell(
+ Bitmap::CellAlignIndex(chunk_->AddressToMarkbitIndex(start))));
+ if (!it_.Done()) {
+ cell_base_ = it_.CurrentCellBase();
+ current_cell_ = *it_.CurrentCell();
+ AdvanceToNextValidObject();
+ } else {
+ current_object_ = nullptr;
}
}
-
-void CodeFlusher::AddCandidate(JSFunction* function) {
- DCHECK(function->code() == function->shared()->code());
- if (function->next_function_link()->IsUndefined(isolate_)) {
- SetNextCandidate(function, jsfunction_candidates_head_);
- jsfunction_candidates_head_ = function;
- }
-}
-
-
-JSFunction** CodeFlusher::GetNextCandidateSlot(JSFunction* candidate) {
- return reinterpret_cast<JSFunction**>(
- HeapObject::RawField(candidate, JSFunction::kNextFunctionLinkOffset));
-}
-
-
-JSFunction* CodeFlusher::GetNextCandidate(JSFunction* candidate) {
- Object* next_candidate = candidate->next_function_link();
- return reinterpret_cast<JSFunction*>(next_candidate);
-}
-
-
-void CodeFlusher::SetNextCandidate(JSFunction* candidate,
- JSFunction* next_candidate) {
- candidate->set_next_function_link(next_candidate, UPDATE_WEAK_WRITE_BARRIER);
-}
-
-
-void CodeFlusher::ClearNextCandidate(JSFunction* candidate, Object* undefined) {
- DCHECK(undefined->IsUndefined(candidate->GetIsolate()));
- candidate->set_next_function_link(undefined, SKIP_WRITE_BARRIER);
-}
-
-
-SharedFunctionInfo* CodeFlusher::GetNextCandidate(
- SharedFunctionInfo* candidate) {
- Object* next_candidate = candidate->code()->gc_metadata();
- return reinterpret_cast<SharedFunctionInfo*>(next_candidate);
-}
-
-
-void CodeFlusher::SetNextCandidate(SharedFunctionInfo* candidate,
- SharedFunctionInfo* next_candidate) {
- candidate->code()->set_gc_metadata(next_candidate);
-}
-
-
-void CodeFlusher::ClearNextCandidate(SharedFunctionInfo* candidate) {
- candidate->code()->set_gc_metadata(NULL, SKIP_WRITE_BARRIER);
-}
-
-void CodeFlusher::VisitListHeads(RootVisitor* visitor) {
- visitor->VisitRootPointer(
- Root::kCodeFlusher,
- reinterpret_cast<Object**>(&jsfunction_candidates_head_));
- visitor->VisitRootPointer(
- Root::kCodeFlusher,
- reinterpret_cast<Object**>(&shared_function_info_candidates_head_));
+template <LiveObjectIterationMode mode>
+typename LiveObjectRange<mode>::iterator& LiveObjectRange<mode>::iterator::
+operator++() {
+ AdvanceToNextValidObject();
+ return *this;
}
-template <typename StaticVisitor>
-void CodeFlusher::IteratePointersToFromSpace() {
- Heap* heap = isolate_->heap();
- JSFunction* candidate = jsfunction_candidates_head_;
- while (candidate != nullptr) {
- JSFunction** slot = GetNextCandidateSlot(candidate);
- if (heap->InFromSpace(*slot)) {
- StaticVisitor::VisitPointer(heap, candidate,
- reinterpret_cast<Object**>(slot));
- }
- candidate = GetNextCandidate(candidate);
- }
+template <LiveObjectIterationMode mode>
+typename LiveObjectRange<mode>::iterator LiveObjectRange<mode>::iterator::
+operator++(int) {
+ iterator retval = *this;
+ ++(*this);
+ return retval;
}
-template <LiveObjectIterationMode T>
-HeapObject* LiveObjectIterator<T>::Next() {
- Map* one_word_filler = heap()->one_pointer_filler_map();
- Map* two_word_filler = heap()->two_pointer_filler_map();
- Map* free_space_map = heap()->free_space_map();
+template <LiveObjectIterationMode mode>
+void LiveObjectRange<mode>::iterator::AdvanceToNextValidObject() {
while (!it_.Done()) {
HeapObject* object = nullptr;
+ int size = 0;
while (current_cell_ != 0) {
uint32_t trailing_zeros = base::bits::CountTrailingZeros32(current_cell_);
Address addr = cell_base_ + trailing_zeros * kPointerSize;
@@ -144,10 +87,8 @@ HeapObject* LiveObjectIterator<T>::Next() {
      // Clear the first bit of the found object.
current_cell_ &= ~(1u << trailing_zeros);
- uint32_t second_bit_index = 0;
- if (trailing_zeros < Bitmap::kBitIndexMask) {
- second_bit_index = 1u << (trailing_zeros + 1);
- } else {
+ uint32_t second_bit_index = 1u << (trailing_zeros + 1);
+ if (trailing_zeros >= Bitmap::kBitIndexMask) {
second_bit_index = 0x1;
// The overlapping case; there has to exist a cell after the current
// cell.
@@ -155,11 +96,9 @@ HeapObject* LiveObjectIterator<T>::Next() {
// last word is a one word filler, we are not allowed to advance. In
// that case we can return immediately.
if (!it_.Advance()) {
- DCHECK(HeapObject::FromAddress(addr)->map() ==
- HeapObject::FromAddress(addr)
- ->GetHeap()
- ->one_pointer_filler_map());
- return nullptr;
+ DCHECK(HeapObject::FromAddress(addr)->map() == one_word_filler_map_);
+ current_object_ = nullptr;
+ return;
}
cell_base_ = it_.CurrentCellBase();
current_cell_ = *it_.CurrentCell();
@@ -172,7 +111,8 @@ HeapObject* LiveObjectIterator<T>::Next() {
// object ends.
HeapObject* black_object = HeapObject::FromAddress(addr);
map = base::NoBarrierAtomicValue<Map*>::FromAddress(addr)->Value();
- Address end = addr + black_object->SizeFromMap(map) - kPointerSize;
+ size = black_object->SizeFromMap(map);
+ Address end = addr + size - kPointerSize;
// One word filler objects do not borrow the second mark bit. We have
// to jump over the advancing and clearing part.
// Note that we know that we are at a one word filler when
@@ -193,12 +133,13 @@ HeapObject* LiveObjectIterator<T>::Next() {
current_cell_ &= ~(end_index_mask + end_index_mask - 1);
}
- if (T == kBlackObjects || T == kAllLiveObjects) {
+ if (mode == kBlackObjects || mode == kAllLiveObjects) {
object = black_object;
}
- } else if ((T == kGreyObjects || T == kAllLiveObjects)) {
+ } else if ((mode == kGreyObjects || mode == kAllLiveObjects)) {
map = base::NoBarrierAtomicValue<Map*>::FromAddress(addr)->Value();
object = HeapObject::FromAddress(addr);
+ size = object->SizeFromMap(map);
}
// We found a live object.
@@ -206,8 +147,8 @@ HeapObject* LiveObjectIterator<T>::Next() {
// Do not use IsFiller() here. This may cause a data race for reading
      // out the instance type when a new map is concurrently written into
// this object while iterating over the object.
- if (map == one_word_filler || map == two_word_filler ||
- map == free_space_map) {
+ if (map == one_word_filler_map_ || map == two_word_filler_map_ ||
+ map == free_space_map_) {
// There are two reasons why we can get black or grey fillers:
// 1) Black areas together with slack tracking may result in black one
// word filler objects.
@@ -227,9 +168,23 @@ HeapObject* LiveObjectIterator<T>::Next() {
current_cell_ = *it_.CurrentCell();
}
}
- if (object != nullptr) return object;
+ if (object != nullptr) {
+ current_object_ = object;
+ current_size_ = size;
+ return;
+ }
}
- return nullptr;
+ current_object_ = nullptr;
+}
+
+template <LiveObjectIterationMode mode>
+typename LiveObjectRange<mode>::iterator LiveObjectRange<mode>::begin() {
+ return iterator(chunk_, state_, start_);
+}
+
+template <LiveObjectIterationMode mode>
+typename LiveObjectRange<mode>::iterator LiveObjectRange<mode>::end() {
+ return iterator(chunk_, state_, end_);
}
} // namespace internal
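LiveObjectRange replaces the hand-rolled LiveObjectIterator::Next() loop with a begin()/end() pair usable in range-based for. A toy standalone analogue of that iterator/range shape is given below; FakePage and LiveRange are invented names, and a plain vector of bools stands in for the mark bitmap.

// live_range_sketch.cc -- toy analogue of a range over live objects.
#include <cstdio>
#include <utility>
#include <vector>

struct FakeObject { int size_in_slots; };

// A "page" is just a vector of objects plus a bitmap marking the live ones.
struct FakePage {
  std::vector<FakeObject> slots;
  std::vector<bool> live;
};

class LiveRange {
 public:
  class iterator {
   public:
    iterator(const FakePage* page, size_t index) : page_(page), index_(index) {
      SkipDead();
    }
    std::pair<const FakeObject*, int> operator*() const {
      return {&page_->slots[index_], page_->slots[index_].size_in_slots};
    }
    iterator& operator++() { ++index_; SkipDead(); return *this; }
    bool operator!=(const iterator& other) const {
      return index_ != other.index_;
    }
   private:
    // Advance to the next set bit in the "mark bitmap".
    void SkipDead() {
      while (index_ < page_->live.size() && !page_->live[index_]) ++index_;
    }
    const FakePage* page_;
    size_t index_;
  };

  explicit LiveRange(const FakePage* page) : page_(page) {}
  iterator begin() const { return iterator(page_, 0); }
  iterator end() const { return iterator(page_, page_->live.size()); }

 private:
  const FakePage* page_;
};

int main() {
  FakePage page{{{1}, {2}, {3}}, {true, false, true}};
  // Range-based iteration over live objects, analogous to
  // for (auto object_and_size : LiveObjectRange<kGreyObjects>(p, state)).
  for (auto object_and_size : LiveRange(&page)) {
    std::printf("live object of size %d\n", object_and_size.second);
  }
  return 0;
}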
diff --git a/deps/v8/src/heap/mark-compact.cc b/deps/v8/src/heap/mark-compact.cc
index d970e1a50e..cc47333f1d 100644
--- a/deps/v8/src/heap/mark-compact.cc
+++ b/deps/v8/src/heap/mark-compact.cc
@@ -4,10 +4,11 @@
#include "src/heap/mark-compact.h"
+#include <unordered_map>
+
#include "src/base/atomicops.h"
#include "src/base/bits.h"
#include "src/base/sys-info.h"
-#include "src/cancelable-task.h"
#include "src/code-stubs.h"
#include "src/compilation-cache.h"
#include "src/deoptimizer.h"
@@ -24,9 +25,8 @@
#include "src/heap/object-stats.h"
#include "src/heap/objects-visiting-inl.h"
#include "src/heap/objects-visiting.h"
-#include "src/heap/page-parallel-job.h"
#include "src/heap/spaces-inl.h"
-#include "src/heap/workstealing-marking-deque.h"
+#include "src/heap/worklist.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
#include "src/tracing/tracing-category-observer.h"
@@ -65,6 +65,8 @@ class MarkingVerifier : public ObjectVisitor, public RootVisitor {
virtual void VerifyPointers(Object** start, Object** end) = 0;
+ virtual bool IsMarked(HeapObject* object) = 0;
+
void VisitPointers(HeapObject* host, Object** start, Object** end) override {
VerifyPointers(start, end);
}
@@ -96,7 +98,7 @@ void MarkingVerifier::VerifyMarkingOnPage(const Page& page,
// One word fillers at the end of a black area can be grey.
if (ObjectMarking::IsBlackOrGrey(object, state) &&
object->map() != heap_->one_pointer_filler_map()) {
- CHECK(ObjectMarking::IsBlack(object, state));
+ CHECK(IsMarked(object));
CHECK(current >= next_object_must_be_here_or_later);
object->Iterate(this);
next_object_must_be_here_or_later = current + object->Size();
@@ -165,6 +167,10 @@ class FullMarkingVerifier : public MarkingVerifier {
return MarkingState::Internal(object);
}
+ bool IsMarked(HeapObject* object) override {
+ return ObjectMarking::IsBlack(object, marking_state(object));
+ }
+
void VerifyPointers(Object** start, Object** end) override {
for (Object** current = start; current < end; current++) {
if ((*current)->IsHeapObject()) {
@@ -202,6 +208,10 @@ class YoungGenerationMarkingVerifier : public MarkingVerifier {
return MarkingState::External(object);
}
+ bool IsMarked(HeapObject* object) override {
+ return ObjectMarking::IsGrey(object, marking_state(object));
+ }
+
void Run() override {
VerifyRoots(VISIT_ALL_IN_SCAVENGE);
VerifyMarking(heap_->new_space());
@@ -212,7 +222,7 @@ class YoungGenerationMarkingVerifier : public MarkingVerifier {
if ((*current)->IsHeapObject()) {
HeapObject* object = HeapObject::cast(*current);
if (!heap_->InNewSpace(object)) return;
- CHECK(ObjectMarking::IsBlackOrGrey(object, marking_state(object)));
+ CHECK(IsMarked(object));
}
}
}
@@ -345,22 +355,42 @@ static int NumberOfAvailableCores() {
}
int MarkCompactCollectorBase::NumberOfParallelCompactionTasks(int pages) {
+ DCHECK_GT(pages, 0);
return FLAG_parallel_compaction ? Min(NumberOfAvailableCores(), pages) : 1;
}
-int MarkCompactCollectorBase::NumberOfPointerUpdateTasks(int pages) {
+int MarkCompactCollectorBase::NumberOfParallelPointerUpdateTasks(int pages,
+ int slots) {
+ DCHECK_GT(pages, 0);
// Limit the number of update tasks as task creation often dominates the
// actual work that is being done.
- static const int kMaxPointerUpdateTasks = 8;
+ const int kMaxPointerUpdateTasks = 8;
+ const int kSlotsPerTask = 600;
+ const int wanted_tasks =
+ (slots >= 0) ? Max(1, Min(pages, slots / kSlotsPerTask)) : pages;
return FLAG_parallel_pointer_update
- ? Min(kMaxPointerUpdateTasks, Min(NumberOfAvailableCores(), pages))
+ ? Min(kMaxPointerUpdateTasks,
+ Min(NumberOfAvailableCores(), wanted_tasks))
: 1;
}
-int MinorMarkCompactCollector::NumberOfMarkingTasks() {
- return FLAG_minor_mc_parallel_marking
- ? Min(NumberOfAvailableCores(), kNumMarkers)
- : 1;
+int MarkCompactCollectorBase::NumberOfParallelToSpacePointerUpdateTasks(
+ int pages) {
+ DCHECK_GT(pages, 0);
+ // No cap needed because all pages we need to process are fully filled with
+ // interesting objects.
+ return FLAG_parallel_pointer_update ? Min(NumberOfAvailableCores(), pages)
+ : 1;
+}
+
+int MinorMarkCompactCollector::NumberOfParallelMarkingTasks(int pages) {
+ DCHECK_GT(pages, 0);
+ if (!FLAG_minor_mc_parallel_marking) return 1;
+ // Pages are not private to markers but we can still use them to estimate the
+ // amount of marking that is required.
+ const int kPagesPerTask = 2;
+ const int wanted_tasks = Max(1, pages / kPagesPerTask);
+ return Min(NumberOfAvailableCores(), Min(wanted_tasks, kNumMarkers));
}
MarkCompactCollector::MarkCompactCollector(Heap* heap)
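The task-count heuristics above clamp the wanted parallelism by pages, recorded slots, available cores, and a hard cap. A standalone sketch of the same arithmetic follows; ComputePointerUpdateTasks is an invented helper and std::thread::hardware_concurrency stands in for NumberOfAvailableCores.

// task_count_sketch.cc -- illustration of the pointer-update task heuristic.
#include <algorithm>
#include <cstdio>
#include <thread>

int ComputePointerUpdateTasks(int pages, int slots, bool parallel_enabled) {
  const int kMaxPointerUpdateTasks = 8;  // hard cap from the patch
  const int kSlotsPerTask = 600;         // target slots handled per task
  if (!parallel_enabled) return 1;
  // If a slot count is known, aim for ~kSlotsPerTask slots per task but never
  // more tasks than pages; otherwise fall back to one task per page.
  const int wanted =
      (slots >= 0) ? std::max(1, std::min(pages, slots / kSlotsPerTask))
                   : pages;
  const int cores =
      std::max(1, static_cast<int>(std::thread::hardware_concurrency()));
  return std::min(kMaxPointerUpdateTasks, std::min(cores, wanted));
}

int main() {
  std::printf("%d\n", ComputePointerUpdateTasks(16, 1200, true));   // few slots
  std::printf("%d\n", ComputePointerUpdateTasks(16, -1, true));     // unknown
  std::printf("%d\n", ComputePointerUpdateTasks(16, 1200, false));  // serial
  return 0;
}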
@@ -374,9 +404,9 @@ MarkCompactCollector::MarkCompactCollector(Heap* heap)
compacting_(false),
black_allocation_(false),
have_code_to_deoptimize_(false),
- marking_deque_(heap),
- code_flusher_(nullptr),
+ marking_worklist_(heap),
sweeper_(heap) {
+ old_to_new_slots_ = -1;
}
void MarkCompactCollector::SetUp() {
@@ -384,22 +414,14 @@ void MarkCompactCollector::SetUp() {
DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
DCHECK(strcmp(Marking::kGreyBitPattern, "10") == 0);
DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
- marking_deque()->SetUp();
-
- if (FLAG_flush_code) {
- code_flusher_ = new CodeFlusher(isolate());
- if (FLAG_trace_code_flushing) {
- PrintF("[code-flushing is now on]\n");
- }
- }
+ marking_worklist()->SetUp();
}
void MinorMarkCompactCollector::SetUp() {}
void MarkCompactCollector::TearDown() {
AbortCompaction();
- marking_deque()->TearDown();
- delete code_flusher_;
+ marking_worklist()->TearDown();
}
void MinorMarkCompactCollector::TearDown() {}
@@ -407,7 +429,7 @@ void MinorMarkCompactCollector::TearDown() {}
void MarkCompactCollector::AddEvacuationCandidate(Page* p) {
DCHECK(!p->NeverEvacuate());
p->MarkEvacuationCandidate();
- evacuation_candidates_.Add(p);
+ evacuation_candidates_.push_back(p);
}
@@ -422,7 +444,7 @@ static void TraceFragmentation(PagedSpace* space) {
bool MarkCompactCollector::StartCompaction() {
if (!compacting_) {
- DCHECK(evacuation_candidates_.length() == 0);
+ DCHECK(evacuation_candidates_.empty());
CollectEvacuationCandidates(heap()->old_space());
@@ -436,7 +458,7 @@ bool MarkCompactCollector::StartCompaction() {
TraceFragmentation(heap()->map_space());
}
- compacting_ = evacuation_candidates_.length() > 0;
+ compacting_ = !evacuation_candidates_.empty();
}
return compacting_;
@@ -547,14 +569,12 @@ void MarkCompactCollector::ClearMarkbits() {
heap_->lo_space()->ClearMarkingStateOfLiveObjects();
}
-class MarkCompactCollector::Sweeper::SweeperTask final : public CancelableTask {
+class MarkCompactCollector::Sweeper::SweeperTask : public v8::Task {
public:
- SweeperTask(Isolate* isolate, Sweeper* sweeper,
- base::Semaphore* pending_sweeper_tasks,
+ SweeperTask(Sweeper* sweeper, base::Semaphore* pending_sweeper_tasks,
base::AtomicNumber<intptr_t>* num_sweeping_tasks,
AllocationSpace space_to_start)
- : CancelableTask(isolate),
- sweeper_(sweeper),
+ : sweeper_(sweeper),
pending_sweeper_tasks_(pending_sweeper_tasks),
num_sweeping_tasks_(num_sweeping_tasks),
space_to_start_(space_to_start) {}
@@ -562,7 +582,8 @@ class MarkCompactCollector::Sweeper::SweeperTask final : public CancelableTask {
virtual ~SweeperTask() {}
private:
- void RunInternal() final {
+ // v8::Task overrides.
+ void Run() override {
DCHECK_GE(space_to_start_, FIRST_SPACE);
DCHECK_LE(space_to_start_, LAST_PAGED_SPACE);
const int offset = space_to_start_ - FIRST_SPACE;
@@ -577,9 +598,9 @@ class MarkCompactCollector::Sweeper::SweeperTask final : public CancelableTask {
pending_sweeper_tasks_->Signal();
}
- Sweeper* const sweeper_;
- base::Semaphore* const pending_sweeper_tasks_;
- base::AtomicNumber<intptr_t>* const num_sweeping_tasks_;
+ Sweeper* sweeper_;
+ base::Semaphore* pending_sweeper_tasks_;
+ base::AtomicNumber<intptr_t>* num_sweeping_tasks_;
AllocationSpace space_to_start_;
DISALLOW_COPY_AND_ASSIGN(SweeperTask);
@@ -597,19 +618,15 @@ void MarkCompactCollector::Sweeper::StartSweeping() {
}
void MarkCompactCollector::Sweeper::StartSweeperTasks() {
- DCHECK_EQ(0, num_tasks_);
- DCHECK_EQ(0, num_sweeping_tasks_.Value());
if (FLAG_concurrent_sweeping && sweeping_in_progress_) {
ForAllSweepingSpaces([this](AllocationSpace space) {
if (space == NEW_SPACE) return;
num_sweeping_tasks_.Increment(1);
- SweeperTask* task = new SweeperTask(heap_->isolate(), this,
- &pending_sweeper_tasks_semaphore_,
- &num_sweeping_tasks_, space);
- DCHECK_LT(num_tasks_, kMaxSweeperTasks);
- task_ids_[num_tasks_++] = task->id();
+ semaphore_counter_++;
V8::GetCurrentPlatform()->CallOnBackgroundThread(
- task, v8::Platform::kShortRunningTask);
+ new SweeperTask(this, &pending_sweeper_tasks_semaphore_,
+ &num_sweeping_tasks_, space),
+ v8::Platform::kShortRunningTask);
});
}
}
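The reworked sweeper schedules plain v8::Tasks, counts how many it started, and later waits on a semaphore once per started task instead of aborting cancelable tasks. A minimal standalone model of that start/count/wait pattern is sketched below; the names are invented and C++20 std::counting_semaphore stands in for base::Semaphore.

// sweeper_wait_sketch.cc -- model of "count scheduled tasks, wait per task".
#include <cstdio>
#include <semaphore>
#include <thread>
#include <vector>

int main() {
  std::counting_semaphore<64> pending_tasks(0);
  int scheduled = 0;
  std::vector<std::thread> workers;

  // "StartSweeperTasks": spawn one background task per space and count it.
  for (int space = 0; space < 3; space++) {
    scheduled++;
    workers.emplace_back([space, &pending_tasks]() {
      std::printf("sweeping space %d\n", space);
      pending_tasks.release();  // each task signals completion exactly once
    });
  }

  // "EnsureCompleted": wait once per scheduled task, then join the threads.
  while (scheduled > 0) {
    pending_tasks.acquire();
    scheduled--;
  }
  for (std::thread& t : workers) t.join();
  std::printf("all sweeping tasks completed\n");
  return 0;
}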
@@ -637,8 +654,10 @@ void MarkCompactCollector::SweepAndRefill(CompactionSpace* space) {
Page* MarkCompactCollector::Sweeper::GetSweptPageSafe(PagedSpace* space) {
base::LockGuard<base::Mutex> guard(&mutex_);
SweptList& list = swept_list_[space->identity()];
- if (list.length() > 0) {
- return list.RemoveLast();
+ if (!list.empty()) {
+ auto last_page = list.back();
+ list.pop_back();
+ return last_page;
}
return nullptr;
}
@@ -652,19 +671,15 @@ void MarkCompactCollector::Sweeper::EnsureCompleted() {
[this](AllocationSpace space) { ParallelSweepSpace(space, 0); });
if (FLAG_concurrent_sweeping) {
- for (int i = 0; i < num_tasks_; i++) {
- if (heap_->isolate()->cancelable_task_manager()->TryAbort(task_ids_[i]) !=
- CancelableTaskManager::kTaskAborted) {
- pending_sweeper_tasks_semaphore_.Wait();
- }
+ while (semaphore_counter_ > 0) {
+ pending_sweeper_tasks_semaphore_.Wait();
+ semaphore_counter_--;
}
- num_tasks_ = 0;
- num_sweeping_tasks_.SetValue(0);
}
ForAllSweepingSpaces([this](AllocationSpace space) {
if (space == NEW_SPACE) {
- swept_list_[NEW_SPACE].Clear();
+ swept_list_[NEW_SPACE].clear();
}
DCHECK(sweeping_list_[space].empty());
});
@@ -889,9 +904,9 @@ void MarkCompactCollector::AbortCompaction() {
p->ClearEvacuationCandidate();
}
compacting_ = false;
- evacuation_candidates_.Rewind(0);
+ evacuation_candidates_.clear();
}
- DCHECK_EQ(0, evacuation_candidates_.length());
+ DCHECK(evacuation_candidates_.empty());
}
@@ -916,7 +931,7 @@ void MarkCompactCollector::Prepare() {
// them here.
heap()->memory_allocator()->unmapper()->WaitUntilCompleted();
- heap()->concurrent_marking()->EnsureTaskCompleted();
+ heap()->concurrent_marking()->EnsureCompleted();
// Clear marking bits if incremental marking is aborted.
if (was_marked_incrementally_ && heap_->ShouldAbortIncrementalMarking()) {
@@ -928,7 +943,7 @@ void MarkCompactCollector::Prepare() {
AbortTransitionArrays();
AbortCompaction();
heap_->local_embedder_heap_tracer()->AbortTracing();
- marking_deque()->Clear();
+ marking_worklist()->Clear();
was_marked_incrementally_ = false;
}
@@ -966,8 +981,7 @@ void MarkCompactCollector::Finish() {
}
// The hashing of weak_object_to_code_table is no longer valid.
- heap()->weak_object_to_code_table()->Rehash(
- heap()->isolate()->factory()->undefined_value());
+ heap()->weak_object_to_code_table()->Rehash();
// Clear the marking state of live large objects.
heap_->lo_space()->ClearMarkingStateOfLiveObjects();
@@ -1023,418 +1037,81 @@ void MarkCompactCollector::Finish() {
// and continue with marking. This process repeats until all reachable
// objects have been marked.
-void CodeFlusher::ProcessJSFunctionCandidates() {
- Code* lazy_compile = isolate_->builtins()->builtin(Builtins::kCompileLazy);
- Code* interpreter_entry_trampoline =
- isolate_->builtins()->builtin(Builtins::kInterpreterEntryTrampoline);
- Object* undefined = isolate_->heap()->undefined_value();
-
- JSFunction* candidate = jsfunction_candidates_head_;
- JSFunction* next_candidate;
- while (candidate != NULL) {
- next_candidate = GetNextCandidate(candidate);
- ClearNextCandidate(candidate, undefined);
-
- SharedFunctionInfo* shared = candidate->shared();
-
- Code* code = shared->code();
- if (ObjectMarking::IsWhite(code, MarkingState::Internal(code))) {
- if (FLAG_trace_code_flushing && shared->is_compiled()) {
- PrintF("[code-flushing clears: ");
- shared->ShortPrint();
- PrintF(" - age: %d]\n", code->GetAge());
- }
- // Always flush the optimized code.
- if (candidate->has_feedback_vector()) {
- candidate->feedback_vector()->ClearOptimizedCode();
- }
- if (shared->HasBytecodeArray()) {
- shared->set_code(interpreter_entry_trampoline);
- candidate->set_code(interpreter_entry_trampoline);
- } else {
- shared->set_code(lazy_compile);
- candidate->set_code(lazy_compile);
- }
- } else {
- DCHECK(ObjectMarking::IsBlack(code, MarkingState::Internal(code)));
- candidate->set_code(code);
- }
-
- // We are in the middle of a GC cycle so the write barrier in the code
- // setter did not record the slot update and we have to do that manually.
- Address slot = candidate->address() + JSFunction::kCodeEntryOffset;
- Code* target = Code::cast(Code::GetObjectFromEntryAddress(slot));
- isolate_->heap()->mark_compact_collector()->RecordCodeEntrySlot(
- candidate, slot, target);
-
- Object** shared_code_slot =
- HeapObject::RawField(shared, SharedFunctionInfo::kCodeOffset);
- isolate_->heap()->mark_compact_collector()->RecordSlot(
- shared, shared_code_slot, *shared_code_slot);
-
- candidate = next_candidate;
- }
-
- jsfunction_candidates_head_ = NULL;
-}
-
-
-void CodeFlusher::ProcessSharedFunctionInfoCandidates() {
- Code* lazy_compile = isolate_->builtins()->builtin(Builtins::kCompileLazy);
- Code* interpreter_entry_trampoline =
- isolate_->builtins()->builtin(Builtins::kInterpreterEntryTrampoline);
- SharedFunctionInfo* candidate = shared_function_info_candidates_head_;
- SharedFunctionInfo* next_candidate;
- while (candidate != NULL) {
- next_candidate = GetNextCandidate(candidate);
- ClearNextCandidate(candidate);
-
- Code* code = candidate->code();
- if (ObjectMarking::IsWhite(code, MarkingState::Internal(code))) {
- if (FLAG_trace_code_flushing && candidate->is_compiled()) {
- PrintF("[code-flushing clears: ");
- candidate->ShortPrint();
- PrintF(" - age: %d]\n", code->GetAge());
- }
- if (candidate->HasBytecodeArray()) {
- candidate->set_code(interpreter_entry_trampoline);
- } else {
- candidate->set_code(lazy_compile);
- }
- }
-
- Object** code_slot =
- HeapObject::RawField(candidate, SharedFunctionInfo::kCodeOffset);
- isolate_->heap()->mark_compact_collector()->RecordSlot(candidate, code_slot,
- *code_slot);
-
- candidate = next_candidate;
- }
-
- shared_function_info_candidates_head_ = NULL;
-}
-
-
-void CodeFlusher::EvictCandidate(SharedFunctionInfo* shared_info) {
- // Make sure previous flushing decisions are revisited.
- isolate_->heap()->incremental_marking()->IterateBlackObject(shared_info);
-
- if (FLAG_trace_code_flushing) {
- PrintF("[code-flushing abandons function-info: ");
- shared_info->ShortPrint();
- PrintF("]\n");
- }
-
- SharedFunctionInfo* candidate = shared_function_info_candidates_head_;
- SharedFunctionInfo* next_candidate;
- if (candidate == shared_info) {
- next_candidate = GetNextCandidate(shared_info);
- shared_function_info_candidates_head_ = next_candidate;
- ClearNextCandidate(shared_info);
- } else {
- while (candidate != NULL) {
- next_candidate = GetNextCandidate(candidate);
-
- if (next_candidate == shared_info) {
- next_candidate = GetNextCandidate(shared_info);
- SetNextCandidate(candidate, next_candidate);
- ClearNextCandidate(shared_info);
- break;
- }
-
- candidate = next_candidate;
- }
- }
-}
-
-
-void CodeFlusher::EvictCandidate(JSFunction* function) {
- DCHECK(!function->next_function_link()->IsUndefined(isolate_));
- Object* undefined = isolate_->heap()->undefined_value();
-
- // Make sure previous flushing decisions are revisited.
- isolate_->heap()->incremental_marking()->IterateBlackObject(function);
- isolate_->heap()->incremental_marking()->IterateBlackObject(
- function->shared());
-
- if (FLAG_trace_code_flushing) {
- PrintF("[code-flushing abandons closure: ");
- function->shared()->ShortPrint();
- PrintF("]\n");
- }
-
- JSFunction* candidate = jsfunction_candidates_head_;
- JSFunction* next_candidate;
- if (candidate == function) {
- next_candidate = GetNextCandidate(function);
- jsfunction_candidates_head_ = next_candidate;
- ClearNextCandidate(function, undefined);
- } else {
- while (candidate != NULL) {
- next_candidate = GetNextCandidate(candidate);
-
- if (next_candidate == function) {
- next_candidate = GetNextCandidate(function);
- SetNextCandidate(candidate, next_candidate);
- ClearNextCandidate(function, undefined);
- break;
- }
-
- candidate = next_candidate;
- }
- }
-}
-
-class MarkCompactMarkingVisitor
- : public StaticMarkingVisitor<MarkCompactMarkingVisitor> {
+class MarkCompactMarkingVisitor final
+ : public MarkingVisitor<MarkCompactMarkingVisitor> {
public:
- static void Initialize();
+ explicit MarkCompactMarkingVisitor(MarkCompactCollector* collector)
+ : MarkingVisitor<MarkCompactMarkingVisitor>(collector->heap(),
+ collector) {}
- INLINE(static void VisitPointer(Heap* heap, HeapObject* object, Object** p)) {
- MarkObjectByPointer(heap->mark_compact_collector(), object, p);
+ V8_INLINE void VisitPointer(HeapObject* host, Object** p) final {
+ MarkObjectByPointer(host, p);
}
- INLINE(static void VisitPointers(Heap* heap, HeapObject* object,
- Object** start, Object** end)) {
+ V8_INLINE void VisitPointers(HeapObject* host, Object** start,
+ Object** end) final {
// Mark all objects pointed to in [start, end).
const int kMinRangeForMarkingRecursion = 64;
if (end - start >= kMinRangeForMarkingRecursion) {
- if (VisitUnmarkedObjects(heap, object, start, end)) return;
+ if (VisitUnmarkedObjects(host, start, end)) return;
// We are close to a stack overflow, so just mark the objects.
}
- MarkCompactCollector* collector = heap->mark_compact_collector();
for (Object** p = start; p < end; p++) {
- MarkObjectByPointer(collector, object, p);
+ MarkObjectByPointer(host, p);
}
}
// Marks the object black and pushes it on the marking stack.
- INLINE(static void MarkObject(Heap* heap, HeapObject* object)) {
- heap->mark_compact_collector()->MarkObject(object);
+ V8_INLINE void MarkObject(HeapObject* object) {
+ collector_->MarkObject(object);
}
- // Marks the object black without pushing it on the marking stack.
- // Returns true if object needed marking and false otherwise.
- INLINE(static bool MarkObjectWithoutPush(Heap* heap, HeapObject* object)) {
+ // Marks the object black without pushing it on the marking stack. Returns
+ // true if object needed marking and false otherwise.
+ V8_INLINE bool MarkObjectWithoutPush(HeapObject* object) {
return ObjectMarking::WhiteToBlack(object, MarkingState::Internal(object));
}
- // Mark object pointed to by p.
- INLINE(static void MarkObjectByPointer(MarkCompactCollector* collector,
- HeapObject* object, Object** p)) {
+ V8_INLINE void MarkObjectByPointer(HeapObject* host, Object** p) {
if (!(*p)->IsHeapObject()) return;
HeapObject* target_object = HeapObject::cast(*p);
- collector->RecordSlot(object, p, target_object);
- collector->MarkObject(target_object);
- }
-
-
- // Visit an unmarked object.
- INLINE(static void VisitUnmarkedObject(MarkCompactCollector* collector,
- HeapObject* obj)) {
-#ifdef DEBUG
- DCHECK(collector->heap()->Contains(obj));
-#endif
- if (ObjectMarking::WhiteToBlack(obj, MarkingState::Internal(obj))) {
- Map* map = obj->map();
- Heap* heap = obj->GetHeap();
- ObjectMarking::WhiteToBlack(obj, MarkingState::Internal(obj));
- // Mark the map pointer and the body.
- heap->mark_compact_collector()->MarkObject(map);
- IterateBody(map, obj);
- }
+ collector_->RecordSlot(host, p, target_object);
+ collector_->MarkObject(target_object);
}
+ protected:
// Visit all unmarked objects pointed to by [start, end).
// Returns false if the operation fails (lack of stack space).
- INLINE(static bool VisitUnmarkedObjects(Heap* heap, HeapObject* object,
- Object** start, Object** end)) {
+ inline bool VisitUnmarkedObjects(HeapObject* host, Object** start,
+ Object** end) {
    // Return false if we are close to the stack limit.
- StackLimitCheck check(heap->isolate());
+ StackLimitCheck check(heap_->isolate());
if (check.HasOverflowed()) return false;
- MarkCompactCollector* collector = heap->mark_compact_collector();
// Visit the unmarked objects.
for (Object** p = start; p < end; p++) {
Object* o = *p;
if (!o->IsHeapObject()) continue;
- collector->RecordSlot(object, p, o);
+ collector_->RecordSlot(host, p, o);
HeapObject* obj = HeapObject::cast(o);
- VisitUnmarkedObject(collector, obj);
+ VisitUnmarkedObject(obj);
}
return true;
}
- private:
- // Code flushing support.
-
- static const int kRegExpCodeThreshold = 5;
-
- static void UpdateRegExpCodeAgeAndFlush(Heap* heap, JSRegExp* re,
- bool is_one_byte) {
- // Make sure that the fixed array is in fact initialized on the RegExp.
- // We could potentially trigger a GC when initializing the RegExp.
- if (HeapObject::cast(re->data())->map()->instance_type() !=
- FIXED_ARRAY_TYPE)
- return;
-
- // Make sure this is a RegExp that actually contains code.
- if (re->TypeTag() != JSRegExp::IRREGEXP) return;
-
- Object* code = re->DataAt(JSRegExp::code_index(is_one_byte));
- if (!code->IsSmi() &&
- HeapObject::cast(code)->map()->instance_type() == CODE_TYPE) {
- // Save a copy that can be reinstated if we need the code again.
- re->SetDataAt(JSRegExp::saved_code_index(is_one_byte), code);
-
- // Saving a copy might create a pointer into compaction candidate
- // that was not observed by marker. This might happen if JSRegExp data
- // was marked through the compilation cache before marker reached JSRegExp
- // object.
- FixedArray* data = FixedArray::cast(re->data());
- if (ObjectMarking::IsBlackOrGrey(data, MarkingState::Internal(data))) {
- Object** slot =
- data->data_start() + JSRegExp::saved_code_index(is_one_byte);
- heap->mark_compact_collector()->RecordSlot(data, slot, code);
- }
-
- // Set a number in the 0-255 range to guarantee no smi overflow.
- re->SetDataAt(JSRegExp::code_index(is_one_byte),
- Smi::FromInt(heap->ms_count() & 0xff));
- } else if (code->IsSmi()) {
- int value = Smi::cast(code)->value();
- // The regexp has not been compiled yet or there was a compilation error.
- if (value == JSRegExp::kUninitializedValue ||
- value == JSRegExp::kCompilationErrorValue) {
- return;
- }
-
- // Check if we should flush now.
- if (value == ((heap->ms_count() - kRegExpCodeThreshold) & 0xff)) {
- re->SetDataAt(JSRegExp::code_index(is_one_byte),
- Smi::FromInt(JSRegExp::kUninitializedValue));
- re->SetDataAt(JSRegExp::saved_code_index(is_one_byte),
- Smi::FromInt(JSRegExp::kUninitializedValue));
- }
- }
- }
-
-
- // Works by setting the current sweep_generation (as a smi) in the
- // code object place in the data array of the RegExp and keeps a copy
- // around that can be reinstated if we reuse the RegExp before flushing.
- // If we did not use the code for kRegExpCodeThreshold mark sweep GCs
- // we flush the code.
- static void VisitRegExpAndFlushCode(Map* map, HeapObject* object) {
- Heap* heap = map->GetHeap();
- MarkCompactCollector* collector = heap->mark_compact_collector();
- if (!collector->is_code_flushing_enabled()) {
- JSObjectVisitor::Visit(map, object);
- return;
- }
- JSRegExp* re = reinterpret_cast<JSRegExp*>(object);
- // Flush code or set age on both one byte and two byte code.
- UpdateRegExpCodeAgeAndFlush(heap, re, true);
- UpdateRegExpCodeAgeAndFlush(heap, re, false);
- // Visit the fields of the RegExp, including the updated FixedArray.
- JSObjectVisitor::Visit(map, object);
- }
-};
-
-
-void MarkCompactMarkingVisitor::Initialize() {
- StaticMarkingVisitor<MarkCompactMarkingVisitor>::Initialize();
-
- table_.Register(kVisitJSRegExp, &VisitRegExpAndFlushCode);
-}
-
-
-class CodeMarkingVisitor : public ThreadVisitor {
- public:
- explicit CodeMarkingVisitor(MarkCompactCollector* collector)
- : collector_(collector) {}
-
- void VisitThread(Isolate* isolate, ThreadLocalTop* top) {
- collector_->PrepareThreadForCodeFlushing(isolate, top);
- }
-
- private:
- MarkCompactCollector* collector_;
-};
-
-class SharedFunctionInfoMarkingVisitor : public ObjectVisitor,
- public RootVisitor {
- public:
- explicit SharedFunctionInfoMarkingVisitor(MarkCompactCollector* collector)
- : collector_(collector) {}
-
- void VisitPointers(HeapObject* host, Object** start, Object** end) override {
- for (Object** p = start; p < end; p++) MarkObject(p);
- }
-
- void VisitPointer(HeapObject* host, Object** slot) override {
- MarkObject(slot);
- }
-
- void VisitRootPointers(Root root, Object** start, Object** end) override {
- for (Object** p = start; p < end; p++) MarkObject(p);
- }
-
- void VisitRootPointer(Root root, Object** slot) override { MarkObject(slot); }
-
- private:
- void MarkObject(Object** slot) {
- Object* obj = *slot;
- if (obj->IsSharedFunctionInfo()) {
- SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(obj);
- collector_->MarkObject(shared->code());
- collector_->MarkObject(shared);
+ // Visit an unmarked object.
+ V8_INLINE void VisitUnmarkedObject(HeapObject* obj) {
+ DCHECK(heap_->Contains(obj));
+ if (ObjectMarking::WhiteToBlack(obj, MarkingState::Internal(obj))) {
+ Map* map = obj->map();
+ ObjectMarking::WhiteToBlack(obj, MarkingState::Internal(obj));
+ // Mark the map pointer and the body.
+ collector_->MarkObject(map);
+ Visit(map, obj);
}
}
- MarkCompactCollector* collector_;
};
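MarkCompactMarkingVisitor now derives from the CRTP MarkingVisitor<Derived> base and carries the collector as instance state instead of routing everything through static methods and a Heap* argument. A toy, self-contained sketch of that CRTP visitor shape is shown below; all names are invented and a simple node graph stands in for heap objects.

// crtp_visitor_sketch.cc -- toy CRTP visitor with instance state.
#include <cstdio>
#include <vector>

struct Node { std::vector<Node*> children; bool marked = false; };

template <typename ConcreteVisitor>
class VisitorBase {
 public:
  // The base walks the object body and forwards each slot to the derived
  // class, mirroring MarkingVisitor<Derived>::Visit(map, object).
  void Visit(Node* node) {
    ConcreteVisitor* derived = static_cast<ConcreteVisitor*>(this);
    for (Node*& slot : node->children) derived->VisitPointer(node, &slot);
  }
};

class MarkingVisitorSketch : public VisitorBase<MarkingVisitorSketch> {
 public:
  explicit MarkingVisitorSketch(std::vector<Node*>* worklist)
      : worklist_(worklist) {}
  // Instance state (the worklist) replaces what the old static visitors had
  // to re-derive from a Heap* argument on every call.
  void VisitPointer(Node* host, Node** slot) {
    (void)host;  // a real visitor would also record the slot against the host
    Node* target = *slot;
    if (target != nullptr && !target->marked) {
      target->marked = true;
      worklist_->push_back(target);
    }
  }
 private:
  std::vector<Node*>* worklist_;
};

int main() {
  Node leaf, root;
  root.children = {&leaf, nullptr};
  std::vector<Node*> worklist;
  MarkingVisitorSketch visitor(&worklist);
  root.marked = true;
  visitor.Visit(&root);
  while (!worklist.empty()) {
    Node* next = worklist.back();
    worklist.pop_back();
    visitor.Visit(next);
  }
  std::printf("leaf marked: %s\n", leaf.marked ? "yes" : "no");
  return 0;
}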
-
-void MarkCompactCollector::PrepareThreadForCodeFlushing(Isolate* isolate,
- ThreadLocalTop* top) {
- for (StackFrameIterator it(isolate, top); !it.done(); it.Advance()) {
- // Note: for the frame that has a pending lazy deoptimization
- // StackFrame::unchecked_code will return a non-optimized code object for
- // the outermost function and StackFrame::LookupCode will return
- // actual optimized code object.
- StackFrame* frame = it.frame();
- Code* code = frame->unchecked_code();
- MarkObject(code);
- if (frame->is_optimized()) {
- Code* optimized_code = frame->LookupCode();
- MarkObject(optimized_code);
- }
- }
-}
-
-
-void MarkCompactCollector::PrepareForCodeFlushing() {
- // If code flushing is disabled, there is no need to prepare for it.
- if (!is_code_flushing_enabled()) return;
-
- // Make sure we are not referencing the code from the stack.
- DCHECK(this == heap()->mark_compact_collector());
- PrepareThreadForCodeFlushing(heap()->isolate(),
- heap()->isolate()->thread_local_top());
-
- // Iterate the archived stacks in all threads to check if
- // the code is referenced.
- CodeMarkingVisitor code_marking_visitor(this);
- heap()->isolate()->thread_manager()->IterateArchivedThreads(
- &code_marking_visitor);
-
- SharedFunctionInfoMarkingVisitor visitor(this);
- heap()->isolate()->compilation_cache()->IterateFunctions(&visitor);
- heap()->isolate()->handle_scope_implementer()->Iterate(&visitor);
-
- ProcessMarkingDeque();
-}
-
void MinorMarkCompactCollector::CleanupSweepToIteratePages() {
for (Page* p : sweep_to_iterate_pages_) {
if (p->IsFlagSet(Page::SWEEP_TO_ITERATE)) {
@@ -1452,7 +1129,7 @@ class MarkCompactCollector::RootMarkingVisitor : public ObjectVisitor,
public RootVisitor {
public:
explicit RootMarkingVisitor(Heap* heap)
- : collector_(heap->mark_compact_collector()) {}
+ : collector_(heap->mark_compact_collector()), visitor_(collector_) {}
void VisitPointer(HeapObject* host, Object** p) override {
MarkObjectByPointer(p);
@@ -1480,19 +1157,20 @@ class MarkCompactCollector::RootMarkingVisitor : public ObjectVisitor,
HeapObject* object = HeapObject::cast(*p);
- if (ObjectMarking::WhiteToBlack<MarkBit::NON_ATOMIC>(
+ if (ObjectMarking::WhiteToBlack<AccessMode::NON_ATOMIC>(
object, MarkingState::Internal(object))) {
Map* map = object->map();
// Mark the map pointer and body, and push them on the marking stack.
collector_->MarkObject(map);
- MarkCompactMarkingVisitor::IterateBody(map, object);
+ visitor_.Visit(map, object);
// Mark all the objects reachable from the map and body. May leave
// overflowed objects in the heap.
- collector_->EmptyMarkingDeque();
+ collector_->EmptyMarkingWorklist();
}
}
MarkCompactCollector* collector_;
+ MarkCompactMarkingVisitor visitor_;
};
class InternalizedStringTableCleaner : public ObjectVisitor {
@@ -1609,10 +1287,11 @@ class MinorMarkCompactWeakObjectRetainer : public WeakObjectRetainer {
HeapObject* heap_object = HeapObject::cast(object);
if (!collector_.heap()->InNewSpace(heap_object)) return object;
- DCHECK(!ObjectMarking::IsGrey(heap_object,
- collector_.marking_state(heap_object)));
- if (ObjectMarking::IsBlack(heap_object,
- collector_.marking_state(heap_object))) {
+ // Young generation marking only marks to grey instead of black.
+ DCHECK(!ObjectMarking::IsBlack(heap_object,
+ collector_.marking_state(heap_object)));
+ if (ObjectMarking::IsGrey(heap_object,
+ collector_.marking_state(heap_object))) {
return object;
}
return nullptr;
@@ -1655,29 +1334,28 @@ template <class T>
void MarkCompactCollector::DiscoverGreyObjectsWithIterator(T* it) {
// The caller should ensure that the marking stack is initially not full,
// so that we don't waste effort pointlessly scanning for objects.
- DCHECK(!marking_deque()->IsFull());
+ DCHECK(!marking_worklist()->IsFull());
Map* filler_map = heap()->one_pointer_filler_map();
for (HeapObject* object = it->Next(); object != NULL; object = it->Next()) {
if ((object->map() != filler_map) &&
ObjectMarking::GreyToBlack(object, MarkingState::Internal(object))) {
PushBlack(object);
- if (marking_deque()->IsFull()) return;
+ if (marking_worklist()->IsFull()) return;
}
}
}
void MarkCompactCollector::DiscoverGreyObjectsOnPage(MemoryChunk* p) {
- DCHECK(!marking_deque()->IsFull());
- LiveObjectIterator<kGreyObjects> it(p, MarkingState::Internal(p));
- HeapObject* object = NULL;
- while ((object = it.Next()) != NULL) {
- bool success =
- ObjectMarking::GreyToBlack(object, MarkingState::Internal(object));
+ DCHECK(!marking_worklist()->IsFull());
+ for (auto object_and_size :
+ LiveObjectRange<kGreyObjects>(p, marking_state(p))) {
+ HeapObject* const object = object_and_size.first;
+ bool success = ObjectMarking::GreyToBlack(object, marking_state(object));
DCHECK(success);
USE(success);
PushBlack(object);
- if (marking_deque()->IsFull()) return;
+ if (marking_worklist()->IsFull()) return;
}
}
@@ -1767,9 +1445,13 @@ class RecordMigratedSlotVisitor : public ObjectVisitor {
if (value->IsHeapObject()) {
Page* p = Page::FromAddress(reinterpret_cast<Address>(value));
if (p->InNewSpace()) {
- RememberedSet<OLD_TO_NEW>::Insert(Page::FromAddress(slot), slot);
+ DCHECK_IMPLIES(p->InToSpace(),
+ p->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION));
+ RememberedSet<OLD_TO_NEW>::Insert<AccessMode::NON_ATOMIC>(
+ Page::FromAddress(slot), slot);
} else if (p->IsEvacuationCandidate()) {
- RememberedSet<OLD_TO_OLD>::Insert(Page::FromAddress(slot), slot);
+ RememberedSet<OLD_TO_OLD>::Insert<AccessMode::NON_ATOMIC>(
+ Page::FromAddress(slot), slot);
}
}
}
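RecordMigratedSlotVisitor routes each recorded slot either into the OLD_TO_NEW or the OLD_TO_OLD remembered set depending on where the pointed-to value lives. A simplified, invented sketch of just that routing decision follows, with std::set standing in for the remembered sets.

// slot_recording_sketch.cc -- simplified routing of migrated slots.
#include <cstdio>
#include <set>

struct FakePage { bool in_new_space; bool evacuation_candidate; };
struct Slot { FakePage* value_page; void* address; };

void RecordMigratedSlot(const Slot& slot, std::set<void*>* old_to_new,
                        std::set<void*>* old_to_old) {
  if (slot.value_page->in_new_space) {
    old_to_new->insert(slot.address);  // revisited at the next scavenge
  } else if (slot.value_page->evacuation_candidate) {
    old_to_old->insert(slot.address);  // updated after compaction moves objects
  }
}

int main() {
  FakePage young{true, false}, compacting{false, true};
  int a = 0, b = 0;
  Slot s1{&young, &a}, s2{&compacting, &b};
  std::set<void*> old_to_new, old_to_old;
  RecordMigratedSlot(s1, &old_to_new, &old_to_old);
  RecordMigratedSlot(s2, &old_to_new, &old_to_old);
  std::printf("old_to_new=%zu old_to_old=%zu\n", old_to_new.size(),
              old_to_old.size());
  return 0;
}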
@@ -1815,9 +1497,9 @@ class YoungGenerationMigrationObserver final : public MigrationObserver {
// Migrate color to old generation marking in case the object survived young
// generation garbage collection.
if (heap_->incremental_marking()->IsMarking()) {
- DCHECK(ObjectMarking::IsWhite(
+ DCHECK(ObjectMarking::IsWhite<AccessMode::ATOMIC>(
dst, mark_compact_collector_->marking_state(dst)));
- heap_->incremental_marking()->TransferColor<MarkBit::ATOMIC>(src, dst);
+ heap_->incremental_marking()->TransferColor<AccessMode::ATOMIC>(src, dst);
}
}
@@ -1865,9 +1547,13 @@ class YoungGenerationRecordMigratedSlotVisitor final
if (value->IsHeapObject()) {
Page* p = Page::FromAddress(reinterpret_cast<Address>(value));
if (p->InNewSpace()) {
- RememberedSet<OLD_TO_NEW>::Insert(Page::FromAddress(slot), slot);
+ DCHECK_IMPLIES(p->InToSpace(),
+ p->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION));
+ RememberedSet<OLD_TO_NEW>::Insert<AccessMode::NON_ATOMIC>(
+ Page::FromAddress(slot), slot);
} else if (p->IsEvacuationCandidate() && IsLive(host)) {
- RememberedSet<OLD_TO_OLD>::Insert(Page::FromAddress(slot), slot);
+ RememberedSet<OLD_TO_OLD>::Insert<AccessMode::NON_ATOMIC>(
+ Page::FromAddress(slot), slot);
}
}
}
@@ -1876,7 +1562,7 @@ class YoungGenerationRecordMigratedSlotVisitor final
class HeapObjectVisitor {
public:
virtual ~HeapObjectVisitor() {}
- virtual bool Visit(HeapObject* object) = 0;
+ virtual bool Visit(HeapObject* object, int size) = 0;
};
class EvacuateVisitorBase : public HeapObjectVisitor {
@@ -1924,8 +1610,8 @@ class EvacuateVisitorBase : public HeapObjectVisitor {
if (mode != MigrationMode::kFast)
base->ExecuteMigrationObservers(dest, src, dst, size);
}
- base::NoBarrier_Store(reinterpret_cast<base::AtomicWord*>(src_addr),
- reinterpret_cast<base::AtomicWord>(dst_addr));
+ base::Relaxed_Store(reinterpret_cast<base::AtomicWord*>(src_addr),
+ reinterpret_cast<base::AtomicWord>(dst_addr));
}
EvacuateVisitorBase(Heap* heap, CompactionSpaceCollection* compaction_spaces,
@@ -1937,11 +1623,10 @@ class EvacuateVisitorBase : public HeapObjectVisitor {
}
inline bool TryEvacuateObject(PagedSpace* target_space, HeapObject* object,
- HeapObject** target_object) {
+ int size, HeapObject** target_object) {
#ifdef VERIFY_HEAP
if (AbortCompactionForTesting(object)) return false;
#endif // VERIFY_HEAP
- int size = object->Size();
AllocationAlignment alignment = object->RequiredAlignment();
AllocationResult allocation = target_space->AllocateRaw(size, alignment);
if (allocation.To(target_object)) {
@@ -2006,19 +1691,18 @@ class EvacuateNewSpaceVisitor final : public EvacuateVisitorBase {
semispace_copied_size_(0),
local_pretenuring_feedback_(local_pretenuring_feedback) {}
- inline bool Visit(HeapObject* object) override {
- heap_->UpdateAllocationSite<Heap::kCached>(object,
- local_pretenuring_feedback_);
- int size = object->Size();
+ inline bool Visit(HeapObject* object, int size) override {
HeapObject* target_object = nullptr;
- if (heap_->ShouldBePromoted(object->address(), size) &&
- TryEvacuateObject(compaction_spaces_->Get(OLD_SPACE), object,
+ if (heap_->ShouldBePromoted(object->address()) &&
+ TryEvacuateObject(compaction_spaces_->Get(OLD_SPACE), object, size,
&target_object)) {
promoted_size_ += size;
return true;
}
+ heap_->UpdateAllocationSite<Heap::kCached>(object->map(), object,
+ local_pretenuring_feedback_);
HeapObject* target = nullptr;
- AllocationSpace space = AllocateTargetObject(object, &target);
+ AllocationSpace space = AllocateTargetObject(object, size, &target);
MigrateObject(HeapObject::cast(target), object, size, space);
semispace_copied_size_ += size;
return true;
@@ -2034,9 +1718,8 @@ class EvacuateNewSpaceVisitor final : public EvacuateVisitorBase {
kStickyBailoutOldSpace,
};
- inline AllocationSpace AllocateTargetObject(HeapObject* old_object,
+ inline AllocationSpace AllocateTargetObject(HeapObject* old_object, int size,
HeapObject** target_object) {
- const int size = old_object->Size();
AllocationAlignment alignment = old_object->RequiredAlignment();
AllocationResult allocation;
AllocationSpace space_allocated_in = space_to_allocate_;
@@ -2153,16 +1836,18 @@ class EvacuateNewSpacePageVisitor final : public HeapObjectVisitor {
case NEW_TO_OLD: {
page->Unlink();
Page* new_page = Page::ConvertNewToOld(page);
+ DCHECK(!new_page->InNewSpace());
new_page->SetFlag(Page::PAGE_NEW_OLD_PROMOTION);
break;
}
}
}
- inline bool Visit(HeapObject* object) {
- heap_->UpdateAllocationSite<Heap::kCached>(object,
- local_pretenuring_feedback_);
- if (mode == NEW_TO_OLD) {
+ inline bool Visit(HeapObject* object, int size) {
+ if (mode == NEW_TO_NEW) {
+ heap_->UpdateAllocationSite<Heap::kCached>(object->map(), object,
+ local_pretenuring_feedback_);
+ } else if (mode == NEW_TO_OLD) {
object->IterateBodyFast(record_visitor_);
}
return true;
@@ -2185,11 +1870,11 @@ class EvacuateOldSpaceVisitor final : public EvacuateVisitorBase {
RecordMigratedSlotVisitor* record_visitor)
: EvacuateVisitorBase(heap, compaction_spaces, record_visitor) {}
- inline bool Visit(HeapObject* object) override {
+ inline bool Visit(HeapObject* object, int size) override {
CompactionSpace* target_space = compaction_spaces_->Get(
Page::FromAddress(object->address())->owner()->identity());
HeapObject* target_object = nullptr;
- if (TryEvacuateObject(target_space, object, &target_object)) {
+ if (TryEvacuateObject(target_space, object, size, &target_object)) {
DCHECK(object->map_word().IsForwardingAddress());
return true;
}
@@ -2201,7 +1886,7 @@ class EvacuateRecordOnlyVisitor final : public HeapObjectVisitor {
public:
explicit EvacuateRecordOnlyVisitor(Heap* heap) : heap_(heap) {}
- inline bool Visit(HeapObject* object) {
+ inline bool Visit(HeapObject* object, int size) {
RecordMigratedSlotVisitor visitor(heap_->mark_compact_collector());
object->IterateBody(&visitor);
return true;
@@ -2214,7 +1899,7 @@ class EvacuateRecordOnlyVisitor final : public HeapObjectVisitor {
void MarkCompactCollector::DiscoverGreyObjectsInSpace(PagedSpace* space) {
for (Page* p : *space) {
DiscoverGreyObjectsOnPage(p);
- if (marking_deque()->IsFull()) return;
+ if (marking_worklist()->IsFull()) return;
}
}
@@ -2223,7 +1908,7 @@ void MarkCompactCollector::DiscoverGreyObjectsInNewSpace() {
NewSpace* space = heap()->new_space();
for (Page* page : PageRange(space->bottom(), space->top())) {
DiscoverGreyObjectsOnPage(page);
- if (marking_deque()->IsFull()) return;
+ if (marking_worklist()->IsFull()) return;
}
}
@@ -2242,7 +1927,7 @@ void MarkCompactCollector::MarkStringTable(RootMarkingVisitor* visitor) {
MarkingState::Internal(string_table))) {
// Explicitly mark the prefix.
string_table->IteratePrefix(visitor);
- ProcessMarkingDeque();
+ ProcessMarkingWorklist();
}
}
@@ -2255,9 +1940,9 @@ void MarkCompactCollector::MarkRoots(RootMarkingVisitor* visitor) {
MarkStringTable(visitor);
// There may be overflowed objects in the heap. Visit them now.
- while (marking_deque()->overflowed()) {
- RefillMarkingDeque();
- EmptyMarkingDeque();
+ while (marking_worklist()->overflowed()) {
+ RefillMarkingWorklist();
+ EmptyMarkingWorklist();
}
}
@@ -2265,20 +1950,21 @@ void MarkCompactCollector::MarkRoots(RootMarkingVisitor* visitor) {
// Before: the marking stack contains zero or more heap object pointers.
// After: the marking stack is empty, and all objects reachable from the
// marking stack have been marked, or are overflowed in the heap.
-void MarkCompactCollector::EmptyMarkingDeque() {
- while (!marking_deque()->IsEmpty()) {
- HeapObject* object = marking_deque()->Pop();
-
+void MarkCompactCollector::EmptyMarkingWorklist() {
+ HeapObject* object;
+ MarkCompactMarkingVisitor visitor(this);
+ while ((object = marking_worklist()->Pop()) != nullptr) {
DCHECK(!object->IsFiller());
DCHECK(object->IsHeapObject());
DCHECK(heap()->Contains(object));
- DCHECK(!(ObjectMarking::IsWhite<MarkBit::NON_ATOMIC>(
+ DCHECK(!(ObjectMarking::IsWhite<AccessMode::NON_ATOMIC>(
object, MarkingState::Internal(object))));
Map* map = object->map();
MarkObject(map);
- MarkCompactMarkingVisitor::IterateBody(map, object);
+ visitor.Visit(map, object);
}
+ DCHECK(marking_worklist()->IsEmpty());
}
@@ -2287,44 +1973,44 @@ void MarkCompactCollector::EmptyMarkingDeque() {
// before sweeping completes. If sweeping completes, there are no remaining
// overflowed objects in the heap so the overflow flag on the marking stack
// is cleared.
-void MarkCompactCollector::RefillMarkingDeque() {
+void MarkCompactCollector::RefillMarkingWorklist() {
isolate()->CountUsage(v8::Isolate::UseCounterFeature::kMarkDequeOverflow);
- DCHECK(marking_deque()->overflowed());
+ DCHECK(marking_worklist()->overflowed());
DiscoverGreyObjectsInNewSpace();
- if (marking_deque()->IsFull()) return;
+ if (marking_worklist()->IsFull()) return;
DiscoverGreyObjectsInSpace(heap()->old_space());
- if (marking_deque()->IsFull()) return;
+ if (marking_worklist()->IsFull()) return;
DiscoverGreyObjectsInSpace(heap()->code_space());
- if (marking_deque()->IsFull()) return;
+ if (marking_worklist()->IsFull()) return;
DiscoverGreyObjectsInSpace(heap()->map_space());
- if (marking_deque()->IsFull()) return;
+ if (marking_worklist()->IsFull()) return;
LargeObjectIterator lo_it(heap()->lo_space());
DiscoverGreyObjectsWithIterator(&lo_it);
- if (marking_deque()->IsFull()) return;
+ if (marking_worklist()->IsFull()) return;
- marking_deque()->ClearOverflowed();
+ marking_worklist()->ClearOverflowed();
}
// Mark all objects reachable (transitively) from objects on the marking
// stack. Before: the marking stack contains zero or more heap object
// pointers. After: the marking stack is empty and there are no overflowed
// objects in the heap.
-void MarkCompactCollector::ProcessMarkingDeque() {
- EmptyMarkingDeque();
- while (marking_deque()->overflowed()) {
- RefillMarkingDeque();
- EmptyMarkingDeque();
+void MarkCompactCollector::ProcessMarkingWorklist() {
+ EmptyMarkingWorklist();
+ while (marking_worklist()->overflowed()) {
+ RefillMarkingWorklist();
+ EmptyMarkingWorklist();
}
- DCHECK(marking_deque()->IsEmpty());
+ DCHECK(marking_worklist()->IsEmpty());
}
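
The discipline above (empty the worklist, then refill while it is still flagged as overflowed) is easiest to see in isolation. Below is a small, self-contained sketch of the same fixed-point loop; the ToyWorklist type, its tiny capacity, and the plain vector used to park dropped grey objects are illustrative assumptions, not V8 API.

#include <cstddef>
#include <vector>

// Toy bounded worklist; overflow is recorded when a push is dropped so that
// callers know they must rescan for grey objects later.
struct ToyWorklist {
  static constexpr std::size_t kCapacity = 4;
  std::vector<int> items;
  bool overflowed = false;

  bool Push(int value) {
    if (items.size() == kCapacity) {
      overflowed = true;
      return false;
    }
    items.push_back(value);
    return true;
  }
  bool Pop(int* value) {
    if (items.empty()) return false;
    *value = items.back();
    items.pop_back();
    return true;
  }
};

// Drain the worklist; a real collector would trace the popped object here.
void EmptyToyWorklist(ToyWorklist* worklist) {
  int value;
  while (worklist->Pop(&value)) { /* visit `value` */ }
}

// Re-push previously dropped (grey) objects; stay overflowed if the worklist
// fills up again, mirroring RefillMarkingWorklist() above.
void RefillToyWorklist(ToyWorklist* worklist, std::vector<int>* grey_objects) {
  while (!grey_objects->empty()) {
    if (!worklist->Push(grey_objects->back())) return;
    grey_objects->pop_back();
  }
  worklist->overflowed = false;
}

// Empty/refill until a fixed point, mirroring ProcessMarkingWorklist() above.
void ProcessToyWorklist(ToyWorklist* worklist, std::vector<int>* grey_objects) {
  EmptyToyWorklist(worklist);
  while (worklist->overflowed) {
    RefillToyWorklist(worklist, grey_objects);
    EmptyToyWorklist(worklist);
  }
}

int main() {
  ToyWorklist worklist;
  std::vector<int> grey_objects;
  // Seed ten "roots"; pushes that do not fit are parked as grey objects.
  for (int root = 0; root < 10; root++) {
    if (!worklist.Push(root)) grey_objects.push_back(root);
  }
  ProcessToyWorklist(&worklist, &grey_objects);
  return 0;
}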
// Mark all objects reachable (transitively) from objects on the marking
// stack including references only considered in the atomic marking pause.
void MarkCompactCollector::ProcessEphemeralMarking(
bool only_process_harmony_weak_collections) {
- DCHECK(marking_deque()->IsEmpty() && !marking_deque()->overflowed());
+ DCHECK(marking_worklist()->IsEmpty() && !marking_worklist()->overflowed());
bool work_to_do = true;
while (work_to_do) {
if (!only_process_harmony_weak_collections) {
@@ -2344,10 +2030,10 @@ void MarkCompactCollector::ProcessEphemeralMarking(
heap_->local_embedder_heap_tracer()->ClearCachedWrappersToTrace();
}
ProcessWeakCollections();
- work_to_do = !marking_deque()->IsEmpty();
- ProcessMarkingDeque();
+ work_to_do = !marking_worklist()->IsEmpty();
+ ProcessMarkingWorklist();
}
- CHECK(marking_deque()->IsEmpty());
+ CHECK(marking_worklist()->IsEmpty());
CHECK_EQ(0, heap()->local_embedder_heap_tracer()->NumberOfWrappersToTrace());
}
@@ -2363,7 +2049,7 @@ void MarkCompactCollector::ProcessTopOptimizedFrame(
if (!code->CanDeoptAt(it.frame()->pc())) {
Code::BodyDescriptor::IterateBody(code, visitor);
}
- ProcessMarkingDeque();
+ ProcessMarkingWorklist();
return;
}
}
@@ -2380,7 +2066,7 @@ class ObjectStatsVisitor : public HeapObjectVisitor {
live_collector_.CollectGlobalStatistics();
}
- bool Visit(HeapObject* obj) override {
+ bool Visit(HeapObject* obj, int size) override {
if (ObjectMarking::IsBlack(obj, MarkingState::Internal(obj))) {
live_collector_.CollectStatistics(obj);
} else {
@@ -2402,7 +2088,7 @@ void MarkCompactCollector::VisitAllObjects(HeapObjectVisitor* visitor) {
std::unique_ptr<ObjectIterator> it(space_it.next()->GetObjectIterator());
ObjectIterator* obj_it = it.get();
while ((obj = obj_it->Next()) != nullptr) {
- visitor->Visit(obj);
+ visitor->Visit(obj, obj->Size());
}
}
}
@@ -2433,65 +2119,28 @@ void MarkCompactCollector::RecordObjectStats() {
}
class YoungGenerationMarkingVisitor final
- : public HeapVisitor<void, YoungGenerationMarkingVisitor> {
+ : public NewSpaceVisitor<YoungGenerationMarkingVisitor> {
public:
- using BaseClass = HeapVisitor<int, YoungGenerationMarkingVisitor>;
+ YoungGenerationMarkingVisitor(
+ Heap* heap, MinorMarkCompactCollector::MarkingWorklist* global_worklist,
+ int task_id)
+ : heap_(heap), worklist_(global_worklist, task_id) {}
- YoungGenerationMarkingVisitor(Heap* heap,
- WorkStealingMarkingDeque* global_marking_deque,
- int task_id)
- : heap_(heap), marking_deque_(global_marking_deque, task_id) {}
-
- void VisitPointers(HeapObject* host, Object** start, Object** end) final {
- const int kMinRangeForMarkingRecursion = 64;
- if (end - start >= kMinRangeForMarkingRecursion) {
- if (MarkRecursively(host, start, end)) return;
- }
+ V8_INLINE void VisitPointers(HeapObject* host, Object** start,
+ Object** end) final {
for (Object** p = start; p < end; p++) {
VisitPointer(host, p);
}
}
- void VisitPointer(HeapObject* host, Object** slot) final {
+ V8_INLINE void VisitPointer(HeapObject* host, Object** slot) final {
Object* target = *slot;
if (heap_->InNewSpace(target)) {
HeapObject* target_object = HeapObject::cast(target);
- MarkObjectViaMarkingDeque(target_object);
+ MarkObjectViaMarkingWorklist(target_object);
}
}
- // Special cases for young generation. Also see StaticNewSpaceVisitor.
-
- void VisitJSFunction(Map* map, JSFunction* object) final {
- if (!ShouldVisit(object)) return;
- int size = JSFunction::BodyDescriptorWeakCode::SizeOf(map, object);
- VisitMapPointer(object, object->map_slot());
- JSFunction::BodyDescriptorWeakCode::IterateBody(object, size, this);
- return;
- }
-
- void VisitNativeContext(Map* map, Context* object) final {
- if (!ShouldVisit(object)) return;
- int size = Context::ScavengeBodyDescriptor::SizeOf(map, object);
- VisitMapPointer(object, object->map_slot());
- Context::ScavengeBodyDescriptor::IterateBody(object, size, this);
- return;
- }
-
- void VisitJSApiObject(Map* map, JSObject* object) final {
- return VisitJSObject(map, object);
- }
-
- void VisitBytecodeArray(Map* map, BytecodeArray* object) final {
- UNREACHABLE();
- return;
- }
-
- void VisitSharedFunctionInfo(Map* map, SharedFunctionInfo* object) final {
- UNREACHABLE();
- return;
- }
-
private:
inline MarkingState marking_state(HeapObject* object) {
SLOW_DCHECK(
@@ -2500,32 +2149,16 @@ class YoungGenerationMarkingVisitor final
return MarkingState::External(object);
}
- inline void MarkObjectViaMarkingDeque(HeapObject* object) {
- if (ObjectMarking::WhiteToBlack<MarkBit::ATOMIC>(object,
- marking_state(object))) {
+ inline void MarkObjectViaMarkingWorklist(HeapObject* object) {
+ if (ObjectMarking::WhiteToGrey<AccessMode::ATOMIC>(object,
+ marking_state(object))) {
// Marking deque overflow is unsupported for the young generation.
- CHECK(marking_deque_.Push(object));
+ CHECK(worklist_.Push(object));
}
}
- inline bool MarkRecursively(HeapObject* host, Object** start, Object** end) {
- // TODO(mlippautz): Stack check on background tasks. We cannot do a reliable
- // stack check on background tasks yet.
- for (Object** p = start; p < end; p++) {
- Object* target = *p;
- if (heap_->InNewSpace(target)) {
- HeapObject* target_object = HeapObject::cast(target);
- if (ObjectMarking::WhiteToBlack<MarkBit::ATOMIC>(
- target_object, marking_state(target_object))) {
- Visit(target_object);
- }
- }
- }
- return true;
- }
-
Heap* heap_;
- LocalWorkStealingMarkingDeque marking_deque_;
+ MinorMarkCompactCollector::MarkingWorklist::View worklist_;
};
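
MarkObjectViaMarkingWorklist() above relies on the atomic white-to-grey transition to ensure that, of all marker tasks racing on an object, exactly one pushes it. A short sketch of that idiom, with a per-object std::atomic<bool> standing in for V8's mark bits and a plain vector standing in for the per-task worklist view:

#include <atomic>
#include <vector>

struct ToyObject {
  std::atomic<bool> marked{false};
};

// Returns true for exactly one caller per object, even when several marking
// tasks race on it, so each object enters the worklist at most once.
bool WhiteToGrey(ToyObject* object) {
  return !object->marked.exchange(true, std::memory_order_acq_rel);
}

void MarkObjectViaWorklist(ToyObject* object,
                           std::vector<ToyObject*>* local_worklist) {
  if (WhiteToGrey(object)) local_worklist->push_back(object);
}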
class MinorMarkCompactCollector::RootMarkingVisitor : public RootVisitor {
@@ -2555,10 +2188,10 @@ class MinorMarkCompactCollector::RootMarkingVisitor : public RootVisitor {
if (!collector_->heap()->InNewSpace(object)) return;
- if (ObjectMarking::WhiteToBlack<MarkBit::NON_ATOMIC>(
+ if (ObjectMarking::WhiteToGrey<AccessMode::NON_ATOMIC>(
object, marking_state(object))) {
- collector_->marking_visitor(kMainMarker)->Visit(object);
- collector_->EmptyMarkingDeque();
+ collector_->main_marking_visitor()->Visit(object);
+ collector_->EmptyMarkingWorklist();
}
}
@@ -2566,6 +2199,7 @@ class MinorMarkCompactCollector::RootMarkingVisitor : public RootVisitor {
};
class MarkingItem;
+class GlobalHandlesMarkingItem;
class PageMarkingItem;
class RootMarkingItem;
class YoungGenerationMarkingTask;
@@ -2578,15 +2212,16 @@ class MarkingItem : public ItemParallelJob::Item {
class YoungGenerationMarkingTask : public ItemParallelJob::Task {
public:
- YoungGenerationMarkingTask(Isolate* isolate,
- MinorMarkCompactCollector* collector,
- WorkStealingMarkingDeque* marking_deque,
- YoungGenerationMarkingVisitor* visitor,
- int task_id)
+ YoungGenerationMarkingTask(
+ Isolate* isolate, MinorMarkCompactCollector* collector,
+ MinorMarkCompactCollector::MarkingWorklist* global_worklist, int task_id)
: ItemParallelJob::Task(isolate),
collector_(collector),
- marking_deque_(marking_deque, task_id),
- visitor_(visitor) {}
+ marking_worklist_(global_worklist, task_id),
+ visitor_(isolate->heap(), global_worklist, task_id) {
+ local_live_bytes_.reserve(isolate->heap()->new_space()->Capacity() /
+ Page::kPageSize);
+ }
void RunInParallel() override {
double marking_time = 0.0;
@@ -2596,10 +2231,11 @@ class YoungGenerationMarkingTask : public ItemParallelJob::Task {
while ((item = GetItem<MarkingItem>()) != nullptr) {
item->Process(this);
item->MarkFinished();
- EmptyLocalMarkingDeque();
+ EmptyLocalMarkingWorklist();
}
- EmptyMarkingDeque();
- DCHECK(marking_deque_.IsEmpty());
+ EmptyMarkingWorklist();
+ DCHECK(marking_worklist_.IsLocalEmpty());
+ FlushLiveBytes();
}
if (FLAG_trace_minor_mc_parallel_marking) {
PrintIsolate(collector_->isolate(), "marking[%p]: time=%f\n",
@@ -2610,32 +2246,50 @@ class YoungGenerationMarkingTask : public ItemParallelJob::Task {
void MarkObject(Object* object) {
if (!collector_->heap()->InNewSpace(object)) return;
HeapObject* heap_object = HeapObject::cast(object);
- if (ObjectMarking::WhiteToBlack<MarkBit::ATOMIC>(
+ if (ObjectMarking::WhiteToGrey<AccessMode::ATOMIC>(
heap_object, collector_->marking_state(heap_object))) {
- visitor_->Visit(heap_object);
+ const int size = visitor_.Visit(heap_object);
+ IncrementLiveBytes(heap_object, size);
}
}
private:
- void EmptyLocalMarkingDeque() {
+ MarkingState marking_state(HeapObject* object) {
+ return MarkingState::External(object);
+ }
+
+ void EmptyLocalMarkingWorklist() {
HeapObject* object = nullptr;
- while (marking_deque_.Pop(&object)) {
- visitor_->Visit(object);
+ while (marking_worklist_.Pop(&object)) {
+ const int size = visitor_.Visit(object);
+ IncrementLiveBytes(object, size);
}
}
- void EmptyMarkingDeque() {
+ void EmptyMarkingWorklist() {
HeapObject* object = nullptr;
- while (marking_deque_.WaitForMoreObjects()) {
- while (marking_deque_.Pop(&object)) {
- visitor_->Visit(object);
- }
+ while (marking_worklist_.Pop(&object)) {
+ const int size = visitor_.Visit(object);
+ IncrementLiveBytes(object, size);
+ }
+ }
+
+ void IncrementLiveBytes(HeapObject* object, intptr_t bytes) {
+ local_live_bytes_[Page::FromAddress(reinterpret_cast<Address>(object))] +=
+ bytes;
+ }
+
+ void FlushLiveBytes() {
+ for (auto pair : local_live_bytes_) {
+ collector_->marking_state(pair.first)
+ .IncrementLiveBytes<AccessMode::ATOMIC>(pair.second);
}
}
MinorMarkCompactCollector* collector_;
- LocalWorkStealingMarkingDeque marking_deque_;
- YoungGenerationMarkingVisitor* visitor_;
+ MinorMarkCompactCollector::MarkingWorklist::View marking_worklist_;
+ YoungGenerationMarkingVisitor visitor_;
+ std::unordered_map<Page*, intptr_t, Page::Hasher> local_live_bytes_;
};
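
The task above buffers live-byte counts per page in local_live_bytes_ and publishes them once in FlushLiveBytes(), so marking performs one atomic add per touched page rather than one per object. A sketch of that buffer-then-flush pattern; ToyPage and LocalLiveBytes are stand-ins, not V8 types:

#include <atomic>
#include <cstdint>
#include <unordered_map>

struct ToyPage {
  std::atomic<std::intptr_t> live_bytes{0};
};

class LocalLiveBytes {
 public:
  // Called for every newly marked object; touches only task-local state.
  void Increment(ToyPage* page, std::intptr_t bytes) { local_[page] += bytes; }

  // Called once when the task finishes: one atomic add per touched page
  // keeps cross-task contention low.
  void Flush() {
    for (auto& pair : local_) {
      pair.first->live_bytes.fetch_add(pair.second,
                                       std::memory_order_relaxed);
    }
    local_.clear();
  }

 private:
  std::unordered_map<ToyPage*, std::intptr_t> local_;
};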
class BatchedRootMarkingItem : public MarkingItem {
@@ -2656,8 +2310,10 @@ class BatchedRootMarkingItem : public MarkingItem {
class PageMarkingItem : public MarkingItem {
public:
- explicit PageMarkingItem(MemoryChunk* chunk) : chunk_(chunk) {}
- virtual ~PageMarkingItem() {}
+ explicit PageMarkingItem(MemoryChunk* chunk,
+ base::AtomicNumber<intptr_t>* global_slots)
+ : chunk_(chunk), global_slots_(global_slots), slots_(0) {}
+ virtual ~PageMarkingItem() { global_slots_->Increment(slots_); }
void Process(YoungGenerationMarkingTask* task) override {
base::LockGuard<base::RecursiveMutex> guard(chunk_->mutex());
@@ -2669,9 +2325,10 @@ class PageMarkingItem : public MarkingItem {
inline Heap* heap() { return chunk_->heap(); }
void MarkUntypedPointers(YoungGenerationMarkingTask* task) {
- RememberedSet<OLD_TO_NEW>::Iterate(chunk_, [this, task](Address slot) {
- return CheckAndMarkObject(task, slot);
- });
+ RememberedSet<OLD_TO_NEW>::Iterate(
+ chunk_,
+ [this, task](Address slot) { return CheckAndMarkObject(task, slot); },
+ SlotSet::PREFREE_EMPTY_BUCKETS);
}
void MarkTypedPointers(YoungGenerationMarkingTask* task) {
@@ -2696,12 +2353,56 @@ class PageMarkingItem : public MarkingItem {
DCHECK(heap()->InToSpace(object));
HeapObject* heap_object = reinterpret_cast<HeapObject*>(object);
task->MarkObject(heap_object);
+ slots_++;
return KEEP_SLOT;
}
return REMOVE_SLOT;
}
MemoryChunk* chunk_;
+ base::AtomicNumber<intptr_t>* global_slots_;
+ intptr_t slots_;
+};
+
+class GlobalHandlesMarkingItem : public MarkingItem {
+ public:
+ GlobalHandlesMarkingItem(GlobalHandles* global_handles, size_t start,
+ size_t end)
+ : global_handles_(global_handles), start_(start), end_(end) {}
+ virtual ~GlobalHandlesMarkingItem() {}
+
+ void Process(YoungGenerationMarkingTask* task) override {
+ GlobalHandlesRootMarkingVisitor visitor(task);
+ global_handles_
+ ->IterateNewSpaceStrongAndDependentRootsAndIdentifyUnmodified(
+ &visitor, start_, end_);
+ }
+
+ private:
+ class GlobalHandlesRootMarkingVisitor : public RootVisitor {
+ public:
+ explicit GlobalHandlesRootMarkingVisitor(YoungGenerationMarkingTask* task)
+ : task_(task) {}
+
+ void VisitRootPointer(Root root, Object** p) override {
+ DCHECK(Root::kGlobalHandles == root);
+ task_->MarkObject(*p);
+ }
+
+ void VisitRootPointers(Root root, Object** start, Object** end) override {
+ DCHECK(Root::kGlobalHandles == root);
+ for (Object** p = start; p < end; p++) {
+ task_->MarkObject(*p);
+ }
+ }
+
+ private:
+ YoungGenerationMarkingTask* task_;
+ };
+
+ GlobalHandles* global_handles_;
+ size_t start_;
+ size_t end_;
};
// This root visitor walks all roots and creates items bundling objects that
@@ -2738,7 +2439,7 @@ class MinorMarkCompactCollector::RootMarkingVisitorSeedOnly
// Bundling several objects together in items avoids issues with allocating
// and deallocating items; both are operations that are performed on the main
// thread.
- static const int kBufferSize = 32;
+ static const int kBufferSize = 128;
void AddObject(Object* object) {
buffered_objects_.push_back(object);
@@ -2750,73 +2451,81 @@ class MinorMarkCompactCollector::RootMarkingVisitorSeedOnly
};
MinorMarkCompactCollector::MinorMarkCompactCollector(Heap* heap)
- : MarkCompactCollectorBase(heap), page_parallel_job_semaphore_(0) {
- marking_deque_ = new WorkStealingMarkingDeque();
- for (int i = 0; i < kNumMarkers; i++) {
- marking_visitor_[i] =
- new YoungGenerationMarkingVisitor(heap, marking_deque_, i);
- }
+ : MarkCompactCollectorBase(heap),
+ worklist_(new MinorMarkCompactCollector::MarkingWorklist()),
+ main_marking_visitor_(
+ new YoungGenerationMarkingVisitor(heap, worklist_, kMainMarker)),
+ page_parallel_job_semaphore_(0) {
+ static_assert(
+ kNumMarkers <= MinorMarkCompactCollector::MarkingWorklist::kMaxNumTasks,
+ "more marker tasks than marking deque can handle");
}
MinorMarkCompactCollector::~MinorMarkCompactCollector() {
- for (int i = 0; i < kNumMarkers; i++) {
- DCHECK_NOT_NULL(marking_visitor_[i]);
- delete marking_visitor_[i];
- }
- delete marking_deque_;
-}
-
-SlotCallbackResult MinorMarkCompactCollector::CheckAndMarkObject(
- Heap* heap, Address slot_address) {
- Object* object = *reinterpret_cast<Object**>(slot_address);
- if (heap->InNewSpace(object)) {
- // Marking happens before flipping the young generation, so the object
- // has to be in ToSpace.
- DCHECK(heap->InToSpace(object));
- HeapObject* heap_object = reinterpret_cast<HeapObject*>(object);
- const MarkingState state = MarkingState::External(heap_object);
- if (ObjectMarking::WhiteToBlack<MarkBit::NON_ATOMIC>(heap_object, state)) {
- heap->minor_mark_compact_collector()
- ->marking_visitor(kMainMarker)
- ->Visit(heap_object);
- }
- return KEEP_SLOT;
- }
- return REMOVE_SLOT;
+ delete worklist_;
+ delete main_marking_visitor_;
}
static bool IsUnmarkedObjectForYoungGeneration(Heap* heap, Object** p) {
DCHECK_IMPLIES(heap->InNewSpace(*p), heap->InToSpace(*p));
return heap->InNewSpace(*p) &&
- !ObjectMarking::IsBlack(HeapObject::cast(*p),
- MarkingState::External(HeapObject::cast(*p)));
+ !ObjectMarking::IsGrey(HeapObject::cast(*p),
+ MarkingState::External(HeapObject::cast(*p)));
}
-void MinorMarkCompactCollector::MarkRootSetInParallel() {
- // Seed the root set (roots + old->new set).
- ItemParallelJob job(isolate()->cancelable_task_manager(),
- &page_parallel_job_semaphore_);
-
- {
- TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK_SEED);
- RootMarkingVisitorSeedOnly root_seed_visitor(&job);
- heap()->IterateRoots(&root_seed_visitor, VISIT_ALL_IN_SCAVENGE);
- RememberedSet<OLD_TO_NEW>::IterateMemoryChunks(
- heap(), [&job](MemoryChunk* chunk) {
- job.AddItem(new PageMarkingItem(chunk));
- });
- root_seed_visitor.FlushObjects();
+template <class ParallelItem>
+static void SeedGlobalHandles(GlobalHandles* global_handles,
+ ItemParallelJob* job) {
+ // Create batches of global handles.
+ const size_t kGlobalHandlesBufferSize = 1000;
+ const size_t new_space_nodes = global_handles->NumberOfNewSpaceNodes();
+ for (size_t start = 0; start < new_space_nodes;
+ start += kGlobalHandlesBufferSize) {
+ size_t end = start + kGlobalHandlesBufferSize;
+ if (end > new_space_nodes) end = new_space_nodes;
+ job->AddItem(new ParallelItem(global_handles, start, end));
}
+}
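
SeedGlobalHandles() above slices the new-space global-handle nodes into fixed-size ranges so that independent parallel items can mark them. The same slicing, sketched for a bare index range; the function name and return shape are assumptions:

#include <algorithm>
#include <cstddef>
#include <utility>
#include <vector>

// Slice [0, total) into [start, end) ranges of at most batch_size elements;
// each range would become one ItemParallelJob item.
std::vector<std::pair<std::size_t, std::size_t>> MakeBatches(
    std::size_t total, std::size_t batch_size) {
  std::vector<std::pair<std::size_t, std::size_t>> batches;
  for (std::size_t start = 0; start < total; start += batch_size) {
    batches.emplace_back(start, std::min(start + batch_size, total));
  }
  return batches;
}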
+void MinorMarkCompactCollector::MarkRootSetInParallel() {
+ base::AtomicNumber<intptr_t> slots;
{
- TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK_ROOTS);
- const int num_tasks = NumberOfMarkingTasks();
- for (int i = 0; i < num_tasks; i++) {
- job.AddTask(new YoungGenerationMarkingTask(
- isolate(), this, marking_deque(), marking_visitor(i), i));
+ ItemParallelJob job(isolate()->cancelable_task_manager(),
+ &page_parallel_job_semaphore_);
+
+ // Seed the root set (roots + old->new set).
+ {
+ TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK_SEED);
+ // Create batches of roots.
+ RootMarkingVisitorSeedOnly root_seed_visitor(&job);
+ heap()->IterateRoots(&root_seed_visitor, VISIT_ALL_IN_MINOR_MC_MARK);
+ // Create batches of global handles.
+ SeedGlobalHandles<GlobalHandlesMarkingItem>(isolate()->global_handles(),
+ &job);
+ // Create items for each page.
+ RememberedSet<OLD_TO_NEW>::IterateMemoryChunks(
+ heap(), [&job, &slots](MemoryChunk* chunk) {
+ job.AddItem(new PageMarkingItem(chunk, &slots));
+ });
+ // Flush any remaining objects in the seeding visitor.
+ root_seed_visitor.FlushObjects();
+ }
+
+ // Add tasks and run in parallel.
+ {
+ TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK_ROOTS);
+ const int new_space_pages =
+ static_cast<int>(heap()->new_space()->Capacity()) / Page::kPageSize;
+ const int num_tasks = NumberOfParallelMarkingTasks(new_space_pages);
+ for (int i = 0; i < num_tasks; i++) {
+ job.AddTask(
+ new YoungGenerationMarkingTask(isolate(), this, worklist(), i));
+ }
+ job.Run();
+ DCHECK(worklist()->IsGlobalEmpty());
}
- job.Run();
}
+ old_to_new_slots_ = static_cast<int>(slots.Value());
}
void MinorMarkCompactCollector::MarkLiveObjects() {
@@ -2826,20 +2535,13 @@ void MinorMarkCompactCollector::MarkLiveObjects() {
RootMarkingVisitor root_visitor(this);
- {
- TRACE_GC(heap()->tracer(),
- GCTracer::Scope::MINOR_MC_MARK_IDENTIFY_GLOBAL_HANDLES);
- isolate()->global_handles()->IdentifyWeakUnmodifiedObjects(
- &Heap::IsUnmodifiedHeapObject);
- }
-
MarkRootSetInParallel();
// Mark rest on the main thread.
{
TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK_WEAK);
heap()->IterateEncounteredWeakCollections(&root_visitor);
- ProcessMarkingDeque();
+ ProcessMarkingWorklist();
}
{
@@ -2848,29 +2550,28 @@ void MinorMarkCompactCollector::MarkLiveObjects() {
&IsUnmarkedObjectForYoungGeneration);
isolate()->global_handles()->IterateNewSpaceWeakUnmodifiedRoots(
&root_visitor);
- ProcessMarkingDeque();
+ ProcessMarkingWorklist();
}
}
-void MinorMarkCompactCollector::ProcessMarkingDeque() {
- EmptyMarkingDeque();
+void MinorMarkCompactCollector::ProcessMarkingWorklist() {
+ EmptyMarkingWorklist();
}
-void MinorMarkCompactCollector::EmptyMarkingDeque() {
- LocalWorkStealingMarkingDeque local_marking_deque(marking_deque(),
- kMainMarker);
+void MinorMarkCompactCollector::EmptyMarkingWorklist() {
+ MarkingWorklist::View marking_worklist(worklist(), kMainMarker);
HeapObject* object = nullptr;
- while (local_marking_deque.Pop(&object)) {
+ while (marking_worklist.Pop(&object)) {
DCHECK(!object->IsFiller());
DCHECK(object->IsHeapObject());
DCHECK(heap()->Contains(object));
- DCHECK(!(ObjectMarking::IsWhite<MarkBit::NON_ATOMIC>(
+ DCHECK(!(ObjectMarking::IsWhite<AccessMode::NON_ATOMIC>(
object, marking_state(object))));
- DCHECK((ObjectMarking::IsBlack<MarkBit::NON_ATOMIC>(
+ DCHECK((ObjectMarking::IsGrey<AccessMode::NON_ATOMIC>(
object, marking_state(object))));
- marking_visitor(kMainMarker)->Visit(object);
+ main_marking_visitor()->Visit(object);
}
- DCHECK(local_marking_deque.IsEmpty());
+ DCHECK(marking_worklist.IsLocalEmpty());
}
void MinorMarkCompactCollector::CollectGarbage() {
@@ -2899,7 +2600,7 @@ void MinorMarkCompactCollector::CollectGarbage() {
{
TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARKING_DEQUE);
- heap()->incremental_marking()->UpdateMarkingDequeAfterScavenge();
+ heap()->incremental_marking()->UpdateMarkingWorklistAfterScavenge();
}
{
@@ -2922,20 +2623,20 @@ void MinorMarkCompactCollector::MakeIterable(
MarkCompactCollector* full_collector = heap()->mark_compact_collector();
Address free_start = p->area_start();
DCHECK(reinterpret_cast<intptr_t>(free_start) % (32 * kPointerSize) == 0);
- LiveObjectIterator<kBlackObjects> it(p, marking_state(p));
- HeapObject* object = nullptr;
- while ((object = it.Next()) != nullptr) {
- DCHECK(ObjectMarking::IsBlack(object, marking_state(object)));
+ for (auto object_and_size :
+ LiveObjectRange<kGreyObjects>(p, marking_state(p))) {
+ HeapObject* const object = object_and_size.first;
+ DCHECK(ObjectMarking::IsGrey(object, marking_state(object)));
Address free_end = object->address();
if (free_end != free_start) {
CHECK_GT(free_end, free_start);
size_t size = static_cast<size_t>(free_end - free_start);
+ full_collector->marking_state(p).bitmap()->ClearRange(
+ p->AddressToMarkbitIndex(free_start),
+ p->AddressToMarkbitIndex(free_end));
if (free_space_mode == ZAP_FREE_SPACE) {
memset(free_start, 0xcc, size);
- full_collector->marking_state(p).bitmap()->ClearRange(
- p->AddressToMarkbitIndex(free_start),
- p->AddressToMarkbitIndex(free_end));
}
p->heap()->CreateFillerObjectAt(free_start, static_cast<int>(size),
ClearRecordedSlots::kNo);
@@ -2948,11 +2649,11 @@ void MinorMarkCompactCollector::MakeIterable(
if (free_start != p->area_end()) {
CHECK_GT(p->area_end(), free_start);
size_t size = static_cast<size_t>(p->area_end() - free_start);
+ full_collector->marking_state(p).bitmap()->ClearRange(
+ p->AddressToMarkbitIndex(free_start),
+ p->AddressToMarkbitIndex(p->area_end()));
if (free_space_mode == ZAP_FREE_SPACE) {
memset(free_start, 0xcc, size);
- full_collector->marking_state(p).bitmap()->ClearRange(
- p->AddressToMarkbitIndex(free_start),
- p->AddressToMarkbitIndex(p->area_end()));
}
p->heap()->CreateFillerObjectAt(free_start, static_cast<int>(size),
ClearRecordedSlots::kNo);
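
MakeIterable() above walks the live objects of a page in address order and turns every gap between them, plus the tail up to area_end, into filler, clearing the corresponding mark bits first. The gap computation itself, sketched with toy object descriptors in place of real heap objects:

#include <cstddef>
#include <cstdint>
#include <utility>
#include <vector>

struct ToyLiveObject {
  std::uintptr_t address;
  std::size_t size;
};

// `live_objects` must be sorted by address and lie within
// [area_start, area_end); the result is every free gap on the page, i.e. the
// ranges that MakeIterable() turns into filler objects.
std::vector<std::pair<std::uintptr_t, std::size_t>> FreeRanges(
    std::uintptr_t area_start, std::uintptr_t area_end,
    const std::vector<ToyLiveObject>& live_objects) {
  std::vector<std::pair<std::uintptr_t, std::size_t>> gaps;
  std::uintptr_t free_start = area_start;
  for (const ToyLiveObject& object : live_objects) {
    if (object.address != free_start) {
      gaps.emplace_back(free_start, object.address - free_start);
    }
    free_start = object.address + object.size;
  }
  if (free_start != area_end) {
    gaps.emplace_back(free_start, area_end - free_start);
  }
  return gaps;
}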
@@ -2988,7 +2689,7 @@ void MinorMarkCompactCollector::EvacuatePrologue() {
NewSpace* new_space = heap()->new_space();
// Append the list of new space pages to be processed.
for (Page* p : PageRange(new_space->bottom(), new_space->top())) {
- new_space_evacuation_pages_.Add(p);
+ new_space_evacuation_pages_.push_back(p);
}
new_space->Flip();
new_space->ResetAllocationInfo();
@@ -3035,7 +2736,7 @@ void MinorMarkCompactCollector::Evacuate() {
sweep_to_iterate_pages_.push_back(p);
}
}
- new_space_evacuation_pages_.Rewind(0);
+ new_space_evacuation_pages_.clear();
}
{
@@ -3066,15 +2767,10 @@ void MarkCompactCollector::MarkLiveObjects() {
state_ = MARK_LIVE_OBJECTS;
#endif
- marking_deque()->StartUsing();
+ marking_worklist()->StartUsing();
heap_->local_embedder_heap_tracer()->EnterFinalPause();
- {
- TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_PREPARE_CODE_FLUSH);
- PrepareForCodeFlushing();
- }
-
RootMarkingVisitor root_visitor(heap());
{
@@ -3107,7 +2803,7 @@ void MarkCompactCollector::MarkLiveObjects() {
GCTracer::Scope::MC_MARK_WEAK_CLOSURE_WEAK_HANDLES);
heap()->isolate()->global_handles()->IdentifyWeakHandles(
&IsUnmarkedHeapObject);
- ProcessMarkingDeque();
+ ProcessMarkingWorklist();
}
// Then we mark the objects.
@@ -3115,7 +2811,7 @@ void MarkCompactCollector::MarkLiveObjects() {
TRACE_GC(heap()->tracer(),
GCTracer::Scope::MC_MARK_WEAK_CLOSURE_WEAK_ROOTS);
heap()->isolate()->global_handles()->IterateWeakRoots(&root_visitor);
- ProcessMarkingDeque();
+ ProcessMarkingWorklist();
}
// Repeat Harmony weak maps marking to mark unmarked objects reachable from
@@ -3161,13 +2857,6 @@ void MarkCompactCollector::ClearNonLiveReferences() {
heap()->ProcessAllWeakReferences(&mark_compact_object_retainer);
}
- // Flush code from collected candidates.
- if (is_code_flushing_enabled()) {
- TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_CODE_FLUSH);
- code_flusher_->ProcessCandidates();
- }
-
-
DependentCode* dependent_code_list;
Object* non_live_map_list;
ClearWeakCells(&non_live_map_list, &dependent_code_list);
@@ -3392,8 +3081,7 @@ void MarkCompactCollector::TrimEnumCache(Map* map,
DescriptorArray* descriptors) {
int live_enum = map->EnumLength();
if (live_enum == kInvalidEnumCacheSentinel) {
- live_enum =
- map->NumberOfDescribedProperties(OWN_DESCRIPTORS, ENUMERABLE_STRINGS);
+ live_enum = map->NumberOfEnumerableProperties();
}
if (live_enum == 0) return descriptors->ClearEnumCache();
@@ -3410,6 +3098,7 @@ void MarkCompactCollector::TrimEnumCache(Map* map,
void MarkCompactCollector::ProcessWeakCollections() {
+ MarkCompactMarkingVisitor visitor(this);
Object* weak_collection_obj = heap()->encountered_weak_collections();
while (weak_collection_obj != Smi::kZero) {
JSWeakCollection* weak_collection =
@@ -3427,8 +3116,7 @@ void MarkCompactCollector::ProcessWeakCollections() {
RecordSlot(table, key_slot, *key_slot);
Object** value_slot =
table->RawFieldOfElementAt(ObjectHashTable::EntryToValueIndex(i));
- MarkCompactMarkingVisitor::MarkObjectByPointer(this, table,
- value_slot);
+ visitor.MarkObjectByPointer(table, value_slot);
}
}
}
@@ -3596,10 +3284,9 @@ void MarkCompactCollector::RecordRelocSlot(Code* host, RelocInfo* rinfo,
}
}
+template <AccessMode access_mode>
static inline SlotCallbackResult UpdateSlot(Object** slot) {
- Object* obj = reinterpret_cast<Object*>(
- base::NoBarrier_Load(reinterpret_cast<base::AtomicWord*>(slot)));
-
+ Object* obj = *slot;
if (obj->IsHeapObject()) {
HeapObject* heap_obj = HeapObject::cast(obj);
MapWord map_word = heap_obj->map_word();
@@ -3609,14 +3296,16 @@ static inline SlotCallbackResult UpdateSlot(Object** slot) {
Page::FromAddress(heap_obj->address())
->IsFlagSet(Page::COMPACTION_WAS_ABORTED));
HeapObject* target = map_word.ToForwardingAddress();
- base::NoBarrier_CompareAndSwap(
- reinterpret_cast<base::AtomicWord*>(slot),
- reinterpret_cast<base::AtomicWord>(obj),
- reinterpret_cast<base::AtomicWord>(target));
+ if (access_mode == AccessMode::NON_ATOMIC) {
+ *slot = target;
+ } else {
+ base::AsAtomicWord::Release_CompareAndSwap(slot, obj, target);
+ }
DCHECK(!heap_obj->GetHeap()->InFromSpace(target));
DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(target));
}
}
+ // OLD_TO_OLD slots are always removed after updating.
return REMOVE_SLOT;
}
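
UpdateSlot() is now parameterized on AccessMode: single-threaded callers store the forwarding address directly, while parallel callers use a release compare-and-swap that may lose the race without harm, since every winner writes the same value. A sketch of that dispatch, using std::atomic as a stand-in for base::AsAtomicWord and a toy access-mode enum:

#include <atomic>

enum class ToyAccessMode { ATOMIC, NON_ATOMIC };

// `slot` holds either the old object pointer or, once updated, its
// forwarding address.
template <ToyAccessMode mode>
void UpdateForwardedSlot(std::atomic<void*>* slot, void* old_value,
                         void* new_value) {
  if (mode == ToyAccessMode::NON_ATOMIC) {
    // Phases that own the slot exclusively can write without synchronization
    // (V8 stores through a plain pointer here; a relaxed store is the closest
    // portable equivalent).
    slot->store(new_value, std::memory_order_relaxed);
  } else {
    // Concurrent updaters race benignly: every winner writes the same
    // forwarding address, so a failed exchange needs no retry.
    slot->compare_exchange_strong(old_value, new_value,
                                  std::memory_order_release,
                                  std::memory_order_relaxed);
  }
}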
@@ -3626,36 +3315,45 @@ static inline SlotCallbackResult UpdateSlot(Object** slot) {
// never visits code objects.
class PointersUpdatingVisitor : public ObjectVisitor, public RootVisitor {
public:
- void VisitPointer(HeapObject* host, Object** p) override { UpdateSlot(p); }
+ void VisitPointer(HeapObject* host, Object** p) override {
+ UpdateSlotInternal(p);
+ }
void VisitPointers(HeapObject* host, Object** start, Object** end) override {
- for (Object** p = start; p < end; p++) UpdateSlot(p);
+ for (Object** p = start; p < end; p++) UpdateSlotInternal(p);
}
- void VisitRootPointer(Root root, Object** p) override { UpdateSlot(p); }
+ void VisitRootPointer(Root root, Object** p) override {
+ UpdateSlotInternal(p);
+ }
void VisitRootPointers(Root root, Object** start, Object** end) override {
- for (Object** p = start; p < end; p++) UpdateSlot(p);
+ for (Object** p = start; p < end; p++) UpdateSlotInternal(p);
}
void VisitCellPointer(Code* host, RelocInfo* rinfo) override {
- UpdateTypedSlotHelper::UpdateCell(rinfo, UpdateSlot);
+ UpdateTypedSlotHelper::UpdateCell(rinfo, UpdateSlotInternal);
}
void VisitEmbeddedPointer(Code* host, RelocInfo* rinfo) override {
- UpdateTypedSlotHelper::UpdateEmbeddedPointer(rinfo, UpdateSlot);
+ UpdateTypedSlotHelper::UpdateEmbeddedPointer(rinfo, UpdateSlotInternal);
}
void VisitCodeTarget(Code* host, RelocInfo* rinfo) override {
- UpdateTypedSlotHelper::UpdateCodeTarget(rinfo, UpdateSlot);
+ UpdateTypedSlotHelper::UpdateCodeTarget(rinfo, UpdateSlotInternal);
}
void VisitCodeEntry(JSFunction* host, Address entry_address) override {
- UpdateTypedSlotHelper::UpdateCodeEntry(entry_address, UpdateSlot);
+ UpdateTypedSlotHelper::UpdateCodeEntry(entry_address, UpdateSlotInternal);
}
void VisitDebugTarget(Code* host, RelocInfo* rinfo) override {
- UpdateTypedSlotHelper::UpdateDebugTarget(rinfo, UpdateSlot);
+ UpdateTypedSlotHelper::UpdateDebugTarget(rinfo, UpdateSlotInternal);
+ }
+
+ private:
+ static inline SlotCallbackResult UpdateSlotInternal(Object** slot) {
+ return UpdateSlot<AccessMode::NON_ATOMIC>(slot);
}
};
@@ -3675,15 +3373,16 @@ void MarkCompactCollector::EvacuatePrologue() {
NewSpace* new_space = heap()->new_space();
// Append the list of new space pages to be processed.
for (Page* p : PageRange(new_space->bottom(), new_space->top())) {
- new_space_evacuation_pages_.Add(p);
+ new_space_evacuation_pages_.push_back(p);
}
new_space->Flip();
new_space->ResetAllocationInfo();
// Old space.
- DCHECK(old_space_evacuation_pages_.is_empty());
- old_space_evacuation_pages_.Swap(&evacuation_candidates_);
- DCHECK(evacuation_candidates_.is_empty());
+ DCHECK(old_space_evacuation_pages_.empty());
+ old_space_evacuation_pages_ = std::move(evacuation_candidates_);
+ evacuation_candidates_.clear();
+ DCHECK(evacuation_candidates_.empty());
}
void MarkCompactCollector::EvacuateEpilogue() {
@@ -3709,7 +3408,6 @@ class Evacuator : public Malloced {
if (chunk->IsFlagSet(MemoryChunk::PAGE_NEW_NEW_PROMOTION))
return kPageNewToNew;
if (chunk->InNewSpace()) return kObjectsNewToOld;
- DCHECK(chunk->IsEvacuationCandidate());
return kObjectsOldToOld;
}
@@ -3738,7 +3436,7 @@ class Evacuator : public Malloced {
virtual ~Evacuator() {}
- bool EvacuatePage(Page* page);
+ void EvacuatePage(Page* page);
void AddObserver(MigrationObserver* observer) {
new_space_visitor_.AddObserver(observer);
@@ -3756,7 +3454,7 @@ class Evacuator : public Malloced {
static const int kInitialLocalPretenuringFeedbackCapacity = 256;
// |saved_live_bytes| returns the live bytes of the page that was processed.
- virtual bool RawEvacuatePage(Page* page, intptr_t* saved_live_bytes) = 0;
+ virtual void RawEvacuatePage(Page* page, intptr_t* saved_live_bytes) = 0;
inline Heap* heap() { return heap_; }
@@ -3784,31 +3482,29 @@ class Evacuator : public Malloced {
intptr_t bytes_compacted_;
};
-bool Evacuator::EvacuatePage(Page* page) {
- bool success = false;
+void Evacuator::EvacuatePage(Page* page) {
DCHECK(page->SweepingDone());
intptr_t saved_live_bytes = 0;
double evacuation_time = 0.0;
{
AlwaysAllocateScope always_allocate(heap()->isolate());
TimedScope timed_scope(&evacuation_time);
- success = RawEvacuatePage(page, &saved_live_bytes);
+ RawEvacuatePage(page, &saved_live_bytes);
}
ReportCompactionProgress(evacuation_time, saved_live_bytes);
if (FLAG_trace_evacuation) {
- PrintIsolate(heap()->isolate(),
- "evacuation[%p]: page=%p new_space=%d "
- "page_evacuation=%d executable=%d contains_age_mark=%d "
- "live_bytes=%" V8PRIdPTR " time=%f success=%d\n",
- static_cast<void*>(this), static_cast<void*>(page),
- page->InNewSpace(),
- page->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION) ||
- page->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION),
- page->IsFlagSet(MemoryChunk::IS_EXECUTABLE),
- page->Contains(heap()->new_space()->age_mark()),
- saved_live_bytes, evacuation_time, success);
+ PrintIsolate(
+ heap()->isolate(),
+ "evacuation[%p]: page=%p new_space=%d "
+ "page_evacuation=%d executable=%d contains_age_mark=%d "
+ "live_bytes=%" V8PRIdPTR " time=%f success=%d\n",
+ static_cast<void*>(this), static_cast<void*>(page), page->InNewSpace(),
+ page->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION) ||
+ page->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION),
+ page->IsFlagSet(MemoryChunk::IS_EXECUTABLE),
+ page->Contains(heap()->new_space()->age_mark()), saved_live_bytes,
+ evacuation_time, page->IsFlagSet(Page::COMPACTION_WAS_ABORTED));
}
- return success;
}
void Evacuator::Finalize() {
@@ -3836,64 +3532,49 @@ class FullEvacuator : public Evacuator {
: Evacuator(collector->heap(), record_visitor), collector_(collector) {}
protected:
- bool RawEvacuatePage(Page* page, intptr_t* live_bytes) override;
+ void RawEvacuatePage(Page* page, intptr_t* live_bytes) override;
MarkCompactCollector* collector_;
};
-bool FullEvacuator::RawEvacuatePage(Page* page, intptr_t* live_bytes) {
- bool success = false;
- LiveObjectVisitor object_visitor;
+void FullEvacuator::RawEvacuatePage(Page* page, intptr_t* live_bytes) {
const MarkingState state = collector_->marking_state(page);
*live_bytes = state.live_bytes();
+ HeapObject* failed_object = nullptr;
switch (ComputeEvacuationMode(page)) {
case kObjectsNewToOld:
- success = object_visitor.VisitBlackObjects(
+ LiveObjectVisitor::VisitBlackObjectsNoFail(
page, state, &new_space_visitor_, LiveObjectVisitor::kClearMarkbits);
- DCHECK(success);
- ArrayBufferTracker::ProcessBuffers(
- page, ArrayBufferTracker::kUpdateForwardedRemoveOthers);
+ // ArrayBufferTracker will be updated during pointers updating.
break;
case kPageNewToOld:
- success = object_visitor.VisitBlackObjects(
+ LiveObjectVisitor::VisitBlackObjectsNoFail(
page, state, &new_to_old_page_visitor_,
LiveObjectVisitor::kKeepMarking);
- DCHECK(success);
new_to_old_page_visitor_.account_moved_bytes(state.live_bytes());
// ArrayBufferTracker will be updated during sweeping.
break;
case kPageNewToNew:
- success = object_visitor.VisitBlackObjects(
+ LiveObjectVisitor::VisitBlackObjectsNoFail(
page, state, &new_to_new_page_visitor_,
LiveObjectVisitor::kKeepMarking);
- DCHECK(success);
new_to_new_page_visitor_.account_moved_bytes(state.live_bytes());
// ArrayBufferTracker will be updated during sweeping.
break;
- case kObjectsOldToOld:
- success = object_visitor.VisitBlackObjects(
- page, state, &old_space_visitor_, LiveObjectVisitor::kClearMarkbits);
+ case kObjectsOldToOld: {
+ const bool success = LiveObjectVisitor::VisitBlackObjects(
+ page, state, &old_space_visitor_, LiveObjectVisitor::kClearMarkbits,
+ &failed_object);
if (!success) {
- // Aborted compaction page. We have to record slots here, since we
- // might not have recorded them in first place.
- // Note: We mark the page as aborted here to be able to record slots
- // for code objects in |RecordMigratedSlotVisitor| and to be able
- // to identify the page later on for post processing.
- page->SetFlag(Page::COMPACTION_WAS_ABORTED);
- EvacuateRecordOnlyVisitor record_visitor(heap());
- success = object_visitor.VisitBlackObjects(
- page, state, &record_visitor, LiveObjectVisitor::kKeepMarking);
- ArrayBufferTracker::ProcessBuffers(
- page, ArrayBufferTracker::kUpdateForwardedKeepOthers);
- DCHECK(success);
- success = false;
+ // Aborted compaction page. Actual processing happens on the main
+ // thread for simplicity reasons.
+ collector_->ReportAbortedEvacuationCandidate(failed_object, page);
} else {
- ArrayBufferTracker::ProcessBuffers(
- page, ArrayBufferTracker::kUpdateForwardedRemoveOthers);
+ // ArrayBufferTracker will be updated during pointers updating.
}
break;
+ }
}
- return success;
}
class YoungGenerationEvacuator : public Evacuator {
@@ -3903,76 +3584,95 @@ class YoungGenerationEvacuator : public Evacuator {
: Evacuator(collector->heap(), record_visitor), collector_(collector) {}
protected:
- bool RawEvacuatePage(Page* page, intptr_t* live_bytes) override;
+ void RawEvacuatePage(Page* page, intptr_t* live_bytes) override;
MinorMarkCompactCollector* collector_;
};
-bool YoungGenerationEvacuator::RawEvacuatePage(Page* page,
+void YoungGenerationEvacuator::RawEvacuatePage(Page* page,
intptr_t* live_bytes) {
- bool success = false;
- LiveObjectVisitor object_visitor;
const MarkingState state = collector_->marking_state(page);
*live_bytes = state.live_bytes();
switch (ComputeEvacuationMode(page)) {
case kObjectsNewToOld:
- success = object_visitor.VisitBlackObjects(
+ LiveObjectVisitor::VisitGreyObjectsNoFail(
page, state, &new_space_visitor_, LiveObjectVisitor::kClearMarkbits);
- DCHECK(success);
- ArrayBufferTracker::ProcessBuffers(
- page, ArrayBufferTracker::kUpdateForwardedRemoveOthers);
+ // ArrayBufferTracker will be updated during pointers updating.
break;
case kPageNewToOld:
- success = object_visitor.VisitBlackObjects(
+ LiveObjectVisitor::VisitGreyObjectsNoFail(
page, state, &new_to_old_page_visitor_,
LiveObjectVisitor::kKeepMarking);
- DCHECK(success);
new_to_old_page_visitor_.account_moved_bytes(state.live_bytes());
// TODO(mlippautz): If cleaning array buffers is too slow here we can
// delay it until the next GC.
ArrayBufferTracker::FreeDead(page, state);
- if (heap()->ShouldZapGarbage())
+ if (heap()->ShouldZapGarbage()) {
collector_->MakeIterable(page, MarkingTreatmentMode::KEEP,
ZAP_FREE_SPACE);
+ } else if (heap()->incremental_marking()->IsMarking()) {
+ // When incremental marking is on, we need to clear the mark bits of
+ // the full collector. We cannot yet discard the young generation mark
+ // bits as they are still relevant for pointers updating.
+ collector_->MakeIterable(page, MarkingTreatmentMode::KEEP,
+ IGNORE_FREE_SPACE);
+ }
break;
case kPageNewToNew:
- success = object_visitor.VisitBlackObjects(
+ LiveObjectVisitor::VisitGreyObjectsNoFail(
page, state, &new_to_new_page_visitor_,
LiveObjectVisitor::kKeepMarking);
- DCHECK(success);
new_to_new_page_visitor_.account_moved_bytes(state.live_bytes());
// TODO(mlippautz): If cleaning array buffers is too slow here we can
// delay it until the next GC.
ArrayBufferTracker::FreeDead(page, state);
- if (heap()->ShouldZapGarbage())
+ if (heap()->ShouldZapGarbage()) {
collector_->MakeIterable(page, MarkingTreatmentMode::KEEP,
ZAP_FREE_SPACE);
+ } else if (heap()->incremental_marking()->IsMarking()) {
+ // When incremental marking is on, we need to clear the mark bits of
+ // the full collector. We cannot yet discard the young generation mark
+ // bits as they are still relevant for pointers updating.
+ collector_->MakeIterable(page, MarkingTreatmentMode::KEEP,
+ IGNORE_FREE_SPACE);
+ }
break;
case kObjectsOldToOld:
UNREACHABLE();
break;
}
- return success;
}
-class EvacuationJobTraits {
+class PageEvacuationItem : public ItemParallelJob::Item {
public:
- struct PageData {
- MarkingState marking_state;
- };
+ explicit PageEvacuationItem(Page* page) : page_(page) {}
+ virtual ~PageEvacuationItem() {}
+ Page* page() const { return page_; }
- typedef PageData PerPageData;
- typedef Evacuator* PerTaskData;
+ private:
+ Page* page_;
+};
- static void ProcessPageInParallel(Heap* heap, PerTaskData evacuator,
- MemoryChunk* chunk, PerPageData) {
- evacuator->EvacuatePage(reinterpret_cast<Page*>(chunk));
- }
+class PageEvacuationTask : public ItemParallelJob::Task {
+ public:
+ PageEvacuationTask(Isolate* isolate, Evacuator* evacuator)
+ : ItemParallelJob::Task(isolate), evacuator_(evacuator) {}
+
+ void RunInParallel() override {
+ PageEvacuationItem* item = nullptr;
+ while ((item = GetItem<PageEvacuationItem>()) != nullptr) {
+ evacuator_->EvacuatePage(item->page());
+ item->MarkFinished();
+ }
+ };
+
+ private:
+ Evacuator* evacuator_;
};
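
PageEvacuationTask pulls PageEvacuationItems from the shared ItemParallelJob until none remain, so pages balance across however many evacuators were started. A sketch of that pull-based scheduling, with std::thread and an atomic index standing in for the job/item machinery:

#include <atomic>
#include <functional>
#include <thread>
#include <vector>

struct ToyItem {
  std::function<void()> work;
};

// Each worker claims items by bumping a shared index, so fast workers
// naturally take more pages and a slow page does not stall the whole job.
void RunItemsInParallel(std::vector<ToyItem>* items, int num_tasks) {
  std::atomic<std::size_t> next{0};
  std::vector<std::thread> workers;
  for (int i = 0; i < num_tasks; i++) {
    workers.emplace_back([items, &next]() {
      std::size_t index;
      while ((index = next.fetch_add(1)) < items->size()) {
        (*items)[index].work();  // e.g. evacuate one page
      }
    });
  }
  for (std::thread& worker : workers) worker.join();
}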
template <class Evacuator, class Collector>
void MarkCompactCollectorBase::CreateAndExecuteEvacuationTasks(
- Collector* collector, PageParallelJob<EvacuationJobTraits>* job,
+ Collector* collector, ItemParallelJob* job,
RecordMigratedSlotVisitor* record_visitor,
MigrationObserver* migration_observer, const intptr_t live_bytes) {
// Used for trace summary.
@@ -3988,15 +3688,16 @@ void MarkCompactCollectorBase::CreateAndExecuteEvacuationTasks(
ProfilingMigrationObserver profiling_observer(heap());
const int wanted_num_tasks =
- NumberOfParallelCompactionTasks(job->NumberOfPages());
+ NumberOfParallelCompactionTasks(job->NumberOfItems());
Evacuator** evacuators = new Evacuator*[wanted_num_tasks];
for (int i = 0; i < wanted_num_tasks; i++) {
evacuators[i] = new Evacuator(collector, record_visitor);
if (profiling) evacuators[i]->AddObserver(&profiling_observer);
if (migration_observer != nullptr)
evacuators[i]->AddObserver(migration_observer);
+ job->AddTask(new PageEvacuationTask(heap()->isolate(), evacuators[i]));
}
- job->Run(wanted_num_tasks, [evacuators](int i) { return evacuators[i]; });
+ job->Run();
const Address top = heap()->new_space()->top();
for (int i = 0; i < wanted_num_tasks; i++) {
evacuators[i]->Finalize();
@@ -4017,7 +3718,7 @@ void MarkCompactCollectorBase::CreateAndExecuteEvacuationTasks(
"wanted_tasks=%d tasks=%d cores=%" PRIuS
" live_bytes=%" V8PRIdPTR " compaction_speed=%.f\n",
isolate()->time_millis_since_init(),
- FLAG_parallel_compaction ? "yes" : "no", job->NumberOfPages(),
+ FLAG_parallel_compaction ? "yes" : "no", job->NumberOfItems(),
wanted_num_tasks, job->NumberOfTasks(),
V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads(),
live_bytes, compaction_speed);
@@ -4033,18 +3734,18 @@ bool MarkCompactCollectorBase::ShouldMovePage(Page* p, intptr_t live_bytes) {
}
void MarkCompactCollector::EvacuatePagesInParallel() {
- PageParallelJob<EvacuationJobTraits> job(
- heap_, heap_->isolate()->cancelable_task_manager(),
- &page_parallel_job_semaphore_);
+ ItemParallelJob evacuation_job(isolate()->cancelable_task_manager(),
+ &page_parallel_job_semaphore_);
intptr_t live_bytes = 0;
for (Page* page : old_space_evacuation_pages_) {
live_bytes += MarkingState::Internal(page).live_bytes();
- job.AddPage(page, {marking_state(page)});
+ evacuation_job.AddItem(new PageEvacuationItem(page));
}
for (Page* page : new_space_evacuation_pages_) {
intptr_t live_bytes_on_page = MarkingState::Internal(page).live_bytes();
+ if (live_bytes_on_page == 0 && !page->contains_array_buffers()) continue;
live_bytes += live_bytes_on_page;
if (ShouldMovePage(page, live_bytes_on_page)) {
if (page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK)) {
@@ -4053,24 +3754,24 @@ void MarkCompactCollector::EvacuatePagesInParallel() {
EvacuateNewSpacePageVisitor<NEW_TO_NEW>::Move(page);
}
}
- job.AddPage(page, {marking_state(page)});
+ evacuation_job.AddItem(new PageEvacuationItem(page));
}
- DCHECK_GE(job.NumberOfPages(), 1);
+ if (evacuation_job.NumberOfItems() == 0) return;
RecordMigratedSlotVisitor record_visitor(this);
- CreateAndExecuteEvacuationTasks<FullEvacuator>(this, &job, &record_visitor,
- nullptr, live_bytes);
+ CreateAndExecuteEvacuationTasks<FullEvacuator>(
+ this, &evacuation_job, &record_visitor, nullptr, live_bytes);
PostProcessEvacuationCandidates();
}
void MinorMarkCompactCollector::EvacuatePagesInParallel() {
- PageParallelJob<EvacuationJobTraits> job(
- heap_, heap_->isolate()->cancelable_task_manager(),
- &page_parallel_job_semaphore_);
+ ItemParallelJob evacuation_job(isolate()->cancelable_task_manager(),
+ &page_parallel_job_semaphore_);
intptr_t live_bytes = 0;
for (Page* page : new_space_evacuation_pages_) {
intptr_t live_bytes_on_page = marking_state(page).live_bytes();
+ if (live_bytes_on_page == 0 && !page->contains_array_buffers()) continue;
live_bytes += live_bytes_on_page;
if (ShouldMovePage(page, live_bytes_on_page)) {
if (page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK)) {
@@ -4079,16 +3780,16 @@ void MinorMarkCompactCollector::EvacuatePagesInParallel() {
EvacuateNewSpacePageVisitor<NEW_TO_NEW>::Move(page);
}
}
- job.AddPage(page, {marking_state(page)});
+ evacuation_job.AddItem(new PageEvacuationItem(page));
}
- DCHECK_GE(job.NumberOfPages(), 1);
+ if (evacuation_job.NumberOfItems() == 0) return;
YoungGenerationMigrationObserver observer(heap(),
heap()->mark_compact_collector());
YoungGenerationRecordMigratedSlotVisitor record_visitor(
heap()->mark_compact_collector());
CreateAndExecuteEvacuationTasks<YoungGenerationEvacuator>(
- this, &job, &record_visitor, &observer, live_bytes);
+ this, &evacuation_job, &record_visitor, &observer, live_bytes);
}
class EvacuationWeakObjectRetainer : public WeakObjectRetainer {
@@ -4157,10 +3858,8 @@ int MarkCompactCollector::Sweeper::RawSweep(
intptr_t max_freed_bytes = 0;
int curr_region = -1;
- LiveObjectIterator<kBlackObjects> it(p, state);
- HeapObject* object = NULL;
-
- while ((object = it.Next()) != NULL) {
+ for (auto object_and_size : LiveObjectRange<kBlackObjects>(p, state)) {
+ HeapObject* const object = object_and_size.first;
DCHECK(ObjectMarking::IsBlack(object, state));
Address free_end = object->address();
if (free_end != free_start) {
@@ -4271,34 +3970,25 @@ bool MarkCompactCollector::WillBeDeoptimized(Code* code) {
void MarkCompactCollector::RecordLiveSlotsOnPage(Page* page) {
EvacuateRecordOnlyVisitor visitor(heap());
- LiveObjectVisitor object_visitor;
- object_visitor.VisitBlackObjects(page, MarkingState::Internal(page), &visitor,
- LiveObjectVisitor::kKeepMarking);
+ LiveObjectVisitor::VisitBlackObjectsNoFail(page, MarkingState::Internal(page),
+ &visitor,
+ LiveObjectVisitor::kKeepMarking);
}
template <class Visitor>
bool LiveObjectVisitor::VisitBlackObjects(MemoryChunk* chunk,
const MarkingState& state,
Visitor* visitor,
- IterationMode iteration_mode) {
- LiveObjectIterator<kBlackObjects> it(chunk, state);
- HeapObject* object = nullptr;
- while ((object = it.Next()) != nullptr) {
- DCHECK(ObjectMarking::IsBlack(object, state));
- if (!visitor->Visit(object)) {
+ IterationMode iteration_mode,
+ HeapObject** failed_object) {
+ for (auto object_and_size : LiveObjectRange<kBlackObjects>(chunk, state)) {
+ HeapObject* const object = object_and_size.first;
+ if (!visitor->Visit(object, object_and_size.second)) {
if (iteration_mode == kClearMarkbits) {
state.bitmap()->ClearRange(
chunk->AddressToMarkbitIndex(chunk->area_start()),
chunk->AddressToMarkbitIndex(object->address()));
- SlotSet* slot_set = chunk->slot_set<OLD_TO_NEW>();
- if (slot_set != nullptr) {
- slot_set->RemoveRange(
- 0, static_cast<int>(object->address() - chunk->address()),
- SlotSet::PREFREE_EMPTY_BUCKETS);
- }
- RememberedSet<OLD_TO_NEW>::RemoveRangeTyped(chunk, chunk->address(),
- object->address());
- RecomputeLiveBytes(chunk, state);
+ *failed_object = object;
}
return false;
}
@@ -4309,13 +3999,45 @@ bool LiveObjectVisitor::VisitBlackObjects(MemoryChunk* chunk,
return true;
}
+template <class Visitor>
+void LiveObjectVisitor::VisitBlackObjectsNoFail(MemoryChunk* chunk,
+ const MarkingState& state,
+ Visitor* visitor,
+ IterationMode iteration_mode) {
+ for (auto object_and_size : LiveObjectRange<kBlackObjects>(chunk, state)) {
+ HeapObject* const object = object_and_size.first;
+ DCHECK(ObjectMarking::IsBlack(object, state));
+ const bool success = visitor->Visit(object, object_and_size.second);
+ USE(success);
+ DCHECK(success);
+ }
+ if (iteration_mode == kClearMarkbits) {
+ state.ClearLiveness();
+ }
+}
+
+template <class Visitor>
+void LiveObjectVisitor::VisitGreyObjectsNoFail(MemoryChunk* chunk,
+ const MarkingState& state,
+ Visitor* visitor,
+ IterationMode iteration_mode) {
+ for (auto object_and_size : LiveObjectRange<kGreyObjects>(chunk, state)) {
+ HeapObject* const object = object_and_size.first;
+ DCHECK(ObjectMarking::IsGrey(object, state));
+ const bool success = visitor->Visit(object, object_and_size.second);
+ USE(success);
+ DCHECK(success);
+ }
+ if (iteration_mode == kClearMarkbits) {
+ state.ClearLiveness();
+ }
+}
+
void LiveObjectVisitor::RecomputeLiveBytes(MemoryChunk* chunk,
const MarkingState& state) {
- LiveObjectIterator<kBlackObjects> it(chunk, state);
int new_live_size = 0;
- HeapObject* object = nullptr;
- while ((object = it.Next()) != nullptr) {
- new_live_size += object->Size();
+ for (auto object_and_size : LiveObjectRange<kAllLiveObjects>(chunk, state)) {
+ new_live_size += object_and_size.second;
}
state.SetLiveBytes(new_live_size);
}
@@ -4323,7 +4045,7 @@ void LiveObjectVisitor::RecomputeLiveBytes(MemoryChunk* chunk,
void MarkCompactCollector::Sweeper::AddSweptPageSafe(PagedSpace* space,
Page* page) {
base::LockGuard<base::Mutex> guard(&mutex_);
- swept_list_[space->identity()].Add(page);
+ swept_list_[space->identity()].push_back(page);
}
void MarkCompactCollector::Evacuate() {
@@ -4370,7 +4092,7 @@ void MarkCompactCollector::Evacuate() {
sweeper().AddPage(p->owner()->identity(), p);
}
}
- new_space_evacuation_pages_.Rewind(0);
+ new_space_evacuation_pages_.clear();
for (Page* p : old_space_evacuation_pages_) {
// Important: skip list should be cleared only after roots were updated
@@ -4398,211 +4120,356 @@ void MarkCompactCollector::Evacuate() {
#endif
}
-template <RememberedSetType type>
-class PointerUpdateJobTraits {
+class UpdatingItem : public ItemParallelJob::Item {
public:
- typedef int PerPageData; // Per page data is not used in this job.
- typedef const MarkCompactCollectorBase* PerTaskData;
+ virtual ~UpdatingItem() {}
+ virtual void Process() = 0;
+};
- static void ProcessPageInParallel(Heap* heap, PerTaskData task_data,
- MemoryChunk* chunk, PerPageData) {
- UpdateUntypedPointers(heap, chunk, task_data);
- UpdateTypedPointers(heap, chunk, task_data);
+class PointersUpdatingTask : public ItemParallelJob::Task {
+ public:
+  explicit PointersUpdatingTask(Isolate* isolate)
+ : ItemParallelJob::Task(isolate) {}
+
+ void RunInParallel() override {
+ UpdatingItem* item = nullptr;
+ while ((item = GetItem<UpdatingItem>()) != nullptr) {
+ item->Process();
+ item->MarkFinished();
+ }
+  }
+};
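+
+// Illustrative note (editorial): PointersUpdatingTask is the generic worker
+// for all UpdatingItems below. UpdatePointersAfterEvacuation() seeds an
+// ItemParallelJob with to-space, remembered-set, and array-buffer items and
+// then adds one such task per parallel worker before calling Run().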
+
+class ToSpaceUpdatingItem : public UpdatingItem {
+ public:
+ explicit ToSpaceUpdatingItem(MemoryChunk* chunk, Address start, Address end,
+ MarkingState marking_state)
+ : chunk_(chunk),
+ start_(start),
+ end_(end),
+ marking_state_(marking_state) {}
+ virtual ~ToSpaceUpdatingItem() {}
+
+ void Process() override {
+ if (chunk_->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION)) {
+ // New->new promoted pages contain garbage so they require iteration using
+ // markbits.
+ ProcessVisitLive();
+ } else {
+ ProcessVisitAll();
+ }
}
private:
- static void UpdateUntypedPointers(Heap* heap, MemoryChunk* chunk,
- const MarkCompactCollectorBase* collector) {
- base::LockGuard<base::RecursiveMutex> guard(chunk->mutex());
- if (type == OLD_TO_NEW) {
- RememberedSet<OLD_TO_NEW>::Iterate(
- chunk, [heap, collector](Address slot) {
- return CheckAndUpdateOldToNewSlot(heap, slot, collector);
- });
- } else {
- RememberedSet<OLD_TO_OLD>::Iterate(chunk, [](Address slot) {
- return UpdateSlot(reinterpret_cast<Object**>(slot));
- });
+ void ProcessVisitAll() {
+ PointersUpdatingVisitor visitor;
+ for (Address cur = start_; cur < end_;) {
+ HeapObject* object = HeapObject::FromAddress(cur);
+ Map* map = object->map();
+ int size = object->SizeFromMap(map);
+ object->IterateBody(map->instance_type(), size, &visitor);
+ cur += size;
}
}
- static void UpdateTypedPointers(Heap* heap, MemoryChunk* chunk,
- const MarkCompactCollectorBase* collector) {
- if (type == OLD_TO_OLD) {
- Isolate* isolate = heap->isolate();
- RememberedSet<OLD_TO_OLD>::IterateTyped(
- chunk,
- [isolate](SlotType slot_type, Address host_addr, Address slot) {
- return UpdateTypedSlotHelper::UpdateTypedSlot(isolate, slot_type,
- slot, UpdateSlot);
- });
- } else {
- Isolate* isolate = heap->isolate();
- RememberedSet<OLD_TO_NEW>::IterateTyped(
- chunk, [isolate, heap, collector](SlotType slot_type,
- Address host_addr, Address slot) {
- return UpdateTypedSlotHelper::UpdateTypedSlot(
- isolate, slot_type, slot, [heap, collector](Object** slot) {
- return CheckAndUpdateOldToNewSlot(
- heap, reinterpret_cast<Address>(slot), collector);
- });
- });
+ void ProcessVisitLive() {
+    // For young generation evacuations we want to visit grey objects; for
+    // full MC, we need to visit black objects.
+ PointersUpdatingVisitor visitor;
+ for (auto object_and_size :
+ LiveObjectRange<kAllLiveObjects>(chunk_, marking_state_)) {
+ object_and_size.first->IterateBodyFast(&visitor);
}
}
- static SlotCallbackResult CheckAndUpdateOldToNewSlot(
- Heap* heap, Address slot_address,
- const MarkCompactCollectorBase* collector) {
- // There may be concurrent action on slots in dead objects. Concurrent
- // sweeper threads may overwrite the slot content with a free space object.
- // Moreover, the pointed-to object may also get concurrently overwritten
- // with a free space object. The sweeper always gets priority performing
- // these writes.
- base::NoBarrierAtomicValue<Object*>* slot =
- base::NoBarrierAtomicValue<Object*>::FromAddress(slot_address);
- Object* slot_reference = slot->Value();
- if (heap->InFromSpace(slot_reference)) {
- HeapObject* heap_object = reinterpret_cast<HeapObject*>(slot_reference);
+ MemoryChunk* chunk_;
+ Address start_;
+ Address end_;
+ MarkingState marking_state_;
+};
+
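+// Illustrative note (editorial): ProcessVisitAll() can walk the page linearly
+// because ordinary to-space pages contain only contiguous live objects copied
+// during evacuation; new->new promoted pages keep dead objects around and are
+// therefore walked via the markbit-based LiveObjectRange instead.
+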
+class RememberedSetUpdatingItem : public UpdatingItem {
+ public:
+ explicit RememberedSetUpdatingItem(Heap* heap,
+ MarkCompactCollectorBase* collector,
+ MemoryChunk* chunk,
+ RememberedSetUpdatingMode updating_mode)
+ : heap_(heap),
+ collector_(collector),
+ chunk_(chunk),
+ updating_mode_(updating_mode) {}
+ virtual ~RememberedSetUpdatingItem() {}
+
+ void Process() override {
+ base::LockGuard<base::RecursiveMutex> guard(chunk_->mutex());
+ UpdateUntypedPointers();
+ UpdateTypedPointers();
+ }
+
+ private:
+ template <AccessMode access_mode>
+ inline SlotCallbackResult CheckAndUpdateOldToNewSlot(Address slot_address) {
+ Object** slot = reinterpret_cast<Object**>(slot_address);
+ if (heap_->InFromSpace(*slot)) {
+ HeapObject* heap_object = reinterpret_cast<HeapObject*>(*slot);
DCHECK(heap_object->IsHeapObject());
MapWord map_word = heap_object->map_word();
- // There could still be stale pointers in large object space, map space,
- // and old space for pages that have been promoted.
if (map_word.IsForwardingAddress()) {
- // A sweeper thread may concurrently write a size value which looks like
- // a forwarding pointer. We have to ignore these values.
- if (map_word.ToRawValue() < Page::kPageSize) {
- return REMOVE_SLOT;
+ if (access_mode == AccessMode::ATOMIC) {
+ HeapObject** heap_obj_slot = reinterpret_cast<HeapObject**>(slot);
+ base::AsAtomicWord::Relaxed_Store(heap_obj_slot,
+ map_word.ToForwardingAddress());
+ } else {
+ *slot = map_word.ToForwardingAddress();
}
- // Update the corresponding slot only if the slot content did not
- // change in the meantime. This may happen when a concurrent sweeper
- // thread stored a free space object at that memory location.
- slot->TrySetValue(slot_reference, map_word.ToForwardingAddress());
}
// If the object was in from space before and is after executing the
// callback in to space, the object is still live.
// Unfortunately, we do not know about the slot. It could be in a
// just freed free space object.
- if (heap->InToSpace(slot->Value())) {
+ if (heap_->InToSpace(*slot)) {
return KEEP_SLOT;
}
- } else if (heap->InToSpace(slot_reference)) {
+ } else if (heap_->InToSpace(*slot)) {
// Slots can point to "to" space if the page has been moved, or if the
- // slot has been recorded multiple times in the remembered set. Since
- // there is no forwarding information present we need to check the
- // markbits to determine liveness.
- HeapObject* heap_object = reinterpret_cast<HeapObject*>(slot_reference);
- if (ObjectMarking::IsBlack(heap_object,
- collector->marking_state(heap_object)))
- return KEEP_SLOT;
+ // slot has been recorded multiple times in the remembered set, or
+ // if the slot was already updated during old->old updating.
+      // If the page has been moved, check markbits to determine liveness
+      // of the slot. Otherwise, the slot can just be kept.
+ HeapObject* heap_object = reinterpret_cast<HeapObject*>(*slot);
+ // IsBlackOrGrey is required because objects are marked as grey for
+      // the young generation collector while they are black for the full MC.
+ if (Page::FromAddress(heap_object->address())
+ ->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION)) {
+ if (ObjectMarking::IsBlackOrGrey(
+ heap_object, collector_->marking_state(heap_object))) {
+ return KEEP_SLOT;
+ } else {
+ return REMOVE_SLOT;
+ }
+ }
+ return KEEP_SLOT;
} else {
- DCHECK(!heap->InNewSpace(slot_reference));
+ DCHECK(!heap_->InNewSpace(*slot));
}
return REMOVE_SLOT;
}
-};
-template <RememberedSetType type>
-void MarkCompactCollectorBase::UpdatePointersInParallel(
- Heap* heap, base::Semaphore* semaphore,
- const MarkCompactCollectorBase* collector) {
- PageParallelJob<PointerUpdateJobTraits<type> > job(
- heap, heap->isolate()->cancelable_task_manager(), semaphore);
- RememberedSet<type>::IterateMemoryChunks(
- heap, [&job](MemoryChunk* chunk) { job.AddPage(chunk, 0); });
- int num_pages = job.NumberOfPages();
- int num_tasks = NumberOfPointerUpdateTasks(num_pages);
- job.Run(num_tasks, [collector](int i) { return collector; });
-}
+ void UpdateUntypedPointers() {
+    // A map slot might point to new space and be needed by another task
+    // that is concurrently iterating an object. Hence, such slots have to
+    // be updated using atomics.
+ if (chunk_->slot_set<OLD_TO_NEW, AccessMode::NON_ATOMIC>() != nullptr) {
+ if (chunk_->owner() == heap_->map_space()) {
+ RememberedSet<OLD_TO_NEW>::Iterate(
+ chunk_,
+ [this](Address slot) {
+ return CheckAndUpdateOldToNewSlot<AccessMode::ATOMIC>(slot);
+ },
+ SlotSet::PREFREE_EMPTY_BUCKETS);
+ } else {
+ RememberedSet<OLD_TO_NEW>::Iterate(
+ chunk_,
+ [this](Address slot) {
+ return CheckAndUpdateOldToNewSlot<AccessMode::NON_ATOMIC>(slot);
+ },
+ SlotSet::PREFREE_EMPTY_BUCKETS);
+ }
+ }
+ if ((updating_mode_ == RememberedSetUpdatingMode::ALL) &&
+ (chunk_->slot_set<OLD_TO_OLD, AccessMode::NON_ATOMIC>() != nullptr)) {
+ if (chunk_->owner() == heap_->map_space()) {
+ RememberedSet<OLD_TO_OLD>::Iterate(
+ chunk_,
+ [](Address slot) {
+ return UpdateSlot<AccessMode::ATOMIC>(
+ reinterpret_cast<Object**>(slot));
+ },
+ SlotSet::PREFREE_EMPTY_BUCKETS);
+ } else {
+ RememberedSet<OLD_TO_OLD>::Iterate(
+ chunk_,
+ [](Address slot) {
+ return UpdateSlot<AccessMode::NON_ATOMIC>(
+ reinterpret_cast<Object**>(slot));
+ },
+ SlotSet::PREFREE_EMPTY_BUCKETS);
+ }
+ }
+ }
-class ToSpacePointerUpdateJobTraits {
- public:
- struct PageData {
- Address start;
- Address end;
- MarkingState marking_state;
- };
+ void UpdateTypedPointers() {
+ Isolate* isolate = heap_->isolate();
+ if (chunk_->typed_slot_set<OLD_TO_NEW, AccessMode::NON_ATOMIC>() !=
+ nullptr) {
+ CHECK_NE(chunk_->owner(), heap_->map_space());
+ RememberedSet<OLD_TO_NEW>::IterateTyped(
+ chunk_,
+ [isolate, this](SlotType slot_type, Address host_addr, Address slot) {
+ return UpdateTypedSlotHelper::UpdateTypedSlot(
+ isolate, slot_type, slot, [this](Object** slot) {
+ return CheckAndUpdateOldToNewSlot<AccessMode::NON_ATOMIC>(
+ reinterpret_cast<Address>(slot));
+ });
+ });
+ }
+ if ((updating_mode_ == RememberedSetUpdatingMode::ALL) &&
+ (chunk_->typed_slot_set<OLD_TO_OLD, AccessMode::NON_ATOMIC>() !=
+ nullptr)) {
+ CHECK_NE(chunk_->owner(), heap_->map_space());
+ RememberedSet<OLD_TO_OLD>::IterateTyped(
+ chunk_,
+ [isolate](SlotType slot_type, Address host_addr, Address slot) {
+ return UpdateTypedSlotHelper::UpdateTypedSlot(
+ isolate, slot_type, slot, UpdateSlot<AccessMode::NON_ATOMIC>);
+ });
+ }
+ }
- typedef PageData PerPageData;
- typedef PointersUpdatingVisitor* PerTaskData;
+ Heap* heap_;
+ MarkCompactCollectorBase* collector_;
+ MemoryChunk* chunk_;
+ RememberedSetUpdatingMode updating_mode_;
+};
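+
+// Illustrative note (editorial): the ATOMIC variants above are taken only for
+// chunks owned by the map space; as noted in UpdateUntypedPointers(), map
+// slots may be read concurrently while other tasks iterate object bodies, so
+// they cannot be updated with plain stores.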
- static void ProcessPageInParallel(Heap* heap, PerTaskData visitor,
- MemoryChunk* chunk, PerPageData page_data) {
- if (chunk->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION)) {
- // New->new promoted pages contain garbage so they require iteration
- // using markbits.
- ProcessPageInParallelVisitLive(heap, visitor, chunk, page_data);
- } else {
- ProcessPageInParallelVisitAll(heap, visitor, chunk, page_data);
- }
+class GlobalHandlesUpdatingItem : public UpdatingItem {
+ public:
+ GlobalHandlesUpdatingItem(GlobalHandles* global_handles, size_t start,
+ size_t end)
+ : global_handles_(global_handles), start_(start), end_(end) {}
+ virtual ~GlobalHandlesUpdatingItem() {}
+
+ void Process() override {
+ PointersUpdatingVisitor updating_visitor;
+ global_handles_->IterateNewSpaceRoots(&updating_visitor, start_, end_);
}
private:
- static void ProcessPageInParallelVisitAll(Heap* heap, PerTaskData visitor,
- MemoryChunk* chunk,
- PerPageData page_data) {
- for (Address cur = page_data.start; cur < page_data.end;) {
- HeapObject* object = HeapObject::FromAddress(cur);
- Map* map = object->map();
- int size = object->SizeFromMap(map);
- object->IterateBody(map->instance_type(), size, visitor);
- cur += size;
- }
- }
+ GlobalHandles* global_handles_;
+ size_t start_;
+ size_t end_;
+};
- static void ProcessPageInParallelVisitLive(Heap* heap, PerTaskData visitor,
- MemoryChunk* chunk,
- PerPageData page_data) {
- LiveObjectIterator<kBlackObjects> it(chunk, page_data.marking_state);
- HeapObject* object = NULL;
- while ((object = it.Next()) != NULL) {
- Map* map = object->map();
- int size = object->SizeFromMap(map);
- object->IterateBody(map->instance_type(), size, visitor);
- }
+// Update array buffers on a page that has been evacuated by copying objects.
+// Target page exclusivity in old space is guaranteed by the fact that
+// evacuation tasks either (a) retrieved a fresh page, or (b) retrieved all
+// free list items of a given page. For new space the tracker updates the
+// buffers under a lock.
+class ArrayBufferTrackerUpdatingItem : public UpdatingItem {
+ public:
+ explicit ArrayBufferTrackerUpdatingItem(Page* page) : page_(page) {}
+ virtual ~ArrayBufferTrackerUpdatingItem() {}
+
+ void Process() override {
+ ArrayBufferTracker::ProcessBuffers(
+ page_, ArrayBufferTracker::kUpdateForwardedRemoveOthers);
}
+
+ private:
+ Page* page_;
};
-template <class MarkingStateProvider>
-void UpdateToSpacePointersInParallel(
- Heap* heap, base::Semaphore* semaphore,
- const MarkingStateProvider& marking_state_provider) {
- PageParallelJob<ToSpacePointerUpdateJobTraits> job(
- heap, heap->isolate()->cancelable_task_manager(), semaphore);
- Address space_start = heap->new_space()->bottom();
- Address space_end = heap->new_space()->top();
+int MarkCompactCollectorBase::CollectToSpaceUpdatingItems(
+ ItemParallelJob* job) {
+ // Seed to space pages.
+ const Address space_start = heap()->new_space()->bottom();
+ const Address space_end = heap()->new_space()->top();
+ int pages = 0;
for (Page* page : PageRange(space_start, space_end)) {
Address start =
page->Contains(space_start) ? space_start : page->area_start();
Address end = page->Contains(space_end) ? space_end : page->area_end();
- job.AddPage(page, {start, end, marking_state_provider.marking_state(page)});
+ job->AddItem(
+ new ToSpaceUpdatingItem(page, start, end, marking_state(page)));
+ pages++;
+ }
+ if (pages == 0) return 0;
+ return NumberOfParallelToSpacePointerUpdateTasks(pages);
+}
+
+int MarkCompactCollectorBase::CollectRememberedSetUpdatingItems(
+ ItemParallelJob* job, RememberedSetUpdatingMode mode) {
+ int pages = 0;
+ if (mode == RememberedSetUpdatingMode::ALL) {
+ RememberedSet<OLD_TO_OLD>::IterateMemoryChunks(
+ heap(), [this, &job, &pages, mode](MemoryChunk* chunk) {
+ job->AddItem(
+ new RememberedSetUpdatingItem(heap(), this, chunk, mode));
+ pages++;
+ });
+ }
+ RememberedSet<OLD_TO_NEW>::IterateMemoryChunks(
+ heap(), [this, &job, &pages, mode](MemoryChunk* chunk) {
+ const bool contains_old_to_old_slots =
+ chunk->slot_set<OLD_TO_OLD>() != nullptr ||
+ chunk->typed_slot_set<OLD_TO_OLD>() != nullptr;
+ if (mode == RememberedSetUpdatingMode::OLD_TO_NEW_ONLY ||
+ !contains_old_to_old_slots) {
+ job->AddItem(
+ new RememberedSetUpdatingItem(heap(), this, chunk, mode));
+ pages++;
+ }
+ });
+ return (pages == 0)
+ ? 0
+ : NumberOfParallelPointerUpdateTasks(pages, old_to_new_slots_);
+}
+
+void MinorMarkCompactCollector::CollectNewSpaceArrayBufferTrackerItems(
+ ItemParallelJob* job) {
+ for (Page* p : new_space_evacuation_pages_) {
+ if (Evacuator::ComputeEvacuationMode(p) == Evacuator::kObjectsNewToOld) {
+ job->AddItem(new ArrayBufferTrackerUpdatingItem(p));
+ }
+ }
+}
+
+void MarkCompactCollector::CollectNewSpaceArrayBufferTrackerItems(
+ ItemParallelJob* job) {
+ for (Page* p : new_space_evacuation_pages_) {
+ if (Evacuator::ComputeEvacuationMode(p) == Evacuator::kObjectsNewToOld) {
+ job->AddItem(new ArrayBufferTrackerUpdatingItem(p));
+ }
+ }
+}
+
+void MarkCompactCollector::CollectOldSpaceArrayBufferTrackerItems(
+ ItemParallelJob* job) {
+ for (Page* p : old_space_evacuation_pages_) {
+ if (Evacuator::ComputeEvacuationMode(p) == Evacuator::kObjectsOldToOld &&
+ p->IsEvacuationCandidate()) {
+ job->AddItem(new ArrayBufferTrackerUpdatingItem(p));
+ }
}
- PointersUpdatingVisitor visitor;
- int num_tasks = FLAG_parallel_pointer_update ? job.NumberOfPages() : 1;
- job.Run(num_tasks, [&visitor](int i) { return &visitor; });
}
void MarkCompactCollector::UpdatePointersAfterEvacuation() {
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS);
+ PointersUpdatingVisitor updating_visitor;
+ ItemParallelJob updating_job(isolate()->cancelable_task_manager(),
+ &page_parallel_job_semaphore_);
+
+ CollectNewSpaceArrayBufferTrackerItems(&updating_job);
+ CollectOldSpaceArrayBufferTrackerItems(&updating_job);
+
+ const int to_space_tasks = CollectToSpaceUpdatingItems(&updating_job);
+ const int remembered_set_tasks = CollectRememberedSetUpdatingItems(
+ &updating_job, RememberedSetUpdatingMode::ALL);
+ const int num_tasks = Max(to_space_tasks, remembered_set_tasks);
+ for (int i = 0; i < num_tasks; i++) {
+    updating_job.AddTask(new PointersUpdatingTask(isolate()));
+ }
{
TRACE_GC(heap()->tracer(),
- GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_TO_NEW);
- UpdateToSpacePointersInParallel(heap_, &page_parallel_job_semaphore_,
- *this);
- // Update roots.
- PointersUpdatingVisitor updating_visitor;
+ GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_TO_NEW_ROOTS);
heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE);
- UpdatePointersInParallel<OLD_TO_NEW>(heap_, &page_parallel_job_semaphore_,
- this);
}
-
{
- Heap* heap = this->heap();
- TRACE_GC(heap->tracer(),
- GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_TO_EVACUATED);
- UpdatePointersInParallel<OLD_TO_OLD>(heap_, &page_parallel_job_semaphore_,
- this);
+ TRACE_GC(heap()->tracer(),
+ GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_SLOTS);
+ updating_job.Run();
}
{
@@ -4622,28 +4489,30 @@ void MinorMarkCompactCollector::UpdatePointersAfterEvacuation() {
GCTracer::Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS);
PointersUpdatingVisitor updating_visitor;
+ ItemParallelJob updating_job(isolate()->cancelable_task_manager(),
+ &page_parallel_job_semaphore_);
+
+ CollectNewSpaceArrayBufferTrackerItems(&updating_job);
+ // Create batches of global handles.
+ SeedGlobalHandles<GlobalHandlesUpdatingItem>(isolate()->global_handles(),
+ &updating_job);
+ const int to_space_tasks = CollectToSpaceUpdatingItems(&updating_job);
+ const int remembered_set_tasks = CollectRememberedSetUpdatingItems(
+ &updating_job, RememberedSetUpdatingMode::OLD_TO_NEW_ONLY);
+ const int num_tasks = Max(to_space_tasks, remembered_set_tasks);
+ for (int i = 0; i < num_tasks; i++) {
+    updating_job.AddTask(new PointersUpdatingTask(isolate()));
+ }
{
TRACE_GC(heap()->tracer(),
- GCTracer::Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS_TO_NEW);
- {
- TRACE_GC(
- heap()->tracer(),
- GCTracer::Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS_TO_NEW_TOSPACE);
- UpdateToSpacePointersInParallel(heap_, &page_parallel_job_semaphore_,
- *this);
- }
- {
- TRACE_GC(heap()->tracer(),
- GCTracer::Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS_TO_NEW_ROOTS);
- heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_MINOR_MC_UPDATE);
- }
- {
- TRACE_GC(heap()->tracer(),
- GCTracer::Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS_TO_NEW_OLD);
- UpdatePointersInParallel<OLD_TO_NEW>(heap_, &page_parallel_job_semaphore_,
- this);
- }
+ GCTracer::Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS_TO_NEW_ROOTS);
+ heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_MINOR_MC_UPDATE);
+ }
+ {
+ TRACE_GC(heap()->tracer(),
+ GCTracer::Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS_SLOTS);
+ updating_job.Run();
}
{
@@ -4660,18 +4529,56 @@ void MinorMarkCompactCollector::UpdatePointersAfterEvacuation() {
}
}
+void MarkCompactCollector::ReportAbortedEvacuationCandidate(
+ HeapObject* failed_object, Page* page) {
+ base::LockGuard<base::Mutex> guard(&mutex_);
+
+ page->SetFlag(Page::COMPACTION_WAS_ABORTED);
+ aborted_evacuation_candidates_.push_back(std::make_pair(failed_object, page));
+}
+
void MarkCompactCollector::PostProcessEvacuationCandidates() {
- int aborted_pages = 0;
+ for (auto object_and_page : aborted_evacuation_candidates_) {
+ HeapObject* failed_object = object_and_page.first;
+ Page* page = object_and_page.second;
+ DCHECK(page->IsFlagSet(Page::COMPACTION_WAS_ABORTED));
+ // Aborted compaction page. We have to record slots here, since we
+    // might not have recorded them in the first place.
+
+ // Remove outdated slots.
+ RememberedSet<OLD_TO_NEW>::RemoveRange(page, page->address(),
+ failed_object->address(),
+ SlotSet::PREFREE_EMPTY_BUCKETS);
+ RememberedSet<OLD_TO_NEW>::RemoveRangeTyped(page, page->address(),
+ failed_object->address());
+ const MarkingState state = marking_state(page);
+ // Recompute live bytes.
+ LiveObjectVisitor::RecomputeLiveBytes(page, state);
+ // Re-record slots.
+ EvacuateRecordOnlyVisitor record_visitor(heap());
+ LiveObjectVisitor::VisitBlackObjectsNoFail(page, state, &record_visitor,
+ LiveObjectVisitor::kKeepMarking);
+ // Fix up array buffers.
+ ArrayBufferTracker::ProcessBuffers(
+ page, ArrayBufferTracker::kUpdateForwardedKeepOthers);
+ }
+ const int aborted_pages =
+ static_cast<int>(aborted_evacuation_candidates_.size());
+ aborted_evacuation_candidates_.clear();
+ int aborted_pages_verified = 0;
for (Page* p : old_space_evacuation_pages_) {
if (p->IsFlagSet(Page::COMPACTION_WAS_ABORTED)) {
+ // After clearing the evacuation candidate flag the page is again in a
+ // regular state.
p->ClearEvacuationCandidate();
- aborted_pages++;
+ aborted_pages_verified++;
} else {
DCHECK(p->IsEvacuationCandidate());
DCHECK(p->SweepingDone());
p->Unlink();
}
}
+ DCHECK_EQ(aborted_pages_verified, aborted_pages);
if (FLAG_trace_evacuation && (aborted_pages > 0)) {
PrintIsolate(isolate(), "%8.0f ms: evacuation: aborted=%d\n",
isolate()->time_millis_since_init(), aborted_pages);
@@ -4686,7 +4593,7 @@ void MarkCompactCollector::ReleaseEvacuationCandidates() {
CHECK(p->SweepingDone());
space->ReleasePage(p);
}
- old_space_evacuation_pages_.Rewind(0);
+ old_space_evacuation_pages_.clear();
compacting_ = false;
heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
}
@@ -4741,7 +4648,7 @@ int MarkCompactCollector::Sweeper::ParallelSweepPage(Page* page,
{
base::LockGuard<base::Mutex> guard(&mutex_);
- swept_list_[identity].Add(page);
+ swept_list_[identity].push_back(page);
}
return max_freed;
}
@@ -4774,12 +4681,6 @@ Page* MarkCompactCollector::Sweeper::GetSweepingPageSafe(
return page;
}
-void MarkCompactCollector::Sweeper::AddSweepingPageSafe(AllocationSpace space,
- Page* page) {
- base::LockGuard<base::Mutex> guard(&mutex_);
- sweeping_list_[space].push_back(page);
-}
-
void MarkCompactCollector::StartSweepSpace(PagedSpace* space) {
space->ClearStats();
@@ -4793,7 +4694,7 @@ void MarkCompactCollector::StartSweepSpace(PagedSpace* space) {
if (p->IsEvacuationCandidate()) {
// Will be processed in Evacuate.
- DCHECK(evacuation_candidates_.length() > 0);
+ DCHECK(!evacuation_candidates_.empty());
continue;
}
@@ -4863,11 +4764,6 @@ void MarkCompactCollector::StartSweepSpaces() {
heap_->lo_space()->FreeUnmarkedObjects();
}
-void MarkCompactCollector::Initialize() {
- MarkCompactMarkingVisitor::Initialize();
- IncrementalMarking::Initialize();
-}
-
void MarkCompactCollector::RecordCodeEntrySlot(HeapObject* host, Address slot,
Code* target) {
Page* target_page = Page::FromAddress(reinterpret_cast<Address>(target));
diff --git a/deps/v8/src/heap/mark-compact.h b/deps/v8/src/heap/mark-compact.h
index e32ab4c6f1..937dad1a91 100644
--- a/deps/v8/src/heap/mark-compact.h
+++ b/deps/v8/src/heap/mark-compact.h
@@ -6,41 +6,25 @@
#define V8_HEAP_MARK_COMPACT_H_
#include <deque>
+#include <vector>
#include "src/base/bits.h"
-#include "src/base/platform/condition-variable.h"
-#include "src/cancelable-task.h"
-#include "src/heap/concurrent-marking-deque.h"
#include "src/heap/marking.h"
#include "src/heap/sequential-marking-deque.h"
#include "src/heap/spaces.h"
-#include "src/heap/store-buffer.h"
+#include "src/heap/worklist.h"
namespace v8 {
namespace internal {
// Forward declarations.
-class CodeFlusher;
class EvacuationJobTraits;
class HeapObjectVisitor;
-class LocalWorkStealingMarkingDeque;
-class MarkCompactCollector;
-class MinorMarkCompactCollector;
-class MarkingVisitor;
+class ItemParallelJob;
class MigrationObserver;
-template <typename JobTraits>
-class PageParallelJob;
class RecordMigratedSlotVisitor;
-class ThreadLocalTop;
-class WorkStealingMarkingDeque;
class YoungGenerationMarkingVisitor;
-#ifdef V8_CONCURRENT_MARKING
-using MarkingDeque = ConcurrentMarkingDeque;
-#else
-using MarkingDeque = SequentialMarkingDeque;
-#endif
-
class ObjectMarking : public AllStatic {
public:
V8_INLINE static MarkBit MarkBitFrom(HeapObject* obj,
@@ -55,34 +39,34 @@ class ObjectMarking : public AllStatic {
return Marking::Color(ObjectMarking::MarkBitFrom(obj, state));
}
- template <MarkBit::AccessMode access_mode = MarkBit::NON_ATOMIC>
+ template <AccessMode access_mode = AccessMode::NON_ATOMIC>
V8_INLINE static bool IsImpossible(HeapObject* obj,
const MarkingState& state) {
return Marking::IsImpossible<access_mode>(MarkBitFrom(obj, state));
}
- template <MarkBit::AccessMode access_mode = MarkBit::NON_ATOMIC>
+ template <AccessMode access_mode = AccessMode::NON_ATOMIC>
V8_INLINE static bool IsBlack(HeapObject* obj, const MarkingState& state) {
return Marking::IsBlack<access_mode>(MarkBitFrom(obj, state));
}
- template <MarkBit::AccessMode access_mode = MarkBit::NON_ATOMIC>
+ template <AccessMode access_mode = AccessMode::NON_ATOMIC>
V8_INLINE static bool IsWhite(HeapObject* obj, const MarkingState& state) {
return Marking::IsWhite<access_mode>(MarkBitFrom(obj, state));
}
- template <MarkBit::AccessMode access_mode = MarkBit::NON_ATOMIC>
+ template <AccessMode access_mode = AccessMode::NON_ATOMIC>
V8_INLINE static bool IsGrey(HeapObject* obj, const MarkingState& state) {
return Marking::IsGrey<access_mode>(MarkBitFrom(obj, state));
}
- template <MarkBit::AccessMode access_mode = MarkBit::NON_ATOMIC>
+ template <AccessMode access_mode = AccessMode::NON_ATOMIC>
V8_INLINE static bool IsBlackOrGrey(HeapObject* obj,
const MarkingState& state) {
return Marking::IsBlackOrGrey<access_mode>(MarkBitFrom(obj, state));
}
- template <MarkBit::AccessMode access_mode = MarkBit::NON_ATOMIC>
+ template <AccessMode access_mode = AccessMode::NON_ATOMIC>
V8_INLINE static bool BlackToGrey(HeapObject* obj,
const MarkingState& state) {
MarkBit markbit = MarkBitFrom(obj, state);
@@ -91,20 +75,20 @@ class ObjectMarking : public AllStatic {
return true;
}
- template <MarkBit::AccessMode access_mode = MarkBit::NON_ATOMIC>
+ template <AccessMode access_mode = AccessMode::NON_ATOMIC>
V8_INLINE static bool WhiteToGrey(HeapObject* obj,
const MarkingState& state) {
return Marking::WhiteToGrey<access_mode>(MarkBitFrom(obj, state));
}
- template <MarkBit::AccessMode access_mode = MarkBit::NON_ATOMIC>
+ template <AccessMode access_mode = AccessMode::NON_ATOMIC>
V8_INLINE static bool WhiteToBlack(HeapObject* obj,
const MarkingState& state) {
return ObjectMarking::WhiteToGrey<access_mode>(obj, state) &&
ObjectMarking::GreyToBlack<access_mode>(obj, state);
}
- template <MarkBit::AccessMode access_mode = MarkBit::NON_ATOMIC>
+ template <AccessMode access_mode = AccessMode::NON_ATOMIC>
V8_INLINE static bool GreyToBlack(HeapObject* obj,
const MarkingState& state) {
MarkBit markbit = MarkBitFrom(obj, state);
@@ -117,86 +101,34 @@ class ObjectMarking : public AllStatic {
DISALLOW_IMPLICIT_CONSTRUCTORS(ObjectMarking);
};
-// CodeFlusher collects candidates for code flushing during marking and
-// processes those candidates after marking has completed in order to
-// reset those functions referencing code objects that would otherwise
-// be unreachable. Code objects can be referenced in two ways:
-// - SharedFunctionInfo references unoptimized code.
-// - JSFunction references either unoptimized or optimized code.
-// We are not allowed to flush unoptimized code for functions that got
-// optimized or inlined into optimized code, because we might bailout
-// into the unoptimized code again during deoptimization.
-class CodeFlusher {
- public:
- explicit CodeFlusher(Isolate* isolate)
- : isolate_(isolate),
- jsfunction_candidates_head_(nullptr),
- shared_function_info_candidates_head_(nullptr) {}
-
- inline void AddCandidate(SharedFunctionInfo* shared_info);
- inline void AddCandidate(JSFunction* function);
-
- void EvictCandidate(SharedFunctionInfo* shared_info);
- void EvictCandidate(JSFunction* function);
-
- void ProcessCandidates() {
- ProcessSharedFunctionInfoCandidates();
- ProcessJSFunctionCandidates();
- }
-
- inline void VisitListHeads(RootVisitor* v);
-
- template <typename StaticVisitor>
- inline void IteratePointersToFromSpace();
-
- private:
- void ProcessJSFunctionCandidates();
- void ProcessSharedFunctionInfoCandidates();
-
- static inline JSFunction** GetNextCandidateSlot(JSFunction* candidate);
- static inline JSFunction* GetNextCandidate(JSFunction* candidate);
- static inline void SetNextCandidate(JSFunction* candidate,
- JSFunction* next_candidate);
- static inline void ClearNextCandidate(JSFunction* candidate,
- Object* undefined);
-
- static inline SharedFunctionInfo* GetNextCandidate(
- SharedFunctionInfo* candidate);
- static inline void SetNextCandidate(SharedFunctionInfo* candidate,
- SharedFunctionInfo* next_candidate);
- static inline void ClearNextCandidate(SharedFunctionInfo* candidate);
-
- Isolate* isolate_;
- JSFunction* jsfunction_candidates_head_;
- SharedFunctionInfo* shared_function_info_candidates_head_;
-
- DISALLOW_COPY_AND_ASSIGN(CodeFlusher);
-};
-
-class MarkBitCellIterator BASE_EMBEDDED {
+class MarkBitCellIterator {
public:
MarkBitCellIterator(MemoryChunk* chunk, MarkingState state) : chunk_(chunk) {
- last_cell_index_ = Bitmap::IndexToCell(Bitmap::CellAlignIndex(
+ DCHECK(Bitmap::IsCellAligned(
+ chunk_->AddressToMarkbitIndex(chunk_->area_start())));
+ DCHECK(Bitmap::IsCellAligned(
chunk_->AddressToMarkbitIndex(chunk_->area_end())));
+ last_cell_index_ =
+ Bitmap::IndexToCell(chunk_->AddressToMarkbitIndex(chunk_->area_end()));
cell_base_ = chunk_->area_start();
- cell_index_ = Bitmap::IndexToCell(
- Bitmap::CellAlignIndex(chunk_->AddressToMarkbitIndex(cell_base_)));
+ cell_index_ =
+ Bitmap::IndexToCell(chunk_->AddressToMarkbitIndex(cell_base_));
cells_ = state.bitmap()->cells();
}
- inline bool Done() { return cell_index_ == last_cell_index_; }
+ inline bool Done() { return cell_index_ >= last_cell_index_; }
inline bool HasNext() { return cell_index_ < last_cell_index_ - 1; }
inline MarkBit::CellType* CurrentCell() {
- DCHECK(cell_index_ == Bitmap::IndexToCell(Bitmap::CellAlignIndex(
- chunk_->AddressToMarkbitIndex(cell_base_))));
+ DCHECK_EQ(cell_index_, Bitmap::IndexToCell(Bitmap::CellAlignIndex(
+ chunk_->AddressToMarkbitIndex(cell_base_))));
return &cells_[cell_index_];
}
inline Address CurrentCellBase() {
- DCHECK(cell_index_ == Bitmap::IndexToCell(Bitmap::CellAlignIndex(
- chunk_->AddressToMarkbitIndex(cell_base_))));
+ DCHECK_EQ(cell_index_, Bitmap::IndexToCell(Bitmap::CellAlignIndex(
+ chunk_->AddressToMarkbitIndex(cell_base_))));
return cell_base_;
}
@@ -233,55 +165,105 @@ class MarkBitCellIterator BASE_EMBEDDED {
Address cell_base_;
};
-// Grey objects can happen on black pages when black objects transition to
-// grey e.g. when calling RecordWrites on them.
enum LiveObjectIterationMode {
kBlackObjects,
kGreyObjects,
kAllLiveObjects
};
-template <LiveObjectIterationMode T>
-class LiveObjectIterator BASE_EMBEDDED {
+template <LiveObjectIterationMode mode>
+class LiveObjectRange {
public:
- LiveObjectIterator(MemoryChunk* chunk, MarkingState state)
+ class iterator {
+ public:
+ using value_type = std::pair<HeapObject*, int /* size */>;
+ using pointer = const value_type*;
+ using reference = const value_type&;
+ using iterator_category = std::forward_iterator_tag;
+
+ inline iterator(MemoryChunk* chunk, MarkingState state, Address start);
+
+ inline iterator& operator++();
+ inline iterator operator++(int);
+
+ bool operator==(iterator other) const {
+ return current_object_ == other.current_object_;
+ }
+
+ bool operator!=(iterator other) const { return !(*this == other); }
+
+ value_type operator*() {
+ return std::make_pair(current_object_, current_size_);
+ }
+
+ private:
+ inline void AdvanceToNextValidObject();
+
+ MemoryChunk* const chunk_;
+ Map* const one_word_filler_map_;
+ Map* const two_word_filler_map_;
+ Map* const free_space_map_;
+ MarkBitCellIterator it_;
+ Address cell_base_;
+ MarkBit::CellType current_cell_;
+ HeapObject* current_object_;
+ int current_size_;
+ };
+
+ LiveObjectRange(MemoryChunk* chunk, MarkingState state)
: chunk_(chunk),
- it_(chunk_, state),
- cell_base_(it_.CurrentCellBase()),
- current_cell_(*it_.CurrentCell()) {}
+ state_(state),
+ start_(chunk_->area_start()),
+ end_(chunk->area_end()) {}
- HeapObject* Next();
+ inline iterator begin();
+ inline iterator end();
private:
- inline Heap* heap() { return chunk_->heap(); }
-
- MemoryChunk* chunk_;
- MarkBitCellIterator it_;
- Address cell_base_;
- MarkBit::CellType current_cell_;
+ MemoryChunk* const chunk_;
+ MarkingState state_;
+ Address start_;
+ Address end_;
};
-class LiveObjectVisitor BASE_EMBEDDED {
+class LiveObjectVisitor : AllStatic {
public:
enum IterationMode {
kKeepMarking,
kClearMarkbits,
};
- // Visits black objects on a MemoryChunk until the Visitor returns for an
- // object. If IterationMode::kClearMarkbits is passed the markbits and slots
- // for visited objects are cleared for each successfully visited object.
+ // Visits black objects on a MemoryChunk until the Visitor returns |false| for
+  // an object. If IterationMode::kClearMarkbits is passed, the markbits and
+  // slots of each successfully visited object are cleared.
template <class Visitor>
- bool VisitBlackObjects(MemoryChunk* chunk, const MarkingState& state,
- Visitor* visitor, IterationMode iteration_mode);
+ static bool VisitBlackObjects(MemoryChunk* chunk, const MarkingState& state,
+ Visitor* visitor, IterationMode iteration_mode,
+ HeapObject** failed_object);
- private:
- void RecomputeLiveBytes(MemoryChunk* chunk, const MarkingState& state);
+ // Visits black objects on a MemoryChunk. The visitor is not allowed to fail
+ // visitation for an object.
+ template <class Visitor>
+ static void VisitBlackObjectsNoFail(MemoryChunk* chunk,
+ const MarkingState& state,
+ Visitor* visitor,
+ IterationMode iteration_mode);
+
+  // Visits grey objects on a MemoryChunk. The visitor is not allowed to fail
+ // visitation for an object.
+ template <class Visitor>
+ static void VisitGreyObjectsNoFail(MemoryChunk* chunk,
+ const MarkingState& state,
+ Visitor* visitor,
+ IterationMode iteration_mode);
+
+ static void RecomputeLiveBytes(MemoryChunk* chunk, const MarkingState& state);
};
enum PageEvacuationMode { NEW_TO_NEW, NEW_TO_OLD };
enum FreeSpaceTreatmentMode { IGNORE_FREE_SPACE, ZAP_FREE_SPACE };
enum MarkingTreatmentMode { KEEP, CLEAR };
+enum class RememberedSetUpdatingMode { ALL, OLD_TO_NEW_ONLY };
// Base class for minor and full MC collectors.
class MarkCompactCollectorBase {
@@ -301,14 +283,15 @@ class MarkCompactCollectorBase {
inline Isolate* isolate() { return heap()->isolate(); }
protected:
- explicit MarkCompactCollectorBase(Heap* heap) : heap_(heap) {}
+ explicit MarkCompactCollectorBase(Heap* heap)
+ : heap_(heap), old_to_new_slots_(0) {}
// Marking operations for objects reachable from roots.
virtual void MarkLiveObjects() = 0;
// Mark objects reachable (transitively) from objects in the marking
// stack.
- virtual void EmptyMarkingDeque() = 0;
- virtual void ProcessMarkingDeque() = 0;
+ virtual void EmptyMarkingWorklist() = 0;
+ virtual void ProcessMarkingWorklist() = 0;
// Clear non-live references held in side data structures.
virtual void ClearNonLiveReferences() = 0;
virtual void EvacuatePrologue() = 0;
@@ -317,26 +300,27 @@ class MarkCompactCollectorBase {
virtual void EvacuatePagesInParallel() = 0;
virtual void UpdatePointersAfterEvacuation() = 0;
- // The number of parallel compaction tasks, including the main thread.
- int NumberOfParallelCompactionTasks(int pages, intptr_t live_bytes);
-
template <class Evacuator, class Collector>
void CreateAndExecuteEvacuationTasks(
- Collector* collector, PageParallelJob<EvacuationJobTraits>* job,
+ Collector* collector, ItemParallelJob* job,
RecordMigratedSlotVisitor* record_visitor,
MigrationObserver* migration_observer, const intptr_t live_bytes);
// Returns whether this page should be moved according to heuristics.
bool ShouldMovePage(Page* p, intptr_t live_bytes);
- template <RememberedSetType type>
- void UpdatePointersInParallel(Heap* heap, base::Semaphore* semaphore,
- const MarkCompactCollectorBase* collector);
+ int CollectToSpaceUpdatingItems(ItemParallelJob* job);
+ int CollectRememberedSetUpdatingItems(ItemParallelJob* job,
+ RememberedSetUpdatingMode mode);
int NumberOfParallelCompactionTasks(int pages);
- int NumberOfPointerUpdateTasks(int pages);
+ int NumberOfParallelPointerUpdateTasks(int pages, int slots);
+ int NumberOfParallelToSpacePointerUpdateTasks(int pages);
Heap* heap_;
+  // Number of old-to-new slots. Should be computed during MarkLiveObjects.
+  // -1 indicates that the value could not be computed.
+ int old_to_new_slots_;
};
// Collector for young-generation only.
@@ -362,24 +346,23 @@ class MinorMarkCompactCollector final : public MarkCompactCollectorBase {
void CleanupSweepToIteratePages();
private:
+ using MarkingWorklist = Worklist<HeapObject*, 64 /* segment size */>;
class RootMarkingVisitorSeedOnly;
class RootMarkingVisitor;
- static const int kNumMarkers = 4;
+ static const int kNumMarkers = 8;
static const int kMainMarker = 0;
- inline WorkStealingMarkingDeque* marking_deque() { return marking_deque_; }
+ inline MarkingWorklist* worklist() { return worklist_; }
- inline YoungGenerationMarkingVisitor* marking_visitor(int index) {
- DCHECK_LT(index, kNumMarkers);
- return marking_visitor_[index];
+ inline YoungGenerationMarkingVisitor* main_marking_visitor() {
+ return main_marking_visitor_;
}
- SlotCallbackResult CheckAndMarkObject(Heap* heap, Address slot_address);
void MarkLiveObjects() override;
void MarkRootSetInParallel();
- void ProcessMarkingDeque() override;
- void EmptyMarkingDeque() override;
+ void ProcessMarkingWorklist() override;
+ void EmptyMarkingWorklist() override;
void ClearNonLiveReferences() override;
void EvacuatePrologue() override;
@@ -388,15 +371,16 @@ class MinorMarkCompactCollector final : public MarkCompactCollectorBase {
void EvacuatePagesInParallel() override;
void UpdatePointersAfterEvacuation() override;
- int NumberOfMarkingTasks();
+ void CollectNewSpaceArrayBufferTrackerItems(ItemParallelJob* job);
+
+ int NumberOfParallelMarkingTasks(int pages);
- WorkStealingMarkingDeque* marking_deque_;
- YoungGenerationMarkingVisitor* marking_visitor_[kNumMarkers];
+ MarkingWorklist* worklist_;
+ YoungGenerationMarkingVisitor* main_marking_visitor_;
base::Semaphore page_parallel_job_semaphore_;
- List<Page*> new_space_evacuation_pages_;
+ std::vector<Page*> new_space_evacuation_pages_;
std::vector<Page*> sweep_to_iterate_pages_;
- friend class MarkYoungGenerationJobTraits;
friend class YoungGenerationMarkingTask;
friend class YoungGenerationMarkingVisitor;
};
@@ -404,10 +388,111 @@ class MinorMarkCompactCollector final : public MarkCompactCollectorBase {
// Collector for young and old generation.
class MarkCompactCollector final : public MarkCompactCollectorBase {
public:
+ // Wrapper for the shared and bailout worklists.
+ class MarkingWorklist {
+ public:
+ using ConcurrentMarkingWorklist = Worklist<HeapObject*, 64>;
+
+ static const int kMainThread = 0;
+ // The heap parameter is not used but needed to match the sequential case.
+ explicit MarkingWorklist(Heap* heap) {}
+
+ bool Push(HeapObject* object) { return shared_.Push(kMainThread, object); }
+
+ bool PushBailout(HeapObject* object) {
+ return bailout_.Push(kMainThread, object);
+ }
+
+ HeapObject* Pop() {
+ HeapObject* result;
+#ifdef V8_CONCURRENT_MARKING
+ if (bailout_.Pop(kMainThread, &result)) return result;
+#endif
+ if (shared_.Pop(kMainThread, &result)) return result;
+ return nullptr;
+ }
+
+ void Clear() {
+ bailout_.Clear();
+ shared_.Clear();
+ }
+
+ bool IsFull() { return false; }
+
+ bool IsEmpty() {
+ return bailout_.IsLocalEmpty(kMainThread) &&
+ shared_.IsLocalEmpty(kMainThread) &&
+ bailout_.IsGlobalPoolEmpty() && shared_.IsGlobalPoolEmpty();
+ }
+
+ int Size() {
+ return static_cast<int>(bailout_.LocalSize(kMainThread) +
+ shared_.LocalSize(kMainThread));
+ }
+
+    // Calls the specified callback on each element of the worklists and
+    // replaces the element with the result of the callback. If the callback
+    // returns nullptr then the element is removed from the worklist.
+ // The callback must accept HeapObject* and return HeapObject*.
+ template <typename Callback>
+ void Update(Callback callback) {
+ bailout_.Update(callback);
+ shared_.Update(callback);
+ }
+
+ ConcurrentMarkingWorklist* shared() { return &shared_; }
+ ConcurrentMarkingWorklist* bailout() { return &bailout_; }
+
+ // These empty functions are needed to match the interface
+ // of the sequential marking deque.
+ void SetUp() {}
+ void TearDown() { Clear(); }
+ void StartUsing() {}
+ void StopUsing() {}
+ void ClearOverflowed() {}
+ void SetOverflowed() {}
+ bool overflowed() const { return false; }
+
+ void Print() {
+ PrintWorklist("shared", &shared_);
+ PrintWorklist("bailout", &bailout_);
+ }
+
+ private:
+ // Prints the stats about the global pool of the worklist.
+ void PrintWorklist(const char* worklist_name,
+ ConcurrentMarkingWorklist* worklist) {
+ std::map<InstanceType, int> count;
+ int total_count = 0;
+ worklist->IterateGlobalPool([&count, &total_count](HeapObject* obj) {
+ ++total_count;
+ count[obj->map()->instance_type()]++;
+ });
+ std::vector<std::pair<int, InstanceType>> rank;
+ for (auto i : count) {
+ rank.push_back(std::make_pair(i.second, i.first));
+ }
+ std::map<InstanceType, std::string> instance_type_name;
+#define INSTANCE_TYPE_NAME(name) instance_type_name[name] = #name;
+ INSTANCE_TYPE_LIST(INSTANCE_TYPE_NAME)
+#undef INSTANCE_TYPE_NAME
+ std::sort(rank.begin(), rank.end(),
+ std::greater<std::pair<int, InstanceType>>());
+ PrintF("Worklist %s: %d\n", worklist_name, total_count);
+ for (auto i : rank) {
+ PrintF(" [%s]: %d\n", instance_type_name[i.second].c_str(), i.first);
+ }
+ }
+ ConcurrentMarkingWorklist shared_;
+ ConcurrentMarkingWorklist bailout_;
+ };
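+
+  // Illustrative note (editorial): without V8_CONCURRENT_MARKING the bailout
+  // worklist is never popped on the main thread (see Pop() above), but it
+  // still participates in IsEmpty(), Size() and Update() so both build modes
+  // share a single interface.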
+
class RootMarkingVisitor;
class Sweeper {
public:
+ class SweeperTask;
+
enum FreeListRebuildingMode { REBUILD_FREE_LIST, IGNORE_FREE_LIST };
enum ClearOldToNewSlotsMode {
DO_NOT_CLEAR,
@@ -416,15 +501,15 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
};
typedef std::deque<Page*> SweepingList;
- typedef List<Page*> SweptList;
+ typedef std::vector<Page*> SweptList;
static int RawSweep(Page* p, FreeListRebuildingMode free_list_mode,
FreeSpaceTreatmentMode free_space_mode);
explicit Sweeper(Heap* heap)
: heap_(heap),
- num_tasks_(0),
pending_sweeper_tasks_semaphore_(0),
+ semaphore_counter_(0),
sweeping_in_progress_(false),
num_sweeping_tasks_(0) {}
@@ -450,10 +535,7 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
Page* GetSweptPageSafe(PagedSpace* space);
private:
- class SweeperTask;
-
static const int kAllocationSpaces = LAST_PAGED_SPACE + 1;
- static const int kMaxSweeperTasks = kAllocationSpaces;
static ClearOldToNewSlotsMode GetClearOldToNewSlotsMode(Page* p);
@@ -465,14 +547,13 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
}
Page* GetSweepingPageSafe(AllocationSpace space);
- void AddSweepingPageSafe(AllocationSpace space, Page* page);
void PrepareToBeSweptPage(AllocationSpace space, Page* page);
- Heap* const heap_;
- int num_tasks_;
- CancelableTaskManager::Id task_ids_[kMaxSweeperTasks];
+ Heap* heap_;
base::Semaphore pending_sweeper_tasks_semaphore_;
+ // Counter is only used for waiting on the semaphore.
+ intptr_t semaphore_counter_;
base::Mutex mutex_;
SweptList swept_list_[kAllocationSpaces];
SweepingList sweeping_list_[kAllocationSpaces];
@@ -487,8 +568,6 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
kClearMarkbits,
};
- static void Initialize();
-
MarkingState marking_state(HeapObject* object) const override {
return MarkingState::Internal(object);
}
@@ -514,9 +593,6 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
void AbortCompaction();
- CodeFlusher* code_flusher() { return code_flusher_; }
- inline bool is_code_flushing_enabled() const { return code_flusher_ != NULL; }
-
INLINE(static bool ShouldSkipEvacuationSlotRecording(Object* host)) {
return Page::FromAddress(reinterpret_cast<Address>(host))
->ShouldSkipEvacuationSlotRecording();
@@ -562,7 +638,7 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
bool evacuation() const { return evacuation_; }
- MarkingDeque* marking_deque() { return &marking_deque_; }
+ MarkingWorklist* marking_worklist() { return &marking_worklist_; }
Sweeper& sweeper() { return sweeper_; }
@@ -597,22 +673,12 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
// Finishes GC, performs heap verification if enabled.
void Finish();
- // Mark code objects that are active on the stack to prevent them
- // from being flushed.
- void PrepareThreadForCodeFlushing(Isolate* isolate, ThreadLocalTop* top);
-
- void PrepareForCodeFlushing();
-
void MarkLiveObjects() override;
// Pushes a black object onto the marking stack and accounts for live bytes.
// Note that this assumes live bytes have not yet been counted.
V8_INLINE void PushBlack(HeapObject* obj);
- // Unshifts a black object into the marking stack and accounts for live bytes.
- // Note that this assumes lives bytes have already been counted.
- V8_INLINE void UnshiftBlack(HeapObject* obj);
-
// Marks the object black and pushes it on the marking stack.
// This is for non-incremental marking only.
V8_INLINE void MarkObject(HeapObject* obj);
@@ -624,7 +690,7 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
// the string table are weak.
void MarkStringTable(RootMarkingVisitor* visitor);
- void ProcessMarkingDeque() override;
+ void ProcessMarkingWorklist() override;
// Mark objects reachable (transitively) from objects in the marking stack
// or overflowed in the heap. This respects references only considered in
@@ -644,15 +710,15 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
// This function empties the marking stack, but may leave overflowed objects
// in the heap, in which case the marking stack's overflow flag will be set.
- void EmptyMarkingDeque() override;
+ void EmptyMarkingWorklist() override;
// Refill the marking stack with overflowed objects from the heap. This
// function either leaves the marking stack full or clears the overflow
// flag on the marking stack.
- void RefillMarkingDeque();
+ void RefillMarkingWorklist();
// Helper methods for refilling the marking stack by discovering grey objects
- // on various pages of the heap. Used by {RefillMarkingDeque} only.
+ // on various pages of the heap. Used by {RefillMarkingWorklist} only.
template <class T>
void DiscoverGreyObjectsWithIterator(T* it);
void DiscoverGreyObjectsOnPage(MemoryChunk* p);
@@ -710,9 +776,14 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
void EvacuatePagesInParallel() override;
void UpdatePointersAfterEvacuation() override;
+ void CollectNewSpaceArrayBufferTrackerItems(ItemParallelJob* job);
+ void CollectOldSpaceArrayBufferTrackerItems(ItemParallelJob* job);
+
void ReleaseEvacuationCandidates();
void PostProcessEvacuationCandidates();
+ void ReportAbortedEvacuationCandidate(HeapObject* failed_object, Page* page);
+ base::Mutex mutex_;
base::Semaphore page_parallel_job_semaphore_;
#ifdef DEBUG
@@ -742,29 +813,25 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
bool have_code_to_deoptimize_;
- MarkingDeque marking_deque_;
-
- CodeFlusher* code_flusher_;
+ MarkingWorklist marking_worklist_;
// Candidates for pages that should be evacuated.
- List<Page*> evacuation_candidates_;
+ std::vector<Page*> evacuation_candidates_;
// Pages that are actually processed during evacuation.
- List<Page*> old_space_evacuation_pages_;
- List<Page*> new_space_evacuation_pages_;
+ std::vector<Page*> old_space_evacuation_pages_;
+ std::vector<Page*> new_space_evacuation_pages_;
+ std::vector<std::pair<HeapObject*, Page*>> aborted_evacuation_candidates_;
Sweeper sweeper_;
- friend class CodeMarkingVisitor;
+ friend class FullEvacuator;
friend class Heap;
friend class IncrementalMarkingMarkingVisitor;
friend class MarkCompactMarkingVisitor;
- friend class MarkingVisitor;
friend class RecordMigratedSlotVisitor;
- friend class SharedFunctionInfoMarkingVisitor;
- friend class StoreBuffer;
};
-class EvacuationScope BASE_EMBEDDED {
+class EvacuationScope {
public:
explicit EvacuationScope(MarkCompactCollector* collector)
: collector_(collector) {
diff --git a/deps/v8/src/heap/marking.cc b/deps/v8/src/heap/marking.cc
new file mode 100644
index 0000000000..eef3d0a59f
--- /dev/null
+++ b/deps/v8/src/heap/marking.cc
@@ -0,0 +1,201 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/marking.h"
+
+namespace v8 {
+namespace internal {
+
+void Bitmap::Clear() {
+ base::Atomic32* cell_base = reinterpret_cast<base::Atomic32*>(cells());
+ for (int i = 0; i < CellsCount(); i++) {
+ base::Relaxed_Store(cell_base + i, 0);
+ }
+ // This fence prevents re-ordering of publishing stores with the mark-bit
+ // clearing stores.
+ base::MemoryFence();
+}
+
+void Bitmap::SetRange(uint32_t start_index, uint32_t end_index) {
+ unsigned int start_cell_index = start_index >> Bitmap::kBitsPerCellLog2;
+ MarkBit::CellType start_index_mask = 1u << Bitmap::IndexInCell(start_index);
+ unsigned int end_cell_index = end_index >> Bitmap::kBitsPerCellLog2;
+ MarkBit::CellType end_index_mask = 1u << Bitmap::IndexInCell(end_index);
+ if (start_cell_index != end_cell_index) {
+ // Firstly, fill all bits from the start address to the end of the first
+ // cell with 1s.
+ SetBitsInCell<AccessMode::ATOMIC>(start_cell_index,
+ ~(start_index_mask - 1));
+ // Then fill all in between cells with 1s.
+ base::Atomic32* cell_base = reinterpret_cast<base::Atomic32*>(cells());
+ for (unsigned int i = start_cell_index + 1; i < end_cell_index; i++) {
+ base::Relaxed_Store(cell_base + i, ~0u);
+ }
+ // Finally, fill all bits until the end address in the last cell with 1s.
+ SetBitsInCell<AccessMode::ATOMIC>(end_cell_index, (end_index_mask - 1));
+ } else {
+ SetBitsInCell<AccessMode::ATOMIC>(start_cell_index,
+ end_index_mask - start_index_mask);
+ }
+ // This fence prevents re-ordering of publishing stores with the mark-
+ // bit setting stores.
+ base::MemoryFence();
+}
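+
+// Worked example (editorial, illustrative): with 32-bit cells, SetRange(3, 70)
+// sets bits 3..31 of cell 0, all of cell 1, and bits 0..5 of cell 2, i.e. the
+// half-open markbit range [3, 70). ClearRange() below mirrors this with 0s.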
+
+void Bitmap::ClearRange(uint32_t start_index, uint32_t end_index) {
+ unsigned int start_cell_index = start_index >> Bitmap::kBitsPerCellLog2;
+ MarkBit::CellType start_index_mask = 1u << Bitmap::IndexInCell(start_index);
+
+ unsigned int end_cell_index = end_index >> Bitmap::kBitsPerCellLog2;
+ MarkBit::CellType end_index_mask = 1u << Bitmap::IndexInCell(end_index);
+
+ if (start_cell_index != end_cell_index) {
+ // Firstly, fill all bits from the start address to the end of the first
+ // cell with 0s.
+ ClearBitsInCell<AccessMode::ATOMIC>(start_cell_index,
+ ~(start_index_mask - 1));
+ // Then fill all in between cells with 0s.
+ base::Atomic32* cell_base = reinterpret_cast<base::Atomic32*>(cells());
+ for (unsigned int i = start_cell_index + 1; i < end_cell_index; i++) {
+ base::Relaxed_Store(cell_base + i, 0);
+ }
+    // Finally, clear all bits until the end address in the last cell.
+ ClearBitsInCell<AccessMode::ATOMIC>(end_cell_index, (end_index_mask - 1));
+ } else {
+ ClearBitsInCell<AccessMode::ATOMIC>(start_cell_index,
+ (end_index_mask - start_index_mask));
+ }
+ // This fence prevents re-ordering of publishing stores with the mark-
+ // bit clearing stores.
+ base::MemoryFence();
+}
+
+bool Bitmap::AllBitsSetInRange(uint32_t start_index, uint32_t end_index) {
+ unsigned int start_cell_index = start_index >> Bitmap::kBitsPerCellLog2;
+ MarkBit::CellType start_index_mask = 1u << Bitmap::IndexInCell(start_index);
+
+ unsigned int end_cell_index = end_index >> Bitmap::kBitsPerCellLog2;
+ MarkBit::CellType end_index_mask = 1u << Bitmap::IndexInCell(end_index);
+
+ MarkBit::CellType matching_mask;
+ if (start_cell_index != end_cell_index) {
+ matching_mask = ~(start_index_mask - 1);
+ if ((cells()[start_cell_index] & matching_mask) != matching_mask) {
+ return false;
+ }
+ for (unsigned int i = start_cell_index + 1; i < end_cell_index; i++) {
+ if (cells()[i] != ~0u) return false;
+ }
+ matching_mask = (end_index_mask - 1);
+ // Check against a mask of 0 to avoid dereferencing the cell after the
+ // end of the bitmap.
+ return (matching_mask == 0) ||
+ ((cells()[end_cell_index] & matching_mask) == matching_mask);
+ } else {
+ matching_mask = end_index_mask - start_index_mask;
+ // Check against a mask of 0 to avoid dereferencing the cell after the
+ // end of the bitmap.
+ return (matching_mask == 0) ||
+ (cells()[end_cell_index] & matching_mask) == matching_mask;
+ }
+}
+
+bool Bitmap::AllBitsClearInRange(uint32_t start_index, uint32_t end_index) {
+ unsigned int start_cell_index = start_index >> Bitmap::kBitsPerCellLog2;
+ MarkBit::CellType start_index_mask = 1u << Bitmap::IndexInCell(start_index);
+
+ unsigned int end_cell_index = end_index >> Bitmap::kBitsPerCellLog2;
+ MarkBit::CellType end_index_mask = 1u << Bitmap::IndexInCell(end_index);
+
+ MarkBit::CellType matching_mask;
+ if (start_cell_index != end_cell_index) {
+ matching_mask = ~(start_index_mask - 1);
+ if ((cells()[start_cell_index] & matching_mask)) return false;
+ for (unsigned int i = start_cell_index + 1; i < end_cell_index; i++) {
+ if (cells()[i]) return false;
+ }
+ matching_mask = (end_index_mask - 1);
+ // Check against a mask of 0 to avoid dereferencing the cell after the
+ // end of the bitmap.
+ return (matching_mask == 0) || !(cells()[end_cell_index] & matching_mask);
+ } else {
+ matching_mask = end_index_mask - start_index_mask;
+ // Check against a mask of 0 to avoid dereferencing the cell after the
+ // end of the bitmap.
+ return (matching_mask == 0) || !(cells()[end_cell_index] & matching_mask);
+ }
+}
+
+namespace {
+
+void PrintWord(uint32_t word, uint32_t himask = 0) {
+ for (uint32_t mask = 1; mask != 0; mask <<= 1) {
+ if ((mask & himask) != 0) PrintF("[");
+ PrintF((mask & word) ? "1" : "0");
+ if ((mask & himask) != 0) PrintF("]");
+ }
+}
+
+class CellPrinter {
+ public:
+ CellPrinter() : seq_start(0), seq_type(0), seq_length(0) {}
+
+ void Print(uint32_t pos, uint32_t cell) {
+ if (cell == seq_type) {
+ seq_length++;
+ return;
+ }
+
+ Flush();
+
+ if (IsSeq(cell)) {
+ seq_start = pos;
+ seq_length = 0;
+ seq_type = cell;
+ return;
+ }
+
+ PrintF("%d: ", pos);
+ PrintWord(cell);
+ PrintF("\n");
+ }
+
+ void Flush() {
+ if (seq_length > 0) {
+ PrintF("%d: %dx%d\n", seq_start, seq_type == 0 ? 0 : 1,
+ seq_length * Bitmap::kBitsPerCell);
+ seq_length = 0;
+ }
+ }
+
+ static bool IsSeq(uint32_t cell) { return cell == 0 || cell == 0xFFFFFFFF; }
+
+ private:
+ uint32_t seq_start;
+ uint32_t seq_type;
+ uint32_t seq_length;
+};
+
+} // anonymous namespace
+
+void Bitmap::Print() {
+ CellPrinter printer;
+ for (int i = 0; i < CellsCount(); i++) {
+ printer.Print(i, cells()[i]);
+ }
+ printer.Flush();
+ PrintF("\n");
+}
+
+bool Bitmap::IsClean() {
+ for (int i = 0; i < CellsCount(); i++) {
+ if (cells()[i] != 0) {
+ return false;
+ }
+ }
+ return true;
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/heap/marking.h b/deps/v8/src/heap/marking.h
index ab98a124bc..c76302218f 100644
--- a/deps/v8/src/heap/marking.h
+++ b/deps/v8/src/heap/marking.h
@@ -16,11 +16,7 @@ class MarkBit {
typedef uint32_t CellType;
STATIC_ASSERT(sizeof(CellType) == sizeof(base::Atomic32));
- enum AccessMode { ATOMIC, NON_ATOMIC };
-
- inline MarkBit(base::Atomic32* cell, CellType mask) : cell_(cell) {
- mask_ = static_cast<base::Atomic32>(mask);
- }
+ inline MarkBit(CellType* cell, CellType mask) : cell_(cell), mask_(mask) {}
#ifdef DEBUG
bool operator==(const MarkBit& other) {
@@ -40,19 +36,19 @@ class MarkBit {
// The function returns true if it succeeded to
// transition the bit from 0 to 1.
- template <AccessMode mode = NON_ATOMIC>
+ template <AccessMode mode = AccessMode::NON_ATOMIC>
inline bool Set();
- template <AccessMode mode = NON_ATOMIC>
+ template <AccessMode mode = AccessMode::NON_ATOMIC>
inline bool Get();
// The function returns true if it succeeded to
// transition the bit from 1 to 0.
- template <AccessMode mode = NON_ATOMIC>
+ template <AccessMode mode = AccessMode::NON_ATOMIC>
inline bool Clear();
- base::Atomic32* cell_;
- base::Atomic32 mask_;
+ CellType* cell_;
+ CellType mask_;
friend class IncrementalMarking;
friend class ConcurrentMarkingMarkbits;
@@ -60,57 +56,41 @@ class MarkBit {
};
template <>
-inline bool MarkBit::Set<MarkBit::NON_ATOMIC>() {
- base::Atomic32 old_value = *cell_;
+inline bool MarkBit::Set<AccessMode::NON_ATOMIC>() {
+ CellType old_value = *cell_;
*cell_ = old_value | mask_;
return (old_value & mask_) == 0;
}
template <>
-inline bool MarkBit::Set<MarkBit::ATOMIC>() {
- base::Atomic32 old_value;
- base::Atomic32 new_value;
- do {
- old_value = base::NoBarrier_Load(cell_);
- if (old_value & mask_) return false;
- new_value = old_value | mask_;
- } while (base::Release_CompareAndSwap(cell_, old_value, new_value) !=
- old_value);
- return true;
+inline bool MarkBit::Set<AccessMode::ATOMIC>() {
+ return base::AsAtomic32::SetBits(cell_, mask_, mask_);
}
template <>
-inline bool MarkBit::Get<MarkBit::NON_ATOMIC>() {
- return (base::NoBarrier_Load(cell_) & mask_) != 0;
+inline bool MarkBit::Get<AccessMode::NON_ATOMIC>() {
+ return (*cell_ & mask_) != 0;
}
template <>
-inline bool MarkBit::Get<MarkBit::ATOMIC>() {
- return (base::Acquire_Load(cell_) & mask_) != 0;
+inline bool MarkBit::Get<AccessMode::ATOMIC>() {
+ return (base::AsAtomic32::Acquire_Load(cell_) & mask_) != 0;
}
template <>
-inline bool MarkBit::Clear<MarkBit::NON_ATOMIC>() {
- base::Atomic32 old_value = *cell_;
+inline bool MarkBit::Clear<AccessMode::NON_ATOMIC>() {
+ CellType old_value = *cell_;
*cell_ = old_value & ~mask_;
return (old_value & mask_) == mask_;
}
template <>
-inline bool MarkBit::Clear<MarkBit::ATOMIC>() {
- base::Atomic32 old_value;
- base::Atomic32 new_value;
- do {
- old_value = base::NoBarrier_Load(cell_);
- if (!(old_value & mask_)) return false;
- new_value = old_value & ~mask_;
- } while (base::Release_CompareAndSwap(cell_, old_value, new_value) !=
- old_value);
- return true;
+inline bool MarkBit::Clear<AccessMode::ATOMIC>() {
+ return base::AsAtomic32::SetBits(cell_, 0u, mask_);
}
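
The explicit compare-and-swap loops are replaced above by calls to base::AsAtomic32::SetBits. The following is a sketch of what such a helper can look like, written with std::atomic; the assumed semantics (atomically replace the masked bits and report whether they actually changed) are inferred from the Set/Clear call sites, and this is not the actual helper from V8's base library.

#include <atomic>
#include <cstdint>

// Atomically sets the bits selected by |mask| to |bits| and returns true iff
// the masked region changed. Semantics assumed from the call sites above.
bool SetBitsSketch(std::atomic<uint32_t>* cell, uint32_t bits, uint32_t mask) {
  uint32_t old_value = cell->load(std::memory_order_relaxed);
  do {
    if ((old_value & mask) == bits) return false;  // already in target state
  } while (!cell->compare_exchange_weak(old_value, (old_value & ~mask) | bits,
                                        std::memory_order_acq_rel,
                                        std::memory_order_relaxed));
  return true;
}

Under these assumed semantics, MarkBit::Set<AccessMode::ATOMIC>() corresponds to SetBitsSketch(cell, mask, mask) and Clear<AccessMode::ATOMIC>() to SetBitsSketch(cell, 0u, mask).
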
// Bitmap is a sequence of cells each containing fixed number of bits.
-class Bitmap {
+class V8_EXPORT_PRIVATE Bitmap {
public:
static const uint32_t kBitsPerCell = 32;
static const uint32_t kBitsPerCellLog2 = 5;
@@ -129,11 +109,7 @@ class Bitmap {
int CellsCount() { return CellsForLength(kLength); }
- static int SizeFor(int cells_count) {
- return sizeof(MarkBit::CellType) * cells_count;
- }
-
- INLINE(static uint32_t IndexToCell(uint32_t index)) {
+ V8_INLINE static uint32_t IndexToCell(uint32_t index) {
return index >> kBitsPerCellLog2;
}
@@ -141,204 +117,85 @@ class Bitmap {
return index & kBitIndexMask;
}
- INLINE(static uint32_t CellToIndex(uint32_t index)) {
- return index << kBitsPerCellLog2;
+ // Retrieves the cell containing the provided markbit index.
+ V8_INLINE static uint32_t CellAlignIndex(uint32_t index) {
+ return index & ~kBitIndexMask;
}
- INLINE(static uint32_t CellAlignIndex(uint32_t index)) {
- return (index + kBitIndexMask) & ~kBitIndexMask;
+ V8_INLINE static bool IsCellAligned(uint32_t index) {
+ return (index & kBitIndexMask) == 0;
}
- INLINE(MarkBit::CellType* cells()) {
+ V8_INLINE MarkBit::CellType* cells() {
return reinterpret_cast<MarkBit::CellType*>(this);
}
- INLINE(Address address()) { return reinterpret_cast<Address>(this); }
-
- INLINE(static Bitmap* FromAddress(Address addr)) {
+ V8_INLINE static Bitmap* FromAddress(Address addr) {
return reinterpret_cast<Bitmap*>(addr);
}
inline MarkBit MarkBitFromIndex(uint32_t index) {
MarkBit::CellType mask = 1u << IndexInCell(index);
MarkBit::CellType* cell = this->cells() + (index >> kBitsPerCellLog2);
- return MarkBit(reinterpret_cast<base::Atomic32*>(cell), mask);
- }
-
- void Clear() {
- for (int i = 0; i < CellsCount(); i++) cells()[i] = 0;
- }
-
- // Sets all bits in the range [start_index, end_index).
- void SetRange(uint32_t start_index, uint32_t end_index) {
- unsigned int start_cell_index = start_index >> Bitmap::kBitsPerCellLog2;
- MarkBit::CellType start_index_mask = 1u << Bitmap::IndexInCell(start_index);
-
- unsigned int end_cell_index = end_index >> Bitmap::kBitsPerCellLog2;
- MarkBit::CellType end_index_mask = 1u << Bitmap::IndexInCell(end_index);
-
- if (start_cell_index != end_cell_index) {
- // Firstly, fill all bits from the start address to the end of the first
- // cell with 1s.
- cells()[start_cell_index] |= ~(start_index_mask - 1);
- // Then fill all in between cells with 1s.
- for (unsigned int i = start_cell_index + 1; i < end_cell_index; i++) {
- cells()[i] = ~0u;
- }
- // Finally, fill all bits until the end address in the last cell with 1s.
- cells()[end_cell_index] |= (end_index_mask - 1);
- } else {
- cells()[start_cell_index] |= end_index_mask - start_index_mask;
- }
+ return MarkBit(cell, mask);
}
- // Clears all bits in the range [start_index, end_index).
- void ClearRange(uint32_t start_index, uint32_t end_index) {
- unsigned int start_cell_index = start_index >> Bitmap::kBitsPerCellLog2;
- MarkBit::CellType start_index_mask = 1u << Bitmap::IndexInCell(start_index);
-
- unsigned int end_cell_index = end_index >> Bitmap::kBitsPerCellLog2;
- MarkBit::CellType end_index_mask = 1u << Bitmap::IndexInCell(end_index);
-
- if (start_cell_index != end_cell_index) {
- // Firstly, fill all bits from the start address to the end of the first
- // cell with 0s.
- cells()[start_cell_index] &= (start_index_mask - 1);
- // Then fill all in between cells with 0s.
- for (unsigned int i = start_cell_index + 1; i < end_cell_index; i++) {
- cells()[i] = 0;
- }
- // Finally, set all bits until the end address in the last cell with 0s.
- cells()[end_cell_index] &= ~(end_index_mask - 1);
- } else {
- cells()[start_cell_index] &= ~(end_index_mask - start_index_mask);
- }
- }
+ void Clear();
- // Returns true if all bits in the range [start_index, end_index) are set.
- bool AllBitsSetInRange(uint32_t start_index, uint32_t end_index) {
- unsigned int start_cell_index = start_index >> Bitmap::kBitsPerCellLog2;
- MarkBit::CellType start_index_mask = 1u << Bitmap::IndexInCell(start_index);
-
- unsigned int end_cell_index = end_index >> Bitmap::kBitsPerCellLog2;
- MarkBit::CellType end_index_mask = 1u << Bitmap::IndexInCell(end_index);
-
- MarkBit::CellType matching_mask;
- if (start_cell_index != end_cell_index) {
- matching_mask = ~(start_index_mask - 1);
- if ((cells()[start_cell_index] & matching_mask) != matching_mask) {
- return false;
- }
- for (unsigned int i = start_cell_index + 1; i < end_cell_index; i++) {
- if (cells()[i] != ~0u) return false;
- }
- matching_mask = (end_index_mask - 1);
- // Check against a mask of 0 to avoid dereferencing the cell after the
- // end of the bitmap.
- return (matching_mask == 0) ||
- ((cells()[end_cell_index] & matching_mask) == matching_mask);
- } else {
- matching_mask = end_index_mask - start_index_mask;
- // Check against a mask of 0 to avoid dereferencing the cell after the
- // end of the bitmap.
- return (matching_mask == 0) ||
- (cells()[end_cell_index] & matching_mask) == matching_mask;
- }
- }
-
- // Returns true if all bits in the range [start_index, end_index) are cleared.
- bool AllBitsClearInRange(uint32_t start_index, uint32_t end_index) {
- unsigned int start_cell_index = start_index >> Bitmap::kBitsPerCellLog2;
- MarkBit::CellType start_index_mask = 1u << Bitmap::IndexInCell(start_index);
-
- unsigned int end_cell_index = end_index >> Bitmap::kBitsPerCellLog2;
- MarkBit::CellType end_index_mask = 1u << Bitmap::IndexInCell(end_index);
-
- MarkBit::CellType matching_mask;
- if (start_cell_index != end_cell_index) {
- matching_mask = ~(start_index_mask - 1);
- if ((cells()[start_cell_index] & matching_mask)) return false;
- for (unsigned int i = start_cell_index + 1; i < end_cell_index; i++) {
- if (cells()[i]) return false;
- }
- matching_mask = (end_index_mask - 1);
- // Check against a mask of 0 to avoid dereferencing the cell after the
- // end of the bitmap.
- return (matching_mask == 0) || !(cells()[end_cell_index] & matching_mask);
- } else {
- matching_mask = end_index_mask - start_index_mask;
- // Check against a mask of 0 to avoid dereferencing the cell after the
- // end of the bitmap.
- return (matching_mask == 0) || !(cells()[end_cell_index] & matching_mask);
- }
- }
+ // Clears bits in the given cell. The mask specifies bits to clear: if a
+ // bit is set in the mask then the corresponding bit is cleared in the cell.
+ template <AccessMode mode = AccessMode::NON_ATOMIC>
+ void ClearBitsInCell(uint32_t cell_index, uint32_t mask);
- static void PrintWord(uint32_t word, uint32_t himask = 0) {
- for (uint32_t mask = 1; mask != 0; mask <<= 1) {
- if ((mask & himask) != 0) PrintF("[");
- PrintF((mask & word) ? "1" : "0");
- if ((mask & himask) != 0) PrintF("]");
- }
- }
+ // Sets bits in the given cell. The mask specifies bits to set: if a
+ // bit is set in the mask then the corresponding bit is set in the cell.
+ template <AccessMode mode = AccessMode::NON_ATOMIC>
+ void SetBitsInCell(uint32_t cell_index, uint32_t mask);
- class CellPrinter {
- public:
- CellPrinter() : seq_start(0), seq_type(0), seq_length(0) {}
+ // Sets all bits in the range [start_index, end_index). The cells at the
+  // boundary of the range are updated with an atomic compare-and-swap
+  // operation. The inner cells are updated with a relaxed write.
+ void SetRange(uint32_t start_index, uint32_t end_index);
- void Print(uint32_t pos, uint32_t cell) {
- if (cell == seq_type) {
- seq_length++;
- return;
- }
+ // Clears all bits in the range [start_index, end_index). The cells at the
+  // boundary of the range are updated with an atomic compare-and-swap
+  // operation. The inner cells are updated with a relaxed write.
+ void ClearRange(uint32_t start_index, uint32_t end_index);
- Flush();
+ // Returns true if all bits in the range [start_index, end_index) are set.
+ bool AllBitsSetInRange(uint32_t start_index, uint32_t end_index);
- if (IsSeq(cell)) {
- seq_start = pos;
- seq_length = 0;
- seq_type = cell;
- return;
- }
+ // Returns true if all bits in the range [start_index, end_index) are cleared.
+ bool AllBitsClearInRange(uint32_t start_index, uint32_t end_index);
- PrintF("%d: ", pos);
- PrintWord(cell);
- PrintF("\n");
- }
+ void Print();
- void Flush() {
- if (seq_length > 0) {
- PrintF("%d: %dx%d\n", seq_start, seq_type == 0 ? 0 : 1,
- seq_length * kBitsPerCell);
- seq_length = 0;
- }
- }
+ bool IsClean();
+};
- static bool IsSeq(uint32_t cell) { return cell == 0 || cell == 0xFFFFFFFF; }
+template <>
+inline void Bitmap::SetBitsInCell<AccessMode::NON_ATOMIC>(uint32_t cell_index,
+ uint32_t mask) {
+ cells()[cell_index] |= mask;
+}
- private:
- uint32_t seq_start;
- uint32_t seq_type;
- uint32_t seq_length;
- };
+template <>
+inline void Bitmap::SetBitsInCell<AccessMode::ATOMIC>(uint32_t cell_index,
+ uint32_t mask) {
+ base::AsAtomic32::SetBits(cells() + cell_index, mask, mask);
+}
- void Print() {
- CellPrinter printer;
- for (int i = 0; i < CellsCount(); i++) {
- printer.Print(i, cells()[i]);
- }
- printer.Flush();
- PrintF("\n");
- }
+template <>
+inline void Bitmap::ClearBitsInCell<AccessMode::NON_ATOMIC>(uint32_t cell_index,
+ uint32_t mask) {
+ cells()[cell_index] &= ~mask;
+}
- bool IsClean() {
- for (int i = 0; i < CellsCount(); i++) {
- if (cells()[i] != 0) {
- return false;
- }
- }
- return true;
- }
-};
+template <>
+inline void Bitmap::ClearBitsInCell<AccessMode::ATOMIC>(uint32_t cell_index,
+ uint32_t mask) {
+ base::AsAtomic32::SetBits(cells() + cell_index, 0u, mask);
+}
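
The header comments above describe SetRange/ClearRange as updating the boundary cells atomically and the fully covered inner cells with relaxed writes. Below is a self-contained sketch of that pattern; the function, its atomics, and the constants (mirroring Bitmap's 32-bit cells) are illustrative only and are not the code the patch adds to marking.cc.

#include <atomic>
#include <cstdint>

void SetRangeSketch(std::atomic<uint32_t>* cells, uint32_t start_index,
                    uint32_t end_index) {
  const uint32_t kBitsPerCellLog2 = 5;
  const uint32_t kBitIndexMask = 31;
  uint32_t start_cell = start_index >> kBitsPerCellLog2;
  uint32_t end_cell = end_index >> kBitsPerCellLog2;
  uint32_t start_mask = 1u << (start_index & kBitIndexMask);
  uint32_t end_mask = 1u << (end_index & kBitIndexMask);
  if (start_cell != end_cell) {
    // Boundary cells may be shared with concurrent updates to neighbouring
    // ranges, so use an atomic read-modify-write.
    cells[start_cell].fetch_or(~(start_mask - 1), std::memory_order_acq_rel);
    // Inner cells are covered completely; a relaxed store is enough.
    for (uint32_t i = start_cell + 1; i < end_cell; i++) {
      cells[i].store(~0u, std::memory_order_relaxed);
    }
    // Skip the last cell when the range ends exactly on a cell boundary.
    if (end_mask != 1u) {
      cells[end_cell].fetch_or(end_mask - 1, std::memory_order_acq_rel);
    }
  } else {
    cells[start_cell].fetch_or(end_mask - start_mask,
                               std::memory_order_acq_rel);
  }
}
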
class Marking : public AllStatic {
public:
@@ -348,9 +205,9 @@ class Marking : public AllStatic {
// Impossible markbits: 01
static const char* kImpossibleBitPattern;
- template <MarkBit::AccessMode mode = MarkBit::NON_ATOMIC>
+ template <AccessMode mode = AccessMode::NON_ATOMIC>
INLINE(static bool IsImpossible(MarkBit mark_bit)) {
- if (mode == MarkBit::NON_ATOMIC) {
+ if (mode == AccessMode::NON_ATOMIC) {
return !mark_bit.Get<mode>() && mark_bit.Next().Get<mode>();
}
// If we are in concurrent mode we can only tell if an object has the
@@ -366,36 +223,36 @@ class Marking : public AllStatic {
// Black markbits: 11
static const char* kBlackBitPattern;
- template <MarkBit::AccessMode mode = MarkBit::NON_ATOMIC>
+ template <AccessMode mode = AccessMode::NON_ATOMIC>
INLINE(static bool IsBlack(MarkBit mark_bit)) {
return mark_bit.Get<mode>() && mark_bit.Next().Get<mode>();
}
// White markbits: 00 - this is required by the mark bit clearer.
static const char* kWhiteBitPattern;
- template <MarkBit::AccessMode mode = MarkBit::NON_ATOMIC>
+ template <AccessMode mode = AccessMode::NON_ATOMIC>
INLINE(static bool IsWhite(MarkBit mark_bit)) {
- DCHECK(!IsImpossible(mark_bit));
+ DCHECK(!IsImpossible<mode>(mark_bit));
return !mark_bit.Get<mode>();
}
// Grey markbits: 10
static const char* kGreyBitPattern;
- template <MarkBit::AccessMode mode = MarkBit::NON_ATOMIC>
+ template <AccessMode mode = AccessMode::NON_ATOMIC>
INLINE(static bool IsGrey(MarkBit mark_bit)) {
return mark_bit.Get<mode>() && !mark_bit.Next().Get<mode>();
}
// IsBlackOrGrey assumes that the first bit is set for black or grey
// objects.
- template <MarkBit::AccessMode mode = MarkBit::NON_ATOMIC>
+ template <AccessMode mode = AccessMode::NON_ATOMIC>
INLINE(static bool IsBlackOrGrey(MarkBit mark_bit)) {
return mark_bit.Get<mode>();
}
- template <MarkBit::AccessMode mode = MarkBit::NON_ATOMIC>
+ template <AccessMode mode = AccessMode::NON_ATOMIC>
INLINE(static void MarkWhite(MarkBit markbit)) {
- STATIC_ASSERT(mode == MarkBit::NON_ATOMIC);
+ STATIC_ASSERT(mode == AccessMode::NON_ATOMIC);
markbit.Clear<mode>();
markbit.Next().Clear<mode>();
}
@@ -403,30 +260,30 @@ class Marking : public AllStatic {
// Warning: this method is not safe in general in concurrent scenarios.
// If you know that nobody else will change the bits on the given location
// then you may use it.
- template <MarkBit::AccessMode mode = MarkBit::NON_ATOMIC>
+ template <AccessMode mode = AccessMode::NON_ATOMIC>
INLINE(static void MarkBlack(MarkBit markbit)) {
markbit.Set<mode>();
markbit.Next().Set<mode>();
}
- template <MarkBit::AccessMode mode = MarkBit::NON_ATOMIC>
+ template <AccessMode mode = AccessMode::NON_ATOMIC>
INLINE(static bool BlackToGrey(MarkBit markbit)) {
- STATIC_ASSERT(mode == MarkBit::NON_ATOMIC);
+ STATIC_ASSERT(mode == AccessMode::NON_ATOMIC);
DCHECK(IsBlack(markbit));
return markbit.Next().Clear<mode>();
}
- template <MarkBit::AccessMode mode = MarkBit::NON_ATOMIC>
+ template <AccessMode mode = AccessMode::NON_ATOMIC>
INLINE(static bool WhiteToGrey(MarkBit markbit)) {
return markbit.Set<mode>();
}
- template <MarkBit::AccessMode mode = MarkBit::NON_ATOMIC>
+ template <AccessMode mode = AccessMode::NON_ATOMIC>
INLINE(static bool WhiteToBlack(MarkBit markbit)) {
return markbit.Set<mode>() && markbit.Next().Set<mode>();
}
- template <MarkBit::AccessMode mode = MarkBit::NON_ATOMIC>
+ template <AccessMode mode = AccessMode::NON_ATOMIC>
INLINE(static bool GreyToBlack(MarkBit markbit)) {
return markbit.Get<mode>() && markbit.Next().Set<mode>();
}
@@ -457,7 +314,6 @@ class Marking : public AllStatic {
if (IsWhite(mark_bit)) return WHITE_OBJECT;
if (IsGrey(mark_bit)) return GREY_OBJECT;
UNREACHABLE();
- return IMPOSSIBLE_COLOR;
}
private:
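
The bit patterns listed above (white 00, grey 10, black 11, impossible 01) describe a pair of consecutive mark bits per object: the mark bit itself and MarkBit::Next(). A toy model of that encoding and of the transitions the collector performs; the struct and main() are invented here purely for illustration.

#include <cassert>

struct ToyMarkBits {
  bool bit = false;   // the first mark bit
  bool next = false;  // MarkBit::Next()
};

bool IsWhite(ToyMarkBits m) { return !m.bit; }                 // 00
bool IsGrey(ToyMarkBits m) { return m.bit && !m.next; }        // 10
bool IsBlack(ToyMarkBits m) { return m.bit && m.next; }        // 11
bool IsImpossible(ToyMarkBits m) { return !m.bit && m.next; }  // 01

int main() {
  ToyMarkBits m;    // freshly allocated object: white
  assert(IsWhite(m));
  m.bit = true;     // WhiteToGrey: object discovered, pushed on the worklist
  assert(IsGrey(m));
  m.next = true;    // GreyToBlack: object fully scanned
  assert(IsBlack(m));
  return 0;
}
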
diff --git a/deps/v8/src/heap/memory-reducer.cc b/deps/v8/src/heap/memory-reducer.cc
index 46b7b576d2..0e1449bb92 100644
--- a/deps/v8/src/heap/memory-reducer.cc
+++ b/deps/v8/src/heap/memory-reducer.cc
@@ -197,7 +197,6 @@ MemoryReducer::State MemoryReducer::Step(const State& state,
}
}
UNREACHABLE();
- return State(kDone, 0, 0, 0.0, 0); // Make the compiler happy.
}
diff --git a/deps/v8/src/heap/object-stats.cc b/deps/v8/src/heap/object-stats.cc
index a9f50cdfbf..66c864b945 100644
--- a/deps/v8/src/heap/object-stats.cc
+++ b/deps/v8/src/heap/object-stats.cc
@@ -320,7 +320,7 @@ void ObjectStatsCollector::CollectGlobalStatistics() {
OBJECT_TO_CODE_SUB_TYPE);
RecordHashTableHelper(nullptr, heap_->code_stubs(),
CODE_STUBS_TABLE_SUB_TYPE);
- RecordHashTableHelper(nullptr, heap_->empty_properties_dictionary(),
+ RecordHashTableHelper(nullptr, heap_->empty_property_dictionary(),
EMPTY_PROPERTIES_DICTIONARY_SUB_TYPE);
CompilationCache* compilation_cache = heap_->isolate()->compilation_cache();
CompilationCacheTableVisitor v(this);
@@ -335,7 +335,7 @@ static bool CanRecordFixedArray(Heap* heap, FixedArrayBase* array) {
array != heap->empty_sloppy_arguments_elements() &&
array != heap->empty_slow_element_dictionary() &&
array != heap->empty_descriptor_array() &&
- array != heap->empty_properties_dictionary();
+ array != heap->empty_property_dictionary();
}
static bool IsCowArray(Heap* heap, FixedArrayBase* array) {
@@ -393,27 +393,29 @@ void ObjectStatsCollector::RecordJSObjectDetails(JSObject* object) {
SeededNumberDictionary* dict = SeededNumberDictionary::cast(elements);
RecordHashTableHelper(object, dict, DICTIONARY_ELEMENTS_SUB_TYPE);
} else {
- if (IsFastHoleyElementsKind(object->GetElementsKind())) {
+ if (IsHoleyElementsKind(object->GetElementsKind())) {
int used = object->GetFastElementsUsage() * kPointerSize;
- if (object->GetElementsKind() == FAST_HOLEY_DOUBLE_ELEMENTS) used *= 2;
+ if (object->GetElementsKind() == HOLEY_DOUBLE_ELEMENTS) used *= 2;
CHECK_GE(elements->Size(), used);
overhead = elements->Size() - used - FixedArray::kHeaderSize;
}
- stats_->RecordFixedArraySubTypeStats(elements, FAST_ELEMENTS_SUB_TYPE,
+ stats_->RecordFixedArraySubTypeStats(elements, PACKED_ELEMENTS_SUB_TYPE,
elements->Size(), overhead);
}
}
- overhead = 0;
- FixedArrayBase* properties = object->properties();
- if (CanRecordFixedArray(heap_, properties) &&
- SameLiveness(object, properties) && !IsCowArray(heap_, properties)) {
- if (properties->IsDictionary()) {
- NameDictionary* dict = NameDictionary::cast(properties);
- RecordHashTableHelper(object, dict, DICTIONARY_PROPERTIES_SUB_TYPE);
- } else {
- stats_->RecordFixedArraySubTypeStats(properties, FAST_PROPERTIES_SUB_TYPE,
- properties->Size(), overhead);
+ if (object->IsJSGlobalObject()) {
+ GlobalDictionary* properties =
+ JSGlobalObject::cast(object)->global_dictionary();
+ if (CanRecordFixedArray(heap_, properties) &&
+ SameLiveness(object, properties)) {
+ RecordHashTableHelper(object, properties, DICTIONARY_PROPERTIES_SUB_TYPE);
+ }
+ } else if (!object->HasFastProperties()) {
+ NameDictionary* properties = object->property_dictionary();
+ if (CanRecordFixedArray(heap_, properties) &&
+ SameLiveness(object, properties)) {
+ RecordHashTableHelper(object, properties, DICTIONARY_PROPERTIES_SUB_TYPE);
}
}
}
@@ -462,8 +464,8 @@ void ObjectStatsCollector::RecordMapDetails(Map* map_obj) {
}
}
- if (map_obj->has_code_cache()) {
- FixedArray* code_cache = map_obj->code_cache();
+ FixedArray* code_cache = map_obj->code_cache();
+ if (code_cache->length() > 0) {
if (code_cache->IsCodeCacheHashTable()) {
RecordHashTableHelper(map_obj, CodeCacheHashTable::cast(code_cache),
MAP_CODE_CACHE_SUB_TYPE);
diff --git a/deps/v8/src/heap/objects-visiting-inl.h b/deps/v8/src/heap/objects-visiting-inl.h
index 11bf679ec4..ad3ddbc52c 100644
--- a/deps/v8/src/heap/objects-visiting-inl.h
+++ b/deps/v8/src/heap/objects-visiting-inl.h
@@ -5,9 +5,11 @@
#ifndef V8_OBJECTS_VISITING_INL_H_
#define V8_OBJECTS_VISITING_INL_H_
+#include "src/heap/objects-visiting.h"
+
#include "src/heap/array-buffer-tracker.h"
+#include "src/heap/embedder-tracing.h"
#include "src/heap/mark-compact.h"
-#include "src/heap/objects-visiting.h"
#include "src/ic/ic-state.h"
#include "src/macro-assembler.h"
#include "src/objects-body-descriptors-inl.h"
@@ -15,461 +17,302 @@
namespace v8 {
namespace internal {
-
-template <typename Callback>
-Callback VisitorDispatchTable<Callback>::GetVisitor(Map* map) {
- return reinterpret_cast<Callback>(callbacks_[map->visitor_id()]);
+template <typename ResultType, typename ConcreteVisitor>
+ResultType HeapVisitor<ResultType, ConcreteVisitor>::Visit(HeapObject* object) {
+ return Visit(object->map(), object);
}
-
-template <typename StaticVisitor>
-void StaticNewSpaceVisitor<StaticVisitor>::Initialize() {
- table_.Register(
- kVisitShortcutCandidate,
- &FixedBodyVisitor<StaticVisitor, ConsString::BodyDescriptor, int>::Visit);
-
- table_.Register(
- kVisitConsString,
- &FixedBodyVisitor<StaticVisitor, ConsString::BodyDescriptor, int>::Visit);
-
- table_.Register(
- kVisitThinString,
- &FixedBodyVisitor<StaticVisitor, ThinString::BodyDescriptor, int>::Visit);
-
- table_.Register(kVisitSlicedString,
- &FixedBodyVisitor<StaticVisitor, SlicedString::BodyDescriptor,
- int>::Visit);
-
- table_.Register(
- kVisitSymbol,
- &FixedBodyVisitor<StaticVisitor, Symbol::BodyDescriptor, int>::Visit);
-
- table_.Register(kVisitFixedArray,
- &FlexibleBodyVisitor<StaticVisitor,
- FixedArray::BodyDescriptor, int>::Visit);
-
- table_.Register(kVisitFixedDoubleArray, &VisitFixedDoubleArray);
- table_.Register(
- kVisitFixedTypedArrayBase,
- &FlexibleBodyVisitor<StaticVisitor, FixedTypedArrayBase::BodyDescriptor,
- int>::Visit);
-
- table_.Register(
- kVisitFixedFloat64Array,
- &FlexibleBodyVisitor<StaticVisitor, FixedTypedArrayBase::BodyDescriptor,
- int>::Visit);
-
- table_.Register(
- kVisitNativeContext,
- &FixedBodyVisitor<StaticVisitor, Context::ScavengeBodyDescriptor,
- int>::Visit);
-
- table_.Register(kVisitByteArray, &VisitByteArray);
-
- table_.Register(
- kVisitSharedFunctionInfo,
- &FixedBodyVisitor<StaticVisitor, SharedFunctionInfo::BodyDescriptor,
- int>::Visit);
-
- table_.Register(kVisitSeqOneByteString, &VisitSeqOneByteString);
-
- table_.Register(kVisitSeqTwoByteString, &VisitSeqTwoByteString);
-
- // Don't visit code entry. We are using this visitor only during scavenges.
- table_.Register(
- kVisitJSFunction,
- &FlexibleBodyVisitor<StaticVisitor, JSFunction::BodyDescriptorWeakCode,
- int>::Visit);
-
- table_.Register(
- kVisitJSArrayBuffer,
- &FlexibleBodyVisitor<StaticVisitor, JSArrayBuffer::BodyDescriptor,
- int>::Visit);
-
- table_.Register(kVisitFreeSpace, &VisitFreeSpace);
-
- table_.Register(
- kVisitJSWeakCollection,
- &FlexibleBodyVisitor<StaticVisitor, JSWeakCollection::BodyDescriptor,
- int>::Visit);
-
- table_.Register(kVisitJSRegExp, &JSObjectVisitor::Visit);
-
- table_.Register(kVisitDataObject, &DataObjectVisitor::Visit);
-
- table_.Register(kVisitJSObjectFast, &JSObjectFastVisitor::Visit);
- table_.Register(kVisitJSObject, &JSObjectVisitor::Visit);
-
- // Not using specialized Api object visitor for newspace.
- table_.Register(kVisitJSApiObject, &JSObjectVisitor::Visit);
-
- table_.Register(kVisitStruct, &StructVisitor::Visit);
-
- table_.Register(kVisitBytecodeArray, &UnreachableVisitor);
- table_.Register(kVisitSharedFunctionInfo, &UnreachableVisitor);
+template <typename ResultType, typename ConcreteVisitor>
+ResultType HeapVisitor<ResultType, ConcreteVisitor>::Visit(Map* map,
+ HeapObject* object) {
+ ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this);
+ switch (static_cast<VisitorId>(map->visitor_id())) {
+#define CASE(type) \
+ case kVisit##type: \
+ return visitor->Visit##type(map, type::cast(object));
+ TYPED_VISITOR_ID_LIST(CASE)
+#undef CASE
+ case kVisitShortcutCandidate:
+ return visitor->VisitShortcutCandidate(map, ConsString::cast(object));
+ case kVisitNativeContext:
+ return visitor->VisitNativeContext(map, Context::cast(object));
+ case kVisitDataObject:
+ return visitor->VisitDataObject(map, HeapObject::cast(object));
+ case kVisitJSObjectFast:
+ return visitor->VisitJSObjectFast(map, JSObject::cast(object));
+ case kVisitJSApiObject:
+ return visitor->VisitJSApiObject(map, JSObject::cast(object));
+ case kVisitStruct:
+ return visitor->VisitStruct(map, HeapObject::cast(object));
+ case kVisitFreeSpace:
+ return visitor->VisitFreeSpace(map, FreeSpace::cast(object));
+ case kVisitorIdCount:
+ UNREACHABLE();
+ }
+ UNREACHABLE();
+ // Make the compiler happy.
+ return ResultType();
}
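
The new HeapVisitor dispatches on the map's visitor id and forwards to methods of the concrete visitor through the curiously recurring template pattern, so no virtual calls are involved. A self-contained sketch of the same dispatch shape with toy types follows; none of these classes are V8's.

#include <cstdio>

enum class VisitorId { kFixedArray, kCode };

struct ToyObject {
  VisitorId id;
  int length;
};

template <typename ResultType, typename ConcreteVisitor>
class ToyHeapVisitor {
 public:
  ResultType Visit(const ToyObject& object) {
    // Static downcast to the derived visitor; resolved at compile time.
    ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this);
    switch (object.id) {
      case VisitorId::kFixedArray:
        return visitor->VisitFixedArray(object);
      case VisitorId::kCode:
        return visitor->VisitCode(object);
    }
    return ResultType();
  }
};

class SizeVisitor : public ToyHeapVisitor<int, SizeVisitor> {
 public:
  int VisitFixedArray(const ToyObject& object) { return 8 + object.length * 8; }
  int VisitCode(const ToyObject& object) { return object.length; }
};

int main() {
  SizeVisitor v;
  std::printf("%d\n", v.Visit({VisitorId::kFixedArray, 3}));  // prints 32
  return 0;
}
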
-template <typename StaticVisitor>
-void StaticMarkingVisitor<StaticVisitor>::Initialize() {
- table_.Register(kVisitShortcutCandidate,
- &FixedBodyVisitor<StaticVisitor, ConsString::BodyDescriptor,
- void>::Visit);
-
- table_.Register(kVisitConsString,
- &FixedBodyVisitor<StaticVisitor, ConsString::BodyDescriptor,
- void>::Visit);
-
- table_.Register(kVisitThinString,
- &FixedBodyVisitor<StaticVisitor, ThinString::BodyDescriptor,
- void>::Visit);
-
- table_.Register(kVisitSlicedString,
- &FixedBodyVisitor<StaticVisitor, SlicedString::BodyDescriptor,
- void>::Visit);
-
- table_.Register(
- kVisitSymbol,
- &FixedBodyVisitor<StaticVisitor, Symbol::BodyDescriptor, void>::Visit);
-
- table_.Register(kVisitFixedArray, &FixedArrayVisitor::Visit);
-
- table_.Register(kVisitFixedDoubleArray, &DataObjectVisitor::Visit);
-
- table_.Register(
- kVisitFixedTypedArrayBase,
- &FlexibleBodyVisitor<StaticVisitor, FixedTypedArrayBase::BodyDescriptor,
- void>::Visit);
-
- table_.Register(
- kVisitFixedFloat64Array,
- &FlexibleBodyVisitor<StaticVisitor, FixedTypedArrayBase::BodyDescriptor,
- void>::Visit);
-
- table_.Register(kVisitNativeContext, &VisitNativeContext);
-
- table_.Register(
- kVisitAllocationSite,
- &FixedBodyVisitor<StaticVisitor, AllocationSite::MarkingBodyDescriptor,
- void>::Visit);
-
- table_.Register(kVisitByteArray, &DataObjectVisitor::Visit);
-
- table_.Register(kVisitBytecodeArray, &VisitBytecodeArray);
-
- table_.Register(kVisitFreeSpace, &DataObjectVisitor::Visit);
-
- table_.Register(kVisitSeqOneByteString, &DataObjectVisitor::Visit);
-
- table_.Register(kVisitSeqTwoByteString, &DataObjectVisitor::Visit);
-
- table_.Register(kVisitJSWeakCollection, &VisitWeakCollection);
-
- table_.Register(
- kVisitOddball,
- &FixedBodyVisitor<StaticVisitor, Oddball::BodyDescriptor, void>::Visit);
-
- table_.Register(kVisitMap, &VisitMap);
-
- table_.Register(kVisitCode, &VisitCode);
-
- table_.Register(kVisitSharedFunctionInfo, &VisitSharedFunctionInfo);
-
- table_.Register(kVisitJSFunction, &VisitJSFunction);
-
- table_.Register(
- kVisitJSArrayBuffer,
- &FlexibleBodyVisitor<StaticVisitor, JSArrayBuffer::BodyDescriptor,
- void>::Visit);
-
- table_.Register(kVisitJSRegExp, &JSObjectVisitor::Visit);
-
- table_.Register(
- kVisitCell,
- &FixedBodyVisitor<StaticVisitor, Cell::BodyDescriptor, void>::Visit);
-
- table_.Register(kVisitPropertyCell,
- &FixedBodyVisitor<StaticVisitor, PropertyCell::BodyDescriptor,
- void>::Visit);
-
- table_.Register(kVisitWeakCell, &VisitWeakCell);
-
- table_.Register(kVisitTransitionArray, &VisitTransitionArray);
-
- table_.Register(kVisitDataObject, &DataObjectVisitor::Visit);
-
- table_.Register(kVisitJSObjectFast, &JSObjectFastVisitor::Visit);
- table_.Register(kVisitJSObject, &JSObjectVisitor::Visit);
-
- table_.Register(kVisitJSApiObject, &JSApiObjectVisitor::Visit);
-
- table_.Register(kVisitStruct, &StructObjectVisitor::Visit);
+template <typename ResultType, typename ConcreteVisitor>
+void HeapVisitor<ResultType, ConcreteVisitor>::VisitMapPointer(
+ HeapObject* host, HeapObject** map) {
+ static_cast<ConcreteVisitor*>(this)->VisitPointer(
+ host, reinterpret_cast<Object**>(map));
}
+#define VISIT(type) \
+ template <typename ResultType, typename ConcreteVisitor> \
+ ResultType HeapVisitor<ResultType, ConcreteVisitor>::Visit##type( \
+ Map* map, type* object) { \
+ ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this); \
+ if (!visitor->ShouldVisit(object)) return ResultType(); \
+ int size = type::BodyDescriptor::SizeOf(map, object); \
+ if (visitor->ShouldVisitMapPointer()) \
+ visitor->VisitMapPointer(object, object->map_slot()); \
+ type::BodyDescriptor::IterateBody(object, size, visitor); \
+ return static_cast<ResultType>(size); \
+ }
+TYPED_VISITOR_ID_LIST(VISIT)
+#undef VISIT
-template <typename StaticVisitor>
-void StaticMarkingVisitor<StaticVisitor>::VisitCodeEntry(
- Heap* heap, HeapObject* object, Address entry_address) {
- Code* code = Code::cast(Code::GetObjectFromEntryAddress(entry_address));
- heap->mark_compact_collector()->RecordCodeEntrySlot(object, entry_address,
- code);
- StaticVisitor::MarkObject(heap, code);
+template <typename ResultType, typename ConcreteVisitor>
+ResultType HeapVisitor<ResultType, ConcreteVisitor>::VisitShortcutCandidate(
+ Map* map, ConsString* object) {
+ ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this);
+ if (!visitor->ShouldVisit(object)) return ResultType();
+ int size = ConsString::BodyDescriptor::SizeOf(map, object);
+ if (visitor->ShouldVisitMapPointer())
+ visitor->VisitMapPointer(object, object->map_slot());
+ ConsString::BodyDescriptor::IterateBody(object, size, visitor);
+ return static_cast<ResultType>(size);
}
-
-template <typename StaticVisitor>
-void StaticMarkingVisitor<StaticVisitor>::VisitEmbeddedPointer(
- Heap* heap, RelocInfo* rinfo) {
- DCHECK(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
- HeapObject* object = HeapObject::cast(rinfo->target_object());
- Code* host = rinfo->host();
- heap->mark_compact_collector()->RecordRelocSlot(host, rinfo, object);
- // TODO(ulan): It could be better to record slots only for strongly embedded
- // objects here and record slots for weakly embedded object during clearing
- // of non-live references in mark-compact.
- if (!host->IsWeakObject(object)) {
- StaticVisitor::MarkObject(heap, object);
- }
+template <typename ResultType, typename ConcreteVisitor>
+ResultType HeapVisitor<ResultType, ConcreteVisitor>::VisitNativeContext(
+ Map* map, Context* object) {
+ ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this);
+ if (!visitor->ShouldVisit(object)) return ResultType();
+ int size = Context::BodyDescriptor::SizeOf(map, object);
+ if (visitor->ShouldVisitMapPointer())
+ visitor->VisitMapPointer(object, object->map_slot());
+ Context::BodyDescriptor::IterateBody(object, size, visitor);
+ return static_cast<ResultType>(size);
}
-
-template <typename StaticVisitor>
-void StaticMarkingVisitor<StaticVisitor>::VisitCell(Heap* heap,
- RelocInfo* rinfo) {
- DCHECK(rinfo->rmode() == RelocInfo::CELL);
- Cell* cell = rinfo->target_cell();
- Code* host = rinfo->host();
- heap->mark_compact_collector()->RecordRelocSlot(host, rinfo, cell);
- if (!host->IsWeakObject(cell)) {
- StaticVisitor::MarkObject(heap, cell);
- }
+template <typename ResultType, typename ConcreteVisitor>
+ResultType HeapVisitor<ResultType, ConcreteVisitor>::VisitDataObject(
+ Map* map, HeapObject* object) {
+ ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this);
+ if (!visitor->ShouldVisit(object)) return ResultType();
+ int size = map->instance_size();
+ if (visitor->ShouldVisitMapPointer())
+ visitor->VisitMapPointer(object, object->map_slot());
+ return static_cast<ResultType>(size);
}
-
-template <typename StaticVisitor>
-void StaticMarkingVisitor<StaticVisitor>::VisitDebugTarget(Heap* heap,
- RelocInfo* rinfo) {
- DCHECK(RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
- rinfo->IsPatchedDebugBreakSlotSequence());
- Code* target = Code::GetCodeFromTargetAddress(rinfo->debug_call_address());
- Code* host = rinfo->host();
- heap->mark_compact_collector()->RecordRelocSlot(host, rinfo, target);
- StaticVisitor::MarkObject(heap, target);
+template <typename ResultType, typename ConcreteVisitor>
+ResultType HeapVisitor<ResultType, ConcreteVisitor>::VisitJSObjectFast(
+ Map* map, JSObject* object) {
+ ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this);
+ if (!visitor->ShouldVisit(object)) return ResultType();
+ int size = JSObject::FastBodyDescriptor::SizeOf(map, object);
+ if (visitor->ShouldVisitMapPointer())
+ visitor->VisitMapPointer(object, object->map_slot());
+ JSObject::FastBodyDescriptor::IterateBody(object, size, visitor);
+ return static_cast<ResultType>(size);
}
+template <typename ResultType, typename ConcreteVisitor>
+ResultType HeapVisitor<ResultType, ConcreteVisitor>::VisitJSApiObject(
+ Map* map, JSObject* object) {
+ ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this);
+ if (!visitor->ShouldVisit(object)) return ResultType();
+ int size = JSObject::BodyDescriptor::SizeOf(map, object);
+ if (visitor->ShouldVisitMapPointer())
+ visitor->VisitMapPointer(object, object->map_slot());
+ JSObject::BodyDescriptor::IterateBody(object, size, visitor);
+ return static_cast<ResultType>(size);
+}
-template <typename StaticVisitor>
-void StaticMarkingVisitor<StaticVisitor>::VisitCodeTarget(Heap* heap,
- RelocInfo* rinfo) {
- DCHECK(RelocInfo::IsCodeTarget(rinfo->rmode()));
- Code* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
- Code* host = rinfo->host();
- heap->mark_compact_collector()->RecordRelocSlot(host, rinfo, target);
- StaticVisitor::MarkObject(heap, target);
+template <typename ResultType, typename ConcreteVisitor>
+ResultType HeapVisitor<ResultType, ConcreteVisitor>::VisitStruct(
+ Map* map, HeapObject* object) {
+ ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this);
+ if (!visitor->ShouldVisit(object)) return ResultType();
+ int size = map->instance_size();
+ if (visitor->ShouldVisitMapPointer())
+ visitor->VisitMapPointer(object, object->map_slot());
+ StructBodyDescriptor::IterateBody(object, size, visitor);
+ return static_cast<ResultType>(size);
}
-template <typename StaticVisitor>
-void StaticMarkingVisitor<StaticVisitor>::VisitCodeAgeSequence(
- Heap* heap, RelocInfo* rinfo) {
- DCHECK(RelocInfo::IsCodeAgeSequence(rinfo->rmode()));
- Code* target = rinfo->code_age_stub();
- DCHECK(target != NULL);
- Code* host = rinfo->host();
- heap->mark_compact_collector()->RecordRelocSlot(host, rinfo, target);
- StaticVisitor::MarkObject(heap, target);
+template <typename ResultType, typename ConcreteVisitor>
+ResultType HeapVisitor<ResultType, ConcreteVisitor>::VisitFreeSpace(
+ Map* map, FreeSpace* object) {
+ ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this);
+ if (!visitor->ShouldVisit(object)) return ResultType();
+ if (visitor->ShouldVisitMapPointer())
+ visitor->VisitMapPointer(object, object->map_slot());
+ return static_cast<ResultType>(FreeSpace::cast(object)->size());
}
-template <typename StaticVisitor>
-void StaticMarkingVisitor<StaticVisitor>::VisitBytecodeArray(
- Map* map, HeapObject* object) {
- FixedBodyVisitor<StaticVisitor, BytecodeArray::MarkingBodyDescriptor,
- void>::Visit(map, object);
- BytecodeArray::cast(object)->MakeOlder();
+template <typename ConcreteVisitor>
+int NewSpaceVisitor<ConcreteVisitor>::VisitJSFunction(Map* map,
+ JSFunction* object) {
+ ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this);
+ int size = JSFunction::BodyDescriptorWeak::SizeOf(map, object);
+ JSFunction::BodyDescriptorWeak::IterateBody(object, size, visitor);
+ return size;
}
-template <typename StaticVisitor>
-void StaticMarkingVisitor<StaticVisitor>::VisitNativeContext(
- Map* map, HeapObject* object) {
- FixedBodyVisitor<StaticVisitor, Context::MarkCompactBodyDescriptor,
- void>::Visit(map, object);
+template <typename ConcreteVisitor>
+int NewSpaceVisitor<ConcreteVisitor>::VisitNativeContext(Map* map,
+ Context* object) {
+ ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this);
+ int size = Context::BodyDescriptor::SizeOf(map, object);
+ Context::BodyDescriptor::IterateBody(object, size, visitor);
+ return size;
}
+template <typename ConcreteVisitor>
+int NewSpaceVisitor<ConcreteVisitor>::VisitJSApiObject(Map* map,
+ JSObject* object) {
+ ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this);
+ return visitor->VisitJSObject(map, object);
+}
-template <typename StaticVisitor>
-void StaticMarkingVisitor<StaticVisitor>::VisitMap(Map* map,
- HeapObject* object) {
- Heap* heap = map->GetHeap();
- Map* map_object = Map::cast(object);
+template <typename ConcreteVisitor>
+int MarkingVisitor<ConcreteVisitor>::VisitJSFunction(Map* map,
+ JSFunction* object) {
+ ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this);
+ int size = JSFunction::BodyDescriptorWeak::SizeOf(map, object);
+ JSFunction::BodyDescriptorWeak::IterateBody(object, size, visitor);
+ return size;
+}
- // Clears the cache of ICs related to this map.
- if (FLAG_cleanup_code_caches_at_gc) {
- map_object->ClearCodeCache(heap);
+template <typename ConcreteVisitor>
+int MarkingVisitor<ConcreteVisitor>::VisitTransitionArray(
+ Map* map, TransitionArray* array) {
+ ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this);
+ // Visit strong references.
+ if (array->HasPrototypeTransitions()) {
+ visitor->VisitPointer(array, array->GetPrototypeTransitionsSlot());
}
-
- // When map collection is enabled we have to mark through map's transitions
- // and back pointers in a special way to make these links weak.
- if (map_object->CanTransition()) {
- MarkMapContents(heap, map_object);
- } else {
- StaticVisitor::VisitPointers(
- heap, object,
- HeapObject::RawField(object, Map::kPointerFieldsBeginOffset),
- HeapObject::RawField(object, Map::kPointerFieldsEndOffset));
+ int num_transitions = TransitionArray::NumberOfTransitions(array);
+ for (int i = 0; i < num_transitions; ++i) {
+ visitor->VisitPointer(array, array->GetKeySlot(i));
}
+  // Enqueue the array in the linked list of encountered transition arrays if
+  // it is not already in the list.
+ if (array->next_link()->IsUndefined(heap_->isolate())) {
+ array->set_next_link(heap_->encountered_transition_arrays(),
+ UPDATE_WEAK_WRITE_BARRIER);
+ heap_->set_encountered_transition_arrays(array);
+ }
+ return TransitionArray::BodyDescriptor::SizeOf(map, array);
}
-template <typename StaticVisitor>
-void StaticMarkingVisitor<StaticVisitor>::VisitWeakCell(Map* map,
- HeapObject* object) {
- Heap* heap = map->GetHeap();
- WeakCell* weak_cell = reinterpret_cast<WeakCell*>(object);
+template <typename ConcreteVisitor>
+int MarkingVisitor<ConcreteVisitor>::VisitWeakCell(Map* map,
+ WeakCell* weak_cell) {
// Enqueue weak cell in linked list of encountered weak collections.
// We can ignore weak cells with cleared values because they will always
// contain smi zero.
if (weak_cell->next_cleared() && !weak_cell->cleared()) {
HeapObject* value = HeapObject::cast(weak_cell->value());
- if (ObjectMarking::IsBlackOrGrey(value, MarkingState::Internal(value))) {
+ if (ObjectMarking::IsBlackOrGrey<IncrementalMarking::kAtomicity>(
+ value, collector_->marking_state(value))) {
// Weak cells with live values are directly processed here to reduce
// the processing time of weak cells during the main GC pause.
Object** slot = HeapObject::RawField(weak_cell, WeakCell::kValueOffset);
- map->GetHeap()->mark_compact_collector()->RecordSlot(weak_cell, slot,
- *slot);
+ collector_->RecordSlot(weak_cell, slot, *slot);
} else {
// If we do not know about liveness of values of weak cells, we have to
// process them when we know the liveness of the whole transitive
// closure.
- weak_cell->set_next(heap->encountered_weak_cells(),
+ weak_cell->set_next(heap_->encountered_weak_cells(),
UPDATE_WEAK_WRITE_BARRIER);
- heap->set_encountered_weak_cells(weak_cell);
+ heap_->set_encountered_weak_cells(weak_cell);
}
}
+ return WeakCell::BodyDescriptor::SizeOf(map, weak_cell);
}
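
VisitWeakCell either records the slot immediately, when the value is already known to be live, or defers the cell onto the heap's intrusive encountered_weak_cells list for processing once the transitive closure is marked. A toy sketch of that defer-onto-a-list pattern; every type and function here is invented for illustration.

struct ToyWeakCell {
  int value;                      // stand-in for the weakly held value
  ToyWeakCell* next = nullptr;    // intrusive "encountered" list link
};

struct ToyHeap {
  ToyWeakCell* encountered_weak_cells = nullptr;
  bool IsMarkedLive(int value) const { return value % 2 == 0; }  // stand-in
};

void VisitWeakCellSketch(ToyHeap* heap, ToyWeakCell* cell) {
  if (heap->IsMarkedLive(cell->value)) {
    // Live value: the reference can be handled right away (record the slot).
  } else {
    // Liveness unknown until marking finishes: defer onto the intrusive list.
    cell->next = heap->encountered_weak_cells;
    heap->encountered_weak_cells = cell;
  }
}
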
-
-template <typename StaticVisitor>
-void StaticMarkingVisitor<StaticVisitor>::VisitTransitionArray(
- Map* map, HeapObject* object) {
- TransitionArray* array = TransitionArray::cast(object);
- Heap* heap = array->GetHeap();
- // Visit strong references.
- if (array->HasPrototypeTransitions()) {
- StaticVisitor::VisitPointer(heap, array,
- array->GetPrototypeTransitionsSlot());
- }
- int num_transitions = TransitionArray::NumberOfTransitions(array);
- for (int i = 0; i < num_transitions; ++i) {
- StaticVisitor::VisitPointer(heap, array, array->GetKeySlot(i));
- }
- // Enqueue the array in linked list of encountered transition arrays if it is
- // not already in the list.
- if (array->next_link()->IsUndefined(heap->isolate())) {
- Heap* heap = map->GetHeap();
- array->set_next_link(heap->encountered_transition_arrays(),
- UPDATE_WEAK_WRITE_BARRIER);
- heap->set_encountered_transition_arrays(array);
- }
+template <typename ConcreteVisitor>
+int MarkingVisitor<ConcreteVisitor>::VisitNativeContext(Map* map,
+ Context* context) {
+ ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this);
+ int size = Context::BodyDescriptorWeak::SizeOf(map, context);
+ Context::BodyDescriptorWeak::IterateBody(context, size, visitor);
+ return size;
}
-template <typename StaticVisitor>
-void StaticMarkingVisitor<StaticVisitor>::VisitWeakCollection(
- Map* map, HeapObject* object) {
- typedef FlexibleBodyVisitor<StaticVisitor,
- JSWeakCollection::BodyDescriptorWeak,
- void> JSWeakCollectionBodyVisitor;
- Heap* heap = map->GetHeap();
- JSWeakCollection* weak_collection =
- reinterpret_cast<JSWeakCollection*>(object);
+template <typename ConcreteVisitor>
+int MarkingVisitor<ConcreteVisitor>::VisitJSWeakCollection(
+ Map* map, JSWeakCollection* weak_collection) {
+ ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this);
// Enqueue weak collection in linked list of encountered weak collections.
- if (weak_collection->next() == heap->undefined_value()) {
- weak_collection->set_next(heap->encountered_weak_collections());
- heap->set_encountered_weak_collections(weak_collection);
+ if (weak_collection->next() == heap_->undefined_value()) {
+ weak_collection->set_next(heap_->encountered_weak_collections());
+ heap_->set_encountered_weak_collections(weak_collection);
}
// Skip visiting the backing hash table containing the mappings and the
// pointer to the other enqueued weak collections, both are post-processed.
- JSWeakCollectionBodyVisitor::Visit(map, object);
+ int size = JSWeakCollection::BodyDescriptorWeak::SizeOf(map, weak_collection);
+ JSWeakCollection::BodyDescriptorWeak::IterateBody(weak_collection, size,
+ visitor);
// Partially initialized weak collection is enqueued, but table is ignored.
- if (!weak_collection->table()->IsHashTable()) return;
+ if (!weak_collection->table()->IsHashTable()) return size;
// Mark the backing hash table without pushing it on the marking stack.
- Object** slot = HeapObject::RawField(object, JSWeakCollection::kTableOffset);
+ Object** slot =
+ HeapObject::RawField(weak_collection, JSWeakCollection::kTableOffset);
HeapObject* obj = HeapObject::cast(*slot);
- heap->mark_compact_collector()->RecordSlot(object, slot, obj);
- StaticVisitor::MarkObjectWithoutPush(heap, obj);
+ collector_->RecordSlot(weak_collection, slot, obj);
+ visitor->MarkObjectWithoutPush(obj);
+ return size;
}
-
-template <typename StaticVisitor>
-void StaticMarkingVisitor<StaticVisitor>::VisitCode(Map* map,
- HeapObject* object) {
- typedef FlexibleBodyVisitor<StaticVisitor, Code::BodyDescriptor, void>
- CodeBodyVisitor;
- Heap* heap = map->GetHeap();
- Code* code = Code::cast(object);
- if (FLAG_age_code && !heap->isolate()->serializer_enabled()) {
- code->MakeOlder();
+template <typename ConcreteVisitor>
+int MarkingVisitor<ConcreteVisitor>::VisitSharedFunctionInfo(
+ Map* map, SharedFunctionInfo* sfi) {
+ ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this);
+ if (sfi->ic_age() != heap_->global_ic_age()) {
+ sfi->ResetForNewContext(heap_->global_ic_age());
}
- CodeBodyVisitor::Visit(map, object);
+ int size = SharedFunctionInfo::BodyDescriptor::SizeOf(map, sfi);
+ SharedFunctionInfo::BodyDescriptor::IterateBody(sfi, size, visitor);
+ return size;
}
-
-template <typename StaticVisitor>
-void StaticMarkingVisitor<StaticVisitor>::VisitSharedFunctionInfo(
- Map* map, HeapObject* object) {
- Heap* heap = map->GetHeap();
- SharedFunctionInfo* shared = SharedFunctionInfo::cast(object);
- if (shared->ic_age() != heap->global_ic_age()) {
- shared->ResetForNewContext(heap->global_ic_age());
- }
- MarkCompactCollector* collector = heap->mark_compact_collector();
- if (collector->is_code_flushing_enabled()) {
- if (IsFlushable(heap, shared)) {
- // This function's code looks flushable. But we have to postpone
- // the decision until we see all functions that point to the same
- // SharedFunctionInfo because some of them might be optimized.
- // That would also make the non-optimized version of the code
- // non-flushable, because it is required for bailing out from
- // optimized code.
- collector->code_flusher()->AddCandidate(shared);
- // Treat the reference to the code object weakly.
- VisitSharedFunctionInfoWeakCode(map, object);
- return;
- }
- }
- VisitSharedFunctionInfoStrongCode(map, object);
+template <typename ConcreteVisitor>
+int MarkingVisitor<ConcreteVisitor>::VisitBytecodeArray(Map* map,
+ BytecodeArray* array) {
+ ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this);
+ int size = BytecodeArray::BodyDescriptor::SizeOf(map, array);
+ BytecodeArray::BodyDescriptor::IterateBody(array, size, visitor);
+ array->MakeOlder();
+ return size;
}
-
-template <typename StaticVisitor>
-void StaticMarkingVisitor<StaticVisitor>::VisitJSFunction(Map* map,
- HeapObject* object) {
- Heap* heap = map->GetHeap();
- JSFunction* function = JSFunction::cast(object);
- MarkCompactCollector* collector = heap->mark_compact_collector();
- if (collector->is_code_flushing_enabled()) {
- if (IsFlushable(heap, function)) {
- // This function's code looks flushable. But we have to postpone
- // the decision until we see all functions that point to the same
- // SharedFunctionInfo because some of them might be optimized.
- // That would also make the non-optimized version of the code
- // non-flushable, because it is required for bailing out from
- // optimized code.
- collector->code_flusher()->AddCandidate(function);
- // Treat the reference to the code object weakly.
- VisitJSFunctionWeakCode(map, object);
- return;
- } else {
- // Visit all unoptimized code objects to prevent flushing them.
- StaticVisitor::MarkObject(heap, function->shared()->code());
- }
+template <typename ConcreteVisitor>
+int MarkingVisitor<ConcreteVisitor>::VisitCode(Map* map, Code* code) {
+ if (FLAG_age_code && !heap_->isolate()->serializer_enabled()) {
+ code->MakeOlder();
}
- VisitJSFunctionStrongCode(map, object);
+ ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this);
+ int size = Code::BodyDescriptor::SizeOf(map, code);
+ Code::BodyDescriptor::IterateBody(code, size, visitor);
+ return size;
}
-template <typename StaticVisitor>
-void StaticMarkingVisitor<StaticVisitor>::MarkMapContents(Heap* heap,
- Map* map) {
+template <typename ConcreteVisitor>
+void MarkingVisitor<ConcreteVisitor>::MarkMapContents(Map* map) {
+ ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this);
// Since descriptor arrays are potentially shared, ensure that only the
// descriptors that belong to this map are marked. The first time a non-empty
// descriptor array is marked, its header is also visited. The slot holding
@@ -478,296 +321,134 @@ void StaticMarkingVisitor<StaticVisitor>::MarkMapContents(Heap* heap,
// just mark the entire descriptor array.
if (!map->is_prototype_map()) {
DescriptorArray* descriptors = map->instance_descriptors();
- if (StaticVisitor::MarkObjectWithoutPush(heap, descriptors) &&
+ if (visitor->MarkObjectWithoutPush(descriptors) &&
descriptors->length() > 0) {
- StaticVisitor::VisitPointers(heap, descriptors,
- descriptors->GetFirstElementAddress(),
- descriptors->GetDescriptorEndSlot(0));
+ visitor->VisitPointers(descriptors, descriptors->GetFirstElementAddress(),
+ descriptors->GetDescriptorEndSlot(0));
}
int start = 0;
int end = map->NumberOfOwnDescriptors();
if (start < end) {
- StaticVisitor::VisitPointers(heap, descriptors,
- descriptors->GetDescriptorStartSlot(start),
- descriptors->GetDescriptorEndSlot(end));
+ visitor->VisitPointers(descriptors,
+ descriptors->GetDescriptorStartSlot(start),
+ descriptors->GetDescriptorEndSlot(end));
}
}
// Mark the pointer fields of the Map. Since the transitions array has
// been marked already, it is fine that one of these fields contains a
// pointer to it.
- StaticVisitor::VisitPointers(
- heap, map, HeapObject::RawField(map, Map::kPointerFieldsBeginOffset),
+ visitor->VisitPointers(
+ map, HeapObject::RawField(map, Map::kPointerFieldsBeginOffset),
HeapObject::RawField(map, Map::kPointerFieldsEndOffset));
}
+template <typename ConcreteVisitor>
+int MarkingVisitor<ConcreteVisitor>::VisitMap(Map* map, Map* object) {
+ ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this);
-inline static bool HasSourceCode(Heap* heap, SharedFunctionInfo* info) {
- Object* undefined = heap->undefined_value();
- return (info->script() != undefined) &&
- (reinterpret_cast<Script*>(info->script())->source() != undefined);
-}
-
-
-template <typename StaticVisitor>
-bool StaticMarkingVisitor<StaticVisitor>::IsFlushable(Heap* heap,
- JSFunction* function) {
- SharedFunctionInfo* shared_info = function->shared();
-
- // Code is either on stack, in compilation cache or referenced
- // by optimized version of function.
- if (ObjectMarking::IsBlackOrGrey(function->code(),
- MarkingState::Internal(function->code()))) {
- return false;
- }
-
- // We do not (yet) flush code for optimized functions.
- if (function->code() != shared_info->code()) {
- return false;
+ // Clears the cache of ICs related to this map.
+ if (FLAG_cleanup_code_caches_at_gc) {
+ object->ClearCodeCache(heap_);
}
- // Check age of optimized code.
- if (FLAG_age_code && !function->code()->IsOld()) {
- return false;
+ // When map collection is enabled we have to mark through map's transitions
+ // and back pointers in a special way to make these links weak.
+ if (object->CanTransition()) {
+ MarkMapContents(object);
+ } else {
+ visitor->VisitPointers(
+ object, HeapObject::RawField(object, Map::kPointerFieldsBeginOffset),
+ HeapObject::RawField(object, Map::kPointerFieldsEndOffset));
}
-
- return IsFlushable(heap, shared_info);
+ return Map::BodyDescriptor::SizeOf(map, object);
}
-
-template <typename StaticVisitor>
-bool StaticMarkingVisitor<StaticVisitor>::IsFlushable(
- Heap* heap, SharedFunctionInfo* shared_info) {
- // Code is either on stack, in compilation cache or referenced
- // by optimized version of function.
- if (ObjectMarking::IsBlackOrGrey(
- shared_info->code(), MarkingState::Internal(shared_info->code()))) {
- return false;
- }
-
- // The function must be compiled and have the source code available,
- // to be able to recompile it in case we need the function again.
- if (!(shared_info->is_compiled() && HasSourceCode(heap, shared_info))) {
- return false;
- }
-
- // We never flush code for API functions.
- if (shared_info->IsApiFunction()) {
- return false;
- }
-
- // Only flush code for functions.
- if (shared_info->code()->kind() != Code::FUNCTION) {
- return false;
- }
-
- // Function must be lazy compilable.
- if (!shared_info->allows_lazy_compilation()) {
- return false;
- }
-
- // We do not (yet?) flush code for generator functions, or async functions,
- // because we don't know if there are still live activations
- // (generator objects) on the heap.
- if (IsResumableFunction(shared_info->kind())) {
- return false;
- }
-
- // If this is a full script wrapped in a function we do not flush the code.
- if (shared_info->is_toplevel()) {
- return false;
- }
-
- // The function must be user code.
- if (!shared_info->IsUserJavaScript()) {
- return false;
- }
-
- // Maintain debug break slots in the code.
- if (shared_info->HasDebugCode()) {
- return false;
- }
-
- // If this is a function initialized with %SetCode then the one-to-one
- // relation between SharedFunctionInfo and Code is broken.
- if (shared_info->dont_flush()) {
- return false;
- }
-
- // Check age of code. If code aging is disabled we never flush.
- if (!FLAG_age_code || !shared_info->code()->IsOld()) {
- return false;
+template <typename ConcreteVisitor>
+int MarkingVisitor<ConcreteVisitor>::VisitJSApiObject(Map* map,
+ JSObject* object) {
+ ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this);
+ if (heap_->local_embedder_heap_tracer()->InUse()) {
+ DCHECK(object->IsJSObject());
+ heap_->TracePossibleWrapper(object);
}
-
- return true;
-}
-
-template <typename StaticVisitor>
-void StaticMarkingVisitor<StaticVisitor>::VisitSharedFunctionInfoStrongCode(
- Map* map, HeapObject* object) {
- FixedBodyVisitor<StaticVisitor, SharedFunctionInfo::BodyDescriptor,
- void>::Visit(map, object);
-}
-
-template <typename StaticVisitor>
-void StaticMarkingVisitor<StaticVisitor>::VisitSharedFunctionInfoWeakCode(
- Map* map, HeapObject* object) {
- // Skip visiting kCodeOffset as it is treated weakly here.
- STATIC_ASSERT(SharedFunctionInfo::kCodeOffset <
- SharedFunctionInfo::BodyDescriptorWeakCode::kStartOffset);
- FixedBodyVisitor<StaticVisitor, SharedFunctionInfo::BodyDescriptorWeakCode,
- void>::Visit(map, object);
+ int size = JSObject::BodyDescriptor::SizeOf(map, object);
+ JSObject::BodyDescriptor::IterateBody(object, size, visitor);
+ return size;
}
-template <typename StaticVisitor>
-void StaticMarkingVisitor<StaticVisitor>::VisitJSFunctionStrongCode(
- Map* map, HeapObject* object) {
- typedef FlexibleBodyVisitor<StaticVisitor,
- JSFunction::BodyDescriptorStrongCode,
- void> JSFunctionStrongCodeBodyVisitor;
- JSFunctionStrongCodeBodyVisitor::Visit(map, object);
+template <typename ConcreteVisitor>
+int MarkingVisitor<ConcreteVisitor>::VisitAllocationSite(
+ Map* map, AllocationSite* object) {
+ ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this);
+ int size = AllocationSite::BodyDescriptorWeak::SizeOf(map, object);
+ AllocationSite::BodyDescriptorWeak::IterateBody(object, size, visitor);
+ return size;
}
-template <typename StaticVisitor>
-void StaticMarkingVisitor<StaticVisitor>::VisitJSFunctionWeakCode(
- Map* map, HeapObject* object) {
- typedef FlexibleBodyVisitor<StaticVisitor, JSFunction::BodyDescriptorWeakCode,
- void> JSFunctionWeakCodeBodyVisitor;
- JSFunctionWeakCodeBodyVisitor::Visit(map, object);
+template <typename ConcreteVisitor>
+void MarkingVisitor<ConcreteVisitor>::VisitCodeEntry(JSFunction* host,
+ Address entry_address) {
+ ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this);
+ Code* code = Code::cast(Code::GetObjectFromEntryAddress(entry_address));
+ collector_->RecordCodeEntrySlot(host, entry_address, code);
+ visitor->MarkObject(code);
}
-template <typename ResultType, typename ConcreteVisitor>
-ResultType HeapVisitor<ResultType, ConcreteVisitor>::Visit(HeapObject* object) {
- Map* map = object->map();
+template <typename ConcreteVisitor>
+void MarkingVisitor<ConcreteVisitor>::VisitEmbeddedPointer(Code* host,
+ RelocInfo* rinfo) {
ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this);
- switch (static_cast<VisitorId>(map->visitor_id())) {
-#define CASE(type) \
- case kVisit##type: \
- return visitor->Visit##type(map, type::cast(object));
- TYPED_VISITOR_ID_LIST(CASE)
-#undef CASE
- case kVisitShortcutCandidate:
- return visitor->VisitShortcutCandidate(map, ConsString::cast(object));
- case kVisitNativeContext:
- return visitor->VisitNativeContext(map, Context::cast(object));
- case kVisitDataObject:
- return visitor->VisitDataObject(map, HeapObject::cast(object));
- case kVisitJSObjectFast:
- return visitor->VisitJSObjectFast(map, JSObject::cast(object));
- case kVisitJSApiObject:
- return visitor->VisitJSApiObject(map, JSObject::cast(object));
- case kVisitStruct:
- return visitor->VisitStruct(map, HeapObject::cast(object));
- case kVisitFreeSpace:
- return visitor->VisitFreeSpace(map, FreeSpace::cast(object));
- case kVisitorIdCount:
- UNREACHABLE();
+ DCHECK(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
+ HeapObject* object = HeapObject::cast(rinfo->target_object());
+ collector_->RecordRelocSlot(host, rinfo, object);
+ if (!host->IsWeakObject(object)) {
+ visitor->MarkObject(object);
}
- UNREACHABLE();
- // Make the compiler happy.
- return ResultType();
-}
-
-template <typename ResultType, typename ConcreteVisitor>
-void HeapVisitor<ResultType, ConcreteVisitor>::VisitMapPointer(
- HeapObject* host, HeapObject** map) {
- static_cast<ConcreteVisitor*>(this)->VisitPointer(
- host, reinterpret_cast<Object**>(map));
-}
-
-template <typename ResultType, typename ConcreteVisitor>
-bool HeapVisitor<ResultType, ConcreteVisitor>::ShouldVisit(HeapObject* object) {
- return true;
}
-#define VISIT(type) \
- template <typename ResultType, typename ConcreteVisitor> \
- ResultType HeapVisitor<ResultType, ConcreteVisitor>::Visit##type( \
- Map* map, type* object) { \
- ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this); \
- if (!visitor->ShouldVisit(object)) return ResultType(); \
- int size = type::BodyDescriptor::SizeOf(map, object); \
- visitor->VisitMapPointer(object, object->map_slot()); \
- type::BodyDescriptor::IterateBody(object, size, visitor); \
- return static_cast<ResultType>(size); \
- }
-TYPED_VISITOR_ID_LIST(VISIT)
-#undef VISIT
-
-template <typename ResultType, typename ConcreteVisitor>
-ResultType HeapVisitor<ResultType, ConcreteVisitor>::VisitShortcutCandidate(
- Map* map, ConsString* object) {
+template <typename ConcreteVisitor>
+void MarkingVisitor<ConcreteVisitor>::VisitCellPointer(Code* host,
+ RelocInfo* rinfo) {
ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this);
- if (!visitor->ShouldVisit(object)) return ResultType();
- int size = ConsString::BodyDescriptor::SizeOf(map, object);
- visitor->VisitMapPointer(object, object->map_slot());
- ConsString::BodyDescriptor::IterateBody(object, size,
- static_cast<ConcreteVisitor*>(this));
- return static_cast<ResultType>(size);
+ DCHECK(rinfo->rmode() == RelocInfo::CELL);
+ Cell* cell = rinfo->target_cell();
+ collector_->RecordRelocSlot(host, rinfo, cell);
+ if (!host->IsWeakObject(cell)) {
+ visitor->MarkObject(cell);
+ }
}
-template <typename ResultType, typename ConcreteVisitor>
-ResultType HeapVisitor<ResultType, ConcreteVisitor>::VisitNativeContext(
- Map* map, Context* object) {
+template <typename ConcreteVisitor>
+void MarkingVisitor<ConcreteVisitor>::VisitDebugTarget(Code* host,
+ RelocInfo* rinfo) {
ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this);
- if (!visitor->ShouldVisit(object)) return ResultType();
- int size = Context::BodyDescriptor::SizeOf(map, object);
- visitor->VisitMapPointer(object, object->map_slot());
- Context::BodyDescriptor::IterateBody(object, size,
- static_cast<ConcreteVisitor*>(this));
- return static_cast<ResultType>(size);
+ DCHECK(RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
+ rinfo->IsPatchedDebugBreakSlotSequence());
+ Code* target = Code::GetCodeFromTargetAddress(rinfo->debug_call_address());
+ collector_->RecordRelocSlot(host, rinfo, target);
+ visitor->MarkObject(target);
}
-template <typename ResultType, typename ConcreteVisitor>
-ResultType HeapVisitor<ResultType, ConcreteVisitor>::VisitDataObject(
- Map* map, HeapObject* object) {
+template <typename ConcreteVisitor>
+void MarkingVisitor<ConcreteVisitor>::VisitCodeTarget(Code* host,
+ RelocInfo* rinfo) {
ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this);
- if (!visitor->ShouldVisit(object)) return ResultType();
- int size = map->instance_size();
- visitor->VisitMapPointer(object, object->map_slot());
- return static_cast<ResultType>(size);
+ DCHECK(RelocInfo::IsCodeTarget(rinfo->rmode()));
+ Code* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
+ collector_->RecordRelocSlot(host, rinfo, target);
+ visitor->MarkObject(target);
}
-template <typename ResultType, typename ConcreteVisitor>
-ResultType HeapVisitor<ResultType, ConcreteVisitor>::VisitJSObjectFast(
- Map* map, JSObject* object) {
- ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this);
- if (!visitor->ShouldVisit(object)) return ResultType();
- int size = JSObject::FastBodyDescriptor::SizeOf(map, object);
- visitor->VisitMapPointer(object, object->map_slot());
- JSObject::FastBodyDescriptor::IterateBody(
- object, size, static_cast<ConcreteVisitor*>(this));
- return static_cast<ResultType>(size);
-}
-template <typename ResultType, typename ConcreteVisitor>
-ResultType HeapVisitor<ResultType, ConcreteVisitor>::VisitJSApiObject(
- Map* map, JSObject* object) {
- ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this);
- if (!visitor->ShouldVisit(object)) return ResultType();
- int size = JSObject::BodyDescriptor::SizeOf(map, object);
- visitor->VisitMapPointer(object, object->map_slot());
- JSObject::BodyDescriptor::IterateBody(object, size,
- static_cast<ConcreteVisitor*>(this));
- return static_cast<ResultType>(size);
-}
-template <typename ResultType, typename ConcreteVisitor>
-ResultType HeapVisitor<ResultType, ConcreteVisitor>::VisitStruct(
- Map* map, HeapObject* object) {
- ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this);
- if (!visitor->ShouldVisit(object)) return ResultType();
- int size = map->instance_size();
- visitor->VisitMapPointer(object, object->map_slot());
- StructBodyDescriptor::IterateBody(object, size,
- static_cast<ConcreteVisitor*>(this));
- return static_cast<ResultType>(size);
-}
-template <typename ResultType, typename ConcreteVisitor>
-ResultType HeapVisitor<ResultType, ConcreteVisitor>::VisitFreeSpace(
- Map* map, FreeSpace* object) {
+template <typename ConcreteVisitor>
+void MarkingVisitor<ConcreteVisitor>::VisitCodeAgeSequence(Code* host,
+ RelocInfo* rinfo) {
ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this);
- if (!visitor->ShouldVisit(object)) return ResultType();
- visitor->VisitMapPointer(object, object->map_slot());
- return static_cast<ResultType>(FreeSpace::cast(object)->size());
+ DCHECK(RelocInfo::IsCodeAgeSequence(rinfo->rmode()));
+ Code* target = rinfo->code_age_stub();
+ DCHECK_NOT_NULL(target);
+ collector_->RecordRelocSlot(host, rinfo, target);
+ visitor->MarkObject(target);
}
} // namespace internal
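The reloc-info callbacks above all follow the same shape: record the slot with the collector for later pointer updates, then mark the target only when the host references it strongly. A minimal self-contained sketch of that pattern (Obj and CollectorSketch are invented stand-ins, not V8 types):

#include <cstdio>
#include <unordered_set>
#include <vector>

// Obj and CollectorSketch are stand-ins for HeapObject and
// MarkCompactCollector; only the shape of the interaction is modelled.
struct Obj { bool weakly_referenced_by_host; };

class CollectorSketch {
 public:
  void RecordSlot(Obj** slot) { recorded_.push_back(slot); }
  void MarkObject(Obj* obj) {
    if (marked_.insert(obj).second) worklist_.push_back(obj);  // mark once
  }
  size_t recorded_slots() const { return recorded_.size(); }
  size_t marked_objects() const { return marked_.size(); }

 private:
  std::vector<Obj**> recorded_;
  std::unordered_set<Obj*> marked_;
  std::vector<Obj*> worklist_;
};

// Mirrors the shape of VisitEmbeddedPointer: always record the slot, but only
// mark the target when the host holds it strongly.
void VisitEmbeddedPointerSketch(CollectorSketch* collector, Obj** slot) {
  collector->RecordSlot(slot);
  if (!(*slot)->weakly_referenced_by_host) collector->MarkObject(*slot);
}

int main() {
  CollectorSketch collector;
  Obj strong{false}, weak{true};
  Obj* slot1 = &strong;
  Obj* slot2 = &weak;
  VisitEmbeddedPointerSketch(&collector, &slot1);
  VisitEmbeddedPointerSketch(&collector, &slot2);
  std::printf("recorded=%zu marked=%zu\n", collector.recorded_slots(),
              collector.marked_objects());  // recorded=2 marked=1
}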
diff --git a/deps/v8/src/heap/objects-visiting.cc b/deps/v8/src/heap/objects-visiting.cc
index 5849fcb882..e6e59e1f77 100644
--- a/deps/v8/src/heap/objects-visiting.cc
+++ b/deps/v8/src/heap/objects-visiting.cc
@@ -11,201 +11,6 @@
namespace v8 {
namespace internal {
-VisitorId StaticVisitorBase::GetVisitorId(Map* map) {
- return GetVisitorId(map->instance_type(), map->instance_size(),
- FLAG_unbox_double_fields && !map->HasFastPointerLayout());
-}
-
-VisitorId StaticVisitorBase::GetVisitorId(int instance_type, int instance_size,
- bool has_unboxed_fields) {
- if (instance_type < FIRST_NONSTRING_TYPE) {
- switch (instance_type & kStringRepresentationMask) {
- case kSeqStringTag:
- if ((instance_type & kStringEncodingMask) == kOneByteStringTag) {
- return kVisitSeqOneByteString;
- } else {
- return kVisitSeqTwoByteString;
- }
-
- case kConsStringTag:
- if (IsShortcutCandidate(instance_type)) {
- return kVisitShortcutCandidate;
- } else {
- return kVisitConsString;
- }
-
- case kSlicedStringTag:
- return kVisitSlicedString;
-
- case kExternalStringTag:
- return kVisitDataObject;
-
- case kThinStringTag:
- return kVisitThinString;
- }
- UNREACHABLE();
- }
-
- switch (instance_type) {
- case BYTE_ARRAY_TYPE:
- return kVisitByteArray;
-
- case BYTECODE_ARRAY_TYPE:
- return kVisitBytecodeArray;
-
- case FREE_SPACE_TYPE:
- return kVisitFreeSpace;
-
- case FIXED_ARRAY_TYPE:
- return kVisitFixedArray;
-
- case FIXED_DOUBLE_ARRAY_TYPE:
- return kVisitFixedDoubleArray;
-
- case ODDBALL_TYPE:
- return kVisitOddball;
-
- case MAP_TYPE:
- return kVisitMap;
-
- case CODE_TYPE:
- return kVisitCode;
-
- case CELL_TYPE:
- return kVisitCell;
-
- case PROPERTY_CELL_TYPE:
- return kVisitPropertyCell;
-
- case WEAK_CELL_TYPE:
- return kVisitWeakCell;
-
- case TRANSITION_ARRAY_TYPE:
- return kVisitTransitionArray;
-
- case JS_WEAK_MAP_TYPE:
- case JS_WEAK_SET_TYPE:
- return kVisitJSWeakCollection;
-
- case JS_REGEXP_TYPE:
- return kVisitJSRegExp;
-
- case SHARED_FUNCTION_INFO_TYPE:
- return kVisitSharedFunctionInfo;
-
- case JS_PROXY_TYPE:
- return kVisitStruct;
-
- case SYMBOL_TYPE:
- return kVisitSymbol;
-
- case JS_ARRAY_BUFFER_TYPE:
- return kVisitJSArrayBuffer;
-
- case JS_OBJECT_TYPE:
- case JS_ERROR_TYPE:
- case JS_ARGUMENTS_TYPE:
- case JS_ASYNC_FROM_SYNC_ITERATOR_TYPE:
- case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
- case JS_GENERATOR_OBJECT_TYPE:
- case JS_ASYNC_GENERATOR_OBJECT_TYPE:
- case JS_MODULE_NAMESPACE_TYPE:
- case JS_VALUE_TYPE:
- case JS_DATE_TYPE:
- case JS_ARRAY_TYPE:
- case JS_GLOBAL_PROXY_TYPE:
- case JS_GLOBAL_OBJECT_TYPE:
- case JS_MESSAGE_OBJECT_TYPE:
- case JS_TYPED_ARRAY_TYPE:
- case JS_DATA_VIEW_TYPE:
- case JS_SET_TYPE:
- case JS_MAP_TYPE:
- case JS_SET_ITERATOR_TYPE:
- case JS_MAP_ITERATOR_TYPE:
- case JS_STRING_ITERATOR_TYPE:
-
- case JS_TYPED_ARRAY_KEY_ITERATOR_TYPE:
- case JS_FAST_ARRAY_KEY_ITERATOR_TYPE:
- case JS_GENERIC_ARRAY_KEY_ITERATOR_TYPE:
- case JS_UINT8_ARRAY_KEY_VALUE_ITERATOR_TYPE:
- case JS_INT8_ARRAY_KEY_VALUE_ITERATOR_TYPE:
- case JS_UINT16_ARRAY_KEY_VALUE_ITERATOR_TYPE:
- case JS_INT16_ARRAY_KEY_VALUE_ITERATOR_TYPE:
- case JS_UINT32_ARRAY_KEY_VALUE_ITERATOR_TYPE:
- case JS_INT32_ARRAY_KEY_VALUE_ITERATOR_TYPE:
- case JS_FLOAT32_ARRAY_KEY_VALUE_ITERATOR_TYPE:
- case JS_FLOAT64_ARRAY_KEY_VALUE_ITERATOR_TYPE:
- case JS_UINT8_CLAMPED_ARRAY_KEY_VALUE_ITERATOR_TYPE:
- case JS_FAST_SMI_ARRAY_KEY_VALUE_ITERATOR_TYPE:
- case JS_FAST_HOLEY_SMI_ARRAY_KEY_VALUE_ITERATOR_TYPE:
- case JS_FAST_ARRAY_KEY_VALUE_ITERATOR_TYPE:
- case JS_FAST_HOLEY_ARRAY_KEY_VALUE_ITERATOR_TYPE:
- case JS_FAST_DOUBLE_ARRAY_KEY_VALUE_ITERATOR_TYPE:
- case JS_FAST_HOLEY_DOUBLE_ARRAY_KEY_VALUE_ITERATOR_TYPE:
- case JS_GENERIC_ARRAY_KEY_VALUE_ITERATOR_TYPE:
- case JS_UINT8_ARRAY_VALUE_ITERATOR_TYPE:
- case JS_INT8_ARRAY_VALUE_ITERATOR_TYPE:
- case JS_UINT16_ARRAY_VALUE_ITERATOR_TYPE:
- case JS_INT16_ARRAY_VALUE_ITERATOR_TYPE:
- case JS_UINT32_ARRAY_VALUE_ITERATOR_TYPE:
- case JS_INT32_ARRAY_VALUE_ITERATOR_TYPE:
- case JS_FLOAT32_ARRAY_VALUE_ITERATOR_TYPE:
- case JS_FLOAT64_ARRAY_VALUE_ITERATOR_TYPE:
- case JS_UINT8_CLAMPED_ARRAY_VALUE_ITERATOR_TYPE:
- case JS_FAST_SMI_ARRAY_VALUE_ITERATOR_TYPE:
- case JS_FAST_HOLEY_SMI_ARRAY_VALUE_ITERATOR_TYPE:
- case JS_FAST_ARRAY_VALUE_ITERATOR_TYPE:
- case JS_FAST_HOLEY_ARRAY_VALUE_ITERATOR_TYPE:
- case JS_FAST_DOUBLE_ARRAY_VALUE_ITERATOR_TYPE:
- case JS_FAST_HOLEY_DOUBLE_ARRAY_VALUE_ITERATOR_TYPE:
- case JS_GENERIC_ARRAY_VALUE_ITERATOR_TYPE:
-
- case JS_PROMISE_CAPABILITY_TYPE:
- case JS_PROMISE_TYPE:
- case JS_BOUND_FUNCTION_TYPE:
- return has_unboxed_fields ? kVisitJSObject : kVisitJSObjectFast;
- case JS_API_OBJECT_TYPE:
- case JS_SPECIAL_API_OBJECT_TYPE:
- return kVisitJSApiObject;
-
- case JS_FUNCTION_TYPE:
- return kVisitJSFunction;
-
- case FILLER_TYPE:
- case FOREIGN_TYPE:
- case HEAP_NUMBER_TYPE:
- case MUTABLE_HEAP_NUMBER_TYPE:
- return kVisitDataObject;
-
- case FIXED_UINT8_ARRAY_TYPE:
- case FIXED_INT8_ARRAY_TYPE:
- case FIXED_UINT16_ARRAY_TYPE:
- case FIXED_INT16_ARRAY_TYPE:
- case FIXED_UINT32_ARRAY_TYPE:
- case FIXED_INT32_ARRAY_TYPE:
- case FIXED_FLOAT32_ARRAY_TYPE:
- case FIXED_UINT8_CLAMPED_ARRAY_TYPE:
- return kVisitFixedTypedArrayBase;
-
- case FIXED_FLOAT64_ARRAY_TYPE:
- return kVisitFixedFloat64Array;
-
-#define MAKE_STRUCT_CASE(NAME, Name, name) case NAME##_TYPE:
- STRUCT_LIST(MAKE_STRUCT_CASE)
-#undef MAKE_STRUCT_CASE
- if (instance_type == ALLOCATION_SITE_TYPE) {
- return kVisitAllocationSite;
- }
-
- return kVisitStruct;
-
- default:
- UNREACHABLE();
- return kVisitorIdCount;
- }
-}
-
-
// We don't record weak slots during marking or scavenges. Instead we do it
// once when we complete mark-compact cycle. Note that write barrier has no
// effect if we are already in the middle of compacting mark-sweep cycle and we
diff --git a/deps/v8/src/heap/objects-visiting.h b/deps/v8/src/heap/objects-visiting.h
index c578a42d64..efb1c32f1c 100644
--- a/deps/v8/src/heap/objects-visiting.h
+++ b/deps/v8/src/heap/objects-visiting.h
@@ -6,348 +6,14 @@
#define V8_OBJECTS_VISITING_H_
#include "src/allocation.h"
-#include "src/heap/embedder-tracing.h"
#include "src/heap/heap.h"
-#include "src/heap/spaces.h"
#include "src/layout-descriptor.h"
#include "src/objects-body-descriptors.h"
-
-// This file provides base classes and auxiliary methods for defining
-// static object visitors used during GC.
-// Visiting HeapObject body with a normal ObjectVisitor requires performing
-// two switches on object's instance type to determine object size and layout
-// and one or more virtual method calls on visitor itself.
-// Static visitor is different: it provides a dispatch table which contains
-// pointers to specialized visit functions. Each map has the visitor_id
-// field which contains an index of specialized visitor to use.
+#include "src/objects/string.h"
namespace v8 {
namespace internal {
-#define VISITOR_ID_LIST(V) \
- V(SeqOneByteString) \
- V(SeqTwoByteString) \
- V(ShortcutCandidate) \
- V(ByteArray) \
- V(BytecodeArray) \
- V(FreeSpace) \
- V(FixedArray) \
- V(FixedDoubleArray) \
- V(FixedTypedArrayBase) \
- V(FixedFloat64Array) \
- V(NativeContext) \
- V(AllocationSite) \
- V(DataObject) \
- V(JSObjectFast) \
- V(JSObject) \
- V(JSApiObject) \
- V(Struct) \
- V(ConsString) \
- V(SlicedString) \
- V(ThinString) \
- V(Symbol) \
- V(Oddball) \
- V(Code) \
- V(Map) \
- V(Cell) \
- V(PropertyCell) \
- V(WeakCell) \
- V(TransitionArray) \
- V(SharedFunctionInfo) \
- V(JSFunction) \
- V(JSWeakCollection) \
- V(JSArrayBuffer) \
- V(JSRegExp)
-
-// For data objects, JS objects and structs along with generic visitor which
-// can visit object of any size we provide visitors specialized by
-// object size in words.
-// Ids of specialized visitors are declared in a linear order (without
-// holes) starting from the id of visitor specialized for 2 words objects
-// (base visitor id) and ending with the id of generic visitor.
-// Method GetVisitorIdForSize depends on this ordering to calculate visitor
-// id of specialized visitor from given instance size, base visitor id and
-// generic visitor's id.
-enum VisitorId {
-#define VISITOR_ID_ENUM_DECL(id) kVisit##id,
- VISITOR_ID_LIST(VISITOR_ID_ENUM_DECL)
-#undef VISITOR_ID_ENUM_DECL
- kVisitorIdCount
-};
-
-// Base class for all static visitors.
-class StaticVisitorBase : public AllStatic {
- public:
- // Visitor ID should fit in one byte.
- STATIC_ASSERT(kVisitorIdCount <= 256);
-
- // Determine which specialized visitor should be used for given instance type
- // and instance type.
- static VisitorId GetVisitorId(int instance_type, int instance_size,
- bool has_unboxed_fields);
-
- // Determine which specialized visitor should be used for given map.
- static VisitorId GetVisitorId(Map* map);
-};
-
-
-template <typename Callback>
-class VisitorDispatchTable {
- public:
- void CopyFrom(VisitorDispatchTable* other) {
- // We are not using memcpy to guarantee that during update
- // every element of callbacks_ array will remain correct
- // pointer (memcpy might be implemented as a byte copying loop).
- for (int i = 0; i < kVisitorIdCount; i++) {
- base::NoBarrier_Store(&callbacks_[i], other->callbacks_[i]);
- }
- }
-
- inline Callback GetVisitor(Map* map);
-
- inline Callback GetVisitorById(VisitorId id) {
- return reinterpret_cast<Callback>(callbacks_[id]);
- }
-
- void Register(VisitorId id, Callback callback) {
- DCHECK(id < kVisitorIdCount); // id is unsigned.
- callbacks_[id] = reinterpret_cast<base::AtomicWord>(callback);
- }
-
- private:
- base::AtomicWord callbacks_[kVisitorIdCount];
-};
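For context on what is being removed here: VisitorDispatchTable implemented dispatch through a flat table of function pointers indexed by visitor id. A minimal sketch of that idea, with invented names and without the atomic stores of the real table:

#include <cstdio>

// Invented stand-ins; the removed code stored callbacks in base::AtomicWord
// slots and registered one entry per kVisit* id.
enum VisitorIdSketch { kVisitDataSketch, kVisitPointerObjectSketch, kVisitorIdCountSketch };

struct ObjSketch { VisitorIdSketch visitor_id; int size; };

using Callback = int (*)(ObjSketch*);

int VisitData(ObjSketch* o) { return o->size; }           // nothing to scan
int VisitPointerObject(ObjSketch* o) { return o->size; }  // would scan the body

int main() {
  Callback table[kVisitorIdCountSketch];
  table[kVisitDataSketch] = &VisitData;
  table[kVisitPointerObjectSketch] = &VisitPointerObject;

  ObjSketch o{kVisitPointerObjectSketch, 24};
  std::printf("%d\n", table[o.visitor_id](&o));  // dispatch by visitor id: 24
}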
-
-
-template <typename StaticVisitor, typename BodyDescriptor, typename ReturnType>
-class FlexibleBodyVisitor : public AllStatic {
- public:
- INLINE(static ReturnType Visit(Map* map, HeapObject* object)) {
- int object_size = BodyDescriptor::SizeOf(map, object);
- BodyDescriptor::template IterateBody<StaticVisitor>(object, object_size);
- return static_cast<ReturnType>(object_size);
- }
-};
-
-
-template <typename StaticVisitor, typename BodyDescriptor, typename ReturnType>
-class FixedBodyVisitor : public AllStatic {
- public:
- INLINE(static ReturnType Visit(Map* map, HeapObject* object)) {
- BodyDescriptor::template IterateBody<StaticVisitor>(object);
- return static_cast<ReturnType>(BodyDescriptor::kSize);
- }
-};
-
-
-// Base class for visitors used for a linear new space iteration.
-// IterateBody returns size of visited object.
-// Certain types of objects (i.e. Code objects) are not handled
-// by dispatch table of this visitor because they cannot appear
-// in the new space.
-//
-// This class is intended to be used in the following way:
-//
-// class SomeVisitor : public StaticNewSpaceVisitor<SomeVisitor> {
-// ...
-// }
-//
-// This is an example of Curiously recurring template pattern
-// (see http://en.wikipedia.org/wiki/Curiously_recurring_template_pattern).
-// We use CRTP to guarantee aggressive compile time optimizations (i.e.
-// inlining and specialization of StaticVisitor::VisitPointers methods).
-template <typename StaticVisitor>
-class StaticNewSpaceVisitor : public StaticVisitorBase {
- public:
- static void Initialize();
-
- INLINE(static int IterateBody(Map* map, HeapObject* obj)) {
- return table_.GetVisitor(map)(map, obj);
- }
-
- INLINE(static void VisitPointers(Heap* heap, HeapObject* object,
- Object** start, Object** end)) {
- for (Object** p = start; p < end; p++) {
- StaticVisitor::VisitPointer(heap, object, p);
- }
- }
-
- // Although we are using the JSFunction body descriptor which does not
- // visit the code entry, compiler wants it to be accessible.
- // See JSFunction::BodyDescriptorImpl.
- inline static void VisitCodeEntry(Heap* heap, HeapObject* object,
- Address entry_address) {
- UNREACHABLE();
- }
-
- private:
- inline static int UnreachableVisitor(Map* map, HeapObject* object) {
- UNREACHABLE();
- return 0;
- }
-
- INLINE(static int VisitByteArray(Map* map, HeapObject* object)) {
- return reinterpret_cast<ByteArray*>(object)->ByteArraySize();
- }
-
- INLINE(static int VisitFixedDoubleArray(Map* map, HeapObject* object)) {
- int length = reinterpret_cast<FixedDoubleArray*>(object)->length();
- return FixedDoubleArray::SizeFor(length);
- }
-
- INLINE(static int VisitSeqOneByteString(Map* map, HeapObject* object)) {
- return SeqOneByteString::cast(object)
- ->SeqOneByteStringSize(map->instance_type());
- }
-
- INLINE(static int VisitSeqTwoByteString(Map* map, HeapObject* object)) {
- return SeqTwoByteString::cast(object)
- ->SeqTwoByteStringSize(map->instance_type());
- }
-
- INLINE(static int VisitFreeSpace(Map* map, HeapObject* object)) {
- return FreeSpace::cast(object)->size();
- }
-
- class DataObjectVisitor {
- public:
- template <int object_size>
- static inline int VisitSpecialized(Map* map, HeapObject* object) {
- return object_size;
- }
-
- INLINE(static int Visit(Map* map, HeapObject* object)) {
- return map->instance_size();
- }
- };
-
- typedef FlexibleBodyVisitor<StaticVisitor, StructBodyDescriptor, int>
- StructVisitor;
-
- typedef FlexibleBodyVisitor<StaticVisitor, JSObject::BodyDescriptor, int>
- JSObjectVisitor;
-
- // Visitor for JSObjects without unboxed double fields.
- typedef FlexibleBodyVisitor<StaticVisitor, JSObject::FastBodyDescriptor, int>
- JSObjectFastVisitor;
-
- typedef int (*Callback)(Map* map, HeapObject* object);
-
- static VisitorDispatchTable<Callback> table_;
-};
-
-
-template <typename StaticVisitor>
-VisitorDispatchTable<typename StaticNewSpaceVisitor<StaticVisitor>::Callback>
- StaticNewSpaceVisitor<StaticVisitor>::table_;
-
-
-// Base class for visitors used to transitively mark the entire heap.
-// IterateBody returns nothing.
-// Certain types of objects might not be handled by this base class and
-// no visitor function is registered by the generic initialization. A
-// specialized visitor function needs to be provided by the inheriting
-// class itself for those cases.
-//
-// This class is intended to be used in the following way:
-//
-// class SomeVisitor : public StaticMarkingVisitor<SomeVisitor> {
-// ...
-// }
-//
-// This is an example of Curiously recurring template pattern.
-template <typename StaticVisitor>
-class StaticMarkingVisitor : public StaticVisitorBase {
- public:
- static void Initialize();
-
- INLINE(static void IterateBody(Map* map, HeapObject* obj)) {
- table_.GetVisitor(map)(map, obj);
- }
-
- INLINE(static void VisitWeakCell(Map* map, HeapObject* object));
- INLINE(static void VisitTransitionArray(Map* map, HeapObject* object));
- INLINE(static void VisitCodeEntry(Heap* heap, HeapObject* object,
- Address entry_address));
- INLINE(static void VisitEmbeddedPointer(Heap* heap, RelocInfo* rinfo));
- INLINE(static void VisitCell(Heap* heap, RelocInfo* rinfo));
- INLINE(static void VisitDebugTarget(Heap* heap, RelocInfo* rinfo));
- INLINE(static void VisitCodeTarget(Heap* heap, RelocInfo* rinfo));
- INLINE(static void VisitCodeAgeSequence(Heap* heap, RelocInfo* rinfo));
- INLINE(static void VisitExternalReference(RelocInfo* rinfo)) {}
- INLINE(static void VisitInternalReference(RelocInfo* rinfo)) {}
- INLINE(static void VisitRuntimeEntry(RelocInfo* rinfo)) {}
- // Skip the weak next code link in a code object.
- INLINE(static void VisitNextCodeLink(Heap* heap, Object** slot)) {}
-
- protected:
- INLINE(static void VisitMap(Map* map, HeapObject* object));
- INLINE(static void VisitCode(Map* map, HeapObject* object));
- INLINE(static void VisitBytecodeArray(Map* map, HeapObject* object));
- INLINE(static void VisitSharedFunctionInfo(Map* map, HeapObject* object));
- INLINE(static void VisitWeakCollection(Map* map, HeapObject* object));
- INLINE(static void VisitJSFunction(Map* map, HeapObject* object));
- INLINE(static void VisitNativeContext(Map* map, HeapObject* object));
-
- // Mark pointers in a Map treating some elements of the descriptor array weak.
- static void MarkMapContents(Heap* heap, Map* map);
-
- // Code flushing support.
- INLINE(static bool IsFlushable(Heap* heap, JSFunction* function));
- INLINE(static bool IsFlushable(Heap* heap, SharedFunctionInfo* shared_info));
-
- // Helpers used by code flushing support that visit pointer fields and treat
- // references to code objects either strongly or weakly.
- static void VisitSharedFunctionInfoStrongCode(Map* map, HeapObject* object);
- static void VisitSharedFunctionInfoWeakCode(Map* map, HeapObject* object);
- static void VisitJSFunctionStrongCode(Map* map, HeapObject* object);
- static void VisitJSFunctionWeakCode(Map* map, HeapObject* object);
-
- class DataObjectVisitor {
- public:
- template <int size>
- static inline void VisitSpecialized(Map* map, HeapObject* object) {}
-
- INLINE(static void Visit(Map* map, HeapObject* object)) {}
- };
-
- typedef FlexibleBodyVisitor<StaticVisitor, FixedArray::BodyDescriptor, void>
- FixedArrayVisitor;
-
- typedef FlexibleBodyVisitor<StaticVisitor, JSObject::FastBodyDescriptor, void>
- JSObjectFastVisitor;
- typedef FlexibleBodyVisitor<StaticVisitor, JSObject::BodyDescriptor, void>
- JSObjectVisitor;
-
- class JSApiObjectVisitor : AllStatic {
- public:
- INLINE(static void Visit(Map* map, HeapObject* object)) {
- TracePossibleWrapper(object);
- JSObjectVisitor::Visit(map, object);
- }
-
- private:
- INLINE(static void TracePossibleWrapper(HeapObject* object)) {
- if (object->GetHeap()->local_embedder_heap_tracer()->InUse()) {
- DCHECK(object->IsJSObject());
- object->GetHeap()->TracePossibleWrapper(JSObject::cast(object));
- }
- }
- };
-
- typedef FlexibleBodyVisitor<StaticVisitor, StructBodyDescriptor, void>
- StructObjectVisitor;
-
- typedef void (*Callback)(Map* map, HeapObject* object);
-
- static VisitorDispatchTable<Callback> table_;
-};
-
-
-template <typename StaticVisitor>
-VisitorDispatchTable<typename StaticMarkingVisitor<StaticVisitor>::Callback>
- StaticMarkingVisitor<StaticVisitor>::table_;
-
#define TYPED_VISITOR_ID_LIST(V) \
V(AllocationSite) \
V(ByteArray) \
@@ -366,20 +32,21 @@ VisitorDispatchTable<typename StaticMarkingVisitor<StaticVisitor>::Callback>
V(JSWeakCollection) \
V(Map) \
V(Oddball) \
+ V(PropertyArray) \
V(PropertyCell) \
V(SeqOneByteString) \
V(SeqTwoByteString) \
V(SharedFunctionInfo) \
V(SlicedString) \
+ V(SmallOrderedHashMap) \
+ V(SmallOrderedHashSet) \
V(Symbol) \
- V(TransitionArray) \
V(ThinString) \
+ V(TransitionArray) \
V(WeakCell)
-// The base class for visitors that need to dispatch on object type.
-// It is similar to StaticVisitor except it uses virtual dispatch
-// instead of static dispatch table. The default behavour of all
-// visit functions is to iterate body of the given object using
+// The base class for visitors that need to dispatch on object type. The default
+// behavior of all visit functions is to iterate the body of the given object using
// the BodyDescriptor of the object.
//
// The visit functions return the size of the object cast to ResultType.
@@ -389,32 +56,95 @@ VisitorDispatchTable<typename StaticMarkingVisitor<StaticVisitor>::Callback>
// class SomeVisitor : public HeapVisitor<ResultType, SomeVisitor> {
// ...
// }
-//
-// This is an example of Curiously recurring template pattern.
-// TODO(ulan): replace static visitors with the HeapVisitor.
template <typename ResultType, typename ConcreteVisitor>
class HeapVisitor : public ObjectVisitor {
public:
- ResultType Visit(HeapObject* object);
+ V8_INLINE ResultType Visit(HeapObject* object);
+ V8_INLINE ResultType Visit(Map* map, HeapObject* object);
protected:
// A guard predicate for visiting the object.
// If it returns false then the default implementations of the Visit*
// functions bailout from iterating the object pointers.
- virtual bool ShouldVisit(HeapObject* object);
+ V8_INLINE bool ShouldVisit(HeapObject* object) { return true; }
+ // Guard predicate for visiting the object's map pointer separately.
+ V8_INLINE bool ShouldVisitMapPointer() { return true; }
// A callback for visiting the map pointer in the object header.
- virtual void VisitMapPointer(HeapObject* host, HeapObject** map);
+ V8_INLINE void VisitMapPointer(HeapObject* host, HeapObject** map);
-#define VISIT(type) virtual ResultType Visit##type(Map* map, type* object);
+#define VISIT(type) V8_INLINE ResultType Visit##type(Map* map, type* object);
TYPED_VISITOR_ID_LIST(VISIT)
#undef VISIT
- virtual ResultType VisitShortcutCandidate(Map* map, ConsString* object);
- virtual ResultType VisitNativeContext(Map* map, Context* object);
- virtual ResultType VisitDataObject(Map* map, HeapObject* object);
- virtual ResultType VisitJSObjectFast(Map* map, JSObject* object);
- virtual ResultType VisitJSApiObject(Map* map, JSObject* object);
- virtual ResultType VisitStruct(Map* map, HeapObject* object);
- virtual ResultType VisitFreeSpace(Map* map, FreeSpace* object);
+ V8_INLINE ResultType VisitShortcutCandidate(Map* map, ConsString* object);
+ V8_INLINE ResultType VisitNativeContext(Map* map, Context* object);
+ V8_INLINE ResultType VisitDataObject(Map* map, HeapObject* object);
+ V8_INLINE ResultType VisitJSObjectFast(Map* map, JSObject* object);
+ V8_INLINE ResultType VisitJSApiObject(Map* map, JSObject* object);
+ V8_INLINE ResultType VisitStruct(Map* map, HeapObject* object);
+ V8_INLINE ResultType VisitFreeSpace(Map* map, FreeSpace* object);
+};
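The declaration above relies on CRTP dispatch: the base visitor casts itself to ConcreteVisitor, so a derived visitor can shadow individual Visit* methods without any virtual calls. A minimal self-contained sketch of that pattern, using invented stand-in types rather than V8's Map/HeapObject:

#include <cstdio>

// Object is a stand-in for a heap object carrying a visitor id; the real code
// switches on Map::visitor_id() instead.
struct Object { int visitor_id; int size; };  // 0 = data object, 1 = struct

template <typename ResultType, typename ConcreteVisitor>
class HeapVisitorSketch {
 public:
  ResultType Visit(Object* object) {
    // CRTP: downcast to the concrete visitor so its methods are statically
    // bound; no virtual dispatch is involved.
    ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this);
    switch (object->visitor_id) {
      case 0: return visitor->VisitDataObject(object);
      case 1: return visitor->VisitStruct(object);
    }
    return ResultType();
  }
  // Default implementations a concrete visitor may shadow.
  ResultType VisitDataObject(Object* object) { return object->size; }
  ResultType VisitStruct(Object* object) { return object->size; }
};

class SizeCountingVisitor : public HeapVisitorSketch<int, SizeCountingVisitor> {
 public:
  int VisitStruct(Object* object) { return object->size - 8; }  // skip a header
};

int main() {
  Object data{0, 16}, strct{1, 32};
  SizeCountingVisitor v;
  std::printf("%d %d\n", v.Visit(&data), v.Visit(&strct));  // 16 24
}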
+
+template <typename ConcreteVisitor>
+class NewSpaceVisitor : public HeapVisitor<int, ConcreteVisitor> {
+ public:
+ V8_INLINE bool ShouldVisitMapPointer() { return false; }
+
+ void VisitCodeEntry(JSFunction* host, Address code_entry) final {
+ // Code is not in new space.
+ }
+
+ // Special cases for young generation.
+
+ V8_INLINE int VisitJSFunction(Map* map, JSFunction* object);
+ V8_INLINE int VisitNativeContext(Map* map, Context* object);
+ V8_INLINE int VisitJSApiObject(Map* map, JSObject* object);
+
+ int VisitBytecodeArray(Map* map, BytecodeArray* object) {
+ UNREACHABLE();
+ return 0;
+ }
+
+ int VisitSharedFunctionInfo(Map* map, SharedFunctionInfo* object) {
+ UNREACHABLE();
+ return 0;
+ }
+};
+
+template <typename ConcreteVisitor>
+class MarkingVisitor : public HeapVisitor<int, ConcreteVisitor> {
+ public:
+ explicit MarkingVisitor(Heap* heap, MarkCompactCollector* collector)
+ : heap_(heap), collector_(collector) {}
+
+ V8_INLINE bool ShouldVisitMapPointer() { return false; }
+
+ V8_INLINE int VisitJSFunction(Map* map, JSFunction* object);
+ V8_INLINE int VisitWeakCell(Map* map, WeakCell* object);
+ V8_INLINE int VisitTransitionArray(Map* map, TransitionArray* object);
+ V8_INLINE int VisitNativeContext(Map* map, Context* object);
+ V8_INLINE int VisitJSWeakCollection(Map* map, JSWeakCollection* object);
+ V8_INLINE int VisitSharedFunctionInfo(Map* map, SharedFunctionInfo* object);
+ V8_INLINE int VisitBytecodeArray(Map* map, BytecodeArray* object);
+ V8_INLINE int VisitCode(Map* map, Code* object);
+ V8_INLINE int VisitMap(Map* map, Map* object);
+ V8_INLINE int VisitJSApiObject(Map* map, JSObject* object);
+ V8_INLINE int VisitAllocationSite(Map* map, AllocationSite* object);
+
+ // ObjectVisitor implementation.
+ V8_INLINE void VisitCodeEntry(JSFunction* host, Address entry_address) final;
+ V8_INLINE void VisitEmbeddedPointer(Code* host, RelocInfo* rinfo) final;
+ V8_INLINE void VisitCellPointer(Code* host, RelocInfo* rinfo) final;
+ V8_INLINE void VisitDebugTarget(Code* host, RelocInfo* rinfo) final;
+ V8_INLINE void VisitCodeTarget(Code* host, RelocInfo* rinfo) final;
+ V8_INLINE void VisitCodeAgeSequence(Code* host, RelocInfo* rinfo) final;
+ // Skip weak next code link.
+ V8_INLINE void VisitNextCodeLink(Code* host, Object** p) final {}
+
+ protected:
+ V8_INLINE void MarkMapContents(Map* map);
+
+ Heap* heap_;
+ MarkCompactCollector* collector_;
};
class WeakObjectRetainer;
diff --git a/deps/v8/src/heap/page-parallel-job.h b/deps/v8/src/heap/page-parallel-job.h
deleted file mode 100644
index 939bdb3b3b..0000000000
--- a/deps/v8/src/heap/page-parallel-job.h
+++ /dev/null
@@ -1,180 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_HEAP_PAGE_PARALLEL_JOB_
-#define V8_HEAP_PAGE_PARALLEL_JOB_
-
-#include "src/allocation.h"
-#include "src/cancelable-task.h"
-#include "src/utils.h"
-#include "src/v8.h"
-
-namespace v8 {
-namespace internal {
-
-class Heap;
-class Isolate;
-
-// This class manages background tasks that process set of pages in parallel.
-// The JobTraits class needs to define:
-// - PerPageData type - state associated with each page.
-// - PerTaskData type - state associated with each task.
-// - static void ProcessPageInParallel(Heap* heap,
-// PerTaskData task_data,
-// MemoryChunk* page,
-// PerPageData page_data)
-template <typename JobTraits>
-class PageParallelJob {
- public:
- // PageParallelJob cannot dynamically create a semaphore because of a bug in
- // glibc. See http://crbug.com/609249 and
- // https://sourceware.org/bugzilla/show_bug.cgi?id=12674.
- // The caller must provide a semaphore with value 0 and ensure that
- // the lifetime of the semaphore is the same as the lifetime of the Isolate.
- // It is guaranteed that the semaphore value will be 0 after Run() call.
- PageParallelJob(Heap* heap, CancelableTaskManager* cancelable_task_manager,
- base::Semaphore* semaphore)
- : heap_(heap),
- cancelable_task_manager_(cancelable_task_manager),
- items_(nullptr),
- num_items_(0),
- num_tasks_(0),
- pending_tasks_(semaphore) {}
-
- ~PageParallelJob() {
- Item* item = items_;
- while (item != nullptr) {
- Item* next = item->next;
- delete item;
- item = next;
- }
- }
-
- void AddPage(MemoryChunk* chunk, typename JobTraits::PerPageData data) {
- Item* item = new Item(chunk, data, items_);
- items_ = item;
- ++num_items_;
- }
-
- int NumberOfPages() const { return num_items_; }
-
- // Returns the number of tasks that were spawned when running the job.
- int NumberOfTasks() const { return num_tasks_; }
-
- // Runs the given number of tasks in parallel and processes the previously
- // added pages. This function blocks until all tasks finish.
- // The callback takes the index of a task and returns data for that task.
- template <typename Callback>
- void Run(int num_tasks, Callback per_task_data_callback) {
- if (num_items_ == 0) return;
- DCHECK_GE(num_tasks, 1);
- CancelableTaskManager::Id task_ids[kMaxNumberOfTasks];
- const int max_num_tasks = Min(
- kMaxNumberOfTasks,
- static_cast<int>(
- V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads()));
- num_tasks_ = Max(1, Min(num_tasks, max_num_tasks));
- int items_per_task = (num_items_ + num_tasks_ - 1) / num_tasks_;
- int start_index = 0;
- Task* main_task = nullptr;
- for (int i = 0; i < num_tasks_; i++, start_index += items_per_task) {
- if (start_index >= num_items_) {
- start_index -= num_items_;
- }
- Task* task = new Task(heap_, items_, num_items_, start_index,
- pending_tasks_, per_task_data_callback(i));
- task_ids[i] = task->id();
- if (i > 0) {
- V8::GetCurrentPlatform()->CallOnBackgroundThread(
- task, v8::Platform::kShortRunningTask);
- } else {
- main_task = task;
- }
- }
- // Contribute on main thread.
- main_task->Run();
- delete main_task;
- // Wait for background tasks.
- for (int i = 0; i < num_tasks_; i++) {
- if (cancelable_task_manager_->TryAbort(task_ids[i]) !=
- CancelableTaskManager::kTaskAborted) {
- pending_tasks_->Wait();
- }
- }
- }
-
- private:
- static const int kMaxNumberOfTasks = 32;
-
- enum ProcessingState { kAvailable, kProcessing, kFinished };
-
- struct Item : public Malloced {
- Item(MemoryChunk* chunk, typename JobTraits::PerPageData data, Item* next)
- : chunk(chunk), state(kAvailable), data(data), next(next) {}
- MemoryChunk* chunk;
- base::AtomicValue<ProcessingState> state;
- typename JobTraits::PerPageData data;
- Item* next;
- };
-
- class Task : public CancelableTask {
- public:
- Task(Heap* heap, Item* items, int num_items, int start_index,
- base::Semaphore* on_finish, typename JobTraits::PerTaskData data)
- : CancelableTask(heap->isolate()),
- heap_(heap),
- items_(items),
- num_items_(num_items),
- start_index_(start_index),
- on_finish_(on_finish),
- data_(data) {}
-
- virtual ~Task() {}
-
- private:
- // v8::internal::CancelableTask overrides.
- void RunInternal() override {
- // Each task starts at a different index to improve parallelization.
- Item* current = items_;
- int skip = start_index_;
- while (skip-- > 0) {
- current = current->next;
- }
- for (int i = 0; i < num_items_; i++) {
- if (current->state.TrySetValue(kAvailable, kProcessing)) {
- JobTraits::ProcessPageInParallel(heap_, data_, current->chunk,
- current->data);
- current->state.SetValue(kFinished);
- }
- current = current->next;
- // Wrap around if needed.
- if (current == nullptr) {
- current = items_;
- }
- }
- on_finish_->Signal();
- }
-
- Heap* heap_;
- Item* items_;
- int num_items_;
- int start_index_;
- base::Semaphore* on_finish_;
- typename JobTraits::PerTaskData data_;
- DISALLOW_COPY_AND_ASSIGN(Task);
- };
-
- Heap* heap_;
- CancelableTaskManager* cancelable_task_manager_;
- Item* items_;
- int num_items_;
- int num_tasks_;
- base::Semaphore* pending_tasks_;
- DISALLOW_COPY_AND_ASSIGN(PageParallelJob);
-};
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_HEAP_PAGE_PARALLEL_JOB_
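The deleted PageParallelJob distributed a linked list of pages over tasks that each start at a different index and claim items with an atomic state transition, so no page is processed twice. A simplified stand-alone model of that claiming scheme using std::thread and std::atomic (no V8 types):

#include <atomic>
#include <cstdio>
#include <thread>
#include <vector>

// Simplified model: std::thread/std::atomic replace V8's CancelableTask and
// semaphore, and "processing a page" is just a counter increment.
enum State { kAvailable, kProcessing, kFinished };

struct Item {
  std::atomic<State> state{kAvailable};
  int page_id = 0;
};

void RunTask(std::vector<Item>* items, int start_index,
             std::atomic<int>* processed) {
  const int n = static_cast<int>(items->size());
  // Each task starts at a different index; atomic claiming keeps work unique.
  for (int i = 0; i < n; i++) {
    Item& item = (*items)[(start_index + i) % n];
    State expected = kAvailable;
    if (item.state.compare_exchange_strong(expected, kProcessing)) {
      processed->fetch_add(1);
      item.state.store(kFinished);
    }
  }
}

int main() {
  std::vector<Item> items(16);
  for (int i = 0; i < 16; i++) items[i].page_id = i;
  std::atomic<int> processed{0};
  std::vector<std::thread> tasks;
  for (int t = 1; t < 4; t++)
    tasks.emplace_back(RunTask, &items, t * 4, &processed);
  RunTask(&items, 0, &processed);  // contribute on the "main thread"
  for (auto& th : tasks) th.join();
  std::printf("processed %d pages\n", processed.load());  // processed 16 pages
}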
diff --git a/deps/v8/src/heap/remembered-set.h b/deps/v8/src/heap/remembered-set.h
index b60cd451ee..5908940d9e 100644
--- a/deps/v8/src/heap/remembered-set.h
+++ b/deps/v8/src/heap/remembered-set.h
@@ -21,14 +21,16 @@ class RememberedSet : public AllStatic {
public:
// Given a page and a slot in that page, this function adds the slot to the
// remembered set.
+ template <AccessMode access_mode = AccessMode::ATOMIC>
static void Insert(MemoryChunk* chunk, Address slot_addr) {
DCHECK(chunk->Contains(slot_addr));
- SlotSet* slot_set = chunk->slot_set<type>();
+ SlotSet* slot_set = chunk->slot_set<type, access_mode>();
if (slot_set == nullptr) {
slot_set = chunk->AllocateSlotSet<type>();
}
uintptr_t offset = slot_addr - chunk->address();
- slot_set[offset / Page::kPageSize].Insert(offset % Page::kPageSize);
+ slot_set[offset / Page::kPageSize].Insert<access_mode>(offset %
+ Page::kPageSize);
}
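Insert() above maps a slot address to an offset within its page and records it in that page's slot set. A simplified stand-alone model of that bookkeeping, using a plain bitset in place of V8's bucketed, atomically updated SlotSet:

#include <bitset>
#include <cstdint>
#include <cstdio>

// Simplified stand-ins: one flat bitset per page; constants are chosen to
// resemble Page::kPageSize and the pointer size, not taken from V8 headers.
constexpr uintptr_t kPageSizeSketch = uintptr_t{1} << 19;  // 512 KB
constexpr uintptr_t kPointerSizeSketch = 8;

struct SlotSetSketch {
  std::bitset<kPageSizeSketch / kPointerSizeSketch> bits;
  void Insert(uintptr_t offset) { bits.set(offset / kPointerSizeSketch); }
  bool Lookup(uintptr_t offset) const {
    return bits.test(offset / kPointerSizeSketch);
  }
};

// Mirrors the shape of RememberedSet::Insert: compute the slot's offset inside
// its page and record it in that page's slot set.
void InsertSlot(SlotSetSketch* slots, uintptr_t page_start,
                uintptr_t slot_addr) {
  uintptr_t offset = slot_addr - page_start;
  slots->Insert(offset);
}

int main() {
  SlotSetSketch slots;
  uintptr_t page_start = 0x100000;
  InsertSlot(&slots, page_start, page_start + 0x48);
  std::printf("%d %d\n", slots.Lookup(0x48) ? 1 : 0,
              slots.Lookup(0x50) ? 1 : 0);  // 1 0
}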
// Given a page and a slot in that page, this function returns true if
@@ -127,15 +129,18 @@ class RememberedSet : public AllStatic {
// Iterates and filters the remembered set in the given memory chunk with
// the given callback. The callback should take (Address slot) and return
// SlotCallbackResult.
+ //
+ // Notice that |mode| can only be of FREE* or PREFREE* if there are no other
+ // threads concurrently inserting slots.
template <typename Callback>
- static void Iterate(MemoryChunk* chunk, Callback callback) {
+ static void Iterate(MemoryChunk* chunk, Callback callback,
+ SlotSet::EmptyBucketMode mode) {
SlotSet* slots = chunk->slot_set<type>();
if (slots != nullptr) {
size_t pages = (chunk->size() + Page::kPageSize - 1) / Page::kPageSize;
int new_count = 0;
for (size_t page = 0; page < pages; page++) {
- new_count +=
- slots[page].Iterate(callback, SlotSet::PREFREE_EMPTY_BUCKETS);
+ new_count += slots[page].Iterate(callback, mode);
}
// Only old-to-old slot sets are released eagerly. Old-new-slot sets are
// released by the sweeper threads.
@@ -145,6 +150,17 @@ class RememberedSet : public AllStatic {
}
}
+ static void PreFreeEmptyBuckets(MemoryChunk* chunk) {
+ DCHECK(type == OLD_TO_NEW);
+ SlotSet* slots = chunk->slot_set<type>();
+ if (slots != nullptr) {
+ size_t pages = (chunk->size() + Page::kPageSize - 1) / Page::kPageSize;
+ for (size_t page = 0; page < pages; page++) {
+ slots[page].PreFreeEmptyBuckets();
+ }
+ }
+ }
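Iterate() above hands every recorded slot to a callback that decides whether the entry is kept or dropped, so stale slots are filtered out while iterating. A minimal stand-alone sketch of that contract with invented types:

#include <cstdint>
#include <cstdio>
#include <vector>

// Invented stand-ins: a plain vector of offsets instead of V8's bucketed
// SlotSet, and a two-value result enum like SlotCallbackResult.
enum SlotCallbackResultSketch { KEEP_SLOT_SKETCH, REMOVE_SLOT_SKETCH };

struct SimpleSlotSet {
  std::vector<uintptr_t> offsets;

  // The callback decides per slot whether to keep it; stale entries are
  // dropped while iterating, and the new count is returned.
  template <typename Callback>
  int Iterate(Callback callback) {
    std::vector<uintptr_t> kept;
    for (uintptr_t offset : offsets)
      if (callback(offset) == KEEP_SLOT_SKETCH) kept.push_back(offset);
    offsets.swap(kept);
    return static_cast<int>(offsets.size());
  }
};

int main() {
  SimpleSlotSet slots;
  slots.offsets = {0x08, 0x10, 0x18};
  // Pretend only the slot at offset 0x10 still points into new space.
  int remaining = slots.Iterate([](uintptr_t offset) {
    return offset == 0x10 ? KEEP_SLOT_SKETCH : REMOVE_SLOT_SKETCH;
  });
  std::printf("%d\n", remaining);  // 1
}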
+
// Given a page and a typed slot in that page, this function adds the slot
// to the remembered set.
static void InsertTyped(Page* page, Address host_addr, SlotType slot_type,
@@ -341,7 +357,6 @@ class UpdateTypedSlotHelper {
break;
}
UNREACHABLE();
- return REMOVE_SLOT;
}
};
@@ -356,7 +371,6 @@ inline SlotType SlotTypeForRelocInfoMode(RelocInfo::Mode rmode) {
return DEBUG_TARGET_SLOT;
}
UNREACHABLE();
- return CLEARED_SLOT;
}
} // namespace internal
diff --git a/deps/v8/src/heap/scavenge-job.h b/deps/v8/src/heap/scavenge-job.h
index f7fbfc1480..c13e8e9205 100644
--- a/deps/v8/src/heap/scavenge-job.h
+++ b/deps/v8/src/heap/scavenge-job.h
@@ -22,11 +22,14 @@ class V8_EXPORT_PRIVATE ScavengeJob {
class IdleTask : public CancelableIdleTask {
public:
explicit IdleTask(Isolate* isolate, ScavengeJob* job)
- : CancelableIdleTask(isolate), job_(job) {}
+ : CancelableIdleTask(isolate), isolate_(isolate), job_(job) {}
// CancelableIdleTask overrides.
void RunInternal(double deadline_in_seconds) override;
+ Isolate* isolate() { return isolate_; }
+
private:
+ Isolate* isolate_;
ScavengeJob* job_;
};
diff --git a/deps/v8/src/heap/scavenger-inl.h b/deps/v8/src/heap/scavenger-inl.h
index 4cc215a83e..38b3ef2a8f 100644
--- a/deps/v8/src/heap/scavenger-inl.h
+++ b/deps/v8/src/heap/scavenger-inl.h
@@ -6,12 +6,187 @@
#define V8_HEAP_SCAVENGER_INL_H_
#include "src/heap/scavenger.h"
+#include "src/objects/map.h"
namespace v8 {
namespace internal {
+namespace {
+
+// Whitelist of objects that are known to contain only data.
+bool ContainsOnlyData(VisitorId visitor_id) {
+ switch (visitor_id) {
+ case kVisitSeqOneByteString:
+ return true;
+ case kVisitSeqTwoByteString:
+ return true;
+ case kVisitByteArray:
+ return true;
+ case kVisitFixedDoubleArray:
+ return true;
+ case kVisitDataObject:
+ return true;
+ default:
+ break;
+ }
+ return false;
+}
+
+} // namespace
+
+void Scavenger::MigrateObject(Map* map, HeapObject* source, HeapObject* target,
+ int size) {
+ // Copy the content of source to target.
+ heap()->CopyBlock(target->address(), source->address(), size);
+
+ // Set the forwarding address.
+ source->set_map_word(MapWord::FromForwardingAddress(target));
+
+ if (V8_UNLIKELY(is_logging_)) {
+ // Update NewSpace stats if necessary.
+ RecordCopiedObject(target);
+ heap()->OnMoveEvent(target, source, size);
+ }
+
+ if (is_incremental_marking_) {
+ heap()->incremental_marking()->TransferColor(source, target);
+ }
+ heap()->UpdateAllocationSite<Heap::kCached>(map, source,
+ &local_pretenuring_feedback_);
+}
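MigrateObject() above copies the payload to its new location and then overwrites the first word of the old copy with a forwarding address, so later visitors can find the moved object. A simplified stand-alone model of that forwarding scheme (FakeObject is an invented stand-in):

#include <cstdio>
#include <cstring>

// FakeObject is a stand-in: its first word doubles as either a map pointer or,
// once evacuated, a forwarding address, like V8's MapWord.
struct FakeObject {
  void* map_or_forwarding;
  int payload[4];
};

void Migrate(FakeObject* source, FakeObject* target) {
  std::memcpy(target, source, sizeof(FakeObject));  // copy the content
  source->map_or_forwarding = target;               // install forwarding word
}

FakeObject* Resolve(FakeObject* object, FakeObject* from_begin,
                    FakeObject* from_end) {
  // If the reference still points into from-space, follow the forwarding word.
  if (object >= from_begin && object < from_end)
    return static_cast<FakeObject*>(object->map_or_forwarding);
  return object;
}

int main() {
  FakeObject from_space[1] = {{nullptr, {1, 2, 3, 4}}};
  FakeObject to_space[1];
  Migrate(&from_space[0], &to_space[0]);
  FakeObject* resolved = Resolve(&from_space[0], from_space, from_space + 1);
  std::printf("%d %d\n", resolved == &to_space[0], resolved->payload[2]);  // 1 3
}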
+
+bool Scavenger::SemiSpaceCopyObject(Map* map, HeapObject** slot,
+ HeapObject* object, int object_size) {
+ DCHECK(heap()->AllowedToBeMigrated(object, NEW_SPACE));
+ AllocationAlignment alignment = object->RequiredAlignment();
+ AllocationResult allocation =
+ allocator_.Allocate<NEW_SPACE>(object_size, alignment);
+
+ HeapObject* target = nullptr;
+ if (allocation.To(&target)) {
+ DCHECK(ObjectMarking::IsWhite(
+ target, heap()->mark_compact_collector()->marking_state(target)));
+ MigrateObject(map, object, target, object_size);
+ *slot = target;
+
+ copied_list_.Insert(target, object_size);
+ copied_size_ += object_size;
+ return true;
+ }
+ return false;
+}
+
+bool Scavenger::PromoteObject(Map* map, HeapObject** slot, HeapObject* object,
+ int object_size) {
+ AllocationAlignment alignment = object->RequiredAlignment();
+ AllocationResult allocation =
+ allocator_.Allocate<OLD_SPACE>(object_size, alignment);
+
+ HeapObject* target = nullptr;
+ if (allocation.To(&target)) {
+ DCHECK(ObjectMarking::IsWhite(
+ target, heap()->mark_compact_collector()->marking_state(target)));
+ MigrateObject(map, object, target, object_size);
+ *slot = target;
+
+ if (!ContainsOnlyData(static_cast<VisitorId>(map->visitor_id()))) {
+ promotion_list_.Push(ObjectAndSize(target, object_size));
+ }
+ promoted_size_ += object_size;
+ return true;
+ }
+ return false;
+}
+
+void Scavenger::EvacuateObjectDefault(Map* map, HeapObject** slot,
+ HeapObject* object, int object_size) {
+ SLOW_DCHECK(object_size <= Page::kAllocatableMemory);
+ SLOW_DCHECK(object->SizeFromMap(map) == object_size);
+
+ if (!heap()->ShouldBePromoted(object->address())) {
+ // A semi-space copy may fail due to fragmentation. In that case, we
+ // try to promote the object.
+ if (SemiSpaceCopyObject(map, slot, object, object_size)) {
+ return;
+ }
+ }
+
+ if (PromoteObject(map, slot, object, object_size)) {
+ return;
+ }
+
+ // If promotion failed, we try to copy the object to the other semi-space
+ if (SemiSpaceCopyObject(map, slot, object, object_size)) return;
+
+ FatalProcessOutOfMemory("Scavenger: semi-space copy\n");
+}
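EvacuateObjectDefault() encodes a fixed fallback order: try a semi-space copy first (unless the object should be promoted), then promotion, then the semi-space copy once more, and only then give up. A tiny stand-alone sketch of that decision order with simulated allocation results (the booleans stand in for real allocation attempts):

#include <cstdio>

struct EvacuationPolicySketch {
  bool should_be_promoted;
  bool new_space_has_room;
  bool old_space_has_room;

  // Stand-ins that simulate allocation success/failure.
  bool TryCopyToNewSpace() const { return new_space_has_room; }
  bool TryPromoteToOldSpace() const { return old_space_has_room; }

  const char* Evacuate() const {
    if (!should_be_promoted && TryCopyToNewSpace()) return "copied";
    if (TryPromoteToOldSpace()) return "promoted";
    if (TryCopyToNewSpace()) return "copied after failed promotion";
    return "out of memory";
  }
};

int main() {
  std::printf("%s\n", EvacuationPolicySketch{false, true, true}.Evacuate());
  std::printf("%s\n", EvacuationPolicySketch{true, true, true}.Evacuate());
  std::printf("%s\n", EvacuationPolicySketch{true, true, false}.Evacuate());
}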
+
+void Scavenger::EvacuateThinString(Map* map, HeapObject** slot,
+ ThinString* object, int object_size) {
+ if (!is_incremental_marking_) {
+ HeapObject* actual = object->actual();
+ *slot = actual;
+ // ThinStrings always refer to internalized strings, which are
+ // always in old space.
+ DCHECK(!heap()->InNewSpace(actual));
+ object->set_map_word(MapWord::FromForwardingAddress(actual));
+ return;
+ }
+
+ EvacuateObjectDefault(map, slot, object, object_size);
+}
+
+void Scavenger::EvacuateShortcutCandidate(Map* map, HeapObject** slot,
+ ConsString* object, int object_size) {
+ DCHECK(IsShortcutCandidate(map->instance_type()));
+ if (!is_incremental_marking_ &&
+ object->unchecked_second() == heap()->empty_string()) {
+ HeapObject* first = HeapObject::cast(object->unchecked_first());
+
+ *slot = first;
+
+ if (!heap()->InNewSpace(first)) {
+ object->set_map_word(MapWord::FromForwardingAddress(first));
+ return;
+ }
+
+ MapWord first_word = first->map_word();
+ if (first_word.IsForwardingAddress()) {
+ HeapObject* target = first_word.ToForwardingAddress();
+
+ *slot = target;
+ object->set_map_word(MapWord::FromForwardingAddress(target));
+ return;
+ }
+
+ EvacuateObject(slot, first_word.ToMap(), first);
+ object->set_map_word(MapWord::FromForwardingAddress(*slot));
+ return;
+ }
+
+ EvacuateObjectDefault(map, slot, object, object_size);
+}
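EvacuateShortcutCandidate() above drops a ConsString wrapper whose right-hand side is the empty string and forwards references straight to the left-hand side instead of copying the wrapper. A simplified stand-alone model of that shortcut (plain pointers stand in for heap objects):

#include <cstdio>
#include <string>

// Plain pointers stand in for heap objects; only the decision is modelled.
struct ConsSketch {
  const std::string* first;
  const std::string* second;
};

// If the right-hand side is the empty string, references can be forwarded
// directly to the left-hand side and the cons cell is never copied.
const void* Shortcut(const ConsSketch& cons, const std::string& empty) {
  if (cons.second == &empty) return cons.first;
  return &cons;
}

int main() {
  std::string empty, hello = "hello", world = "world";
  ConsSketch flattened{&hello, &empty};
  ConsSketch real{&hello, &world};
  std::printf("%d %d\n", Shortcut(flattened, empty) == &hello,
              Shortcut(real, empty) == &real);  // 1 1
}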
+
+void Scavenger::EvacuateObject(HeapObject** slot, Map* map,
+ HeapObject* source) {
+ SLOW_DCHECK(heap_->InFromSpace(source));
+ SLOW_DCHECK(!MapWord::FromMap(map).IsForwardingAddress());
+ int size = source->SizeFromMap(map);
+ switch (static_cast<VisitorId>(map->visitor_id())) {
+ case kVisitThinString:
+ EvacuateThinString(map, slot, ThinString::cast(source), size);
+ break;
+ case kVisitShortcutCandidate:
+ EvacuateShortcutCandidate(map, slot, ConsString::cast(source), size);
+ break;
+ default:
+ EvacuateObjectDefault(map, slot, source, size);
+ break;
+ }
+}
+
void Scavenger::ScavengeObject(HeapObject** p, HeapObject* object) {
- DCHECK(object->GetIsolate()->heap()->InFromSpace(object));
+ DCHECK(heap()->InFromSpace(object));
// We use the first word (where the map pointer usually is) of a heap
// object to record the forwarding pointer. A forwarding pointer can
@@ -28,13 +203,11 @@ void Scavenger::ScavengeObject(HeapObject** p, HeapObject* object) {
return;
}
- object->GetHeap()->UpdateAllocationSite<Heap::kGlobal>(
- object, object->GetHeap()->global_pretenuring_feedback_);
-
+ Map* map = first_word.ToMap();
// AllocationMementos are unrooted and shouldn't survive a scavenge
- DCHECK(object->map() != object->GetHeap()->allocation_memento_map());
+ DCHECK_NE(heap()->allocation_memento_map(), map);
// Call the slow part of scavenge object.
- return ScavengeObjectSlow(p, object);
+ EvacuateObject(p, map, object);
}
SlotCallbackResult Scavenger::CheckAndScavengeObject(Heap* heap,
@@ -61,13 +234,14 @@ SlotCallbackResult Scavenger::CheckAndScavengeObject(Heap* heap,
return REMOVE_SLOT;
}
-// static
-void StaticScavengeVisitor::VisitPointer(Heap* heap, HeapObject* obj,
- Object** p) {
- Object* object = *p;
- if (!heap->InNewSpace(object)) return;
- Scavenger::ScavengeObject(reinterpret_cast<HeapObject**>(p),
- reinterpret_cast<HeapObject*>(object));
+void ScavengeVisitor::VisitPointers(HeapObject* host, Object** start,
+ Object** end) {
+ for (Object** p = start; p < end; p++) {
+ Object* object = *p;
+ if (!heap_->InNewSpace(object)) continue;
+ scavenger_->ScavengeObject(reinterpret_cast<HeapObject**>(p),
+ reinterpret_cast<HeapObject*>(object));
+ }
}
} // namespace internal
diff --git a/deps/v8/src/heap/scavenger.cc b/deps/v8/src/heap/scavenger.cc
index e211388729..41c6176ee3 100644
--- a/deps/v8/src/heap/scavenger.cc
+++ b/deps/v8/src/heap/scavenger.cc
@@ -4,459 +4,140 @@
#include "src/heap/scavenger.h"
-#include "src/contexts.h"
#include "src/heap/heap-inl.h"
-#include "src/heap/incremental-marking.h"
+#include "src/heap/mark-compact-inl.h"
#include "src/heap/objects-visiting-inl.h"
#include "src/heap/scavenger-inl.h"
-#include "src/isolate.h"
-#include "src/log.h"
-#include "src/profiler/heap-profiler.h"
+#include "src/objects-body-descriptors-inl.h"
namespace v8 {
namespace internal {
-enum LoggingAndProfiling {
- LOGGING_AND_PROFILING_ENABLED,
- LOGGING_AND_PROFILING_DISABLED
-};
-
-
-enum MarksHandling { TRANSFER_MARKS, IGNORE_MARKS };
-
-template <MarksHandling marks_handling,
- LoggingAndProfiling logging_and_profiling_mode>
-class ScavengingVisitor : public StaticVisitorBase {
+class IterateAndScavengePromotedObjectsVisitor final : public ObjectVisitor {
public:
- static void Initialize() {
- table_.Register(kVisitSeqOneByteString, &EvacuateSeqOneByteString);
- table_.Register(kVisitSeqTwoByteString, &EvacuateSeqTwoByteString);
- table_.Register(kVisitShortcutCandidate, &EvacuateShortcutCandidate);
- table_.Register(kVisitThinString, &EvacuateThinString);
- table_.Register(kVisitByteArray, &EvacuateByteArray);
- table_.Register(kVisitFixedArray, &EvacuateFixedArray);
- table_.Register(kVisitFixedDoubleArray, &EvacuateFixedDoubleArray);
- table_.Register(kVisitFixedTypedArrayBase, &EvacuateFixedTypedArray);
- table_.Register(kVisitFixedFloat64Array, &EvacuateFixedFloat64Array);
- table_.Register(kVisitJSArrayBuffer,
- &ObjectEvacuationStrategy<POINTER_OBJECT>::Visit);
-
- table_.Register(
- kVisitNativeContext,
- &ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
- Context::kSize>);
-
- table_.Register(
- kVisitConsString,
- &ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
- ConsString::kSize>);
-
- table_.Register(
- kVisitSlicedString,
- &ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
- SlicedString::kSize>);
-
- table_.Register(
- kVisitSymbol,
- &ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
- Symbol::kSize>);
-
- table_.Register(
- kVisitSharedFunctionInfo,
- &ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
- SharedFunctionInfo::kSize>);
-
- table_.Register(kVisitJSWeakCollection,
- &ObjectEvacuationStrategy<POINTER_OBJECT>::Visit);
-
- table_.Register(kVisitJSRegExp,
- &ObjectEvacuationStrategy<POINTER_OBJECT>::Visit);
-
- table_.Register(kVisitJSFunction, &EvacuateJSFunction);
-
- table_.Register(kVisitDataObject,
- &ObjectEvacuationStrategy<DATA_OBJECT>::Visit);
-
- table_.Register(kVisitJSObjectFast,
- &ObjectEvacuationStrategy<POINTER_OBJECT>::Visit);
- table_.Register(kVisitJSObject,
- &ObjectEvacuationStrategy<POINTER_OBJECT>::Visit);
-
- table_.Register(kVisitJSApiObject,
- &ObjectEvacuationStrategy<POINTER_OBJECT>::Visit);
-
- table_.Register(kVisitStruct,
- &ObjectEvacuationStrategy<POINTER_OBJECT>::Visit);
- }
-
- static VisitorDispatchTable<ScavengingCallback>* GetTable() {
- return &table_;
- }
-
- static void EvacuateThinStringNoShortcut(Map* map, HeapObject** slot,
- HeapObject* object) {
- EvacuateObject<POINTER_OBJECT, kWordAligned>(map, slot, object,
- ThinString::kSize);
- }
-
- private:
- enum ObjectContents { DATA_OBJECT, POINTER_OBJECT };
-
- static void RecordCopiedObject(Heap* heap, HeapObject* obj) {
- bool should_record = false;
-#ifdef DEBUG
- should_record = FLAG_heap_stats;
-#endif
- should_record = should_record || FLAG_log_gc;
- if (should_record) {
- if (heap->new_space()->Contains(obj)) {
- heap->new_space()->RecordAllocation(obj);
- } else {
- heap->new_space()->RecordPromotion(obj);
+ IterateAndScavengePromotedObjectsVisitor(Heap* heap, Scavenger* scavenger,
+ bool record_slots)
+ : heap_(heap), scavenger_(scavenger), record_slots_(record_slots) {}
+
+ inline void VisitPointers(HeapObject* host, Object** start,
+ Object** end) final {
+ for (Address slot_address = reinterpret_cast<Address>(start);
+ slot_address < reinterpret_cast<Address>(end);
+ slot_address += kPointerSize) {
+ Object** slot = reinterpret_cast<Object**>(slot_address);
+ Object* target = *slot;
+
+ if (target->IsHeapObject()) {
+ if (heap_->InFromSpace(target)) {
+ scavenger_->ScavengeObject(reinterpret_cast<HeapObject**>(slot),
+ HeapObject::cast(target));
+ target = *slot;
+ if (heap_->InNewSpace(target)) {
+ SLOW_DCHECK(target->IsHeapObject());
+ SLOW_DCHECK(heap_->InToSpace(target));
+ RememberedSet<OLD_TO_NEW>::Insert(Page::FromAddress(slot_address),
+ slot_address);
+ }
+ SLOW_DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(
+ HeapObject::cast(target)));
+ } else if (record_slots_ &&
+ MarkCompactCollector::IsOnEvacuationCandidate(
+ HeapObject::cast(target))) {
+ heap_->mark_compact_collector()->RecordSlot(host, slot, target);
+ }
}
}
}
- // Helper function used by CopyObject to copy a source object to an
- // allocated target object and update the forwarding pointer in the source
- // object. Returns the target object.
- INLINE(static void MigrateObject(Heap* heap, HeapObject* source,
- HeapObject* target, int size)) {
- // If we migrate into to-space, then the to-space top pointer should be
- // right after the target object. Incorporate double alignment
- // over-allocation.
- DCHECK(!heap->InToSpace(target) ||
- target->address() + size == heap->new_space()->top() ||
- target->address() + size + kPointerSize == heap->new_space()->top());
-
- // Make sure that we do not overwrite the promotion queue which is at
- // the end of to-space.
- DCHECK(!heap->InToSpace(target) ||
- heap->promotion_queue()->IsBelowPromotionQueue(
- heap->new_space()->top()));
-
- // Copy the content of source to target.
- heap->CopyBlock(target->address(), source->address(), size);
-
- // Set the forwarding address.
- source->set_map_word(MapWord::FromForwardingAddress(target));
-
- if (logging_and_profiling_mode == LOGGING_AND_PROFILING_ENABLED) {
- // Update NewSpace stats if necessary.
- RecordCopiedObject(heap, target);
- heap->OnMoveEvent(target, source, size);
- }
-
- if (marks_handling == TRANSFER_MARKS) {
- heap->incremental_marking()->TransferColor(source, target);
- }
- }
-
- template <AllocationAlignment alignment>
- static inline bool SemiSpaceCopyObject(Map* map, HeapObject** slot,
- HeapObject* object, int object_size) {
- Heap* heap = map->GetHeap();
-
- DCHECK(heap->AllowedToBeMigrated(object, NEW_SPACE));
- AllocationResult allocation =
- heap->new_space()->AllocateRaw(object_size, alignment);
-
- HeapObject* target = NULL; // Initialization to please compiler.
- if (allocation.To(&target)) {
- // Order is important here: Set the promotion limit before storing a
- // filler for double alignment or migrating the object. Otherwise we
- // may end up overwriting promotion queue entries when we migrate the
- // object.
- heap->promotion_queue()->SetNewLimit(heap->new_space()->top());
-
- MigrateObject(heap, object, target, object_size);
-
- // Update slot to new target.
- *slot = target;
-
- heap->IncrementSemiSpaceCopiedObjectSize(object_size);
- return true;
- }
- return false;
- }
-
-
- template <ObjectContents object_contents, AllocationAlignment alignment>
- static inline bool PromoteObject(Map* map, HeapObject** slot,
- HeapObject* object, int object_size) {
- Heap* heap = map->GetHeap();
-
- AllocationResult allocation =
- heap->old_space()->AllocateRaw(object_size, alignment);
-
- HeapObject* target = NULL; // Initialization to please compiler.
- if (allocation.To(&target)) {
- DCHECK(ObjectMarking::IsWhite(
- target, heap->mark_compact_collector()->marking_state(target)));
- MigrateObject(heap, object, target, object_size);
-
- // Update slot to new target using CAS. A concurrent sweeper thread my
- // filter the slot concurrently.
- HeapObject* old = *slot;
- base::Release_CompareAndSwap(reinterpret_cast<base::AtomicWord*>(slot),
- reinterpret_cast<base::AtomicWord>(old),
- reinterpret_cast<base::AtomicWord>(target));
-
- if (object_contents == POINTER_OBJECT) {
- heap->promotion_queue()->insert(target, object_size);
- }
- heap->IncrementPromotedObjectsSize(object_size);
- return true;
- }
- return false;
- }
-
- template <ObjectContents object_contents, AllocationAlignment alignment>
- static inline void EvacuateObject(Map* map, HeapObject** slot,
- HeapObject* object, int object_size) {
- SLOW_DCHECK(object_size <= Page::kAllocatableMemory);
- SLOW_DCHECK(object->Size() == object_size);
- Heap* heap = map->GetHeap();
-
- if (!heap->ShouldBePromoted(object->address(), object_size)) {
- // A semi-space copy may fail due to fragmentation. In that case, we
- // try to promote the object.
- if (SemiSpaceCopyObject<alignment>(map, slot, object, object_size)) {
- return;
- }
- }
-
- if (PromoteObject<object_contents, alignment>(map, slot, object,
- object_size)) {
- return;
- }
+ inline void VisitCodeEntry(JSFunction* host, Address code_entry_slot) final {
+ // Black allocation is not enabled during Scavenges.
+ DCHECK(!heap_->incremental_marking()->black_allocation());
- // If promotion failed, we try to copy the object to the other semi-space
- if (SemiSpaceCopyObject<alignment>(map, slot, object, object_size)) return;
-
- FatalProcessOutOfMemory("Scavenger: semi-space copy\n");
- }
-
- static inline void EvacuateJSFunction(Map* map, HeapObject** slot,
- HeapObject* object) {
- ObjectEvacuationStrategy<POINTER_OBJECT>::Visit(map, slot, object);
-
- if (marks_handling == IGNORE_MARKS) return;
-
- MapWord map_word = object->map_word();
- DCHECK(map_word.IsForwardingAddress());
- HeapObject* target = map_word.ToForwardingAddress();
-
- // TODO(mlippautz): Notify collector of this object so we don't have to
- // retrieve the state our of thin air.
- if (ObjectMarking::IsBlack(target, MarkingState::Internal(target))) {
- // This object is black and it might not be rescanned by marker.
- // We should explicitly record code entry slot for compaction because
- // promotion queue processing (IteratePromotedObjectPointers) will
- // miss it as it is not HeapObject-tagged.
- Address code_entry_slot =
- target->address() + JSFunction::kCodeEntryOffset;
+ if (ObjectMarking::IsBlack(host, MarkingState::Internal(host))) {
Code* code = Code::cast(Code::GetObjectFromEntryAddress(code_entry_slot));
- map->GetHeap()->mark_compact_collector()->RecordCodeEntrySlot(
- target, code_entry_slot, code);
+ heap_->mark_compact_collector()->RecordCodeEntrySlot(
+ host, code_entry_slot, code);
}
}
- static inline void EvacuateFixedArray(Map* map, HeapObject** slot,
- HeapObject* object) {
- int length = reinterpret_cast<FixedArray*>(object)->synchronized_length();
- int object_size = FixedArray::SizeFor(length);
- EvacuateObject<POINTER_OBJECT, kWordAligned>(map, slot, object,
- object_size);
- }
-
- static inline void EvacuateFixedDoubleArray(Map* map, HeapObject** slot,
- HeapObject* object) {
- int length = reinterpret_cast<FixedDoubleArray*>(object)->length();
- int object_size = FixedDoubleArray::SizeFor(length);
- EvacuateObject<DATA_OBJECT, kDoubleAligned>(map, slot, object, object_size);
- }
-
- static inline void EvacuateFixedTypedArray(Map* map, HeapObject** slot,
- HeapObject* object) {
- int object_size = reinterpret_cast<FixedTypedArrayBase*>(object)->size();
- EvacuateObject<POINTER_OBJECT, kWordAligned>(map, slot, object,
- object_size);
- }
-
- static inline void EvacuateFixedFloat64Array(Map* map, HeapObject** slot,
- HeapObject* object) {
- int object_size = reinterpret_cast<FixedFloat64Array*>(object)->size();
- EvacuateObject<POINTER_OBJECT, kDoubleAligned>(map, slot, object,
- object_size);
- }
-
- static inline void EvacuateByteArray(Map* map, HeapObject** slot,
- HeapObject* object) {
- int object_size = reinterpret_cast<ByteArray*>(object)->ByteArraySize();
- EvacuateObject<DATA_OBJECT, kWordAligned>(map, slot, object, object_size);
- }
-
- static inline void EvacuateSeqOneByteString(Map* map, HeapObject** slot,
- HeapObject* object) {
- int object_size = SeqOneByteString::cast(object)
- ->SeqOneByteStringSize(map->instance_type());
- EvacuateObject<DATA_OBJECT, kWordAligned>(map, slot, object, object_size);
- }
+ private:
+ Heap* const heap_;
+ Scavenger* const scavenger_;
+ const bool record_slots_;
+};
- static inline void EvacuateSeqTwoByteString(Map* map, HeapObject** slot,
- HeapObject* object) {
- int object_size = SeqTwoByteString::cast(object)
- ->SeqTwoByteStringSize(map->instance_type());
- EvacuateObject<DATA_OBJECT, kWordAligned>(map, slot, object, object_size);
+void Scavenger::IterateAndScavengePromotedObject(HeapObject* target, int size) {
+  // We are not collecting slots on new space objects during mutation, thus
+  // we have to scan for pointers to evacuation candidates when we promote
+  // objects. But we should not record any slots in non-black objects. Grey
+  // objects' slots would be rescanned. White objects might not survive until
+  // the end of collection; it would be a violation of the invariant to
+  // record their slots.
+ const bool record_slots =
+ heap()->incremental_marking()->IsCompacting() &&
+ ObjectMarking::IsBlack(target, MarkingState::Internal(target));
+ IterateAndScavengePromotedObjectsVisitor visitor(heap(), this, record_slots);
+ if (target->IsJSFunction()) {
+    // JSFunctions reachable through kNextFunctionLinkOffset are weak. Slots
+    // for these links are recorded during processing of weak lists.
+ JSFunction::BodyDescriptorWeak::IterateBody(target, size, &visitor);
+ } else {
+ target->IterateBody(target->map()->instance_type(), size, &visitor);
}
+}
- static inline void EvacuateShortcutCandidate(Map* map, HeapObject** slot,
- HeapObject* object) {
- DCHECK(IsShortcutCandidate(map->instance_type()));
-
- Heap* heap = map->GetHeap();
-
- if (marks_handling == IGNORE_MARKS &&
- ConsString::cast(object)->unchecked_second() == heap->empty_string()) {
- HeapObject* first =
- HeapObject::cast(ConsString::cast(object)->unchecked_first());
-
- *slot = first;
-
- if (!heap->InNewSpace(first)) {
- object->set_map_word(MapWord::FromForwardingAddress(first));
- return;
- }
-
- MapWord first_word = first->map_word();
- if (first_word.IsForwardingAddress()) {
- HeapObject* target = first_word.ToForwardingAddress();
-
- *slot = target;
- object->set_map_word(MapWord::FromForwardingAddress(target));
- return;
+void Scavenger::Process() {
+  // Threshold at which to switch to processing the promotion list, so that we
+  // avoid allocating too much backing store in the worklist.
+ const int kProcessPromotionListThreshold = kPromotionListSegmentSize / 2;
+ ScavengeVisitor scavenge_visitor(heap(), this);
+
+ bool done;
+ do {
+ done = true;
+ AddressRange range;
+ while ((promotion_list_.LocalPushSegmentSize() <
+ kProcessPromotionListThreshold) &&
+ copied_list_.Pop(&range)) {
+ for (Address current = range.first; current < range.second;) {
+ HeapObject* object = HeapObject::FromAddress(current);
+ int size = object->Size();
+ scavenge_visitor.Visit(object);
+ current += size;
}
-
- Scavenger::ScavengeObjectSlow(slot, first);
- object->set_map_word(MapWord::FromForwardingAddress(*slot));
- return;
- }
-
- int object_size = ConsString::kSize;
- EvacuateObject<POINTER_OBJECT, kWordAligned>(map, slot, object,
- object_size);
- }
-
- static inline void EvacuateThinString(Map* map, HeapObject** slot,
- HeapObject* object) {
- if (marks_handling == IGNORE_MARKS) {
- HeapObject* actual = ThinString::cast(object)->actual();
- *slot = actual;
- // ThinStrings always refer to internalized strings, which are
- // always in old space.
- DCHECK(!map->GetHeap()->InNewSpace(actual));
- object->set_map_word(MapWord::FromForwardingAddress(actual));
- return;
- }
-
- EvacuateObject<POINTER_OBJECT, kWordAligned>(map, slot, object,
- ThinString::kSize);
- }
-
- template <ObjectContents object_contents>
- class ObjectEvacuationStrategy {
- public:
- template <int object_size>
- static inline void VisitSpecialized(Map* map, HeapObject** slot,
- HeapObject* object) {
- EvacuateObject<object_contents, kWordAligned>(map, slot, object,
- object_size);
+ done = false;
}
-
- static inline void Visit(Map* map, HeapObject** slot, HeapObject* object) {
- int object_size = map->instance_size();
- EvacuateObject<object_contents, kWordAligned>(map, slot, object,
- object_size);
+ ObjectAndSize object_and_size;
+ while (promotion_list_.Pop(&object_and_size)) {
+ HeapObject* target = object_and_size.first;
+ int size = object_and_size.second;
+ DCHECK(!target->IsMap());
+ IterateAndScavengePromotedObject(target, size);
+ done = false;
}
- };
-
- static VisitorDispatchTable<ScavengingCallback> table_;
-};
-
-template <MarksHandling marks_handling,
- LoggingAndProfiling logging_and_profiling_mode>
-VisitorDispatchTable<ScavengingCallback>
- ScavengingVisitor<marks_handling, logging_and_profiling_mode>::table_;
-
-// static
-void Scavenger::Initialize() {
- ScavengingVisitor<TRANSFER_MARKS,
- LOGGING_AND_PROFILING_DISABLED>::Initialize();
- ScavengingVisitor<IGNORE_MARKS, LOGGING_AND_PROFILING_DISABLED>::Initialize();
- ScavengingVisitor<TRANSFER_MARKS,
- LOGGING_AND_PROFILING_ENABLED>::Initialize();
- ScavengingVisitor<IGNORE_MARKS, LOGGING_AND_PROFILING_ENABLED>::Initialize();
+ } while (!done);
}
-
-// static
-void Scavenger::ScavengeObjectSlow(HeapObject** p, HeapObject* object) {
- SLOW_DCHECK(object->GetIsolate()->heap()->InFromSpace(object));
- MapWord first_word = object->map_word();
- SLOW_DCHECK(!first_word.IsForwardingAddress());
- Map* map = first_word.ToMap();
- Scavenger* scavenger = map->GetHeap()->scavenge_collector_;
- scavenger->scavenging_visitors_table_.GetVisitor(map)(map, p, object);
-}
-
-
-void Scavenger::SelectScavengingVisitorsTable() {
- bool logging_and_profiling =
- FLAG_verify_predictable || isolate()->logger()->is_logging() ||
- isolate()->is_profiling() ||
- (isolate()->heap_profiler() != NULL &&
- isolate()->heap_profiler()->is_tracking_object_moves());
-
- if (!heap()->incremental_marking()->IsMarking()) {
- if (!logging_and_profiling) {
- scavenging_visitors_table_.CopyFrom(
- ScavengingVisitor<IGNORE_MARKS,
- LOGGING_AND_PROFILING_DISABLED>::GetTable());
- } else {
- scavenging_visitors_table_.CopyFrom(
- ScavengingVisitor<IGNORE_MARKS,
- LOGGING_AND_PROFILING_ENABLED>::GetTable());
- }
- } else {
- if (!logging_and_profiling) {
- scavenging_visitors_table_.CopyFrom(
- ScavengingVisitor<TRANSFER_MARKS,
- LOGGING_AND_PROFILING_DISABLED>::GetTable());
+void Scavenger::RecordCopiedObject(HeapObject* obj) {
+ bool should_record = FLAG_log_gc;
+#ifdef DEBUG
+ should_record = FLAG_heap_stats;
+#endif
+ if (should_record) {
+ if (heap()->new_space()->Contains(obj)) {
+ heap()->new_space()->RecordAllocation(obj);
} else {
- scavenging_visitors_table_.CopyFrom(
- ScavengingVisitor<TRANSFER_MARKS,
- LOGGING_AND_PROFILING_ENABLED>::GetTable());
- }
-
- if (heap()->incremental_marking()->IsCompacting()) {
- // When compacting forbid short-circuiting of cons-strings.
- // Scavenging code relies on the fact that new space object
- // can't be evacuated into evacuation candidate but
- // short-circuiting violates this assumption.
- scavenging_visitors_table_.Register(
- kVisitShortcutCandidate,
- scavenging_visitors_table_.GetVisitorById(kVisitConsString));
- scavenging_visitors_table_.Register(
- kVisitThinString,
- &ScavengingVisitor<TRANSFER_MARKS, LOGGING_AND_PROFILING_DISABLED>::
- EvacuateThinStringNoShortcut);
+ heap()->new_space()->RecordPromotion(obj);
}
}
}
-
-Isolate* Scavenger::isolate() { return heap()->isolate(); }
+void Scavenger::Finalize() {
+ heap()->MergeAllocationSitePretenuringFeedback(local_pretenuring_feedback_);
+ heap()->IncrementSemiSpaceCopiedObjectSize(copied_size_);
+ heap()->IncrementPromotedObjectsSize(promoted_size_);
+ allocator_.Finalize();
+}
void RootScavengeVisitor::VisitRootPointer(Root root, Object** p) {
ScavengePointer(p);
@@ -472,8 +153,8 @@ void RootScavengeVisitor::ScavengePointer(Object** p) {
Object* object = *p;
if (!heap_->InNewSpace(object)) return;
- Scavenger::ScavengeObject(reinterpret_cast<HeapObject**>(p),
- reinterpret_cast<HeapObject*>(object));
+ scavenger_->ScavengeObject(reinterpret_cast<HeapObject**>(p),
+ reinterpret_cast<HeapObject*>(object));
}
} // namespace internal
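
For orientation: the new Scavenger::Process() above alternates between draining the copied-range worklist and the promotion worklist until both are empty, and it switches to the promotion list early so its backing store stays small. Below is a minimal, standalone sketch of that drain loop under those assumptions; the std::deque containers, the ScavengerDrainSketch type, and the callback names are stand-ins invented for this example, not the Worklist views or visitor types from the patch.

#include <cstdint>
#include <deque>
#include <utility>

using Address = uintptr_t;
using AddressRange = std::pair<Address, Address>;  // [start, end) of copied objects
using ObjectAndSize = std::pair<Address, int>;     // promoted object and its size

// Stand-in for the two worklist views held by a scavenger. The callbacks are
// placeholders for the scavenge visitor and the promoted-object iterator.
struct ScavengerDrainSketch {
  std::deque<AddressRange> copied_list;
  std::deque<ObjectAndSize> promotion_list;

  int (*object_size)(Address object) = nullptr;
  void (*visit_copied)(ScavengerDrainSketch*, Address object) = nullptr;
  void (*iterate_promoted)(ScavengerDrainSketch*, Address object, int size) = nullptr;

  void Process(size_t promotion_threshold) {
    bool done;
    do {
      done = true;
      // Drain copied ranges, but stop early once the promotion list grows
      // past the threshold so its backing store stays small.
      while (promotion_list.size() < promotion_threshold && !copied_list.empty()) {
        AddressRange range = copied_list.front();
        copied_list.pop_front();
        for (Address current = range.first; current < range.second;) {
          int size = object_size(current);
          visit_copied(this, current);  // may enqueue more work
          current += size;
        }
        done = false;
      }
      // Drain promoted objects; iterating them may enqueue new copied ranges.
      while (!promotion_list.empty()) {
        ObjectAndSize entry = promotion_list.front();
        promotion_list.pop_front();
        iterate_promoted(this, entry.first, entry.second);
        done = false;
      }
    } while (!done);
  }
};
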
diff --git a/deps/v8/src/heap/scavenger.h b/deps/v8/src/heap/scavenger.h
index 09f2955651..869e4ad5f3 100644
--- a/deps/v8/src/heap/scavenger.h
+++ b/deps/v8/src/heap/scavenger.h
@@ -5,67 +5,173 @@
#ifndef V8_HEAP_SCAVENGER_H_
#define V8_HEAP_SCAVENGER_H_
+#include "src/heap/local-allocator.h"
#include "src/heap/objects-visiting.h"
#include "src/heap/slot-set.h"
+#include "src/heap/worklist.h"
namespace v8 {
namespace internal {
-typedef void (*ScavengingCallback)(Map* map, HeapObject** slot,
- HeapObject* object);
+static const int kCopiedListSegmentSize = 64;
+static const int kPromotionListSegmentSize = 64;
+
+using AddressRange = std::pair<Address, Address>;
+using CopiedList = Worklist<AddressRange, kCopiedListSegmentSize>;
+using ObjectAndSize = std::pair<HeapObject*, int>;
+using PromotionList = Worklist<ObjectAndSize, kPromotionListSegmentSize>;
+
+// A list of copied ranges. Keeps the last consecutive range local and announces
+// all other ranges to a global work list.
+class CopiedRangesList {
+ public:
+ CopiedRangesList(CopiedList* copied_list, int task_id)
+ : current_start_(nullptr),
+ current_end_(nullptr),
+ copied_list_(copied_list, task_id) {}
+
+ ~CopiedRangesList() {
+ CHECK_NULL(current_start_);
+ CHECK_NULL(current_end_);
+ }
+
+ void Insert(HeapObject* object, int size) {
+ const Address object_address = object->address();
+ if (current_end_ != object_address) {
+ if (current_start_ != nullptr) {
+ copied_list_.Push(AddressRange(current_start_, current_end_));
+ }
+ current_start_ = object_address;
+ current_end_ = current_start_ + size;
+ return;
+ }
+ DCHECK_EQ(current_end_, object_address);
+ current_end_ += size;
+ return;
+ }
+
+ bool Pop(AddressRange* entry) {
+ if (copied_list_.Pop(entry)) {
+ return true;
+ } else if (current_start_ != nullptr) {
+ *entry = AddressRange(current_start_, current_end_);
+ current_start_ = current_end_ = nullptr;
+ return true;
+ }
+ return false;
+ }
+
+ private:
+ Address current_start_;
+ Address current_end_;
+ CopiedList::View copied_list_;
+};
class Scavenger {
public:
- explicit Scavenger(Heap* heap) : heap_(heap) {}
+ Scavenger(Heap* heap, bool is_logging, bool is_incremental_marking,
+ CopiedList* copied_list, PromotionList* promotion_list, int task_id)
+ : heap_(heap),
+ promotion_list_(promotion_list, task_id),
+ copied_list_(copied_list, task_id),
+ local_pretenuring_feedback_(kInitialLocalPretenuringFeedbackCapacity),
+ copied_size_(0),
+ promoted_size_(0),
+ allocator_(heap),
+ is_logging_(is_logging),
+ is_incremental_marking_(is_incremental_marking) {}
+
+ // Scavenges an object |object| referenced from slot |p|. |object| is required
+ // to be in from space.
+ inline void ScavengeObject(HeapObject** p, HeapObject* object);
+
+ // Potentially scavenges an object referenced from |slot_address| if it is
+ // indeed a HeapObject and resides in from space.
+ inline SlotCallbackResult CheckAndScavengeObject(Heap* heap,
+ Address slot_address);
+
+ // Processes remaining work (=objects) after single objects have been
+ // manually scavenged using ScavengeObject or CheckAndScavengeObject.
+ void Process();
+
+ // Finalize the Scavenger. Needs to be called from the main thread.
+ void Finalize();
- // Initializes static visitor dispatch tables.
- static void Initialize();
+ private:
+ static const int kInitialLocalPretenuringFeedbackCapacity = 256;
- // Callback function passed to Heap::Iterate etc. Copies an object if
- // necessary, the object might be promoted to an old space. The caller must
- // ensure the precondition that the object is (a) a heap object and (b) in
- // the heap's from space.
- static inline void ScavengeObject(HeapObject** p, HeapObject* object);
- static inline SlotCallbackResult CheckAndScavengeObject(Heap* heap,
- Address slot_address);
+ inline Heap* heap() { return heap_; }
- // Slow part of {ScavengeObject} above.
- static void ScavengeObjectSlow(HeapObject** p, HeapObject* object);
+ // Copies |source| to |target| and sets the forwarding pointer in |source|.
+ V8_INLINE void MigrateObject(Map* map, HeapObject* source, HeapObject* target,
+ int size);
- // Chooses an appropriate static visitor table depending on the current state
- // of the heap (i.e. incremental marking, logging and profiling).
- void SelectScavengingVisitorsTable();
+ V8_INLINE bool SemiSpaceCopyObject(Map* map, HeapObject** slot,
+ HeapObject* object, int object_size);
- Isolate* isolate();
- Heap* heap() { return heap_; }
+ V8_INLINE bool PromoteObject(Map* map, HeapObject** slot, HeapObject* object,
+ int object_size);
- private:
- Heap* heap_;
- VisitorDispatchTable<ScavengingCallback> scavenging_visitors_table_;
+ V8_INLINE void EvacuateObject(HeapObject** slot, Map* map,
+ HeapObject* source);
+
+ // Different cases for object evacuation.
+
+ V8_INLINE void EvacuateObjectDefault(Map* map, HeapObject** slot,
+ HeapObject* object, int object_size);
+
+ V8_INLINE void EvacuateJSFunction(Map* map, HeapObject** slot,
+ JSFunction* object, int object_size);
+
+ inline void EvacuateThinString(Map* map, HeapObject** slot,
+ ThinString* object, int object_size);
+
+ inline void EvacuateShortcutCandidate(Map* map, HeapObject** slot,
+ ConsString* object, int object_size);
+
+ void IterateAndScavengePromotedObject(HeapObject* target, int size);
+
+ void RecordCopiedObject(HeapObject* obj);
+
+ Heap* const heap_;
+ PromotionList::View promotion_list_;
+ CopiedRangesList copied_list_;
+ base::HashMap local_pretenuring_feedback_;
+ size_t copied_size_;
+ size_t promoted_size_;
+ LocalAllocator allocator_;
+ bool is_logging_;
+ bool is_incremental_marking_;
};
// Helper class for turning the scavenger into an object visitor that is also
// filtering out non-HeapObjects and objects which do not reside in new space.
-class RootScavengeVisitor : public RootVisitor {
+class RootScavengeVisitor final : public RootVisitor {
public:
- explicit RootScavengeVisitor(Heap* heap) : heap_(heap) {}
+ RootScavengeVisitor(Heap* heap, Scavenger* scavenger)
+ : heap_(heap), scavenger_(scavenger) {}
- void VisitRootPointer(Root root, Object** p) override;
- void VisitRootPointers(Root root, Object** start, Object** end) override;
+ void VisitRootPointer(Root root, Object** p) final;
+ void VisitRootPointers(Root root, Object** start, Object** end) final;
private:
- inline void ScavengePointer(Object** p);
+ void ScavengePointer(Object** p);
- Heap* heap_;
+ Heap* const heap_;
+ Scavenger* const scavenger_;
};
-
-// Helper class for turning the scavenger into an object visitor that is also
-// filtering out non-HeapObjects and objects which do not reside in new space.
-class StaticScavengeVisitor
- : public StaticNewSpaceVisitor<StaticScavengeVisitor> {
+class ScavengeVisitor final : public NewSpaceVisitor<ScavengeVisitor> {
public:
- static inline void VisitPointer(Heap* heap, HeapObject* object, Object** p);
+ ScavengeVisitor(Heap* heap, Scavenger* scavenger)
+ : heap_(heap), scavenger_(scavenger) {}
+
+ V8_INLINE void VisitPointers(HeapObject* host, Object** start,
+ Object** end) final;
+
+ private:
+ Heap* const heap_;
+ Scavenger* const scavenger_;
};
} // namespace internal
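
The CopiedRangesList above coalesces consecutively copied objects into one [start, end) range, publishes a range to the shared worklist only when a non-adjacent object arrives, and hands out the locally buffered range as a last resort in Pop(). A simplified illustration of that coalescing behaviour follows; RangeCoalescer and the std::vector shared list are example-only stand-ins, and the addresses in the usage comment are made up.

#include <cstdint>
#include <utility>
#include <vector>

using Address = uintptr_t;
using AddressRange = std::pair<Address, Address>;

class RangeCoalescer {
 public:
  explicit RangeCoalescer(std::vector<AddressRange>* shared) : shared_(shared) {}

  // Mirrors CopiedRangesList::Insert: extend the current range if the new
  // object is adjacent, otherwise publish the old range and start a new one.
  void Insert(Address object, int size) {
    if (current_end_ != object) {
      if (current_start_ != 0) {
        shared_->push_back(AddressRange(current_start_, current_end_));
      }
      current_start_ = object;
      current_end_ = object + size;
      return;
    }
    current_end_ += size;
  }

  // Mirrors CopiedRangesList::Pop: prefer shared ranges, fall back to the
  // locally buffered one.
  bool Pop(AddressRange* out) {
    if (!shared_->empty()) {
      *out = shared_->back();
      shared_->pop_back();
      return true;
    }
    if (current_start_ != 0) {
      *out = AddressRange(current_start_, current_end_);
      current_start_ = current_end_ = 0;
      return true;
    }
    return false;
  }

 private:
  Address current_start_ = 0;
  Address current_end_ = 0;
  std::vector<AddressRange>* shared_;
};

// Example: three adjacent 16-byte objects collapse into one range.
//   std::vector<AddressRange> shared;
//   RangeCoalescer c(&shared);
//   c.Insert(0x1000, 16); c.Insert(0x1010, 16); c.Insert(0x1020, 16);
//   AddressRange r;
//   c.Pop(&r);  // r == {0x1000, 0x1030}, nothing was published to |shared|
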
diff --git a/deps/v8/src/heap/sequential-marking-deque.cc b/deps/v8/src/heap/sequential-marking-deque.cc
index a715b3fd85..034ad67dfe 100644
--- a/deps/v8/src/heap/sequential-marking-deque.cc
+++ b/deps/v8/src/heap/sequential-marking-deque.cc
@@ -13,7 +13,8 @@ namespace v8 {
namespace internal {
void SequentialMarkingDeque::SetUp() {
- backing_store_ = new base::VirtualMemory(kMaxSize);
+ backing_store_ =
+ new base::VirtualMemory(kMaxSize, heap_->GetRandomMmapAddr());
backing_store_committed_size_ = 0;
if (backing_store_ == nullptr) {
V8::FatalProcessOutOfMemory("SequentialMarkingDeque::SetUp");
@@ -35,8 +36,7 @@ void SequentialMarkingDeque::StartUsing() {
size_t size = FLAG_force_marking_deque_overflows
? 64 * kPointerSize
: backing_store_committed_size_;
- DCHECK(
- base::bits::IsPowerOfTwo32(static_cast<uint32_t>(size / kPointerSize)));
+ DCHECK(base::bits::IsPowerOfTwo(static_cast<uint32_t>(size / kPointerSize)));
mask_ = static_cast<int>((size / kPointerSize) - 1);
top_ = bottom_ = 0;
overflowed_ = false;
diff --git a/deps/v8/src/heap/sequential-marking-deque.h b/deps/v8/src/heap/sequential-marking-deque.h
index 86098dd730..2ae99c887b 100644
--- a/deps/v8/src/heap/sequential-marking-deque.h
+++ b/deps/v8/src/heap/sequential-marking-deque.h
@@ -72,25 +72,12 @@ class SequentialMarkingDeque {
}
INLINE(HeapObject* Pop()) {
- DCHECK(!IsEmpty());
+ if (IsEmpty()) return nullptr;
top_ = ((top_ - 1) & mask_);
HeapObject* object = array_[top_];
return object;
}
- // Unshift the object into the marking stack if there is room, otherwise mark
- // the deque as overflowed and wait for a rescan of the heap.
- INLINE(bool Unshift(HeapObject* object)) {
- if (IsFull()) {
- SetOverflowed();
- return false;
- } else {
- bottom_ = ((bottom_ - 1) & mask_);
- array_[bottom_] = object;
- return true;
- }
- }
-
// Calls the specified callback on each element of the deque and replaces
// the element with the result of the callback. If the callback returns
// nullptr then the element is removed from the deque.
@@ -100,9 +87,7 @@ class SequentialMarkingDeque {
int i = bottom_;
int new_top = bottom_;
while (i != top_) {
- HeapObject* object = callback(array_[i]);
- if (object) {
- array_[new_top] = object;
+ if (callback(array_[i], &array_[new_top])) {
new_top = (new_top + 1) & mask_;
}
i = (i + 1) & mask_;
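
Two caller-visible contracts change in the sequential marking deque above: Pop() now returns nullptr on an empty deque instead of asserting, and the Update() callback now returns a bool and writes its replacement through an out parameter (returning false drops the element). A hedged sketch of call sites under the new contracts; Object, DrainMarkingDeque, and Forward are placeholders invented for this example, not names from the patch.

// Object stands in for v8::internal::HeapObject in this sketch.
struct Object;

// Draining under the new Pop() contract: emptiness is signalled by nullptr.
template <typename Deque, typename Visitor>
void DrainMarkingDeque(Deque* deque, Visitor* visitor) {
  while (Object* object = deque->Pop()) {
    visitor->Visit(object);
  }
}

// Updating under the new callback contract: write the replacement through the
// out parameter and return whether the element should be kept.
//
//   deque->Update([](Object* old_object, Object** out) {
//     Object* forwarded = Forward(old_object);  // hypothetical helper
//     if (forwarded == nullptr) return false;   // element is dropped
//     *out = forwarded;
//     return true;                              // element is kept/replaced
//   });
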
diff --git a/deps/v8/src/heap/slot-set.h b/deps/v8/src/heap/slot-set.h
index 7612199c3c..64ba266f21 100644
--- a/deps/v8/src/heap/slot-set.h
+++ b/deps/v8/src/heap/slot-set.h
@@ -36,7 +36,7 @@ class SlotSet : public Malloced {
SlotSet() {
for (int i = 0; i < kBuckets; i++) {
- bucket[i].SetValue(nullptr);
+ StoreBucket(&buckets_[i], nullptr);
}
}
@@ -52,16 +52,28 @@ class SlotSet : public Malloced {
// The slot offset specifies a slot at address page_start_ + slot_offset.
// This method should only be called on the main thread because concurrent
// allocation of the bucket is not thread-safe.
+ //
+ // AccessMode defines whether there can be concurrent access on the buckets
+ // or not.
+ template <AccessMode access_mode = AccessMode::ATOMIC>
void Insert(int slot_offset) {
int bucket_index, cell_index, bit_index;
SlotToIndices(slot_offset, &bucket_index, &cell_index, &bit_index);
- base::AtomicValue<uint32_t>* current_bucket = bucket[bucket_index].Value();
- if (current_bucket == nullptr) {
- current_bucket = AllocateBucket();
- bucket[bucket_index].SetValue(current_bucket);
+ Bucket bucket = LoadBucket<access_mode>(&buckets_[bucket_index]);
+ if (bucket == nullptr) {
+ bucket = AllocateBucket();
+ if (!SwapInNewBucket<access_mode>(&buckets_[bucket_index], bucket)) {
+ DeleteArray<uint32_t>(bucket);
+ bucket = LoadBucket<access_mode>(&buckets_[bucket_index]);
+ }
}
- if (!(current_bucket[cell_index].Value() & (1u << bit_index))) {
- current_bucket[cell_index].SetBit(bit_index);
+ // Check that monotonicity is preserved, i.e., once a bucket is set we do
+ // not free it concurrently.
+ DCHECK_NOT_NULL(bucket);
+ DCHECK_EQ(bucket, LoadBucket<access_mode>(&buckets_[bucket_index]));
+ uint32_t mask = 1u << bit_index;
+ if ((LoadCell<access_mode>(&bucket[cell_index]) & mask) == 0) {
+ SetCellBits<access_mode>(&bucket[cell_index], mask);
}
}
@@ -70,25 +82,21 @@ class SlotSet : public Malloced {
bool Contains(int slot_offset) {
int bucket_index, cell_index, bit_index;
SlotToIndices(slot_offset, &bucket_index, &cell_index, &bit_index);
- base::AtomicValue<uint32_t>* current_bucket = bucket[bucket_index].Value();
- if (current_bucket == nullptr) {
- return false;
- }
- return (current_bucket[cell_index].Value() & (1u << bit_index)) != 0;
+ Bucket bucket = LoadBucket(&buckets_[bucket_index]);
+ if (bucket == nullptr) return false;
+ return (LoadCell(&bucket[cell_index]) & (1u << bit_index)) != 0;
}
// The slot offset specifies a slot at address page_start_ + slot_offset.
void Remove(int slot_offset) {
int bucket_index, cell_index, bit_index;
SlotToIndices(slot_offset, &bucket_index, &cell_index, &bit_index);
- base::AtomicValue<uint32_t>* current_bucket = bucket[bucket_index].Value();
- if (current_bucket != nullptr) {
- uint32_t cell = current_bucket[cell_index].Value();
- if (cell) {
- uint32_t bit_mask = 1u << bit_index;
- if (cell & bit_mask) {
- current_bucket[cell_index].ClearBit(bit_index);
- }
+ Bucket bucket = LoadBucket(&buckets_[bucket_index]);
+ if (bucket != nullptr) {
+ uint32_t cell = LoadCell(&bucket[cell_index]);
+ uint32_t bit_mask = 1u << bit_index;
+ if (cell & bit_mask) {
+ ClearCellBits(&bucket[cell_index], bit_mask);
}
}
}
@@ -104,18 +112,24 @@ class SlotSet : public Malloced {
SlotToIndices(end_offset, &end_bucket, &end_cell, &end_bit);
uint32_t start_mask = (1u << start_bit) - 1;
uint32_t end_mask = ~((1u << end_bit) - 1);
+ Bucket bucket;
if (start_bucket == end_bucket && start_cell == end_cell) {
- ClearCell(start_bucket, start_cell, ~(start_mask | end_mask));
+ bucket = LoadBucket(&buckets_[start_bucket]);
+ if (bucket != nullptr) {
+ ClearCellBits(&bucket[start_cell], ~(start_mask | end_mask));
+ }
return;
}
int current_bucket = start_bucket;
int current_cell = start_cell;
- ClearCell(current_bucket, current_cell, ~start_mask);
+ bucket = LoadBucket(&buckets_[current_bucket]);
+ if (bucket != nullptr) {
+ ClearCellBits(&bucket[current_cell], ~start_mask);
+ }
current_cell++;
- base::AtomicValue<uint32_t>* bucket_ptr = bucket[current_bucket].Value();
if (current_bucket < end_bucket) {
- if (bucket_ptr != nullptr) {
- ClearBucket(bucket_ptr, current_cell, kCellsPerBucket);
+ if (bucket != nullptr) {
+ ClearBucket(bucket, current_cell, kCellsPerBucket);
}
// The rest of the current bucket is cleared.
// Move on to the next bucket.
@@ -131,37 +145,35 @@ class SlotSet : public Malloced {
ReleaseBucket(current_bucket);
} else {
DCHECK(mode == KEEP_EMPTY_BUCKETS);
- bucket_ptr = bucket[current_bucket].Value();
- if (bucket_ptr) {
- ClearBucket(bucket_ptr, 0, kCellsPerBucket);
+ bucket = LoadBucket(&buckets_[current_bucket]);
+ if (bucket != nullptr) {
+ ClearBucket(bucket, 0, kCellsPerBucket);
}
}
current_bucket++;
}
// All buckets between start_bucket and end_bucket are cleared.
- bucket_ptr = bucket[current_bucket].Value();
+ bucket = LoadBucket(&buckets_[current_bucket]);
DCHECK(current_bucket == end_bucket && current_cell <= end_cell);
- if (current_bucket == kBuckets || bucket_ptr == nullptr) {
+ if (current_bucket == kBuckets || bucket == nullptr) {
return;
}
while (current_cell < end_cell) {
- bucket_ptr[current_cell].SetValue(0);
+ StoreCell(&bucket[current_cell], 0);
current_cell++;
}
// All cells between start_cell and end_cell are cleared.
DCHECK(current_bucket == end_bucket && current_cell == end_cell);
- ClearCell(end_bucket, end_cell, ~end_mask);
+ ClearCellBits(&bucket[end_cell], ~end_mask);
}
// The slot offset specifies a slot at address page_start_ + slot_offset.
bool Lookup(int slot_offset) {
int bucket_index, cell_index, bit_index;
SlotToIndices(slot_offset, &bucket_index, &cell_index, &bit_index);
- if (bucket[bucket_index].Value() != nullptr) {
- uint32_t cell = bucket[bucket_index].Value()[cell_index].Value();
- return (cell & (1u << bit_index)) != 0;
- }
- return false;
+ Bucket bucket = LoadBucket(&buckets_[bucket_index]);
+ if (bucket == nullptr) return false;
+ return (LoadCell(&bucket[cell_index]) & (1u << bit_index)) != 0;
}
// Iterate over all slots in the set and for each slot invoke the callback.
@@ -178,14 +190,13 @@ class SlotSet : public Malloced {
int Iterate(Callback callback, EmptyBucketMode mode) {
int new_count = 0;
for (int bucket_index = 0; bucket_index < kBuckets; bucket_index++) {
- base::AtomicValue<uint32_t>* current_bucket =
- bucket[bucket_index].Value();
- if (current_bucket != nullptr) {
+ Bucket bucket = LoadBucket(&buckets_[bucket_index]);
+ if (bucket != nullptr) {
int in_bucket_count = 0;
int cell_offset = bucket_index * kBitsPerBucket;
for (int i = 0; i < kCellsPerBucket; i++, cell_offset += kBitsPerCell) {
- if (current_bucket[i].Value()) {
- uint32_t cell = current_bucket[i].Value();
+ uint32_t cell = LoadCell(&bucket[i]);
+ if (cell) {
uint32_t old_cell = cell;
uint32_t mask = 0;
while (cell) {
@@ -201,15 +212,7 @@ class SlotSet : public Malloced {
}
uint32_t new_cell = old_cell & ~mask;
if (old_cell != new_cell) {
- while (!current_bucket[i].TrySetValue(old_cell, new_cell)) {
- // If TrySetValue fails, the cell must have changed. We just
- // have to read the current value of the cell, & it with the
- // computed value, and retry. We can do this, because this
- // method will only be called on the main thread and filtering
- // threads will only remove slots.
- old_cell = current_bucket[i].Value();
- new_cell = old_cell & ~mask;
- }
+ ClearCellBits(&bucket[i], mask);
}
}
}
@@ -222,16 +225,36 @@ class SlotSet : public Malloced {
return new_count;
}
+ void PreFreeEmptyBuckets() {
+ for (int bucket_index = 0; bucket_index < kBuckets; bucket_index++) {
+ Bucket bucket = LoadBucket(&buckets_[bucket_index]);
+ if (bucket != nullptr) {
+ bool found_non_empty_cell = false;
+ int cell_offset = bucket_index * kBitsPerBucket;
+ for (int i = 0; i < kCellsPerBucket; i++, cell_offset += kBitsPerCell) {
+ if (LoadCell(&bucket[i])) {
+ found_non_empty_cell = true;
+ break;
+ }
+ }
+ if (!found_non_empty_cell) {
+ PreFreeEmptyBucket(bucket_index);
+ }
+ }
+ }
+ }
+
void FreeToBeFreedBuckets() {
base::LockGuard<base::Mutex> guard(&to_be_freed_buckets_mutex_);
while (!to_be_freed_buckets_.empty()) {
- base::AtomicValue<uint32_t>* top = to_be_freed_buckets_.top();
+ Bucket top = to_be_freed_buckets_.top();
to_be_freed_buckets_.pop();
- DeleteArray<base::AtomicValue<uint32_t>>(top);
+ DeleteArray<uint32_t>(top);
}
}
private:
+ typedef uint32_t* Bucket;
static const int kMaxSlots = (1 << kPageSizeBits) / kPointerSize;
static const int kCellsPerBucket = 32;
static const int kCellsPerBucketLog2 = 5;
@@ -241,51 +264,88 @@ class SlotSet : public Malloced {
static const int kBitsPerBucketLog2 = kCellsPerBucketLog2 + kBitsPerCellLog2;
static const int kBuckets = kMaxSlots / kCellsPerBucket / kBitsPerCell;
- base::AtomicValue<uint32_t>* AllocateBucket() {
- base::AtomicValue<uint32_t>* result =
- NewArray<base::AtomicValue<uint32_t>>(kCellsPerBucket);
+ Bucket AllocateBucket() {
+ Bucket result = NewArray<uint32_t>(kCellsPerBucket);
for (int i = 0; i < kCellsPerBucket; i++) {
- result[i].SetValue(0);
+ result[i] = 0;
}
return result;
}
- void ClearBucket(base::AtomicValue<uint32_t>* bucket, int start_cell,
- int end_cell) {
+ void ClearBucket(Bucket bucket, int start_cell, int end_cell) {
DCHECK_GE(start_cell, 0);
DCHECK_LE(end_cell, kCellsPerBucket);
int current_cell = start_cell;
while (current_cell < kCellsPerBucket) {
- bucket[current_cell].SetValue(0);
+ StoreCell(&bucket[current_cell], 0);
current_cell++;
}
}
void PreFreeEmptyBucket(int bucket_index) {
- base::AtomicValue<uint32_t>* bucket_ptr = bucket[bucket_index].Value();
- if (bucket_ptr != nullptr) {
+ Bucket bucket = LoadBucket(&buckets_[bucket_index]);
+ if (bucket != nullptr) {
base::LockGuard<base::Mutex> guard(&to_be_freed_buckets_mutex_);
- to_be_freed_buckets_.push(bucket_ptr);
- bucket[bucket_index].SetValue(nullptr);
+ to_be_freed_buckets_.push(bucket);
+ StoreBucket(&buckets_[bucket_index], nullptr);
}
}
void ReleaseBucket(int bucket_index) {
- DeleteArray<base::AtomicValue<uint32_t>>(bucket[bucket_index].Value());
- bucket[bucket_index].SetValue(nullptr);
+ Bucket bucket = LoadBucket(&buckets_[bucket_index]);
+ StoreBucket(&buckets_[bucket_index], nullptr);
+ DeleteArray<uint32_t>(bucket);
}
- void ClearCell(int bucket_index, int cell_index, uint32_t mask) {
- if (bucket_index < kBuckets) {
- base::AtomicValue<uint32_t>* cells = bucket[bucket_index].Value();
- if (cells != nullptr) {
- uint32_t cell = cells[cell_index].Value();
- if (cell) cells[cell_index].SetBits(0, mask);
- }
+ template <AccessMode access_mode = AccessMode::ATOMIC>
+ Bucket LoadBucket(Bucket* bucket) {
+ if (access_mode == AccessMode::ATOMIC)
+ return base::AsAtomicWord::Acquire_Load(bucket);
+ return *bucket;
+ }
+
+ template <AccessMode access_mode = AccessMode::ATOMIC>
+ void StoreBucket(Bucket* bucket, Bucket value) {
+ if (access_mode == AccessMode::ATOMIC) {
+ base::AsAtomicWord::Release_Store(bucket, value);
+ } else {
+ *bucket = value;
+ }
+ }
+
+ template <AccessMode access_mode = AccessMode::ATOMIC>
+ bool SwapInNewBucket(Bucket* bucket, Bucket value) {
+ if (access_mode == AccessMode::ATOMIC) {
+ return base::AsAtomicWord::Release_CompareAndSwap(bucket, nullptr,
+ value) == nullptr;
+ } else {
+ DCHECK_NULL(*bucket);
+ *bucket = value;
+ return true;
+ }
+ }
+
+ template <AccessMode access_mode = AccessMode::ATOMIC>
+ uint32_t LoadCell(uint32_t* cell) {
+ if (access_mode == AccessMode::ATOMIC)
+ return base::AsAtomic32::Acquire_Load(cell);
+ return *cell;
+ }
+
+ void StoreCell(uint32_t* cell, uint32_t value) {
+ base::AsAtomic32::Release_Store(cell, value);
+ }
+
+ void ClearCellBits(uint32_t* cell, uint32_t mask) {
+ base::AsAtomic32::SetBits(cell, 0u, mask);
+ }
+
+ template <AccessMode access_mode = AccessMode::ATOMIC>
+ void SetCellBits(uint32_t* cell, uint32_t mask) {
+ if (access_mode == AccessMode::ATOMIC) {
+ base::AsAtomic32::SetBits(cell, mask, mask);
} else {
- // GCC bug 59124: Emits wrong warnings
- // "array subscript is above array bounds"
- UNREACHABLE();
+ *cell = (*cell & ~mask) | mask;
}
}
@@ -300,10 +360,10 @@ class SlotSet : public Malloced {
*bit_index = slot & (kBitsPerCell - 1);
}
- base::AtomicValue<base::AtomicValue<uint32_t>*> bucket[kBuckets];
+ Bucket buckets_[kBuckets];
Address page_start_;
base::Mutex to_be_freed_buckets_mutex_;
- std::stack<base::AtomicValue<uint32_t>*> to_be_freed_buckets_;
+ std::stack<uint32_t*> to_be_freed_buckets_;
};
enum SlotType {
@@ -330,62 +390,65 @@ class TypedSlotSet {
typedef std::pair<SlotType, uint32_t> TypeAndOffset;
struct TypedSlot {
- TypedSlot() {
- type_and_offset_.SetValue(0);
- host_offset_.SetValue(0);
- }
+ TypedSlot() : type_and_offset_(0), host_offset_(0) {}
- TypedSlot(SlotType type, uint32_t host_offset, uint32_t offset) {
- type_and_offset_.SetValue(TypeField::encode(type) |
- OffsetField::encode(offset));
- host_offset_.SetValue(host_offset);
- }
+ TypedSlot(SlotType type, uint32_t host_offset, uint32_t offset)
+ : type_and_offset_(TypeField::encode(type) |
+ OffsetField::encode(offset)),
+ host_offset_(host_offset) {}
bool operator==(const TypedSlot other) {
- return type_and_offset_.Value() == other.type_and_offset_.Value() &&
- host_offset_.Value() == other.host_offset_.Value();
+ return type_and_offset() == other.type_and_offset() &&
+ host_offset() == other.host_offset();
}
bool operator!=(const TypedSlot other) { return !(*this == other); }
- SlotType type() { return TypeField::decode(type_and_offset_.Value()); }
+ SlotType type() const { return TypeField::decode(type_and_offset()); }
- uint32_t offset() { return OffsetField::decode(type_and_offset_.Value()); }
+ uint32_t offset() const { return OffsetField::decode(type_and_offset()); }
- TypeAndOffset GetTypeAndOffset() {
- uint32_t type_and_offset = type_and_offset_.Value();
- return std::make_pair(TypeField::decode(type_and_offset),
- OffsetField::decode(type_and_offset));
+ TypeAndOffset GetTypeAndOffset() const {
+ uint32_t t_and_o = type_and_offset();
+ return std::make_pair(TypeField::decode(t_and_o),
+ OffsetField::decode(t_and_o));
}
- uint32_t host_offset() { return host_offset_.Value(); }
+ uint32_t type_and_offset() const {
+ return base::AsAtomic32::Acquire_Load(&type_and_offset_);
+ }
+
+ uint32_t host_offset() const {
+ return base::AsAtomic32::Acquire_Load(&host_offset_);
+ }
void Set(TypedSlot slot) {
- type_and_offset_.SetValue(slot.type_and_offset_.Value());
- host_offset_.SetValue(slot.host_offset_.Value());
+ base::AsAtomic32::Release_Store(&type_and_offset_,
+ slot.type_and_offset());
+ base::AsAtomic32::Release_Store(&host_offset_, slot.host_offset());
}
void Clear() {
- type_and_offset_.SetValue(TypeField::encode(CLEARED_SLOT) |
- OffsetField::encode(0));
- host_offset_.SetValue(0);
+ base::AsAtomic32::Release_Store(
+ &type_and_offset_,
+ TypeField::encode(CLEARED_SLOT) | OffsetField::encode(0));
+ base::AsAtomic32::Release_Store(&host_offset_, 0);
}
- base::AtomicValue<uint32_t> type_and_offset_;
- base::AtomicValue<uint32_t> host_offset_;
+ uint32_t type_and_offset_;
+ uint32_t host_offset_;
};
static const int kMaxOffset = 1 << 29;
- explicit TypedSlotSet(Address page_start) : page_start_(page_start) {
- chunk_.SetValue(new Chunk(nullptr, kInitialBufferSize));
- }
+ explicit TypedSlotSet(Address page_start)
+ : page_start_(page_start), top_(new Chunk(nullptr, kInitialBufferSize)) {}
~TypedSlotSet() {
- Chunk* chunk = chunk_.Value();
+ Chunk* chunk = load_top();
while (chunk != nullptr) {
- Chunk* next = chunk->next.Value();
+ Chunk* n = chunk->next();
delete chunk;
- chunk = next;
+ chunk = n;
}
FreeToBeFreedChunks();
}
@@ -394,16 +457,16 @@ class TypedSlotSet {
// This method can only be called on the main thread.
void Insert(SlotType type, uint32_t host_offset, uint32_t offset) {
TypedSlot slot(type, host_offset, offset);
- Chunk* top_chunk = chunk_.Value();
+ Chunk* top_chunk = load_top();
if (!top_chunk) {
top_chunk = new Chunk(nullptr, kInitialBufferSize);
- chunk_.SetValue(top_chunk);
+ set_top(top_chunk);
}
if (!top_chunk->AddSlot(slot)) {
Chunk* new_top_chunk =
- new Chunk(top_chunk, NextCapacity(top_chunk->capacity.Value()));
+ new Chunk(top_chunk, NextCapacity(top_chunk->capacity()));
bool added = new_top_chunk->AddSlot(slot);
- chunk_.SetValue(new_top_chunk);
+ set_top(new_top_chunk);
DCHECK(added);
USE(added);
}
@@ -421,18 +484,17 @@ class TypedSlotSet {
template <typename Callback>
int Iterate(Callback callback, IterationMode mode) {
STATIC_ASSERT(CLEARED_SLOT < 8);
- Chunk* chunk = chunk_.Value();
+ Chunk* chunk = load_top();
Chunk* previous = nullptr;
int new_count = 0;
while (chunk != nullptr) {
- TypedSlot* buffer = chunk->buffer.Value();
- int count = chunk->count.Value();
+ TypedSlot* buf = chunk->buffer();
bool empty = true;
- for (int i = 0; i < count; i++) {
+ for (int i = 0; i < chunk->count(); i++) {
// Order is important here. We have to read out the slot type last to
// observe the concurrent removal case consistently.
- Address host_addr = page_start_ + buffer[i].host_offset();
- TypeAndOffset type_and_offset = buffer[i].GetTypeAndOffset();
+ Address host_addr = page_start_ + buf[i].host_offset();
+ TypeAndOffset type_and_offset = buf[i].GetTypeAndOffset();
SlotType type = type_and_offset.first;
if (type != CLEARED_SLOT) {
Address addr = page_start_ + type_and_offset.second;
@@ -440,26 +502,26 @@ class TypedSlotSet {
new_count++;
empty = false;
} else {
- buffer[i].Clear();
+ buf[i].Clear();
}
}
}
- Chunk* next = chunk->next.Value();
+ Chunk* n = chunk->next();
if (mode == PREFREE_EMPTY_CHUNKS && empty) {
// We remove the chunk from the list but let it still point its next
// chunk to allow concurrent iteration.
if (previous) {
- previous->next.SetValue(next);
+ previous->set_next(n);
} else {
- chunk_.SetValue(next);
+ set_top(n);
}
base::LockGuard<base::Mutex> guard(&to_be_freed_chunks_mutex_);
to_be_freed_chunks_.push(chunk);
} else {
previous = chunk;
}
- chunk = next;
+ chunk = n;
}
return new_count;
}
@@ -474,12 +536,11 @@ class TypedSlotSet {
}
void RemoveInvaldSlots(std::map<uint32_t, uint32_t>& invalid_ranges) {
- Chunk* chunk = chunk_.Value();
+ Chunk* chunk = load_top();
while (chunk != nullptr) {
- TypedSlot* buffer = chunk->buffer.Value();
- int count = chunk->count.Value();
- for (int i = 0; i < count; i++) {
- uint32_t host_offset = buffer[i].host_offset();
+ TypedSlot* buf = chunk->buffer();
+ for (int i = 0; i < chunk->count(); i++) {
+ uint32_t host_offset = buf[i].host_offset();
std::map<uint32_t, uint32_t>::iterator upper_bound =
invalid_ranges.upper_bound(host_offset);
if (upper_bound == invalid_ranges.begin()) continue;
@@ -488,10 +549,10 @@ class TypedSlotSet {
upper_bound--;
DCHECK_LE(upper_bound->first, host_offset);
if (upper_bound->second > host_offset) {
- buffer[i].Clear();
+ buf[i].Clear();
}
}
- chunk = chunk->next.Value();
+ chunk = chunk->next();
}
}
@@ -508,31 +569,55 @@ class TypedSlotSet {
struct Chunk : Malloced {
explicit Chunk(Chunk* next_chunk, int chunk_capacity) {
- count.SetValue(0);
- capacity.SetValue(chunk_capacity);
- buffer.SetValue(NewArray<TypedSlot>(chunk_capacity));
- next.SetValue(next_chunk);
+ next_ = next_chunk;
+ buffer_ = NewArray<TypedSlot>(chunk_capacity);
+ capacity_ = chunk_capacity;
+ count_ = 0;
}
+
+ ~Chunk() { DeleteArray(buffer_); }
+
bool AddSlot(TypedSlot slot) {
- int current_count = count.Value();
- if (current_count == capacity.Value()) return false;
- TypedSlot* current_buffer = buffer.Value();
+ int current_count = count();
+ if (current_count == capacity()) return false;
+ TypedSlot* current_buffer = buffer();
// Order is important here. We have to write the slot first before
// increasing the counter to guarantee that a consistent state is
// observed by concurrent threads.
current_buffer[current_count].Set(slot);
- count.SetValue(current_count + 1);
+ set_count(current_count + 1);
return true;
}
- ~Chunk() { DeleteArray(buffer.Value()); }
- base::AtomicValue<Chunk*> next;
- base::AtomicValue<int> count;
- base::AtomicValue<int> capacity;
- base::AtomicValue<TypedSlot*> buffer;
+
+ Chunk* next() const { return base::AsAtomicWord::Acquire_Load(&next_); }
+
+ void set_next(Chunk* n) {
+ return base::AsAtomicWord::Release_Store(&next_, n);
+ }
+
+ TypedSlot* buffer() const { return buffer_; }
+
+ int32_t capacity() const { return capacity_; }
+
+ int32_t count() const { return base::AsAtomic32::Acquire_Load(&count_); }
+
+ void set_count(int32_t new_value) {
+ base::AsAtomic32::Release_Store(&count_, new_value);
+ }
+
+ private:
+ Chunk* next_;
+ TypedSlot* buffer_;
+ int32_t capacity_;
+ int32_t count_;
};
+ Chunk* load_top() { return base::AsAtomicWord::Acquire_Load(&top_); }
+
+ void set_top(Chunk* c) { base::AsAtomicWord::Release_Store(&top_, c); }
+
Address page_start_;
- base::AtomicValue<Chunk*> chunk_;
+ Chunk* top_;
base::Mutex to_be_freed_chunks_mutex_;
std::stack<Chunk*> to_be_freed_chunks_;
};
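
The reworked SlotSet above drops the base::AtomicValue wrappers in favour of plain words plus explicit acquire/release helpers: a bucket is installed lazily with a release compare-and-swap, the loser of the race deletes its own allocation, and a slot bit is only set with a read-modify-write when a prior load shows it cleared. A minimal standalone sketch of that pattern using std::atomic; the patch itself uses V8's base::AsAtomicWord/AsAtomic32 helpers, and the constants and function names here are illustrative only.

#include <atomic>
#include <cstdint>

static constexpr int kCellsPerBucket = 32;
using Cell = std::atomic<uint32_t>;
using Bucket = Cell*;

// Allocates a bucket with all cells cleared.
Bucket AllocateBucket() {
  Bucket bucket = new Cell[kCellsPerBucket];
  for (int i = 0; i < kCellsPerBucket; i++) {
    bucket[i].store(0, std::memory_order_relaxed);
  }
  return bucket;
}

// Returns the bucket in |slot|, installing a fresh one if it is still empty.
// Mirrors the race handling in SlotSet::Insert: the loser of the race deletes
// its own allocation and uses the winner's bucket.
Bucket GetOrInstallBucket(std::atomic<Bucket>* slot) {
  Bucket bucket = slot->load(std::memory_order_acquire);
  if (bucket != nullptr) return bucket;
  Bucket fresh = AllocateBucket();
  Bucket expected = nullptr;
  if (slot->compare_exchange_strong(expected, fresh,
                                    std::memory_order_acq_rel,
                                    std::memory_order_acquire)) {
    return fresh;
  }
  delete[] fresh;
  return expected;  // updated by compare_exchange_strong on failure
}

// Setting a slot bit: test first, then a read-modify-write only if needed.
void InsertSlotBit(Bucket bucket, int cell_index, int bit_index) {
  uint32_t mask = 1u << bit_index;
  if ((bucket[cell_index].load(std::memory_order_acquire) & mask) == 0) {
    bucket[cell_index].fetch_or(mask, std::memory_order_release);
  }
}
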
diff --git a/deps/v8/src/heap/spaces-inl.h b/deps/v8/src/heap/spaces-inl.h
index 5b44d1dc10..0fef117b7e 100644
--- a/deps/v8/src/heap/spaces-inl.h
+++ b/deps/v8/src/heap/spaces-inl.h
@@ -7,10 +7,7 @@
#include "src/heap/incremental-marking.h"
#include "src/heap/spaces.h"
-#include "src/isolate.h"
#include "src/msan.h"
-#include "src/profiler/heap-profiler.h"
-#include "src/v8memory.h"
namespace v8 {
namespace internal {
@@ -94,36 +91,6 @@ HeapObject* HeapObjectIterator::FromCurrentPage() {
}
// -----------------------------------------------------------------------------
-// MemoryAllocator
-
-#ifdef ENABLE_HEAP_PROTECTION
-
-void MemoryAllocator::Protect(Address start, size_t size) {
- base::OS::Protect(start, size);
-}
-
-
-void MemoryAllocator::Unprotect(Address start, size_t size,
- Executability executable) {
- base::OS::Unprotect(start, size, executable);
-}
-
-
-void MemoryAllocator::ProtectChunkFromPage(Page* page) {
- int id = GetChunkId(page);
- base::OS::Protect(chunks_[id].address(), chunks_[id].size());
-}
-
-
-void MemoryAllocator::UnprotectChunkFromPage(Page* page) {
- int id = GetChunkId(page);
- base::OS::Unprotect(chunks_[id].address(), chunks_[id].size(),
- chunks_[id].owner()->executable() == EXECUTABLE);
-}
-
-#endif
-
-// -----------------------------------------------------------------------------
// SemiSpace
bool SemiSpace::Contains(HeapObject* o) {
@@ -169,61 +136,6 @@ bool NewSpace::FromSpaceContainsSlow(Address a) {
bool NewSpace::ToSpaceContains(Object* o) { return to_space_.Contains(o); }
bool NewSpace::FromSpaceContains(Object* o) { return from_space_.Contains(o); }
-Page* Page::Initialize(Heap* heap, MemoryChunk* chunk, Executability executable,
- SemiSpace* owner) {
- DCHECK_EQ(executable, Executability::NOT_EXECUTABLE);
- bool in_to_space = (owner->id() != kFromSpace);
- chunk->SetFlag(in_to_space ? MemoryChunk::IN_TO_SPACE
- : MemoryChunk::IN_FROM_SPACE);
- DCHECK(!chunk->IsFlagSet(in_to_space ? MemoryChunk::IN_FROM_SPACE
- : MemoryChunk::IN_TO_SPACE));
- Page* page = static_cast<Page*>(chunk);
- heap->incremental_marking()->SetNewSpacePageFlags(page);
- page->AllocateLocalTracker();
- if (FLAG_minor_mc) {
- page->AllocateYoungGenerationBitmap();
- MarkingState::External(page).ClearLiveness();
- }
- return page;
-}
-
-// --------------------------------------------------------------------------
-// PagedSpace
-
-template <Page::InitializationMode mode>
-Page* Page::Initialize(Heap* heap, MemoryChunk* chunk, Executability executable,
- PagedSpace* owner) {
- Page* page = reinterpret_cast<Page*>(chunk);
- DCHECK(page->area_size() <= kAllocatableMemory);
- DCHECK(chunk->owner() == owner);
-
- owner->IncreaseCapacity(page->area_size());
- heap->incremental_marking()->SetOldSpacePageFlags(chunk);
-
- // Make sure that categories are initialized before freeing the area.
- page->InitializeFreeListCategories();
- // In the case we do not free the memory, we effectively account for the whole
- // page as allocated memory that cannot be used for further allocations.
- if (mode == kFreeMemory) {
- owner->Free(page->area_start(), page->area_size());
- }
-
- return page;
-}
-
-Page* Page::ConvertNewToOld(Page* old_page) {
- DCHECK(!old_page->is_anchor());
- DCHECK(old_page->InNewSpace());
- OldSpace* old_space = old_page->heap()->old_space();
- old_page->set_owner(old_space);
- old_page->SetFlags(0, static_cast<uintptr_t>(~0));
- old_space->AccountCommitted(old_page->size());
- Page* new_page = Page::Initialize<kDoNotFreeMemory>(
- old_page->heap(), old_page, NOT_EXECUTABLE, old_space);
- new_page->InsertAfter(old_space->anchor()->prev_page());
- return new_page;
-}
-
void Page::InitializeFreeListCategories() {
for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
categories_[i].Initialize(static_cast<FreeListCategoryType>(i));
@@ -334,7 +246,6 @@ MemoryChunk* MemoryChunkIterator::next() {
break;
}
UNREACHABLE();
- return nullptr;
}
Page* FreeListCategory::page() const {
@@ -576,27 +487,6 @@ MUST_USE_RESULT inline AllocationResult NewSpace::AllocateRawSynchronized(
return AllocateRaw(size_in_bytes, alignment);
}
-LargePage* LargePage::Initialize(Heap* heap, MemoryChunk* chunk,
- Executability executable, Space* owner) {
- if (executable && chunk->size() > LargePage::kMaxCodePageSize) {
- STATIC_ASSERT(LargePage::kMaxCodePageSize <= TypedSlotSet::kMaxOffset);
- FATAL("Code page is too large.");
- }
- heap->incremental_marking()->SetOldSpacePageFlags(chunk);
-
- MSAN_ALLOCATED_UNINITIALIZED_MEMORY(chunk->area_start(), chunk->area_size());
-
- // Initialize the owner field for each contained page (except the first, which
- // is initialized by MemoryChunk::Initialize).
- for (Address addr = chunk->address() + Page::kPageSize + Page::kOwnerOffset;
- addr < chunk->area_end(); addr += Page::kPageSize) {
- // Clear out kPageHeaderTag.
- Memory::Address_at(addr) = 0;
- }
-
- return static_cast<LargePage*>(chunk);
-}
-
size_t LargeObjectSpace::Available() {
return ObjectSizeFor(heap()->memory_allocator()->Available());
}
diff --git a/deps/v8/src/heap/spaces.cc b/deps/v8/src/heap/spaces.cc
index 3e67788828..6f4546c816 100644
--- a/deps/v8/src/heap/spaces.cc
+++ b/deps/v8/src/heap/spaces.cc
@@ -123,8 +123,10 @@ bool CodeRange::SetUp(size_t requested) {
DCHECK(!kRequiresCodeRange || requested <= kMaximalCodeRangeSize);
code_range_ = new base::VirtualMemory(
- requested, Max(kCodeRangeAreaAlignment,
- static_cast<size_t>(base::OS::AllocateAlignment())));
+ requested,
+ Max(kCodeRangeAreaAlignment,
+ static_cast<size_t>(base::OS::AllocateAlignment())),
+ base::OS::GetRandomMmapAddr());
CHECK(code_range_ != NULL);
if (!code_range_->IsReserved()) {
delete code_range_;
@@ -300,7 +302,7 @@ MemoryAllocator::MemoryAllocator(Isolate* isolate)
size_executable_(0),
lowest_ever_allocated_(reinterpret_cast<void*>(-1)),
highest_ever_allocated_(reinterpret_cast<void*>(0)),
- unmapper_(isolate->heap(), this) {}
+ unmapper_(this) {}
bool MemoryAllocator::SetUp(size_t capacity, size_t code_range_size) {
capacity_ = RoundUp(capacity, Page::kPageSize);
@@ -332,46 +334,40 @@ void MemoryAllocator::TearDown() {
code_range_ = nullptr;
}
-class MemoryAllocator::Unmapper::UnmapFreeMemoryTask : public CancelableTask {
+class MemoryAllocator::Unmapper::UnmapFreeMemoryTask : public v8::Task {
public:
- explicit UnmapFreeMemoryTask(Isolate* isolate, Unmapper* unmapper)
- : CancelableTask(isolate), unmapper_(unmapper) {}
+ explicit UnmapFreeMemoryTask(Unmapper* unmapper) : unmapper_(unmapper) {}
private:
- void RunInternal() override {
+ // v8::Task overrides.
+ void Run() override {
unmapper_->PerformFreeMemoryOnQueuedChunks<FreeMode::kUncommitPooled>();
unmapper_->pending_unmapping_tasks_semaphore_.Signal();
}
- Unmapper* const unmapper_;
+ Unmapper* unmapper_;
DISALLOW_COPY_AND_ASSIGN(UnmapFreeMemoryTask);
};
void MemoryAllocator::Unmapper::FreeQueuedChunks() {
ReconsiderDelayedChunks();
- if (heap_->use_tasks() && FLAG_concurrent_sweeping) {
- if (concurrent_unmapping_tasks_active_ >= kMaxUnmapperTasks) {
- // kMaxUnmapperTasks are already running. Avoid creating any more.
- return;
- }
- UnmapFreeMemoryTask* task = new UnmapFreeMemoryTask(heap_->isolate(), this);
- DCHECK_LT(concurrent_unmapping_tasks_active_, kMaxUnmapperTasks);
- task_ids_[concurrent_unmapping_tasks_active_++] = task->id();
+ if (FLAG_concurrent_sweeping) {
V8::GetCurrentPlatform()->CallOnBackgroundThread(
- task, v8::Platform::kShortRunningTask);
+ new UnmapFreeMemoryTask(this), v8::Platform::kShortRunningTask);
+ concurrent_unmapping_tasks_active_++;
} else {
PerformFreeMemoryOnQueuedChunks<FreeMode::kUncommitPooled>();
}
}
-void MemoryAllocator::Unmapper::WaitUntilCompleted() {
- for (int i = 0; i < concurrent_unmapping_tasks_active_; i++) {
- if (heap_->isolate()->cancelable_task_manager()->TryAbort(task_ids_[i]) !=
- CancelableTaskManager::kTaskAborted) {
- pending_unmapping_tasks_semaphore_.Wait();
- }
- concurrent_unmapping_tasks_active_ = 0;
+bool MemoryAllocator::Unmapper::WaitUntilCompleted() {
+ bool waited = false;
+ while (concurrent_unmapping_tasks_active_ > 0) {
+ pending_unmapping_tasks_semaphore_.Wait();
+ concurrent_unmapping_tasks_active_--;
+ waited = true;
}
+ return waited;
}
template <MemoryAllocator::Unmapper::FreeMode mode>
@@ -398,7 +394,7 @@ void MemoryAllocator::Unmapper::PerformFreeMemoryOnQueuedChunks() {
}
void MemoryAllocator::Unmapper::TearDown() {
- CHECK_EQ(0, concurrent_unmapping_tasks_active_);
+ WaitUntilCompleted();
ReconsiderDelayedChunks();
CHECK(delayed_regular_chunks_.empty());
PerformFreeMemoryOnQueuedChunks<FreeMode::kReleasePooled>();
@@ -422,7 +418,7 @@ bool MemoryAllocator::CanFreeMemoryChunk(MemoryChunk* chunk) {
// because the memory chunk can be in the queue of a sweeper task.
// Chunks in old generation are unmapped if they are empty.
DCHECK(chunk->InNewSpace() || chunk->SweepingDone());
- return !chunk->InNewSpace() || mc == nullptr || !FLAG_concurrent_sweeping ||
+ return !chunk->InNewSpace() || mc == nullptr ||
!mc->sweeper().sweeping_in_progress();
}
@@ -466,23 +462,29 @@ void MemoryAllocator::FreeMemory(Address base, size_t size,
}
Address MemoryAllocator::ReserveAlignedMemory(size_t size, size_t alignment,
+ void* hint,
base::VirtualMemory* controller) {
- base::VirtualMemory reservation(size, alignment);
+ base::VirtualMemory reservation(size, alignment, hint);
- if (!reservation.IsReserved()) return NULL;
- size_.Increment(reservation.size());
- Address base =
+ if (!reservation.IsReserved()) return nullptr;
+ const Address base =
RoundUp(static_cast<Address>(reservation.address()), alignment);
+ if (base + size != reservation.end()) {
+ const Address unused_start = RoundUp(base + size, GetCommitPageSize());
+ reservation.ReleasePartial(unused_start);
+ }
+ size_.Increment(reservation.size());
controller->TakeControl(&reservation);
return base;
}
Address MemoryAllocator::AllocateAlignedMemory(
size_t reserve_size, size_t commit_size, size_t alignment,
- Executability executable, base::VirtualMemory* controller) {
+ Executability executable, void* hint, base::VirtualMemory* controller) {
DCHECK(commit_size <= reserve_size);
base::VirtualMemory reservation;
- Address base = ReserveAlignedMemory(reserve_size, alignment, &reservation);
+ Address base =
+ ReserveAlignedMemory(reserve_size, alignment, hint, &reservation);
if (base == NULL) return NULL;
if (executable == EXECUTABLE) {
@@ -518,6 +520,23 @@ void Page::InitializeAsAnchor(Space* space) {
SetFlag(ANCHOR);
}
+Heap* MemoryChunk::synchronized_heap() {
+ return reinterpret_cast<Heap*>(
+ base::Acquire_Load(reinterpret_cast<base::AtomicWord*>(&heap_)));
+}
+
+void MemoryChunk::InitializationMemoryFence() {
+ base::MemoryFence();
+#ifdef THREAD_SANITIZER
+ // Since TSAN does not process memory fences, we use the following annotation
+  // to tell TSAN that there is no data race when emitting an
+  // InitializationMemoryFence. Note that the other thread still needs to
+ // perform MemoryChunk::synchronized_heap().
+ base::Release_Store(reinterpret_cast<base::AtomicWord*>(&heap_),
+ reinterpret_cast<base::AtomicWord>(heap_));
+#endif
+}
+
MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
Address area_start, Address area_end,
Executability executable, Space* owner,
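
The MemoryChunk::InitializationMemoryFence() added above pairs with synchronized_heap(): the initializing thread finishes all plain writes and then publishes the chunk, while a background thread that acquire-loads the heap_ field is guaranteed to observe those earlier writes. A minimal sketch of the same publish/consume pattern with std::atomic; the Chunk and Heap types and the field layout are stand-ins for this example, not the V8 structures.

#include <atomic>

struct Heap;

struct Chunk {
  // Fields written during initialization...
  int flags = 0;
  // ...and the field other threads read to detect a fully published chunk.
  std::atomic<Heap*> heap{nullptr};
};

// Writer (main thread): initialize everything with plain stores, then publish
// the chunk with a release store.
void Publish(Chunk* chunk, Heap* heap) {
  chunk->flags = 1;                                    // example plain write
  chunk->heap.store(heap, std::memory_order_release);  // publication point
}

// Reader (background thread): an acquire load guarantees that all writes made
// before the release store are visible once a non-null heap is observed.
Heap* SynchronizedHeap(Chunk* chunk) {
  return chunk->heap.load(std::memory_order_acquire);
}
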
@@ -533,10 +552,12 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
chunk->flags_ = Flags(NO_FLAGS);
chunk->set_owner(owner);
chunk->InitializeReservedMemory();
- chunk->slot_set_[OLD_TO_NEW].SetValue(nullptr);
- chunk->slot_set_[OLD_TO_OLD].SetValue(nullptr);
- chunk->typed_slot_set_[OLD_TO_NEW].SetValue(nullptr);
- chunk->typed_slot_set_[OLD_TO_OLD].SetValue(nullptr);
+ base::AsAtomicWord::Release_Store(&chunk->slot_set_[OLD_TO_NEW], nullptr);
+ base::AsAtomicWord::Release_Store(&chunk->slot_set_[OLD_TO_OLD], nullptr);
+ base::AsAtomicWord::Release_Store(&chunk->typed_slot_set_[OLD_TO_NEW],
+ nullptr);
+ base::AsAtomicWord::Release_Store(&chunk->typed_slot_set_[OLD_TO_OLD],
+ nullptr);
chunk->skip_list_ = nullptr;
chunk->progress_bar_ = 0;
chunk->high_water_mark_.SetValue(static_cast<intptr_t>(area_start - base));
@@ -560,10 +581,83 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
if (reservation != nullptr) {
chunk->reservation_.TakeControl(reservation);
}
-
return chunk;
}
+template <Page::InitializationMode mode>
+Page* Page::Initialize(Heap* heap, MemoryChunk* chunk, Executability executable,
+ PagedSpace* owner) {
+ Page* page = reinterpret_cast<Page*>(chunk);
+ DCHECK(page->area_size() <= kAllocatableMemory);
+ DCHECK(chunk->owner() == owner);
+
+ owner->IncreaseCapacity(page->area_size());
+ heap->incremental_marking()->SetOldSpacePageFlags(chunk);
+
+ // Make sure that categories are initialized before freeing the area.
+ page->InitializeFreeListCategories();
+  // In the case where we do not free the memory, we effectively account for
+  // the whole page as allocated memory that cannot be used for further
+  // allocations.
+ if (mode == kFreeMemory) {
+ owner->Free(page->area_start(), page->area_size());
+ }
+ page->InitializationMemoryFence();
+ return page;
+}
+
+Page* Page::Initialize(Heap* heap, MemoryChunk* chunk, Executability executable,
+ SemiSpace* owner) {
+ DCHECK_EQ(executable, Executability::NOT_EXECUTABLE);
+ bool in_to_space = (owner->id() != kFromSpace);
+ chunk->SetFlag(in_to_space ? MemoryChunk::IN_TO_SPACE
+ : MemoryChunk::IN_FROM_SPACE);
+ DCHECK(!chunk->IsFlagSet(in_to_space ? MemoryChunk::IN_FROM_SPACE
+ : MemoryChunk::IN_TO_SPACE));
+ Page* page = static_cast<Page*>(chunk);
+ heap->incremental_marking()->SetNewSpacePageFlags(page);
+ page->AllocateLocalTracker();
+ if (FLAG_minor_mc) {
+ page->AllocateYoungGenerationBitmap();
+ MarkingState::External(page).ClearLiveness();
+ }
+ page->InitializationMemoryFence();
+ return page;
+}
+
+LargePage* LargePage::Initialize(Heap* heap, MemoryChunk* chunk,
+ Executability executable, Space* owner) {
+ if (executable && chunk->size() > LargePage::kMaxCodePageSize) {
+ STATIC_ASSERT(LargePage::kMaxCodePageSize <= TypedSlotSet::kMaxOffset);
+ FATAL("Code page is too large.");
+ }
+ heap->incremental_marking()->SetOldSpacePageFlags(chunk);
+
+ MSAN_ALLOCATED_UNINITIALIZED_MEMORY(chunk->area_start(), chunk->area_size());
+
+ // Initialize the owner field for each contained page (except the first, which
+ // is initialized by MemoryChunk::Initialize).
+ for (Address addr = chunk->address() + Page::kPageSize + Page::kOwnerOffset;
+ addr < chunk->area_end(); addr += Page::kPageSize) {
+ // Clear out kPageHeaderTag.
+ Memory::Address_at(addr) = 0;
+ }
+ LargePage* page = static_cast<LargePage*>(chunk);
+ page->InitializationMemoryFence();
+ return page;
+}
+
+Page* Page::ConvertNewToOld(Page* old_page) {
+ DCHECK(!old_page->is_anchor());
+ DCHECK(old_page->InNewSpace());
+ OldSpace* old_space = old_page->heap()->old_space();
+ old_page->set_owner(old_space);
+ old_page->SetFlags(0, static_cast<uintptr_t>(~0));
+ old_space->AccountCommitted(old_page->size());
+ Page* new_page = Page::Initialize<kDoNotFreeMemory>(
+ old_page->heap(), old_page, NOT_EXECUTABLE, old_space);
+ new_page->InsertAfter(old_space->anchor()->prev_page());
+ return new_page;
+}
// Commit MemoryChunk area to the requested size.
bool MemoryChunk::CommitArea(size_t requested) {
@@ -640,22 +734,6 @@ void MemoryChunk::Unlink() {
set_next_chunk(NULL);
}
-void MemoryAllocator::ShrinkChunk(MemoryChunk* chunk, size_t bytes_to_shrink) {
- DCHECK_GE(bytes_to_shrink, static_cast<size_t>(GetCommitPageSize()));
- DCHECK_EQ(0u, bytes_to_shrink % GetCommitPageSize());
- Address free_start = chunk->area_end_ - bytes_to_shrink;
- // Don't adjust the size of the page. The area is just uncomitted but not
- // released.
- chunk->area_end_ -= bytes_to_shrink;
- UncommitBlock(free_start, bytes_to_shrink);
- if (chunk->IsFlagSet(MemoryChunk::IS_EXECUTABLE)) {
- if (chunk->reservation_.IsReserved())
- chunk->reservation_.Guard(chunk->area_end_);
- else
- base::OS::Guard(chunk->area_end_, GetCommitPageSize());
- }
-}
-
MemoryChunk* MemoryAllocator::AllocateChunk(size_t reserve_area_size,
size_t commit_area_size,
Executability executable,
@@ -668,6 +746,7 @@ MemoryChunk* MemoryAllocator::AllocateChunk(size_t reserve_area_size,
base::VirtualMemory reservation;
Address area_start = nullptr;
Address area_end = nullptr;
+ void* address_hint = heap->GetRandomMmapAddr();
//
// MemoryChunk layout:
@@ -727,7 +806,7 @@ MemoryChunk* MemoryAllocator::AllocateChunk(size_t reserve_area_size,
} else {
base = AllocateAlignedMemory(chunk_size, commit_size,
MemoryChunk::kAlignment, executable,
- &reservation);
+ address_hint, &reservation);
if (base == NULL) return NULL;
// Update executable memory size.
size_executable_.Increment(reservation.size());
@@ -748,7 +827,7 @@ MemoryChunk* MemoryAllocator::AllocateChunk(size_t reserve_area_size,
GetCommitPageSize());
base =
AllocateAlignedMemory(chunk_size, commit_size, MemoryChunk::kAlignment,
- executable, &reservation);
+ executable, address_hint, &reservation);
if (base == NULL) return NULL;
@@ -803,6 +882,11 @@ size_t Page::AvailableInFreeList() {
}
size_t Page::ShrinkToHighWaterMark() {
+ // Shrinking only makes sense outside of the CodeRange, where we don't care
+ // about address space fragmentation.
+ base::VirtualMemory* reservation = reserved_memory();
+ if (!reservation->IsReserved()) return 0;
+
// Shrink pages to high water mark. The water mark points either to a filler
// or the area_end.
HeapObject* filler = HeapObject::FromAddress(HighWaterMark());
@@ -832,6 +916,7 @@ size_t Page::ShrinkToHighWaterMark() {
static_cast<size_t>(area_end() - filler->address() - FreeSpace::kSize),
MemoryAllocator::GetCommitPageSize());
if (unused > 0) {
+ DCHECK_EQ(0u, unused % MemoryAllocator::GetCommitPageSize());
if (FLAG_trace_gc_verbose) {
PrintIsolate(heap()->isolate(), "Shrinking page %p: end %p -> %p\n",
reinterpret_cast<void*>(this),
@@ -842,7 +927,8 @@ size_t Page::ShrinkToHighWaterMark() {
filler->address(),
static_cast<int>(area_end() - filler->address() - unused),
ClearRecordedSlots::kNo);
- heap()->memory_allocator()->ShrinkChunk(this, unused);
+ heap()->memory_allocator()->PartialFreeMemory(
+ this, address() + size() - unused, unused, area_end() - unused);
CHECK(filler->IsFiller());
CHECK_EQ(filler->address() + filler->Size(), area_end());
}
@@ -856,8 +942,9 @@ void Page::CreateBlackArea(Address start, Address end) {
DCHECK_EQ(Page::FromAddress(end - 1), this);
MarkingState::Internal(this).bitmap()->SetRange(AddressToMarkbitIndex(start),
AddressToMarkbitIndex(end));
- MarkingState::Internal(this).IncrementLiveBytes(
- static_cast<int>(end - start));
+ MarkingState::Internal(this)
+ .IncrementLiveBytes<IncrementalMarking::kAtomicity>(
+ static_cast<int>(end - start));
}
void Page::DestroyBlackArea(Address start, Address end) {
@@ -867,29 +954,33 @@ void Page::DestroyBlackArea(Address start, Address end) {
DCHECK_EQ(Page::FromAddress(end - 1), this);
MarkingState::Internal(this).bitmap()->ClearRange(
AddressToMarkbitIndex(start), AddressToMarkbitIndex(end));
- MarkingState::Internal(this).IncrementLiveBytes(
- -static_cast<int>(end - start));
+ MarkingState::Internal(this)
+ .IncrementLiveBytes<IncrementalMarking::kAtomicity>(
+ -static_cast<int>(end - start));
}
-void MemoryAllocator::PartialFreeMemory(MemoryChunk* chunk,
- Address start_free) {
- // We do not allow partial shrink for code.
- DCHECK(chunk->executable() == NOT_EXECUTABLE);
-
- intptr_t size;
+void MemoryAllocator::PartialFreeMemory(MemoryChunk* chunk, Address start_free,
+ size_t bytes_to_free,
+ Address new_area_end) {
base::VirtualMemory* reservation = chunk->reserved_memory();
DCHECK(reservation->IsReserved());
- size = static_cast<intptr_t>(reservation->size());
-
- size_t to_free_size = size - (start_free - chunk->address());
-
- DCHECK(size_.Value() >= to_free_size);
- size_.Decrement(to_free_size);
+ chunk->size_ -= bytes_to_free;
+ chunk->area_end_ = new_area_end;
+ if (chunk->IsFlagSet(MemoryChunk::IS_EXECUTABLE)) {
+ DCHECK_EQ(0, reinterpret_cast<uintptr_t>(chunk->area_end_) %
+ static_cast<uintptr_t>(GetCommitPageSize()));
+ DCHECK_EQ(chunk->address() + chunk->size(),
+ chunk->area_end() + CodePageGuardSize());
+ reservation->Guard(chunk->area_end_);
+ }
+  // On e.g. Windows, a reservation may be larger than a page, so a partial
+  // release starting at |start_free| also frees the potentially unused part
+  // behind the current page.
+ const size_t released_bytes = reservation->ReleasePartial(start_free);
+ DCHECK_GE(size_.Value(), released_bytes);
+ size_.Decrement(released_bytes);
isolate_->counters()->memory_allocated()->Decrement(
- static_cast<int>(to_free_size));
- chunk->set_size(size - to_free_size);
-
- reservation->ReleasePartial(start_free);
+ static_cast<int>(released_bytes));
}
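
To make the new PartialFreeMemory() contract concrete, here is a small self-contained sketch of the arithmetic that Page::ShrinkToHighWaterMark() above performs before calling it. The 4 KB commit page size, the FreeSpace::kSize value, and the addresses are illustrative assumptions, not values taken from the patch.

#include <cstdint>
#include <cstdio>

int main() {
  const uintptr_t kCommitPageSize = 4 * 1024;  // assumed GetCommitPageSize()
  const uintptr_t kFreeSpaceSize = 24;         // assumed FreeSpace::kSize

  uintptr_t area_end = 0x40000;  // end of the page's usable area
  uintptr_t filler = 0x32010;    // filler object at the high water mark

  // Round the unused tail down to whole commit pages while keeping room for a
  // minimal FreeSpace filler, matching the DCHECK on |unused| added above.
  uintptr_t unused =
      ((area_end - filler - kFreeSpaceSize) / kCommitPageSize) * kCommitPageSize;

  // PartialFreeMemory(chunk, start_free, bytes_to_free, new_area_end) is then
  // called with start_free = address() + size() - unused, bytes_to_free =
  // unused, and new_area_end = area_end() - unused, as in the call site above.
  std::printf("unused tail: %lu bytes, new area_end: %#lx\n",
              static_cast<unsigned long>(unused),
              static_cast<unsigned long>(area_end - unused));
  return 0;
}
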
void MemoryAllocator::PreFreeMemory(MemoryChunk* chunk) {
@@ -1077,7 +1168,7 @@ size_t MemoryAllocator::CodePageAreaEndOffset() {
intptr_t MemoryAllocator::GetCommitPageSize() {
if (FLAG_v8_os_page_size != 0) {
- DCHECK(base::bits::IsPowerOfTwo32(FLAG_v8_os_page_size));
+ DCHECK(base::bits::IsPowerOfTwo(FLAG_v8_os_page_size));
return FLAG_v8_os_page_size * KB;
} else {
return base::OS::CommitPageSize();
@@ -1117,6 +1208,10 @@ bool MemoryAllocator::CommitExecutableMemory(base::VirtualMemory* vm,
// -----------------------------------------------------------------------------
// MemoryChunk implementation
+bool MemoryChunk::contains_array_buffers() {
+ return local_tracker() != nullptr && !local_tracker()->IsEmpty();
+}
+
void MemoryChunk::ReleaseAllocatedMemory() {
if (skip_list_ != nullptr) {
delete skip_list_;
@@ -1150,12 +1245,13 @@ template SlotSet* MemoryChunk::AllocateSlotSet<OLD_TO_OLD>();
template <RememberedSetType type>
SlotSet* MemoryChunk::AllocateSlotSet() {
SlotSet* slot_set = AllocateAndInitializeSlotSet(size_, address());
- if (!slot_set_[type].TrySetValue(nullptr, slot_set)) {
+ SlotSet* old_slot_set = base::AsAtomicWord::Release_CompareAndSwap(
+ &slot_set_[type], nullptr, slot_set);
+ if (old_slot_set != nullptr) {
delete[] slot_set;
- slot_set = slot_set_[type].Value();
- DCHECK(slot_set);
- return slot_set;
+ slot_set = old_slot_set;
}
+ DCHECK(slot_set);
return slot_set;
}
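
AllocateSlotSet() above is a lazy-initialization race: every thread may build a candidate slot set, the first CAS with release semantics publishes it, and losers delete their copy and adopt the winner. A minimal sketch of the same pattern using std::atomic as a stand-in for V8's base::AsAtomicWord helper (ExampleSlotSet is a placeholder type, not V8's SlotSet):

#include <atomic>
#include <vector>

// Placeholder payload; the real SlotSet is a V8-internal type.
struct ExampleSlotSet {
  std::vector<int> slots;
};

std::atomic<ExampleSlotSet*> g_slot_set{nullptr};

// Returns the slot set that won the publication race. Losing threads delete
// their candidate and use the winner, mirroring MemoryChunk::AllocateSlotSet().
ExampleSlotSet* GetOrAllocateSlotSet() {
  ExampleSlotSet* candidate = new ExampleSlotSet();
  ExampleSlotSet* expected = nullptr;
  if (!g_slot_set.compare_exchange_strong(expected, candidate,
                                          std::memory_order_acq_rel,
                                          std::memory_order_acquire)) {
    delete candidate;  // another thread published first
    return expected;   // compare_exchange_strong stored the winner here
  }
  return candidate;
}

The release/acquire pairing ensures that a thread observing the published pointer also observes the fully initialized slot set.
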
@@ -1164,10 +1260,10 @@ template void MemoryChunk::ReleaseSlotSet<OLD_TO_OLD>();
template <RememberedSetType type>
void MemoryChunk::ReleaseSlotSet() {
- SlotSet* slot_set = slot_set_[type].Value();
+ SlotSet* slot_set = slot_set_[type];
if (slot_set) {
+ slot_set_[type] = nullptr;
delete[] slot_set;
- slot_set_[type].SetValue(nullptr);
}
}
@@ -1176,14 +1272,15 @@ template TypedSlotSet* MemoryChunk::AllocateTypedSlotSet<OLD_TO_OLD>();
template <RememberedSetType type>
TypedSlotSet* MemoryChunk::AllocateTypedSlotSet() {
- TypedSlotSet* slot_set = new TypedSlotSet(address());
- if (!typed_slot_set_[type].TrySetValue(nullptr, slot_set)) {
- delete slot_set;
- slot_set = typed_slot_set_[type].Value();
- DCHECK(slot_set);
- return slot_set;
+ TypedSlotSet* typed_slot_set = new TypedSlotSet(address());
+ TypedSlotSet* old_value = base::AsAtomicWord::Release_CompareAndSwap(
+ &typed_slot_set_[type], nullptr, typed_slot_set);
+ if (old_value != nullptr) {
+ delete typed_slot_set;
+ typed_slot_set = old_value;
}
- return slot_set;
+ DCHECK(typed_slot_set);
+ return typed_slot_set;
}
template void MemoryChunk::ReleaseTypedSlotSet<OLD_TO_NEW>();
@@ -1191,10 +1288,10 @@ template void MemoryChunk::ReleaseTypedSlotSet<OLD_TO_OLD>();
template <RememberedSetType type>
void MemoryChunk::ReleaseTypedSlotSet() {
- TypedSlotSet* typed_slot_set = typed_slot_set_[type].Value();
+ TypedSlotSet* typed_slot_set = typed_slot_set_[type];
if (typed_slot_set) {
+ typed_slot_set_[type] = nullptr;
delete typed_slot_set;
- typed_slot_set_[type].SetValue(nullptr);
}
}
@@ -1315,6 +1412,8 @@ void PagedSpace::RefillFreeList() {
}
void PagedSpace::MergeCompactionSpace(CompactionSpace* other) {
+ base::LockGuard<base::Mutex> guard(mutex());
+
DCHECK(identity() == other->identity());
// Unmerged fields:
// area_size_
@@ -1409,9 +1508,7 @@ void PagedSpace::ShrinkImmortalImmovablePages() {
DCHECK(page->IsFlagSet(Page::NEVER_EVACUATE));
size_t unused = page->ShrinkToHighWaterMark();
accounting_stats_.DecreaseCapacity(static_cast<intptr_t>(unused));
- // Do not account for the unused space as uncommitted because the counter
- // is kept in sync with page size which is also not adjusted for those
- // chunks.
+ AccountUncommitted(unused);
}
}
@@ -1502,8 +1599,9 @@ void PagedSpace::EmptyAllocationInfo() {
MarkingState::Internal(page).bitmap()->ClearRange(
page->AddressToMarkbitIndex(current_top),
page->AddressToMarkbitIndex(current_limit));
- MarkingState::Internal(page).IncrementLiveBytes(
- -static_cast<int>(current_limit - current_top));
+ MarkingState::Internal(page)
+ .IncrementLiveBytes<IncrementalMarking::kAtomicity>(
+ -static_cast<int>(current_limit - current_top));
}
}
@@ -1582,14 +1680,16 @@ void PagedSpace::Verify(ObjectVisitor* visitor) {
// All the interior pointers should be contained in the heap.
int size = object->Size();
object->IterateBody(map->instance_type(), size, visitor);
- if (ObjectMarking::IsBlack(object, MarkingState::Internal(object))) {
+ if (ObjectMarking::IsBlack<IncrementalMarking::kAtomicity>(
+ object, MarkingState::Internal(object))) {
black_size += size;
}
CHECK(object->address() + size <= top);
end_of_previous_object = object->address() + size;
}
- CHECK_LE(black_size, MarkingState::Internal(page).live_bytes());
+ CHECK_LE(black_size,
+ MarkingState::Internal(page).live_bytes<AccessMode::ATOMIC>());
}
CHECK(allocation_pointer_found_in_space);
}
@@ -1601,7 +1701,7 @@ void PagedSpace::Verify(ObjectVisitor* visitor) {
bool NewSpace::SetUp(size_t initial_semispace_capacity,
size_t maximum_semispace_capacity) {
DCHECK(initial_semispace_capacity <= maximum_semispace_capacity);
- DCHECK(base::bits::IsPowerOfTwo32(
+ DCHECK(base::bits::IsPowerOfTwo(
static_cast<uint32_t>(maximum_semispace_capacity)));
to_space_.SetUp(initial_semispace_capacity, maximum_semispace_capacity);
@@ -1687,7 +1787,6 @@ void NewSpace::Shrink() {
}
bool NewSpace::Rebalance() {
- CHECK(heap()->promotion_queue()->is_empty());
// Order here is important to make use of the page pool.
return to_space_.EnsureCurrentCapacity() &&
from_space_.EnsureCurrentCapacity();
@@ -1783,6 +1882,8 @@ LocalAllocationBuffer& LocalAllocationBuffer::operator=(
void NewSpace::UpdateAllocationInfo() {
MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
allocation_info_.Reset(to_space_.page_low(), to_space_.page_high());
+ original_top_.SetValue(top());
+ original_limit_.SetValue(limit());
UpdateInlineAllocationLimit(0);
DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}
@@ -1830,10 +1931,6 @@ bool NewSpace::AddFreshPage() {
// Clear remainder of current page.
Address limit = Page::FromAllocationAreaAddress(top)->area_end();
- if (heap()->gc_state() == Heap::SCAVENGE) {
- heap()->promotion_queue()->SetNewLimit(limit);
- }
-
int remaining_in_page = static_cast<int>(limit - top);
heap()->CreateFillerObjectAt(top, remaining_in_page, ClearRecordedSlots::kNo);
UpdateAllocationInfo();
@@ -2220,7 +2317,6 @@ void SemiSpace::set_age_mark(Address mark) {
std::unique_ptr<ObjectIterator> SemiSpace::GetObjectIterator() {
// Use the NewSpace::NewObjectIterator to iterate the ToSpace.
UNREACHABLE();
- return std::unique_ptr<ObjectIterator>();
}
#ifdef DEBUG
@@ -2708,7 +2804,7 @@ HeapObject* FreeList::Allocate(size_t size_in_bytes) {
owner_->EmptyAllocationInfo();
owner_->heap()->StartIncrementalMarkingIfAllocationLimitIsReached(
- Heap::kNoGCFlags, kNoGCCallbackFlags);
+ Heap::kNoGCFlags, kGCCallbackScheduleIdleGarbageCollection);
size_t new_node_size = 0;
FreeSpace* new_node = FindNodeFor(size_in_bytes, &new_node_size);
@@ -2844,7 +2940,7 @@ size_t FreeListCategory::SumFreeList() {
FreeSpace* cur = top();
while (cur != NULL) {
DCHECK(cur->map() == cur->GetHeap()->root(Heap::kFreeSpaceMapRootIndex));
- sum += cur->nobarrier_size();
+ sum += cur->relaxed_read_size();
cur = cur->next();
}
return sum;
@@ -3146,15 +3242,16 @@ AllocationResult LargeObjectSpace::AllocateRaw(int object_size,
reinterpret_cast<Object**>(object->address())[1] = Smi::kZero;
}
- heap()->StartIncrementalMarkingIfAllocationLimitIsReached(Heap::kNoGCFlags,
- kNoGCCallbackFlags);
+ heap()->StartIncrementalMarkingIfAllocationLimitIsReached(
+ Heap::kNoGCFlags, kGCCallbackScheduleIdleGarbageCollection);
AllocationStep(object->address(), object_size);
heap()->CreateFillerObjectAt(object->address(), object_size,
ClearRecordedSlots::kNo);
if (heap()->incremental_marking()->black_allocation()) {
- ObjectMarking::WhiteToBlack(object, MarkingState::Internal(object));
+ ObjectMarking::WhiteToBlack<IncrementalMarking::kAtomicity>(
+ object, MarkingState::Internal(object));
}
return object;
}
@@ -3246,18 +3343,24 @@ void LargeObjectSpace::RemoveChunkMapEntries(LargePage* page,
}
void LargeObjectSpace::FreeUnmarkedObjects() {
- LargePage* previous = NULL;
+ LargePage* previous = nullptr;
LargePage* current = first_page_;
- while (current != NULL) {
+ while (current != nullptr) {
HeapObject* object = current->GetObject();
DCHECK(!ObjectMarking::IsGrey(object, MarkingState::Internal(object)));
if (ObjectMarking::IsBlack(object, MarkingState::Internal(object))) {
Address free_start;
if ((free_start = current->GetAddressToShrink()) != 0) {
- // TODO(hpayer): Perform partial free concurrently.
+ DCHECK(!current->IsFlagSet(Page::IS_EXECUTABLE));
current->ClearOutOfLiveRangeSlots(free_start);
RemoveChunkMapEntries(current, free_start);
- heap()->memory_allocator()->PartialFreeMemory(current, free_start);
+ const size_t bytes_to_free =
+ current->size() - (free_start - current->address());
+ heap()->memory_allocator()->PartialFreeMemory(
+ current, free_start, bytes_to_free,
+ current->area_start() + object->Size());
+ size_ -= bytes_to_free;
+ AccountUncommitted(bytes_to_free);
}
previous = current;
current = current->next_page();
@@ -3265,7 +3368,7 @@ void LargeObjectSpace::FreeUnmarkedObjects() {
LargePage* page = current;
// Cut the chunk out from the chunk list.
current = current->next_page();
- if (previous == NULL) {
+ if (previous == nullptr) {
first_page_ = current;
} else {
previous->set_next_page(current);
@@ -3326,7 +3429,8 @@ void LargeObjectSpace::Verify() {
CHECK(object->IsAbstractCode() || object->IsSeqString() ||
object->IsExternalString() || object->IsThinString() ||
object->IsFixedArray() || object->IsFixedDoubleArray() ||
- object->IsByteArray() || object->IsFreeSpace());
+ object->IsPropertyArray() || object->IsByteArray() ||
+ object->IsFreeSpace());
// The object itself should look OK.
object->ObjectVerify();
@@ -3349,6 +3453,16 @@ void LargeObjectSpace::Verify() {
CHECK(element_object->map()->IsMap());
}
}
+ } else if (object->IsPropertyArray()) {
+ PropertyArray* array = PropertyArray::cast(object);
+ for (int j = 0; j < array->length(); j++) {
+ Object* property = array->get(j);
+ if (property->IsHeapObject()) {
+ HeapObject* property_object = HeapObject::cast(property);
+ CHECK(heap()->Contains(property_object));
+ CHECK(property_object->map()->IsMap());
+ }
+ }
}
}
}
diff --git a/deps/v8/src/heap/spaces.h b/deps/v8/src/heap/spaces.h
index 5c37482ac2..a8394dd486 100644
--- a/deps/v8/src/heap/spaces.h
+++ b/deps/v8/src/heap/spaces.h
@@ -14,8 +14,8 @@
#include "src/base/atomicops.h"
#include "src/base/bits.h"
#include "src/base/hashmap.h"
+#include "src/base/iterator.h"
#include "src/base/platform/mutex.h"
-#include "src/cancelable-task.h"
#include "src/flags.h"
#include "src/globals.h"
#include "src/heap/heap.h"
@@ -237,6 +237,13 @@ class FreeListCategory {
// any heap object.
class MemoryChunk {
public:
+ // Use with std data structures.
+ struct Hasher {
+ size_t operator()(Page* const p) const {
+ return reinterpret_cast<size_t>(p) >> kPageSizeBits;
+ }
+ };
+
enum Flag {
NO_FLAGS = 0u,
IS_EXECUTABLE = 1u << 0,
@@ -434,32 +441,42 @@ class MemoryChunk {
inline Heap* heap() const { return heap_; }
+ Heap* synchronized_heap();
+
inline SkipList* skip_list() { return skip_list_; }
inline void set_skip_list(SkipList* skip_list) { skip_list_ = skip_list; }
- template <RememberedSetType type>
+ template <RememberedSetType type, AccessMode access_mode = AccessMode::ATOMIC>
SlotSet* slot_set() {
- return slot_set_[type].Value();
+ if (access_mode == AccessMode::ATOMIC)
+ return base::AsAtomicWord::Acquire_Load(&slot_set_[type]);
+ return slot_set_[type];
}
- template <RememberedSetType type>
+ template <RememberedSetType type, AccessMode access_mode = AccessMode::ATOMIC>
TypedSlotSet* typed_slot_set() {
- return typed_slot_set_[type].Value();
+ if (access_mode == AccessMode::ATOMIC)
+ return base::AsAtomicWord::Acquire_Load(&typed_slot_set_[type]);
+ return typed_slot_set_[type];
}
- inline LocalArrayBufferTracker* local_tracker() { return local_tracker_; }
-
template <RememberedSetType type>
SlotSet* AllocateSlotSet();
+ // Not safe to be called concurrently.
template <RememberedSetType type>
void ReleaseSlotSet();
template <RememberedSetType type>
TypedSlotSet* AllocateTypedSlotSet();
+ // Not safe to be called concurrently.
template <RememberedSetType type>
void ReleaseTypedSlotSet();
+
void AllocateLocalTracker();
void ReleaseLocalTracker();
+ inline LocalArrayBufferTracker* local_tracker() { return local_tracker_; }
+ bool contains_array_buffers();
+
void AllocateYoungGenerationBitmap();
void ReleaseYoungGenerationBitmap();
@@ -581,6 +598,10 @@ class MemoryChunk {
base::VirtualMemory* reserved_memory() { return &reservation_; }
+ // Emits a memory barrier. For TSAN builds the other thread needs to perform
+ // MemoryChunk::synchronized_heap() to simulate the barrier.
+ void InitializationMemoryFence();
+
size_t size_;
Flags flags_;
@@ -608,9 +629,8 @@ class MemoryChunk {
// A single slot set for small pages (of size kPageSize) or an array of slot
// set for large pages. In the latter case the number of entries in the array
// is ceil(size() / kPageSize).
- base::AtomicValue<SlotSet*> slot_set_[NUMBER_OF_REMEMBERED_SET_TYPES];
- base::AtomicValue<TypedSlotSet*>
- typed_slot_set_[NUMBER_OF_REMEMBERED_SET_TYPES];
+ SlotSet* slot_set_[NUMBER_OF_REMEMBERED_SET_TYPES];
+ TypedSlotSet* typed_slot_set_[NUMBER_OF_REMEMBERED_SET_TYPES];
SkipList* skip_list_;
@@ -675,7 +695,7 @@ class MarkingState {
MarkingState(Bitmap* bitmap, intptr_t* live_bytes)
: bitmap_(bitmap), live_bytes_(live_bytes) {}
- template <MarkBit::AccessMode mode = MarkBit::NON_ATOMIC>
+ template <AccessMode mode = AccessMode::NON_ATOMIC>
inline void IncrementLiveBytes(intptr_t by) const;
void SetLiveBytes(intptr_t value) const {
@@ -688,7 +708,9 @@ class MarkingState {
}
Bitmap* bitmap() const { return bitmap_; }
- intptr_t live_bytes() const { return *live_bytes_; }
+
+ template <AccessMode mode = AccessMode::NON_ATOMIC>
+ inline intptr_t live_bytes() const;
private:
Bitmap* bitmap_;
@@ -696,19 +718,29 @@ class MarkingState {
};
template <>
-inline void MarkingState::IncrementLiveBytes<MarkBit::NON_ATOMIC>(
+inline void MarkingState::IncrementLiveBytes<AccessMode::NON_ATOMIC>(
intptr_t by) const {
*live_bytes_ += by;
}
template <>
-inline void MarkingState::IncrementLiveBytes<MarkBit::ATOMIC>(
+inline void MarkingState::IncrementLiveBytes<AccessMode::ATOMIC>(
intptr_t by) const {
reinterpret_cast<base::AtomicNumber<intptr_t>*>(live_bytes_)->Increment(by);
}
+template <>
+inline intptr_t MarkingState::live_bytes<AccessMode::NON_ATOMIC>() const {
+ return *live_bytes_;
+}
+
+template <>
+inline intptr_t MarkingState::live_bytes<AccessMode::ATOMIC>() const {
+ return reinterpret_cast<base::AtomicNumber<intptr_t>*>(live_bytes_)->Value();
+}
+
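
The two live_bytes() specializations above select the counter's access mode at compile time. A condensed sketch of the idiom, using std::atomic in place of base::AtomicNumber and assuming, as the V8 code itself does, that the atomic type is layout-compatible with a plain intptr_t:

#include <atomic>
#include <cstdint>

enum class AccessMode { ATOMIC, NON_ATOMIC };

// Hypothetical wrapper around a raw counter cell.
struct LiveBytes {
  intptr_t* cell;

  template <AccessMode mode = AccessMode::NON_ATOMIC>
  void Increment(intptr_t by) const;
};

template <>
inline void LiveBytes::Increment<AccessMode::NON_ATOMIC>(intptr_t by) const {
  *cell += by;  // only valid while no other thread touches the counter
}

template <>
inline void LiveBytes::Increment<AccessMode::ATOMIC>(intptr_t by) const {
  reinterpret_cast<std::atomic<intptr_t>*>(cell)->fetch_add(
      by, std::memory_order_relaxed);
}

Callers pick the mode per call site, e.g. counter.Increment<AccessMode::ATOMIC>(size), which mirrors how the IncrementLiveBytes<IncrementalMarking::kAtomicity>(...) call sites in this diff select between the two paths.
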
// -----------------------------------------------------------------------------
-// A page is a memory chunk of a size 1MB. Large object pages may be larger.
+// A page is a memory chunk of size 512K. Large object pages may be larger.
//
// The only way to get a page pointer is by calling factory methods:
// Page* p = Page::FromAddress(addr); or
@@ -722,8 +754,6 @@ class Page : public MemoryChunk {
static_cast<intptr_t>(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING) |
static_cast<intptr_t>(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
- static inline Page* ConvertNewToOld(Page* old_page);
-
// Returns the page containing a given address. The address ranges
// from [page_addr .. page_addr + kPageSize[. This only works if the object
// is in fact in a page.
@@ -754,6 +784,8 @@ class Page : public MemoryChunk {
kObjectStartOffset;
}
+ static Page* ConvertNewToOld(Page* old_page);
+
inline static Page* FromAnyPointerAddress(Heap* heap, Address addr);
// Create a Page object that is only used as anchor for the doubly-linked
@@ -835,10 +867,10 @@ class Page : public MemoryChunk {
enum InitializationMode { kFreeMemory, kDoNotFreeMemory };
template <InitializationMode mode = kFreeMemory>
- static inline Page* Initialize(Heap* heap, MemoryChunk* chunk,
- Executability executable, PagedSpace* owner);
- static inline Page* Initialize(Heap* heap, MemoryChunk* chunk,
- Executability executable, SemiSpace* owner);
+ static Page* Initialize(Heap* heap, MemoryChunk* chunk,
+ Executability executable, PagedSpace* owner);
+ static Page* Initialize(Heap* heap, MemoryChunk* chunk,
+ Executability executable, SemiSpace* owner);
inline void InitializeFreeListCategories();
@@ -870,8 +902,8 @@ class LargePage : public MemoryChunk {
static const int kMaxCodePageSize = 512 * MB;
private:
- static inline LargePage* Initialize(Heap* heap, MemoryChunk* chunk,
- Executability executable, Space* owner);
+ static LargePage* Initialize(Heap* heap, MemoryChunk* chunk,
+ Executability executable, Space* owner);
friend class MemoryAllocator;
};
@@ -954,6 +986,8 @@ class Space : public Malloced {
committed_ -= bytes;
}
+ V8_EXPORT_PRIVATE void* GetRandomMmapAddr();
+
#ifdef DEBUG
virtual void Print() = 0;
#endif
@@ -1150,9 +1184,8 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
public:
class UnmapFreeMemoryTask;
- Unmapper(Heap* heap, MemoryAllocator* allocator)
- : heap_(heap),
- allocator_(allocator),
+ explicit Unmapper(MemoryAllocator* allocator)
+ : allocator_(allocator),
pending_unmapping_tasks_semaphore_(0),
concurrent_unmapping_tasks_active_(0) {
chunks_[kRegular].reserve(kReservedQueueingSlots);
@@ -1186,14 +1219,13 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
}
void FreeQueuedChunks();
- void WaitUntilCompleted();
+ bool WaitUntilCompleted();
void TearDown();
bool has_delayed_chunks() { return delayed_regular_chunks_.size() > 0; }
private:
static const int kReservedQueueingSlots = 64;
- static const int kMaxUnmapperTasks = 24;
enum ChunkQueueType {
kRegular, // Pages of kPageSize that do not live in a CodeRange and
@@ -1232,15 +1264,13 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
template <FreeMode mode>
void PerformFreeMemoryOnQueuedChunks();
- Heap* const heap_;
- MemoryAllocator* const allocator_;
base::Mutex mutex_;
+ MemoryAllocator* allocator_;
std::vector<MemoryChunk*> chunks_[kNumberOfChunkQueues];
// Delayed chunks cannot be processed in the current unmapping cycle because
// of dependencies such as an active sweeper.
// See MemoryAllocator::CanFreeMemoryChunk.
std::list<MemoryChunk*> delayed_regular_chunks_;
- CancelableTaskManager::Id task_ids_[kMaxUnmapperTasks];
base::Semaphore pending_unmapping_tasks_semaphore_;
intptr_t concurrent_unmapping_tasks_active_;
@@ -1332,20 +1362,24 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
MemoryChunk* AllocateChunk(size_t reserve_area_size, size_t commit_area_size,
Executability executable, Space* space);
- void ShrinkChunk(MemoryChunk* chunk, size_t bytes_to_shrink);
-
- Address ReserveAlignedMemory(size_t requested, size_t alignment,
+ Address ReserveAlignedMemory(size_t requested, size_t alignment, void* hint,
base::VirtualMemory* controller);
Address AllocateAlignedMemory(size_t reserve_size, size_t commit_size,
size_t alignment, Executability executable,
- base::VirtualMemory* controller);
+ void* hint, base::VirtualMemory* controller);
bool CommitMemory(Address addr, size_t size, Executability executable);
void FreeMemory(base::VirtualMemory* reservation, Executability executable);
- void PartialFreeMemory(MemoryChunk* chunk, Address start_free);
void FreeMemory(Address addr, size_t size, Executability executable);
+ // Partially release |bytes_to_free| bytes starting at |start_free|. Note that
+ // internally memory is freed from |start_free| to the end of the reservation.
+  // Memory released beyond the page itself is not accounted for, which is why
+  // the caller computes and passes |bytes_to_free| explicitly.
+ void PartialFreeMemory(MemoryChunk* chunk, Address start_free,
+ size_t bytes_to_free, Address new_area_end);
+
// Commit a contiguous block of memory from the initial chunk. Assumes that
// the address is not NULL, the size is greater than zero, and that the
// block is contained in the initial chunk. Returns true if it succeeded
@@ -1460,7 +1494,7 @@ class V8_EXPORT_PRIVATE ObjectIterator : public Malloced {
template <class PAGE_TYPE>
class PageIteratorImpl
- : public std::iterator<std::forward_iterator_tag, PAGE_TYPE> {
+ : public base::iterator<std::forward_iterator_tag, PAGE_TYPE> {
public:
explicit PageIteratorImpl(PAGE_TYPE* p) : p_(p) {}
PageIteratorImpl(const PageIteratorImpl<PAGE_TYPE>& other) : p_(other.p_) {}
@@ -1540,22 +1574,14 @@ class V8_EXPORT_PRIVATE HeapObjectIterator : public ObjectIterator {
// space.
class AllocationInfo {
public:
- AllocationInfo() : original_top_(nullptr), top_(nullptr), limit_(nullptr) {}
- AllocationInfo(Address top, Address limit)
- : original_top_(top), top_(top), limit_(limit) {}
+ AllocationInfo() : top_(nullptr), limit_(nullptr) {}
+ AllocationInfo(Address top, Address limit) : top_(top), limit_(limit) {}
void Reset(Address top, Address limit) {
- original_top_ = top;
set_top(top);
set_limit(limit);
}
- Address original_top() {
- SLOW_DCHECK(top_ == NULL ||
- (reinterpret_cast<intptr_t>(top_) & kHeapObjectTagMask) == 0);
- return original_top_;
- }
-
INLINE(void set_top(Address top)) {
SLOW_DCHECK(top == NULL ||
(reinterpret_cast<intptr_t>(top) & kHeapObjectTagMask) == 0);
@@ -1589,8 +1615,6 @@ class AllocationInfo {
#endif
private:
- // The original top address when the allocation info was initialized.
- Address original_top_;
// Current allocation top.
Address top_;
// Current allocation limit.
@@ -2341,14 +2365,12 @@ class SemiSpace : public Space {
size_t Size() override {
UNREACHABLE();
- return 0;
}
size_t SizeOfObjects() override { return Size(); }
size_t Available() override {
UNREACHABLE();
- return 0;
}
iterator begin() { return iterator(anchor_.next_page()); }
@@ -2440,10 +2462,10 @@ class NewSpace : public Space {
explicit NewSpace(Heap* heap)
: Space(heap, NEW_SPACE, NOT_EXECUTABLE),
+ top_on_previous_step_(0),
to_space_(heap, kToSpace),
from_space_(heap, kFromSpace),
reservation_(),
- top_on_previous_step_(0),
allocated_histogram_(nullptr),
promoted_histogram_(nullptr) {}
@@ -2577,6 +2599,10 @@ class NewSpace : public Space {
return allocation_info_.limit();
}
+ Address original_top() { return original_top_.Value(); }
+
+ Address original_limit() { return original_limit_.Value(); }
+
// Return the address of the first object in the active semispace.
Address bottom() { return to_space_.space_start(); }
@@ -2707,16 +2733,20 @@ class NewSpace : public Space {
base::Mutex mutex_;
+ // Allocation pointer and limit for normal allocation and allocation during
+ // mark-compact collection.
+ AllocationInfo allocation_info_;
+ Address top_on_previous_step_;
+ // The top and the limit at the time of setting the allocation info.
+ // These values can be accessed by background tasks.
+ base::AtomicValue<Address> original_top_;
+ base::AtomicValue<Address> original_limit_;
+
// The semispaces.
SemiSpace to_space_;
SemiSpace from_space_;
base::VirtualMemory reservation_;
- // Allocation pointer and limit for normal allocation and allocation during
- // mark-compact collection.
- AllocationInfo allocation_info_;
-
- Address top_on_previous_step_;
HistogramInfo* allocated_histogram_;
HistogramInfo* promoted_histogram_;
@@ -2785,7 +2815,6 @@ class CompactionSpaceCollection : public Malloced {
UNREACHABLE();
}
UNREACHABLE();
- return nullptr;
}
private:
@@ -2824,7 +2853,7 @@ class MapSpace : public PagedSpace {
: PagedSpace(heap, id, NOT_EXECUTABLE) {}
int RoundSizeDownToObjectAlignment(int size) override {
- if (base::bits::IsPowerOfTwo32(Map::kSize)) {
+ if (base::bits::IsPowerOfTwo(Map::kSize)) {
return RoundDown(size, Map::kSize);
} else {
return (size / Map::kSize) * Map::kSize;
diff --git a/deps/v8/src/heap/store-buffer.cc b/deps/v8/src/heap/store-buffer.cc
index b803b10d06..31333e6437 100644
--- a/deps/v8/src/heap/store-buffer.cc
+++ b/deps/v8/src/heap/store-buffer.cc
@@ -35,7 +35,8 @@ void StoreBuffer::SetUp() {
// Allocate 3x the buffer size, so that we can start the new store buffer
// aligned to 2x the size. This lets us use a bit test to detect the end of
// the area.
- virtual_memory_ = new base::VirtualMemory(kStoreBufferSize * 3);
+ virtual_memory_ =
+ new base::VirtualMemory(kStoreBufferSize * 3, heap_->GetRandomMmapAddr());
uintptr_t start_as_int =
reinterpret_cast<uintptr_t>(virtual_memory_->address());
start_[0] =
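
The context above explains why the store buffer reservation is three times the buffer size: placing the live buffer at an address aligned to twice its size lets a single bit of the write pointer act as the end-of-area check. The sketch below demonstrates that property; the buffer size is an assumption, since kStoreBufferSize itself is not shown in this hunk.

#include <cassert>
#include <cstdint>

int main() {
  const uintptr_t kBufferSize = 1u << 16;  // assumed power-of-two buffer size
  // Pretend the buffer starts at an address aligned to 2 * kBufferSize.
  const uintptr_t start = 4 * kBufferSize;
  assert((start & (2 * kBufferSize - 1)) == 0);

  // Inside [start, start + kBufferSize) the kBufferSize bit of the pointer is
  // clear; it becomes set exactly when the pointer reaches the end of the area.
  for (uintptr_t p = start; p <= start + kBufferSize; p += sizeof(uintptr_t)) {
    if ((p & kBufferSize) != 0) {
      assert(p == start + kBufferSize);  // the bit test fires exactly at the end
      break;
    }
  }
  return 0;
}
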
diff --git a/deps/v8/src/heap/worklist.h b/deps/v8/src/heap/worklist.h
new file mode 100644
index 0000000000..b6856b4849
--- /dev/null
+++ b/deps/v8/src/heap/worklist.h
@@ -0,0 +1,354 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_WORKLIST_
+#define V8_HEAP_WORKLIST_
+
+#include <cstddef>
+#include <vector>
+
+#include "src/base/logging.h"
+#include "src/base/macros.h"
+#include "src/base/platform/mutex.h"
+#include "testing/gtest/include/gtest/gtest_prod.h" // nogncheck
+
+namespace v8 {
+namespace internal {
+
+// A concurrent worklist based on segments. Each task gets private
+// push and pop segments. Empty pop segments are swapped with their
+// corresponding push segments. Full push segments are published to a global
+// pool of segments and replaced with empty segments.
+//
+// Work stealing is best effort, i.e., there is no way to inform other tasks
+// of the need for items.
+template <typename EntryType, int SEGMENT_SIZE>
+class Worklist {
+ public:
+ class View {
+ public:
+ View(Worklist<EntryType, SEGMENT_SIZE>* worklist, int task_id)
+ : worklist_(worklist), task_id_(task_id) {}
+
+ // Pushes an entry onto the worklist.
+ bool Push(EntryType entry) { return worklist_->Push(task_id_, entry); }
+
+ // Pops an entry from the worklist.
+ bool Pop(EntryType* entry) { return worklist_->Pop(task_id_, entry); }
+
+ // Returns true if the local portion of the worklist is empty.
+ bool IsLocalEmpty() { return worklist_->IsLocalEmpty(task_id_); }
+
+ // Returns true if the worklist is empty. Can only be used from the main
+ // thread without concurrent access.
+ bool IsGlobalEmpty() { return worklist_->IsGlobalEmpty(); }
+
+ bool IsGlobalPoolEmpty() { return worklist_->IsGlobalPoolEmpty(); }
+
+ size_t LocalPushSegmentSize() {
+ return worklist_->LocalPushSegmentSize(task_id_);
+ }
+
+ private:
+ Worklist<EntryType, SEGMENT_SIZE>* worklist_;
+ int task_id_;
+ };
+
+ static const int kMaxNumTasks = 8;
+ static const int kSegmentCapacity = SEGMENT_SIZE;
+
+ Worklist() : Worklist(kMaxNumTasks) {}
+
+ explicit Worklist(int num_tasks) : num_tasks_(num_tasks) {
+ for (int i = 0; i < num_tasks_; i++) {
+ private_push_segment(i) = new Segment();
+ private_pop_segment(i) = new Segment();
+ }
+ }
+
+ ~Worklist() {
+ CHECK(IsGlobalEmpty());
+ for (int i = 0; i < num_tasks_; i++) {
+ DCHECK_NOT_NULL(private_push_segment(i));
+ DCHECK_NOT_NULL(private_pop_segment(i));
+ delete private_push_segment(i);
+ delete private_pop_segment(i);
+ }
+ }
+
+ bool Push(int task_id, EntryType entry) {
+ DCHECK_LT(task_id, num_tasks_);
+ DCHECK_NOT_NULL(private_push_segment(task_id));
+ if (!private_push_segment(task_id)->Push(entry)) {
+ PublishPushSegmentToGlobal(task_id);
+ bool success = private_push_segment(task_id)->Push(entry);
+ USE(success);
+ DCHECK(success);
+ }
+ return true;
+ }
+
+ bool Pop(int task_id, EntryType* entry) {
+ DCHECK_LT(task_id, num_tasks_);
+ DCHECK_NOT_NULL(private_pop_segment(task_id));
+ if (!private_pop_segment(task_id)->Pop(entry)) {
+ if (!private_push_segment(task_id)->IsEmpty()) {
+ Segment* tmp = private_pop_segment(task_id);
+ private_pop_segment(task_id) = private_push_segment(task_id);
+ private_push_segment(task_id) = tmp;
+ } else if (!StealPopSegmentFromGlobal(task_id)) {
+ return false;
+ }
+ bool success = private_pop_segment(task_id)->Pop(entry);
+ USE(success);
+ DCHECK(success);
+ }
+ return true;
+ }
+
+ size_t LocalPushSegmentSize(int task_id) {
+ return private_push_segment(task_id)->Size();
+ }
+
+ bool IsLocalEmpty(int task_id) {
+ return private_pop_segment(task_id)->IsEmpty() &&
+ private_push_segment(task_id)->IsEmpty();
+ }
+
+ bool IsGlobalPoolEmpty() { return global_pool_.IsEmpty(); }
+
+ bool IsGlobalEmpty() {
+ for (int i = 0; i < num_tasks_; i++) {
+ if (!IsLocalEmpty(i)) return false;
+ }
+ return global_pool_.IsEmpty();
+ }
+
+ size_t LocalSize(int task_id) {
+ return private_pop_segment(task_id)->Size() +
+ private_push_segment(task_id)->Size();
+ }
+
+ // Clears all segments. Frees the global segment pool.
+ //
+ // Assumes that no other tasks are running.
+ void Clear() {
+ for (int i = 0; i < num_tasks_; i++) {
+ private_pop_segment(i)->Clear();
+ private_push_segment(i)->Clear();
+ }
+ global_pool_.Clear();
+ }
+
+  // Calls the specified callback on each element of the worklist and replaces
+ // the element with the result of the callback.
+ // The signature of the callback is
+ // bool Callback(EntryType old, EntryType* new).
+ // If the callback returns |false| then the element is removed from the
+ // worklist. Otherwise the |new| entry is updated.
+ //
+ // Assumes that no other tasks are running.
+ template <typename Callback>
+ void Update(Callback callback) {
+ for (int i = 0; i < num_tasks_; i++) {
+ private_pop_segment(i)->Update(callback);
+ private_push_segment(i)->Update(callback);
+ }
+ global_pool_.Update(callback);
+ }
+
+ template <typename Callback>
+ void IterateGlobalPool(Callback callback) {
+ global_pool_.Iterate(callback);
+ }
+
+ void FlushToGlobal(int task_id) {
+ PublishPushSegmentToGlobal(task_id);
+ PublishPopSegmentToGlobal(task_id);
+ }
+
+ private:
+ FRIEND_TEST(WorkListTest, SegmentCreate);
+ FRIEND_TEST(WorkListTest, SegmentPush);
+ FRIEND_TEST(WorkListTest, SegmentPushPop);
+ FRIEND_TEST(WorkListTest, SegmentIsEmpty);
+ FRIEND_TEST(WorkListTest, SegmentIsFull);
+ FRIEND_TEST(WorkListTest, SegmentClear);
+ FRIEND_TEST(WorkListTest, SegmentFullPushFails);
+ FRIEND_TEST(WorkListTest, SegmentEmptyPopFails);
+ FRIEND_TEST(WorkListTest, SegmentUpdateFalse);
+ FRIEND_TEST(WorkListTest, SegmentUpdate);
+
+ class Segment {
+ public:
+ static const int kCapacity = kSegmentCapacity;
+
+ Segment() : index_(0) {}
+
+ bool Push(EntryType entry) {
+ if (IsFull()) return false;
+ entries_[index_++] = entry;
+ return true;
+ }
+
+ bool Pop(EntryType* entry) {
+ if (IsEmpty()) return false;
+ *entry = entries_[--index_];
+ return true;
+ }
+
+ size_t Size() const { return index_; }
+ bool IsEmpty() const { return index_ == 0; }
+ bool IsFull() const { return index_ == kCapacity; }
+ void Clear() { index_ = 0; }
+
+ template <typename Callback>
+ void Update(Callback callback) {
+ size_t new_index = 0;
+ for (size_t i = 0; i < index_; i++) {
+ if (callback(entries_[i], &entries_[new_index])) {
+ new_index++;
+ }
+ }
+ index_ = new_index;
+ }
+
+ template <typename Callback>
+ void Iterate(Callback callback) const {
+ for (size_t i = 0; i < index_; i++) {
+ callback(entries_[i]);
+ }
+ }
+
+ Segment* next() const { return next_; }
+ void set_next(Segment* segment) { next_ = segment; }
+
+ private:
+ Segment* next_;
+ size_t index_;
+ EntryType entries_[kCapacity];
+ };
+
+ struct PrivateSegmentHolder {
+ Segment* private_push_segment;
+ Segment* private_pop_segment;
+ char cache_line_padding[64];
+ };
+
+ class GlobalPool {
+ public:
+ GlobalPool() : top_(nullptr) {}
+
+ V8_INLINE void Push(Segment* segment) {
+ base::LockGuard<base::Mutex> guard(&lock_);
+ segment->set_next(top_);
+ top_ = segment;
+ }
+
+ V8_INLINE bool Pop(Segment** segment) {
+ base::LockGuard<base::Mutex> guard(&lock_);
+ if (top_ != nullptr) {
+ *segment = top_;
+ top_ = top_->next();
+ return true;
+ }
+ return false;
+ }
+
+ V8_INLINE bool IsEmpty() {
+ base::LockGuard<base::Mutex> guard(&lock_);
+ return top_ == nullptr;
+ }
+
+ void Clear() {
+ base::LockGuard<base::Mutex> guard(&lock_);
+ Segment* current = top_;
+ while (current != nullptr) {
+ Segment* tmp = current;
+ current = current->next();
+ delete tmp;
+ }
+ top_ = nullptr;
+ }
+
+ // See Worklist::Update.
+ template <typename Callback>
+ void Update(Callback callback) {
+ base::LockGuard<base::Mutex> guard(&lock_);
+ Segment* prev = nullptr;
+ Segment* current = top_;
+ while (current != nullptr) {
+ current->Update(callback);
+ if (current->IsEmpty()) {
+ if (prev == nullptr) {
+ top_ = current->next();
+ } else {
+ prev->set_next(current->next());
+ }
+ Segment* tmp = current;
+ current = current->next();
+ delete tmp;
+ } else {
+ prev = current;
+ current = current->next();
+ }
+ }
+ }
+
+  // See Worklist::IterateGlobalPool.
+ template <typename Callback>
+ void Iterate(Callback callback) {
+ base::LockGuard<base::Mutex> guard(&lock_);
+ for (Segment* current = top_; current != nullptr;
+ current = current->next()) {
+ current->Iterate(callback);
+ }
+ }
+
+ private:
+ base::Mutex lock_;
+ Segment* top_;
+ };
+
+ V8_INLINE Segment*& private_push_segment(int task_id) {
+ return private_segments_[task_id].private_push_segment;
+ }
+
+ V8_INLINE Segment*& private_pop_segment(int task_id) {
+ return private_segments_[task_id].private_pop_segment;
+ }
+
+ V8_INLINE void PublishPushSegmentToGlobal(int task_id) {
+ if (!private_push_segment(task_id)->IsEmpty()) {
+ global_pool_.Push(private_push_segment(task_id));
+ private_push_segment(task_id) = new Segment();
+ }
+ }
+
+ V8_INLINE void PublishPopSegmentToGlobal(int task_id) {
+ if (!private_pop_segment(task_id)->IsEmpty()) {
+ global_pool_.Push(private_pop_segment(task_id));
+ private_pop_segment(task_id) = new Segment();
+ }
+ }
+
+ V8_INLINE bool StealPopSegmentFromGlobal(int task_id) {
+ Segment* new_segment = nullptr;
+ if (global_pool_.Pop(&new_segment)) {
+ delete private_pop_segment(task_id);
+ private_pop_segment(task_id) = new_segment;
+ return true;
+ }
+ return false;
+ }
+
+ PrivateSegmentHolder private_segments_[kMaxNumTasks];
+ GlobalPool global_pool_;
+ int num_tasks_;
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif  // V8_HEAP_WORKLIST_
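
As a usage sketch of the new Worklist inside the V8 tree (it is not standalone: it includes the header added above, and the entry type, segment size, and task ids are illustrative choices, not values prescribed by the header):

#include "src/heap/worklist.h"

namespace v8 {
namespace internal {

// Two cooperating tasks sharing one worklist of plain ints. A segment size of
// 64 and the task ids 0 and 1 are arbitrary choices for this sketch.
void WorklistSketch() {
  Worklist<int, 64> worklist;
  Worklist<int, 64>::View producer(&worklist, 0);
  Worklist<int, 64>::View consumer(&worklist, 1);

  for (int i = 0; i < 1000; i++) producer.Push(i);
  // Make the producer's remaining private entries visible to other tasks.
  worklist.FlushToGlobal(0);

  int value;
  while (consumer.Pop(&value)) {
    // Process |value|. Once the consumer's private segments run dry, Pop()
    // steals a full segment from the global pool.
  }

  // Update() visits every remaining entry: returning false drops it, returning
  // true keeps the value written through the second argument. The worklist is
  // already empty here, so this only illustrates the callback contract.
  worklist.Update([](int old_entry, int* new_entry) {
    if (old_entry % 2 != 0) return false;
    *new_entry = old_entry * 2;
    return true;
  });
}

}  // namespace internal
}  // namespace v8
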
diff --git a/deps/v8/src/heap/workstealing-marking-deque.h b/deps/v8/src/heap/workstealing-marking-deque.h
deleted file mode 100644
index 1a3dc865e4..0000000000
--- a/deps/v8/src/heap/workstealing-marking-deque.h
+++ /dev/null
@@ -1,167 +0,0 @@
-// Copyright 2017 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_HEAP_WORKSTEALING_MARKING_DEQUE_
-#define V8_HEAP_WORKSTEALING_MARKING_DEQUE_
-
-#include <cstddef>
-
-#include "src/base/logging.h"
-#include "src/base/macros.h"
-
-namespace v8 {
-namespace internal {
-
-class HeapObject;
-
-class StackSegment {
- public:
- static const int kNumEntries = 64;
-
- StackSegment(StackSegment* next, StackSegment* prev)
- : next_(next), prev_(prev), index_(0) {}
-
- bool Push(HeapObject* object) {
- if (IsFull()) return false;
-
- objects_[index_++] = object;
- return true;
- }
-
- bool Pop(HeapObject** object) {
- if (IsEmpty()) return false;
-
- *object = objects_[--index_];
- return true;
- }
-
- size_t Size() { return index_; }
- bool IsEmpty() { return index_ == 0; }
- bool IsFull() { return index_ == kNumEntries; }
- void Clear() { index_ = 0; }
-
- StackSegment* next() { return next_; }
- StackSegment* prev() { return prev_; }
- void set_next(StackSegment* next) { next_ = next; }
- void set_prev(StackSegment* prev) { prev_ = prev; }
-
- void Unlink() {
- if (next() != nullptr) next()->set_prev(prev());
- if (prev() != nullptr) prev()->set_next(next());
- }
-
- private:
- StackSegment* next_;
- StackSegment* prev_;
- size_t index_;
- HeapObject* objects_[kNumEntries];
-};
-
-class SegmentedStack {
- public:
- SegmentedStack()
- : front_(new StackSegment(nullptr, nullptr)), back_(front_) {}
-
- ~SegmentedStack() {
- CHECK(IsEmpty());
- delete front_;
- }
-
- bool Push(HeapObject* object) {
- if (!front_->Push(object)) {
- NewFront();
- bool success = front_->Push(object);
- USE(success);
- DCHECK(success);
- }
- return true;
- }
-
- bool Pop(HeapObject** object) {
- if (!front_->Pop(object)) {
- if (IsEmpty()) return false;
- DeleteFront();
- bool success = front_->Pop(object);
- USE(success);
- DCHECK(success);
- }
- return object;
- }
-
- bool IsEmpty() { return front_ == back_ && front_->IsEmpty(); }
-
- private:
- void NewFront() {
- StackSegment* s = new StackSegment(front_, nullptr);
- front_->set_prev(s);
- front_ = s;
- }
-
- void DeleteFront() { delete Unlink(front_); }
-
- StackSegment* Unlink(StackSegment* segment) {
- CHECK_NE(front_, back_);
- if (segment == front_) front_ = front_->next();
- if (segment == back_) back_ = back_->prev();
- segment->Unlink();
- return segment;
- }
-
- StackSegment* front_;
- StackSegment* back_;
-};
-
-// TODO(mlippautz): Implement actual work stealing.
-class WorkStealingMarkingDeque {
- public:
- static const int kMaxNumTasks = 4;
-
- bool Push(int task_id, HeapObject* object) {
- DCHECK_LT(task_id, kMaxNumTasks);
- return private_stacks_[task_id].Push(object);
- }
-
- bool Pop(int task_id, HeapObject** object) {
- DCHECK_LT(task_id, kMaxNumTasks);
- return private_stacks_[task_id].Pop(object);
- }
-
- bool IsLocalEmpty(int task_id) { return private_stacks_[task_id].IsEmpty(); }
-
- private:
- SegmentedStack private_stacks_[kMaxNumTasks];
-};
-
-class LocalWorkStealingMarkingDeque {
- public:
- LocalWorkStealingMarkingDeque(WorkStealingMarkingDeque* deque, int task_id)
- : deque_(deque), task_id_(task_id) {}
-
- // Pushes an object onto the marking deque.
- bool Push(HeapObject* object) { return deque_->Push(task_id_, object); }
-
- // Pops an object onto the marking deque.
- bool Pop(HeapObject** object) { return deque_->Pop(task_id_, object); }
-
- // Returns true if the local portion of the marking deque is empty.
- bool IsEmpty() { return deque_->IsLocalEmpty(task_id_); }
-
- // Blocks if there are no more objects available. Returns execution with
- // |true| once new objects are available and |false| otherwise.
- bool WaitForMoreObjects() {
- // Return false once the local portion of the marking deque is drained.
- // TODO(mlippautz): Implement a barrier that can be used to synchronize
- // work stealing and emptiness.
- return !IsEmpty();
- }
-
- private:
- WorkStealingMarkingDeque* deque_;
- int task_id_;
-};
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_HEAP_WORKSTEALING_MARKING_DEQUE_