Diffstat (limited to 'deps/v8/src/heap/mark-compact.cc')
 deps/v8/src/heap/mark-compact.cc | 1599
 1 file changed, 960 insertions(+), 639 deletions(-)
diff --git a/deps/v8/src/heap/mark-compact.cc b/deps/v8/src/heap/mark-compact.cc
index 6f46bc57bf..0a6b5ed000 100644
--- a/deps/v8/src/heap/mark-compact.cc
+++ b/deps/v8/src/heap/mark-compact.cc
@@ -8,7 +8,6 @@
#include "src/base/utils/random-number-generator.h"
#include "src/cancelable-task.h"
-#include "src/code-stubs.h"
#include "src/compilation-cache.h"
#include "src/deoptimizer.h"
#include "src/execution.h"
@@ -28,7 +27,11 @@
#include "src/heap/sweeper.h"
#include "src/heap/worklist.h"
#include "src/ic/stub-cache.h"
+#include "src/objects/foreign.h"
#include "src/objects/hash-table-inl.h"
+#include "src/objects/js-objects-inl.h"
+#include "src/objects/maybe-object.h"
+#include "src/objects/slots-inl.h"
#include "src/transitions-inl.h"
#include "src/utils-inl.h"
#include "src/v8.h"
@@ -44,7 +47,7 @@ const char* Marking::kImpossibleBitPattern = "01";
// The following has to hold in order for {MarkingState::MarkBitFrom} to not
// produce invalid {kImpossibleBitPattern} in the marking bitmap by overlapping.
-STATIC_ASSERT(Heap::kMinObjectSizeInWords >= 2);
+STATIC_ASSERT(Heap::kMinObjectSizeInTaggedWords >= 2);
// =============================================================================
// Verifiers
@@ -62,31 +65,34 @@ class MarkingVerifier : public ObjectVisitor, public RootVisitor {
virtual Bitmap* bitmap(const MemoryChunk* chunk) = 0;
- virtual void VerifyPointers(Object** start, Object** end) = 0;
- virtual void VerifyPointers(MaybeObject** start, MaybeObject** end) = 0;
+ virtual void VerifyPointers(ObjectSlot start, ObjectSlot end) = 0;
+ virtual void VerifyPointers(MaybeObjectSlot start, MaybeObjectSlot end) = 0;
+ virtual void VerifyRootPointers(FullObjectSlot start, FullObjectSlot end) = 0;
- virtual bool IsMarked(HeapObject* object) = 0;
+ virtual bool IsMarked(HeapObject object) = 0;
- virtual bool IsBlackOrGrey(HeapObject* object) = 0;
+ virtual bool IsBlackOrGrey(HeapObject object) = 0;
- void VisitPointers(HeapObject* host, Object** start, Object** end) override {
+ void VisitPointers(HeapObject host, ObjectSlot start,
+ ObjectSlot end) override {
VerifyPointers(start, end);
}
- void VisitPointers(HeapObject* host, MaybeObject** start,
- MaybeObject** end) override {
+ void VisitPointers(HeapObject host, MaybeObjectSlot start,
+ MaybeObjectSlot end) override {
VerifyPointers(start, end);
}
- void VisitRootPointers(Root root, const char* description, Object** start,
- Object** end) override {
- VerifyPointers(start, end);
+ void VisitRootPointers(Root root, const char* description,
+ FullObjectSlot start, FullObjectSlot end) override {
+ VerifyRootPointers(start, end);
}
void VerifyRoots(VisitMode mode);
void VerifyMarkingOnPage(const Page* page, Address start, Address end);
void VerifyMarking(NewSpace* new_space);
void VerifyMarking(PagedSpace* paged_space);
+ void VerifyMarking(LargeObjectSpace* lo_space);
Heap* heap_;
};
@@ -97,7 +103,7 @@ void MarkingVerifier::VerifyRoots(VisitMode mode) {
void MarkingVerifier::VerifyMarkingOnPage(const Page* page, Address start,
Address end) {
- HeapObject* object;
+ HeapObject object;
Address next_object_must_be_here_or_later = start;
for (Address current = start; current < end;) {
object = HeapObject::FromAddress(current);
@@ -115,11 +121,11 @@ void MarkingVerifier::VerifyMarkingOnPage(const Page* page, Address start,
page->AddressToMarkbitIndex(current),
page->AddressToMarkbitIndex(next_object_must_be_here_or_later)) ||
bitmap(page)->AllBitsClearInRange(
- page->AddressToMarkbitIndex(current + kPointerSize * 2),
+ page->AddressToMarkbitIndex(current + kTaggedSize * 2),
page->AddressToMarkbitIndex(next_object_must_be_here_or_later)));
current = next_object_must_be_here_or_later;
} else {
- current += kPointerSize;
+ current += kTaggedSize;
}
}
}
@@ -146,6 +152,15 @@ void MarkingVerifier::VerifyMarking(PagedSpace* space) {
}
}
+void MarkingVerifier::VerifyMarking(LargeObjectSpace* lo_space) {
+ LargeObjectIterator it(lo_space);
+ for (HeapObject obj = it.Next(); !obj.is_null(); obj = it.Next()) {
+ if (IsBlackOrGrey(obj)) {
+ obj->Iterate(this);
+ }
+ }
+}
+
class FullMarkingVerifier : public MarkingVerifier {
public:
explicit FullMarkingVerifier(Heap* heap)
@@ -159,13 +174,8 @@ class FullMarkingVerifier : public MarkingVerifier {
VerifyMarking(heap_->old_space());
VerifyMarking(heap_->code_space());
VerifyMarking(heap_->map_space());
-
- LargeObjectIterator it(heap_->lo_space());
- for (HeapObject* obj = it.Next(); obj != nullptr; obj = it.Next()) {
- if (marking_state_->IsBlackOrGrey(obj)) {
- obj->Iterate(this);
- }
- }
+ VerifyMarking(heap_->lo_space());
+ VerifyMarking(heap_->code_lo_space());
}
protected:
@@ -173,41 +183,55 @@ class FullMarkingVerifier : public MarkingVerifier {
return marking_state_->bitmap(chunk);
}
- bool IsMarked(HeapObject* object) override {
+ bool IsMarked(HeapObject object) override {
return marking_state_->IsBlack(object);
}
- bool IsBlackOrGrey(HeapObject* object) override {
+ bool IsBlackOrGrey(HeapObject object) override {
return marking_state_->IsBlackOrGrey(object);
}
- void VerifyPointers(Object** start, Object** end) override {
- for (Object** current = start; current < end; current++) {
- if ((*current)->IsHeapObject()) {
- HeapObject* object = HeapObject::cast(*current);
- CHECK(marking_state_->IsBlackOrGrey(object));
- }
- }
+ void VerifyPointers(ObjectSlot start, ObjectSlot end) override {
+ VerifyPointersImpl(start, end);
}
- void VerifyPointers(MaybeObject** start, MaybeObject** end) override {
- for (MaybeObject** current = start; current < end; current++) {
- HeapObject* object;
- if ((*current)->GetHeapObjectIfStrong(&object)) {
- CHECK(marking_state_->IsBlackOrGrey(object));
- }
- }
+ void VerifyPointers(MaybeObjectSlot start, MaybeObjectSlot end) override {
+ VerifyPointersImpl(start, end);
+ }
+
+ void VerifyRootPointers(FullObjectSlot start, FullObjectSlot end) override {
+ VerifyPointersImpl(start, end);
}
- void VisitEmbeddedPointer(Code* host, RelocInfo* rinfo) override {
+ void VisitCodeTarget(Code host, RelocInfo* rinfo) override {
+ Code target = Code::GetCodeFromTargetAddress(rinfo->target_address());
+ VerifyHeapObjectImpl(target);
+ }
+
+ void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {
DCHECK(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
if (!host->IsWeakObject(rinfo->target_object())) {
- Object* p = rinfo->target_object();
- VisitPointer(host, &p);
+ HeapObject object = rinfo->target_object();
+ VerifyHeapObjectImpl(object);
}
}
private:
+ V8_INLINE void VerifyHeapObjectImpl(HeapObject heap_object) {
+ CHECK(marking_state_->IsBlackOrGrey(heap_object));
+ }
+
+ template <typename TSlot>
+ V8_INLINE void VerifyPointersImpl(TSlot start, TSlot end) {
+ for (TSlot slot = start; slot < end; ++slot) {
+ typename TSlot::TObject object = *slot;
+ HeapObject heap_object;
+ if (object.GetHeapObjectIfStrong(&heap_object)) {
+ VerifyHeapObjectImpl(heap_object);
+ }
+ }
+ }
+
MarkCompactCollector::NonAtomicMarkingState* marking_state_;
};
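
The verifier hunks above collapse the separate strong-slot and weak-slot loops into one template keyed on the slot type's TObject typedef. A minimal sketch of that pattern, using simplified stand-ins rather than V8's real slot types:

#include <cstdint>

struct HeapObject { uintptr_t ptr = 0; bool is_null() const { return ptr == 0; } };

// Stand-in for Object / MaybeObject: only strong references report a target.
struct StrongRef {
  HeapObject target;
  bool GetHeapObjectIfStrong(HeapObject* out) const {
    *out = target;
    return !target.is_null();
  }
};

// Stand-in for ObjectSlot / MaybeObjectSlot: a typed cursor over slot memory.
template <typename TObjectType>
struct Slot {
  using TObject = TObjectType;
  const TObject* p;
  TObject operator*() const { return *p; }
  Slot& operator++() { ++p; return *this; }
  bool operator<(const Slot& other) const { return p < other.p; }
};

// One template now serves every slot flavor, as in VerifyPointersImpl.
template <typename TSlot>
void VerifyPointersImpl(TSlot start, TSlot end) {
  for (TSlot slot = start; slot < end; ++slot) {
    typename TSlot::TObject object = *slot;
    HeapObject heap_object;
    if (object.GetHeapObjectIfStrong(&heap_object)) {
      // A real verifier would CHECK the mark bit of heap_object here.
    }
  }
}
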
@@ -215,18 +239,19 @@ class EvacuationVerifier : public ObjectVisitor, public RootVisitor {
public:
virtual void Run() = 0;
- void VisitPointers(HeapObject* host, Object** start, Object** end) override {
+ void VisitPointers(HeapObject host, ObjectSlot start,
+ ObjectSlot end) override {
VerifyPointers(start, end);
}
- void VisitPointers(HeapObject* host, MaybeObject** start,
- MaybeObject** end) override {
+ void VisitPointers(HeapObject host, MaybeObjectSlot start,
+ MaybeObjectSlot end) override {
VerifyPointers(start, end);
}
- void VisitRootPointers(Root root, const char* description, Object** start,
- Object** end) override {
- VerifyPointers(start, end);
+ void VisitRootPointers(Root root, const char* description,
+ FullObjectSlot start, FullObjectSlot end) override {
+ VerifyRootPointers(start, end);
}
protected:
@@ -234,8 +259,9 @@ class EvacuationVerifier : public ObjectVisitor, public RootVisitor {
inline Heap* heap() { return heap_; }
- virtual void VerifyPointers(Object** start, Object** end) = 0;
- virtual void VerifyPointers(MaybeObject** start, MaybeObject** end) = 0;
+ virtual void VerifyPointers(ObjectSlot start, ObjectSlot end) = 0;
+ virtual void VerifyPointers(MaybeObjectSlot start, MaybeObjectSlot end) = 0;
+ virtual void VerifyRootPointers(FullObjectSlot start, FullObjectSlot end) = 0;
void VerifyRoots(VisitMode mode);
void VerifyEvacuationOnPage(Address start, Address end);
@@ -252,7 +278,7 @@ void EvacuationVerifier::VerifyRoots(VisitMode mode) {
void EvacuationVerifier::VerifyEvacuationOnPage(Address start, Address end) {
Address current = start;
while (current < end) {
- HeapObject* object = HeapObject::FromAddress(current);
+ HeapObject object = HeapObject::FromAddress(current);
if (!object->IsFiller()) object->Iterate(this);
current += object->Size();
}
@@ -295,27 +321,37 @@ class FullEvacuationVerifier : public EvacuationVerifier {
}
protected:
- void VerifyPointers(Object** start, Object** end) override {
- for (Object** current = start; current < end; current++) {
- if ((*current)->IsHeapObject()) {
- HeapObject* object = HeapObject::cast(*current);
- if (Heap::InNewSpace(object)) {
- CHECK(Heap::InToSpace(object));
- }
- CHECK(!MarkCompactCollector::IsOnEvacuationCandidate(object));
+ V8_INLINE void VerifyHeapObjectImpl(HeapObject heap_object) {
+ CHECK_IMPLIES(Heap::InNewSpace(heap_object), Heap::InToSpace(heap_object));
+ CHECK(!MarkCompactCollector::IsOnEvacuationCandidate(heap_object));
+ }
+
+ template <typename TSlot>
+ void VerifyPointersImpl(TSlot start, TSlot end) {
+ for (TSlot current = start; current < end; ++current) {
+ typename TSlot::TObject object = *current;
+ HeapObject heap_object;
+ if (object.GetHeapObjectIfStrong(&heap_object)) {
+ VerifyHeapObjectImpl(heap_object);
}
}
}
- void VerifyPointers(MaybeObject** start, MaybeObject** end) override {
- for (MaybeObject** current = start; current < end; current++) {
- HeapObject* object;
- if ((*current)->GetHeapObjectIfStrong(&object)) {
- if (Heap::InNewSpace(object)) {
- CHECK(Heap::InToSpace(object));
- }
- CHECK(!MarkCompactCollector::IsOnEvacuationCandidate(object));
- }
- }
+
+ void VerifyPointers(ObjectSlot start, ObjectSlot end) override {
+ VerifyPointersImpl(start, end);
+ }
+ void VerifyPointers(MaybeObjectSlot start, MaybeObjectSlot end) override {
+ VerifyPointersImpl(start, end);
+ }
+ void VisitCodeTarget(Code host, RelocInfo* rinfo) override {
+ Code target = Code::GetCodeFromTargetAddress(rinfo->target_address());
+ VerifyHeapObjectImpl(target);
+ }
+ void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {
+ VerifyHeapObjectImpl(rinfo->target_object());
+ }
+ void VerifyRootPointers(FullObjectSlot start, FullObjectSlot end) override {
+ VerifyPointersImpl(start, end);
}
};
@@ -474,7 +510,8 @@ void MarkCompactCollector::CollectGarbage() {
#ifdef VERIFY_HEAP
void MarkCompactCollector::VerifyMarkbitsAreDirty(PagedSpace* space) {
HeapObjectIterator iterator(space);
- while (HeapObject* object = iterator.Next()) {
+ for (HeapObject object = iterator.Next(); !object.is_null();
+ object = iterator.Next()) {
CHECK(non_atomic_marking_state()->IsBlack(object));
}
}
@@ -494,6 +531,14 @@ void MarkCompactCollector::VerifyMarkbitsAreClean(NewSpace* space) {
}
}
+void MarkCompactCollector::VerifyMarkbitsAreClean(LargeObjectSpace* space) {
+ LargeObjectIterator it(space);
+ for (HeapObject obj = it.Next(); !obj.is_null(); obj = it.Next()) {
+ CHECK(non_atomic_marking_state()->IsWhite(obj));
+ CHECK_EQ(0, non_atomic_marking_state()->live_bytes(
+ MemoryChunk::FromHeapObject(obj)));
+ }
+}
void MarkCompactCollector::VerifyMarkbitsAreClean() {
VerifyMarkbitsAreClean(heap_->old_space());
@@ -503,13 +548,9 @@ void MarkCompactCollector::VerifyMarkbitsAreClean() {
// Read-only space should always be black since we never collect any objects
// in it or linked from it.
VerifyMarkbitsAreDirty(heap_->read_only_space());
-
- LargeObjectIterator it(heap_->lo_space());
- for (HeapObject* obj = it.Next(); obj != nullptr; obj = it.Next()) {
- CHECK(non_atomic_marking_state()->IsWhite(obj));
- CHECK_EQ(0, non_atomic_marking_state()->live_bytes(
- MemoryChunk::FromAddress(obj->address())));
- }
+ VerifyMarkbitsAreClean(heap_->lo_space());
+ VerifyMarkbitsAreClean(heap_->code_lo_space());
+ VerifyMarkbitsAreClean(heap_->new_lo_space());
}
#endif // VERIFY_HEAP
@@ -753,7 +794,7 @@ void MarkCompactCollector::Prepare() {
heap()->memory_allocator()->unmapper()->PrepareForMarkCompact();
if (!was_marked_incrementally_) {
- TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WRAPPER_PROLOGUE);
+ TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_EMBEDDER_PROLOGUE);
heap_->local_embedder_heap_tracer()->TracePrologue();
}
@@ -779,9 +820,12 @@ void MarkCompactCollector::Prepare() {
void MarkCompactCollector::FinishConcurrentMarking(
ConcurrentMarking::StopRequest stop_request) {
- if (FLAG_concurrent_marking) {
+ // FinishConcurrentMarking is called for both concurrent and parallel
+ // marking. It is safe to call this function when tasks are already finished.
+ if (FLAG_parallel_marking || FLAG_concurrent_marking) {
heap()->concurrent_marking()->Stop(stop_request);
- heap()->concurrent_marking()->FlushLiveBytes(non_atomic_marking_state());
+ heap()->concurrent_marking()->FlushMemoryChunkData(
+ non_atomic_marking_state());
}
}
@@ -806,6 +850,8 @@ void MarkCompactCollector::VerifyMarking() {
void MarkCompactCollector::Finish() {
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_FINISH);
+ epoch_++;
+
#ifdef DEBUG
heap()->VerifyCountersBeforeConcurrentSweeping();
#endif
@@ -819,6 +865,7 @@ void MarkCompactCollector::Finish() {
// Clear the marking state of live large objects.
heap_->lo_space()->ClearMarkingStateOfLiveObjects();
+ heap_->code_lo_space()->ClearMarkingStateOfLiveObjects();
#ifdef DEBUG
DCHECK(state_ == SWEEP_SPACES || state_ == RELOCATE_OBJECTS);
@@ -845,17 +892,18 @@ class MarkCompactCollector::RootMarkingVisitor final : public RootVisitor {
explicit RootMarkingVisitor(MarkCompactCollector* collector)
: collector_(collector) {}
- void VisitRootPointer(Root root, const char* description, Object** p) final {
+ void VisitRootPointer(Root root, const char* description,
+ FullObjectSlot p) final {
MarkObjectByPointer(root, p);
}
- void VisitRootPointers(Root root, const char* description, Object** start,
- Object** end) final {
- for (Object** p = start; p < end; p++) MarkObjectByPointer(root, p);
+ void VisitRootPointers(Root root, const char* description,
+ FullObjectSlot start, FullObjectSlot end) final {
+ for (FullObjectSlot p = start; p < end; ++p) MarkObjectByPointer(root, p);
}
private:
- V8_INLINE void MarkObjectByPointer(Root root, Object** p) {
+ V8_INLINE void MarkObjectByPointer(Root root, FullObjectSlot p) {
if (!(*p)->IsHeapObject()) return;
collector_->MarkRootObject(root, HeapObject::cast(*p));
@@ -879,27 +927,34 @@ class MarkCompactCollector::CustomRootBodyMarkingVisitor final
explicit CustomRootBodyMarkingVisitor(MarkCompactCollector* collector)
: collector_(collector) {}
- void VisitPointer(HeapObject* host, Object** p) final {
+ void VisitPointer(HeapObject host, ObjectSlot p) final {
MarkObject(host, *p);
}
- void VisitPointers(HeapObject* host, Object** start, Object** end) final {
- for (Object** p = start; p < end; p++) {
+ void VisitPointers(HeapObject host, ObjectSlot start, ObjectSlot end) final {
+ for (ObjectSlot p = start; p < end; ++p) {
DCHECK(!HasWeakHeapObjectTag(*p));
MarkObject(host, *p);
}
}
- void VisitPointers(HeapObject* host, MaybeObject** start,
- MaybeObject** end) final {
+ void VisitPointers(HeapObject host, MaybeObjectSlot start,
+ MaybeObjectSlot end) final {
// At the moment, custom roots cannot contain weak pointers.
UNREACHABLE();
}
// VisitEmbedderPointer is defined by ObjectVisitor to call VisitPointers.
+ void VisitCodeTarget(Code host, RelocInfo* rinfo) override {
+ Code target = Code::GetCodeFromTargetAddress(rinfo->target_address());
+ MarkObject(host, target);
+ }
+ void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {
+ MarkObject(host, rinfo->target_object());
+ }
private:
- void MarkObject(HeapObject* host, Object* object) {
+ V8_INLINE void MarkObject(HeapObject host, Object object) {
if (!object->IsHeapObject()) return;
collector_->MarkObject(host, HeapObject::cast(object));
}
@@ -909,22 +964,23 @@ class MarkCompactCollector::CustomRootBodyMarkingVisitor final
class InternalizedStringTableCleaner : public ObjectVisitor {
public:
- InternalizedStringTableCleaner(Heap* heap, HeapObject* table)
+ InternalizedStringTableCleaner(Heap* heap, HeapObject table)
: heap_(heap), pointers_removed_(0), table_(table) {}
- void VisitPointers(HeapObject* host, Object** start, Object** end) override {
+ void VisitPointers(HeapObject host, ObjectSlot start,
+ ObjectSlot end) override {
// Visit all HeapObject pointers in [start, end).
- Object* the_hole = ReadOnlyRoots(heap_).the_hole_value();
+ Object the_hole = ReadOnlyRoots(heap_).the_hole_value();
MarkCompactCollector::NonAtomicMarkingState* marking_state =
heap_->mark_compact_collector()->non_atomic_marking_state();
- for (Object** p = start; p < end; p++) {
- Object* o = *p;
+ for (ObjectSlot p = start; p < end; ++p) {
+ Object o = *p;
if (o->IsHeapObject()) {
- HeapObject* heap_object = HeapObject::cast(o);
+ HeapObject heap_object = HeapObject::cast(o);
if (marking_state->IsWhite(heap_object)) {
pointers_removed_++;
// Set the entry to the_hole_value (as deleted).
- *p = the_hole;
+ p.store(the_hole);
} else {
// StringTable contains only old space strings.
DCHECK(!Heap::InNewSpace(o));
@@ -934,8 +990,14 @@ class InternalizedStringTableCleaner : public ObjectVisitor {
}
}
- void VisitPointers(HeapObject* host, MaybeObject** start,
- MaybeObject** end) final {
+ void VisitPointers(HeapObject host, MaybeObjectSlot start,
+ MaybeObjectSlot end) final {
+ UNREACHABLE();
+ }
+
+ void VisitCodeTarget(Code host, RelocInfo* rinfo) final { UNREACHABLE(); }
+
+ void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) final {
UNREACHABLE();
}
@@ -946,32 +1008,32 @@ class InternalizedStringTableCleaner : public ObjectVisitor {
private:
Heap* heap_;
int pointers_removed_;
- HeapObject* table_;
+ HeapObject table_;
};
class ExternalStringTableCleaner : public RootVisitor {
public:
explicit ExternalStringTableCleaner(Heap* heap) : heap_(heap) {}
- void VisitRootPointers(Root root, const char* description, Object** start,
- Object** end) override {
+ void VisitRootPointers(Root root, const char* description,
+ FullObjectSlot start, FullObjectSlot end) override {
// Visit all HeapObject pointers in [start, end).
MarkCompactCollector::NonAtomicMarkingState* marking_state =
heap_->mark_compact_collector()->non_atomic_marking_state();
- Object* the_hole = ReadOnlyRoots(heap_).the_hole_value();
- for (Object** p = start; p < end; p++) {
- Object* o = *p;
+ Object the_hole = ReadOnlyRoots(heap_).the_hole_value();
+ for (FullObjectSlot p = start; p < end; ++p) {
+ Object o = *p;
if (o->IsHeapObject()) {
- HeapObject* heap_object = HeapObject::cast(o);
+ HeapObject heap_object = HeapObject::cast(o);
if (marking_state->IsWhite(heap_object)) {
if (o->IsExternalString()) {
- heap_->FinalizeExternalString(String::cast(*p));
+ heap_->FinalizeExternalString(String::cast(o));
} else {
// The original external string may have been internalized.
DCHECK(o->IsThinString());
}
// Set the entry to the_hole_value (as deleted).
- *p = the_hole;
+ p.store(the_hole);
}
}
}
@@ -989,8 +1051,8 @@ class MarkCompactWeakObjectRetainer : public WeakObjectRetainer {
MarkCompactCollector::NonAtomicMarkingState* marking_state)
: marking_state_(marking_state) {}
- Object* RetainAs(Object* object) override {
- HeapObject* heap_object = HeapObject::cast(object);
+ Object RetainAs(Object object) override {
+ HeapObject heap_object = HeapObject::cast(object);
DCHECK(!marking_state_->IsGrey(heap_object));
if (marking_state_->IsBlack(heap_object)) {
return object;
@@ -999,9 +1061,9 @@ class MarkCompactWeakObjectRetainer : public WeakObjectRetainer {
// "dead" AllocationSites need to live long enough for a traversal of new
// space. These sites get a one-time reprieve.
- Object* nested = object;
+ Object nested = object;
while (nested->IsAllocationSite()) {
- AllocationSite* current_site = AllocationSite::cast(nested);
+ AllocationSite current_site = AllocationSite::cast(nested);
// MarkZombie will override the nested_site, read it first before
// marking
nested = current_site->nested_site();
@@ -1011,7 +1073,7 @@ class MarkCompactWeakObjectRetainer : public WeakObjectRetainer {
return object;
} else {
- return nullptr;
+ return Object();
}
}
@@ -1024,69 +1086,68 @@ class RecordMigratedSlotVisitor : public ObjectVisitor {
explicit RecordMigratedSlotVisitor(MarkCompactCollector* collector)
: collector_(collector) {}
- inline void VisitPointer(HeapObject* host, Object** p) final {
+ inline void VisitPointer(HeapObject host, ObjectSlot p) final {
DCHECK(!HasWeakHeapObjectTag(*p));
- RecordMigratedSlot(host, reinterpret_cast<MaybeObject*>(*p),
- reinterpret_cast<Address>(p));
+ RecordMigratedSlot(host, MaybeObject::FromObject(*p), p.address());
}
- inline void VisitPointer(HeapObject* host, MaybeObject** p) final {
- RecordMigratedSlot(host, *p, reinterpret_cast<Address>(p));
+ inline void VisitPointer(HeapObject host, MaybeObjectSlot p) final {
+ RecordMigratedSlot(host, *p, p.address());
}
- inline void VisitPointers(HeapObject* host, Object** start,
- Object** end) final {
+ inline void VisitPointers(HeapObject host, ObjectSlot start,
+ ObjectSlot end) final {
while (start < end) {
VisitPointer(host, start);
++start;
}
}
- inline void VisitPointers(HeapObject* host, MaybeObject** start,
- MaybeObject** end) final {
+ inline void VisitPointers(HeapObject host, MaybeObjectSlot start,
+ MaybeObjectSlot end) final {
while (start < end) {
VisitPointer(host, start);
++start;
}
}
- inline void VisitCodeTarget(Code* host, RelocInfo* rinfo) override {
+ inline void VisitCodeTarget(Code host, RelocInfo* rinfo) override {
DCHECK_EQ(host, rinfo->host());
DCHECK(RelocInfo::IsCodeTargetMode(rinfo->rmode()));
- Code* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
+ Code target = Code::GetCodeFromTargetAddress(rinfo->target_address());
// The target is always in old space, we don't have to record the slot in
// the old-to-new remembered set.
DCHECK(!Heap::InNewSpace(target));
collector_->RecordRelocSlot(host, rinfo, target);
}
- inline void VisitEmbeddedPointer(Code* host, RelocInfo* rinfo) override {
+ inline void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {
DCHECK_EQ(host, rinfo->host());
DCHECK(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
- HeapObject* object = HeapObject::cast(rinfo->target_object());
+ HeapObject object = HeapObject::cast(rinfo->target_object());
GenerationalBarrierForCode(host, rinfo, object);
collector_->RecordRelocSlot(host, rinfo, object);
}
// Entries that are skipped for recording.
- inline void VisitExternalReference(Code* host, RelocInfo* rinfo) final {}
- inline void VisitExternalReference(Foreign* host, Address* p) final {}
- inline void VisitRuntimeEntry(Code* host, RelocInfo* rinfo) final {}
- inline void VisitInternalReference(Code* host, RelocInfo* rinfo) final {}
+ inline void VisitExternalReference(Code host, RelocInfo* rinfo) final {}
+ inline void VisitExternalReference(Foreign host, Address* p) final {}
+ inline void VisitRuntimeEntry(Code host, RelocInfo* rinfo) final {}
+ inline void VisitInternalReference(Code host, RelocInfo* rinfo) final {}
protected:
- inline virtual void RecordMigratedSlot(HeapObject* host, MaybeObject* value,
+ inline virtual void RecordMigratedSlot(HeapObject host, MaybeObject value,
Address slot) {
if (value->IsStrongOrWeak()) {
- Page* p = Page::FromAddress(reinterpret_cast<Address>(value));
+ Page* p = Page::FromAddress(value.ptr());
if (p->InNewSpace()) {
DCHECK_IMPLIES(p->InToSpace(),
p->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION));
RememberedSet<OLD_TO_NEW>::Insert<AccessMode::NON_ATOMIC>(
- Page::FromAddress(slot), slot);
+ MemoryChunk::FromHeapObject(host), slot);
} else if (p->IsEvacuationCandidate()) {
RememberedSet<OLD_TO_OLD>::Insert<AccessMode::NON_ATOMIC>(
- Page::FromAddress(slot), slot);
+ MemoryChunk::FromHeapObject(host), slot);
}
}
}
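
A sketch of the decision RecordMigratedSlot implements, with invented minimal types rather than V8's API: the value's location picks the remembered set, while the slot is always attributed to the host's own chunk, which is why the hunk above switches from Page::FromAddress(slot) to MemoryChunk::FromHeapObject(host).

enum class RememberedSetKind { kNone, kOldToNew, kOldToOld };

struct PageFlags {
  bool in_new_space = false;
  bool is_evacuation_candidate = false;
};

// Classify a migrated slot by where its value lives.
RememberedSetKind ClassifyMigratedSlot(const PageFlags& value_page) {
  if (value_page.in_new_space) return RememberedSetKind::kOldToNew;
  if (value_page.is_evacuation_candidate) return RememberedSetKind::kOldToOld;
  return RememberedSetKind::kNone;
}
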
@@ -1099,7 +1160,7 @@ class MigrationObserver {
explicit MigrationObserver(Heap* heap) : heap_(heap) {}
virtual ~MigrationObserver() = default;
- virtual void Move(AllocationSpace dest, HeapObject* src, HeapObject* dst,
+ virtual void Move(AllocationSpace dest, HeapObject src, HeapObject dst,
int size) = 0;
protected:
@@ -1110,7 +1171,7 @@ class ProfilingMigrationObserver final : public MigrationObserver {
public:
explicit ProfilingMigrationObserver(Heap* heap) : MigrationObserver(heap) {}
- inline void Move(AllocationSpace dest, HeapObject* src, HeapObject* dst,
+ inline void Move(AllocationSpace dest, HeapObject src, HeapObject dst,
int size) final {
if (dest == CODE_SPACE || (dest == OLD_SPACE && dst->IsBytecodeArray())) {
PROFILE(heap_->isolate(),
@@ -1123,7 +1184,7 @@ class ProfilingMigrationObserver final : public MigrationObserver {
class HeapObjectVisitor {
public:
virtual ~HeapObjectVisitor() = default;
- virtual bool Visit(HeapObject* object, int size) = 0;
+ virtual bool Visit(HeapObject object, int size) = 0;
};
class EvacuateVisitorBase : public HeapObjectVisitor {
@@ -1136,21 +1197,21 @@ class EvacuateVisitorBase : public HeapObjectVisitor {
protected:
enum MigrationMode { kFast, kObserved };
- typedef void (*MigrateFunction)(EvacuateVisitorBase* base, HeapObject* dst,
- HeapObject* src, int size,
+ typedef void (*MigrateFunction)(EvacuateVisitorBase* base, HeapObject dst,
+ HeapObject src, int size,
AllocationSpace dest);
template <MigrationMode mode>
- static void RawMigrateObject(EvacuateVisitorBase* base, HeapObject* dst,
- HeapObject* src, int size,
- AllocationSpace dest) {
+ static void RawMigrateObject(EvacuateVisitorBase* base, HeapObject dst,
+ HeapObject src, int size, AllocationSpace dest) {
Address dst_addr = dst->address();
Address src_addr = src->address();
DCHECK(base->heap_->AllowedToBeMigrated(src, dest));
- DCHECK(dest != LO_SPACE);
+ DCHECK_NE(dest, LO_SPACE);
+ DCHECK_NE(dest, CODE_LO_SPACE);
if (dest == OLD_SPACE) {
DCHECK_OBJECT_SIZE(size);
- DCHECK(IsAligned(size, kPointerSize));
+ DCHECK(IsAligned(size, kTaggedSize));
base->heap_->CopyBlock(dst_addr, src_addr, size);
if (mode != MigrationMode::kFast)
base->ExecuteMigrationObservers(dest, src, dst, size);
@@ -1169,8 +1230,7 @@ class EvacuateVisitorBase : public HeapObjectVisitor {
if (mode != MigrationMode::kFast)
base->ExecuteMigrationObservers(dest, src, dst, size);
}
- base::Relaxed_Store(reinterpret_cast<base::AtomicWord*>(src_addr),
- static_cast<base::AtomicWord>(dst_addr));
+ src->set_map_word(MapWord::FromForwardingAddress(dst));
}
EvacuateVisitorBase(Heap* heap, LocalAllocator* local_allocator,
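
The removed Relaxed_Store above wrote the raw forwarding address into the old object; the new code routes it through the map word instead. A simplified sketch of the idea (the tag bit here is an assumption for illustration, not V8's actual encoding):

#include <cstdint>

// After an object is copied, its old first word is reused to point at the
// copy; readers test a low tag bit to distinguish a map from a forwarder.
struct MapWord {
  uintptr_t value;
  static MapWord FromForwardingAddress(uintptr_t dst) {
    return MapWord{dst | 0x1};  // assumed tag; V8's scheme differs in detail
  }
  bool IsForwardingAddress() const { return (value & 0x1) != 0; }
  uintptr_t ToForwardingAddress() const { return value & ~uintptr_t{0x1}; }
};
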
@@ -1181,9 +1241,8 @@ class EvacuateVisitorBase : public HeapObjectVisitor {
migration_function_ = RawMigrateObject<MigrationMode::kFast>;
}
- inline bool TryEvacuateObject(AllocationSpace target_space,
- HeapObject* object, int size,
- HeapObject** target_object) {
+ inline bool TryEvacuateObject(AllocationSpace target_space, HeapObject object,
+ int size, HeapObject* target_object) {
#ifdef VERIFY_HEAP
if (AbortCompactionForTesting(object)) return false;
#endif // VERIFY_HEAP
@@ -1198,25 +1257,25 @@ class EvacuateVisitorBase : public HeapObjectVisitor {
return false;
}
- inline void ExecuteMigrationObservers(AllocationSpace dest, HeapObject* src,
- HeapObject* dst, int size) {
+ inline void ExecuteMigrationObservers(AllocationSpace dest, HeapObject src,
+ HeapObject dst, int size) {
for (MigrationObserver* obs : observers_) {
obs->Move(dest, src, dst, size);
}
}
- inline void MigrateObject(HeapObject* dst, HeapObject* src, int size,
+ inline void MigrateObject(HeapObject dst, HeapObject src, int size,
AllocationSpace dest) {
migration_function_(this, dst, src, size, dest);
}
#ifdef VERIFY_HEAP
- bool AbortCompactionForTesting(HeapObject* object) {
+ bool AbortCompactionForTesting(HeapObject object) {
if (FLAG_stress_compaction) {
const uintptr_t mask = static_cast<uintptr_t>(FLAG_random_seed) &
- kPageAlignmentMask & ~kPointerAlignmentMask;
- if ((object->address() & kPageAlignmentMask) == mask) {
- Page* page = Page::FromAddress(object->address());
+ kPageAlignmentMask & ~kObjectAlignmentMask;
+ if ((object->ptr() & kPageAlignmentMask) == mask) {
+ Page* page = Page::FromHeapObject(object);
if (page->IsFlagSet(Page::COMPACTION_WAS_ABORTED_FOR_TESTING)) {
page->ClearFlag(Page::COMPACTION_WAS_ABORTED_FOR_TESTING);
} else {
@@ -1249,9 +1308,9 @@ class EvacuateNewSpaceVisitor final : public EvacuateVisitorBase {
local_pretenuring_feedback_(local_pretenuring_feedback),
is_incremental_marking_(heap->incremental_marking()->IsMarking()) {}
- inline bool Visit(HeapObject* object, int size) override {
+ inline bool Visit(HeapObject object, int size) override {
if (TryEvacuateWithoutCopy(object)) return true;
- HeapObject* target_object = nullptr;
+ HeapObject target_object;
if (heap_->ShouldBePromoted(object->address()) &&
TryEvacuateObject(OLD_SPACE, object, size, &target_object)) {
promoted_size_ += size;
@@ -1259,7 +1318,7 @@ class EvacuateNewSpaceVisitor final : public EvacuateVisitorBase {
}
heap_->UpdateAllocationSite(object->map(), object,
local_pretenuring_feedback_);
- HeapObject* target = nullptr;
+ HeapObject target;
AllocationSpace space = AllocateTargetObject(object, size, &target);
MigrateObject(HeapObject::cast(target), object, size, space);
semispace_copied_size_ += size;
@@ -1270,19 +1329,17 @@ class EvacuateNewSpaceVisitor final : public EvacuateVisitorBase {
intptr_t semispace_copied_size() { return semispace_copied_size_; }
private:
- inline bool TryEvacuateWithoutCopy(HeapObject* object) {
+ inline bool TryEvacuateWithoutCopy(HeapObject object) {
if (is_incremental_marking_) return false;
- Map* map = object->map();
+ Map map = object->map();
// Some objects can be evacuated without creating a copy.
if (map->visitor_id() == kVisitThinString) {
- HeapObject* actual = ThinString::cast(object)->unchecked_actual();
+ HeapObject actual = ThinString::cast(object)->unchecked_actual();
if (MarkCompactCollector::IsOnEvacuationCandidate(actual)) return false;
- base::Relaxed_Store(
- reinterpret_cast<base::AtomicWord*>(object->address()),
- reinterpret_cast<base::AtomicWord>(
- MapWord::FromForwardingAddress(actual).ToMap()));
+ object->map_slot().Relaxed_Store(
+ MapWord::FromForwardingAddress(actual).ToMap());
return true;
}
// TODO(mlippautz): Handle ConsString.
@@ -1290,8 +1347,8 @@ class EvacuateNewSpaceVisitor final : public EvacuateVisitorBase {
return false;
}
- inline AllocationSpace AllocateTargetObject(HeapObject* old_object, int size,
- HeapObject** target_object) {
+ inline AllocationSpace AllocateTargetObject(HeapObject old_object, int size,
+ HeapObject* target_object) {
AllocationAlignment alignment =
HeapObject::RequiredAlignment(old_object->map());
AllocationSpace space_allocated_in = NEW_SPACE;
@@ -1352,7 +1409,7 @@ class EvacuateNewSpacePageVisitor final : public HeapObjectVisitor {
}
}
- inline bool Visit(HeapObject* object, int size) override {
+ inline bool Visit(HeapObject object, int size) override {
if (mode == NEW_TO_NEW) {
heap_->UpdateAllocationSite(object->map(), object,
local_pretenuring_feedback_);
@@ -1378,11 +1435,10 @@ class EvacuateOldSpaceVisitor final : public EvacuateVisitorBase {
RecordMigratedSlotVisitor* record_visitor)
: EvacuateVisitorBase(heap, local_allocator, record_visitor) {}
- inline bool Visit(HeapObject* object, int size) override {
- HeapObject* target_object = nullptr;
- if (TryEvacuateObject(
- Page::FromAddress(object->address())->owner()->identity(), object,
- size, &target_object)) {
+ inline bool Visit(HeapObject object, int size) override {
+ HeapObject target_object;
+ if (TryEvacuateObject(Page::FromHeapObject(object)->owner()->identity(),
+ object, size, &target_object)) {
DCHECK(object->map_word().IsForwardingAddress());
return true;
}
@@ -1394,7 +1450,7 @@ class EvacuateRecordOnlyVisitor final : public HeapObjectVisitor {
public:
explicit EvacuateRecordOnlyVisitor(Heap* heap) : heap_(heap) {}
- inline bool Visit(HeapObject* object, int size) override {
+ inline bool Visit(HeapObject object, int size) override {
RecordMigratedSlotVisitor visitor(heap_->mark_compact_collector());
object->IterateBodyFast(&visitor);
return true;
@@ -1404,17 +1460,17 @@ class EvacuateRecordOnlyVisitor final : public HeapObjectVisitor {
Heap* heap_;
};
-bool MarkCompactCollector::IsUnmarkedHeapObject(Heap* heap, Object** p) {
- Object* o = *p;
+bool MarkCompactCollector::IsUnmarkedHeapObject(Heap* heap, FullObjectSlot p) {
+ Object o = *p;
if (!o->IsHeapObject()) return false;
- HeapObject* heap_object = HeapObject::cast(o);
+ HeapObject heap_object = HeapObject::cast(o);
return heap->mark_compact_collector()->non_atomic_marking_state()->IsWhite(
heap_object);
}
void MarkCompactCollector::MarkStringTable(
ObjectVisitor* custom_root_body_visitor) {
- StringTable* string_table = heap()->string_table();
+ StringTable string_table = heap()->string_table();
// Mark the string table itself.
if (marking_state()->WhiteToBlack(string_table)) {
// Explicitly mark the prefix.
@@ -1457,7 +1513,6 @@ void MarkCompactCollector::ProcessEphemeronsUntilFixpoint() {
GCTracer::Scope::MC_MARK_WEAK_CLOSURE_EPHEMERON_MARKING);
if (FLAG_parallel_marking) {
- DCHECK(FLAG_concurrent_marking);
heap_->concurrent_marking()->RescheduleTasksIfNeeded();
}
@@ -1471,6 +1526,7 @@ void MarkCompactCollector::ProcessEphemeronsUntilFixpoint() {
work_to_do = work_to_do || !marking_worklist()->IsEmpty() ||
heap()->concurrent_marking()->ephemeron_marked() ||
+ !marking_worklist()->IsEmbedderEmpty() ||
!heap()->local_embedder_heap_tracer()->IsRemoteTracingDone();
++iterations;
}
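
The loop above keeps iterating while any worklist (including the embedder's) can still produce work. The core ephemeron rule that forces a fixpoint, namely that a value is live only once its key is, can be sketched independently of V8's worklists:

#include <set>
#include <utility>
#include <vector>

using Obj = int;  // placeholder object identity for the sketch

// Marks ephemeron values whose keys are live, repeating until no new
// object is discovered (each round may unlock further ephemerons).
void MarkEphemeronsToFixpoint(
    std::set<Obj>* marked, const std::vector<std::pair<Obj, Obj>>& ephemerons) {
  bool work_to_do = true;
  while (work_to_do) {
    work_to_do = false;
    for (const auto& kv : ephemerons) {
      if (marked->count(kv.first) && !marked->count(kv.second)) {
        marked->insert(kv.second);
        work_to_do = true;
      }
    }
  }
}
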
@@ -1516,7 +1572,7 @@ void MarkCompactCollector::ProcessEphemeronsLinear() {
TRACE_GC(heap()->tracer(),
GCTracer::Scope::MC_MARK_WEAK_CLOSURE_EPHEMERON_LINEAR);
CHECK(heap()->concurrent_marking()->IsStopped());
- std::unordered_multimap<HeapObject*, HeapObject*> key_to_values;
+ std::unordered_multimap<HeapObject, HeapObject, Object::Hasher> key_to_values;
Ephemeron ephemeron;
DCHECK(weak_objects_.current_ephemerons.IsEmpty());
@@ -1571,10 +1627,10 @@ void MarkCompactCollector::ProcessEphemeronsLinear() {
// This is the good case: newly_discovered stores all discovered
// objects. Now use key_to_values to see if discovered objects keep more
// objects alive due to ephemeron semantics.
- for (HeapObject* object : ephemeron_marking_.newly_discovered) {
+ for (HeapObject object : ephemeron_marking_.newly_discovered) {
auto range = key_to_values.equal_range(object);
for (auto it = range.first; it != range.second; ++it) {
- HeapObject* value = it->second;
+ HeapObject value = it->second;
MarkObject(object, value);
}
}
@@ -1597,12 +1653,15 @@ void MarkCompactCollector::ProcessEphemeronsLinear() {
void MarkCompactCollector::PerformWrapperTracing() {
if (heap_->local_embedder_heap_tracer()->InUse()) {
- TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WRAPPER_TRACING);
- HeapObject* object;
- while (marking_worklist()->embedder()->Pop(kMainThread, &object)) {
- heap_->TracePossibleWrapper(JSObject::cast(object));
+ TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_EMBEDDER_TRACING);
+ {
+ LocalEmbedderHeapTracer::ProcessingScope scope(
+ heap_->local_embedder_heap_tracer());
+ HeapObject object;
+ while (marking_worklist()->embedder()->Pop(kMainThread, &object)) {
+ scope.TracePossibleWrapper(JSObject::cast(object));
+ }
}
- heap_->local_embedder_heap_tracer()->RegisterWrappersWithRemoteTracer();
heap_->local_embedder_heap_tracer()->Trace(
std::numeric_limits<double>::infinity());
}
@@ -1615,9 +1674,9 @@ void MarkCompactCollector::ProcessMarkingWorklist() {
template <MarkCompactCollector::MarkingWorklistProcessingMode mode>
void MarkCompactCollector::ProcessMarkingWorklistInternal() {
- HeapObject* object;
+ HeapObject object;
MarkCompactMarkingVisitor visitor(this, marking_state());
- while ((object = marking_worklist()->Pop()) != nullptr) {
+ while (!(object = marking_worklist()->Pop()).is_null()) {
DCHECK(!object->IsFiller());
DCHECK(object->IsHeapObject());
DCHECK(heap()->Contains(object));
@@ -1627,14 +1686,13 @@ void MarkCompactCollector::ProcessMarkingWorklistInternal() {
kTrackNewlyDiscoveredObjects) {
AddNewlyDiscovered(object);
}
- Map* map = object->map();
+ Map map = object->map();
MarkObject(object, map);
visitor.Visit(map, object);
}
- DCHECK(marking_worklist()->IsBailoutEmpty());
}
-bool MarkCompactCollector::VisitEphemeron(HeapObject* key, HeapObject* value) {
+bool MarkCompactCollector::VisitEphemeron(HeapObject key, HeapObject value) {
if (marking_state()->IsBlackOrGrey(key)) {
if (marking_state()->WhiteToGrey(value)) {
marking_worklist()->Push(value);
@@ -1668,7 +1726,7 @@ void MarkCompactCollector::ProcessTopOptimizedFrame(ObjectVisitor* visitor) {
return;
}
if (it.frame()->type() == StackFrame::OPTIMIZED) {
- Code* code = it.frame()->LookupCode();
+ Code code = it.frame()->LookupCode();
if (!code->CanDeoptAt(it.frame()->pc())) {
Code::BodyDescriptor::IterateBody(code->map(), code, visitor);
}
@@ -1737,7 +1795,6 @@ void MarkCompactCollector::MarkLiveObjects() {
{
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_MAIN);
if (FLAG_parallel_marking) {
- DCHECK(FLAG_concurrent_marking);
heap_->concurrent_marking()->RescheduleTasksIfNeeded();
}
ProcessMarkingWorklist();
@@ -1756,14 +1813,16 @@ void MarkCompactCollector::MarkLiveObjects() {
// opportunistic as it may not discover graphs that are only reachable
// through ephemerons.
{
- TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WRAPPERS);
+ TRACE_GC(heap()->tracer(),
+ GCTracer::Scope::MC_MARK_EMBEDDER_TRACING_CLOSURE);
do {
// PerformWrapperTracing() also empties the work items collected by
// concurrent markers. As a result this call needs to happen at least
// once.
PerformWrapperTracing();
ProcessMarkingWorklist();
- } while (!heap_->local_embedder_heap_tracer()->IsRemoteTracingDone());
+ } while (!heap_->local_embedder_heap_tracer()->IsRemoteTracingDone() ||
+ !marking_worklist()->IsEmbedderEmpty());
DCHECK(marking_worklist()->IsEmbedderEmpty());
DCHECK(marking_worklist()->IsEmpty());
}
@@ -1807,10 +1866,6 @@ void MarkCompactCollector::MarkLiveObjects() {
{
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WEAK_CLOSURE_HARMONY);
ProcessEphemeronMarking();
- {
- TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WRAPPER_EPILOGUE);
- heap()->local_embedder_heap_tracer()->TraceEpilogue();
- }
DCHECK(marking_worklist()->IsEmbedderEmpty());
DCHECK(marking_worklist()->IsEmpty());
}
@@ -1835,7 +1890,7 @@ void MarkCompactCollector::ClearNonLiveReferences() {
// Prune the string table removing all strings only pointed to by the
// string table. Cannot use string_table() here because the string
// table is marked.
- StringTable* string_table = heap()->string_table();
+ StringTable string_table = heap()->string_table();
InternalizedStringTableCleaner internalized_visitor(heap(), string_table);
string_table->IterateElements(&internalized_visitor);
string_table->ElementsRemoved(internalized_visitor.PointersRemoved());
@@ -1846,6 +1901,16 @@ void MarkCompactCollector::ClearNonLiveReferences() {
}
{
+ TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_FLUSHABLE_BYTECODE);
+ ClearOldBytecodeCandidates();
+ }
+
+ {
+ TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_FLUSHED_JS_FUNCTIONS);
+ ClearFlushedJsFunctions();
+ }
+
+ {
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_WEAK_LISTS);
// Process the weak references.
MarkCompactWeakObjectRetainer mark_compact_object_retainer(
@@ -1859,36 +1924,47 @@ void MarkCompactCollector::ClearNonLiveReferences() {
// cleared.
ClearFullMapTransitions();
}
- ClearWeakReferences();
- MarkDependentCodeForDeoptimization();
+ {
+ TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_WEAK_REFERENCES);
+ ClearWeakReferences();
+ ClearWeakCollections();
+ ClearJSWeakCells();
+ }
- ClearWeakCollections();
+ MarkDependentCodeForDeoptimization();
DCHECK(weak_objects_.transition_arrays.IsEmpty());
DCHECK(weak_objects_.weak_references.IsEmpty());
DCHECK(weak_objects_.weak_objects_in_code.IsEmpty());
+ DCHECK(weak_objects_.js_weak_refs.IsEmpty());
+ DCHECK(weak_objects_.js_weak_cells.IsEmpty());
+ DCHECK(weak_objects_.bytecode_flushing_candidates.IsEmpty());
+ DCHECK(weak_objects_.flushed_js_functions.IsEmpty());
}
void MarkCompactCollector::MarkDependentCodeForDeoptimization() {
- std::pair<HeapObject*, Code*> weak_object_in_code;
+ std::pair<HeapObject, Code> weak_object_in_code;
while (weak_objects_.weak_objects_in_code.Pop(kMainThread,
&weak_object_in_code)) {
- HeapObject* object = weak_object_in_code.first;
- Code* code = weak_object_in_code.second;
+ HeapObject object = weak_object_in_code.first;
+ Code code = weak_object_in_code.second;
if (!non_atomic_marking_state()->IsBlackOrGrey(object) &&
- !code->marked_for_deoptimization()) {
- code->SetMarkedForDeoptimization("weak objects");
- code->InvalidateEmbeddedObjects(heap_);
- have_code_to_deoptimize_ = true;
+ !code->embedded_objects_cleared()) {
+ if (!code->marked_for_deoptimization()) {
+ code->SetMarkedForDeoptimization("weak objects");
+ have_code_to_deoptimize_ = true;
+ }
+ code->ClearEmbeddedObjects(heap_);
+ DCHECK(code->embedded_objects_cleared());
}
}
}
-void MarkCompactCollector::ClearPotentialSimpleMapTransition(Map* dead_target) {
+void MarkCompactCollector::ClearPotentialSimpleMapTransition(Map dead_target) {
DCHECK(non_atomic_marking_state()->IsWhite(dead_target));
- Object* potential_parent = dead_target->constructor_or_backpointer();
+ Object potential_parent = dead_target->constructor_or_backpointer();
if (potential_parent->IsMap()) {
- Map* parent = Map::cast(potential_parent);
+ Map parent = Map::cast(potential_parent);
DisallowHeapAllocation no_gc_obviously;
if (non_atomic_marking_state()->IsBlackOrGrey(parent) &&
TransitionsAccessor(isolate(), parent, &no_gc_obviously)
@@ -1898,14 +1974,14 @@ void MarkCompactCollector::ClearPotentialSimpleMapTransition(Map* dead_target) {
}
}
-void MarkCompactCollector::ClearPotentialSimpleMapTransition(Map* map,
- Map* dead_target) {
+void MarkCompactCollector::ClearPotentialSimpleMapTransition(Map map,
+ Map dead_target) {
DCHECK(!map->is_prototype_map());
DCHECK(!dead_target->is_prototype_map());
DCHECK_EQ(map->raw_transitions(), HeapObjectReference::Weak(dead_target));
// Take ownership of the descriptor array.
int number_of_own_descriptors = map->NumberOfOwnDescriptors();
- DescriptorArray* descriptors = map->instance_descriptors();
+ DescriptorArray descriptors = map->instance_descriptors();
if (descriptors == dead_target->instance_descriptors() &&
number_of_own_descriptors > 0) {
TrimDescriptorArray(map, descriptors);
@@ -1913,21 +1989,120 @@ void MarkCompactCollector::ClearPotentialSimpleMapTransition(Map* map,
}
}
+void MarkCompactCollector::FlushBytecodeFromSFI(
+ SharedFunctionInfo shared_info) {
+ DCHECK(shared_info->HasBytecodeArray());
+
+ // Retain objects required for uncompiled data.
+ String inferred_name = shared_info->inferred_name();
+ int start_position = shared_info->StartPosition();
+ int end_position = shared_info->EndPosition();
+ int function_literal_id = shared_info->FunctionLiteralId(isolate());
+
+ shared_info->DiscardCompiledMetadata(
+ isolate(), [](HeapObject object, ObjectSlot slot, HeapObject target) {
+ RecordSlot(object, slot, target);
+ });
+
+ // The size of the bytecode array should always be at least as large as an
+ // UncompiledData object.
+ STATIC_ASSERT(BytecodeArray::SizeFor(0) >=
+ UncompiledDataWithoutPreparseData::kSize);
+
+ // Replace bytecode array with an uncompiled data array.
+ HeapObject compiled_data = shared_info->GetBytecodeArray();
+ Address compiled_data_start = compiled_data->address();
+ int compiled_data_size = compiled_data->Size();
+ MemoryChunk* chunk = MemoryChunk::FromAddress(compiled_data_start);
+
+ // Clear any recorded slots for the compiled data as being invalid.
+ RememberedSet<OLD_TO_NEW>::RemoveRange(
+ chunk, compiled_data_start, compiled_data_start + compiled_data_size,
+ SlotSet::PREFREE_EMPTY_BUCKETS);
+ RememberedSet<OLD_TO_OLD>::RemoveRange(
+ chunk, compiled_data_start, compiled_data_start + compiled_data_size,
+ SlotSet::PREFREE_EMPTY_BUCKETS);
+
+ // Swap the map, using set_map_after_allocation to avoid verify heap checks
+ // which are not necessary since we are doing this during the GC atomic pause.
+ compiled_data->set_map_after_allocation(
+ ReadOnlyRoots(heap()).uncompiled_data_without_preparse_data_map(),
+ SKIP_WRITE_BARRIER);
+
+ // Create a filler object for any left over space in the bytecode array.
+ if (!heap()->IsLargeObject(compiled_data)) {
+ heap()->CreateFillerObjectAt(
+ compiled_data->address() + UncompiledDataWithoutPreparseData::kSize,
+ compiled_data_size - UncompiledDataWithoutPreparseData::kSize,
+ ClearRecordedSlots::kNo);
+ }
+
+ // Initialize the uncompiled data.
+ UncompiledData uncompiled_data = UncompiledData::cast(compiled_data);
+ UncompiledData::Initialize(
+ uncompiled_data, inferred_name, start_position, end_position,
+ function_literal_id,
+ [](HeapObject object, ObjectSlot slot, HeapObject target) {
+ RecordSlot(object, slot, target);
+ });
+
+ // Mark the uncompiled data as black, and ensure all fields have already been
+ // marked.
+ DCHECK(non_atomic_marking_state()->IsBlackOrGrey(inferred_name));
+ non_atomic_marking_state()->WhiteToBlack(uncompiled_data);
+
+ // Use the raw function data setter to avoid validity checks, since we're
+ // performing the unusual task of decompiling.
+ shared_info->set_function_data(uncompiled_data);
+ DCHECK(!shared_info->is_compiled());
+}
+
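
FlushBytecodeFromSFI reuses the dead BytecodeArray's storage: the STATIC_ASSERT guarantees the new UncompiledData fits, the map swap retypes the object, and a filler covers the slack so heap iteration stays valid. A schematic of the shrink-in-place step (all names and sizes here are illustrative only):

#include <cassert>
#include <cstring>

// Illustrative only: retype a dead object in place, then pad the tail.
// Real V8 also drops remembered-set entries for the old body first.
struct FakeHeap {
  void CreateFiller(char* at, int size) { std::memset(at, 0, size); }
};

void ShrinkInPlace(FakeHeap* heap, char* object, int old_size, int new_size) {
  assert(new_size <= old_size);  // mirrors the STATIC_ASSERT above
  // 1. (elsewhere) invalidate recorded slots covering the old body
  // 2. swap the map word to the smaller type (not modeled here)
  // 3. fill the leftover tail so heap iteration stays valid
  heap->CreateFiller(object + new_size, old_size - new_size);
}
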
+void MarkCompactCollector::ClearOldBytecodeCandidates() {
+ DCHECK(FLAG_flush_bytecode ||
+ weak_objects_.bytecode_flushing_candidates.IsEmpty());
+ SharedFunctionInfo flushing_candidate;
+ while (weak_objects_.bytecode_flushing_candidates.Pop(kMainThread,
+ &flushing_candidate)) {
+ // If the BytecodeArray is dead, flush it, which will replace the field with
+ // an uncompiled data object.
+ if (!non_atomic_marking_state()->IsBlackOrGrey(
+ flushing_candidate->GetBytecodeArray())) {
+ FlushBytecodeFromSFI(flushing_candidate);
+ }
+
+ // Now record the slot, which has either been updated to an uncompiled data,
+ // or is the BytecodeArray which is still alive.
+ ObjectSlot slot = HeapObject::RawField(
+ flushing_candidate, SharedFunctionInfo::kFunctionDataOffset);
+ RecordSlot(flushing_candidate, slot, HeapObject::cast(*slot));
+ }
+}
+
+void MarkCompactCollector::ClearFlushedJsFunctions() {
+ DCHECK(FLAG_flush_bytecode || weak_objects_.flushed_js_functions.IsEmpty());
+ JSFunction flushed_js_function;
+ while (weak_objects_.flushed_js_functions.Pop(kMainThread,
+ &flushed_js_function)) {
+ flushed_js_function->ResetIfBytecodeFlushed();
+ }
+}
+
void MarkCompactCollector::ClearFullMapTransitions() {
- TransitionArray* array;
+ TransitionArray array;
while (weak_objects_.transition_arrays.Pop(kMainThread, &array)) {
int num_transitions = array->number_of_entries();
if (num_transitions > 0) {
- Map* map;
+ Map map;
// The array might contain "undefined" elements because it's not yet
// filled. Allow it.
if (array->GetTargetIfExists(0, isolate(), &map)) {
- DCHECK_NOT_NULL(map); // Weak pointers aren't cleared yet.
- Map* parent = Map::cast(map->constructor_or_backpointer());
+ DCHECK(!map.is_null()); // Weak pointers aren't cleared yet.
+ Map parent = Map::cast(map->constructor_or_backpointer());
bool parent_is_alive =
non_atomic_marking_state()->IsBlackOrGrey(parent);
- DescriptorArray* descriptors =
- parent_is_alive ? parent->instance_descriptors() : nullptr;
+ DescriptorArray descriptors = parent_is_alive
+ ? parent->instance_descriptors()
+ : DescriptorArray();
bool descriptors_owner_died =
CompactTransitionArray(parent, array, descriptors);
if (descriptors_owner_died) {
@@ -1938,32 +2113,32 @@ void MarkCompactCollector::ClearFullMapTransitions() {
}
}
-bool MarkCompactCollector::CompactTransitionArray(
- Map* map, TransitionArray* transitions, DescriptorArray* descriptors) {
+bool MarkCompactCollector::CompactTransitionArray(Map map,
+ TransitionArray transitions,
+ DescriptorArray descriptors) {
DCHECK(!map->is_prototype_map());
int num_transitions = transitions->number_of_entries();
bool descriptors_owner_died = false;
int transition_index = 0;
// Compact all live transitions to the left.
for (int i = 0; i < num_transitions; ++i) {
- Map* target = transitions->GetTarget(i);
+ Map target = transitions->GetTarget(i);
DCHECK_EQ(target->constructor_or_backpointer(), map);
if (non_atomic_marking_state()->IsWhite(target)) {
- if (descriptors != nullptr &&
+ if (!descriptors.is_null() &&
target->instance_descriptors() == descriptors) {
DCHECK(!target->is_prototype_map());
descriptors_owner_died = true;
}
} else {
if (i != transition_index) {
- Name* key = transitions->GetKey(i);
+ Name key = transitions->GetKey(i);
transitions->SetKey(transition_index, key);
- HeapObjectReference** key_slot =
- transitions->GetKeySlot(transition_index);
+ HeapObjectSlot key_slot = transitions->GetKeySlot(transition_index);
RecordSlot(transitions, key_slot, key);
- MaybeObject* raw_target = transitions->GetRawTarget(i);
+ MaybeObject raw_target = transitions->GetRawTarget(i);
transitions->SetRawTarget(transition_index, raw_target);
- HeapObjectReference** target_slot =
+ HeapObjectSlot target_slot =
transitions->GetTargetSlot(transition_index);
RecordSlot(transitions, target_slot, raw_target->GetHeapObject());
}
@@ -1988,26 +2163,44 @@ bool MarkCompactCollector::CompactTransitionArray(
return descriptors_owner_died;
}
-void MarkCompactCollector::TrimDescriptorArray(Map* map,
- DescriptorArray* descriptors) {
+void MarkCompactCollector::RightTrimDescriptorArray(DescriptorArray array,
+ int descriptors_to_trim) {
+ int old_nof_all_descriptors = array->number_of_all_descriptors();
+ int new_nof_all_descriptors = old_nof_all_descriptors - descriptors_to_trim;
+ DCHECK_LT(0, descriptors_to_trim);
+ DCHECK_LE(0, new_nof_all_descriptors);
+ Address start = array->GetDescriptorSlot(new_nof_all_descriptors).address();
+ Address end = array->GetDescriptorSlot(old_nof_all_descriptors).address();
+ RememberedSet<OLD_TO_NEW>::RemoveRange(MemoryChunk::FromHeapObject(array),
+ start, end,
+ SlotSet::PREFREE_EMPTY_BUCKETS);
+ RememberedSet<OLD_TO_OLD>::RemoveRange(MemoryChunk::FromHeapObject(array),
+ start, end,
+ SlotSet::PREFREE_EMPTY_BUCKETS);
+ heap()->CreateFillerObjectAt(start, static_cast<int>(end - start),
+ ClearRecordedSlots::kNo);
+ array->set_number_of_all_descriptors(new_nof_all_descriptors);
+}
+
+void MarkCompactCollector::TrimDescriptorArray(Map map,
+ DescriptorArray descriptors) {
int number_of_own_descriptors = map->NumberOfOwnDescriptors();
if (number_of_own_descriptors == 0) {
DCHECK(descriptors == ReadOnlyRoots(heap_).empty_descriptor_array());
return;
}
-
- int number_of_descriptors = descriptors->number_of_descriptors_storage();
- int to_trim = number_of_descriptors - number_of_own_descriptors;
+ // TODO(ulan): Trim only if slack is greater than some percentage threshold.
+ int to_trim =
+ descriptors->number_of_all_descriptors() - number_of_own_descriptors;
if (to_trim > 0) {
- heap_->RightTrimWeakFixedArray(descriptors,
- to_trim * DescriptorArray::kEntrySize);
- descriptors->SetNumberOfDescriptors(number_of_own_descriptors);
+ descriptors->set_number_of_descriptors(number_of_own_descriptors);
+ RightTrimDescriptorArray(descriptors, to_trim);
TrimEnumCache(map, descriptors);
descriptors->Sort();
if (FLAG_unbox_double_fields) {
- LayoutDescriptor* layout_descriptor = map->layout_descriptor();
+ LayoutDescriptor layout_descriptor = map->layout_descriptor();
layout_descriptor = layout_descriptor->Trim(heap_, map, descriptors,
number_of_own_descriptors);
SLOW_DCHECK(layout_descriptor->IsConsistentWithMap(map, true));
@@ -2017,21 +2210,20 @@ void MarkCompactCollector::TrimDescriptorArray(Map* map,
map->set_owns_descriptors(true);
}
-void MarkCompactCollector::TrimEnumCache(Map* map,
- DescriptorArray* descriptors) {
+void MarkCompactCollector::TrimEnumCache(Map map, DescriptorArray descriptors) {
int live_enum = map->EnumLength();
if (live_enum == kInvalidEnumCacheSentinel) {
live_enum = map->NumberOfEnumerableProperties();
}
if (live_enum == 0) return descriptors->ClearEnumCache();
- EnumCache* enum_cache = descriptors->GetEnumCache();
+ EnumCache enum_cache = descriptors->enum_cache();
- FixedArray* keys = enum_cache->keys();
+ FixedArray keys = enum_cache->keys();
int to_trim = keys->length() - live_enum;
if (to_trim <= 0) return;
heap_->RightTrimFixedArray(keys, to_trim);
- FixedArray* indices = enum_cache->indices();
+ FixedArray indices = enum_cache->indices();
to_trim = indices->length() - live_enum;
if (to_trim <= 0) return;
heap_->RightTrimFixedArray(indices, to_trim);
@@ -2039,13 +2231,13 @@ void MarkCompactCollector::TrimEnumCache(Map* map,
void MarkCompactCollector::ClearWeakCollections() {
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_WEAK_COLLECTIONS);
- EphemeronHashTable* table;
+ EphemeronHashTable table;
while (weak_objects_.ephemeron_hash_tables.Pop(kMainThread, &table)) {
for (int i = 0; i < table->Capacity(); i++) {
- HeapObject* key = HeapObject::cast(table->KeyAt(i));
+ HeapObject key = HeapObject::cast(table->KeyAt(i));
#ifdef VERIFY_HEAP
- Object* value = table->ValueAt(i);
+ Object value = table->ValueAt(i);
if (value->IsHeapObject()) {
CHECK_IMPLIES(
@@ -2062,22 +2254,81 @@ void MarkCompactCollector::ClearWeakCollections() {
void MarkCompactCollector::ClearWeakReferences() {
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_WEAK_REFERENCES);
- std::pair<HeapObject*, HeapObjectReference**> slot;
+ std::pair<HeapObject, HeapObjectSlot> slot;
+ HeapObjectReference cleared_weak_ref =
+ HeapObjectReference::ClearedValue(isolate());
while (weak_objects_.weak_references.Pop(kMainThread, &slot)) {
- HeapObject* value;
- HeapObjectReference** location = slot.second;
+ HeapObject value;
+ // The slot could have been overwritten, so we have to treat it
+ // as MaybeObjectSlot.
+ MaybeObjectSlot location(slot.second);
if ((*location)->GetHeapObjectIfWeak(&value)) {
DCHECK(!value->IsCell());
if (non_atomic_marking_state()->IsBlackOrGrey(value)) {
// The value of the weak reference is alive.
- RecordSlot(slot.first, location, value);
+ RecordSlot(slot.first, HeapObjectSlot(location), value);
} else {
if (value->IsMap()) {
// The map is non-live.
ClearPotentialSimpleMapTransition(Map::cast(value));
}
- *location = HeapObjectReference::ClearedValue();
+ location.store(cleared_weak_ref);
+ }
+ }
+ }
+}
+
+void MarkCompactCollector::ClearJSWeakCells() {
+ if (!FLAG_harmony_weak_refs) {
+ return;
+ }
+ JSWeakRef weak_ref;
+ while (weak_objects_.js_weak_refs.Pop(kMainThread, &weak_ref)) {
+ // We do not insert cleared weak cells into the list, so the value
+ // cannot be undefined here.
+ JSReceiver target = JSReceiver::cast(weak_ref->target());
+ if (!non_atomic_marking_state()->IsBlackOrGrey(target)) {
+ weak_ref->set_target(ReadOnlyRoots(isolate()).undefined_value());
+ } else {
+ // The value of the JSWeakRef is alive.
+ ObjectSlot slot =
+ HeapObject::RawField(weak_ref, JSWeakRef::kTargetOffset);
+ RecordSlot(weak_ref, slot, target);
+ }
+ }
+ JSWeakCell weak_cell;
+ while (weak_objects_.js_weak_cells.Pop(kMainThread, &weak_cell)) {
+ // We do not insert cleared weak cells into the list, so the value
+ // cannot be a Smi here.
+ HeapObject target = HeapObject::cast(weak_cell->target());
+ if (!non_atomic_marking_state()->IsBlackOrGrey(target)) {
+ // The value of the JSWeakCell is dead.
+ JSWeakFactory weak_factory = JSWeakFactory::cast(weak_cell->factory());
+ if (!weak_factory->scheduled_for_cleanup()) {
+ heap()->AddDirtyJSWeakFactory(
+ weak_factory,
+ [](HeapObject object, ObjectSlot slot, Object target) {
+ if (target->IsHeapObject()) {
+ RecordSlot(object, slot, HeapObject::cast(target));
+ }
+ });
}
+ // We're modifying the pointers in JSWeakCell and JSWeakFactory during GC;
+ // thus we need to record the slots it writes. The normal write barrier is
+ // not enough, since it's disabled before GC.
+ weak_cell->Nullify(isolate(),
+ [](HeapObject object, ObjectSlot slot, Object target) {
+ if (target->IsHeapObject()) {
+ RecordSlot(object, slot, HeapObject::cast(target));
+ }
+ });
+ DCHECK(weak_factory->NeedsCleanup());
+ DCHECK(weak_factory->scheduled_for_cleanup());
+ } else {
+ // The value of the JSWeakCell is alive.
+ ObjectSlot slot =
+ HeapObject::RawField(weak_cell, JSWeakCell::kTargetOffset);
+ RecordSlot(weak_cell, slot, HeapObject::cast(*slot));
}
}
}
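As an aside, the clear-or-record pattern used by ClearWeakReferences() and ClearJSWeakCells() above can be modeled outside V8. The following is a minimal standalone C++ sketch, not V8 code; WeakSlot, kClearedSentinel, and the marked set are invented stand-ins for HeapObjectSlot, HeapObjectReference::ClearedValue(), and the marking state.

#include <cstdint>
#include <unordered_set>
#include <utility>
#include <vector>

using Address = uintptr_t;
constexpr Address kClearedSentinel = 0x3;  // stand-in for ClearedValue()

struct WeakSlot {
  Address* location;  // where the weak reference is stored
};

void ClearOrRecord(std::vector<std::pair<Address, WeakSlot>>& worklist,
                   const std::unordered_set<Address>& marked,
                   std::vector<Address*>& recorded_slots) {
  for (auto& entry : worklist) {
    WeakSlot slot = entry.second;
    Address target = *slot.location;
    if (marked.count(target) != 0) {
      // The target survived marking: remember the slot for later updating.
      recorded_slots.push_back(slot.location);
    } else {
      // The target is dead: overwrite the reference with the sentinel.
      *slot.location = kClearedSentinel;
    }
  }
}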
@@ -2090,14 +2341,25 @@ void MarkCompactCollector::AbortWeakObjects() {
weak_objects_.discovered_ephemerons.Clear();
weak_objects_.weak_references.Clear();
weak_objects_.weak_objects_in_code.Clear();
+ weak_objects_.js_weak_refs.Clear();
+ weak_objects_.js_weak_cells.Clear();
+ weak_objects_.bytecode_flushing_candidates.Clear();
+ weak_objects_.flushed_js_functions.Clear();
}
-void MarkCompactCollector::RecordRelocSlot(Code* host, RelocInfo* rinfo,
- Object* target) {
- Page* target_page = Page::FromAddress(reinterpret_cast<Address>(target));
- Page* source_page = Page::FromAddress(reinterpret_cast<Address>(host));
+bool MarkCompactCollector::IsOnEvacuationCandidate(MaybeObject obj) {
+ return Page::FromAddress(obj.ptr())->IsEvacuationCandidate();
+}
+
+MarkCompactCollector::RecordRelocSlotInfo
+MarkCompactCollector::PrepareRecordRelocSlot(Code host, RelocInfo* rinfo,
+ HeapObject target) {
+ RecordRelocSlotInfo result;
+ result.should_record = false;
+ Page* target_page = Page::FromHeapObject(target);
+ Page* source_page = Page::FromHeapObject(host);
if (target_page->IsEvacuationCandidate() &&
- (rinfo->host() == nullptr ||
+ (rinfo->host().is_null() ||
!source_page->ShouldSkipEvacuationSlotRecording())) {
RelocInfo::Mode rmode = rinfo->rmode();
Address addr = rinfo->pc();
@@ -2111,29 +2373,91 @@ void MarkCompactCollector::RecordRelocSlot(Code* host, RelocInfo* rinfo,
slot_type = OBJECT_SLOT;
}
}
- RememberedSet<OLD_TO_OLD>::InsertTyped(
- source_page, reinterpret_cast<Address>(host), slot_type, addr);
+ uintptr_t offset = addr - source_page->address();
+ DCHECK_LT(offset, static_cast<uintptr_t>(TypedSlotSet::kMaxOffset));
+ result.should_record = true;
+ result.memory_chunk = source_page;
+ result.slot_type = slot_type;
+ result.offset = static_cast<uint32_t>(offset);
+ }
+ return result;
+}
+
+void MarkCompactCollector::RecordRelocSlot(Code host, RelocInfo* rinfo,
+ HeapObject target) {
+ RecordRelocSlotInfo info = PrepareRecordRelocSlot(host, rinfo, target);
+ if (info.should_record) {
+ RememberedSet<OLD_TO_OLD>::InsertTyped(info.memory_chunk, info.slot_type,
+ info.offset);
}
}
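PrepareRecordRelocSlot() above compresses a typed slot into (memory chunk, slot type, 32-bit offset), with the offset taken relative to the page start so it fits TypedSlotSet's compact encoding. A rough standalone sketch of that encoding, with invented names and a hypothetical page size:

#include <cassert>
#include <cstdint>

constexpr uintptr_t kPageSize = 1u << 19;  // hypothetical page size

struct TypedSlotRecord {
  uintptr_t page_start;  // identifies the source memory chunk
  int slot_type;
  uint32_t offset;       // slot address relative to page_start
};

TypedSlotRecord MakeRecord(uintptr_t page_start, int slot_type,
                           uintptr_t slot_address) {
  uintptr_t offset = slot_address - page_start;
  assert(offset < kPageSize);  // must fit the compact 32-bit encoding
  return {page_start, slot_type, static_cast<uint32_t>(offset)};
}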
-template <AccessMode access_mode>
-static inline SlotCallbackResult UpdateSlot(
- MaybeObject** slot, MaybeObject* old, HeapObject* heap_obj,
- HeapObjectReferenceType reference_type) {
+namespace {
+
+// The missing specialization MakeSlotValue<FullObjectSlot, WEAK>() turns an
+// attempt to store a weak reference into a strong-only slot into a
+// compilation error.
+template <typename TSlot, HeapObjectReferenceType reference_type>
+typename TSlot::TObject MakeSlotValue(HeapObject heap_object);
+
+template <>
+Object MakeSlotValue<ObjectSlot, HeapObjectReferenceType::STRONG>(
+ HeapObject heap_object) {
+ return heap_object;
+}
+
+template <>
+MaybeObject MakeSlotValue<MaybeObjectSlot, HeapObjectReferenceType::STRONG>(
+ HeapObject heap_object) {
+ return HeapObjectReference::Strong(heap_object);
+}
+
+template <>
+MaybeObject MakeSlotValue<MaybeObjectSlot, HeapObjectReferenceType::WEAK>(
+ HeapObject heap_object) {
+ return HeapObjectReference::Weak(heap_object);
+}
+
+#ifdef V8_COMPRESS_POINTERS
+template <>
+Object MakeSlotValue<FullObjectSlot, HeapObjectReferenceType::STRONG>(
+ HeapObject heap_object) {
+ return heap_object;
+}
+
+template <>
+MaybeObject MakeSlotValue<FullMaybeObjectSlot, HeapObjectReferenceType::STRONG>(
+ HeapObject heap_object) {
+ return HeapObjectReference::Strong(heap_object);
+}
+
+// The following specialization
+// MakeSlotValue<FullMaybeObjectSlot, HeapObjectReferenceType::WEAK>()
+// is not used.
+#endif
+
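The MakeSlotValue() arrangement above relies on a general C++ idiom: declare the primary function template without a definition and define only the legal specializations, so any illegal combination has no definition and the build fails. A self-contained sketch of the idiom (invented names; in this standalone form the failure surfaces at link time):

#include <iostream>

enum class RefType { kStrong, kWeak };

// Primary template: declared, deliberately never defined.
template <typename TValue, RefType ref>
TValue MakeValue(int payload);

// The only legal combination gets a definition.
template <>
int MakeValue<int, RefType::kStrong>(int payload) {
  return payload;
}

int main() {
  std::cout << MakeValue<int, RefType::kStrong>(42) << "\n";  // fine
  // MakeValue<int, RefType::kWeak>(42);  // no definition -> build error
  return 0;
}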
+template <AccessMode access_mode, HeapObjectReferenceType reference_type,
+ typename TSlot>
+static inline SlotCallbackResult UpdateSlot(TSlot slot,
+ typename TSlot::TObject old,
+ HeapObject heap_obj) {
+ static_assert(
+ std::is_same<TSlot, FullObjectSlot>::value ||
+ std::is_same<TSlot, ObjectSlot>::value ||
+ std::is_same<TSlot, FullMaybeObjectSlot>::value ||
+ std::is_same<TSlot, MaybeObjectSlot>::value,
+ "Only [Full]ObjectSlot and [Full]MaybeObjectSlot are expected here");
MapWord map_word = heap_obj->map_word();
if (map_word.IsForwardingAddress()) {
DCHECK(Heap::InFromSpace(heap_obj) ||
MarkCompactCollector::IsOnEvacuationCandidate(heap_obj) ||
- Page::FromAddress(heap_obj->address())
- ->IsFlagSet(Page::COMPACTION_WAS_ABORTED));
- MaybeObject* target =
- reference_type == HeapObjectReferenceType::WEAK
- ? HeapObjectReference::Weak(map_word.ToForwardingAddress())
- : HeapObjectReference::Strong(map_word.ToForwardingAddress());
+ Page::FromHeapObject(heap_obj)->IsFlagSet(
+ Page::COMPACTION_WAS_ABORTED));
+ typename TSlot::TObject target =
+ MakeSlotValue<TSlot, reference_type>(map_word.ToForwardingAddress());
if (access_mode == AccessMode::NON_ATOMIC) {
- *slot = target;
+ slot.store(target);
} else {
- base::AsAtomicPointer::Release_CompareAndSwap(slot, old, target);
+ slot.Release_CompareAndSwap(old, target);
}
DCHECK(!Heap::InFromSpace(target));
DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(target));
@@ -2144,113 +2468,113 @@ static inline SlotCallbackResult UpdateSlot(
return REMOVE_SLOT;
}
-template <AccessMode access_mode>
-static inline SlotCallbackResult UpdateSlot(MaybeObject** slot) {
- MaybeObject* obj = base::AsAtomicPointer::Relaxed_Load(slot);
- HeapObject* heap_obj;
- if (obj->GetHeapObjectIfWeak(&heap_obj)) {
- UpdateSlot<access_mode>(slot, obj, heap_obj, HeapObjectReferenceType::WEAK);
+template <AccessMode access_mode, typename TSlot>
+static inline SlotCallbackResult UpdateSlot(TSlot slot) {
+ typename TSlot::TObject obj = slot.Relaxed_Load();
+ HeapObject heap_obj;
+ if (TSlot::kCanBeWeak && obj->GetHeapObjectIfWeak(&heap_obj)) {
+ UpdateSlot<access_mode, HeapObjectReferenceType::WEAK>(slot, obj, heap_obj);
} else if (obj->GetHeapObjectIfStrong(&heap_obj)) {
- return UpdateSlot<access_mode>(slot, obj, heap_obj,
- HeapObjectReferenceType::STRONG);
+ return UpdateSlot<access_mode, HeapObjectReferenceType::STRONG>(slot, obj,
+ heap_obj);
}
return REMOVE_SLOT;
}
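Both UpdateSlot() overloads above follow the same forwarding-pointer protocol: load the slot, and if the referenced object has moved, rewrite the slot to the forwarding address, either with a plain store or a release CAS. A standalone model of that protocol, with a std::unordered_map standing in for the map-word forwarding encoding:

#include <atomic>
#include <cstdint>
#include <unordered_map>

using Address = uintptr_t;

// Maps an object's old address to its post-evacuation address.
using ForwardingTable = std::unordered_map<Address, Address>;

void UpdateSlotNonAtomic(Address* slot, const ForwardingTable& forwarding) {
  auto it = forwarding.find(*slot);
  if (it != forwarding.end()) *slot = it->second;  // follow the forward
}

void UpdateSlotAtomic(std::atomic<Address>* slot,
                      const ForwardingTable& forwarding) {
  Address old_value = slot->load(std::memory_order_relaxed);
  auto it = forwarding.find(old_value);
  if (it == forwarding.end()) return;
  // Another thread may have updated the slot already; the CAS keeps the
  // update idempotent, mirroring Release_CompareAndSwap above.
  slot->compare_exchange_strong(old_value, it->second,
                                std::memory_order_release);
}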
-template <AccessMode access_mode>
-static inline SlotCallbackResult UpdateStrongSlot(MaybeObject** maybe_slot) {
- DCHECK((*maybe_slot)->IsSmi() || (*maybe_slot)->IsStrong());
- Object** slot = reinterpret_cast<Object**>(maybe_slot);
- Object* obj = base::AsAtomicPointer::Relaxed_Load(slot);
- if (obj->IsHeapObject()) {
- HeapObject* heap_obj = HeapObject::cast(obj);
- return UpdateSlot<access_mode>(maybe_slot, MaybeObject::FromObject(obj),
- heap_obj, HeapObjectReferenceType::STRONG);
+template <AccessMode access_mode, typename TSlot>
+static inline SlotCallbackResult UpdateStrongSlot(TSlot slot) {
+ DCHECK(!HasWeakHeapObjectTag((*slot).ptr()));
+ typename TSlot::TObject obj = slot.Relaxed_Load();
+ HeapObject heap_obj;
+ if (obj.GetHeapObject(&heap_obj)) {
+ return UpdateSlot<access_mode, HeapObjectReferenceType::STRONG>(slot, obj,
+ heap_obj);
}
return REMOVE_SLOT;
}
+} // namespace
+
// Visitor for updating root pointers and to-space pointers.
// It does not expect to encounter pointers to dead objects.
-// TODO(ulan): Remove code object specific functions. This visitor
-// nevers visits code objects.
class PointersUpdatingVisitor : public ObjectVisitor, public RootVisitor {
public:
- explicit PointersUpdatingVisitor(Heap* heap) : heap_(heap) {}
+ PointersUpdatingVisitor() {}
- void VisitPointer(HeapObject* host, Object** p) override {
+ void VisitPointer(HeapObject host, ObjectSlot p) override {
UpdateStrongSlotInternal(p);
}
- void VisitPointer(HeapObject* host, MaybeObject** p) override {
+ void VisitPointer(HeapObject host, MaybeObjectSlot p) override {
UpdateSlotInternal(p);
}
- void VisitPointers(HeapObject* host, Object** start, Object** end) override {
- for (Object** p = start; p < end; p++) {
+ void VisitPointers(HeapObject host, ObjectSlot start,
+ ObjectSlot end) override {
+ for (ObjectSlot p = start; p < end; ++p) {
UpdateStrongSlotInternal(p);
}
}
- void VisitPointers(HeapObject* host, MaybeObject** start,
- MaybeObject** end) final {
- for (MaybeObject** p = start; p < end; p++) {
+ void VisitPointers(HeapObject host, MaybeObjectSlot start,
+ MaybeObjectSlot end) final {
+ for (MaybeObjectSlot p = start; p < end; ++p) {
UpdateSlotInternal(p);
}
}
void VisitRootPointer(Root root, const char* description,
- Object** p) override {
- UpdateStrongSlotInternal(p);
+ FullObjectSlot p) override {
+ UpdateRootSlotInternal(p);
}
- void VisitRootPointers(Root root, const char* description, Object** start,
- Object** end) override {
- for (Object** p = start; p < end; p++) UpdateStrongSlotInternal(p);
+ void VisitRootPointers(Root root, const char* description,
+ FullObjectSlot start, FullObjectSlot end) override {
+ for (FullObjectSlot p = start; p < end; ++p) {
+ UpdateRootSlotInternal(p);
+ }
}
- void VisitEmbeddedPointer(Code* host, RelocInfo* rinfo) override {
- UpdateTypedSlotHelper::UpdateEmbeddedPointer(
- heap_, rinfo, UpdateStrongMaybeObjectSlotInternal);
+ void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {
+ // This visitor never visits code objects.
+ UNREACHABLE();
}
- void VisitCodeTarget(Code* host, RelocInfo* rinfo) override {
- UpdateTypedSlotHelper::UpdateCodeTarget(
- rinfo, UpdateStrongMaybeObjectSlotInternal);
+ void VisitCodeTarget(Code host, RelocInfo* rinfo) override {
+ // This visitor never visits code objects.
+ UNREACHABLE();
}
private:
+ static inline SlotCallbackResult UpdateRootSlotInternal(FullObjectSlot slot) {
+ return UpdateStrongSlot<AccessMode::NON_ATOMIC>(slot);
+ }
+
static inline SlotCallbackResult UpdateStrongMaybeObjectSlotInternal(
- MaybeObject** slot) {
- DCHECK(!(*slot)->IsWeakOrCleared());
- return UpdateStrongSlotInternal(reinterpret_cast<Object**>(slot));
+ MaybeObjectSlot slot) {
+ return UpdateStrongSlot<AccessMode::NON_ATOMIC>(slot);
}
- static inline SlotCallbackResult UpdateStrongSlotInternal(Object** slot) {
- DCHECK(!HasWeakHeapObjectTag(*slot));
- return UpdateStrongSlot<AccessMode::NON_ATOMIC>(
- reinterpret_cast<MaybeObject**>(slot));
+ static inline SlotCallbackResult UpdateStrongSlotInternal(ObjectSlot slot) {
+ return UpdateStrongSlot<AccessMode::NON_ATOMIC>(slot);
}
- static inline SlotCallbackResult UpdateSlotInternal(MaybeObject** slot) {
+ static inline SlotCallbackResult UpdateSlotInternal(MaybeObjectSlot slot) {
return UpdateSlot<AccessMode::NON_ATOMIC>(slot);
}
-
- Heap* heap_;
};
-static String* UpdateReferenceInExternalStringTableEntry(Heap* heap,
- Object** p) {
+static String UpdateReferenceInExternalStringTableEntry(Heap* heap,
+ FullObjectSlot p) {
MapWord map_word = HeapObject::cast(*p)->map_word();
if (map_word.IsForwardingAddress()) {
- String* new_string = String::cast(map_word.ToForwardingAddress());
+ String new_string = String::cast(map_word.ToForwardingAddress());
if (new_string->IsExternalString()) {
MemoryChunk::MoveExternalBackingStoreBytes(
ExternalBackingStoreType::kExternalString,
- Page::FromAddress(reinterpret_cast<Address>(*p)),
- Page::FromHeapObject(new_string),
+ Page::FromAddress((*p).ptr()), Page::FromHeapObject(new_string),
ExternalString::cast(new_string)->ExternalPayloadSize());
}
return new_string;
@@ -2270,6 +2594,8 @@ void MarkCompactCollector::EvacuatePrologue() {
new_space->Flip();
new_space->ResetLinearAllocationArea();
+ heap()->new_lo_space()->Flip();
+
// Old space.
DCHECK(old_space_evacuation_pages_.empty());
old_space_evacuation_pages_ = std::move(evacuation_candidates_);
@@ -2283,6 +2609,8 @@ void MarkCompactCollector::EvacuateEpilogue() {
heap()->new_space()->set_age_mark(heap()->new_space()->top());
// Deallocate unmarked large objects.
heap()->lo_space()->FreeUnmarkedObjects();
+ heap()->code_lo_space()->FreeUnmarkedObjects();
+ heap()->new_lo_space()->FreeUnmarkedObjects();
// Old space. Deallocate evacuated candidate pages.
ReleaseEvacuationCandidates();
// Give pages that are queued to be freed back to the OS.
@@ -2318,10 +2646,11 @@ class Evacuator : public Malloced {
// NewSpacePages with more live bytes than this threshold qualify for fast
// evacuation.
- static int PageEvacuationThreshold() {
+ static intptr_t NewSpacePageEvacuationThreshold() {
if (FLAG_page_promotion)
- return FLAG_page_promotion_threshold * Page::kAllocatableMemory / 100;
- return Page::kAllocatableMemory + kPointerSize;
+ return FLAG_page_promotion_threshold *
+ MemoryChunkLayout::AllocatableMemoryInDataPage() / 100;
+ return MemoryChunkLayout::AllocatableMemoryInDataPage() + kTaggedSize;
}
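The threshold arithmetic above is small but worth spelling out: with page promotion enabled the threshold is a flag-controlled percentage of the allocatable page area, and with it disabled the threshold is pushed one word past the page area so no page can ever exceed it. A sketch with hypothetical numbers:

#include <cstdint>

constexpr intptr_t kAllocatableBytes = 500 * 1024;  // hypothetical page area
constexpr intptr_t kWordSize = 8;

intptr_t Threshold(bool promotion_enabled, int percent) {
  if (promotion_enabled) return percent * kAllocatableBytes / 100;
  // One word past the page area: no live_bytes value can exceed this.
  return kAllocatableBytes + kWordSize;
}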
Evacuator(Heap* heap, RecordMigratedSlotVisitor* record_visitor)
@@ -2341,7 +2670,7 @@ class Evacuator : public Malloced {
virtual ~Evacuator() = default;
- void EvacuatePage(Page* page);
+ void EvacuatePage(MemoryChunk* chunk);
void AddObserver(MigrationObserver* observer) {
new_space_visitor_.AddObserver(observer);
@@ -2358,7 +2687,8 @@ class Evacuator : public Malloced {
static const int kInitialLocalPretenuringFeedbackCapacity = 256;
// |saved_live_bytes| returns the live bytes of the page that was processed.
- virtual void RawEvacuatePage(Page* page, intptr_t* saved_live_bytes) = 0;
+ virtual void RawEvacuatePage(MemoryChunk* chunk,
+ intptr_t* saved_live_bytes) = 0;
inline Heap* heap() { return heap_; }
@@ -2386,29 +2716,30 @@ class Evacuator : public Malloced {
intptr_t bytes_compacted_;
};
-void Evacuator::EvacuatePage(Page* page) {
+void Evacuator::EvacuatePage(MemoryChunk* chunk) {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"), "Evacuator::EvacuatePage");
- DCHECK(page->SweepingDone());
+ DCHECK(chunk->SweepingDone());
intptr_t saved_live_bytes = 0;
double evacuation_time = 0.0;
{
AlwaysAllocateScope always_allocate(heap()->isolate());
TimedScope timed_scope(&evacuation_time);
- RawEvacuatePage(page, &saved_live_bytes);
+ RawEvacuatePage(chunk, &saved_live_bytes);
}
ReportCompactionProgress(evacuation_time, saved_live_bytes);
if (FLAG_trace_evacuation) {
- PrintIsolate(
- heap()->isolate(),
- "evacuation[%p]: page=%p new_space=%d "
- "page_evacuation=%d executable=%d contains_age_mark=%d "
- "live_bytes=%" V8PRIdPTR " time=%f success=%d\n",
- static_cast<void*>(this), static_cast<void*>(page), page->InNewSpace(),
- page->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION) ||
- page->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION),
- page->IsFlagSet(MemoryChunk::IS_EXECUTABLE),
- page->Contains(heap()->new_space()->age_mark()), saved_live_bytes,
- evacuation_time, page->IsFlagSet(Page::COMPACTION_WAS_ABORTED));
+ PrintIsolate(heap()->isolate(),
+ "evacuation[%p]: page=%p new_space=%d "
+ "page_evacuation=%d executable=%d contains_age_mark=%d "
+ "live_bytes=%" V8PRIdPTR " time=%f success=%d\n",
+ static_cast<void*>(this), static_cast<void*>(chunk),
+ chunk->InNewSpace(),
+ chunk->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION) ||
+ chunk->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION),
+ chunk->IsFlagSet(MemoryChunk::IS_EXECUTABLE),
+ chunk->Contains(heap()->new_space()->age_mark()),
+ saved_live_bytes, evacuation_time,
+ chunk->IsFlagSet(Page::COMPACTION_WAS_ABORTED));
}
}
@@ -2439,51 +2770,51 @@ class FullEvacuator : public Evacuator {
}
protected:
- void RawEvacuatePage(Page* page, intptr_t* live_bytes) override;
+ void RawEvacuatePage(MemoryChunk* chunk, intptr_t* live_bytes) override;
MarkCompactCollector* collector_;
};
-void FullEvacuator::RawEvacuatePage(Page* page, intptr_t* live_bytes) {
- const EvacuationMode evacuation_mode = ComputeEvacuationMode(page);
+void FullEvacuator::RawEvacuatePage(MemoryChunk* chunk, intptr_t* live_bytes) {
+ const EvacuationMode evacuation_mode = ComputeEvacuationMode(chunk);
TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
"FullEvacuator::RawEvacuatePage", "evacuation_mode",
evacuation_mode);
MarkCompactCollector::NonAtomicMarkingState* marking_state =
collector_->non_atomic_marking_state();
- *live_bytes = marking_state->live_bytes(page);
- HeapObject* failed_object = nullptr;
+ *live_bytes = marking_state->live_bytes(chunk);
+ HeapObject failed_object;
switch (evacuation_mode) {
case kObjectsNewToOld:
LiveObjectVisitor::VisitBlackObjectsNoFail(
- page, marking_state, &new_space_visitor_,
+ chunk, marking_state, &new_space_visitor_,
LiveObjectVisitor::kClearMarkbits);
// ArrayBufferTracker will be updated during pointers updating.
break;
case kPageNewToOld:
LiveObjectVisitor::VisitBlackObjectsNoFail(
- page, marking_state, &new_to_old_page_visitor_,
+ chunk, marking_state, &new_to_old_page_visitor_,
LiveObjectVisitor::kKeepMarking);
new_to_old_page_visitor_.account_moved_bytes(
- marking_state->live_bytes(page));
+ marking_state->live_bytes(chunk));
// ArrayBufferTracker will be updated during sweeping.
break;
case kPageNewToNew:
LiveObjectVisitor::VisitBlackObjectsNoFail(
- page, marking_state, &new_to_new_page_visitor_,
+ chunk, marking_state, &new_to_new_page_visitor_,
LiveObjectVisitor::kKeepMarking);
new_to_new_page_visitor_.account_moved_bytes(
- marking_state->live_bytes(page));
+ marking_state->live_bytes(chunk));
// ArrayBufferTracker will be updated during sweeping.
break;
case kObjectsOldToOld: {
const bool success = LiveObjectVisitor::VisitBlackObjects(
- page, marking_state, &old_space_visitor_,
+ chunk, marking_state, &old_space_visitor_,
LiveObjectVisitor::kClearMarkbits, &failed_object);
if (!success) {
// Aborted compaction page. Actual processing happens on the main
// thread for simplicity reasons.
- collector_->ReportAbortedEvacuationCandidate(failed_object, page);
+ collector_->ReportAbortedEvacuationCandidate(failed_object, chunk);
} else {
// ArrayBufferTracker will be updated during pointers updating.
}
@@ -2492,14 +2823,14 @@ void FullEvacuator::RawEvacuatePage(Page* page, intptr_t* live_bytes) {
}
}
-class PageEvacuationItem : public ItemParallelJob::Item {
+class EvacuationItem : public ItemParallelJob::Item {
public:
- explicit PageEvacuationItem(Page* page) : page_(page) {}
- ~PageEvacuationItem() override = default;
- Page* page() const { return page_; }
+ explicit EvacuationItem(MemoryChunk* chunk) : chunk_(chunk) {}
+ ~EvacuationItem() override = default;
+ MemoryChunk* chunk() const { return chunk_; }
private:
- Page* page_;
+ MemoryChunk* chunk_;
};
class PageEvacuationTask : public ItemParallelJob::Task {
@@ -2511,9 +2842,9 @@ class PageEvacuationTask : public ItemParallelJob::Task {
void RunInParallel() override {
TRACE_BACKGROUND_GC(tracer_, evacuator_->GetBackgroundTracingScope());
- PageEvacuationItem* item = nullptr;
- while ((item = GetItem<PageEvacuationItem>()) != nullptr) {
- evacuator_->EvacuatePage(item->page());
+ EvacuationItem* item = nullptr;
+ while ((item = GetItem<EvacuationItem>()) != nullptr) {
+ evacuator_->EvacuatePage(item->chunk());
item->MarkFinished();
}
};
@@ -2571,7 +2902,7 @@ bool MarkCompactCollectorBase::ShouldMovePage(Page* p, intptr_t live_bytes) {
const bool reduce_memory = heap()->ShouldReduceMemory();
const Address age_mark = heap()->new_space()->age_mark();
return !reduce_memory && !p->NeverEvacuate() &&
- (live_bytes > Evacuator::PageEvacuationThreshold()) &&
+ (live_bytes > Evacuator::NewSpacePageEvacuationThreshold()) &&
!p->Contains(age_mark) && heap()->CanExpandOldGeneration(live_bytes);
}
@@ -2582,7 +2913,7 @@ void MarkCompactCollector::EvacuatePagesInParallel() {
for (Page* page : old_space_evacuation_pages_) {
live_bytes += non_atomic_marking_state()->live_bytes(page);
- evacuation_job.AddItem(new PageEvacuationItem(page));
+ evacuation_job.AddItem(new EvacuationItem(page));
}
for (Page* page : new_space_evacuation_pages_) {
@@ -2601,8 +2932,25 @@ void MarkCompactCollector::EvacuatePagesInParallel() {
EvacuateNewSpacePageVisitor<NEW_TO_NEW>::Move(page);
}
}
- evacuation_job.AddItem(new PageEvacuationItem(page));
+ evacuation_job.AddItem(new EvacuationItem(page));
+ }
+
+ // Promote young generation large objects.
+ LargePage* current = heap()->new_lo_space()->first_page();
+ IncrementalMarking::NonAtomicMarkingState* marking_state =
+ heap()->incremental_marking()->non_atomic_marking_state();
+ while (current) {
+ LargePage* next_current = current->next_page();
+ HeapObject object = current->GetObject();
+ DCHECK(!marking_state->IsGrey(object));
+ if (marking_state->IsBlack(object)) {
+ heap_->lo_space()->PromoteNewLargeObject(current);
+ current->SetFlag(Page::PAGE_NEW_OLD_PROMOTION);
+ evacuation_job.AddItem(new EvacuationItem(current));
+ }
+ current = next_current;
}
+
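The promotion loop above captures next_page() before promoting, because PromoteNewLargeObject() relinks the page out of the list being walked. The same capture-before-unlink pattern on a plain singly linked list (all names invented):

struct PageNode {
  PageNode* next;
  bool marked_black;
};

// Moves every marked node from `list` onto `promoted`. Safe only because
// the successor is captured before the node is relinked.
void PromoteMarked(PageNode*& list, PageNode*& promoted) {
  PageNode** link = &list;
  for (PageNode* current = list; current != nullptr;) {
    PageNode* next = current->next;  // capture before unlinking
    if (current->marked_black) {
      *link = next;               // unlink from the source list
      current->next = promoted;   // push onto the promoted list
      promoted = current;
    } else {
      link = &current->next;
    }
    current = next;
  }
}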
if (evacuation_job.NumberOfItems() == 0) return;
RecordMigratedSlotVisitor record_visitor(this);
@@ -2613,9 +2961,9 @@ void MarkCompactCollector::EvacuatePagesInParallel() {
class EvacuationWeakObjectRetainer : public WeakObjectRetainer {
public:
- Object* RetainAs(Object* object) override {
+ Object RetainAs(Object object) override {
if (object->IsHeapObject()) {
- HeapObject* heap_object = HeapObject::cast(object);
+ HeapObject heap_object = HeapObject::cast(object);
MapWord map_word = heap_object->map_word();
if (map_word.IsForwardingAddress()) {
return map_word.ToForwardingAddress();
@@ -2625,11 +2973,6 @@ class EvacuationWeakObjectRetainer : public WeakObjectRetainer {
}
};
-// Return true if the given code is deoptimized or will be deoptimized.
-bool MarkCompactCollector::WillBeDeoptimized(Code* code) {
- return code->is_optimized_code() && code->marked_for_deoptimization();
-}
-
void MarkCompactCollector::RecordLiveSlotsOnPage(Page* page) {
EvacuateRecordOnlyVisitor visitor(heap());
LiveObjectVisitor::VisitBlackObjectsNoFail(page, non_atomic_marking_state(),
@@ -2642,12 +2985,12 @@ bool LiveObjectVisitor::VisitBlackObjects(MemoryChunk* chunk,
MarkingState* marking_state,
Visitor* visitor,
IterationMode iteration_mode,
- HeapObject** failed_object) {
+ HeapObject* failed_object) {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
"LiveObjectVisitor::VisitBlackObjects");
for (auto object_and_size :
LiveObjectRange<kBlackObjects>(chunk, marking_state->bitmap(chunk))) {
- HeapObject* const object = object_and_size.first;
+ HeapObject const object = object_and_size.first;
if (!visitor->Visit(object, object_and_size.second)) {
if (iteration_mode == kClearMarkbits) {
marking_state->bitmap(chunk)->ClearRange(
@@ -2671,13 +3014,22 @@ void LiveObjectVisitor::VisitBlackObjectsNoFail(MemoryChunk* chunk,
IterationMode iteration_mode) {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
"LiveObjectVisitor::VisitBlackObjectsNoFail");
- for (auto object_and_size :
- LiveObjectRange<kBlackObjects>(chunk, marking_state->bitmap(chunk))) {
- HeapObject* const object = object_and_size.first;
+ DCHECK_NE(chunk->owner()->identity(), NEW_LO_SPACE);
+ if (chunk->owner()->identity() == LO_SPACE) {
+ HeapObject object = reinterpret_cast<LargePage*>(chunk)->GetObject();
DCHECK(marking_state->IsBlack(object));
- const bool success = visitor->Visit(object, object_and_size.second);
+ const bool success = visitor->Visit(object, object->Size());
USE(success);
DCHECK(success);
+ } else {
+ for (auto object_and_size :
+ LiveObjectRange<kBlackObjects>(chunk, marking_state->bitmap(chunk))) {
+ HeapObject const object = object_and_size.first;
+ DCHECK(marking_state->IsBlack(object));
+ const bool success = visitor->Visit(object, object_and_size.second);
+ USE(success);
+ DCHECK(success);
+ }
}
if (iteration_mode == kClearMarkbits) {
marking_state->ClearLiveness(chunk);
@@ -2693,7 +3045,7 @@ void LiveObjectVisitor::VisitGreyObjectsNoFail(MemoryChunk* chunk,
"LiveObjectVisitor::VisitGreyObjectsNoFail");
for (auto object_and_size :
LiveObjectRange<kGreyObjects>(chunk, marking_state->bitmap(chunk))) {
- HeapObject* const object = object_and_size.first;
+ HeapObject const object = object_and_size.first;
DCHECK(marking_state->IsGrey(object));
const bool success = visitor->Visit(object, object_and_size.second);
USE(success);
@@ -2717,7 +3069,7 @@ void LiveObjectVisitor::RecomputeLiveBytes(MemoryChunk* chunk,
void MarkCompactCollector::Evacuate() {
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE);
- base::LockGuard<base::Mutex> guard(heap()->relocation_mutex());
+ base::MutexGuard guard(heap()->relocation_mutex());
{
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_PROLOGUE);
@@ -2839,10 +3191,10 @@ class ToSpaceUpdatingItem : public UpdatingItem {
void ProcessVisitAll() {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
"ToSpaceUpdatingItem::ProcessVisitAll");
- PointersUpdatingVisitor visitor(chunk_->heap());
+ PointersUpdatingVisitor visitor;
for (Address cur = start_; cur < end_;) {
- HeapObject* object = HeapObject::FromAddress(cur);
- Map* map = object->map();
+ HeapObject object = HeapObject::FromAddress(cur);
+ Map map = object->map();
int size = object->SizeFromMap(map);
object->IterateBodyFast(map, size, &visitor);
cur += size;
@@ -2854,7 +3206,7 @@ class ToSpaceUpdatingItem : public UpdatingItem {
"ToSpaceUpdatingItem::ProcessVisitLive");
// For young generation evacuations we want to visit grey objects, for
// full MC, we need to visit black objects.
- PointersUpdatingVisitor visitor(chunk_->heap());
+ PointersUpdatingVisitor visitor;
for (auto object_and_size : LiveObjectRange<kAllLiveObjects>(
chunk_, marking_state_->bitmap(chunk_))) {
object_and_size.first->IterateBodyFast(&visitor);
@@ -2882,27 +3234,31 @@ class RememberedSetUpdatingItem : public UpdatingItem {
void Process() override {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
"RememberedSetUpdatingItem::Process");
- base::LockGuard<base::Mutex> guard(chunk_->mutex());
+ base::MutexGuard guard(chunk_->mutex());
CodePageMemoryModificationScope memory_modification_scope(chunk_);
UpdateUntypedPointers();
UpdateTypedPointers();
}
private:
- inline SlotCallbackResult CheckAndUpdateOldToNewSlot(Address slot_address) {
- MaybeObject** slot = reinterpret_cast<MaybeObject**>(slot_address);
- HeapObject* heap_object;
- if (!(*slot)->GetHeapObject(&heap_object)) {
+ template <typename TSlot>
+ inline SlotCallbackResult CheckAndUpdateOldToNewSlot(TSlot slot) {
+ static_assert(
+ std::is_same<TSlot, FullMaybeObjectSlot>::value ||
+ std::is_same<TSlot, MaybeObjectSlot>::value,
+ "Only FullMaybeObjectSlot and MaybeObjectSlot are expected here");
+ using THeapObjectSlot = typename TSlot::THeapObjectSlot;
+ HeapObject heap_object;
+ if (!(*slot).GetHeapObject(&heap_object)) {
return REMOVE_SLOT;
}
if (Heap::InFromSpace(heap_object)) {
MapWord map_word = heap_object->map_word();
if (map_word.IsForwardingAddress()) {
- HeapObjectReference::Update(
- reinterpret_cast<HeapObjectReference**>(slot),
- map_word.ToForwardingAddress());
+ HeapObjectReference::Update(THeapObjectSlot(slot),
+ map_word.ToForwardingAddress());
}
- bool success = (*slot)->GetHeapObject(&heap_object);
+ bool success = (*slot).GetHeapObject(&heap_object);
USE(success);
DCHECK(success);
// If the object was in from space before and is after executing the
@@ -2918,7 +3274,7 @@ class RememberedSetUpdatingItem : public UpdatingItem {
// if the slot was already updated during old->old updating.
// In case the page has been moved, check markbits to determine liveness
// of the slot. In the other case, the slot can just be kept.
- if (Page::FromAddress(heap_object->address())
+ if (Page::FromHeapObject(heap_object)
->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION)) {
// IsBlackOrGrey is required because objects are marked as grey for
// the young generation collector while they are black for the full
@@ -2940,7 +3296,9 @@ class RememberedSetUpdatingItem : public UpdatingItem {
if (chunk_->slot_set<OLD_TO_NEW, AccessMode::NON_ATOMIC>() != nullptr) {
RememberedSet<OLD_TO_NEW>::Iterate(
chunk_,
- [this](Address slot) { return CheckAndUpdateOldToNewSlot(slot); },
+ [this](MaybeObjectSlot slot) {
+ return CheckAndUpdateOldToNewSlot(slot);
+ },
SlotSet::PREFREE_EMPTY_BUCKETS);
}
if ((updating_mode_ == RememberedSetUpdatingMode::ALL) &&
@@ -2948,10 +3306,9 @@ class RememberedSetUpdatingItem : public UpdatingItem {
InvalidatedSlotsFilter filter(chunk_);
RememberedSet<OLD_TO_OLD>::Iterate(
chunk_,
- [&filter](Address slot) {
- if (!filter.IsValid(slot)) return REMOVE_SLOT;
- return UpdateSlot<AccessMode::NON_ATOMIC>(
- reinterpret_cast<MaybeObject**>(slot));
+ [&filter](MaybeObjectSlot slot) {
+ if (!filter.IsValid(slot.address())) return REMOVE_SLOT;
+ return UpdateSlot<AccessMode::NON_ATOMIC>(slot);
},
SlotSet::PREFREE_EMPTY_BUCKETS);
}
@@ -2959,7 +3316,7 @@ class RememberedSetUpdatingItem : public UpdatingItem {
chunk_->invalidated_slots() != nullptr) {
#ifdef DEBUG
for (auto object_size : *chunk_->invalidated_slots()) {
- HeapObject* object = object_size.first;
+ HeapObject object = object_size.first;
int size = object_size.second;
DCHECK_LE(object->SizeFromMap(object->map()), size);
}
@@ -2975,11 +3332,11 @@ class RememberedSetUpdatingItem : public UpdatingItem {
nullptr) {
CHECK_NE(chunk_->owner(), heap_->map_space());
const auto check_and_update_old_to_new_slot_fn =
- [this](MaybeObject** slot) {
- return CheckAndUpdateOldToNewSlot(reinterpret_cast<Address>(slot));
+ [this](FullMaybeObjectSlot slot) {
+ return CheckAndUpdateOldToNewSlot(slot);
};
RememberedSet<OLD_TO_NEW>::IterateTyped(
- chunk_, [=](SlotType slot_type, Address host_addr, Address slot) {
+ chunk_, [=](SlotType slot_type, Address slot) {
return UpdateTypedSlotHelper::UpdateTypedSlot(
heap_, slot_type, slot, check_and_update_old_to_new_slot_fn);
});
@@ -2989,12 +3346,12 @@ class RememberedSetUpdatingItem : public UpdatingItem {
nullptr)) {
CHECK_NE(chunk_->owner(), heap_->map_space());
RememberedSet<OLD_TO_OLD>::IterateTyped(
- chunk_, [this](SlotType slot_type, Address host_addr, Address slot) {
+ chunk_, [=](SlotType slot_type, Address slot) {
// Using UpdateStrongSlot is OK here, because there are no weak
// typed slots.
return UpdateTypedSlotHelper::UpdateTypedSlot(
heap_, slot_type, slot,
- UpdateStrongSlot<AccessMode::NON_ATOMIC>);
+ UpdateStrongSlot<AccessMode::NON_ATOMIC, FullMaybeObjectSlot>);
});
}
}
@@ -3017,30 +3374,6 @@ UpdatingItem* MarkCompactCollector::CreateRememberedSetUpdatingItem(
heap(), non_atomic_marking_state(), chunk, updating_mode);
}
-class GlobalHandlesUpdatingItem : public UpdatingItem {
- public:
- GlobalHandlesUpdatingItem(Heap* heap, GlobalHandles* global_handles,
- size_t start, size_t end)
- : heap_(heap),
- global_handles_(global_handles),
- start_(start),
- end_(end) {}
- ~GlobalHandlesUpdatingItem() override = default;
-
- void Process() override {
- TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
- "GlobalHandlesUpdatingItem::Process");
- PointersUpdatingVisitor updating_visitor(heap_);
- global_handles_->IterateNewSpaceRoots(&updating_visitor, start_, end_);
- }
-
- private:
- Heap* heap_;
- GlobalHandles* global_handles_;
- size_t start_;
- size_t end_;
-};
-
// Update array buffers on a page that has been evacuated by copying objects.
// Target page exclusivity in old space is guaranteed by the fact that
// evacuation tasks either (a) retrieved a fresh page, or (b) retrieved all
@@ -3160,7 +3493,7 @@ int MarkCompactCollector::CollectOldSpaceArrayBufferTrackerItems(
void MarkCompactCollector::UpdatePointersAfterEvacuation() {
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS);
- PointersUpdatingVisitor updating_visitor(heap());
+ PointersUpdatingVisitor updating_visitor;
{
TRACE_GC(heap()->tracer(),
@@ -3181,6 +3514,8 @@ void MarkCompactCollector::UpdatePointersAfterEvacuation() {
&updating_job, heap()->code_space(), RememberedSetUpdatingMode::ALL);
remembered_set_pages += CollectRememberedSetUpdatingItems(
&updating_job, heap()->lo_space(), RememberedSetUpdatingMode::ALL);
+ remembered_set_pages += CollectRememberedSetUpdatingItems(
+ &updating_job, heap()->code_lo_space(), RememberedSetUpdatingMode::ALL);
const int remembered_set_tasks =
remembered_set_pages == 0
? 0
@@ -3243,15 +3578,16 @@ void MarkCompactCollector::UpdatePointersAfterEvacuation() {
}
void MarkCompactCollector::ReportAbortedEvacuationCandidate(
- HeapObject* failed_object, Page* page) {
- base::LockGuard<base::Mutex> guard(&mutex_);
+ HeapObject failed_object, MemoryChunk* chunk) {
+ base::MutexGuard guard(&mutex_);
- aborted_evacuation_candidates_.push_back(std::make_pair(failed_object, page));
+ aborted_evacuation_candidates_.push_back(
+ std::make_pair(failed_object, static_cast<Page*>(chunk)));
}
void MarkCompactCollector::PostProcessEvacuationCandidates() {
for (auto object_and_page : aborted_evacuation_candidates_) {
- HeapObject* failed_object = object_and_page.first;
+ HeapObject failed_object = object_and_page.first;
Page* page = object_and_page.second;
page->SetFlag(Page::COMPACTION_WAS_ABORTED);
// Aborted compaction page. We have to record slots here, since we
@@ -3392,7 +3728,7 @@ void MarkCompactCollector::MarkingWorklist::PrintWorklist(
const char* worklist_name, ConcurrentMarkingWorklist* worklist) {
std::map<InstanceType, int> count;
int total_count = 0;
- worklist->IterateGlobalPool([&count, &total_count](HeapObject* obj) {
+ worklist->IterateGlobalPool([&count, &total_count](HeapObject obj) {
++total_count;
count[obj->map()->instance_type()]++;
});
@@ -3430,11 +3766,11 @@ class YoungGenerationMarkingVerifier : public MarkingVerifier {
return marking_state_->bitmap(chunk);
}
- bool IsMarked(HeapObject* object) override {
+ bool IsMarked(HeapObject object) override {
return marking_state_->IsGrey(object);
}
- bool IsBlackOrGrey(HeapObject* object) override {
+ bool IsBlackOrGrey(HeapObject object) override {
return marking_state_->IsBlackOrGrey(object);
}
@@ -3443,31 +3779,43 @@ class YoungGenerationMarkingVerifier : public MarkingVerifier {
VerifyMarking(heap_->new_space());
}
- void VerifyPointers(Object** start, Object** end) override {
- for (Object** current = start; current < end; current++) {
- DCHECK(!HasWeakHeapObjectTag(*current));
- if ((*current)->IsHeapObject()) {
- HeapObject* object = HeapObject::cast(*current);
- if (!Heap::InNewSpace(object)) return;
- CHECK(IsMarked(object));
- }
- }
+ protected:
+ void VerifyPointers(ObjectSlot start, ObjectSlot end) override {
+ VerifyPointersImpl(start, end);
+ }
+
+ void VerifyPointers(MaybeObjectSlot start, MaybeObjectSlot end) override {
+ VerifyPointersImpl(start, end);
+ }
+
+ void VisitCodeTarget(Code host, RelocInfo* rinfo) override {
+ Code target = Code::GetCodeFromTargetAddress(rinfo->target_address());
+ VerifyHeapObjectImpl(target);
+ }
+ void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {
+ VerifyHeapObjectImpl(rinfo->target_object());
+ }
+ void VerifyRootPointers(FullObjectSlot start, FullObjectSlot end) override {
+ VerifyPointersImpl(start, end);
}
- void VerifyPointers(MaybeObject** start, MaybeObject** end) override {
- for (MaybeObject** current = start; current < end; current++) {
- HeapObject* object;
+ private:
+ V8_INLINE void VerifyHeapObjectImpl(HeapObject heap_object) {
+ CHECK_IMPLIES(Heap::InNewSpace(heap_object), IsMarked(heap_object));
+ }
+
+ template <typename TSlot>
+ V8_INLINE void VerifyPointersImpl(TSlot start, TSlot end) {
+ for (TSlot slot = start; slot < end; ++slot) {
+ typename TSlot::TObject object = *slot;
+ HeapObject heap_object;
// Minor MC treats weak references as strong.
- if ((*current)->GetHeapObject(&object)) {
- if (!Heap::InNewSpace(object)) {
- continue;
- }
- CHECK(IsMarked(object));
+ if (object.GetHeapObject(&heap_object)) {
+ VerifyHeapObjectImpl(heap_object);
}
}
}
- private:
MinorMarkCompactCollector::NonAtomicMarkingState* marking_state_;
};
@@ -3485,41 +3833,42 @@ class YoungGenerationEvacuationVerifier : public EvacuationVerifier {
}
protected:
- void VerifyPointers(Object** start, Object** end) override {
- for (Object** current = start; current < end; current++) {
- if ((*current)->IsHeapObject()) {
- HeapObject* object = HeapObject::cast(*current);
- CHECK_IMPLIES(Heap::InNewSpace(object), Heap::InToSpace(object));
- }
- }
+ V8_INLINE void VerifyHeapObjectImpl(HeapObject heap_object) {
+ CHECK_IMPLIES(Heap::InNewSpace(heap_object), Heap::InToSpace(heap_object));
}
- void VerifyPointers(MaybeObject** start, MaybeObject** end) override {
- for (MaybeObject** current = start; current < end; current++) {
- HeapObject* object;
- if ((*current)->GetHeapObject(&object)) {
- CHECK_IMPLIES(Heap::InNewSpace(object), Heap::InToSpace(object));
+
+ template <typename TSlot>
+ void VerifyPointersImpl(TSlot start, TSlot end) {
+ for (TSlot current = start; current < end; ++current) {
+ typename TSlot::TObject object = *current;
+ HeapObject heap_object;
+ if (object.GetHeapObject(&heap_object)) {
+ VerifyHeapObjectImpl(heap_object);
}
}
}
+
+ void VerifyPointers(ObjectSlot start, ObjectSlot end) override {
+ VerifyPointersImpl(start, end);
+ }
+ void VerifyPointers(MaybeObjectSlot start, MaybeObjectSlot end) override {
+ VerifyPointersImpl(start, end);
+ }
+ void VisitCodeTarget(Code host, RelocInfo* rinfo) override {
+ Code target = Code::GetCodeFromTargetAddress(rinfo->target_address());
+ VerifyHeapObjectImpl(target);
+ }
+ void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {
+ VerifyHeapObjectImpl(rinfo->target_object());
+ }
+ void VerifyRootPointers(FullObjectSlot start, FullObjectSlot end) override {
+ VerifyPointersImpl(start, end);
+ }
};
#endif // VERIFY_HEAP
-template <class ParallelItem>
-void SeedGlobalHandles(Heap* heap, GlobalHandles* global_handles,
- ItemParallelJob* job) {
- // Create batches of global handles.
- const size_t kGlobalHandlesBufferSize = 1000;
- const size_t new_space_nodes = global_handles->NumberOfNewSpaceNodes();
- for (size_t start = 0; start < new_space_nodes;
- start += kGlobalHandlesBufferSize) {
- size_t end = start + kGlobalHandlesBufferSize;
- if (end > new_space_nodes) end = new_space_nodes;
- job->AddItem(new ParallelItem(heap, global_handles, start, end));
- }
-}
-
-bool IsUnmarkedObjectForYoungGeneration(Heap* heap, Object** p) {
+bool IsUnmarkedObjectForYoungGeneration(Heap* heap, FullObjectSlot p) {
DCHECK_IMPLIES(Heap::InNewSpace(*p), Heap::InToSpace(*p));
return Heap::InNewSpace(*p) && !heap->minor_mark_compact_collector()
->non_atomic_marking_state()
@@ -3536,43 +3885,54 @@ class YoungGenerationMarkingVisitor final
MinorMarkCompactCollector::MarkingWorklist* global_worklist, int task_id)
: worklist_(global_worklist, task_id), marking_state_(marking_state) {}
- V8_INLINE void VisitPointers(HeapObject* host, Object** start,
- Object** end) final {
- for (Object** p = start; p < end; p++) {
- VisitPointer(host, p);
- }
+ V8_INLINE void VisitPointers(HeapObject host, ObjectSlot start,
+ ObjectSlot end) final {
+ VisitPointersImpl(host, start, end);
}
- V8_INLINE void VisitPointers(HeapObject* host, MaybeObject** start,
- MaybeObject** end) final {
- for (MaybeObject** p = start; p < end; p++) {
- VisitPointer(host, p);
- }
+ V8_INLINE void VisitPointers(HeapObject host, MaybeObjectSlot start,
+ MaybeObjectSlot end) final {
+ VisitPointersImpl(host, start, end);
}
- V8_INLINE void VisitPointer(HeapObject* host, Object** slot) final {
- Object* target = *slot;
- DCHECK(!HasWeakHeapObjectTag(target));
- if (Heap::InNewSpace(target)) {
- HeapObject* target_object = HeapObject::cast(target);
- MarkObjectViaMarkingWorklist(target_object);
+ V8_INLINE void VisitPointer(HeapObject host, ObjectSlot slot) final {
+ VisitPointerImpl(host, slot);
+ }
+
+ V8_INLINE void VisitPointer(HeapObject host, MaybeObjectSlot slot) final {
+ VisitPointerImpl(host, slot);
+ }
+
+ V8_INLINE void VisitCodeTarget(Code host, RelocInfo* rinfo) final {
+ // Code objects are not expected in new space.
+ UNREACHABLE();
+ }
+
+ V8_INLINE void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) final {
+ // Code objects are not expected in new space.
+ UNREACHABLE();
+ }
+
+ private:
+ template <typename TSlot>
+ V8_INLINE void VisitPointersImpl(HeapObject host, TSlot start, TSlot end) {
+ for (TSlot slot = start; slot < end; ++slot) {
+ VisitPointer(host, slot);
}
}
- V8_INLINE void VisitPointer(HeapObject* host, MaybeObject** slot) final {
- MaybeObject* target = *slot;
+ template <typename TSlot>
+ V8_INLINE void VisitPointerImpl(HeapObject host, TSlot slot) {
+ typename TSlot::TObject target = *slot;
if (Heap::InNewSpace(target)) {
- HeapObject* target_object;
- // Treat weak references as strong. TODO(marja): Proper weakness handling
- // for minor-mcs.
- if (target->GetHeapObject(&target_object)) {
- MarkObjectViaMarkingWorklist(target_object);
- }
+ // Treat weak references as strong.
+ // TODO(marja): Proper weakness handling for minor-mcs.
+ HeapObject target_object = target.GetHeapObject();
+ MarkObjectViaMarkingWorklist(target_object);
}
}
- private:
- inline void MarkObjectViaMarkingWorklist(HeapObject* object) {
+ inline void MarkObjectViaMarkingWorklist(HeapObject object) {
if (marking_state_->WhiteToGrey(object)) {
// Marking deque overflow is unsupported for the young generation.
CHECK(worklist_.Push(object));
@@ -3632,7 +3992,7 @@ class YoungGenerationMigrationObserver final : public MigrationObserver {
: MigrationObserver(heap),
mark_compact_collector_(mark_compact_collector) {}
- inline void Move(AllocationSpace dest, HeapObject* src, HeapObject* dst,
+ inline void Move(AllocationSpace dest, HeapObject src, HeapObject dst,
int size) final {
// Migrate color to old generation marking in case the object survived young
// generation garbage collection.
@@ -3655,22 +4015,22 @@ class YoungGenerationRecordMigratedSlotVisitor final
MarkCompactCollector* collector)
: RecordMigratedSlotVisitor(collector) {}
- void VisitCodeTarget(Code* host, RelocInfo* rinfo) final { UNREACHABLE(); }
- void VisitEmbeddedPointer(Code* host, RelocInfo* rinfo) final {
+ void VisitCodeTarget(Code host, RelocInfo* rinfo) final { UNREACHABLE(); }
+ void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) final {
UNREACHABLE();
}
private:
// Only record slots for host objects that are considered as live by the full
// collector.
- inline bool IsLive(HeapObject* object) {
+ inline bool IsLive(HeapObject object) {
return collector_->non_atomic_marking_state()->IsBlack(object);
}
- inline void RecordMigratedSlot(HeapObject* host, MaybeObject* value,
+ inline void RecordMigratedSlot(HeapObject host, MaybeObject value,
Address slot) final {
if (value->IsStrongOrWeak()) {
- Page* p = Page::FromAddress(reinterpret_cast<Address>(value));
+ Page* p = Page::FromAddress(value.ptr());
if (p->InNewSpace()) {
DCHECK_IMPLIES(p->InToSpace(),
p->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION));
@@ -3688,14 +4048,12 @@ void MinorMarkCompactCollector::UpdatePointersAfterEvacuation() {
TRACE_GC(heap()->tracer(),
GCTracer::Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS);
- PointersUpdatingVisitor updating_visitor(heap());
+ PointersUpdatingVisitor updating_visitor;
ItemParallelJob updating_job(isolate()->cancelable_task_manager(),
&page_parallel_job_semaphore_);
CollectNewSpaceArrayBufferTrackerItems(&updating_job);
// Create batches of global handles.
- SeedGlobalHandles<GlobalHandlesUpdatingItem>(
- heap(), isolate()->global_handles(), &updating_job);
const int to_space_tasks = CollectToSpaceUpdatingItems(&updating_job);
int remembered_set_pages = 0;
remembered_set_pages += CollectRememberedSetUpdatingItems(
@@ -3710,6 +4068,9 @@ void MinorMarkCompactCollector::UpdatePointersAfterEvacuation() {
remembered_set_pages += CollectRememberedSetUpdatingItems(
&updating_job, heap()->lo_space(),
RememberedSetUpdatingMode::OLD_TO_NEW_ONLY);
+ remembered_set_pages += CollectRememberedSetUpdatingItems(
+ &updating_job, heap()->code_lo_space(),
+ RememberedSetUpdatingMode::OLD_TO_NEW_ONLY);
const int remembered_set_tasks =
remembered_set_pages == 0 ? 0
: NumberOfParallelPointerUpdateTasks(
@@ -3751,19 +4112,20 @@ class MinorMarkCompactCollector::RootMarkingVisitor : public RootVisitor {
explicit RootMarkingVisitor(MinorMarkCompactCollector* collector)
: collector_(collector) {}
- void VisitRootPointer(Root root, const char* description, Object** p) final {
+ void VisitRootPointer(Root root, const char* description,
+ FullObjectSlot p) final {
MarkObjectByPointer(p);
}
- void VisitRootPointers(Root root, const char* description, Object** start,
- Object** end) final {
- for (Object** p = start; p < end; p++) {
+ void VisitRootPointers(Root root, const char* description,
+ FullObjectSlot start, FullObjectSlot end) final {
+ for (FullObjectSlot p = start; p < end; ++p) {
MarkObjectByPointer(p);
}
}
private:
- V8_INLINE void MarkObjectByPointer(Object** p) {
+ V8_INLINE void MarkObjectByPointer(FullObjectSlot p) {
if (!(*p)->IsHeapObject()) return;
collector_->MarkRootObject(HeapObject::cast(*p));
}
@@ -3808,7 +4170,7 @@ void MinorMarkCompactCollector::CollectGarbage() {
if (FLAG_concurrent_marking) {
// Ensure that concurrent marker does not track pages that are
// going to be unmapped.
- heap()->concurrent_marking()->ClearLiveness(p);
+ heap()->concurrent_marking()->ClearMemoryChunkData(p);
}
}
}
@@ -3832,11 +4194,10 @@ void MinorMarkCompactCollector::MakeIterable(
// remove here.
MarkCompactCollector* full_collector = heap()->mark_compact_collector();
Address free_start = p->area_start();
- DCHECK_EQ(0, free_start % (32 * kPointerSize));
for (auto object_and_size :
LiveObjectRange<kGreyObjects>(p, marking_state()->bitmap(p))) {
- HeapObject* const object = object_and_size.first;
+ HeapObject const object = object_and_size.first;
DCHECK(non_atomic_marking_state()->IsGrey(object));
Address free_end = object->address();
if (free_end != free_start) {
@@ -3851,7 +4212,7 @@ void MinorMarkCompactCollector::MakeIterable(
p->heap()->CreateFillerObjectAt(free_start, static_cast<int>(size),
ClearRecordedSlots::kNo);
}
- Map* map = object->synchronized_map();
+ Map map = object->synchronized_map();
int size = object->SizeFromMap(map);
free_start = free_end + size;
}
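MakeIterable() above walks live objects in address order and plugs every gap between them with a filler so a linear heap walk remains well-formed. A standalone sketch of the gap computation (invented types; V8 writes real filler objects instead of returning ranges):

#include <cstdint>
#include <vector>

struct LiveObject {
  uintptr_t address;
  uintptr_t size;
};

struct FreeRange {
  uintptr_t start;
  uintptr_t size;
};

// `live` must be sorted by address and lie within [area_start, area_end).
std::vector<FreeRange> ComputeFillers(uintptr_t area_start,
                                      uintptr_t area_end,
                                      const std::vector<LiveObject>& live) {
  std::vector<FreeRange> fillers;
  uintptr_t free_start = area_start;
  for (const LiveObject& object : live) {
    if (object.address != free_start) {
      fillers.push_back({free_start, object.address - free_start});
    }
    free_start = object.address + object.size;
  }
  if (free_start < area_end) {
    fillers.push_back({free_start, area_end - free_start});
  }
  return fillers;
}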
@@ -3885,15 +4246,15 @@ class YoungGenerationExternalStringTableCleaner : public RootVisitor {
: heap_(collector->heap()),
marking_state_(collector->non_atomic_marking_state()) {}
- void VisitRootPointers(Root root, const char* description, Object** start,
- Object** end) override {
+ void VisitRootPointers(Root root, const char* description,
+ FullObjectSlot start, FullObjectSlot end) override {
DCHECK_EQ(static_cast<int>(root),
static_cast<int>(Root::kExternalStringsTable));
// Visit all HeapObject pointers in [start, end).
- for (Object** p = start; p < end; p++) {
- Object* o = *p;
+ for (FullObjectSlot p = start; p < end; ++p) {
+ Object o = *p;
if (o->IsHeapObject()) {
- HeapObject* heap_object = HeapObject::cast(o);
+ HeapObject heap_object = HeapObject::cast(o);
if (marking_state_->IsWhite(heap_object)) {
if (o->IsExternalString()) {
heap_->FinalizeExternalString(String::cast(*p));
@@ -3902,7 +4263,7 @@ class YoungGenerationExternalStringTableCleaner : public RootVisitor {
DCHECK(o->IsThinString());
}
// Set the entry to the_hole_value (as deleted).
- *p = ReadOnlyRoots(heap_).the_hole_value();
+ p.store(ReadOnlyRoots(heap_).the_hole_value());
}
}
}
@@ -3921,8 +4282,8 @@ class MinorMarkCompactWeakObjectRetainer : public WeakObjectRetainer {
MinorMarkCompactCollector* collector)
: marking_state_(collector->non_atomic_marking_state()) {}
- Object* RetainAs(Object* object) override {
- HeapObject* heap_object = HeapObject::cast(object);
+ Object RetainAs(Object object) override {
+ HeapObject heap_object = HeapObject::cast(object);
if (!Heap::InNewSpace(heap_object)) return object;
// Young generation marking only marks to grey instead of black.
@@ -3930,7 +4291,7 @@ class MinorMarkCompactWeakObjectRetainer : public WeakObjectRetainer {
if (marking_state_->IsGrey(heap_object)) {
return object;
}
- return nullptr;
+ return Object();
}
private:
@@ -3989,7 +4350,6 @@ UpdatingItem* MinorMarkCompactCollector::CreateRememberedSetUpdatingItem(
}
class MarkingItem;
-class GlobalHandlesMarkingItem;
class PageMarkingItem;
class RootMarkingItem;
class YoungGenerationMarkingTask;
@@ -4036,9 +4396,9 @@ class YoungGenerationMarkingTask : public ItemParallelJob::Task {
}
};
- void MarkObject(Object* object) {
+ void MarkObject(Object object) {
if (!Heap::InNewSpace(object)) return;
- HeapObject* heap_object = HeapObject::cast(object);
+ HeapObject heap_object = HeapObject::cast(object);
if (marking_state_->WhiteToGrey(heap_object)) {
const int size = visitor_.Visit(heap_object);
IncrementLiveBytes(heap_object, size);
@@ -4047,7 +4407,7 @@ class YoungGenerationMarkingTask : public ItemParallelJob::Task {
private:
void EmptyLocalMarkingWorklist() {
- HeapObject* object = nullptr;
+ HeapObject object;
while (marking_worklist_.Pop(&object)) {
const int size = visitor_.Visit(object);
IncrementLiveBytes(object, size);
@@ -4055,16 +4415,15 @@ class YoungGenerationMarkingTask : public ItemParallelJob::Task {
}
void EmptyMarkingWorklist() {
- HeapObject* object = nullptr;
+ HeapObject object;
while (marking_worklist_.Pop(&object)) {
const int size = visitor_.Visit(object);
IncrementLiveBytes(object, size);
}
}
- void IncrementLiveBytes(HeapObject* object, intptr_t bytes) {
- local_live_bytes_[Page::FromAddress(reinterpret_cast<Address>(object))] +=
- bytes;
+ void IncrementLiveBytes(HeapObject object, intptr_t bytes) {
+ local_live_bytes_[Page::FromHeapObject(object)] += bytes;
}
void FlushLiveBytes() {
@@ -4089,7 +4448,7 @@ class PageMarkingItem : public MarkingItem {
void Process(YoungGenerationMarkingTask* task) override {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
"PageMarkingItem::Process");
- base::LockGuard<base::Mutex> guard(chunk_->mutex());
+ base::MutexGuard guard(chunk_->mutex());
MarkUntypedPointers(task);
MarkTypedPointers(task);
}
@@ -4098,33 +4457,37 @@ class PageMarkingItem : public MarkingItem {
inline Heap* heap() { return chunk_->heap(); }
void MarkUntypedPointers(YoungGenerationMarkingTask* task) {
- RememberedSet<OLD_TO_NEW>::Iterate(
- chunk_,
- [this, task](Address slot) { return CheckAndMarkObject(task, slot); },
- SlotSet::PREFREE_EMPTY_BUCKETS);
+ RememberedSet<OLD_TO_NEW>::Iterate(chunk_,
+ [this, task](MaybeObjectSlot slot) {
+ return CheckAndMarkObject(task, slot);
+ },
+ SlotSet::PREFREE_EMPTY_BUCKETS);
}
void MarkTypedPointers(YoungGenerationMarkingTask* task) {
RememberedSet<OLD_TO_NEW>::IterateTyped(
- chunk_,
- [this, task](SlotType slot_type, Address host_addr, Address slot) {
+ chunk_, [=](SlotType slot_type, Address slot) {
return UpdateTypedSlotHelper::UpdateTypedSlot(
- heap(), slot_type, slot, [this, task](MaybeObject** slot) {
- return CheckAndMarkObject(task,
- reinterpret_cast<Address>(slot));
+ heap(), slot_type, slot, [this, task](FullMaybeObjectSlot slot) {
+ return CheckAndMarkObject(task, slot);
});
});
}
- SlotCallbackResult CheckAndMarkObject(YoungGenerationMarkingTask* task,
- Address slot_address) {
- MaybeObject* object = *reinterpret_cast<MaybeObject**>(slot_address);
+ template <typename TSlot>
+ V8_INLINE SlotCallbackResult
+ CheckAndMarkObject(YoungGenerationMarkingTask* task, TSlot slot) {
+ static_assert(
+ std::is_same<TSlot, FullMaybeObjectSlot>::value ||
+ std::is_same<TSlot, MaybeObjectSlot>::value,
+ "Only FullMaybeObjectSlot and MaybeObjectSlot are expected here");
+ MaybeObject object = *slot;
if (Heap::InNewSpace(object)) {
// Marking happens before flipping the young generation, so the object
// has to be in ToSpace.
DCHECK(Heap::InToSpace(object));
- HeapObject* heap_object;
- bool success = object->GetHeapObject(&heap_object);
+ HeapObject heap_object;
+ bool success = object.GetHeapObject(&heap_object);
USE(success);
DCHECK(success);
task->MarkObject(heap_object);
@@ -4139,51 +4502,6 @@ class PageMarkingItem : public MarkingItem {
int slots_;
};
-class GlobalHandlesMarkingItem : public MarkingItem {
- public:
- GlobalHandlesMarkingItem(Heap* heap, GlobalHandles* global_handles,
- size_t start, size_t end)
- : global_handles_(global_handles), start_(start), end_(end) {}
- ~GlobalHandlesMarkingItem() override = default;
-
- void Process(YoungGenerationMarkingTask* task) override {
- TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
- "GlobalHandlesMarkingItem::Process");
- GlobalHandlesRootMarkingVisitor visitor(task);
- global_handles_
- ->IterateNewSpaceStrongAndDependentRootsAndIdentifyUnmodified(
- &visitor, start_, end_);
- }
-
- private:
- class GlobalHandlesRootMarkingVisitor : public RootVisitor {
- public:
- explicit GlobalHandlesRootMarkingVisitor(YoungGenerationMarkingTask* task)
- : task_(task) {}
-
- void VisitRootPointer(Root root, const char* description,
- Object** p) override {
- DCHECK_EQ(Root::kGlobalHandles, root);
- task_->MarkObject(*p);
- }
-
- void VisitRootPointers(Root root, const char* description, Object** start,
- Object** end) override {
- DCHECK_EQ(Root::kGlobalHandles, root);
- for (Object** p = start; p < end; p++) {
- task_->MarkObject(*p);
- }
- }
-
- private:
- YoungGenerationMarkingTask* task_;
- };
-
- GlobalHandles* global_handles_;
- size_t start_;
- size_t end_;
-};
-
void MinorMarkCompactCollector::MarkRootSetInParallel(
RootMarkingVisitor* root_visitor) {
std::atomic<int> slots;
@@ -4194,10 +4512,9 @@ void MinorMarkCompactCollector::MarkRootSetInParallel(
// Seed the root set (roots + old->new set).
{
TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK_SEED);
+ isolate()->global_handles()->IdentifyWeakUnmodifiedObjects(
+ &JSObject::IsUnmodifiedApiObject);
heap()->IterateRoots(root_visitor, VISIT_ALL_IN_MINOR_MC_MARK);
- // Create batches of global handles.
- SeedGlobalHandles<GlobalHandlesMarkingItem>(
- heap(), isolate()->global_handles(), &job);
// Create items for each page.
RememberedSet<OLD_TO_NEW>::IterateMemoryChunks(
heap(), [&job, &slots](MemoryChunk* chunk) {
@@ -4254,7 +4571,7 @@ void MinorMarkCompactCollector::MarkLiveObjects() {
void MinorMarkCompactCollector::ProcessMarkingWorklist() {
MarkingWorklist::View marking_worklist(worklist(), kMainMarker);
- HeapObject* object = nullptr;
+ HeapObject object;
while (marking_worklist.Pop(&object)) {
DCHECK(!object->IsFiller());
DCHECK(object->IsHeapObject());
@@ -4267,7 +4584,7 @@ void MinorMarkCompactCollector::ProcessMarkingWorklist() {
void MinorMarkCompactCollector::Evacuate() {
TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_EVACUATE);
- base::LockGuard<base::Mutex> guard(heap()->relocation_mutex());
+ base::MutexGuard guard(heap()->relocation_mutex());
{
TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_EVACUATE_PROLOGUE);
@@ -4321,63 +4638,67 @@ class YoungGenerationEvacuator : public Evacuator {
}
protected:
- void RawEvacuatePage(Page* page, intptr_t* live_bytes) override;
+ void RawEvacuatePage(MemoryChunk* chunk, intptr_t* live_bytes) override;
MinorMarkCompactCollector* collector_;
};
-void YoungGenerationEvacuator::RawEvacuatePage(Page* page,
+void YoungGenerationEvacuator::RawEvacuatePage(MemoryChunk* chunk,
intptr_t* live_bytes) {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
"YoungGenerationEvacuator::RawEvacuatePage");
MinorMarkCompactCollector::NonAtomicMarkingState* marking_state =
collector_->non_atomic_marking_state();
- *live_bytes = marking_state->live_bytes(page);
- switch (ComputeEvacuationMode(page)) {
+ *live_bytes = marking_state->live_bytes(chunk);
+ switch (ComputeEvacuationMode(chunk)) {
case kObjectsNewToOld:
LiveObjectVisitor::VisitGreyObjectsNoFail(
- page, marking_state, &new_space_visitor_,
+ chunk, marking_state, &new_space_visitor_,
LiveObjectVisitor::kClearMarkbits);
// ArrayBufferTracker will be updated during pointers updating.
break;
case kPageNewToOld:
LiveObjectVisitor::VisitGreyObjectsNoFail(
- page, marking_state, &new_to_old_page_visitor_,
+ chunk, marking_state, &new_to_old_page_visitor_,
LiveObjectVisitor::kKeepMarking);
new_to_old_page_visitor_.account_moved_bytes(
- marking_state->live_bytes(page));
- // TODO(mlippautz): If cleaning array buffers is too slow here we can
- // delay it until the next GC.
- ArrayBufferTracker::FreeDead(page, marking_state);
- if (heap()->ShouldZapGarbage()) {
- collector_->MakeIterable(page, MarkingTreatmentMode::KEEP,
- ZAP_FREE_SPACE);
- } else if (heap()->incremental_marking()->IsMarking()) {
- // When incremental marking is on, we need to clear the mark bits of
- // the full collector. We cannot yet discard the young generation mark
- // bits as they are still relevant for pointers updating.
- collector_->MakeIterable(page, MarkingTreatmentMode::KEEP,
- IGNORE_FREE_SPACE);
+ marking_state->live_bytes(chunk));
+ if (chunk->owner()->identity() != NEW_LO_SPACE) {
+ // TODO(mlippautz): If cleaning array buffers is too slow here we can
+ // delay it until the next GC.
+ ArrayBufferTracker::FreeDead(static_cast<Page*>(chunk), marking_state);
+ if (heap()->ShouldZapGarbage()) {
+ collector_->MakeIterable(static_cast<Page*>(chunk),
+ MarkingTreatmentMode::KEEP, ZAP_FREE_SPACE);
+ } else if (heap()->incremental_marking()->IsMarking()) {
+ // When incremental marking is on, we need to clear the mark bits of
+ // the full collector. We cannot yet discard the young generation mark
+ // bits as they are still relevant for pointers updating.
+ collector_->MakeIterable(static_cast<Page*>(chunk),
+ MarkingTreatmentMode::KEEP,
+ IGNORE_FREE_SPACE);
+ }
}
break;
case kPageNewToNew:
LiveObjectVisitor::VisitGreyObjectsNoFail(
- page, marking_state, &new_to_new_page_visitor_,
+ chunk, marking_state, &new_to_new_page_visitor_,
LiveObjectVisitor::kKeepMarking);
new_to_new_page_visitor_.account_moved_bytes(
- marking_state->live_bytes(page));
+ marking_state->live_bytes(chunk));
+ DCHECK_NE(chunk->owner()->identity(), NEW_LO_SPACE);
// TODO(mlippautz): If cleaning array buffers is too slow here we can
// delay it until the next GC.
- ArrayBufferTracker::FreeDead(page, marking_state);
+ ArrayBufferTracker::FreeDead(static_cast<Page*>(chunk), marking_state);
if (heap()->ShouldZapGarbage()) {
- collector_->MakeIterable(page, MarkingTreatmentMode::KEEP,
- ZAP_FREE_SPACE);
+ collector_->MakeIterable(static_cast<Page*>(chunk),
+ MarkingTreatmentMode::KEEP, ZAP_FREE_SPACE);
} else if (heap()->incremental_marking()->IsMarking()) {
// When incremental marking is on, we need to clear the mark bits of
// the full collector. We cannot yet discard the young generation mark
// bits as they are still relevant for pointers updating.
- collector_->MakeIterable(page, MarkingTreatmentMode::KEEP,
- IGNORE_FREE_SPACE);
+ collector_->MakeIterable(static_cast<Page*>(chunk),
+ MarkingTreatmentMode::KEEP, IGNORE_FREE_SPACE);
}
break;
case kObjectsOldToOld:
@@ -4404,7 +4725,7 @@ void MinorMarkCompactCollector::EvacuatePagesInParallel() {
EvacuateNewSpacePageVisitor<NEW_TO_NEW>::Move(page);
}
}
- evacuation_job.AddItem(new PageEvacuationItem(page));
+ evacuation_job.AddItem(new EvacuationItem(page));
}
if (evacuation_job.NumberOfItems() == 0) return;