Diffstat (limited to 'deps/v8/src/heap/mark-compact.cc')
-rw-r--r-- | deps/v8/src/heap/mark-compact.cc | 1657
1 file changed, 937 insertions, 720 deletions
diff --git a/deps/v8/src/heap/mark-compact.cc b/deps/v8/src/heap/mark-compact.cc
index 78ee33f530..ae7b4676ae 100644
--- a/deps/v8/src/heap/mark-compact.cc
+++ b/deps/v8/src/heap/mark-compact.cc
@@ -25,7 +25,6 @@
 #include "src/heap/spaces-inl.h"
 #include "src/ic/ic.h"
 #include "src/ic/stub-cache.h"
-#include "src/profiler/cpu-profiler.h"
 #include "src/utils-inl.h"
 #include "src/v8.h"
@@ -38,9 +37,8 @@
 const char* Marking::kBlackBitPattern = "11";
 const char* Marking::kGreyBitPattern = "10";
 const char* Marking::kImpossibleBitPattern = "01";
-
-// The following has to hold in order for {Marking::MarkBitFrom} to not produce
-// invalid {kImpossibleBitPattern} in the marking bitmap by overlapping.
+// The following has to hold in order for {ObjectMarking::MarkBitFrom} to not
+// produce invalid {kImpossibleBitPattern} in the marking bitmap by overlapping.
 STATIC_ASSERT(Heap::kMinObjectSizeInWords >= 2);
@@ -49,22 +47,22 @@ STATIC_ASSERT(Heap::kMinObjectSizeInWords >= 2);
 MarkCompactCollector::MarkCompactCollector(Heap* heap)
     :  // NOLINT
+      heap_(heap),
+      page_parallel_job_semaphore_(0),
 #ifdef DEBUG
       state_(IDLE),
 #endif
       marking_parity_(ODD_MARKING_PARITY),
       was_marked_incrementally_(false),
       evacuation_(false),
-      heap_(heap),
+      compacting_(false),
+      black_allocation_(false),
+      have_code_to_deoptimize_(false),
       marking_deque_memory_(NULL),
       marking_deque_memory_committed_(0),
       code_flusher_(nullptr),
-      have_code_to_deoptimize_(false),
-      compacting_(false),
-      sweeping_in_progress_(false),
-      pending_sweeper_tasks_semaphore_(0),
-      pending_compaction_tasks_semaphore_(0),
-      page_parallel_job_semaphore_(0) {
+      embedder_heap_tracer_(nullptr),
+      sweeper_(heap) {
 }

 #ifdef VERIFY_HEAP
@@ -106,40 +104,40 @@ static void VerifyMarking(Heap* heap, Address bottom, Address top) {
   VerifyMarkingVisitor visitor(heap);
   HeapObject* object;
   Address next_object_must_be_here_or_later = bottom;
-
-  for (Address current = bottom; current < top; current += kPointerSize) {
+  for (Address current = bottom; current < top;) {
     object = HeapObject::FromAddress(current);
     if (MarkCompactCollector::IsMarked(object)) {
-      CHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
+      CHECK(Marking::IsBlack(ObjectMarking::MarkBitFrom(object)));
       CHECK(current >= next_object_must_be_here_or_later);
       object->Iterate(&visitor);
       next_object_must_be_here_or_later = current + object->Size();
-      // The next word for sure belongs to the current object, jump over it.
+      // The object is either part of a black area of black allocation or a
+      // regular black object
+      Page* page = Page::FromAddress(current);
+      CHECK(
+          page->markbits()->AllBitsSetInRange(
+              page->AddressToMarkbitIndex(current),
+              page->AddressToMarkbitIndex(next_object_must_be_here_or_later)) ||
+          page->markbits()->AllBitsClearInRange(
+              page->AddressToMarkbitIndex(current + kPointerSize * 2),
+              page->AddressToMarkbitIndex(next_object_must_be_here_or_later)));
+      current = next_object_must_be_here_or_later;
+    } else {
+      current += kPointerSize;
     }
   }
 }

-static void VerifyMarkingBlackPage(Heap* heap, Page* page) {
-  CHECK(page->IsFlagSet(Page::BLACK_PAGE));
-  VerifyMarkingVisitor visitor(heap);
-  HeapObjectIterator it(page);
-  for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
-    CHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
-    object->Iterate(&visitor);
-  }
-}
-
 static void VerifyMarking(NewSpace* space) {
   Address end = space->top();
-  NewSpacePageIterator it(space->bottom(), end);
   // The bottom position is at the start of its page. Allows us to use
   // page->area_start() as start of range on all pages.
-  CHECK_EQ(space->bottom(),
-           NewSpacePage::FromAddress(space->bottom())->area_start());
-  while (it.has_next()) {
-    NewSpacePage* page = it.next();
-    Address limit = it.has_next() ? page->area_end() : end;
+  CHECK_EQ(space->bottom(), Page::FromAddress(space->bottom())->area_start());
+
+  NewSpacePageRange range(space->bottom(), end);
+  for (auto it = range.begin(); it != range.end();) {
+    Page* page = *(it++);
+    Address limit = it != range.end() ? page->area_end() : end;
     CHECK(limit == end || !page->Contains(end));
     VerifyMarking(space->heap(), page->area_start(), limit);
   }
@@ -147,15 +145,8 @@ static void VerifyMarking(NewSpace* space) {

 static void VerifyMarking(PagedSpace* space) {
-  PageIterator it(space);
-
-  while (it.has_next()) {
-    Page* p = it.next();
-    if (p->IsFlagSet(Page::BLACK_PAGE)) {
-      VerifyMarkingBlackPage(space->heap(), p);
-    } else {
-      VerifyMarking(space->heap(), p->area_start(), p->area_end());
-    }
+  for (Page* p : *space) {
+    VerifyMarking(space->heap(), p->area_start(), p->area_end());
   }
 }
@@ -206,13 +197,12 @@ static void VerifyEvacuation(Page* page) {

 static void VerifyEvacuation(NewSpace* space) {
-  NewSpacePageIterator it(space->bottom(), space->top());
   VerifyEvacuationVisitor visitor;
-
-  while (it.has_next()) {
-    NewSpacePage* page = it.next();
+  NewSpacePageRange range(space->bottom(), space->top());
+  for (auto it = range.begin(); it != range.end();) {
+    Page* page = *(it++);
     Address current = page->area_start();
-    Address limit = it.has_next() ? page->area_end() : space->top();
+    Address limit = it != range.end() ? page->area_end() : space->top();
     CHECK(limit == space->top() || !page->Contains(space->top()));
     while (current < limit) {
       HeapObject* object = HeapObject::FromAddress(current);
@@ -227,10 +217,7 @@ static void VerifyEvacuation(Heap* heap, PagedSpace* space) {
   if (FLAG_use_allocation_folding && (space == heap->old_space())) {
     return;
   }
-  PageIterator it(space);
-
-  while (it.has_next()) {
-    Page* p = it.next();
+  for (Page* p : *space) {
     if (p->IsEvacuationCandidate()) continue;
     VerifyEvacuation(p);
   }
@@ -334,7 +321,6 @@ void MarkCompactCollector::ClearInvalidRememberedSetSlots() {
 #endif
 }

-
 void MarkCompactCollector::CollectGarbage() {
   // Make sure that Prepare() has been called. The individual steps below will
   // update the state as they proceed.
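A recurring change in the hunks above replaces V8's explicit page iterators (the has_next()/next() protocol) with C++11 range-based for loops over spaces and page ranges. A minimal sketch of the iterator shape such a range needs — the types below are illustrative stand-ins, not V8's actual Page or NewSpacePageRange:

// pagerange_sketch.cc — illustrative only, not part of the commit.
struct Page { Page* next; };  // hypothetical intrusive page list

class PageRange {
 public:
  class iterator {
   public:
    explicit iterator(Page* p) : p_(p) {}
    Page* operator*() const { return p_; }
    // Post-increment lets callers advance before using the old value,
    // mirroring `Page* page = *(it++);` in the verifier loops above.
    iterator operator++(int) { iterator tmp(p_); p_ = p_->next; return tmp; }
    iterator& operator++() { p_ = p_->next; return *this; }
    bool operator!=(const iterator& other) const { return p_ != other.p_; }
   private:
    Page* p_;
  };
  PageRange(Page* first, Page* last) : first_(first), last_(last) {}
  iterator begin() { return iterator(first_); }
  iterator end() { return iterator(last_); }
 private:
  Page* first_;
  Page* last_;
};
// Usage: for (Page* p : PageRange(first_page, nullptr)) { /* visit p */ }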
@@ -346,6 +332,8 @@ void MarkCompactCollector::CollectGarbage() {

   ClearNonLiveReferences();

+  RecordObjectStats();
+
 #ifdef VERIFY_HEAP
   if (FLAG_verify_heap) {
     VerifyMarking(heap_);
@@ -362,10 +350,7 @@ void MarkCompactCollector::CollectGarbage() {

 #ifdef VERIFY_HEAP
 void MarkCompactCollector::VerifyMarkbitsAreClean(PagedSpace* space) {
-  PageIterator it(space);
-
-  while (it.has_next()) {
-    Page* p = it.next();
+  for (Page* p : *space) {
     CHECK(p->markbits()->IsClean());
     CHECK_EQ(0, p->LiveBytes());
   }
@@ -373,10 +358,7 @@ void MarkCompactCollector::VerifyMarkbitsAreClean(PagedSpace* space) {

 void MarkCompactCollector::VerifyMarkbitsAreClean(NewSpace* space) {
-  NewSpacePageIterator it(space->bottom(), space->top());
-
-  while (it.has_next()) {
-    NewSpacePage* p = it.next();
+  for (Page* p : NewSpacePageRange(space->bottom(), space->top())) {
     CHECK(p->markbits()->IsClean());
     CHECK_EQ(0, p->LiveBytes());
   }
@@ -391,7 +373,7 @@ void MarkCompactCollector::VerifyMarkbitsAreClean() {

   LargeObjectIterator it(heap_->lo_space());
   for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
-    MarkBit mark_bit = Marking::MarkBitFrom(obj);
+    MarkBit mark_bit = ObjectMarking::MarkBitFrom(obj);
     CHECK(Marking::IsWhite(mark_bit));
     CHECK_EQ(0, Page::FromAddress(obj->address())->LiveBytes());
   }
@@ -421,23 +403,15 @@ void MarkCompactCollector::VerifyOmittedMapChecks() {

 static void ClearMarkbitsInPagedSpace(PagedSpace* space) {
-  PageIterator it(space);
-
-  while (it.has_next()) {
-    Page* p = it.next();
-    Bitmap::Clear(p);
-    if (p->IsFlagSet(Page::BLACK_PAGE)) {
-      p->ClearFlag(Page::BLACK_PAGE);
-    }
+  for (Page* p : *space) {
+    p->ClearLiveness();
   }
 }

 static void ClearMarkbitsInNewSpace(NewSpace* space) {
-  NewSpacePageIterator it(space->ToSpaceStart(), space->ToSpaceEnd());
-
-  while (it.has_next()) {
-    Bitmap::Clear(it.next());
+  for (Page* page : *space) {
+    page->ClearLiveness();
   }
 }
@@ -450,62 +424,72 @@ void MarkCompactCollector::ClearMarkbits() {

   LargeObjectIterator it(heap_->lo_space());
   for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
-    Marking::MarkWhite(Marking::MarkBitFrom(obj));
+    Marking::MarkWhite(ObjectMarking::MarkBitFrom(obj));
     MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
     chunk->ResetProgressBar();
     chunk->ResetLiveBytes();
-    if (chunk->IsFlagSet(Page::BLACK_PAGE)) {
-      chunk->ClearFlag(Page::BLACK_PAGE);
-    }
   }
 }

-
-class MarkCompactCollector::SweeperTask : public v8::Task {
+class MarkCompactCollector::Sweeper::SweeperTask : public v8::Task {
  public:
-  SweeperTask(Heap* heap, AllocationSpace space_to_start)
-      : heap_(heap), space_to_start_(space_to_start) {}
+  SweeperTask(Sweeper* sweeper, base::Semaphore* pending_sweeper_tasks,
+              AllocationSpace space_to_start)
+      : sweeper_(sweeper),
+        pending_sweeper_tasks_(pending_sweeper_tasks),
+        space_to_start_(space_to_start) {}

   virtual ~SweeperTask() {}

  private:
   // v8::Task overrides.
   void Run() override {
-    DCHECK_GE(space_to_start_, FIRST_PAGED_SPACE);
+    DCHECK_GE(space_to_start_, FIRST_SPACE);
     DCHECK_LE(space_to_start_, LAST_PAGED_SPACE);
-    const int offset = space_to_start_ - FIRST_PAGED_SPACE;
-    const int num_spaces = LAST_PAGED_SPACE - FIRST_PAGED_SPACE + 1;
+    const int offset = space_to_start_ - FIRST_SPACE;
+    const int num_spaces = LAST_PAGED_SPACE - FIRST_SPACE + 1;
     for (int i = 0; i < num_spaces; i++) {
-      const int space_id = FIRST_PAGED_SPACE + ((i + offset) % num_spaces);
-      DCHECK_GE(space_id, FIRST_PAGED_SPACE);
+      const int space_id = FIRST_SPACE + ((i + offset) % num_spaces);
+      DCHECK_GE(space_id, FIRST_SPACE);
       DCHECK_LE(space_id, LAST_PAGED_SPACE);
-      heap_->mark_compact_collector()->SweepInParallel(
-          heap_->paged_space(space_id), 0);
+      sweeper_->ParallelSweepSpace(static_cast<AllocationSpace>(space_id), 0);
     }
-    heap_->mark_compact_collector()->pending_sweeper_tasks_semaphore_.Signal();
+    pending_sweeper_tasks_->Signal();
   }

-  Heap* heap_;
+  Sweeper* sweeper_;
+  base::Semaphore* pending_sweeper_tasks_;
   AllocationSpace space_to_start_;

   DISALLOW_COPY_AND_ASSIGN(SweeperTask);
 };

+void MarkCompactCollector::Sweeper::StartSweeping() {
+  sweeping_in_progress_ = true;
+  ForAllSweepingSpaces([this](AllocationSpace space) {
+    std::sort(sweeping_list_[space].begin(), sweeping_list_[space].end(),
+              [](Page* a, Page* b) { return a->LiveBytes() < b->LiveBytes(); });
+  });
+  if (FLAG_concurrent_sweeping) {
+    ForAllSweepingSpaces([this](AllocationSpace space) {
+      if (space == NEW_SPACE) return;
+      StartSweepingHelper(space);
+    });
+  }
+}

-void MarkCompactCollector::StartSweeperThreads() {
-  V8::GetCurrentPlatform()->CallOnBackgroundThread(
-      new SweeperTask(heap(), OLD_SPACE), v8::Platform::kShortRunningTask);
-  V8::GetCurrentPlatform()->CallOnBackgroundThread(
-      new SweeperTask(heap(), CODE_SPACE), v8::Platform::kShortRunningTask);
+void MarkCompactCollector::Sweeper::StartSweepingHelper(
+    AllocationSpace space_to_start) {
+  num_sweeping_tasks_.Increment(1);
   V8::GetCurrentPlatform()->CallOnBackgroundThread(
-      new SweeperTask(heap(), MAP_SPACE), v8::Platform::kShortRunningTask);
+      new SweeperTask(this, &pending_sweeper_tasks_semaphore_, space_to_start),
+      v8::Platform::kShortRunningTask);
 }

-
-void MarkCompactCollector::SweepOrWaitUntilSweepingCompleted(Page* page) {
-  PagedSpace* owner = reinterpret_cast<PagedSpace*>(page->owner());
+void MarkCompactCollector::Sweeper::SweepOrWaitUntilSweepingCompleted(
+    Page* page) {
   if (!page->SweepingDone()) {
-    SweepInParallel(page, owner);
+    ParallelSweepPage(page, page->owner()->identity());
     if (!page->SweepingDone()) {
       // We were not able to sweep that page, i.e., a concurrent
       // sweeper thread currently owns this page. Wait for the sweeper
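The round-robin arithmetic in SweeperTask::Run lets each background task start on a different space while still visiting every space exactly once. A self-contained sketch of that scheme — the space ids and the print stand in for V8's real constants and ParallelSweepSpace call:

// roundrobin_sketch.cc — illustrative only, not part of the commit.
#include <cstdio>

enum { FIRST_SPACE = 0, LAST_PAGED_SPACE = 3 };  // assumed ids for the sketch

// Each task is constructed with its own space_to_start; the modulo walk
// guarantees every space in [FIRST_SPACE, LAST_PAGED_SPACE] is swept once.
void SweepAllSpacesStartingAt(int space_to_start) {
  const int offset = space_to_start - FIRST_SPACE;
  const int num_spaces = LAST_PAGED_SPACE - FIRST_SPACE + 1;
  for (int i = 0; i < num_spaces; i++) {
    const int space_id = FIRST_SPACE + ((i + offset) % num_spaces);
    std::printf("sweeping space %d\n", space_id);  // ParallelSweepSpace(...)
  }
}

Starting concurrent tasks at different spaces reduces contention: two tasks rarely compete for pages of the same space at the same time.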
@@ -515,34 +499,62 @@ void MarkCompactCollector::SweepOrWaitUntilSweepingCompleted(Page* page) {
   }
 }

-
 void MarkCompactCollector::SweepAndRefill(CompactionSpace* space) {
-  if (FLAG_concurrent_sweeping && !IsSweepingCompleted()) {
-    SweepInParallel(heap()->paged_space(space->identity()), 0);
+  if (FLAG_concurrent_sweeping && !sweeper().IsSweepingCompleted()) {
+    sweeper().ParallelSweepSpace(space->identity(), 0);
     space->RefillFreeList();
   }
 }

+Page* MarkCompactCollector::Sweeper::GetSweptPageSafe(PagedSpace* space) {
+  base::LockGuard<base::Mutex> guard(&mutex_);
+  SweptList& list = swept_list_[space->identity()];
+  if (list.length() > 0) {
+    return list.RemoveLast();
+  }
+  return nullptr;
+}

-void MarkCompactCollector::EnsureSweepingCompleted() {
-  DCHECK(sweeping_in_progress_ == true);
+void MarkCompactCollector::Sweeper::EnsureCompleted() {
+  if (!sweeping_in_progress_) return;

   // If sweeping is not completed or not running at all, we try to complete it
   // here.
   if (!FLAG_concurrent_sweeping || !IsSweepingCompleted()) {
-    SweepInParallel(heap()->paged_space(OLD_SPACE), 0);
-    SweepInParallel(heap()->paged_space(CODE_SPACE), 0);
-    SweepInParallel(heap()->paged_space(MAP_SPACE), 0);
+    ForAllSweepingSpaces(
+        [this](AllocationSpace space) { ParallelSweepSpace(space, 0); });
   }

   if (FLAG_concurrent_sweeping) {
-    pending_sweeper_tasks_semaphore_.Wait();
-    pending_sweeper_tasks_semaphore_.Wait();
-    pending_sweeper_tasks_semaphore_.Wait();
+    while (num_sweeping_tasks_.Value() > 0) {
+      pending_sweeper_tasks_semaphore_.Wait();
+      num_sweeping_tasks_.Increment(-1);
+    }
   }

-  ParallelSweepSpacesComplete();
+  ForAllSweepingSpaces([this](AllocationSpace space) {
+    if (space == NEW_SPACE) {
+      swept_list_[NEW_SPACE].Clear();
+    }
+    DCHECK(sweeping_list_[space].empty());
+  });
+  late_pages_ = false;
   sweeping_in_progress_ = false;
+}
+
+void MarkCompactCollector::Sweeper::EnsureNewSpaceCompleted() {
+  if (!sweeping_in_progress_) return;
+  if (!FLAG_concurrent_sweeping || !IsSweepingCompleted()) {
+    for (Page* p : *heap_->new_space()) {
+      SweepOrWaitUntilSweepingCompleted(p);
+    }
+  }
+}
+
+void MarkCompactCollector::EnsureSweepingCompleted() {
+  if (!sweeper().sweeping_in_progress()) return;
+
+  sweeper().EnsureCompleted();
   heap()->old_space()->RefillFreeList();
   heap()->code_space()->RefillFreeList();
   heap()->map_space()->RefillFreeList();
@@ -554,56 +566,14 @@ void MarkCompactCollector::EnsureSweepingCompleted() {
 #endif
 }

-
-bool MarkCompactCollector::IsSweepingCompleted() {
-  if (!pending_sweeper_tasks_semaphore_.WaitFor(
-          base::TimeDelta::FromSeconds(0))) {
-    return false;
-  }
-  pending_sweeper_tasks_semaphore_.Signal();
-  return true;
-}
-
-
-void Marking::TransferMark(Heap* heap, Address old_start, Address new_start) {
-  // This is only used when resizing an object.
-  DCHECK(MemoryChunk::FromAddress(old_start) ==
-         MemoryChunk::FromAddress(new_start));
-
-  if (!heap->incremental_marking()->IsMarking() ||
-      Page::FromAddress(old_start)->IsFlagSet(Page::BLACK_PAGE))
-    return;
-
-  // If the mark doesn't move, we don't check the color of the object.
-  // It doesn't matter whether the object is black, since it hasn't changed
-  // size, so the adjustment to the live data count will be zero anyway.
-  if (old_start == new_start) return;
-
-  MarkBit new_mark_bit = MarkBitFrom(new_start);
-  MarkBit old_mark_bit = MarkBitFrom(old_start);
-
-#ifdef DEBUG
-  ObjectColor old_color = Color(old_mark_bit);
-#endif
-
-  if (Marking::IsBlack(old_mark_bit)) {
-    Marking::BlackToWhite(old_mark_bit);
-    Marking::MarkBlack(new_mark_bit);
-    return;
-  } else if (Marking::IsGrey(old_mark_bit)) {
-    Marking::GreyToWhite(old_mark_bit);
-    heap->incremental_marking()->WhiteToGreyAndPush(
-        HeapObject::FromAddress(new_start), new_mark_bit);
-    heap->incremental_marking()->RestartIfNotMarking();
+bool MarkCompactCollector::Sweeper::IsSweepingCompleted() {
+  while (pending_sweeper_tasks_semaphore_.WaitFor(
+      base::TimeDelta::FromSeconds(0))) {
+    num_sweeping_tasks_.Increment(-1);
   }
-
-#ifdef DEBUG
-  ObjectColor new_color = Color(new_mark_bit);
-  DCHECK(new_color == old_color);
-#endif
+  return num_sweeping_tasks_.Value() == 0;
 }

-
 const char* AllocationSpaceName(AllocationSpace space) {
   switch (space) {
     case NEW_SPACE:
@@ -627,9 +597,12 @@ const char* AllocationSpaceName(AllocationSpace space) {
 void MarkCompactCollector::ComputeEvacuationHeuristics(
     int area_size, int* target_fragmentation_percent,
     int* max_evacuated_bytes) {
-  // For memory reducing mode we directly define both constants.
+  // For memory reducing and optimize for memory mode we directly define both
+  // constants.
   const int kTargetFragmentationPercentForReduceMemory = 20;
   const int kMaxEvacuatedBytesForReduceMemory = 12 * Page::kPageSize;
+  const int kTargetFragmentationPercentForOptimizeMemory = 20;
+  const int kMaxEvacuatedBytesForOptimizeMemory = 6 * MB;

   // For regular mode (which is latency critical) we define less aggressive
   // defaults to start and switch to a trace-based (using compaction speed)
@@ -643,6 +616,10 @@ void MarkCompactCollector::ComputeEvacuationHeuristics(
   if (heap()->ShouldReduceMemory()) {
     *target_fragmentation_percent = kTargetFragmentationPercentForReduceMemory;
     *max_evacuated_bytes = kMaxEvacuatedBytesForReduceMemory;
+  } else if (heap()->ShouldOptimizeForMemoryUsage()) {
+    *target_fragmentation_percent =
+        kTargetFragmentationPercentForOptimizeMemory;
+    *max_evacuated_bytes = kMaxEvacuatedBytesForOptimizeMemory;
   } else {
     const double estimated_compaction_speed =
         heap()->tracer()->CompactionSpeedInBytesPerMillisecond();
@@ -677,11 +654,8 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
   std::vector<LiveBytesPagePair> pages;
   pages.reserve(number_of_pages);

-  PageIterator it(space);
-  while (it.has_next()) {
-    Page* p = it.next();
+  for (Page* p : *space) {
     if (p->NeverEvacuate()) continue;
-    if (p->IsFlagSet(Page::BLACK_PAGE)) continue;
     // Invariant: Evacuation candidates are just created when marking is
     // started. This means that sweeping has finished. Furthermore, at the end
     // of a GC all evacuation candidates are cleared and their slot buffers are
@@ -760,8 +734,8 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
     if (FLAG_trace_fragmentation_verbose) {
       PrintIsolate(isolate(),
                    "compaction-selection-page: space=%s free_bytes_page=%d "
-                   "fragmentation_limit_kb=%d fragmentation_limit_percent=%d "
-                   "sum_compaction_kb=%d "
+                   "fragmentation_limit_kb=%" V8PRIdPTR
+                   " fragmentation_limit_percent=%d sum_compaction_kb=%d "
                    "compaction_limit_kb=%d\n",
                    AllocationSpaceName(space->identity()), free_bytes / KB,
                    free_bytes_threshold / KB, target_fragmentation_percent,
@@ -822,7 +796,7 @@ void MarkCompactCollector::Prepare() {

   // If concurrent unmapping tasks are still running, we should wait for
   // them here.
-  heap()->WaitUntilUnmappingOfFreeChunksCompleted();
+  heap()->memory_allocator()->unmapper()->WaitUntilCompleted();

   // Clear marking bits if incremental marking is aborted.
   if (was_marked_incrementally_ && heap_->ShouldAbortIncrementalMarking()) {
@@ -832,9 +806,23 @@ void MarkCompactCollector::Prepare() {
     AbortWeakCells();
     AbortTransitionArrays();
     AbortCompaction();
+    if (heap_->UsingEmbedderHeapTracer()) {
+      heap_->mark_compact_collector()->embedder_heap_tracer()->AbortTracing();
+    }
     was_marked_incrementally_ = false;
   }

+  if (!was_marked_incrementally_) {
+    if (heap_->UsingEmbedderHeapTracer()) {
+      TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WRAPPER_PROLOGUE);
+      heap_->mark_compact_collector()->embedder_heap_tracer()->TracePrologue();
+    }
+  }
+
+  if (UsingEmbedderHeapTracer()) {
+    embedder_heap_tracer()->EnterFinalPause();
+  }
+
   // Don't start compaction if we are in the middle of incremental
   // marking cycle. We did not collect any slots.
   if (!FLAG_never_compact && !was_marked_incrementally_) {
@@ -846,6 +834,7 @@ void MarkCompactCollector::Prepare() {
        space = spaces.next()) {
     space->PrepareForMarkCompact();
   }
+  heap()->account_external_memory_concurrently_freed();

 #ifdef VERIFY_HEAP
   if (!was_marked_incrementally_ && FLAG_verify_heap) {
@@ -858,6 +847,12 @@ void MarkCompactCollector::Prepare() {
 void MarkCompactCollector::Finish() {
   TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_FINISH);

+  if (sweeper().contains_late_pages() && FLAG_concurrent_sweeping) {
+    // If we added some more pages during MC, we need to start at least one
+    // more task as all other tasks might already be finished.
+    sweeper().StartSweepingHelper(OLD_SPACE);
+  }
+
   // The hashing of weak_object_to_code_table is no longer valid.
   heap()->weak_object_to_code_table()->Rehash(
       heap()->isolate()->factory()->undefined_value());
@@ -871,11 +866,12 @@ void MarkCompactCollector::Finish() {
 #endif

   heap_->isolate()->inner_pointer_to_code_cache()->Flush();

-  // The stub cache is not traversed during GC; clear the cache to
-  // force lazy re-initialization of it. This must be done after the
+  // The stub caches are not traversed during GC; clear them to force
+  // their lazy re-initialization. This must be done after the
   // GC, because it relies on the new address of certain old space
   // objects (empty string, illegal builtin).
-  isolate()->stub_cache()->Clear();
+  isolate()->load_stub_cache()->Clear();
+  isolate()->store_stub_cache()->Clear();

   if (have_code_to_deoptimize_) {
     // Some code objects were marked for deoptimization during the GC.
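Candidate selection in CollectEvacuationCandidates keys off a page's free bytes against a fragmentation limit derived from target_fragmentation_percent — the same quantities printed by the compaction-selection-page trace above. A minimal sketch of that test; the function itself is illustrative, not V8's code:

// candidate_sketch.cc — illustrative only, not part of the commit.
#include <cstdint>

// A page qualifies as an evacuation candidate when its free portion
// reaches target_fragmentation_percent of its usable area.
bool QualifiesAsEvacuationCandidate(intptr_t area_size, intptr_t live_bytes,
                                    int target_fragmentation_percent) {
  const intptr_t free_bytes = area_size - live_bytes;
  const intptr_t free_bytes_threshold =
      target_fragmentation_percent * (area_size / 100);
  return free_bytes >= free_bytes_threshold;
}

Lower thresholds (as in the memory-reducing modes above) make more pages eligible and trade compaction time for a denser heap.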
@@ -935,7 +931,7 @@ void CodeFlusher::ProcessJSFunctionCandidates() {
     SharedFunctionInfo* shared = candidate->shared();

     Code* code = shared->code();
-    MarkBit code_mark = Marking::MarkBitFrom(code);
+    MarkBit code_mark = ObjectMarking::MarkBitFrom(code);
     if (Marking::IsWhite(code_mark)) {
       if (FLAG_trace_code_flushing && shared->is_compiled()) {
         PrintF("[code-flushing clears: ");
@@ -982,7 +978,7 @@ void CodeFlusher::ProcessSharedFunctionInfoCandidates() {
     ClearNextCandidate(candidate);

     Code* code = candidate->code();
-    MarkBit code_mark = Marking::MarkBitFrom(code);
+    MarkBit code_mark = ObjectMarking::MarkBitFrom(code);
     if (Marking::IsWhite(code_mark)) {
       if (FLAG_trace_code_flushing && candidate->is_compiled()) {
         PrintF("[code-flushing clears: ");
@@ -1042,7 +1038,7 @@ void CodeFlusher::EvictCandidate(SharedFunctionInfo* shared_info) {

 void CodeFlusher::EvictCandidate(JSFunction* function) {
-  DCHECK(!function->next_function_link()->IsUndefined());
+  DCHECK(!function->next_function_link()->IsUndefined(isolate_));
   Object* undefined = isolate_->heap()->undefined_value();

   // Make sure previous flushing decisions are revisited.
@@ -1119,14 +1115,14 @@ class MarkCompactMarkingVisitor

   // Marks the object black and pushes it on the marking stack.
   INLINE(static void MarkObject(Heap* heap, HeapObject* object)) {
-    MarkBit mark = Marking::MarkBitFrom(object);
+    MarkBit mark = ObjectMarking::MarkBitFrom(object);
     heap->mark_compact_collector()->MarkObject(object, mark);
   }

   // Marks the object black without pushing it on the marking stack.
   // Returns true if object needed marking and false otherwise.
   INLINE(static bool MarkObjectWithoutPush(Heap* heap, HeapObject* object)) {
-    MarkBit mark_bit = Marking::MarkBitFrom(object);
+    MarkBit mark_bit = ObjectMarking::MarkBitFrom(object);
     if (Marking::IsWhite(mark_bit)) {
       heap->mark_compact_collector()->SetMark(object, mark_bit);
       return true;
@@ -1140,7 +1136,7 @@ class MarkCompactMarkingVisitor
     if (!(*p)->IsHeapObject()) return;
     HeapObject* target_object = HeapObject::cast(*p);
     collector->RecordSlot(object, p, target_object);
-    MarkBit mark = Marking::MarkBitFrom(target_object);
+    MarkBit mark = ObjectMarking::MarkBitFrom(target_object);
     collector->MarkObject(target_object, mark);
   }

@@ -1154,10 +1150,10 @@ class MarkCompactMarkingVisitor
 #endif
     Map* map = obj->map();
     Heap* heap = obj->GetHeap();
-    MarkBit mark = Marking::MarkBitFrom(obj);
+    MarkBit mark = ObjectMarking::MarkBitFrom(obj);
     heap->mark_compact_collector()->SetMark(obj, mark);
     // Mark the map pointer and the body.
-    MarkBit map_mark = Marking::MarkBitFrom(map);
+    MarkBit map_mark = ObjectMarking::MarkBitFrom(map);
     heap->mark_compact_collector()->MarkObject(map, map_mark);
     IterateBody(map, obj);
   }
@@ -1177,7 +1173,7 @@ class MarkCompactMarkingVisitor
       if (!o->IsHeapObject()) continue;
       collector->RecordSlot(object, p, o);
       HeapObject* obj = HeapObject::cast(o);
-      MarkBit mark = Marking::MarkBitFrom(obj);
+      MarkBit mark = ObjectMarking::MarkBitFrom(obj);
       if (Marking::IsBlackOrGrey(mark)) continue;
       VisitUnmarkedObject(collector, obj);
     }
@@ -1211,7 +1207,7 @@ class MarkCompactMarkingVisitor
     // was marked through the compilation cache before marker reached JSRegExp
     // object.
     FixedArray* data = FixedArray::cast(re->data());
-    if (Marking::IsBlackOrGrey(Marking::MarkBitFrom(data))) {
+    if (Marking::IsBlackOrGrey(ObjectMarking::MarkBitFrom(data))) {
       Object** slot =
           data->data_start() + JSRegExp::saved_code_index(is_one_byte);
       heap->mark_compact_collector()->RecordSlot(data, slot, code);
@@ -1265,10 +1261,6 @@ void MarkCompactMarkingVisitor::Initialize() {
   StaticMarkingVisitor<MarkCompactMarkingVisitor>::Initialize();

   table_.Register(kVisitJSRegExp, &VisitRegExpAndFlushCode);
-
-  if (FLAG_track_gc_object_stats) {
-    ObjectStatsVisitor::Initialize(&table_);
-  }
 }

@@ -1299,8 +1291,8 @@ class SharedFunctionInfoMarkingVisitor : public ObjectVisitor {
     Object* obj = *slot;
     if (obj->IsSharedFunctionInfo()) {
       SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(obj);
-      MarkBit shared_mark = Marking::MarkBitFrom(shared);
-      MarkBit code_mark = Marking::MarkBitFrom(shared->code());
+      MarkBit shared_mark = ObjectMarking::MarkBitFrom(shared);
+      MarkBit code_mark = ObjectMarking::MarkBitFrom(shared->code());
       collector_->MarkObject(shared->code(), code_mark);
       collector_->MarkObject(shared, shared_mark);
     }
@@ -1320,11 +1312,11 @@ void MarkCompactCollector::PrepareThreadForCodeFlushing(Isolate* isolate,
     // actual optimized code object.
     StackFrame* frame = it.frame();
     Code* code = frame->unchecked_code();
-    MarkBit code_mark = Marking::MarkBitFrom(code);
+    MarkBit code_mark = ObjectMarking::MarkBitFrom(code);
     MarkObject(code, code_mark);
     if (frame->is_optimized()) {
       Code* optimized_code = frame->LookupCode();
-      MarkBit optimized_code_mark = Marking::MarkBitFrom(optimized_code);
+      MarkBit optimized_code_mark = ObjectMarking::MarkBitFrom(optimized_code);
       MarkObject(optimized_code, optimized_code_mark);
     }
   }
@@ -1374,9 +1366,9 @@ class RootMarkingVisitor : public ObjectVisitor {
   void MarkObjectByPointer(Object** p) {
     if (!(*p)->IsHeapObject()) return;

-    // Replace flat cons strings in place.
     HeapObject* object = HeapObject::cast(*p);
-    MarkBit mark_bit = Marking::MarkBitFrom(object);
+
+    MarkBit mark_bit = ObjectMarking::MarkBitFrom(object);
     if (Marking::IsBlackOrGrey(mark_bit)) return;

     Map* map = object->map();
@@ -1384,7 +1376,7 @@ class RootMarkingVisitor : public ObjectVisitor {
     collector_->SetMark(object, mark_bit);

     // Mark the map pointer and body, and push them on the marking stack.
-    MarkBit map_mark = Marking::MarkBitFrom(map);
+    MarkBit map_mark = ObjectMarking::MarkBitFrom(map);
     collector_->MarkObject(map, map_mark);

     MarkCompactMarkingVisitor::IterateBody(map, object);
@@ -1412,7 +1404,7 @@ class StringTableCleaner : public ObjectVisitor {
     for (Object** p = start; p < end; p++) {
       Object* o = *p;
       if (o->IsHeapObject()) {
-        if (Marking::IsWhite(Marking::MarkBitFrom(HeapObject::cast(o)))) {
+        if (Marking::IsWhite(ObjectMarking::MarkBitFrom(HeapObject::cast(o)))) {
           if (finalize_external_strings) {
             DCHECK(o->IsExternalString());
             heap_->FinalizeExternalString(String::cast(*p));
@@ -1449,7 +1441,7 @@ typedef StringTableCleaner<true, false> ExternalStringTableCleaner;
 class MarkCompactWeakObjectRetainer : public WeakObjectRetainer {
  public:
   virtual Object* RetainAs(Object* object) {
-    MarkBit mark_bit = Marking::MarkBitFrom(HeapObject::cast(object));
+    MarkBit mark_bit = ObjectMarking::MarkBitFrom(HeapObject::cast(object));
     DCHECK(!Marking::IsGrey(mark_bit));
     if (Marking::IsBlack(mark_bit)) {
       return object;
@@ -1479,7 +1471,7 @@ void MarkCompactCollector::DiscoverGreyObjectsWithIterator(T* it) {
   Map* filler_map = heap()->one_pointer_filler_map();
   for (HeapObject* object = it->Next(); object != NULL; object = it->Next()) {
-    MarkBit markbit = Marking::MarkBitFrom(object);
+    MarkBit markbit = ObjectMarking::MarkBitFrom(object);
     if ((object->map() != filler_map) && Marking::IsGrey(markbit)) {
       Marking::GreyToBlack(markbit);
       PushBlack(object);
@@ -1493,7 +1485,7 @@ void MarkCompactCollector::DiscoverGreyObjectsOnPage(MemoryChunk* p) {
   LiveObjectIterator<kGreyObjects> it(p);
   HeapObject* object = NULL;
   while ((object = it.Next()) != NULL) {
-    MarkBit markbit = Marking::MarkBitFrom(object);
+    MarkBit markbit = ObjectMarking::MarkBitFrom(object);
     DCHECK(Marking::IsGrey(markbit));
     Marking::GreyToBlack(markbit);
     PushBlack(object);
@@ -1503,6 +1495,9 @@ void MarkCompactCollector::DiscoverGreyObjectsOnPage(MemoryChunk* p) {

 class RecordMigratedSlotVisitor final : public ObjectVisitor {
  public:
+  explicit RecordMigratedSlotVisitor(MarkCompactCollector* collector)
+      : collector_(collector) {}
+
   inline void VisitPointer(Object** p) final {
     RecordMigratedSlot(*p, reinterpret_cast<Address>(p));
   }
@@ -1518,10 +1513,69 @@ class RecordMigratedSlotVisitor final : public ObjectVisitor {
     Address code_entry = Memory::Address_at(code_entry_slot);
     if (Page::FromAddress(code_entry)->IsEvacuationCandidate()) {
       RememberedSet<OLD_TO_OLD>::InsertTyped(Page::FromAddress(code_entry_slot),
-                                             CODE_ENTRY_SLOT, code_entry_slot);
+                                             nullptr, CODE_ENTRY_SLOT,
+                                             code_entry_slot);
     }
   }

+  inline void VisitCodeTarget(RelocInfo* rinfo) final {
+    DCHECK(RelocInfo::IsCodeTarget(rinfo->rmode()));
+    Code* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
+    Code* host = rinfo->host();
+    // The target is always in old space, we don't have to record the slot in
+    // the old-to-new remembered set.
+    DCHECK(!collector_->heap()->InNewSpace(target));
+    collector_->RecordRelocSlot(host, rinfo, target);
+  }
+
+  inline void VisitDebugTarget(RelocInfo* rinfo) final {
+    DCHECK(RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
+           rinfo->IsPatchedDebugBreakSlotSequence());
+    Code* target = Code::GetCodeFromTargetAddress(rinfo->debug_call_address());
+    Code* host = rinfo->host();
+    // The target is always in old space, we don't have to record the slot in
+    // the old-to-new remembered set.
+    DCHECK(!collector_->heap()->InNewSpace(target));
+    collector_->RecordRelocSlot(host, rinfo, target);
+  }
+
+  inline void VisitEmbeddedPointer(RelocInfo* rinfo) final {
+    DCHECK(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
+    HeapObject* object = HeapObject::cast(rinfo->target_object());
+    Code* host = rinfo->host();
+    collector_->heap()->RecordWriteIntoCode(host, rinfo, object);
+    collector_->RecordRelocSlot(host, rinfo, object);
+  }
+
+  inline void VisitCell(RelocInfo* rinfo) final {
+    DCHECK(rinfo->rmode() == RelocInfo::CELL);
+    Cell* cell = rinfo->target_cell();
+    Code* host = rinfo->host();
+    // The cell is always in old space, we don't have to record the slot in
+    // the old-to-new remembered set.
+    DCHECK(!collector_->heap()->InNewSpace(cell));
+    collector_->RecordRelocSlot(host, rinfo, cell);
+  }
+
+  // Entries that will never move.
+  inline void VisitCodeAgeSequence(RelocInfo* rinfo) final {
+    DCHECK(RelocInfo::IsCodeAgeSequence(rinfo->rmode()));
+    Code* stub = rinfo->code_age_stub();
+    USE(stub);
+    DCHECK(!Page::FromAddress(stub->address())->IsEvacuationCandidate());
+  }
+
+  // Entries that are skipped for recording.
+  inline void VisitExternalReference(RelocInfo* rinfo) final {}
+  inline void VisitExternalReference(Address* p) final {}
+  inline void VisitRuntimeEntry(RelocInfo* rinfo) final {}
+  inline void VisitExternalOneByteString(
+      v8::String::ExternalOneByteStringResource** resource) final {}
+  inline void VisitExternalTwoByteString(
+      v8::String::ExternalStringResource** resource) final {}
+  inline void VisitInternalReference(RelocInfo* rinfo) final {}
+  inline void VisitEmbedderReference(Object** p, uint16_t class_id) final {}
+
  private:
   inline void RecordMigratedSlot(Object* value, Address slot) {
     if (value->IsHeapObject()) {
@@ -1533,6 +1587,8 @@ class RecordMigratedSlotVisitor final : public ObjectVisitor {
       }
     }
   }
+
+  MarkCompactCollector* collector_;
 };

 class MarkCompactCollector::HeapObjectVisitor {
@@ -1550,12 +1606,15 @@ class MarkCompactCollector::EvacuateVisitorBase
       : heap_(heap),
         compaction_spaces_(compaction_spaces),
         profiling_(
-            heap->isolate()->cpu_profiler()->is_profiling() ||
+            heap->isolate()->is_profiling() ||
             heap->isolate()->logger()->is_logging_code_events() ||
             heap->isolate()->heap_profiler()->is_tracking_object_moves()) {}

   inline bool TryEvacuateObject(PagedSpace* target_space, HeapObject* object,
                                 HeapObject** target_object) {
+#ifdef VERIFY_HEAP
+    if (AbortCompactionForTesting(object)) return false;
+#endif  // VERIFY_HEAP
     int size = object->Size();
     AllocationAlignment alignment = object->RequiredAlignment();
     AllocationResult allocation = target_space->AllocateRaw(size, alignment);
@@ -1590,7 +1649,7 @@ class MarkCompactCollector::EvacuateVisitorBase
         PROFILE(heap_->isolate(),
                 CodeMoveEvent(AbstractCode::cast(src), dst_addr));
       }
-      RecordMigratedSlotVisitor visitor;
+      RecordMigratedSlotVisitor visitor(heap_->mark_compact_collector());
       dst->IterateBodyFast(dst->map()->instance_type(), size, &visitor);
     } else if (dest == CODE_SPACE) {
       DCHECK_CODEOBJECT_SIZE(size, heap_->code_space());
@@ -1599,9 +1658,9 @@ class MarkCompactCollector::EvacuateVisitorBase
                 CodeMoveEvent(AbstractCode::cast(src), dst_addr));
       }
       heap_->CopyBlock(dst_addr, src_addr, size);
-      RememberedSet<OLD_TO_OLD>::InsertTyped(Page::FromAddress(dst_addr),
-                                             RELOCATED_CODE_OBJECT, dst_addr);
       Code::cast(dst)->Relocate(dst_addr - src_addr);
+      RecordMigratedSlotVisitor visitor(heap_->mark_compact_collector());
+      dst->IterateBodyFast(dst->map()->instance_type(), size, &visitor);
     } else {
       DCHECK_OBJECT_SIZE(size);
       DCHECK(dest == NEW_SPACE);
@@ -1613,6 +1672,26 @@ class MarkCompactCollector::EvacuateVisitorBase
     Memory::Address_at(src_addr) = dst_addr;
   }

+#ifdef VERIFY_HEAP
+  bool AbortCompactionForTesting(HeapObject* object) {
+    if (FLAG_stress_compaction) {
+      const uintptr_t mask = static_cast<uintptr_t>(FLAG_random_seed) &
+                             Page::kPageAlignmentMask & ~kPointerAlignmentMask;
+      if ((reinterpret_cast<uintptr_t>(object->address()) &
+           Page::kPageAlignmentMask) == mask) {
+        Page* page = Page::FromAddress(object->address());
+        if (page->IsFlagSet(Page::COMPACTION_WAS_ABORTED_FOR_TESTING)) {
+          page->ClearFlag(Page::COMPACTION_WAS_ABORTED_FOR_TESTING);
+        } else {
+          page->SetFlag(Page::COMPACTION_WAS_ABORTED_FOR_TESTING);
+          return true;
+        }
+      }
+    }
+    return false;
+  }
+#endif  // VERIFY_HEAP
+
   Heap* heap_;
   CompactionSpaceCollection* compaction_spaces_;
   bool profiling_;
@@ -1626,7 +1705,7 @@ class MarkCompactCollector::EvacuateNewSpaceVisitor final

   explicit EvacuateNewSpaceVisitor(Heap* heap,
                                    CompactionSpaceCollection* compaction_spaces,
-                                   HashMap* local_pretenuring_feedback)
+                                   base::HashMap* local_pretenuring_feedback)
       : EvacuateVisitorBase(heap, compaction_spaces),
         buffer_(LocalAllocationBuffer::InvalidBuffer()),
         space_to_allocate_(NEW_SPACE),
@@ -1634,28 +1713,20 @@ class MarkCompactCollector::EvacuateNewSpaceVisitor final
         semispace_copied_size_(0),
         local_pretenuring_feedback_(local_pretenuring_feedback) {}

-  bool Visit(HeapObject* object) override {
+  inline bool Visit(HeapObject* object) override {
     heap_->UpdateAllocationSite<Heap::kCached>(object,
                                                local_pretenuring_feedback_);
     int size = object->Size();
     HeapObject* target_object = nullptr;
-    if (heap_->ShouldBePromoted(object->address(), size) &&
+    if (heap_->ShouldBePromoted<DEFAULT_PROMOTION>(object->address(), size) &&
         TryEvacuateObject(compaction_spaces_->Get(OLD_SPACE), object,
                           &target_object)) {
-      // If we end up needing more special cases, we should factor this out.
-      if (V8_UNLIKELY(target_object->IsJSArrayBuffer())) {
-        heap_->array_buffer_tracker()->Promote(
-            JSArrayBuffer::cast(target_object));
-      }
       promoted_size_ += size;
       return true;
     }
     HeapObject* target = nullptr;
     AllocationSpace space = AllocateTargetObject(object, &target);
     MigrateObject(HeapObject::cast(target), object, size, space);
-    if (V8_UNLIKELY(target->IsJSArrayBuffer())) {
-      heap_->array_buffer_tracker()->MarkLive(JSArrayBuffer::cast(target));
-    }
     semispace_copied_size_ += size;
     return true;
   }
@@ -1674,6 +1745,7 @@ class MarkCompactCollector::EvacuateNewSpaceVisitor final
     const int size = old_object->Size();
     AllocationAlignment alignment = old_object->RequiredAlignment();
     AllocationResult allocation;
+    AllocationSpace space_allocated_in = space_to_allocate_;
     if (space_to_allocate_ == NEW_SPACE) {
       if (size > kMaxLabObjectSize) {
         allocation =
@@ -1684,11 +1756,12 @@ class MarkCompactCollector::EvacuateNewSpaceVisitor final
     }
     if (allocation.IsRetry() || (space_to_allocate_ == OLD_SPACE)) {
       allocation = AllocateInOldSpace(size, alignment);
+      space_allocated_in = OLD_SPACE;
     }
     bool ok = allocation.To(target_object);
     DCHECK(ok);
     USE(ok);
-    return space_to_allocate_;
+    return space_allocated_in;
   }

   inline bool NewLocalAllocationBuffer() {
@@ -1763,9 +1836,45 @@ class MarkCompactCollector::EvacuateNewSpaceVisitor final
   AllocationSpace space_to_allocate_;
   intptr_t promoted_size_;
   intptr_t semispace_copied_size_;
-  HashMap* local_pretenuring_feedback_;
+  base::HashMap* local_pretenuring_feedback_;
 };

+class MarkCompactCollector::EvacuateNewSpacePageVisitor final
+    : public MarkCompactCollector::HeapObjectVisitor {
+ public:
+  explicit EvacuateNewSpacePageVisitor(Heap* heap)
+      : heap_(heap), promoted_size_(0), semispace_copied_size_(0) {}
+
+  static void MoveToOldSpace(Page* page, PagedSpace* owner) {
+    page->Unlink();
+    Page* new_page = Page::ConvertNewToOld(page, owner);
+    new_page->SetFlag(Page::PAGE_NEW_OLD_PROMOTION);
+  }
+
+  static void MoveToToSpace(Page* page) {
+    page->heap()->new_space()->MovePageFromSpaceToSpace(page);
+    page->SetFlag(Page::PAGE_NEW_NEW_PROMOTION);
+  }
+
+  inline bool Visit(HeapObject* object) {
+    RecordMigratedSlotVisitor visitor(heap_->mark_compact_collector());
+    object->IterateBodyFast(&visitor);
+    promoted_size_ += object->Size();
+    return true;
+  }
+
+  intptr_t promoted_size() { return promoted_size_; }
+  intptr_t semispace_copied_size() { return semispace_copied_size_; }
+
+  void account_semispace_copied(intptr_t copied) {
+    semispace_copied_size_ += copied;
+  }
+
+ private:
+  Heap* heap_;
+  intptr_t promoted_size_;
+  intptr_t semispace_copied_size_;
+};

 class MarkCompactCollector::EvacuateOldSpaceVisitor final
     : public MarkCompactCollector::EvacuateVisitorBase {
@@ -1774,7 +1883,7 @@ class MarkCompactCollector::EvacuateOldSpaceVisitor final
                           CompactionSpaceCollection* compaction_spaces)
       : EvacuateVisitorBase(heap, compaction_spaces) {}

-  bool Visit(HeapObject* object) override {
+  inline bool Visit(HeapObject* object) override {
     CompactionSpace* target_space = compaction_spaces_->Get(
         Page::FromAddress(object->address())->owner()->identity());
     HeapObject* target_object = nullptr;
@@ -1786,14 +1895,24 @@ class MarkCompactCollector::EvacuateOldSpaceVisitor final
   }
 };

+class MarkCompactCollector::EvacuateRecordOnlyVisitor final
+    : public MarkCompactCollector::HeapObjectVisitor {
+ public:
+  explicit EvacuateRecordOnlyVisitor(Heap* heap) : heap_(heap) {}
+
+  inline bool Visit(HeapObject* object) {
+    RecordMigratedSlotVisitor visitor(heap_->mark_compact_collector());
+    object->IterateBody(&visitor);
+    return true;
+  }
+
+ private:
+  Heap* heap_;
+};

 void MarkCompactCollector::DiscoverGreyObjectsInSpace(PagedSpace* space) {
-  PageIterator it(space);
-  while (it.has_next()) {
-    Page* p = it.next();
-    if (!p->IsFlagSet(Page::BLACK_PAGE)) {
-      DiscoverGreyObjectsOnPage(p);
-    }
+  for (Page* p : *space) {
+    DiscoverGreyObjectsOnPage(p);
     if (marking_deque()->IsFull()) return;
   }
 }
@@ -1801,9 +1920,7 @@ void MarkCompactCollector::DiscoverGreyObjectsInSpace(PagedSpace* space) {

 void MarkCompactCollector::DiscoverGreyObjectsInNewSpace() {
   NewSpace* space = heap()->new_space();
-  NewSpacePageIterator it(space->bottom(), space->top());
-  while (it.has_next()) {
-    NewSpacePage* page = it.next();
+  for (Page* page : NewSpacePageRange(space->bottom(), space->top())) {
     DiscoverGreyObjectsOnPage(page);
     if (marking_deque()->IsFull()) return;
   }
@@ -1814,7 +1931,7 @@ bool MarkCompactCollector::IsUnmarkedHeapObject(Object** p) {
   Object* o = *p;
   if (!o->IsHeapObject()) return false;
   HeapObject* heap_object = HeapObject::cast(o);
-  MarkBit mark = Marking::MarkBitFrom(heap_object);
+  MarkBit mark = ObjectMarking::MarkBitFrom(heap_object);
   return Marking::IsWhite(mark);
 }

@@ -1824,7 +1941,7 @@ bool MarkCompactCollector::IsUnmarkedHeapObjectWithHeap(Heap* heap,
   Object* o = *p;
   DCHECK(o->IsHeapObject());
   HeapObject* heap_object = HeapObject::cast(o);
-  MarkBit mark = Marking::MarkBitFrom(heap_object);
+  MarkBit mark = ObjectMarking::MarkBitFrom(heap_object);
   return Marking::IsWhite(mark);
 }

@@ -1832,7 +1949,7 @@ bool MarkCompactCollector::IsUnmarkedHeapObjectWithHeap(Heap* heap,
 void MarkCompactCollector::MarkStringTable(RootMarkingVisitor* visitor) {
   StringTable* string_table = heap()->string_table();
   // Mark the string table itself.
-  MarkBit string_table_mark = Marking::MarkBitFrom(string_table);
+  MarkBit string_table_mark = ObjectMarking::MarkBitFrom(string_table);
   if (Marking::IsWhite(string_table_mark)) {
     // String table could have already been marked by visiting the handles
     // list.
     SetMark(string_table, string_table_mark);
@@ -1844,7 +1961,7 @@ void MarkCompactCollector::MarkStringTable(RootMarkingVisitor* visitor) {

 void MarkCompactCollector::MarkAllocationSite(AllocationSite* site) {
-  MarkBit mark_bit = Marking::MarkBitFrom(site);
+  MarkBit mark_bit = ObjectMarking::MarkBitFrom(site);
   SetMark(site, mark_bit);
 }

@@ -1901,19 +2018,16 @@ void MarkCompactCollector::MarkImplicitRefGroups(
 // After: the marking stack is empty, and all objects reachable from the
 // marking stack have been marked, or are overflowed in the heap.
 void MarkCompactCollector::EmptyMarkingDeque() {
-  Map* filler_map = heap_->one_pointer_filler_map();
   while (!marking_deque_.IsEmpty()) {
     HeapObject* object = marking_deque_.Pop();
-    // Explicitly skip one word fillers. Incremental markbit patterns are
-    // correct only for objects that occupy at least two words.
-    Map* map = object->map();
-    if (map == filler_map) continue;
+    DCHECK(!object->IsFiller());

     DCHECK(object->IsHeapObject());
     DCHECK(heap()->Contains(object));
-    DCHECK(!Marking::IsWhite(Marking::MarkBitFrom(object)));
+    DCHECK(!Marking::IsWhite(ObjectMarking::MarkBitFrom(object)));

-    MarkBit map_mark = Marking::MarkBitFrom(map);
+    Map* map = object->map();
+    MarkBit map_mark = ObjectMarking::MarkBitFrom(map);
     MarkObject(map, map_mark);

     MarkCompactMarkingVisitor::IterateBody(map, object);
@@ -1962,15 +2076,22 @@ void MarkCompactCollector::ProcessMarkingDeque() {
   }
 }

-
 // Mark all objects reachable (transitively) from objects on the marking
 // stack including references only considered in the atomic marking pause.
 void MarkCompactCollector::ProcessEphemeralMarking(
     ObjectVisitor* visitor, bool only_process_harmony_weak_collections) {
-  bool work_to_do = true;
   DCHECK(marking_deque_.IsEmpty() && !marking_deque_.overflowed());
+  bool work_to_do = true;
   while (work_to_do) {
+    if (UsingEmbedderHeapTracer()) {
+      TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WRAPPER_TRACING);
+      RegisterWrappersWithEmbedderHeapTracer();
+      embedder_heap_tracer()->AdvanceTracing(
+          0, EmbedderHeapTracer::AdvanceTracingActions(
+                 EmbedderHeapTracer::ForceCompletionAction::FORCE_COMPLETION));
+    }
     if (!only_process_harmony_weak_collections) {
+      TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_OBJECT_GROUPING);
       isolate()->global_handles()->IterateObjectGroups(
           visitor, &IsUnmarkedHeapObjectWithHeap);
       MarkImplicitRefGroups(&MarkCompactMarkingVisitor::MarkObject);
@@ -1981,7 +2102,6 @@ void MarkCompactCollector::ProcessEphemeralMarking(
   }
 }

-
 void MarkCompactCollector::ProcessTopOptimizedFrame(ObjectVisitor* visitor) {
   for (StackFrameIterator it(isolate(), isolate()->thread_local_top());
        !it.done(); it.Advance()) {
@@ -2080,6 +2200,85 @@ void MarkingDeque::Uninitialize(bool aborting) {
   in_use_ = false;
 }

+void MarkCompactCollector::SetEmbedderHeapTracer(EmbedderHeapTracer* tracer) {
+  DCHECK_NOT_NULL(tracer);
+  CHECK_NULL(embedder_heap_tracer_);
+  embedder_heap_tracer_ = tracer;
+}
+
+void MarkCompactCollector::RegisterWrappersWithEmbedderHeapTracer() {
+  DCHECK(UsingEmbedderHeapTracer());
+  if (wrappers_to_trace_.empty()) {
+    return;
+  }
+  embedder_heap_tracer()->RegisterV8References(wrappers_to_trace_);
+  wrappers_to_trace_.clear();
+}
+
+void MarkCompactCollector::TracePossibleWrapper(JSObject* js_object) {
+  DCHECK(js_object->WasConstructedFromApiFunction());
+  if (js_object->GetInternalFieldCount() >= 2 &&
+      js_object->GetInternalField(0) &&
+      js_object->GetInternalField(0) != heap_->undefined_value() &&
+      js_object->GetInternalField(1) != heap_->undefined_value()) {
+    DCHECK(reinterpret_cast<intptr_t>(js_object->GetInternalField(0)) % 2 == 0);
+    wrappers_to_trace_.push_back(std::pair<void*, void*>(
+        reinterpret_cast<void*>(js_object->GetInternalField(0)),
+        reinterpret_cast<void*>(js_object->GetInternalField(1))));
+  }
+}
+
+class MarkCompactCollector::ObjectStatsVisitor
+    : public MarkCompactCollector::HeapObjectVisitor {
+ public:
+  ObjectStatsVisitor(Heap* heap, ObjectStats* live_stats,
+                     ObjectStats* dead_stats)
+      : live_collector_(heap, live_stats), dead_collector_(heap, dead_stats) {
+    DCHECK_NOT_NULL(live_stats);
+    DCHECK_NOT_NULL(dead_stats);
+    // Global objects are roots and thus recorded as live.
+    live_collector_.CollectGlobalStatistics();
+  }
+
+  bool Visit(HeapObject* obj) override {
+    if (Marking::IsBlack(ObjectMarking::MarkBitFrom(obj))) {
+      live_collector_.CollectStatistics(obj);
+    } else {
+      DCHECK(!Marking::IsGrey(ObjectMarking::MarkBitFrom(obj)));
+      dead_collector_.CollectStatistics(obj);
+    }
+    return true;
+  }
+
+ private:
+  ObjectStatsCollector live_collector_;
+  ObjectStatsCollector dead_collector_;
+};
+
+void MarkCompactCollector::VisitAllObjects(HeapObjectVisitor* visitor) {
+  SpaceIterator space_it(heap());
+  HeapObject* obj = nullptr;
+  while (space_it.has_next()) {
+    ObjectIterator* it = space_it.next();
+    while ((obj = it->Next()) != nullptr) {
+      visitor->Visit(obj);
+    }
+  }
+}
+
+void MarkCompactCollector::RecordObjectStats() {
+  if (FLAG_track_gc_object_stats) {
+    ObjectStatsVisitor visitor(heap(), heap()->live_object_stats_,
+                               heap()->dead_object_stats_);
+    VisitAllObjects(&visitor);
+    if (FLAG_trace_gc_object_stats) {
+      heap()->live_object_stats_->PrintJSON("live");
+      heap()->dead_object_stats_->PrintJSON("dead");
+    }
+    heap()->live_object_stats_->CheckpointObjectStats();
+    heap()->dead_object_stats_->ClearObjectStats();
+  }
+}

 void MarkCompactCollector::MarkLiveObjects() {
   TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK);
@@ -2137,7 +2336,6 @@ void MarkCompactCollector::MarkLiveObjects() {
       TRACE_GC(heap()->tracer(),
               GCTracer::Scope::MC_MARK_WEAK_CLOSURE_EPHEMERAL);
       ProcessEphemeralMarking(&root_visitor, false);
-      ProcessMarkingDeque();
     }

     // The objects reachable from the roots, weak maps or object groups
@@ -2171,7 +2369,10 @@ void MarkCompactCollector::MarkLiveObjects() {
     {
       TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WEAK_CLOSURE_HARMONY);
       ProcessEphemeralMarking(&root_visitor, true);
-      ProcessMarkingDeque();
+      if (UsingEmbedderHeapTracer()) {
+        TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WRAPPER_EPILOGUE);
+        embedder_heap_tracer()->TraceEpilogue();
+      }
     }
   }

@@ -2179,12 +2380,6 @@ void MarkCompactCollector::MarkLiveObjects() {
     heap_->tracer()->AddMarkingTime(heap_->MonotonicallyIncreasingTimeInMs() -
                                     start_time);
   }
-  if (FLAG_track_gc_object_stats) {
-    if (FLAG_trace_gc_object_stats) {
-      heap()->object_stats_->TraceObjectStats();
-    }
-    heap()->object_stats_->CheckpointObjectStats();
-  }
 }

@@ -2258,12 +2453,41 @@ void MarkCompactCollector::MarkDependentCodeForDeoptimization(
     current = current->next_link();
   }

+  {
+    ArrayList* list = heap_->weak_new_space_object_to_code_list();
+    int counter = 0;
+    for (int i = 0; i < list->Length(); i += 2) {
+      WeakCell* obj = WeakCell::cast(list->Get(i));
+      WeakCell* dep = WeakCell::cast(list->Get(i + 1));
+      if (obj->cleared() || dep->cleared()) {
+        if (!dep->cleared()) {
+          Code* code = Code::cast(dep->value());
+          if (!code->marked_for_deoptimization()) {
+            DependentCode::SetMarkedForDeoptimization(
+                code, DependentCode::DependencyGroup::kWeakCodeGroup);
+            code->InvalidateEmbeddedObjects();
+            have_code_to_deoptimize_ = true;
+          }
+        }
+      } else {
+        // We record the slot manually because marking is finished at this
+        // point and the write barrier would bailout.
+        list->Set(counter, obj, SKIP_WRITE_BARRIER);
+        RecordSlot(list, list->Slot(counter), obj);
+        counter++;
+        list->Set(counter, dep, SKIP_WRITE_BARRIER);
+        RecordSlot(list, list->Slot(counter), dep);
+        counter++;
+      }
+    }
+  }
+
   WeakHashTable* table = heap_->weak_object_to_code_table();
   uint32_t capacity = table->Capacity();
   for (uint32_t i = 0; i < capacity; i++) {
     uint32_t key_index = table->EntryToIndex(i);
     Object* key = table->get(key_index);
-    if (!table->IsKey(key)) continue;
+    if (!table->IsKey(isolate, key)) continue;
     uint32_t value_index = table->EntryToValueIndex(i);
     Object* value = table->get(value_index);
     DCHECK(key->IsWeakCell());
@@ -2286,11 +2510,11 @@ void MarkCompactCollector::ClearSimpleMapTransitions(
   while (weak_cell_obj != Smi::FromInt(0)) {
     WeakCell* weak_cell = WeakCell::cast(weak_cell_obj);
     Map* map = Map::cast(weak_cell->value());
-    DCHECK(Marking::IsWhite(Marking::MarkBitFrom(map)));
+    DCHECK(Marking::IsWhite(ObjectMarking::MarkBitFrom(map)));
     Object* potential_parent = map->constructor_or_backpointer();
     if (potential_parent->IsMap()) {
       Map* parent = Map::cast(potential_parent);
-      if (Marking::IsBlackOrGrey(Marking::MarkBitFrom(parent)) &&
+      if (Marking::IsBlackOrGrey(ObjectMarking::MarkBitFrom(parent)) &&
           parent->raw_transitions() == weak_cell) {
         ClearSimpleMapTransition(parent, map);
       }
@@ -2330,7 +2554,7 @@ void MarkCompactCollector::ClearFullMapTransitions() {
       Map* map = array->GetTarget(0);
       Map* parent = Map::cast(map->constructor_or_backpointer());
       bool parent_is_alive =
-          Marking::IsBlackOrGrey(Marking::MarkBitFrom(parent));
+          Marking::IsBlackOrGrey(ObjectMarking::MarkBitFrom(parent));
       DescriptorArray* descriptors =
           parent_is_alive ? parent->instance_descriptors() : nullptr;
       bool descriptors_owner_died =
@@ -2355,7 +2579,7 @@ bool MarkCompactCollector::CompactTransitionArray(
   for (int i = 0; i < num_transitions; ++i) {
     Map* target = transitions->GetTarget(i);
     DCHECK_EQ(target->constructor_or_backpointer(), map);
-    if (Marking::IsWhite(Marking::MarkBitFrom(target))) {
+    if (Marking::IsWhite(ObjectMarking::MarkBitFrom(target))) {
       if (descriptors != nullptr &&
           target->instance_descriptors() == descriptors) {
         descriptors_owner_died = true;
@@ -2532,7 +2756,7 @@ void MarkCompactCollector::ClearWeakCells(Object** non_live_map_list,
       if (cell_value->IsHeapObject() &&
           MarkCompactCollector::IsMarked(HeapObject::cast(cell_value))) {
         // Resurrect the cell.
-        MarkBit mark = Marking::MarkBitFrom(value);
+        MarkBit mark = ObjectMarking::MarkBitFrom(value);
         SetMark(value, mark);
         Object** slot = HeapObject::RawField(value, Cell::kValueOffset);
         RecordSlot(value, slot, *slot);
@@ -2602,30 +2826,16 @@ void MarkCompactCollector::AbortTransitionArrays() {
   heap()->set_encountered_transition_arrays(Smi::FromInt(0));
 }

-static inline SlotType SlotTypeForRMode(RelocInfo::Mode rmode) {
-  if (RelocInfo::IsCodeTarget(rmode)) {
-    return CODE_TARGET_SLOT;
-  } else if (RelocInfo::IsCell(rmode)) {
-    return CELL_TARGET_SLOT;
-  } else if (RelocInfo::IsEmbeddedObject(rmode)) {
-    return EMBEDDED_OBJECT_SLOT;
-  } else if (RelocInfo::IsDebugBreakSlot(rmode)) {
-    return DEBUG_TARGET_SLOT;
-  }
-  UNREACHABLE();
-  return NUMBER_OF_SLOT_TYPES;
-}
-
 void MarkCompactCollector::RecordRelocSlot(Code* host, RelocInfo* rinfo,
                                            Object* target) {
   Page* target_page = Page::FromAddress(reinterpret_cast<Address>(target));
   Page* source_page = Page::FromAddress(reinterpret_cast<Address>(host));
-  RelocInfo::Mode rmode = rinfo->rmode();
   if (target_page->IsEvacuationCandidate() &&
       (rinfo->host() == NULL ||
        !ShouldSkipEvacuationSlotRecording(rinfo->host()))) {
+    RelocInfo::Mode rmode = rinfo->rmode();
     Address addr = rinfo->pc();
-    SlotType slot_type = SlotTypeForRMode(rmode);
+    SlotType slot_type = SlotTypeForRelocInfoMode(rmode);
     if (rinfo->IsInConstantPool()) {
       addr = rinfo->constant_pool_entry_address();
       if (RelocInfo::IsCodeTarget(rmode)) {
@@ -2635,145 +2845,64 @@ void MarkCompactCollector::RecordRelocSlot(Code* host, RelocInfo* rinfo,
         slot_type = OBJECT_SLOT;
       }
     }
-    RememberedSet<OLD_TO_OLD>::InsertTyped(source_page, slot_type, addr);
+    RememberedSet<OLD_TO_OLD>::InsertTyped(
+        source_page, reinterpret_cast<Address>(host), slot_type, addr);
   }
 }

-static inline void UpdateTypedSlot(Isolate* isolate, ObjectVisitor* v,
-                                   SlotType slot_type, Address addr) {
-  switch (slot_type) {
-    case CODE_TARGET_SLOT: {
-      RelocInfo rinfo(isolate, addr, RelocInfo::CODE_TARGET, 0, NULL);
-      rinfo.Visit(isolate, v);
-      break;
-    }
-    case CELL_TARGET_SLOT: {
-      RelocInfo rinfo(isolate, addr, RelocInfo::CELL, 0, NULL);
-      rinfo.Visit(isolate, v);
-      break;
-    }
-    case CODE_ENTRY_SLOT: {
-      v->VisitCodeEntry(addr);
-      break;
-    }
-    case RELOCATED_CODE_OBJECT: {
-      HeapObject* obj = HeapObject::FromAddress(addr);
-      Code::BodyDescriptor::IterateBody(obj, v);
-      break;
-    }
-    case DEBUG_TARGET_SLOT: {
-      RelocInfo rinfo(isolate, addr, RelocInfo::DEBUG_BREAK_SLOT_AT_POSITION, 0,
-                      NULL);
-      if (rinfo.IsPatchedDebugBreakSlotSequence()) rinfo.Visit(isolate, v);
-      break;
-    }
-    case EMBEDDED_OBJECT_SLOT: {
-      RelocInfo rinfo(isolate, addr, RelocInfo::EMBEDDED_OBJECT, 0, NULL);
-      rinfo.Visit(isolate, v);
-      break;
-    }
-    case OBJECT_SLOT: {
-      v->VisitPointer(reinterpret_cast<Object**>(addr));
-      break;
+static inline SlotCallbackResult UpdateSlot(Object** slot) {
+  Object* obj = reinterpret_cast<Object*>(
+      base::NoBarrier_Load(reinterpret_cast<base::AtomicWord*>(slot)));
+
+  if (obj->IsHeapObject()) {
+    HeapObject* heap_obj = HeapObject::cast(obj);
+    MapWord map_word = heap_obj->map_word();
+    if (map_word.IsForwardingAddress()) {
+      DCHECK(heap_obj->GetHeap()->InFromSpace(heap_obj) ||
+             MarkCompactCollector::IsOnEvacuationCandidate(heap_obj) ||
+             Page::FromAddress(heap_obj->address())
+                 ->IsFlagSet(Page::COMPACTION_WAS_ABORTED));
+      HeapObject* target = map_word.ToForwardingAddress();
+      base::NoBarrier_CompareAndSwap(
+          reinterpret_cast<base::AtomicWord*>(slot),
+          reinterpret_cast<base::AtomicWord>(obj),
+          reinterpret_cast<base::AtomicWord>(target));
+      DCHECK(!heap_obj->GetHeap()->InFromSpace(target) &&
+             !MarkCompactCollector::IsOnEvacuationCandidate(target));
     }
   }
+  return REMOVE_SLOT;
 }

-
 // Visitor for updating pointers from live objects in old spaces to new space.
 // It does not expect to encounter pointers to dead objects.
 class PointersUpdatingVisitor : public ObjectVisitor {
  public:
-  explicit PointersUpdatingVisitor(Heap* heap) : heap_(heap) {}
-
-  void VisitPointer(Object** p) override { UpdatePointer(p); }
+  void VisitPointer(Object** p) override { UpdateSlot(p); }

   void VisitPointers(Object** start, Object** end) override {
-    for (Object** p = start; p < end; p++) UpdatePointer(p);
+    for (Object** p = start; p < end; p++) UpdateSlot(p);
   }

   void VisitCell(RelocInfo* rinfo) override {
-    DCHECK(rinfo->rmode() == RelocInfo::CELL);
-    Object* cell = rinfo->target_cell();
-    Object* old_cell = cell;
-    VisitPointer(&cell);
-    if (cell != old_cell) {
-      rinfo->set_target_cell(reinterpret_cast<Cell*>(cell));
-    }
+    UpdateTypedSlotHelper::UpdateCell(rinfo, UpdateSlot);
   }

   void VisitEmbeddedPointer(RelocInfo* rinfo) override {
-    DCHECK(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
-    Object* target = rinfo->target_object();
-    Object* old_target = target;
-    VisitPointer(&target);
-    // Avoid unnecessary changes that might unnecessary flush the instruction
-    // cache.
-    if (target != old_target) {
-      rinfo->set_target_object(target);
-    }
+    UpdateTypedSlotHelper::UpdateEmbeddedPointer(rinfo, UpdateSlot);
   }

   void VisitCodeTarget(RelocInfo* rinfo) override {
-    DCHECK(RelocInfo::IsCodeTarget(rinfo->rmode()));
-    Object* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
-    Object* old_target = target;
-    VisitPointer(&target);
-    if (target != old_target) {
-      rinfo->set_target_address(Code::cast(target)->instruction_start());
-    }
+    UpdateTypedSlotHelper::UpdateCodeTarget(rinfo, UpdateSlot);
   }

-  void VisitCodeAgeSequence(RelocInfo* rinfo) override {
-    DCHECK(RelocInfo::IsCodeAgeSequence(rinfo->rmode()));
-    Object* stub = rinfo->code_age_stub();
-    DCHECK(stub != NULL);
-    VisitPointer(&stub);
-    if (stub != rinfo->code_age_stub()) {
-      rinfo->set_code_age_stub(Code::cast(stub));
-    }
+  void VisitCodeEntry(Address entry_address) override {
+    UpdateTypedSlotHelper::UpdateCodeEntry(entry_address, UpdateSlot);
   }

   void VisitDebugTarget(RelocInfo* rinfo) override {
-    DCHECK(RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
-           rinfo->IsPatchedDebugBreakSlotSequence());
-    Object* target =
-        Code::GetCodeFromTargetAddress(rinfo->debug_call_address());
-    VisitPointer(&target);
-    rinfo->set_debug_call_address(Code::cast(target)->instruction_start());
-  }
-
-  static inline void UpdateSlot(Heap* heap, Object** slot) {
-    Object* obj = reinterpret_cast<Object*>(
-        base::NoBarrier_Load(reinterpret_cast<base::AtomicWord*>(slot)));
-
-    if (!obj->IsHeapObject()) return;
-
-    HeapObject* heap_obj = HeapObject::cast(obj);
-
-    MapWord map_word = heap_obj->map_word();
-    if (map_word.IsForwardingAddress()) {
-      DCHECK(heap->InFromSpace(heap_obj) ||
-             MarkCompactCollector::IsOnEvacuationCandidate(heap_obj) ||
-             Page::FromAddress(heap_obj->address())
-                 ->IsFlagSet(Page::COMPACTION_WAS_ABORTED));
-      HeapObject* target = map_word.ToForwardingAddress();
-      base::NoBarrier_CompareAndSwap(
-          reinterpret_cast<base::AtomicWord*>(slot),
-          reinterpret_cast<base::AtomicWord>(obj),
-          reinterpret_cast<base::AtomicWord>(target));
-      DCHECK(!heap->InFromSpace(target) &&
!MarkCompactCollector::IsOnEvacuationCandidate(target)); - } + UpdateTypedSlotHelper::UpdateDebugTarget(rinfo, UpdateSlot); } - - private: - inline void UpdatePointer(Object** p) { UpdateSlot(heap_, p); } - - Heap* heap_; }; static String* UpdateReferenceInExternalStringTableEntry(Heap* heap, @@ -2792,9 +2921,8 @@ bool MarkCompactCollector::IsSlotInBlackObject(MemoryChunk* p, Address slot) { DCHECK(owner != heap_->lo_space() && owner != nullptr); USE(owner); - // If we are on a black page, we cannot find the actual object start - // easiliy. We just return true but do not set the out_object. - if (p->IsFlagSet(Page::BLACK_PAGE)) { + // We may be part of a black area. + if (Marking::IsBlackOrGrey(ObjectMarking::MarkBitFrom(slot))) { return true; } @@ -2861,8 +2989,13 @@ bool MarkCompactCollector::IsSlotInBlackObject(MemoryChunk* p, Address slot) { base_address += (cell_index - base_address_cell_index) * Bitmap::kBitsPerCell * kPointerSize; Address address = base_address + offset * kPointerSize; + + // If the found mark bit is part of a black area, the slot cannot be part + // of a live object since it is not marked. + if (p->IsBlackAreaEndMarker(address + kPointerSize)) return false; + HeapObject* object = HeapObject::FromAddress(address); - CHECK(Marking::IsBlack(Marking::MarkBitFrom(object))); + CHECK(Marking::IsBlack(ObjectMarking::MarkBitFrom(object))); CHECK(object->address() < reinterpret_cast<Address>(slot)); if ((object->address() + kPointerSize) <= slot && (object->address() + object->Size()) > slot) { @@ -2891,61 +3024,71 @@ HeapObject* MarkCompactCollector::FindBlackObjectBySlotSlow(Address slot) { return nullptr; } - if (p->IsFlagSet(Page::BLACK_PAGE)) { - HeapObjectIterator it(p); - HeapObject* object = nullptr; - while ((object = it.Next()) != nullptr) { - int size = object->Size(); - if (object->address() > slot) return nullptr; - if (object->address() <= slot && slot < (object->address() + size)) { - return object; - } - } - } else { - LiveObjectIterator<kBlackObjects> it(p); - HeapObject* object = nullptr; - while ((object = it.Next()) != nullptr) { - int size = object->Size(); - if (object->address() > slot) return nullptr; - if (object->address() <= slot && slot < (object->address() + size)) { - return object; - } + LiveObjectIterator<kBlackObjects> it(p); + HeapObject* object = nullptr; + while ((object = it.Next()) != nullptr) { + int size = object->Size(); + if (object->address() > slot) return nullptr; + if (object->address() <= slot && slot < (object->address() + size)) { + return object; } } + return nullptr; } void MarkCompactCollector::EvacuateNewSpacePrologue() { NewSpace* new_space = heap()->new_space(); - NewSpacePageIterator it(new_space->bottom(), new_space->top()); // Append the list of new space pages to be processed. - while (it.has_next()) { - newspace_evacuation_candidates_.Add(it.next()); + for (Page* p : NewSpacePageRange(new_space->bottom(), new_space->top())) { + newspace_evacuation_candidates_.Add(p); } new_space->Flip(); new_space->ResetAllocationInfo(); } -void MarkCompactCollector::EvacuateNewSpaceEpilogue() { - newspace_evacuation_candidates_.Rewind(0); -} - - class MarkCompactCollector::Evacuator : public Malloced { public: + enum EvacuationMode { + kObjectsNewToOld, + kPageNewToOld, + kObjectsOldToOld, + kPageNewToNew, + }; + + static inline EvacuationMode ComputeEvacuationMode(MemoryChunk* chunk) { + // Note: The order of checks is important in this function. 
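// A PAGE_NEW_NEW_PROMOTION page, for instance, still answers true to
// InNewSpace() at this point, so testing InNewSpace() before the promotion
// flags would misroute it into the object-wise kObjectsNewToOld path.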
+ if (chunk->IsFlagSet(MemoryChunk::PAGE_NEW_OLD_PROMOTION)) + return kPageNewToOld; + if (chunk->IsFlagSet(MemoryChunk::PAGE_NEW_NEW_PROMOTION)) + return kPageNewToNew; + if (chunk->InNewSpace()) return kObjectsNewToOld; + DCHECK(chunk->IsEvacuationCandidate()); + return kObjectsOldToOld; + } + + // NewSpacePages with more live bytes than this threshold qualify for fast + // evacuation. + static int PageEvacuationThreshold() { + if (FLAG_page_promotion) + return FLAG_page_promotion_threshold * Page::kAllocatableMemory / 100; + return Page::kAllocatableMemory + kPointerSize; + } + explicit Evacuator(MarkCompactCollector* collector) : collector_(collector), compaction_spaces_(collector->heap()), - local_pretenuring_feedback_(HashMap::PointersMatch, + local_pretenuring_feedback_(base::HashMap::PointersMatch, kInitialLocalPretenuringFeedbackCapacity), new_space_visitor_(collector->heap(), &compaction_spaces_, &local_pretenuring_feedback_), + new_space_page_visitor(collector->heap()), old_space_visitor_(collector->heap(), &compaction_spaces_), duration_(0.0), bytes_compacted_(0) {} - inline bool EvacuatePage(MemoryChunk* chunk); + inline bool EvacuatePage(Page* chunk); // Merge back locally cached info sequentially. Note that this method needs // to be called from the main thread. @@ -2956,23 +3099,22 @@ class MarkCompactCollector::Evacuator : public Malloced { private: static const int kInitialLocalPretenuringFeedbackCapacity = 256; - Heap* heap() { return collector_->heap(); } + inline Heap* heap() { return collector_->heap(); } void ReportCompactionProgress(double duration, intptr_t bytes_compacted) { duration_ += duration; bytes_compacted_ += bytes_compacted; } - inline bool EvacuateSinglePage(MemoryChunk* p, HeapObjectVisitor* visitor); - MarkCompactCollector* collector_; // Locally cached collector data. CompactionSpaceCollection compaction_spaces_; - HashMap local_pretenuring_feedback_; + base::HashMap local_pretenuring_feedback_; // Visitors for the corresponding spaces. EvacuateNewSpaceVisitor new_space_visitor_; + EvacuateNewSpacePageVisitor new_space_page_visitor; EvacuateOldSpaceVisitor old_space_visitor_; // Book keeping info. @@ -2980,43 +3122,74 @@ class MarkCompactCollector::Evacuator : public Malloced { intptr_t bytes_compacted_; }; -bool MarkCompactCollector::Evacuator::EvacuateSinglePage( - MemoryChunk* p, HeapObjectVisitor* visitor) { +bool MarkCompactCollector::Evacuator::EvacuatePage(Page* page) { bool success = false; - DCHECK(p->IsEvacuationCandidate() || p->InNewSpace()); - int saved_live_bytes = p->LiveBytes(); - double evacuation_time; + DCHECK(page->SweepingDone()); + int saved_live_bytes = page->LiveBytes(); + double evacuation_time = 0.0; + Heap* heap = page->heap(); { - AlwaysAllocateScope always_allocate(heap()->isolate()); + AlwaysAllocateScope always_allocate(heap->isolate()); TimedScope timed_scope(&evacuation_time); - success = collector_->VisitLiveObjects(p, visitor, kClearMarkbits); + switch (ComputeEvacuationMode(page)) { + case kObjectsNewToOld: + success = collector_->VisitLiveObjects(page, &new_space_visitor_, + kClearMarkbits); + ArrayBufferTracker::ProcessBuffers( + page, ArrayBufferTracker::kUpdateForwardedRemoveOthers); + DCHECK(success); + break; + case kPageNewToOld: + success = collector_->VisitLiveObjects(page, &new_space_page_visitor, + kKeepMarking); + // ArrayBufferTracker will be updated during sweeping. 
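// kKeepMarking is deliberate here: the promoted page is handed to the
// sweeper as a late page after evacuation (see EvacuateNewSpaceAndCandidates
// below), and RawSweep needs intact mark bits both to find the live objects
// and to free the dead array buffers on the page.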
+ DCHECK(success);
+ break;
+ case kPageNewToNew:
+ new_space_page_visitor.account_semispace_copied(page->LiveBytes());
+ // ArrayBufferTracker will be updated during sweeping.
+ success = true;
+ break;
+ case kObjectsOldToOld:
+ success = collector_->VisitLiveObjects(page, &old_space_visitor_,
+ kClearMarkbits);
+ if (!success) {
+ // Aborted compaction page. We have to record slots here, since we
+ // might not have recorded them in the first place.
+ // Note: We mark the page as aborted here to be able to record slots
+ // for code objects in |RecordMigratedSlotVisitor|.
+ page->SetFlag(Page::COMPACTION_WAS_ABORTED);
+ EvacuateRecordOnlyVisitor record_visitor(collector_->heap());
+ success =
+ collector_->VisitLiveObjects(page, &record_visitor, kKeepMarking);
+ ArrayBufferTracker::ProcessBuffers(
+ page, ArrayBufferTracker::kUpdateForwardedKeepOthers);
+ DCHECK(success);
+ // We need to return failure here to indicate that we want this page
+ // added to the sweeper.
+ success = false;
+ } else {
+ ArrayBufferTracker::ProcessBuffers(
+ page, ArrayBufferTracker::kUpdateForwardedRemoveOthers);
+ }
+ break;
+ default:
+ UNREACHABLE();
+ }
 }
+ ReportCompactionProgress(evacuation_time, saved_live_bytes);
 if (FLAG_trace_evacuation) {
- PrintIsolate(heap()->isolate(),
- "evacuation[%p]: page=%p new_space=%d executable=%d "
+ PrintIsolate(heap->isolate(),
+ "evacuation[%p]: page=%p new_space=%d "
+ "page_evacuation=%d executable=%d contains_age_mark=%d "
 "live_bytes=%d time=%f\n",
- this, p, p->InNewSpace(),
- p->IsFlagSet(MemoryChunk::IS_EXECUTABLE), saved_live_bytes,
- evacuation_time);
- }
- if (success) {
- ReportCompactionProgress(evacuation_time, saved_live_bytes);
- }
- return success;
-}
-
-bool MarkCompactCollector::Evacuator::EvacuatePage(MemoryChunk* chunk) {
- bool success = false;
- if (chunk->InNewSpace()) {
- DCHECK_EQ(chunk->concurrent_sweeping_state().Value(),
- NewSpacePage::kSweepingDone);
- success = EvacuateSinglePage(chunk, &new_space_visitor_);
- DCHECK(success);
- USE(success);
- } else {
- DCHECK(chunk->IsEvacuationCandidate());
- DCHECK_EQ(chunk->concurrent_sweeping_state().Value(), Page::kSweepingDone);
- success = EvacuateSinglePage(chunk, &old_space_visitor_);
+ static_cast<void*>(this), static_cast<void*>(page),
+ page->InNewSpace(),
+ page->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION) ||
+ page->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION),
+ page->IsFlagSet(MemoryChunk::IS_EXECUTABLE),
+ page->Contains(heap->new_space()->age_mark()),
+ saved_live_bytes, evacuation_time);
 }
 return success;
}
@@ -3026,12 +3199,16 @@ void MarkCompactCollector::Evacuator::Finalize() {
 heap()->code_space()->MergeCompactionSpace(
 compaction_spaces_.Get(CODE_SPACE));
 heap()->tracer()->AddCompactionEvent(duration_, bytes_compacted_);
- heap()->IncrementPromotedObjectsSize(new_space_visitor_.promoted_size());
+ heap()->IncrementPromotedObjectsSize(new_space_visitor_.promoted_size() +
+ new_space_page_visitor.promoted_size());
 heap()->IncrementSemiSpaceCopiedObjectSize(
- new_space_visitor_.semispace_copied_size());
+ new_space_visitor_.semispace_copied_size() +
+ new_space_page_visitor.semispace_copied_size());
 heap()->IncrementYoungSurvivorsCounter(
 new_space_visitor_.promoted_size() +
- new_space_visitor_.semispace_copied_size());
+ new_space_visitor_.semispace_copied_size() +
+ new_space_page_visitor.promoted_size() +
+ new_space_page_visitor.semispace_copied_size());
 heap()->MergeAllocationSitePretenuringFeedback(local_pretenuring_feedback_);
}

@@ -3074,35 +3251,38 @@ class EvacuationJobTraits {
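// These traits drive a PageParallelJob: ProcessPageInParallel runs on the
// worker tasks, one Evacuator per task, while FinalizePageSequentially runs
// sequentially on the main thread afterwards to act on each page's success
// flag.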
static bool ProcessPageInParallel(Heap* heap, PerTaskData evacuator, MemoryChunk* chunk, PerPageData) { - return evacuator->EvacuatePage(chunk); + return evacuator->EvacuatePage(reinterpret_cast<Page*>(chunk)); } - static void FinalizePageSequentially(Heap*, MemoryChunk* chunk, bool success, - PerPageData data) { - if (chunk->InNewSpace()) { - DCHECK(success); - } else { - Page* p = static_cast<Page*>(chunk); - if (success) { - DCHECK(p->IsEvacuationCandidate()); - DCHECK(p->SweepingDone()); - p->Unlink(); - } else { - // We have partially compacted the page, i.e., some objects may have - // moved, others are still in place. - // We need to: - // - Leave the evacuation candidate flag for later processing of slots - // buffer entries. - // - Leave the slots buffer there for processing of entries added by - // the write barrier. - // - Rescan the page as slot recording in the migration buffer only - // happens upon moving (which we potentially didn't do). - // - Leave the page in the list of pages of a space since we could not - // fully evacuate it. - DCHECK(p->IsEvacuationCandidate()); - p->SetFlag(Page::COMPACTION_WAS_ABORTED); - *data += 1; - } + static void FinalizePageSequentially(Heap* heap, MemoryChunk* chunk, + bool success, PerPageData data) { + using Evacuator = MarkCompactCollector::Evacuator; + Page* p = static_cast<Page*>(chunk); + switch (Evacuator::ComputeEvacuationMode(p)) { + case Evacuator::kPageNewToOld: + break; + case Evacuator::kPageNewToNew: + DCHECK(success); + break; + case Evacuator::kObjectsNewToOld: + DCHECK(success); + break; + case Evacuator::kObjectsOldToOld: + if (success) { + DCHECK(p->IsEvacuationCandidate()); + DCHECK(p->SweepingDone()); + p->Unlink(); + } else { + // We have partially compacted the page, i.e., some objects may have + // moved, others are still in place. + p->ClearEvacuationCandidate(); + // Slots have already been recorded so we just need to add it to the + // sweeper, which will happen after updating pointers. + *data += 1; + } + break; + default: + UNREACHABLE(); } } }; @@ -3118,8 +3298,20 @@ void MarkCompactCollector::EvacuatePagesInParallel() { live_bytes += page->LiveBytes(); job.AddPage(page, &abandoned_pages); } - for (NewSpacePage* page : newspace_evacuation_candidates_) { + + const Address age_mark = heap()->new_space()->age_mark(); + for (Page* page : newspace_evacuation_candidates_) { live_bytes += page->LiveBytes(); + if (!page->NeverEvacuate() && + (page->LiveBytes() > Evacuator::PageEvacuationThreshold()) && + !page->Contains(age_mark)) { + if (page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK)) { + EvacuateNewSpacePageVisitor::MoveToOldSpace(page, heap()->old_space()); + } else { + EvacuateNewSpacePageVisitor::MoveToToSpace(page); + } + } + job.AddPage(page, &abandoned_pages); } DCHECK_GE(job.NumberOfPages(), 1); @@ -3144,16 +3336,15 @@ void MarkCompactCollector::EvacuatePagesInParallel() { delete[] evacuators; if (FLAG_trace_evacuation) { - PrintIsolate( - isolate(), - "%8.0f ms: evacuation-summary: parallel=%s pages=%d aborted=%d " - "wanted_tasks=%d tasks=%d cores=%d live_bytes=%" V8_PTR_PREFIX - "d compaction_speed=%.f\n", - isolate()->time_millis_since_init(), - FLAG_parallel_compaction ? 
"yes" : "no", job.NumberOfPages(), - abandoned_pages, wanted_num_tasks, job.NumberOfTasks(), - V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads(), - live_bytes, compaction_speed); + PrintIsolate(isolate(), + "%8.0f ms: evacuation-summary: parallel=%s pages=%d " + "aborted=%d wanted_tasks=%d tasks=%d cores=%" PRIuS + " live_bytes=%" V8PRIdPTR " compaction_speed=%.f\n", + isolate()->time_millis_since_init(), + FLAG_parallel_compaction ? "yes" : "no", job.NumberOfPages(), + abandoned_pages, wanted_num_tasks, job.NumberOfTasks(), + V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads(), + live_bytes, compaction_speed); } } @@ -3171,28 +3362,21 @@ class EvacuationWeakObjectRetainer : public WeakObjectRetainer { } }; -enum SweepingMode { SWEEP_ONLY, SWEEP_AND_VISIT_LIVE_OBJECTS }; - -enum SkipListRebuildingMode { REBUILD_SKIP_LIST, IGNORE_SKIP_LIST }; +int MarkCompactCollector::Sweeper::RawSweep( + Page* p, FreeListRebuildingMode free_list_mode, + FreeSpaceTreatmentMode free_space_mode) { + Space* space = p->owner(); + DCHECK_NOT_NULL(space); + DCHECK(free_list_mode == IGNORE_FREE_LIST || space->identity() == OLD_SPACE || + space->identity() == CODE_SPACE || space->identity() == MAP_SPACE); + DCHECK(!p->IsEvacuationCandidate() && !p->SweepingDone()); -enum FreeSpaceTreatmentMode { IGNORE_FREE_SPACE, ZAP_FREE_SPACE }; + // Before we sweep objects on the page, we free dead array buffers which + // requires valid mark bits. + ArrayBufferTracker::FreeDead(p); -// Sweeps a page. After sweeping the page can be iterated. -// Slots in live objects pointing into evacuation candidates are updated -// if requested. -// Returns the size of the biggest continuous freed memory chunk in bytes. -template <SweepingMode sweeping_mode, - MarkCompactCollector::SweepingParallelism parallelism, - SkipListRebuildingMode skip_list_mode, - FreeSpaceTreatmentMode free_space_mode> -static int Sweep(PagedSpace* space, Page* p, ObjectVisitor* v) { - DCHECK(!p->IsEvacuationCandidate() && !p->SweepingDone()); - DCHECK(!p->IsFlagSet(Page::BLACK_PAGE)); - DCHECK_EQ(skip_list_mode == REBUILD_SKIP_LIST, - space->identity() == CODE_SPACE); - DCHECK((p->skip_list() == NULL) || (skip_list_mode == REBUILD_SKIP_LIST)); - DCHECK(parallelism == MarkCompactCollector::SWEEP_ON_MAIN_THREAD || - sweeping_mode == SWEEP_ONLY); + // We also release the black area markers here. + p->ReleaseBlackAreaEndMarkerMap(); Address free_start = p->area_start(); DCHECK(reinterpret_cast<intptr_t>(free_start) % (32 * kPointerSize) == 0); @@ -3200,8 +3384,10 @@ static int Sweep(PagedSpace* space, Page* p, ObjectVisitor* v) { // If we use the skip list for code space pages, we have to lock the skip // list because it could be accessed concurrently by the runtime or the // deoptimizer. 
+ const bool rebuild_skip_list = + space->identity() == CODE_SPACE && p->skip_list() != nullptr; SkipList* skip_list = p->skip_list(); - if ((skip_list_mode == REBUILD_SKIP_LIST) && skip_list) { + if (rebuild_skip_list) { skip_list->Clear(); } @@ -3212,22 +3398,25 @@ static int Sweep(PagedSpace* space, Page* p, ObjectVisitor* v) { LiveObjectIterator<kBlackObjects> it(p); HeapObject* object = NULL; while ((object = it.Next()) != NULL) { - DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object))); + DCHECK(Marking::IsBlack(ObjectMarking::MarkBitFrom(object))); Address free_end = object->address(); if (free_end != free_start) { int size = static_cast<int>(free_end - free_start); if (free_space_mode == ZAP_FREE_SPACE) { memset(free_start, 0xcc, size); } - freed_bytes = space->UnaccountedFree(free_start, size); - max_freed_bytes = Max(freed_bytes, max_freed_bytes); + if (free_list_mode == REBUILD_FREE_LIST) { + freed_bytes = reinterpret_cast<PagedSpace*>(space)->UnaccountedFree( + free_start, size); + max_freed_bytes = Max(freed_bytes, max_freed_bytes); + } else { + p->heap()->CreateFillerObjectAt(free_start, size, + ClearRecordedSlots::kNo); + } } Map* map = object->synchronized_map(); int size = object->SizeFromMap(map); - if (sweeping_mode == SWEEP_AND_VISIT_LIVE_OBJECTS) { - object->IterateBody(map->instance_type(), size, v); - } - if ((skip_list_mode == REBUILD_SKIP_LIST) && skip_list != NULL) { + if (rebuild_skip_list) { int new_region_start = SkipList::RegionNumber(free_end); int new_region_end = SkipList::RegionNumber(free_end + size - kPointerSize); @@ -3240,36 +3429,45 @@ static int Sweep(PagedSpace* space, Page* p, ObjectVisitor* v) { } // Clear the mark bits of that page and reset live bytes count. - Bitmap::Clear(p); + p->ClearLiveness(); if (free_start != p->area_end()) { int size = static_cast<int>(p->area_end() - free_start); if (free_space_mode == ZAP_FREE_SPACE) { memset(free_start, 0xcc, size); } - freed_bytes = space->UnaccountedFree(free_start, size); - max_freed_bytes = Max(freed_bytes, max_freed_bytes); + if (free_list_mode == REBUILD_FREE_LIST) { + freed_bytes = reinterpret_cast<PagedSpace*>(space)->UnaccountedFree( + free_start, size); + max_freed_bytes = Max(freed_bytes, max_freed_bytes); + } else { + p->heap()->CreateFillerObjectAt(free_start, size, + ClearRecordedSlots::kNo); + } } p->concurrent_sweeping_state().SetValue(Page::kSweepingDone); + if (free_list_mode == IGNORE_FREE_LIST) return 0; return FreeList::GuaranteedAllocatable(static_cast<int>(max_freed_bytes)); } - void MarkCompactCollector::InvalidateCode(Code* code) { + Page* page = Page::FromAddress(code->address()); + Address start = code->instruction_start(); + Address end = code->address() + code->Size(); + + RememberedSet<OLD_TO_NEW>::RemoveRangeTyped(page, start, end); + if (heap_->incremental_marking()->IsCompacting() && !ShouldSkipEvacuationSlotRecording(code)) { DCHECK(compacting_); // If the object is white than no slots were recorded on it yet. - MarkBit mark_bit = Marking::MarkBitFrom(code); + MarkBit mark_bit = ObjectMarking::MarkBitFrom(code); if (Marking::IsWhite(mark_bit)) return; // Ignore all slots that might have been recorded in the body of the // deoptimized code object. Assumption: no slots will be recorded for // this object after invalidating it. 
- Page* page = Page::FromAddress(code->address()); - Address start = code->instruction_start(); - Address end = code->address() + code->Size(); RememberedSet<OLD_TO_OLD>::RemoveRangeTyped(page, start, end); } } @@ -3286,14 +3484,13 @@ static void VerifyAllBlackObjects(MemoryChunk* page) { LiveObjectIterator<kAllLiveObjects> it(page); HeapObject* object = NULL; while ((object = it.Next()) != NULL) { - CHECK(Marking::IsBlack(Marking::MarkBitFrom(object))); + CHECK(Marking::IsBlack(ObjectMarking::MarkBitFrom(object))); } } #endif // VERIFY_HEAP - -bool MarkCompactCollector::VisitLiveObjects(MemoryChunk* page, - HeapObjectVisitor* visitor, +template <class Visitor> +bool MarkCompactCollector::VisitLiveObjects(MemoryChunk* page, Visitor* visitor, IterationMode mode) { #ifdef VERIFY_HEAP VerifyAllBlackObjects(page); @@ -3302,7 +3499,7 @@ bool MarkCompactCollector::VisitLiveObjects(MemoryChunk* page, LiveObjectIterator<kBlackObjects> it(page); HeapObject* object = nullptr; while ((object = it.Next()) != nullptr) { - DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object))); + DCHECK(Marking::IsBlack(ObjectMarking::MarkBitFrom(object))); if (!visitor->Visit(object)) { if (mode == kClearMarkbits) { page->markbits()->ClearRange( @@ -3312,13 +3509,17 @@ bool MarkCompactCollector::VisitLiveObjects(MemoryChunk* page, page->old_to_new_slots()->RemoveRange( 0, static_cast<int>(object->address() - page->address())); } + if (page->typed_old_to_new_slots() != nullptr) { + RememberedSet<OLD_TO_NEW>::RemoveRangeTyped(page, page->address(), + object->address()); + } RecomputeLiveBytes(page); } return false; } } if (mode == kClearMarkbits) { - Bitmap::Clear(page); + page->ClearLiveness(); } return true; } @@ -3334,58 +3535,12 @@ void MarkCompactCollector::RecomputeLiveBytes(MemoryChunk* page) { page->SetLiveBytes(new_live_size); } - -void MarkCompactCollector::VisitLiveObjectsBody(Page* page, - ObjectVisitor* visitor) { -#ifdef VERIFY_HEAP - VerifyAllBlackObjects(page); -#endif // VERIFY_HEAP - - LiveObjectIterator<kBlackObjects> it(page); - HeapObject* object = NULL; - while ((object = it.Next()) != NULL) { - DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object))); - Map* map = object->synchronized_map(); - int size = object->SizeFromMap(map); - object->IterateBody(map->instance_type(), size, visitor); - } -} - - -void MarkCompactCollector::SweepAbortedPages() { - // Second pass on aborted pages. 
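// This second pass disappears entirely: aborted candidate pages are now
// queued to the Sweeper as late pages once pointers have been updated (see
// EvacuateNewSpaceAndCandidates below), so the regular sweeping machinery
// replaces the special-cased re-sweep.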
- for (Page* p : evacuation_candidates_) { - if (p->IsFlagSet(Page::COMPACTION_WAS_ABORTED)) { - p->ClearFlag(MemoryChunk::COMPACTION_WAS_ABORTED); - p->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress); - PagedSpace* space = static_cast<PagedSpace*>(p->owner()); - switch (space->identity()) { - case OLD_SPACE: - Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, IGNORE_SKIP_LIST, - IGNORE_FREE_SPACE>(space, p, nullptr); - break; - case CODE_SPACE: - if (FLAG_zap_code_space) { - Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, REBUILD_SKIP_LIST, - ZAP_FREE_SPACE>(space, p, nullptr); - } else { - Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, REBUILD_SKIP_LIST, - IGNORE_FREE_SPACE>(space, p, nullptr); - } - break; - default: - UNREACHABLE(); - break; - } - { - base::LockGuard<base::Mutex> guard(&swept_pages_mutex_); - swept_pages(space->identity())->Add(p); - } - } - } +void MarkCompactCollector::Sweeper::AddSweptPageSafe(PagedSpace* space, + Page* page) { + base::LockGuard<base::Mutex> guard(&mutex_); + swept_list_[space->identity()].Add(page); } - void MarkCompactCollector::EvacuateNewSpaceAndCandidates() { TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE); Heap::RelocationLock relocation_lock(heap()); @@ -3396,35 +3551,55 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() { EvacuateNewSpacePrologue(); EvacuatePagesInParallel(); - EvacuateNewSpaceEpilogue(); heap()->new_space()->set_age_mark(heap()->new_space()->top()); } UpdatePointersAfterEvacuation(); + if (!heap()->new_space()->Rebalance()) { + FatalProcessOutOfMemory("NewSpace::Rebalance"); + } + // Give pages that are queued to be freed back to the OS. Note that filtering // slots only handles old space (for unboxed doubles), and thus map space can // still contain stale pointers. We only free the chunks after pointer updates // to still have access to page headers. - heap()->FreeQueuedChunks(); + heap()->memory_allocator()->unmapper()->FreeQueuedChunks(); { TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_CLEAN_UP); - // After updating all pointers, we can finally sweep the aborted pages, - // effectively overriding any forward pointers. - SweepAbortedPages(); - // EvacuateNewSpaceAndCandidates iterates over new space objects and for - // ArrayBuffers either re-registers them as live or promotes them. This is - // needed to properly free them. - heap()->array_buffer_tracker()->FreeDead(false); + for (Page* p : newspace_evacuation_candidates_) { + if (p->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION)) { + p->ClearFlag(Page::PAGE_NEW_NEW_PROMOTION); + sweeper().AddLatePage(p->owner()->identity(), p); + } else if (p->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION)) { + p->ClearFlag(Page::PAGE_NEW_OLD_PROMOTION); + p->ForAllFreeListCategories( + [](FreeListCategory* category) { DCHECK(!category->is_linked()); }); + sweeper().AddLatePage(p->owner()->identity(), p); + } + } + newspace_evacuation_candidates_.Rewind(0); + + for (Page* p : evacuation_candidates_) { + // Important: skip list should be cleared only after roots were updated + // because root iteration traverses the stack and might have to find + // code objects from non-updated pc pointing into evacuation candidate. + SkipList* list = p->skip_list(); + if (list != NULL) list->Clear(); + if (p->IsFlagSet(Page::COMPACTION_WAS_ABORTED)) { + sweeper().AddLatePage(p->owner()->identity(), p); + p->ClearFlag(Page::COMPACTION_WAS_ABORTED); + } + } // Deallocate evacuated candidate pages. 
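// (Only fully evacuated candidates are actually released here; promoted
// pages and aborted candidates were queued to the sweeper above instead.)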
ReleaseEvacuationCandidates(); } #ifdef VERIFY_HEAP - if (FLAG_verify_heap && !sweeping_in_progress_) { + if (FLAG_verify_heap && !sweeper().sweeping_in_progress()) { VerifyEvacuation(heap()); } #endif @@ -3434,12 +3609,12 @@ template <PointerDirection direction> class PointerUpdateJobTraits { public: typedef int PerPageData; // Per page data is not used in this job. - typedef PointersUpdatingVisitor* PerTaskData; + typedef int PerTaskData; // Per task data is not used in this job. - static bool ProcessPageInParallel(Heap* heap, PerTaskData visitor, - MemoryChunk* chunk, PerPageData) { + static bool ProcessPageInParallel(Heap* heap, PerTaskData, MemoryChunk* chunk, + PerPageData) { UpdateUntypedPointers(heap, chunk); - UpdateTypedPointers(heap, chunk, visitor); + UpdateTypedPointers(heap, chunk); return true; } static const bool NeedSequentialFinalization = false; @@ -3449,41 +3624,70 @@ class PointerUpdateJobTraits { private: static void UpdateUntypedPointers(Heap* heap, MemoryChunk* chunk) { if (direction == OLD_TO_NEW) { - RememberedSet<OLD_TO_NEW>::IterateWithWrapper(heap, chunk, - UpdateOldToNewSlot); + RememberedSet<OLD_TO_NEW>::Iterate(chunk, [heap, chunk](Address slot) { + return CheckAndUpdateOldToNewSlot(heap, slot); + }); } else { - RememberedSet<OLD_TO_OLD>::Iterate(chunk, [heap](Address slot) { - PointersUpdatingVisitor::UpdateSlot(heap, - reinterpret_cast<Object**>(slot)); - return REMOVE_SLOT; + RememberedSet<OLD_TO_OLD>::Iterate(chunk, [](Address slot) { + return UpdateSlot(reinterpret_cast<Object**>(slot)); }); } } - static void UpdateTypedPointers(Heap* heap, MemoryChunk* chunk, - PointersUpdatingVisitor* visitor) { + static void UpdateTypedPointers(Heap* heap, MemoryChunk* chunk) { if (direction == OLD_TO_OLD) { Isolate* isolate = heap->isolate(); RememberedSet<OLD_TO_OLD>::IterateTyped( - chunk, [isolate, visitor](SlotType type, Address slot) { - UpdateTypedSlot(isolate, visitor, type, slot); - return REMOVE_SLOT; + chunk, [isolate](SlotType type, Address host_addr, Address slot) { + return UpdateTypedSlotHelper::UpdateTypedSlot(isolate, type, slot, + UpdateSlot); + }); + } else { + Isolate* isolate = heap->isolate(); + RememberedSet<OLD_TO_NEW>::IterateTyped( + chunk, + [isolate, heap](SlotType type, Address host_addr, Address slot) { + return UpdateTypedSlotHelper::UpdateTypedSlot( + isolate, type, slot, [heap](Object** slot) { + return CheckAndUpdateOldToNewSlot( + heap, reinterpret_cast<Address>(slot)); + }); }); } } - static void UpdateOldToNewSlot(HeapObject** address, HeapObject* object) { - MapWord map_word = object->map_word(); - // Since we only filter invalid slots in old space, the store buffer can - // still contain stale pointers in large object and in map spaces. Ignore - // these pointers here. - DCHECK(map_word.IsForwardingAddress() || - !object->GetHeap()->old_space()->Contains( - reinterpret_cast<Address>(address))); - if (map_word.IsForwardingAddress()) { - // Update the corresponding slot. - *address = map_word.ToForwardingAddress(); + static SlotCallbackResult CheckAndUpdateOldToNewSlot(Heap* heap, + Address slot_address) { + Object** slot = reinterpret_cast<Object**>(slot_address); + if (heap->InFromSpace(*slot)) { + HeapObject* heap_object = reinterpret_cast<HeapObject*>(*slot); + DCHECK(heap_object->IsHeapObject()); + MapWord map_word = heap_object->map_word(); + // There could still be stale pointers in large object space, map space, + // and old space for pages that have been promoted. 
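// (UpdateSlot above installs forwarding addresses with a compare-and-swap
// because several pointer-updating tasks may race on a slot that was
// recorded more than once. A stand-alone model of that idiom, using
// std::atomic in place of base::Atomic*; all names are illustrative:)

    #include <atomic>

    void UpdateSlotModel(std::atomic<void*>* slot, void* old_obj,
                         void* target) {
      void* expected = old_obj;
      // If another task swapped the slot first the CAS fails, but the winner
      // wrote the same forwarding target, so the update stays correct.
      slot->compare_exchange_strong(expected, target);
    }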
+ if (map_word.IsForwardingAddress()) { + // Update the corresponding slot. + *slot = map_word.ToForwardingAddress(); + } + // If the object was in from space before and is after executing the + // callback in to space, the object is still live. + // Unfortunately, we do not know about the slot. It could be in a + // just freed free space object. + if (heap->InToSpace(*slot)) { + return KEEP_SLOT; + } + } else if (heap->InToSpace(*slot)) { + // Slots can point to "to" space if the page has been moved, or if the + // slot has been recorded multiple times in the remembered set. Since + // there is no forwarding information present we need to check the + // markbits to determine liveness. + if (Marking::IsBlack( + ObjectMarking::MarkBitFrom(reinterpret_cast<HeapObject*>(*slot)))) + return KEEP_SLOT; + } else { + DCHECK(!heap->InNewSpace(*slot)); } + return REMOVE_SLOT; } }; @@ -3496,24 +3700,13 @@ int NumberOfPointerUpdateTasks(int pages) { template <PointerDirection direction> void UpdatePointersInParallel(Heap* heap, base::Semaphore* semaphore) { - // Work-around bug in clang-3.4 - // https://github.com/nodejs/node/issues/8323 - struct MemoryChunkVisitor { - PageParallelJob<PointerUpdateJobTraits<direction> >& job_; - MemoryChunkVisitor(PageParallelJob<PointerUpdateJobTraits<direction> >& job) - : job_(job) {} - void operator()(MemoryChunk* chunk) { - job_.AddPage(chunk, 0); - } - }; - PageParallelJob<PointerUpdateJobTraits<direction> > job( heap, heap->isolate()->cancelable_task_manager(), semaphore); - RememberedSet<direction>::IterateMemoryChunks(heap, MemoryChunkVisitor(job)); - PointersUpdatingVisitor visitor(heap); + RememberedSet<direction>::IterateMemoryChunks( + heap, [&job](MemoryChunk* chunk) { job.AddPage(chunk, 0); }); int num_pages = job.NumberOfPages(); int num_tasks = NumberOfPointerUpdateTasks(num_pages); - job.Run(num_tasks, [&visitor](int i) { return &visitor; }); + job.Run(num_tasks, [](int i) { return 0; }); } class ToSpacePointerUpdateJobTraits { @@ -3523,6 +3716,24 @@ class ToSpacePointerUpdateJobTraits { static bool ProcessPageInParallel(Heap* heap, PerTaskData visitor, MemoryChunk* chunk, PerPageData limits) { + if (chunk->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION)) { + // New->new promoted pages contain garbage so they require iteration + // using markbits. 
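// Pages that were evacuated object by object come out fully compacted, so
// the linear walk in ProcessPageInParallelVisitAll below is safe; a new->new
// promoted page was moved wholesale and still has dead objects between the
// live ones, hence the mark-bit driven variant.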
+ ProcessPageInParallelVisitLive(heap, visitor, chunk, limits); + } else { + ProcessPageInParallelVisitAll(heap, visitor, chunk, limits); + } + return true; + } + + static const bool NeedSequentialFinalization = false; + static void FinalizePageSequentially(Heap*, MemoryChunk*, bool, PerPageData) { + } + + private: + static void ProcessPageInParallelVisitAll(Heap* heap, PerTaskData visitor, + MemoryChunk* chunk, + PerPageData limits) { for (Address cur = limits.first; cur < limits.second;) { HeapObject* object = HeapObject::FromAddress(cur); Map* map = object->map(); @@ -3530,10 +3741,18 @@ class ToSpacePointerUpdateJobTraits { object->IterateBody(map->instance_type(), size, visitor); cur += size; } - return true; } - static const bool NeedSequentialFinalization = false; - static void FinalizePageSequentially(Heap*, MemoryChunk*, bool, PerPageData) { + + static void ProcessPageInParallelVisitLive(Heap* heap, PerTaskData visitor, + MemoryChunk* chunk, + PerPageData limits) { + LiveObjectIterator<kBlackObjects> it(chunk); + HeapObject* object = NULL; + while ((object = it.Next()) != NULL) { + Map* map = object->map(); + int size = object->SizeFromMap(map); + object->IterateBody(map->instance_type(), size, visitor); + } } }; @@ -3542,15 +3761,13 @@ void UpdateToSpacePointersInParallel(Heap* heap, base::Semaphore* semaphore) { heap, heap->isolate()->cancelable_task_manager(), semaphore); Address space_start = heap->new_space()->bottom(); Address space_end = heap->new_space()->top(); - NewSpacePageIterator it(space_start, space_end); - while (it.has_next()) { - NewSpacePage* page = it.next(); + for (Page* page : NewSpacePageRange(space_start, space_end)) { Address start = page->Contains(space_start) ? space_start : page->area_start(); Address end = page->Contains(space_end) ? space_end : page->area_end(); job.AddPage(page, std::make_pair(start, end)); } - PointersUpdatingVisitor visitor(heap); + PointersUpdatingVisitor visitor; int num_tasks = FLAG_parallel_pointer_update ? job.NumberOfPages() : 1; job.Run(num_tasks, [&visitor](int i) { return &visitor; }); } @@ -3558,7 +3775,7 @@ void UpdateToSpacePointersInParallel(Heap* heap, base::Semaphore* semaphore) { void MarkCompactCollector::UpdatePointersAfterEvacuation() { TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS); - PointersUpdatingVisitor updating_visitor(heap()); + PointersUpdatingVisitor updating_visitor; { TRACE_GC(heap()->tracer(), @@ -3578,25 +3795,6 @@ void MarkCompactCollector::UpdatePointersAfterEvacuation() { { TRACE_GC(heap()->tracer(), - GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_BETWEEN_EVACUATED); - for (Page* p : evacuation_candidates_) { - DCHECK(p->IsEvacuationCandidate()); - // Important: skip list should be cleared only after roots were updated - // because root iteration traverses the stack and might have to find - // code objects from non-updated pc pointing into evacuation candidate. - SkipList* list = p->skip_list(); - if (list != NULL) list->Clear(); - - // First pass on aborted pages, fixing up all live objects. - if (p->IsFlagSet(Page::COMPACTION_WAS_ABORTED)) { - p->ClearEvacuationCandidate(); - VisitLiveObjectsBody(p, &updating_visitor); - } - } - } - - { - TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_WEAK); // Update pointers from external string table. 
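// (The BETWEEN_EVACUATED pass deleted above is no longer needed: aborted
// pages already had their slots re-recorded during evacuation by
// EvacuateRecordOnlyVisitor, so the generic remembered-set update handles
// them like any other page.)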
heap_->UpdateReferencesInExternalStringTable( @@ -3618,33 +3816,29 @@ void MarkCompactCollector::ReleaseEvacuationCandidates() { } evacuation_candidates_.Rewind(0); compacting_ = false; - heap()->FreeQueuedChunks(); + heap()->memory_allocator()->unmapper()->FreeQueuedChunks(); } - -int MarkCompactCollector::SweepInParallel(PagedSpace* space, - int required_freed_bytes, - int max_pages) { +int MarkCompactCollector::Sweeper::ParallelSweepSpace(AllocationSpace identity, + int required_freed_bytes, + int max_pages) { int max_freed = 0; - int max_freed_overall = 0; - int page_count = 0; - for (Page* p : sweeping_list(space)) { - max_freed = SweepInParallel(p, space); - DCHECK(max_freed >= 0); - if (required_freed_bytes > 0 && max_freed >= required_freed_bytes) { + int pages_freed = 0; + Page* page = nullptr; + while ((page = GetSweepingPageSafe(identity)) != nullptr) { + int freed = ParallelSweepPage(page, identity); + pages_freed += 1; + DCHECK_GE(freed, 0); + max_freed = Max(max_freed, freed); + if ((required_freed_bytes) > 0 && (max_freed >= required_freed_bytes)) return max_freed; - } - max_freed_overall = Max(max_freed, max_freed_overall); - page_count++; - if (max_pages > 0 && page_count >= max_pages) { - break; - } + if ((max_pages > 0) && (pages_freed >= max_pages)) return max_freed; } - return max_freed_overall; + return max_freed; } - -int MarkCompactCollector::SweepInParallel(Page* page, PagedSpace* space) { +int MarkCompactCollector::Sweeper::ParallelSweepPage(Page* page, + AllocationSpace identity) { int max_freed = 0; if (page->mutex()->TryLock()) { // If this page was already swept in the meantime, we can return here. @@ -3653,19 +3847,20 @@ int MarkCompactCollector::SweepInParallel(Page* page, PagedSpace* space) { return 0; } page->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress); - if (space->identity() == OLD_SPACE) { - max_freed = Sweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, IGNORE_SKIP_LIST, - IGNORE_FREE_SPACE>(space, page, NULL); - } else if (space->identity() == CODE_SPACE) { - max_freed = Sweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, REBUILD_SKIP_LIST, - IGNORE_FREE_SPACE>(space, page, NULL); + const Sweeper::FreeSpaceTreatmentMode free_space_mode = + Heap::ShouldZapGarbage() ? 
ZAP_FREE_SPACE : IGNORE_FREE_SPACE; + if (identity == NEW_SPACE) { + RawSweep(page, IGNORE_FREE_LIST, free_space_mode); + } else if (identity == OLD_SPACE) { + max_freed = RawSweep(page, REBUILD_FREE_LIST, free_space_mode); + } else if (identity == CODE_SPACE) { + max_freed = RawSweep(page, REBUILD_FREE_LIST, free_space_mode); } else { - max_freed = Sweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, IGNORE_SKIP_LIST, - IGNORE_FREE_SPACE>(space, page, NULL); + max_freed = RawSweep(page, REBUILD_FREE_LIST, free_space_mode); } { - base::LockGuard<base::Mutex> guard(&swept_pages_mutex_); - swept_pages(space->identity())->Add(page); + base::LockGuard<base::Mutex> guard(&mutex_); + swept_list_[identity].Add(page); } page->concurrent_sweeping_state().SetValue(Page::kSweepingDone); page->mutex()->Unlock(); @@ -3673,32 +3868,60 @@ int MarkCompactCollector::SweepInParallel(Page* page, PagedSpace* space) { return max_freed; } +void MarkCompactCollector::Sweeper::AddPage(AllocationSpace space, Page* page) { + DCHECK(!sweeping_in_progress_); + PrepareToBeSweptPage(space, page); + sweeping_list_[space].push_back(page); +} + +void MarkCompactCollector::Sweeper::AddLatePage(AllocationSpace space, + Page* page) { + DCHECK(sweeping_in_progress_); + PrepareToBeSweptPage(space, page); + late_pages_ = true; + AddSweepingPageSafe(space, page); +} + +void MarkCompactCollector::Sweeper::PrepareToBeSweptPage(AllocationSpace space, + Page* page) { + page->concurrent_sweeping_state().SetValue(Page::kSweepingPending); + int to_sweep = page->area_size() - page->LiveBytes(); + if (space != NEW_SPACE) + heap_->paged_space(space)->accounting_stats_.ShrinkSpace(to_sweep); +} + +Page* MarkCompactCollector::Sweeper::GetSweepingPageSafe( + AllocationSpace space) { + base::LockGuard<base::Mutex> guard(&mutex_); + Page* page = nullptr; + if (!sweeping_list_[space].empty()) { + page = sweeping_list_[space].front(); + sweeping_list_[space].pop_front(); + } + return page; +} + +void MarkCompactCollector::Sweeper::AddSweepingPageSafe(AllocationSpace space, + Page* page) { + base::LockGuard<base::Mutex> guard(&mutex_); + sweeping_list_[space].push_back(page); +} void MarkCompactCollector::StartSweepSpace(PagedSpace* space) { space->ClearStats(); - PageIterator it(space); - int will_be_swept = 0; bool unused_page_present = false; - while (it.has_next()) { - Page* p = it.next(); + // Loop needs to support deletion if live bytes == 0 for a page. + for (auto it = space->begin(); it != space->end();) { + Page* p = *(it++); DCHECK(p->SweepingDone()); if (p->IsEvacuationCandidate()) { // Will be processed in EvacuateNewSpaceAndCandidates. DCHECK(evacuation_candidates_.length() > 0); - continue; - } - - // We can not sweep black pages, since all mark bits are set for these - // pages. - if (p->IsFlagSet(Page::BLACK_PAGE)) { - Bitmap::Clear(p); - p->concurrent_sweeping_state().SetValue(Page::kSweepingDone); - p->ClearFlag(Page::BLACK_PAGE); - // TODO(hpayer): Free unused memory of last black page. + DCHECK(!p->HasBlackAreas()); continue; } @@ -3708,8 +3931,9 @@ void MarkCompactCollector::StartSweepSpace(PagedSpace* space) { // (in the free list) dropped again. Since we only use the flag for // testing this is fine. p->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress); - Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, IGNORE_SKIP_LIST, - IGNORE_FREE_SPACE>(space, p, nullptr); + Sweeper::RawSweep(p, Sweeper::IGNORE_FREE_LIST, + Heap::ShouldZapGarbage() ? 
Sweeper::ZAP_FREE_SPACE + : Sweeper::IGNORE_FREE_SPACE); continue; } @@ -3717,18 +3941,17 @@ void MarkCompactCollector::StartSweepSpace(PagedSpace* space) { if (p->LiveBytes() == 0) { if (unused_page_present) { if (FLAG_gc_verbose) { - PrintIsolate(isolate(), "sweeping: released page: %p", p); + PrintIsolate(isolate(), "sweeping: released page: %p", + static_cast<void*>(p)); } + ArrayBufferTracker::FreeAll(p); space->ReleasePage(p); continue; } unused_page_present = true; } - p->concurrent_sweeping_state().SetValue(Page::kSweepingPending); - sweeping_list(space).push_back(p); - int to_sweep = p->area_size() - p->LiveBytes(); - space->accounting_stats_.ShrinkSpace(to_sweep); + sweeper().AddPage(space->identity(), p); will_be_swept++; } @@ -3736,8 +3959,6 @@ void MarkCompactCollector::StartSweepSpace(PagedSpace* space) { PrintIsolate(isolate(), "sweeping: space=%s initialized_for_sweeping=%d", AllocationSpaceName(space->identity()), will_be_swept); } - std::sort(sweeping_list(space).begin(), sweeping_list(space).end(), - [](Page* a, Page* b) { return a->LiveBytes() < b->LiveBytes(); }); } @@ -3753,7 +3974,6 @@ void MarkCompactCollector::SweepSpaces() { #endif { - sweeping_in_progress_ = true; { GCTracer::Scope sweep_scope(heap()->tracer(), GCTracer::Scope::MC_SWEEP_OLD); @@ -3769,9 +3989,7 @@ void MarkCompactCollector::SweepSpaces() { GCTracer::Scope::MC_SWEEP_MAP); StartSweepSpace(heap()->map_space()); } - if (FLAG_concurrent_sweeping) { - StartSweeperThreads(); - } + sweeper().StartSweeping(); } // Deallocate unmarked large objects. @@ -3783,13 +4001,6 @@ void MarkCompactCollector::SweepSpaces() { } } - -void MarkCompactCollector::ParallelSweepSpacesComplete() { - sweeping_list(heap()->old_space()).clear(); - sweeping_list(heap()->code_space()).clear(); - sweeping_list(heap()->map_space()).clear(); -} - Isolate* MarkCompactCollector::isolate() const { return heap_->isolate(); } @@ -3804,7 +4015,10 @@ void MarkCompactCollector::RecordCodeEntrySlot(HeapObject* host, Address slot, Page* source_page = Page::FromAddress(reinterpret_cast<Address>(host)); if (target_page->IsEvacuationCandidate() && !ShouldSkipEvacuationSlotRecording(host)) { - RememberedSet<OLD_TO_OLD>::InsertTyped(source_page, CODE_ENTRY_SLOT, slot); + // TODO(ulan): remove this check after investigating crbug.com/414964. + CHECK(target->IsCode()); + RememberedSet<OLD_TO_OLD>::InsertTyped( + source_page, reinterpret_cast<Address>(host), CODE_ENTRY_SLOT, slot); } } @@ -3815,9 +4029,12 @@ void MarkCompactCollector::RecordCodeTargetPatch(Address pc, Code* target) { Code* host = isolate()->inner_pointer_to_code_cache()->GcSafeFindCodeForInnerPointer( pc); - MarkBit mark_bit = Marking::MarkBitFrom(host); + MarkBit mark_bit = ObjectMarking::MarkBitFrom(host); if (Marking::IsBlack(mark_bit)) { RelocInfo rinfo(isolate(), pc, RelocInfo::CODE_TARGET, 0, host); + // The target is always in old space, we don't have to record the slot in + // the old-to-new remembered set. + DCHECK(!heap()->InNewSpace(target)); RecordRelocSlot(host, &rinfo, target); } } |
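// GetSweepingPageSafe, AddSweepingPageSafe and AddSweptPageSafe above all
// guard the sweeper's shared page lists with a single mutex. A stand-alone
// model of that producer/consumer queue (std::mutex in place of base::Mutex;
// the int-valued "pages" are illustrative only):

    #include <list>
    #include <mutex>

    class ToySweeperQueue {
     public:
      void AddPageSafe(int page) {
        std::lock_guard<std::mutex> guard(mutex_);
        pages_.push_back(page);
      }

      // Returns false when no page is left, mirroring the nullptr result of
      // GetSweepingPageSafe.
      bool GetPageSafe(int* page) {
        std::lock_guard<std::mutex> guard(mutex_);
        if (pages_.empty()) return false;
        *page = pages_.front();
        pages_.pop_front();
        return true;
      }

     private:
      std::mutex mutex_;
      std::list<int> pages_;
    };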