Diffstat (limited to 'deps/v8/src/heap/incremental-marking.cc')
-rw-r--r--  deps/v8/src/heap/incremental-marking.cc | 518
1 file changed, 221 insertions(+), 297 deletions(-)
diff --git a/deps/v8/src/heap/incremental-marking.cc b/deps/v8/src/heap/incremental-marking.cc
index b9e7c61ba0..99be9d0123 100644
--- a/deps/v8/src/heap/incremental-marking.cc
+++ b/deps/v8/src/heap/incremental-marking.cc
@@ -19,33 +19,22 @@
namespace v8 {
namespace internal {
-IncrementalMarking::StepActions IncrementalMarking::IdleStepActions() {
- return StepActions(IncrementalMarking::NO_GC_VIA_STACK_GUARD,
- IncrementalMarking::FORCE_MARKING,
- IncrementalMarking::DO_NOT_FORCE_COMPLETION);
-}
-
IncrementalMarking::IncrementalMarking(Heap* heap)
: heap_(heap),
- observer_(*this, kAllocatedThreshold),
state_(STOPPED),
+ initial_old_generation_size_(0),
+ bytes_marked_ahead_of_schedule_(0),
+ unscanned_bytes_of_large_object_(0),
+ idle_marking_delay_counter_(0),
+ incremental_marking_finalization_rounds_(0),
is_compacting_(false),
- steps_count_(0),
- old_generation_space_available_at_start_of_incremental_(0),
- old_generation_space_used_at_start_of_incremental_(0),
- bytes_rescanned_(0),
should_hurry_(false),
- marking_speed_(0),
- bytes_scanned_(0),
- allocated_(0),
- write_barriers_invoked_since_last_step_(0),
- idle_marking_delay_counter_(0),
- unscanned_bytes_of_large_object_(0),
was_activated_(false),
black_allocation_(false),
finalize_marking_completed_(false),
- incremental_marking_finalization_rounds_(0),
- request_type_(NONE) {}
+ request_type_(NONE),
+ new_generation_observer_(*this, kAllocatedThreshold),
+ old_generation_observer_(*this, kAllocatedThreshold) {}
bool IncrementalMarking::BaseRecordWrite(HeapObject* obj, Object* value) {
HeapObject* value_heap_obj = HeapObject::cast(value);
@@ -76,19 +65,7 @@ void IncrementalMarking::RecordWriteSlow(HeapObject* obj, Object** slot,
void IncrementalMarking::RecordWriteFromCode(HeapObject* obj, Object** slot,
Isolate* isolate) {
DCHECK(obj->IsHeapObject());
- IncrementalMarking* marking = isolate->heap()->incremental_marking();
-
- MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
- int counter = chunk->write_barrier_counter();
- if (counter < (MemoryChunk::kWriteBarrierCounterGranularity / 2)) {
- marking->write_barriers_invoked_since_last_step_ +=
- MemoryChunk::kWriteBarrierCounterGranularity -
- chunk->write_barrier_counter();
- chunk->set_write_barrier_counter(
- MemoryChunk::kWriteBarrierCounterGranularity);
- }
-
- marking->RecordWrite(obj, slot, *slot);
+ isolate->heap()->incremental_marking()->RecordWrite(obj, slot, *slot);
}
// static
@@ -202,20 +179,15 @@ class IncrementalMarkingMarkingVisitor
StaticMarkingVisitor<IncrementalMarkingMarkingVisitor>::Initialize();
table_.Register(kVisitFixedArray, &VisitFixedArrayIncremental);
table_.Register(kVisitNativeContext, &VisitNativeContextIncremental);
- table_.Register(kVisitJSRegExp, &VisitJSRegExp);
}
static const int kProgressBarScanningChunk = 32 * 1024;
static void VisitFixedArrayIncremental(Map* map, HeapObject* object) {
MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
- // TODO(mstarzinger): Move setting of the flag to the allocation site of
- // the array. The visitor should just check the flag.
- if (FLAG_use_marking_progress_bar &&
- chunk->owner()->identity() == LO_SPACE) {
- chunk->SetFlag(MemoryChunk::HAS_PROGRESS_BAR);
- }
if (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR)) {
+ DCHECK(!FLAG_use_marking_progress_bar ||
+ chunk->owner()->identity() == LO_SPACE);
Heap* heap = map->GetHeap();
// When using a progress bar for large fixed arrays, scan only a chunk of
// the array and try to push it onto the marking deque again until it is
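The hunk above makes the visitor trust a HAS_PROGRESS_BAR flag set at allocation time instead of setting it itself: large fixed arrays are scanned in kProgressBarScanningChunk (32 KB) slices and the object is pushed back onto the marking deque until the whole array has been visited. A minimal sketch of that resumable-scan pattern, assuming a generic payload and deque rather than V8's visitor machinery:

#include <algorithm>
#include <cstddef>
#include <deque>
#include <vector>

constexpr std::size_t kProgressBarScanningChunk = 32 * 1024;

struct BigObject {
  std::vector<char> payload;
  std::size_t progress = 0;  // bytes scanned so far (the "progress bar")
};

// Scans at most one chunk; returns true while the object needs more work.
bool ScanChunk(BigObject* obj) {
  std::size_t end = std::min(obj->progress + kProgressBarScanningChunk,
                             obj->payload.size());
  // ... visit the pointers in [progress, end) here ...
  obj->progress = end;
  return obj->progress < obj->payload.size();
}

void DrainDeque(std::deque<BigObject*>* marking_deque) {
  while (!marking_deque->empty()) {
    BigObject* obj = marking_deque->front();
    marking_deque->pop_front();
    if (ScanChunk(obj)) marking_deque->push_back(obj);  // re-queue unfinished
  }
}

V8 keeps the scan cursor on the MemoryChunk rather than on the object; the sketch stores it inline for brevity.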
@@ -423,22 +395,6 @@ void IncrementalMarking::ActivateIncrementalWriteBarrier() {
}
-bool IncrementalMarking::ShouldActivateEvenWithoutIdleNotification() {
-#ifndef DEBUG
- static const intptr_t kActivationThreshold = 8 * MB;
-#else
- // TODO(gc) consider setting this to some low level so that some
- // debug tests run with incremental marking and some without.
- static const intptr_t kActivationThreshold = 0;
-#endif
- // Don't switch on for very small heaps.
- return CanBeActivated() &&
- heap_->PromotedSpaceSizeOfObjects() > kActivationThreshold &&
- heap_->HeapIsFullEnoughToStartIncrementalMarking(
- heap_->old_generation_allocation_limit());
-}
-
-
bool IncrementalMarking::WasActivated() { return was_activated_; }
@@ -467,21 +423,6 @@ void IncrementalMarking::ActivateGeneratedStub(Code* stub) {
}
-void IncrementalMarking::NotifyOfHighPromotionRate() {
- if (IsMarking()) {
- if (marking_speed_ < kFastMarking) {
- if (FLAG_trace_gc) {
- PrintIsolate(heap()->isolate(),
- "Increasing marking speed to %d "
- "due to high promotion rate\n",
- static_cast<int>(kFastMarking));
- }
- marking_speed_ = kFastMarking;
- }
- }
-}
-
-
static void PatchIncrementalMarkingRecordWriteStubs(
Heap* heap, RecordWriteStub::Mode mode) {
UnseededNumberDictionary* stubs = heap->code_stubs();
@@ -503,34 +444,60 @@ static void PatchIncrementalMarkingRecordWriteStubs(
}
}
-
-void IncrementalMarking::Start(const char* reason) {
+void IncrementalMarking::Start(GarbageCollectionReason gc_reason) {
if (FLAG_trace_incremental_marking) {
- PrintF("[IncrementalMarking] Start (%s)\n",
- (reason == nullptr) ? "unknown reason" : reason);
+ int old_generation_size_mb =
+ static_cast<int>(heap()->PromotedSpaceSizeOfObjects() / MB);
+ int old_generation_limit_mb =
+ static_cast<int>(heap()->old_generation_allocation_limit() / MB);
+ heap()->isolate()->PrintWithTimestamp(
+ "[IncrementalMarking] Start (%s): old generation %dMB, limit %dMB, "
+ "slack %dMB\n",
+ Heap::GarbageCollectionReasonToString(gc_reason),
+ old_generation_size_mb, old_generation_limit_mb,
+ Max(0, old_generation_limit_mb - old_generation_size_mb));
}
DCHECK(FLAG_incremental_marking);
DCHECK(state_ == STOPPED);
DCHECK(heap_->gc_state() == Heap::NOT_IN_GC);
DCHECK(!heap_->isolate()->serializer_enabled());
+ Counters* counters = heap_->isolate()->counters();
+
+ counters->incremental_marking_reason()->AddSample(
+ static_cast<int>(gc_reason));
HistogramTimerScope incremental_marking_scope(
- heap_->isolate()->counters()->gc_incremental_marking_start());
+ counters->gc_incremental_marking_start());
TRACE_EVENT0("v8", "V8.GCIncrementalMarkingStart");
- ResetStepCounters();
-
+ heap_->tracer()->NotifyIncrementalMarkingStart();
+
+ start_time_ms_ = heap()->MonotonicallyIncreasingTimeInMs();
+ initial_old_generation_size_ = heap_->PromotedSpaceSizeOfObjects();
+ old_generation_allocation_counter_ = heap_->OldGenerationAllocationCounter();
+ bytes_allocated_ = 0;
+ bytes_marked_ahead_of_schedule_ = 0;
+ should_hurry_ = false;
was_activated_ = true;
if (!heap_->mark_compact_collector()->sweeping_in_progress()) {
StartMarking();
} else {
if (FLAG_trace_incremental_marking) {
- PrintF("[IncrementalMarking] Start sweeping.\n");
+ heap()->isolate()->PrintWithTimestamp(
+ "[IncrementalMarking] Start sweeping.\n");
}
state_ = SWEEPING;
}
- heap_->new_space()->AddAllocationObserver(&observer_);
+ SpaceIterator it(heap_);
+ while (it.has_next()) {
+ Space* space = it.next();
+ if (space == heap_->new_space()) {
+ space->AddAllocationObserver(&new_generation_observer_);
+ } else {
+ space->AddAllocationObserver(&old_generation_observer_);
+ }
+ }
incremental_marking_job()->Start(heap_);
}
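Where the old code attached a single observer to the new space, Start() now registers an observer on every space, so old-generation allocation drives marking steps as well. A hedged sketch of the observer contract this relies on (the class and method names below are illustrative, not V8's actual AllocationObserver API):

#include <cstddef>
#include <functional>
#include <utility>

class AllocationObserverSketch {
 public:
  AllocationObserverSketch(std::size_t step_size,
                           std::function<void(std::size_t)> callback)
      : step_size_(step_size), callback_(std::move(callback)) {}

  // Called by the owning space on each allocation; fires the callback once
  // at least step_size bytes have accumulated.
  void AllocationStep(std::size_t bytes) {
    bytes_since_step_ += bytes;
    if (bytes_since_step_ >= step_size_) {
      // The old-generation observer would trigger
      // AdvanceIncrementalMarkingOnAllocation() here.
      callback_(bytes_since_step_);
      bytes_since_step_ = 0;
    }
  }

 private:
  std::size_t step_size_;
  std::size_t bytes_since_step_ = 0;
  std::function<void(std::size_t)> callback_;
};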
@@ -542,12 +509,14 @@ void IncrementalMarking::StartMarking() {
// but we cannot enable black allocation while deserializing. Hence, we
// have to delay the start of incremental marking in that case.
if (FLAG_trace_incremental_marking) {
- PrintF("[IncrementalMarking] Start delayed - serializer\n");
+ heap()->isolate()->PrintWithTimestamp(
+ "[IncrementalMarking] Start delayed - serializer\n");
}
return;
}
if (FLAG_trace_incremental_marking) {
- PrintF("[IncrementalMarking] Start marking\n");
+ heap()->isolate()->PrintWithTimestamp(
+ "[IncrementalMarking] Start marking\n");
}
is_compacting_ = !FLAG_never_compact &&
@@ -559,7 +528,8 @@ void IncrementalMarking::StartMarking() {
if (heap_->UsingEmbedderHeapTracer()) {
TRACE_GC(heap()->tracer(),
GCTracer::Scope::MC_INCREMENTAL_WRAPPER_PROLOGUE);
- heap_->mark_compact_collector()->embedder_heap_tracer()->TracePrologue();
+ heap_->embedder_heap_tracer()->TracePrologue(
+ heap_->embedder_reachable_reference_reporter());
}
RecordWriteStub::Mode mode = is_compacting_
@@ -589,7 +559,7 @@ void IncrementalMarking::StartMarking() {
// Ready to start incremental marking.
if (FLAG_trace_incremental_marking) {
- PrintF("[IncrementalMarking] Running\n");
+ heap()->isolate()->PrintWithTimestamp("[IncrementalMarking] Running\n");
}
}
@@ -601,7 +571,8 @@ void IncrementalMarking::StartBlackAllocation() {
heap()->map_space()->MarkAllocationInfoBlack();
heap()->code_space()->MarkAllocationInfoBlack();
if (FLAG_trace_incremental_marking) {
- PrintF("[IncrementalMarking] Black allocation started\n");
+ heap()->isolate()->PrintWithTimestamp(
+ "[IncrementalMarking] Black allocation started\n");
}
}
@@ -609,11 +580,22 @@ void IncrementalMarking::FinishBlackAllocation() {
if (black_allocation_) {
black_allocation_ = false;
if (FLAG_trace_incremental_marking) {
- PrintF("[IncrementalMarking] Black allocation finished\n");
+ heap()->isolate()->PrintWithTimestamp(
+ "[IncrementalMarking] Black allocation finished\n");
}
}
}
+void IncrementalMarking::AbortBlackAllocation() {
+ for (Page* page : *heap()->old_space()) {
+ page->ReleaseBlackAreaEndMarkerMap();
+ }
+ if (FLAG_trace_incremental_marking) {
+ heap()->isolate()->PrintWithTimestamp(
+ "[IncrementalMarking] Black allocation aborted\n");
+ }
+}
+
void IncrementalMarking::MarkRoots() {
DCHECK(!finalize_marking_completed_);
DCHECK(IsMarking());
@@ -742,7 +724,6 @@ void IncrementalMarking::RetainMaps() {
}
}
-
void IncrementalMarking::FinalizeIncrementally() {
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_INCREMENTAL_FINALIZE_BODY);
DCHECK(!finalize_marking_completed_);
@@ -775,11 +756,12 @@ void IncrementalMarking::FinalizeIncrementally() {
abs(old_marking_deque_top -
heap_->mark_compact_collector()->marking_deque()->top());
+ marking_progress += static_cast<int>(heap_->wrappers_to_trace());
+
double end = heap_->MonotonicallyIncreasingTimeInMs();
double delta = end - start;
- heap_->tracer()->AddMarkingTime(delta);
if (FLAG_trace_incremental_marking) {
- PrintF(
+ heap()->isolate()->PrintWithTimestamp(
"[IncrementalMarking] Finalize incrementally round %d, "
"spent %d ms, marking progress %d.\n",
incremental_marking_finalization_rounds_, static_cast<int>(delta),
@@ -926,23 +908,23 @@ void IncrementalMarking::Hurry() {
// because should_hurry_ will force a full GC.
if (!heap_->mark_compact_collector()->marking_deque()->IsEmpty()) {
double start = 0.0;
- if (FLAG_trace_incremental_marking || FLAG_print_cumulative_gc_stat) {
+ if (FLAG_trace_incremental_marking) {
start = heap_->MonotonicallyIncreasingTimeInMs();
if (FLAG_trace_incremental_marking) {
- PrintF("[IncrementalMarking] Hurry\n");
+ heap()->isolate()->PrintWithTimestamp("[IncrementalMarking] Hurry\n");
}
}
// TODO(gc) hurry can mark objects it encounters black as mutator
// was stopped.
ProcessMarkingDeque(0, FORCE_COMPLETION);
state_ = COMPLETE;
- if (FLAG_trace_incremental_marking || FLAG_print_cumulative_gc_stat) {
+ if (FLAG_trace_incremental_marking) {
double end = heap_->MonotonicallyIncreasingTimeInMs();
double delta = end - start;
- heap_->tracer()->AddMarkingTime(delta);
if (FLAG_trace_incremental_marking) {
- PrintF("[IncrementalMarking] Complete (hurry), spent %d ms.\n",
- static_cast<int>(delta));
+ heap()->isolate()->PrintWithTimestamp(
+ "[IncrementalMarking] Complete (hurry), spent %d ms.\n",
+ static_cast<int>(delta));
}
}
}
@@ -968,12 +950,28 @@ void IncrementalMarking::Hurry() {
void IncrementalMarking::Stop() {
if (IsStopped()) return;
if (FLAG_trace_incremental_marking) {
- PrintF("[IncrementalMarking] Stopping.\n");
+ int old_generation_size_mb =
+ static_cast<int>(heap()->PromotedSpaceSizeOfObjects() / MB);
+ int old_generation_limit_mb =
+ static_cast<int>(heap()->old_generation_allocation_limit() / MB);
+ heap()->isolate()->PrintWithTimestamp(
+ "[IncrementalMarking] Stopping: old generation %dMB, limit %dMB, "
+ "overshoot %dMB\n",
+ old_generation_size_mb, old_generation_limit_mb,
+ Max(0, old_generation_size_mb - old_generation_limit_mb));
+ }
+
+ SpaceIterator it(heap_);
+ while (it.has_next()) {
+ Space* space = it.next();
+ if (space == heap_->new_space()) {
+ space->RemoveAllocationObserver(&new_generation_observer_);
+ } else {
+ space->RemoveAllocationObserver(&old_generation_observer_);
+ }
}
- heap_->new_space()->RemoveAllocationObserver(&observer_);
IncrementalMarking::set_should_hurry(false);
- ResetStepCounters();
if (IsMarking()) {
PatchIncrementalMarkingRecordWriteStubs(heap_,
RecordWriteStub::STORE_BUFFER_ONLY);
@@ -995,7 +993,7 @@ void IncrementalMarking::Finalize() {
void IncrementalMarking::FinalizeMarking(CompletionAction action) {
DCHECK(!finalize_marking_completed_);
if (FLAG_trace_incremental_marking) {
- PrintF(
+ heap()->isolate()->PrintWithTimestamp(
"[IncrementalMarking] requesting finalization of incremental "
"marking.\n");
}
@@ -1015,7 +1013,8 @@ void IncrementalMarking::MarkingComplete(CompletionAction action) {
// the should-hurry flag to indicate that there can't be much work left to do.
set_should_hurry(true);
if (FLAG_trace_incremental_marking) {
- PrintF("[IncrementalMarking] Complete (normal).\n");
+ heap()->isolate()->PrintWithTimestamp(
+ "[IncrementalMarking] Complete (normal).\n");
}
request_type_ = COMPLETE_MARKING;
if (action == GC_VIA_STACK_GUARD) {
@@ -1031,246 +1030,170 @@ void IncrementalMarking::Epilogue() {
}
double IncrementalMarking::AdvanceIncrementalMarking(
- double deadline_in_ms, IncrementalMarking::StepActions step_actions) {
+ double deadline_in_ms, CompletionAction completion_action,
+ ForceCompletionAction force_completion, StepOrigin step_origin) {
DCHECK(!IsStopped());
- intptr_t step_size_in_bytes = GCIdleTimeHandler::EstimateMarkingStepSize(
- GCIdleTimeHandler::kIncrementalMarkingStepTimeInMs,
- heap()
- ->tracer()
- ->FinalIncrementalMarkCompactSpeedInBytesPerMillisecond());
double remaining_time_in_ms = 0.0;
- intptr_t bytes_processed = 0;
+ intptr_t step_size_in_bytes = GCIdleTimeHandler::EstimateMarkingStepSize(
+ kStepSizeInMs,
+ heap()->tracer()->IncrementalMarkingSpeedInBytesPerMillisecond());
do {
- bytes_processed =
- Step(step_size_in_bytes, step_actions.completion_action,
- step_actions.force_marking, step_actions.force_completion);
+ Step(step_size_in_bytes, completion_action, force_completion, step_origin);
remaining_time_in_ms =
deadline_in_ms - heap()->MonotonicallyIncreasingTimeInMs();
- } while (bytes_processed > 0 &&
- remaining_time_in_ms >=
- 2.0 * GCIdleTimeHandler::kIncrementalMarkingStepTimeInMs &&
- !IsComplete() &&
+ } while (remaining_time_in_ms >= kStepSizeInMs && !IsComplete() &&
!heap()->mark_compact_collector()->marking_deque()->IsEmpty());
return remaining_time_in_ms;
}
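AdvanceIncrementalMarking now loops on a fixed per-step time budget (kStepSizeInMs) rather than the idle-handler estimate, and stops as soon as less than one step's worth of time remains before the deadline. A self-contained toy of that deadline loop, with a sleep standing in for Step():

#include <chrono>
#include <cstdio>
#include <thread>

static double NowMs() {
  using namespace std::chrono;
  return duration<double, std::milli>(steady_clock::now().time_since_epoch())
      .count();
}

int main() {
  const double kStepSizeInMs = 1.0;
  const double deadline_in_ms = NowMs() + 8.0;  // pretend we got an 8 ms budget
  int steps = 0;
  double remaining_time_in_ms = 0.0;
  do {
    // Stand-in for Step(step_size_in_bytes, ...): about 1 ms of marking work.
    std::this_thread::sleep_for(std::chrono::milliseconds(1));
    ++steps;
    remaining_time_in_ms = deadline_in_ms - NowMs();
    // The real loop also exits on IsComplete() or an empty marking deque.
  } while (remaining_time_in_ms >= kStepSizeInMs);
  std::printf("%d steps, %.2f ms left\n", steps, remaining_time_in_ms);
}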
-void IncrementalMarking::OldSpaceStep(intptr_t allocated) {
- if (IsStopped() && ShouldActivateEvenWithoutIdleNotification()) {
- heap()->StartIncrementalMarking(Heap::kNoGCFlags, kNoGCCallbackFlags,
- "old space step");
- } else {
- Step(allocated * kFastMarking / kInitialMarkingSpeed, GC_VIA_STACK_GUARD);
- }
-}
-
-
-void IncrementalMarking::SpeedUp() {
- bool speed_up = false;
-
- if ((steps_count_ % kMarkingSpeedAccellerationInterval) == 0) {
- if (FLAG_trace_incremental_marking) {
- PrintIsolate(heap()->isolate(), "Speed up marking after %d steps\n",
- static_cast<int>(kMarkingSpeedAccellerationInterval));
- }
- speed_up = true;
- }
-
- bool space_left_is_very_small =
- (old_generation_space_available_at_start_of_incremental_ < 10 * MB);
-
- bool only_1_nth_of_space_that_was_available_still_left =
- (SpaceLeftInOldSpace() * (marking_speed_ + 1) <
- old_generation_space_available_at_start_of_incremental_);
-
- if (space_left_is_very_small ||
- only_1_nth_of_space_that_was_available_still_left) {
- if (FLAG_trace_incremental_marking)
- PrintIsolate(heap()->isolate(),
- "Speed up marking because of low space left\n");
- speed_up = true;
- }
-
- bool size_of_old_space_multiplied_by_n_during_marking =
- (heap_->PromotedTotalSize() >
- (marking_speed_ + 1) *
- old_generation_space_used_at_start_of_incremental_);
- if (size_of_old_space_multiplied_by_n_during_marking) {
- speed_up = true;
- if (FLAG_trace_incremental_marking) {
- PrintIsolate(heap()->isolate(),
- "Speed up marking because of heap size increase\n");
- }
- }
-
- int64_t promoted_during_marking =
- heap_->PromotedTotalSize() -
- old_generation_space_used_at_start_of_incremental_;
- intptr_t delay = marking_speed_ * MB;
- intptr_t scavenge_slack = heap_->MaxSemiSpaceSize();
-
- // We try to scan at least twice the speed that we are allocating.
- if (promoted_during_marking > bytes_scanned_ / 2 + scavenge_slack + delay) {
- if (FLAG_trace_incremental_marking) {
- PrintIsolate(heap()->isolate(),
- "Speed up marking because marker was not keeping up\n");
- }
- speed_up = true;
- }
-
- if (speed_up) {
- if (state_ != MARKING) {
- if (FLAG_trace_incremental_marking) {
- PrintIsolate(heap()->isolate(),
- "Postponing speeding up marking until marking starts\n");
- }
- } else {
- marking_speed_ += kMarkingSpeedAccelleration;
- marking_speed_ = static_cast<int>(
- Min(kMaxMarkingSpeed, static_cast<intptr_t>(marking_speed_ * 1.3)));
- if (FLAG_trace_incremental_marking) {
- PrintIsolate(heap()->isolate(), "Marking speed increased to %d\n",
- marking_speed_);
- }
- }
- }
-}
-
void IncrementalMarking::FinalizeSweeping() {
DCHECK(state_ == SWEEPING);
if (heap_->mark_compact_collector()->sweeping_in_progress() &&
- (heap_->mark_compact_collector()->sweeper().IsSweepingCompleted() ||
- !FLAG_concurrent_sweeping)) {
+ (!FLAG_concurrent_sweeping ||
+ heap_->mark_compact_collector()->sweeper().IsSweepingCompleted())) {
heap_->mark_compact_collector()->EnsureSweepingCompleted();
}
if (!heap_->mark_compact_collector()->sweeping_in_progress()) {
- bytes_scanned_ = 0;
StartMarking();
}
}
-intptr_t IncrementalMarking::Step(intptr_t allocated_bytes,
- CompletionAction action,
- ForceMarkingAction marking,
- ForceCompletionAction completion) {
- DCHECK(allocated_bytes >= 0);
+size_t IncrementalMarking::StepSizeToKeepUpWithAllocations() {
+ // Update bytes_allocated_ based on the allocation counter.
+ size_t current_counter = heap_->OldGenerationAllocationCounter();
+ bytes_allocated_ += current_counter - old_generation_allocation_counter_;
+ old_generation_allocation_counter_ = current_counter;
+ return bytes_allocated_;
+}
+
+size_t IncrementalMarking::StepSizeToMakeProgress() {
+ // We increase step size gradually based on the time passed in order to
+ // leave marking work to standalone tasks. The ramp up duration and the
+ // target step count are chosen based on benchmarks.
+ const int kRampUpIntervalMs = 300;
+ const size_t kTargetStepCount = 128;
+ const size_t kTargetStepCountAtOOM = 16;
+ size_t oom_slack = heap()->new_space()->Capacity() + 64 * MB;
+
+ if (heap()->IsCloseToOutOfMemory(oom_slack)) {
+ return heap()->PromotedSpaceSizeOfObjects() / kTargetStepCountAtOOM;
+ }
+
+ size_t step_size = Max(initial_old_generation_size_ / kTargetStepCount,
+ IncrementalMarking::kAllocatedThreshold);
+ double time_passed_ms =
+ heap_->MonotonicallyIncreasingTimeInMs() - start_time_ms_;
+ double factor = Min(time_passed_ms / kRampUpIntervalMs, 1.0);
+ return static_cast<size_t>(factor * step_size);
+}
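The allocation-driven step size is the sum of these two parts: StepSizeToKeepUpWithAllocations() returns the old-generation bytes allocated since the last step (the marking debt), and StepSizeToMakeProgress() adds a fixed share of the initial heap that ramps up over the first 300 ms. A hedged re-derivation of the progress part with concrete numbers (the value of kAllocatedThreshold is assumed here, not taken from the patch):

#include <algorithm>
#include <cstddef>
#include <cstdio>

std::size_t ProgressStepSize(std::size_t initial_old_gen_size,
                             double time_passed_ms) {
  const double kRampUpIntervalMs = 300;
  const std::size_t kTargetStepCount = 128;
  const std::size_t kAllocatedThreshold = 64 * 1024;  // assumed constant value
  std::size_t step_size = std::max(initial_old_gen_size / kTargetStepCount,
                                   kAllocatedThreshold);
  double factor = std::min(time_passed_ms / kRampUpIntervalMs, 1.0);
  return static_cast<std::size_t>(factor * step_size);
}

int main() {
  const std::size_t kMB = 1024 * 1024;
  // With a 64 MB old generation the full step is 64 MB / 128 = 512 KB.
  std::printf("%zu\n", ProgressStepSize(64 * kMB, 150));   // 262144 (half ramp)
  std::printf("%zu\n", ProgressStepSize(64 * kMB, 1000));  // 524288 (full ramp)
}

Near OOM the schedule switches to PromotedSpaceSizeOfObjects() / 16 per step, i.e. much larger steps so marking finishes before the heap limit is hit.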
+void IncrementalMarking::AdvanceIncrementalMarkingOnAllocation() {
if (heap_->gc_state() != Heap::NOT_IN_GC || !FLAG_incremental_marking ||
(state_ != SWEEPING && state_ != MARKING)) {
- return 0;
- }
-
- allocated_ += allocated_bytes;
-
- if (marking == DO_NOT_FORCE_MARKING && allocated_ < kAllocatedThreshold &&
- write_barriers_invoked_since_last_step_ <
- kWriteBarriersInvokedThreshold) {
- return 0;
+ return;
}
- // If an idle notification happened recently, we delay marking steps.
- if (marking == DO_NOT_FORCE_MARKING &&
- heap_->RecentIdleNotificationHappened()) {
- return 0;
+ size_t bytes_to_process =
+ StepSizeToKeepUpWithAllocations() + StepSizeToMakeProgress();
+
+ if (bytes_to_process >= IncrementalMarking::kAllocatedThreshold) {
+ // The first step after Scavenge will see many allocated bytes.
+ // Cap the step size to distribute the marking work more uniformly.
+ size_t max_step_size = GCIdleTimeHandler::EstimateMarkingStepSize(
+ kMaxStepSizeInMs,
+ heap()->tracer()->IncrementalMarkingSpeedInBytesPerMillisecond());
+ bytes_to_process = Min(bytes_to_process, max_step_size);
+
+ size_t bytes_processed = 0;
+ if (bytes_marked_ahead_of_schedule_ >= bytes_to_process) {
+ // Steps performed in tasks have put us ahead of schedule.
+ // We skip processing of the marking deque here and thus
+ // shift marking time from inside V8 to standalone tasks.
+ bytes_marked_ahead_of_schedule_ -= bytes_to_process;
+ bytes_processed = bytes_to_process;
+ } else {
+ bytes_processed = Step(bytes_to_process, GC_VIA_STACK_GUARD,
+ FORCE_COMPLETION, StepOrigin::kV8);
+ }
+ bytes_allocated_ -= Min(bytes_allocated_, bytes_processed);
}
+}
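This function can satisfy a step entirely from credit banked by task-driven steps (StepOrigin::kTask adds to bytes_marked_ahead_of_schedule_ inside Step() below), which shifts marking time off the mutator's allocation path. A stripped-down model of that accounting, with all V8 specifics removed:

#include <algorithm>
#include <cstddef>

struct MarkingScheduleSketch {
  std::size_t bytes_allocated = 0;                 // debt from allocation
  std::size_t bytes_marked_ahead_of_schedule = 0;  // credit from task steps

  void OnTaskStep(std::size_t bytes_processed) {
    bytes_marked_ahead_of_schedule += bytes_processed;  // bank the credit
  }

  void OnAllocationStep(std::size_t bytes_to_process) {
    std::size_t bytes_processed;
    if (bytes_marked_ahead_of_schedule >= bytes_to_process) {
      // Task steps already covered this step: spend credit, mark nothing.
      bytes_marked_ahead_of_schedule -= bytes_to_process;
      bytes_processed = bytes_to_process;
    } else {
      bytes_processed = MarkOnMainThread(bytes_to_process);
    }
    bytes_allocated -= std::min(bytes_allocated, bytes_processed);
  }

  // Stand-in for Step(bytes, GC_VIA_STACK_GUARD, FORCE_COMPLETION, kV8).
  std::size_t MarkOnMainThread(std::size_t bytes) { return bytes; }
};

The net effect is that a fast-allocating mutator only marks on the main thread when background steps have not already paid down its debt.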
- intptr_t bytes_processed = 0;
- {
- HistogramTimerScope incremental_marking_scope(
- heap_->isolate()->counters()->gc_incremental_marking());
- TRACE_EVENT0("v8", "V8.GCIncrementalMarking");
- TRACE_GC(heap_->tracer(), GCTracer::Scope::MC_INCREMENTAL);
- double start = heap_->MonotonicallyIncreasingTimeInMs();
-
- // The marking speed is driven either by the allocation rate or by the rate
- // at which we are having to check the color of objects in the write
- // barrier.
- // It is possible for a tight non-allocating loop to run a lot of write
- // barriers before we get here and check them (marking can only take place
- // on
- // allocation), so to reduce the lumpiness we don't use the write barriers
- // invoked since last step directly to determine the amount of work to do.
- intptr_t bytes_to_process =
- marking_speed_ *
- Max(allocated_, write_barriers_invoked_since_last_step_);
- allocated_ = 0;
- write_barriers_invoked_since_last_step_ = 0;
-
- bytes_scanned_ += bytes_to_process;
-
- // TODO(hpayer): Do not account for sweeping finalization while marking.
- if (state_ == SWEEPING) {
- FinalizeSweeping();
- }
+size_t IncrementalMarking::Step(size_t bytes_to_process,
+ CompletionAction action,
+ ForceCompletionAction completion,
+ StepOrigin step_origin) {
+ HistogramTimerScope incremental_marking_scope(
+ heap_->isolate()->counters()->gc_incremental_marking());
+ TRACE_EVENT0("v8", "V8.GCIncrementalMarking");
+ TRACE_GC(heap_->tracer(), GCTracer::Scope::MC_INCREMENTAL);
+ double start = heap_->MonotonicallyIncreasingTimeInMs();
- if (state_ == MARKING) {
+ if (state_ == SWEEPING) {
+ TRACE_GC(heap_->tracer(), GCTracer::Scope::MC_INCREMENTAL_SWEEPING);
+ FinalizeSweeping();
+ }
+
+ size_t bytes_processed = 0;
+ if (state_ == MARKING) {
+ const bool incremental_wrapper_tracing =
+ FLAG_incremental_marking_wrappers && heap_->UsingEmbedderHeapTracer();
+ const bool process_wrappers =
+ incremental_wrapper_tracing &&
+ (heap_->RequiresImmediateWrapperProcessing() ||
+ heap_->mark_compact_collector()->marking_deque()->IsEmpty());
+ bool wrapper_work_left = incremental_wrapper_tracing;
+ if (!process_wrappers) {
bytes_processed = ProcessMarkingDeque(bytes_to_process);
- if (FLAG_incremental_marking_wrappers &&
- heap_->UsingEmbedderHeapTracer()) {
- TRACE_GC(heap()->tracer(),
- GCTracer::Scope::MC_INCREMENTAL_WRAPPER_TRACING);
- // This currently marks through all registered wrappers and does not
- // respect bytes_to_process.
- // TODO(hpayer): Integrate incremental marking of wrappers into
- // bytes_to_process logic.
- heap_->mark_compact_collector()
- ->RegisterWrappersWithEmbedderHeapTracer();
- heap_->mark_compact_collector()->embedder_heap_tracer()->AdvanceTracing(
- 0,
- EmbedderHeapTracer::AdvanceTracingActions(
- EmbedderHeapTracer::ForceCompletionAction::FORCE_COMPLETION));
+ if (step_origin == StepOrigin::kTask) {
+ bytes_marked_ahead_of_schedule_ += bytes_processed;
}
- if (heap_->mark_compact_collector()->marking_deque()->IsEmpty()) {
- if (completion == FORCE_COMPLETION ||
- IsIdleMarkingDelayCounterLimitReached()) {
- if (!finalize_marking_completed_) {
- FinalizeMarking(action);
- } else {
- MarkingComplete(action);
- }
+ } else {
+ const double wrapper_deadline =
+ heap_->MonotonicallyIncreasingTimeInMs() + kStepSizeInMs;
+ TRACE_GC(heap()->tracer(),
+ GCTracer::Scope::MC_INCREMENTAL_WRAPPER_TRACING);
+ heap_->RegisterWrappersWithEmbedderHeapTracer();
+ wrapper_work_left = heap_->embedder_heap_tracer()->AdvanceTracing(
+ wrapper_deadline, EmbedderHeapTracer::AdvanceTracingActions(
+ EmbedderHeapTracer::ForceCompletionAction::
+ DO_NOT_FORCE_COMPLETION));
+ }
+
+ if (heap_->mark_compact_collector()->marking_deque()->IsEmpty() &&
+ !wrapper_work_left) {
+ if (completion == FORCE_COMPLETION ||
+ IsIdleMarkingDelayCounterLimitReached()) {
+ if (!finalize_marking_completed_) {
+ FinalizeMarking(action);
} else {
- IncrementIdleMarkingDelayCounter();
+ MarkingComplete(action);
}
+ } else {
+ IncrementIdleMarkingDelayCounter();
}
}
+ }
- steps_count_++;
-
- // Speed up marking if we are marking too slow or if we are almost done
- // with marking.
- SpeedUp();
-
- double end = heap_->MonotonicallyIncreasingTimeInMs();
- double duration = (end - start);
- // Note that we report zero bytes here when sweeping was in progress or
- // when we just started incremental marking. In these cases we did not
- // process the marking deque.
- heap_->tracer()->AddIncrementalMarkingStep(duration, bytes_processed);
+ double end = heap_->MonotonicallyIncreasingTimeInMs();
+ double duration = (end - start);
+ // Note that we report zero bytes here when sweeping was in progress or
+ // when we just started incremental marking. In these cases we did not
+ // process the marking deque.
+ heap_->tracer()->AddIncrementalMarkingStep(duration, bytes_processed);
+ if (FLAG_trace_incremental_marking) {
+ heap_->isolate()->PrintWithTimestamp(
+ "[IncrementalMarking] Step %s %zu bytes (%zu) in %.1f\n",
+ step_origin == StepOrigin::kV8 ? "in v8" : "in task", bytes_processed,
+ bytes_to_process, duration);
}
return bytes_processed;
}
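Within Step(), the rewritten marking phase alternates between two work sources: V8's own marking deque is preferred, wrapper (embedder) tracing runs when the embedder demands it or the deque is empty, and completion now requires both sources to be drained. A compact, hedged model of that interleaving (TracerSketch is a stand-in, not the EmbedderHeapTracer interface):

#include <deque>

// Stand-in for the embedder's tracer; AdvanceTracing returns true while the
// embedder still has wrapper work left at the deadline.
struct TracerSketch {
  bool AdvanceTracing(double /*deadline_ms*/) { return false; }
};

// Returns true when marking may be considered complete.
bool StepSketch(std::deque<int>* v8_deque, TracerSketch* tracer,
                bool wrapper_tracing_enabled, double deadline_ms) {
  bool wrapper_work_left = wrapper_tracing_enabled;
  bool process_wrappers = wrapper_tracing_enabled && v8_deque->empty();
  if (!process_wrappers) {
    if (!v8_deque->empty()) v8_deque->pop_front();  // ProcessMarkingDeque()
  } else {
    wrapper_work_left = tracer->AdvanceTracing(deadline_ms);
  }
  // Completion needs both an empty V8 deque and no wrapper work left.
  return v8_deque->empty() && !wrapper_work_left;
}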
-void IncrementalMarking::ResetStepCounters() {
- steps_count_ = 0;
- old_generation_space_available_at_start_of_incremental_ =
- SpaceLeftInOldSpace();
- old_generation_space_used_at_start_of_incremental_ =
- heap_->PromotedTotalSize();
- bytes_rescanned_ = 0;
- marking_speed_ = kInitialMarkingSpeed;
- bytes_scanned_ = 0;
- write_barriers_invoked_since_last_step_ = 0;
-}
-
-
-int64_t IncrementalMarking::SpaceLeftInOldSpace() {
- return heap_->MaxOldGenerationSize() - heap_->PromotedSpaceSizeOfObjects();
-}
-
-
bool IncrementalMarking::IsIdleMarkingDelayCounterLimitReached() {
return idle_marking_delay_counter_ > kMaxIdleMarkingDelayCounter;
}
@@ -1284,5 +1207,6 @@ void IncrementalMarking::IncrementIdleMarkingDelayCounter() {
void IncrementalMarking::ClearIdleMarkingDelayCounter() {
idle_marking_delay_counter_ = 0;
}
+
} // namespace internal
} // namespace v8