author     Michaël Zasso <targos@protonmail.com>   2017-02-14 11:27:26 +0100
committer  Michaël Zasso <targos@protonmail.com>   2017-02-22 15:55:42 +0100
commit     7a77daf24344db7942e34c962b0f1ee729ab7af5 (patch)
tree       e7cbe7bf4e2f4b802a8f5bc18336c546cd6a0d7f /deps/v8/src/heap
parent     5f08871ee93ea739148cc49e0f7679e33c70295a (diff)
deps: update V8 to 5.6.326.55
PR-URL: https://github.com/nodejs/node/pull/10992
Reviewed-By: Ben Noordhuis <info@bnoordhuis.nl>
Diffstat (limited to 'deps/v8/src/heap')
-rw-r--r--  deps/v8/src/heap/gc-idle-time-handler.h   2
-rw-r--r--  deps/v8/src/heap/gc-tracer.cc   225
-rw-r--r--  deps/v8/src/heap/gc-tracer.h   108
-rw-r--r--  deps/v8/src/heap/heap-inl.h   62
-rw-r--r--  deps/v8/src/heap/heap.cc   561
-rw-r--r--  deps/v8/src/heap/heap.h   206
-rw-r--r--  deps/v8/src/heap/incremental-marking.cc   22
-rw-r--r--  deps/v8/src/heap/incremental-marking.h   5
-rw-r--r--  deps/v8/src/heap/mark-compact-inl.h   10
-rw-r--r--  deps/v8/src/heap/mark-compact.cc   714
-rw-r--r--  deps/v8/src/heap/mark-compact.h   149
-rw-r--r--  deps/v8/src/heap/memory-reducer.cc   41
-rw-r--r--  deps/v8/src/heap/memory-reducer.h   6
-rw-r--r--  deps/v8/src/heap/object-stats.cc   118
-rw-r--r--  deps/v8/src/heap/object-stats.h   11
-rw-r--r--  deps/v8/src/heap/objects-visiting-inl.h   5
-rw-r--r--  deps/v8/src/heap/objects-visiting.cc   39
-rw-r--r--  deps/v8/src/heap/page-parallel-job.h   3
-rw-r--r--  deps/v8/src/heap/remembered-set.cc   107
-rw-r--r--  deps/v8/src/heap/remembered-set.h   17
-rw-r--r--  deps/v8/src/heap/scavenge-job.h   3
-rw-r--r--  deps/v8/src/heap/scavenger-inl.h   6
-rw-r--r--  deps/v8/src/heap/scavenger.cc   41
-rw-r--r--  deps/v8/src/heap/scavenger.h   3
-rw-r--r--  deps/v8/src/heap/slot-set.h   100
-rw-r--r--  deps/v8/src/heap/spaces-inl.h   15
-rw-r--r--  deps/v8/src/heap/spaces.cc   314
-rw-r--r--  deps/v8/src/heap/spaces.h   336
-rw-r--r--  deps/v8/src/heap/store-buffer.cc   135
-rw-r--r--  deps/v8/src/heap/store-buffer.h   112
30 files changed, 1751 insertions, 1725 deletions
diff --git a/deps/v8/src/heap/gc-idle-time-handler.h b/deps/v8/src/heap/gc-idle-time-handler.h
index 39dea7e1ff..7ce0c1a2f6 100644
--- a/deps/v8/src/heap/gc-idle-time-handler.h
+++ b/deps/v8/src/heap/gc-idle-time-handler.h
@@ -68,7 +68,7 @@ class GCIdleTimeHeapState {
// The idle time handler makes decisions about which garbage collection
// operations are executing during IdleNotification.
-class GCIdleTimeHandler {
+class V8_EXPORT_PRIVATE GCIdleTimeHandler {
public:
// If we haven't recorded any incremental marking events yet, we carefully
// mark with a conservative lower bound for the marking speed.
diff --git a/deps/v8/src/heap/gc-tracer.cc b/deps/v8/src/heap/gc-tracer.cc
index 8049ce498b..dcd319fdae 100644
--- a/deps/v8/src/heap/gc-tracer.cc
+++ b/deps/v8/src/heap/gc-tracer.cc
@@ -11,10 +11,11 @@
namespace v8 {
namespace internal {
-static intptr_t CountTotalHolesSize(Heap* heap) {
- intptr_t holes_size = 0;
+static size_t CountTotalHolesSize(Heap* heap) {
+ size_t holes_size = 0;
OldSpaces spaces(heap);
for (OldSpace* space = spaces.next(); space != NULL; space = spaces.next()) {
+ DCHECK_GE(holes_size + space->Waste() + space->Available(), holes_size);
holes_size += space->Waste() + space->Available();
}
return holes_size;
@@ -28,8 +29,7 @@ GCTracer::Scope::Scope(GCTracer* tracer, ScopeId scope)
STATIC_ASSERT(FIRST_INCREMENTAL_SCOPE == 0);
start_time_ = tracer_->heap_->MonotonicallyIncreasingTimeInMs();
// TODO(cbruni): remove once we fully moved to a trace-based system.
- if (TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_ENABLED() ||
- FLAG_runtime_call_stats) {
+ if (V8_UNLIKELY(FLAG_runtime_stats)) {
RuntimeCallStats::Enter(
tracer_->heap_->isolate()->counters()->runtime_call_stats(), &timer_,
&RuntimeCallStats::GC);
@@ -40,8 +40,7 @@ GCTracer::Scope::~Scope() {
tracer_->AddScopeSample(
scope_, tracer_->heap_->MonotonicallyIncreasingTimeInMs() - start_time_);
// TODO(cbruni): remove once we fully moved to a trace-based system.
- if (TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_ENABLED() ||
- FLAG_runtime_call_stats) {
+ if (V8_UNLIKELY(FLAG_runtime_stats)) {
RuntimeCallStats::Leave(
tracer_->heap_->isolate()->counters()->runtime_call_stats(), &timer_);
}
@@ -83,28 +82,17 @@ GCTracer::Event::Event(Type type, GarbageCollectionReason gc_reason,
}
}
-
const char* GCTracer::Event::TypeName(bool short_name) const {
switch (type) {
case SCAVENGER:
- if (short_name) {
- return "s";
- } else {
- return "Scavenge";
- }
+ return (short_name) ? "s" : "Scavenge";
case MARK_COMPACTOR:
case INCREMENTAL_MARK_COMPACTOR:
- if (short_name) {
- return "ms";
- } else {
- return "Mark-sweep";
- }
+ return (short_name) ? "ms" : "Mark-sweep";
+ case MINOR_MARK_COMPACTOR:
+ return (short_name) ? "mmc" : "Minor Mark-Compact";
case START:
- if (short_name) {
- return "st";
- } else {
- return "Start";
- }
+ return (short_name) ? "st" : "Start";
}
return "Unknown Event Type";
}
@@ -115,6 +103,7 @@ GCTracer::GCTracer(Heap* heap)
previous_(current_),
incremental_marking_bytes_(0),
incremental_marking_duration_(0.0),
+ incremental_marking_start_time_(0.0),
recorded_incremental_marking_speed_(0.0),
allocation_time_ms_(0.0),
new_space_allocation_counter_bytes_(0),
@@ -139,8 +128,8 @@ void GCTracer::ResetForTesting() {
new_space_allocation_in_bytes_since_gc_ = 0.0;
old_generation_allocation_in_bytes_since_gc_ = 0.0;
combined_mark_compact_speed_cache_ = 0.0;
- recorded_scavenges_total_.Reset();
- recorded_scavenges_survived_.Reset();
+ recorded_minor_gcs_total_.Reset();
+ recorded_minor_gcs_survived_.Reset();
recorded_compactions_.Reset();
recorded_mark_compacts_.Reset();
recorded_incremental_mark_compacts_.Reset();
@@ -162,15 +151,22 @@ void GCTracer::Start(GarbageCollector collector,
SampleAllocation(start_time, heap_->NewSpaceAllocationCounter(),
heap_->OldGenerationAllocationCounter());
- if (collector == SCAVENGER) {
- current_ = Event(Event::SCAVENGER, gc_reason, collector_reason);
- } else if (collector == MARK_COMPACTOR) {
- if (heap_->incremental_marking()->WasActivated()) {
+ switch (collector) {
+ case SCAVENGER:
+ current_ = Event(Event::SCAVENGER, gc_reason, collector_reason);
+ break;
+ case MINOR_MARK_COMPACTOR:
current_ =
- Event(Event::INCREMENTAL_MARK_COMPACTOR, gc_reason, collector_reason);
- } else {
- current_ = Event(Event::MARK_COMPACTOR, gc_reason, collector_reason);
- }
+ Event(Event::MINOR_MARK_COMPACTOR, gc_reason, collector_reason);
+ break;
+ case MARK_COMPACTOR:
+ if (heap_->incremental_marking()->WasActivated()) {
+ current_ = Event(Event::INCREMENTAL_MARK_COMPACTOR, gc_reason,
+ collector_reason);
+ } else {
+ current_ = Event(Event::MARK_COMPACTOR, gc_reason, collector_reason);
+ }
+ break;
}
current_.reduce_memory = heap_->ShouldReduceMemory();
@@ -188,12 +184,12 @@ void GCTracer::Start(GarbageCollector collector,
current_.scopes[i] = 0;
}
- int committed_memory = static_cast<int>(heap_->CommittedMemory() / KB);
- int used_memory = static_cast<int>(current_.start_object_size / KB);
+ size_t committed_memory = heap_->CommittedMemory() / KB;
+ size_t used_memory = current_.start_object_size / KB;
Counters* counters = heap_->isolate()->counters();
- if (collector == SCAVENGER) {
+ if (Heap::IsYoungGenerationCollector(collector)) {
counters->scavenge_reason()->AddSample(static_cast<int>(gc_reason));
} else {
counters->mark_compact_reason()->AddSample(static_cast<int>(gc_reason));
@@ -202,8 +198,7 @@ void GCTracer::Start(GarbageCollector collector,
committed_memory);
counters->aggregated_memory_heap_used()->AddSample(start_time, used_memory);
// TODO(cbruni): remove once we fully moved to a trace-based system.
- if (TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_ENABLED() ||
- FLAG_runtime_call_stats) {
+ if (V8_UNLIKELY(FLAG_runtime_stats)) {
RuntimeCallStats::Enter(heap_->isolate()->counters()->runtime_call_stats(),
&timer_, &RuntimeCallStats::GC);
}
@@ -220,15 +215,16 @@ void GCTracer::ResetIncrementalMarkingCounters() {
void GCTracer::Stop(GarbageCollector collector) {
start_counter_--;
if (start_counter_ != 0) {
- heap_->isolate()->PrintWithTimestamp(
- "[Finished reentrant %s during %s.]\n",
- collector == SCAVENGER ? "Scavenge" : "Mark-sweep",
- current_.TypeName(false));
+ heap_->isolate()->PrintWithTimestamp("[Finished reentrant %s during %s.]\n",
+ Heap::CollectorName(collector),
+ current_.TypeName(false));
return;
}
DCHECK(start_counter_ >= 0);
DCHECK((collector == SCAVENGER && current_.type == Event::SCAVENGER) ||
+ (collector == MINOR_MARK_COMPACTOR &&
+ current_.type == Event::MINOR_MARK_COMPACTOR) ||
(collector == MARK_COMPACTOR &&
(current_.type == Event::MARK_COMPACTOR ||
current_.type == Event::INCREMENTAL_MARK_COMPACTOR)));
@@ -241,8 +237,8 @@ void GCTracer::Stop(GarbageCollector collector) {
AddAllocation(current_.end_time);
- int committed_memory = static_cast<int>(heap_->CommittedMemory() / KB);
- int used_memory = static_cast<int>(current_.end_object_size / KB);
+ size_t committed_memory = heap_->CommittedMemory() / KB;
+ size_t used_memory = current_.end_object_size / KB;
heap_->isolate()->counters()->aggregated_memory_heap_committed()->AddSample(
current_.end_time, committed_memory);
heap_->isolate()->counters()->aggregated_memory_heap_used()->AddSample(
@@ -250,36 +246,45 @@ void GCTracer::Stop(GarbageCollector collector) {
double duration = current_.end_time - current_.start_time;
- if (current_.type == Event::SCAVENGER) {
- recorded_scavenges_total_.Push(
- MakeBytesAndDuration(current_.new_space_object_size, duration));
- recorded_scavenges_survived_.Push(MakeBytesAndDuration(
- current_.survived_new_space_object_size, duration));
- } else if (current_.type == Event::INCREMENTAL_MARK_COMPACTOR) {
- current_.incremental_marking_bytes = incremental_marking_bytes_;
- current_.incremental_marking_duration = incremental_marking_duration_;
- for (int i = 0; i < Scope::NUMBER_OF_INCREMENTAL_SCOPES; i++) {
- current_.incremental_marking_scopes[i] = incremental_marking_scopes_[i];
- current_.scopes[i] = incremental_marking_scopes_[i].duration;
- }
- RecordIncrementalMarkingSpeed(current_.incremental_marking_bytes,
- current_.incremental_marking_duration);
- recorded_incremental_mark_compacts_.Push(
- MakeBytesAndDuration(current_.start_object_size, duration));
- ResetIncrementalMarkingCounters();
- combined_mark_compact_speed_cache_ = 0.0;
- } else {
- DCHECK_EQ(0, current_.incremental_marking_bytes);
- DCHECK_EQ(0, current_.incremental_marking_duration);
- recorded_mark_compacts_.Push(
- MakeBytesAndDuration(current_.start_object_size, duration));
- ResetIncrementalMarkingCounters();
- combined_mark_compact_speed_cache_ = 0.0;
+ switch (current_.type) {
+ case Event::SCAVENGER:
+ case Event::MINOR_MARK_COMPACTOR:
+ recorded_minor_gcs_total_.Push(
+ MakeBytesAndDuration(current_.new_space_object_size, duration));
+ recorded_minor_gcs_survived_.Push(MakeBytesAndDuration(
+ current_.survived_new_space_object_size, duration));
+ break;
+ case Event::INCREMENTAL_MARK_COMPACTOR:
+ current_.incremental_marking_bytes = incremental_marking_bytes_;
+ current_.incremental_marking_duration = incremental_marking_duration_;
+ for (int i = 0; i < Scope::NUMBER_OF_INCREMENTAL_SCOPES; i++) {
+ current_.incremental_marking_scopes[i] = incremental_marking_scopes_[i];
+ current_.scopes[i] = incremental_marking_scopes_[i].duration;
+ }
+ RecordIncrementalMarkingSpeed(current_.incremental_marking_bytes,
+ current_.incremental_marking_duration);
+ recorded_incremental_mark_compacts_.Push(
+ MakeBytesAndDuration(current_.start_object_size, duration));
+ ResetIncrementalMarkingCounters();
+ combined_mark_compact_speed_cache_ = 0.0;
+ break;
+ case Event::MARK_COMPACTOR:
+ DCHECK_EQ(0u, current_.incremental_marking_bytes);
+ DCHECK_EQ(0, current_.incremental_marking_duration);
+ recorded_mark_compacts_.Push(
+ MakeBytesAndDuration(current_.start_object_size, duration));
+ ResetIncrementalMarkingCounters();
+ combined_mark_compact_speed_cache_ = 0.0;
+ break;
+ case Event::START:
+ UNREACHABLE();
}
heap_->UpdateTotalGCTime(duration);
- if (current_.type == Event::SCAVENGER && FLAG_trace_gc_ignore_scavenger)
+ if ((current_.type == Event::SCAVENGER ||
+ current_.type == Event::MINOR_MARK_COMPACTOR) &&
+ FLAG_trace_gc_ignore_scavenger)
return;
if (FLAG_trace_gc_nvp) {
@@ -293,8 +298,7 @@ void GCTracer::Stop(GarbageCollector collector) {
}
// TODO(cbruni): remove once we fully moved to a trace-based system.
- if (TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_ENABLED() ||
- FLAG_runtime_call_stats) {
+ if (V8_UNLIKELY(FLAG_runtime_stats)) {
RuntimeCallStats::Leave(heap_->isolate()->counters()->runtime_call_stats(),
&timer_);
}
@@ -348,9 +352,8 @@ void GCTracer::AddContextDisposalTime(double time) {
recorded_context_disposal_times_.Push(time);
}
-
void GCTracer::AddCompactionEvent(double duration,
- intptr_t live_bytes_compacted) {
+ size_t live_bytes_compacted) {
recorded_compactions_.Push(
MakeBytesAndDuration(live_bytes_compacted, duration));
}
@@ -360,8 +363,7 @@ void GCTracer::AddSurvivalRatio(double promotion_ratio) {
recorded_survival_ratios_.Push(promotion_ratio);
}
-
-void GCTracer::AddIncrementalMarkingStep(double duration, intptr_t bytes) {
+void GCTracer::AddIncrementalMarkingStep(double duration, size_t bytes) {
if (bytes > 0) {
incremental_marking_bytes_ += bytes;
incremental_marking_duration_ += duration;
@@ -426,7 +428,7 @@ void GCTracer::Print() const {
void GCTracer::PrintNVP() const {
double duration = current_.end_time - current_.start_time;
double spent_in_mutator = current_.start_time - previous_.end_time;
- intptr_t allocated_since_last_gc =
+ size_t allocated_since_last_gc =
current_.start_object_size - previous_.end_object_size;
double incremental_walltime_duration = 0;
@@ -449,26 +451,25 @@ void GCTracer::PrintNVP() const {
"roots=%.2f "
"code=%.2f "
"semispace=%.2f "
- "object_groups=%.2f "
- "external_prologue=%.2f "
- "external_epilogue=%.2f "
+ "external.prologue=%.2f "
+ "external.epilogue=%.2f "
"external_weak_global_handles=%.2f "
"steps_count=%d "
"steps_took=%.1f "
"scavenge_throughput=%.f "
- "total_size_before=%" V8PRIdPTR
+ "total_size_before=%" PRIuS
" "
- "total_size_after=%" V8PRIdPTR
+ "total_size_after=%" PRIuS
" "
- "holes_size_before=%" V8PRIdPTR
+ "holes_size_before=%" PRIuS
" "
- "holes_size_after=%" V8PRIdPTR
+ "holes_size_after=%" PRIuS
" "
- "allocated=%" V8PRIdPTR
+ "allocated=%" PRIuS
" "
- "promoted=%" V8PRIdPTR
+ "promoted=%" PRIuS
" "
- "semi_space_copied=%" V8PRIdPTR
+ "semi_space_copied=%" PRIuS
" "
"nodes_died_in_new=%d "
"nodes_copied_in_new=%d "
@@ -486,9 +487,8 @@ void GCTracer::PrintNVP() const {
current_.scopes[Scope::SCAVENGER_ROOTS],
current_.scopes[Scope::SCAVENGER_CODE_FLUSH_CANDIDATES],
current_.scopes[Scope::SCAVENGER_SEMISPACE],
- current_.scopes[Scope::SCAVENGER_OBJECT_GROUPS],
- current_.scopes[Scope::SCAVENGER_EXTERNAL_PROLOGUE],
- current_.scopes[Scope::SCAVENGER_EXTERNAL_EPILOGUE],
+ current_.scopes[Scope::EXTERNAL_PROLOGUE],
+ current_.scopes[Scope::EXTERNAL_EPILOGUE],
current_.scopes[Scope::EXTERNAL_WEAK_GLOBAL_HANDLES],
current_.incremental_marking_scopes[GCTracer::Scope::MC_INCREMENTAL]
.steps,
@@ -505,6 +505,15 @@ void GCTracer::PrintNVP() const {
NewSpaceAllocationThroughputInBytesPerMillisecond(),
ContextDisposalRateInMilliseconds());
break;
+ case Event::MINOR_MARK_COMPACTOR:
+ heap_->isolate()->PrintWithTimestamp(
+ "pause=%.1f "
+ "mutator=%.1f "
+ "gc=%s "
+ "reduce_memory=%d\n",
+ duration, spent_in_mutator, current_.TypeName(true),
+ current_.reduce_memory);
+ break;
case Event::MARK_COMPACTOR:
case Event::INCREMENTAL_MARK_COMPACTOR:
heap_->isolate()->PrintWithTimestamp(
@@ -523,6 +532,7 @@ void GCTracer::PrintNVP() const {
"clear.weak_cells=%.1f "
"clear.weak_collections=%.1f "
"clear.weak_lists=%.1f "
+ "epilogue=%.1f "
"evacuate=%.1f "
"evacuate.candidates=%.1f "
"evacuate.clean_up=%.1f "
@@ -531,8 +541,8 @@ void GCTracer::PrintNVP() const {
"evacuate.update_pointers.to_evacuated=%.1f "
"evacuate.update_pointers.to_new=%.1f "
"evacuate.update_pointers.weak=%.1f "
- "external.mc_prologue=%.1f "
- "external.mc_epilogue=%.1f "
+ "external.prologue=%.1f "
+ "external.epilogue=%.1f "
"external.weak_global_handles=%.1f "
"finish=%.1f "
"mark=%.1f "
@@ -548,6 +558,7 @@ void GCTracer::PrintNVP() const {
"mark.wrapper_prologue=%.1f "
"mark.wrapper_epilogue=%.1f "
"mark.wrapper_tracing=%.1f "
+ "prologue=%.1f "
"sweep=%.1f "
"sweep.code=%.1f "
"sweep.map=%.1f "
@@ -568,19 +579,19 @@ void GCTracer::PrintNVP() const {
"incremental_steps_count=%d "
"incremental_marking_throughput=%.f "
"incremental_walltime_duration=%.f "
- "total_size_before=%" V8PRIdPTR
+ "total_size_before=%" PRIuS
" "
- "total_size_after=%" V8PRIdPTR
+ "total_size_after=%" PRIuS
" "
- "holes_size_before=%" V8PRIdPTR
+ "holes_size_before=%" PRIuS
" "
- "holes_size_after=%" V8PRIdPTR
+ "holes_size_after=%" PRIuS
" "
- "allocated=%" V8PRIdPTR
+ "allocated=%" PRIuS
" "
- "promoted=%" V8PRIdPTR
+ "promoted=%" PRIuS
" "
- "semi_space_copied=%" V8PRIdPTR
+ "semi_space_copied=%" PRIuS
" "
"nodes_died_in_new=%d "
"nodes_copied_in_new=%d "
@@ -604,6 +615,7 @@ void GCTracer::PrintNVP() const {
current_.scopes[Scope::MC_CLEAR_WEAK_CELLS],
current_.scopes[Scope::MC_CLEAR_WEAK_COLLECTIONS],
current_.scopes[Scope::MC_CLEAR_WEAK_LISTS],
+ current_.scopes[Scope::MC_EPILOGUE],
current_.scopes[Scope::MC_EVACUATE],
current_.scopes[Scope::MC_EVACUATE_CANDIDATES],
current_.scopes[Scope::MC_EVACUATE_CLEAN_UP],
@@ -612,8 +624,8 @@ void GCTracer::PrintNVP() const {
current_.scopes[Scope::MC_EVACUATE_UPDATE_POINTERS_TO_EVACUATED],
current_.scopes[Scope::MC_EVACUATE_UPDATE_POINTERS_TO_NEW],
current_.scopes[Scope::MC_EVACUATE_UPDATE_POINTERS_WEAK],
- current_.scopes[Scope::MC_EXTERNAL_PROLOGUE],
- current_.scopes[Scope::MC_EXTERNAL_EPILOGUE],
+ current_.scopes[Scope::EXTERNAL_PROLOGUE],
+ current_.scopes[Scope::EXTERNAL_EPILOGUE],
current_.scopes[Scope::EXTERNAL_WEAK_GLOBAL_HANDLES],
current_.scopes[Scope::MC_FINISH], current_.scopes[Scope::MC_MARK],
current_.scopes[Scope::MC_MARK_FINISH_INCREMENTAL],
@@ -628,7 +640,7 @@ void GCTracer::PrintNVP() const {
current_.scopes[Scope::MC_MARK_WRAPPER_PROLOGUE],
current_.scopes[Scope::MC_MARK_WRAPPER_EPILOGUE],
current_.scopes[Scope::MC_MARK_WRAPPER_TRACING],
- current_.scopes[Scope::MC_SWEEP],
+ current_.scopes[Scope::MC_PROLOGUE], current_.scopes[Scope::MC_SWEEP],
current_.scopes[Scope::MC_SWEEP_CODE],
current_.scopes[Scope::MC_SWEEP_MAP],
current_.scopes[Scope::MC_SWEEP_OLD],
@@ -674,7 +686,7 @@ void GCTracer::PrintNVP() const {
}
}
-double GCTracer::AverageSpeed(const RingBuffer<BytesAndDuration>& buffer,
+double GCTracer::AverageSpeed(const base::RingBuffer<BytesAndDuration>& buffer,
const BytesAndDuration& initial, double time_ms) {
BytesAndDuration sum = buffer.Sum(
[time_ms](BytesAndDuration a, BytesAndDuration b) {
@@ -693,11 +705,12 @@ double GCTracer::AverageSpeed(const RingBuffer<BytesAndDuration>& buffer,
return speed;
}
-double GCTracer::AverageSpeed(const RingBuffer<BytesAndDuration>& buffer) {
+double GCTracer::AverageSpeed(
+ const base::RingBuffer<BytesAndDuration>& buffer) {
return AverageSpeed(buffer, MakeBytesAndDuration(0, 0), 0);
}
-void GCTracer::RecordIncrementalMarkingSpeed(intptr_t bytes, double duration) {
+void GCTracer::RecordIncrementalMarkingSpeed(size_t bytes, double duration) {
if (duration == 0 || bytes == 0) return;
double current_speed = bytes / duration;
if (recorded_incremental_marking_speed_ == 0) {
@@ -722,9 +735,9 @@ double GCTracer::IncrementalMarkingSpeedInBytesPerMillisecond() const {
double GCTracer::ScavengeSpeedInBytesPerMillisecond(
ScavengeSpeedMode mode) const {
if (mode == kForAllObjects) {
- return AverageSpeed(recorded_scavenges_total_);
+ return AverageSpeed(recorded_minor_gcs_total_);
} else {
- return AverageSpeed(recorded_scavenges_survived_);
+ return AverageSpeed(recorded_minor_gcs_survived_);
}
}
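
The hunks above move GCTracer's throughput bookkeeping from the local RingBuffer to base::RingBuffer and from intptr_t to size_t. As a standalone illustration (not part of the diff), the sketch below condenses the averaging scheme that AverageSpeed documents in gc-tracer.h: fold the buffered (bytes, duration) samples into one rate and clamp it, returning 0 for an empty buffer and otherwise a value between 1 byte/ms and 1 GB/ms. The vector-based signature and constant names are illustrative only, not the V8 API.

#include <cstdint>
#include <utility>
#include <vector>

using BytesAndDuration = std::pair<uint64_t, double>;  // bytes, milliseconds

double AverageSpeed(const std::vector<BytesAndDuration>& samples) {
  uint64_t bytes = 0;
  double duration_ms = 0.0;
  for (const BytesAndDuration& sample : samples) {
    bytes += sample.first;
    duration_ms += sample.second;
  }
  if (duration_ms == 0.0) return 0;                      // empty buffer -> 0
  const double kMinSpeed = 1.0;                          // 1 byte/ms lower clamp
  const double kMaxSpeed = 1024.0 * 1024.0 * 1024.0;     // 1 GB/ms upper clamp
  const double speed = static_cast<double>(bytes) / duration_ms;
  if (speed < kMinSpeed) return kMinSpeed;
  if (speed > kMaxSpeed) return kMaxSpeed;
  return speed;
}
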
diff --git a/deps/v8/src/heap/gc-tracer.h b/deps/v8/src/heap/gc-tracer.h
index e8c72c1e2c..ed62dee5f1 100644
--- a/deps/v8/src/heap/gc-tracer.h
+++ b/deps/v8/src/heap/gc-tracer.h
@@ -7,6 +7,7 @@
#include "src/base/compiler-specific.h"
#include "src/base/platform/platform.h"
+#include "src/base/ring-buffer.h"
#include "src/counters.h"
#include "src/globals.h"
#include "testing/gtest/include/gtest/gtest_prod.h"
@@ -14,44 +15,6 @@
namespace v8 {
namespace internal {
-template <typename T>
-class RingBuffer {
- public:
- RingBuffer() { Reset(); }
- static const int kSize = 10;
- void Push(const T& value) {
- if (count_ == kSize) {
- elements_[start_++] = value;
- if (start_ == kSize) start_ = 0;
- } else {
- DCHECK_EQ(start_, 0);
- elements_[count_++] = value;
- }
- }
-
- int Count() const { return count_; }
-
- template <typename Callback>
- T Sum(Callback callback, const T& initial) const {
- int j = start_ + count_ - 1;
- if (j >= kSize) j -= kSize;
- T result = initial;
- for (int i = 0; i < count_; i++) {
- result = callback(result, elements_[j]);
- if (--j == -1) j += kSize;
- }
- return result;
- }
-
- void Reset() { start_ = count_ = 0; }
-
- private:
- T elements_[kSize];
- int start_;
- int count_;
- DISALLOW_COPY_AND_ASSIGN(RingBuffer);
-};
-
typedef std::pair<uint64_t, double> BytesAndDuration;
inline BytesAndDuration MakeBytesAndDuration(uint64_t bytes, double duration) {
@@ -74,6 +37,8 @@ enum ScavengeSpeedMode { kForAllObjects, kForSurvivedObjects };
#define TRACER_SCOPES(F) \
INCREMENTAL_SCOPES(F) \
+ F(EXTERNAL_EPILOGUE) \
+ F(EXTERNAL_PROLOGUE) \
F(EXTERNAL_WEAK_GLOBAL_HANDLES) \
F(MC_CLEAR) \
F(MC_CLEAR_CODE_FLUSH) \
@@ -86,6 +51,7 @@ enum ScavengeSpeedMode { kForAllObjects, kForSurvivedObjects };
F(MC_CLEAR_WEAK_CELLS) \
F(MC_CLEAR_WEAK_COLLECTIONS) \
F(MC_CLEAR_WEAK_LISTS) \
+ F(MC_EPILOGUE) \
F(MC_EVACUATE) \
F(MC_EVACUATE_CANDIDATES) \
F(MC_EVACUATE_CLEAN_UP) \
@@ -94,8 +60,6 @@ enum ScavengeSpeedMode { kForAllObjects, kForSurvivedObjects };
F(MC_EVACUATE_UPDATE_POINTERS_TO_EVACUATED) \
F(MC_EVACUATE_UPDATE_POINTERS_TO_NEW) \
F(MC_EVACUATE_UPDATE_POINTERS_WEAK) \
- F(MC_EXTERNAL_EPILOGUE) \
- F(MC_EXTERNAL_PROLOGUE) \
F(MC_FINISH) \
F(MC_MARK) \
F(MC_MARK_FINISH_INCREMENTAL) \
@@ -110,14 +74,12 @@ enum ScavengeSpeedMode { kForAllObjects, kForSurvivedObjects };
F(MC_MARK_WRAPPER_PROLOGUE) \
F(MC_MARK_WRAPPER_TRACING) \
F(MC_MARK_OBJECT_GROUPING) \
+ F(MC_PROLOGUE) \
F(MC_SWEEP) \
F(MC_SWEEP_CODE) \
F(MC_SWEEP_MAP) \
F(MC_SWEEP_OLD) \
F(SCAVENGER_CODE_FLUSH_CANDIDATES) \
- F(SCAVENGER_EXTERNAL_EPILOGUE) \
- F(SCAVENGER_EXTERNAL_PROLOGUE) \
- F(SCAVENGER_OBJECT_GROUPS) \
F(SCAVENGER_OLD_TO_NEW_POINTERS) \
F(SCAVENGER_ROOTS) \
F(SCAVENGER_SCAVENGE) \
@@ -132,7 +94,7 @@ enum ScavengeSpeedMode { kForAllObjects, kForSurvivedObjects };
// GCTracer collects and prints ONE line after each garbage collector
// invocation IFF --trace_gc is used.
-class GCTracer {
+class V8_EXPORT_PRIVATE GCTracer {
public:
struct IncrementalMarkingInfos {
IncrementalMarkingInfos() : duration(0), longest_step(0), steps(0) {}
@@ -190,7 +152,8 @@ class GCTracer {
SCAVENGER = 0,
MARK_COMPACTOR = 1,
INCREMENTAL_MARK_COMPACTOR = 2,
- START = 3
+ MINOR_MARK_COMPACTOR = 3,
+ START = 4
};
Event(Type type, GarbageCollectionReason gc_reason,
@@ -215,10 +178,10 @@ class GCTracer {
bool reduce_memory;
// Size of objects in heap set in constructor.
- intptr_t start_object_size;
+ size_t start_object_size;
// Size of objects in heap set in destructor.
- intptr_t end_object_size;
+ size_t end_object_size;
// Size of memory allocated from OS set in constructor.
size_t start_memory_size;
@@ -228,23 +191,20 @@ class GCTracer {
// Total amount of space either wasted or contained in one of free lists
// before the current GC.
- intptr_t start_holes_size;
+ size_t start_holes_size;
// Total amount of space either wasted or contained in one of free lists
// after the current GC.
- intptr_t end_holes_size;
+ size_t end_holes_size;
// Size of new space objects in constructor.
- intptr_t new_space_object_size;
+ size_t new_space_object_size;
// Size of survived new space objects in destructor.
- intptr_t survived_new_space_object_size;
-
- // Bytes marked since creation of tracer (value at start of event).
- intptr_t cumulative_incremental_marking_bytes;
+ size_t survived_new_space_object_size;
// Bytes marked incrementally for INCREMENTAL_MARK_COMPACTOR
- intptr_t incremental_marking_bytes;
+ size_t incremental_marking_bytes;
// Duration of incremental marking steps for INCREMENTAL_MARK_COMPACTOR.
double incremental_marking_duration;
@@ -277,12 +237,12 @@ class GCTracer {
void AddContextDisposalTime(double time);
- void AddCompactionEvent(double duration, intptr_t live_bytes_compacted);
+ void AddCompactionEvent(double duration, size_t live_bytes_compacted);
void AddSurvivalRatio(double survival_ratio);
// Log an incremental marking step.
- void AddIncrementalMarkingStep(double duration, intptr_t bytes);
+ void AddIncrementalMarkingStep(double duration, size_t bytes);
// Compute the average incremental marking speed in bytes/millisecond.
// Returns 0 if no events have been recorded.
@@ -380,13 +340,13 @@ class GCTracer {
// Returns the average speed of the events in the buffer.
// If the buffer is empty, the result is 0.
// Otherwise, the result is between 1 byte/ms and 1 GB/ms.
- static double AverageSpeed(const RingBuffer<BytesAndDuration>& buffer);
- static double AverageSpeed(const RingBuffer<BytesAndDuration>& buffer,
+ static double AverageSpeed(const base::RingBuffer<BytesAndDuration>& buffer);
+ static double AverageSpeed(const base::RingBuffer<BytesAndDuration>& buffer,
const BytesAndDuration& initial, double time_ms);
void ResetForTesting();
void ResetIncrementalMarkingCounters();
- void RecordIncrementalMarkingSpeed(intptr_t bytes, double duration);
+ void RecordIncrementalMarkingSpeed(size_t bytes, double duration);
// Print one detailed trace line in name=value format.
// TODO(ernstm): Move to Heap.
@@ -402,12 +362,10 @@ class GCTracer {
double TotalExternalTime() const {
return current_.scopes[Scope::EXTERNAL_WEAK_GLOBAL_HANDLES] +
- current_.scopes[Scope::MC_EXTERNAL_EPILOGUE] +
- current_.scopes[Scope::MC_EXTERNAL_PROLOGUE] +
+ current_.scopes[Scope::EXTERNAL_EPILOGUE] +
+ current_.scopes[Scope::EXTERNAL_PROLOGUE] +
current_.scopes[Scope::MC_INCREMENTAL_EXTERNAL_EPILOGUE] +
- current_.scopes[Scope::MC_INCREMENTAL_EXTERNAL_PROLOGUE] +
- current_.scopes[Scope::SCAVENGER_EXTERNAL_EPILOGUE] +
- current_.scopes[Scope::SCAVENGER_EXTERNAL_PROLOGUE];
+ current_.scopes[Scope::MC_INCREMENTAL_EXTERNAL_PROLOGUE];
}
// Pointer to the heap that owns this tracer.
@@ -422,7 +380,7 @@ class GCTracer {
// Size of incremental marking steps (in bytes) accumulated since the end of
// the last mark compact GC.
- intptr_t incremental_marking_bytes_;
+ size_t incremental_marking_bytes_;
// Duration of incremental marking steps since the end of the last mark-
// compact event.
@@ -456,15 +414,15 @@ class GCTracer {
// Separate timer used for --runtime_call_stats
RuntimeCallTimer timer_;
- RingBuffer<BytesAndDuration> recorded_scavenges_total_;
- RingBuffer<BytesAndDuration> recorded_scavenges_survived_;
- RingBuffer<BytesAndDuration> recorded_compactions_;
- RingBuffer<BytesAndDuration> recorded_incremental_mark_compacts_;
- RingBuffer<BytesAndDuration> recorded_mark_compacts_;
- RingBuffer<BytesAndDuration> recorded_new_generation_allocations_;
- RingBuffer<BytesAndDuration> recorded_old_generation_allocations_;
- RingBuffer<double> recorded_context_disposal_times_;
- RingBuffer<double> recorded_survival_ratios_;
+ base::RingBuffer<BytesAndDuration> recorded_minor_gcs_total_;
+ base::RingBuffer<BytesAndDuration> recorded_minor_gcs_survived_;
+ base::RingBuffer<BytesAndDuration> recorded_compactions_;
+ base::RingBuffer<BytesAndDuration> recorded_incremental_mark_compacts_;
+ base::RingBuffer<BytesAndDuration> recorded_mark_compacts_;
+ base::RingBuffer<BytesAndDuration> recorded_new_generation_allocations_;
+ base::RingBuffer<BytesAndDuration> recorded_old_generation_allocations_;
+ base::RingBuffer<double> recorded_context_disposal_times_;
+ base::RingBuffer<double> recorded_survival_ratios_;
DISALLOW_COPY_AND_ASSIGN(GCTracer);
};
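
For reference, here is a standalone sketch (not part of the diff) of the ring-buffer behaviour that the hunk above removes from gc-tracer.h in favour of src/base/ring-buffer.h: Push() fills up to kSize slots and then overwrites the oldest entry. It is reduced to plain ints, and Sum() here is a simple total, whereas the original folds newest-to-oldest through a callback.

#include <cstdio>

struct IntRingBuffer {
  static const int kSize = 10;
  int elements[kSize];
  int start = 0;   // index of the oldest element once the buffer is full
  int count = 0;

  void Push(int value) {
    if (count == kSize) {
      elements[start++] = value;      // overwrite the oldest entry
      if (start == kSize) start = 0;
    } else {
      elements[count++] = value;      // still filling up
    }
  }

  int Sum() const {
    int result = 0;
    for (int i = 0; i < count; i++) result += elements[i];
    return result;
  }
};

int main() {
  IntRingBuffer buffer;
  for (int i = 1; i <= 12; i++) buffer.Push(i);  // 1 and 2 get overwritten
  printf("count=%d sum=%d\n", buffer.count, buffer.Sum());  // count=10 sum=75
  return 0;
}
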
diff --git a/deps/v8/src/heap/heap-inl.h b/deps/v8/src/heap/heap-inl.h
index 23e171232d..7d0d241289 100644
--- a/deps/v8/src/heap/heap-inl.h
+++ b/deps/v8/src/heap/heap-inl.h
@@ -12,6 +12,7 @@
#include "src/heap/heap.h"
#include "src/heap/incremental-marking-inl.h"
#include "src/heap/mark-compact.h"
+#include "src/heap/object-stats.h"
#include "src/heap/remembered-set.h"
#include "src/heap/spaces-inl.h"
#include "src/heap/store-buffer.h"
@@ -490,37 +491,18 @@ bool Heap::InOldSpaceSlow(Address address) {
return old_space_->ContainsSlow(address);
}
-template <PromotionMode promotion_mode>
bool Heap::ShouldBePromoted(Address old_address, int object_size) {
Page* page = Page::FromAddress(old_address);
Address age_mark = new_space_->age_mark();
-
- if (promotion_mode == PROMOTE_MARKED) {
- MarkBit mark_bit = ObjectMarking::MarkBitFrom(old_address);
- if (!Marking::IsWhite(mark_bit)) {
- return true;
- }
- }
-
return page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK) &&
(!page->ContainsLimit(age_mark) || old_address < age_mark);
}
-PromotionMode Heap::CurrentPromotionMode() {
- if (incremental_marking()->IsMarking()) {
- return PROMOTE_MARKED;
- } else {
- return DEFAULT_PROMOTION;
- }
-}
-
void Heap::RecordWrite(Object* object, int offset, Object* o) {
if (!InNewSpace(o) || !object->IsHeapObject() || InNewSpace(object)) {
return;
}
- RememberedSet<OLD_TO_NEW>::Insert(
- Page::FromAddress(reinterpret_cast<Address>(object)),
- HeapObject::cast(object)->address() + offset);
+ store_buffer()->InsertEntry(HeapObject::cast(object)->address() + offset);
}
void Heap::RecordWriteIntoCode(Code* host, RelocInfo* rinfo, Object* value) {
@@ -531,11 +513,9 @@ void Heap::RecordWriteIntoCode(Code* host, RelocInfo* rinfo, Object* value) {
void Heap::RecordFixedArrayElements(FixedArray* array, int offset, int length) {
if (InNewSpace(array)) return;
- Page* page = Page::FromAddress(reinterpret_cast<Address>(array));
for (int i = 0; i < length; i++) {
if (!InNewSpace(array->get(offset + i))) continue;
- RememberedSet<OLD_TO_NEW>::Insert(
- page,
+ store_buffer()->InsertEntry(
reinterpret_cast<Address>(array->RawFieldOfElementAt(offset + i)));
}
}
@@ -647,7 +627,13 @@ AllocationMemento* Heap::FindAllocationMemento(HeapObject* object) {
template <Heap::UpdateAllocationSiteMode mode>
void Heap::UpdateAllocationSite(HeapObject* object,
base::HashMap* pretenuring_feedback) {
- DCHECK(InFromSpace(object));
+ DCHECK(InFromSpace(object) ||
+ (InToSpace(object) &&
+ Page::FromAddress(object->address())
+ ->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION)) ||
+ (!InNewSpace(object) &&
+ Page::FromAddress(object->address())
+ ->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION)));
if (!FLAG_allocation_site_pretenuring ||
!AllocationSite::CanTrack(object->map()->instance_type()))
return;
@@ -759,9 +745,7 @@ void Heap::ExternalStringTable::ShrinkNewStrings(int position) {
#endif
}
-void Heap::ClearInstanceofCache() {
- set_instanceof_cache_function(Smi::FromInt(0));
-}
+void Heap::ClearInstanceofCache() { set_instanceof_cache_function(Smi::kZero); }
Oddball* Heap::ToBoolean(bool condition) {
return condition ? true_value() : false_value();
@@ -769,8 +753,8 @@ Oddball* Heap::ToBoolean(bool condition) {
void Heap::CompletelyClearInstanceofCache() {
- set_instanceof_cache_map(Smi::FromInt(0));
- set_instanceof_cache_function(Smi::FromInt(0));
+ set_instanceof_cache_map(Smi::kZero);
+ set_instanceof_cache_function(Smi::kZero);
}
@@ -793,27 +777,27 @@ int Heap::NextScriptId() {
}
void Heap::SetArgumentsAdaptorDeoptPCOffset(int pc_offset) {
- DCHECK(arguments_adaptor_deopt_pc_offset() == Smi::FromInt(0));
+ DCHECK(arguments_adaptor_deopt_pc_offset() == Smi::kZero);
set_arguments_adaptor_deopt_pc_offset(Smi::FromInt(pc_offset));
}
void Heap::SetConstructStubDeoptPCOffset(int pc_offset) {
- DCHECK(construct_stub_deopt_pc_offset() == Smi::FromInt(0));
+ DCHECK(construct_stub_deopt_pc_offset() == Smi::kZero);
set_construct_stub_deopt_pc_offset(Smi::FromInt(pc_offset));
}
void Heap::SetGetterStubDeoptPCOffset(int pc_offset) {
- DCHECK(getter_stub_deopt_pc_offset() == Smi::FromInt(0));
+ DCHECK(getter_stub_deopt_pc_offset() == Smi::kZero);
set_getter_stub_deopt_pc_offset(Smi::FromInt(pc_offset));
}
void Heap::SetSetterStubDeoptPCOffset(int pc_offset) {
- DCHECK(setter_stub_deopt_pc_offset() == Smi::FromInt(0));
+ DCHECK(setter_stub_deopt_pc_offset() == Smi::kZero);
set_setter_stub_deopt_pc_offset(Smi::FromInt(pc_offset));
}
void Heap::SetInterpreterEntryReturnPCOffset(int pc_offset) {
- DCHECK(interpreter_entry_return_pc_offset() == Smi::FromInt(0));
+ DCHECK(interpreter_entry_return_pc_offset() == Smi::kZero);
set_interpreter_entry_return_pc_offset(Smi::FromInt(pc_offset));
}
@@ -828,6 +812,16 @@ void Heap::SetSerializedTemplates(FixedArray* templates) {
set_serialized_templates(templates);
}
+void Heap::CreateObjectStats() {
+ if (V8_LIKELY(FLAG_gc_stats == 0)) return;
+ if (!live_object_stats_) {
+ live_object_stats_ = new ObjectStats(this);
+ }
+ if (!dead_object_stats_) {
+ dead_object_stats_ = new ObjectStats(this);
+ }
+}
+
AlwaysAllocateScope::AlwaysAllocateScope(Isolate* isolate)
: heap_(isolate->heap()) {
heap_->always_allocate_scope_count_.Increment(1);
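
The heap-inl.h hunk above deletes the PROMOTE_MARKED promotion mode, leaving Heap::ShouldBePromoted with a single age-mark test. Below is a minimal standalone sketch of that predicate, with illustrative stand-in types for Page and MemoryChunk (not V8 code).

#include <cstdint>

using Address = uintptr_t;

// Stand-in for the bits of Page/MemoryChunk the predicate consults.
struct PageInfo {
  bool new_space_below_age_mark;  // MemoryChunk::NEW_SPACE_BELOW_AGE_MARK flag
  bool contains_age_mark;         // page->ContainsLimit(age_mark)
};

// Promote an object only if it was allocated below the age mark, i.e. it has
// already survived at least one scavenge.
bool ShouldBePromoted(const PageInfo& page, Address old_address,
                      Address age_mark) {
  return page.new_space_below_age_mark &&
         // On the page that holds the age mark itself, only addresses below
         // the mark qualify.
         (!page.contains_age_mark || old_address < age_mark);
}
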
diff --git a/deps/v8/src/heap/heap.cc b/deps/v8/src/heap/heap.cc
index d823232ac7..2059dae6b7 100644
--- a/deps/v8/src/heap/heap.cc
+++ b/deps/v8/src/heap/heap.cc
@@ -156,8 +156,8 @@ Heap::Heap()
strong_roots_list_(NULL),
heap_iterator_depth_(0),
embedder_heap_tracer_(nullptr),
- embedder_reference_reporter_(new TracePossibleWrapperReporter(this)),
- force_oom_(false) {
+ force_oom_(false),
+ delay_sweeper_tasks_for_testing_(false) {
// Allow build-time customization of the max semispace size. Building
// V8 with snapshots and a non-default max semispace size is much
// easier if you can define it as part of the build environment.
@@ -170,23 +170,22 @@ Heap::Heap()
memset(roots_, 0, sizeof(roots_[0]) * kRootListLength);
set_native_contexts_list(NULL);
- set_allocation_sites_list(Smi::FromInt(0));
- set_encountered_weak_collections(Smi::FromInt(0));
- set_encountered_weak_cells(Smi::FromInt(0));
- set_encountered_transition_arrays(Smi::FromInt(0));
+ set_allocation_sites_list(Smi::kZero);
+ set_encountered_weak_collections(Smi::kZero);
+ set_encountered_weak_cells(Smi::kZero);
+ set_encountered_transition_arrays(Smi::kZero);
// Put a dummy entry in the remembered pages so we can find the list the
// minidump even if there are no real unmapped pages.
RememberUnmappedPage(NULL, false);
}
-
-intptr_t Heap::Capacity() {
+size_t Heap::Capacity() {
if (!HasBeenSetUp()) return 0;
return new_space_->Capacity() + OldGenerationCapacity();
}
-intptr_t Heap::OldGenerationCapacity() {
+size_t Heap::OldGenerationCapacity() {
if (!HasBeenSetUp()) return 0;
return old_space_->Capacity() + code_space_->Capacity() +
@@ -233,11 +232,10 @@ void Heap::UpdateMaximumCommitted() {
}
}
-
-intptr_t Heap::Available() {
+size_t Heap::Available() {
if (!HasBeenSetUp()) return 0;
- intptr_t total = 0;
+ size_t total = 0;
AllSpaces spaces(this);
for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
total += space->Available();
@@ -266,6 +264,12 @@ GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space,
return MARK_COMPACTOR;
}
+ if (incremental_marking()->NeedsFinalization() &&
+ AllocationLimitOvershotByLargeMargin()) {
+ *reason = "Incremental marking needs finalization";
+ return MARK_COMPACTOR;
+ }
+
// Is there enough space left in OLD to guarantee that a scavenge can
// succeed?
//
@@ -275,8 +279,7 @@ GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space,
// and does not count available bytes already in the old space or code
// space. Undercounting is safe---we may get an unrequested full GC when
// a scavenge would have succeeded.
- if (static_cast<intptr_t>(memory_allocator()->MaxAvailable()) <=
- new_space_->Size()) {
+ if (memory_allocator()->MaxAvailable() <= new_space_->Size()) {
isolate_->counters()
->gc_compactor_caused_by_oldspace_exhaustion()
->Increment();
@@ -286,7 +289,7 @@ GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space,
// Default
*reason = NULL;
- return SCAVENGER;
+ return YoungGenerationCollector();
}
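
The hunks above extend Heap::SelectGarbageCollector: a full GC is now also chosen when incremental marking needs finalization and the allocation limit has been overshot by a large margin, the old-space-exhaustion check loses its intptr_t cast, and the default case returns YoungGenerationCollector() instead of a hard-coded SCAVENGER. A condensed, standalone sketch of just the checks visible in these hunks follows; the free-standing signature and parameter names are illustrative, and the function's other cases are untouched by the diff.

#include <cstddef>

enum GarbageCollector { SCAVENGER, MINOR_MARK_COMPACTOR, MARK_COMPACTOR };

GarbageCollector SelectCollectorSketch(bool marking_needs_finalization,
                                       bool limit_overshot_by_large_margin,
                                       size_t max_available_old_memory,
                                       size_t new_space_size) {
  // New in this update: finish the incremental marking cycle with a full GC
  // once the allocation limit has been overshot by a large margin.
  if (marking_needs_finalization && limit_overshot_by_large_margin) {
    return MARK_COMPACTOR;
  }
  // A scavenge may promote everything in new space; fall back to a full GC
  // when old space could not absorb that much (undercounting is safe).
  if (max_available_old_memory <= new_space_size) {
    return MARK_COMPACTOR;
  }
  // Default: a young-generation collection (YoungGenerationCollector() in the
  // real code, which this enum value stands in for).
  return SCAVENGER;
}
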
@@ -316,55 +319,55 @@ void Heap::ReportStatisticsBeforeGC() {
void Heap::PrintShortHeapStatistics() {
if (!FLAG_trace_gc_verbose) return;
- PrintIsolate(isolate_,
- "Memory allocator, used: %6zu KB,"
- " available: %6zu KB\n",
+ PrintIsolate(isolate_, "Memory allocator, used: %6" PRIuS
+ " KB,"
+ " available: %6" PRIuS " KB\n",
memory_allocator()->Size() / KB,
memory_allocator()->Available() / KB);
- PrintIsolate(isolate_, "New space, used: %6" V8PRIdPTR
+ PrintIsolate(isolate_, "New space, used: %6" PRIuS
" KB"
- ", available: %6" V8PRIdPTR
+ ", available: %6" PRIuS
" KB"
- ", committed: %6zu KB\n",
+ ", committed: %6" PRIuS " KB\n",
new_space_->Size() / KB, new_space_->Available() / KB,
new_space_->CommittedMemory() / KB);
- PrintIsolate(isolate_, "Old space, used: %6" V8PRIdPTR
+ PrintIsolate(isolate_, "Old space, used: %6" PRIuS
" KB"
- ", available: %6" V8PRIdPTR
+ ", available: %6" PRIuS
" KB"
- ", committed: %6zu KB\n",
+ ", committed: %6" PRIuS " KB\n",
old_space_->SizeOfObjects() / KB, old_space_->Available() / KB,
old_space_->CommittedMemory() / KB);
- PrintIsolate(isolate_, "Code space, used: %6" V8PRIdPTR
+ PrintIsolate(isolate_, "Code space, used: %6" PRIuS
" KB"
- ", available: %6" V8PRIdPTR
+ ", available: %6" PRIuS
" KB"
- ", committed: %6zu KB\n",
+ ", committed: %6" PRIuS "KB\n",
code_space_->SizeOfObjects() / KB, code_space_->Available() / KB,
code_space_->CommittedMemory() / KB);
- PrintIsolate(isolate_, "Map space, used: %6" V8PRIdPTR
+ PrintIsolate(isolate_, "Map space, used: %6" PRIuS
" KB"
- ", available: %6" V8PRIdPTR
+ ", available: %6" PRIuS
" KB"
- ", committed: %6zu KB\n",
+ ", committed: %6" PRIuS " KB\n",
map_space_->SizeOfObjects() / KB, map_space_->Available() / KB,
map_space_->CommittedMemory() / KB);
- PrintIsolate(isolate_, "Large object space, used: %6" V8PRIdPTR
+ PrintIsolate(isolate_, "Large object space, used: %6" PRIuS
" KB"
- ", available: %6" V8PRIdPTR
+ ", available: %6" PRIuS
" KB"
- ", committed: %6zu KB\n",
+ ", committed: %6" PRIuS " KB\n",
lo_space_->SizeOfObjects() / KB, lo_space_->Available() / KB,
lo_space_->CommittedMemory() / KB);
- PrintIsolate(isolate_, "All spaces, used: %6" V8PRIdPTR
+ PrintIsolate(isolate_, "All spaces, used: %6" PRIuS
" KB"
- ", available: %6" V8PRIdPTR
+ ", available: %6" PRIuS
" KB"
- ", committed: %6zu KB\n",
+ ", committed: %6" PRIuS "KB\n",
this->SizeOfObjects() / KB, this->Available() / KB,
this->CommittedMemory() / KB);
- PrintIsolate(isolate_, "External memory reported: %6" V8PRIdPTR " KB\n",
- static_cast<intptr_t>(external_memory_ / KB));
+ PrintIsolate(isolate_, "External memory reported: %6" PRId64 " KB\n",
+ external_memory_ / KB);
PrintIsolate(isolate_, "Total time spent in GC : %.1f ms\n",
total_gc_time_ms_);
}
@@ -439,12 +442,11 @@ void Heap::GarbageCollectionPrologue() {
}
CheckNewSpaceExpansionCriteria();
UpdateNewSpaceAllocationCounter();
- store_buffer()->MoveEntriesToRememberedSet();
+ store_buffer()->MoveAllEntriesToRememberedSet();
}
-
-intptr_t Heap::SizeOfObjects() {
- intptr_t total = 0;
+size_t Heap::SizeOfObjects() {
+ size_t total = 0;
AllSpaces spaces(this);
for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
total += space->SizeOfObjects();
@@ -742,7 +744,7 @@ void Heap::PreprocessStackTraces() {
}
// We must not compact the weak fixed list here, as we may be in the middle
// of writing to it, when the GC triggered. Instead, we reset the root value.
- set_weak_stack_trace_list(Smi::FromInt(0));
+ set_weak_stack_trace_list(Smi::kZero);
}
@@ -822,7 +824,7 @@ void Heap::FinalizeIncrementalMarking(GarbageCollectionReason gc_reason) {
HistogramTimer* Heap::GCTypeTimer(GarbageCollector collector) {
- if (collector == SCAVENGER) {
+ if (IsYoungGenerationCollector(collector)) {
return isolate_->counters()->gc_scavenger();
} else {
if (!incremental_marking()->IsStopped()) {
@@ -862,7 +864,8 @@ void Heap::CollectAllAvailableGarbage(GarbageCollectionReason gc_reason) {
if (isolate()->concurrent_recompilation_enabled()) {
// The optimizing compiler may be unnecessarily holding on to memory.
DisallowHeapAllocation no_recursive_gc;
- isolate()->optimizing_compile_dispatcher()->Flush();
+ isolate()->optimizing_compile_dispatcher()->Flush(
+ OptimizingCompileDispatcher::BlockingBehavior::kDontBlock);
}
isolate()->ClearSerializerData();
set_current_gc_flags(kMakeHeapIterableMask | kReduceMemoryFootprintMask);
@@ -952,7 +955,8 @@ bool Heap::CollectGarbage(GarbageCollector collector,
EnsureFillerObjectAtTop();
- if (collector == SCAVENGER && !incremental_marking()->IsStopped()) {
+ if (IsYoungGenerationCollector(collector) &&
+ !incremental_marking()->IsStopped()) {
if (FLAG_trace_incremental_marking) {
isolate()->PrintWithTimestamp(
"[IncrementalMarking] Scavenge during marking.\n");
@@ -963,6 +967,7 @@ bool Heap::CollectGarbage(GarbageCollector collector,
!ShouldFinalizeIncrementalMarking() && !ShouldAbortIncrementalMarking() &&
!incremental_marking()->IsStopped() &&
!incremental_marking()->should_hurry() &&
+ !incremental_marking()->NeedsFinalization() &&
!IsCloseToOutOfMemory(new_space_->Capacity())) {
if (!incremental_marking()->IsComplete() &&
!mark_compact_collector()->marking_deque()->IsEmpty() &&
@@ -971,13 +976,13 @@ bool Heap::CollectGarbage(GarbageCollector collector,
isolate()->PrintWithTimestamp(
"[IncrementalMarking] Delaying MarkSweep.\n");
}
- collector = SCAVENGER;
+ collector = YoungGenerationCollector();
collector_reason = "incremental marking delaying mark-sweep";
}
}
bool next_gc_likely_to_collect_more = false;
- intptr_t committed_memory_before = 0;
+ size_t committed_memory_before = 0;
if (collector == MARK_COMPACTOR) {
committed_memory_before = CommittedOldGenerationMemory();
@@ -1004,8 +1009,8 @@ bool Heap::CollectGarbage(GarbageCollector collector,
}
if (collector == MARK_COMPACTOR) {
- intptr_t committed_memory_after = CommittedOldGenerationMemory();
- intptr_t used_memory_after = PromotedSpaceSizeOfObjects();
+ size_t committed_memory_after = CommittedOldGenerationMemory();
+ size_t used_memory_after = PromotedSpaceSizeOfObjects();
MemoryReducer::Event event;
event.type = MemoryReducer::kMarkCompact;
event.time_ms = MonotonicallyIncreasingTimeInMs();
@@ -1014,7 +1019,7 @@ bool Heap::CollectGarbage(GarbageCollector collector,
// - there is high fragmentation,
// - there are live detached contexts.
event.next_gc_likely_to_collect_more =
- (committed_memory_before - committed_memory_after) > MB ||
+ (committed_memory_before > committed_memory_after + MB) ||
HasHighFragmentation(used_memory_after, committed_memory_after) ||
(detached_contexts()->length() > 0);
if (deserialization_complete_) {
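
A side effect of the intptr_t-to-size_t migration shows up in the hunk above: "(committed_memory_before - committed_memory_after) > MB" becomes "committed_memory_before > committed_memory_after + MB", because the subtraction is now unsigned and would wrap around to a huge value whenever committed memory grew. A standalone illustration with made-up values:

#include <cstddef>
#include <cstdio>

int main() {
  const size_t MB = 1024 * 1024;
  size_t before = 10 * MB;  // committed memory before the GC
  size_t after = 12 * MB;   // committed memory grew, so nothing was freed
  // Unsigned subtraction wraps around instead of going negative, so the old
  // form incorrectly reports a shrink of more than one MB.
  printf("wrapping form:  %d\n", static_cast<int>((before - after) > MB));  // 1
  // The rewritten form from the diff avoids the subtraction entirely.
  printf("rewritten form: %d\n", static_cast<int>(before > after + MB));    // 0
  return 0;
}
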
@@ -1036,7 +1041,8 @@ bool Heap::CollectGarbage(GarbageCollector collector,
// generator needs incremental marking to stay off after it aborted.
// We do this only for scavenger to avoid a loop where mark-compact
// causes another mark-compact.
- if (collector == SCAVENGER && !ShouldAbortIncrementalMarking()) {
+ if (IsYoungGenerationCollector(collector) &&
+ !ShouldAbortIncrementalMarking()) {
StartIncrementalMarkingIfAllocationLimitIsReached(kNoGCFlags,
kNoGCCallbackFlags);
}
@@ -1056,7 +1062,8 @@ int Heap::NotifyContextDisposed(bool dependant_context) {
}
if (isolate()->concurrent_recompilation_enabled()) {
// Flush the queued recompilation tasks.
- isolate()->optimizing_compile_dispatcher()->Flush();
+ isolate()->optimizing_compile_dispatcher()->Flush(
+ OptimizingCompileDispatcher::BlockingBehavior::kDontBlock);
}
AgeInlineCaches();
number_of_disposed_maps_ = retained_maps()->Length();
@@ -1172,8 +1179,9 @@ bool Heap::ReserveSpace(Reservation* reservations, List<Address>* maps) {
for (auto& chunk : *reservation) {
AllocationResult allocation;
int size = chunk.size;
- DCHECK_LE(size, MemoryAllocator::PageAreaSize(
- static_cast<AllocationSpace>(space)));
+ DCHECK_LE(static_cast<size_t>(size),
+ MemoryAllocator::PageAreaSize(
+ static_cast<AllocationSpace>(space)));
if (space == NEW_SPACE) {
allocation = new_space()->AllocateRawUnaligned(size);
} else {
@@ -1275,7 +1283,7 @@ bool Heap::PerformGarbageCollection(
GarbageCollector collector, const v8::GCCallbackFlags gc_callback_flags) {
int freed_global_handles = 0;
- if (collector != SCAVENGER) {
+ if (!IsYoungGenerationCollector(collector)) {
PROFILE(isolate_, CodeMovingGCEvent());
}
@@ -1292,9 +1300,7 @@ bool Heap::PerformGarbageCollection(
GCCallbacksScope scope(this);
if (scope.CheckReenter()) {
AllowHeapAllocation allow_allocation;
- TRACE_GC(tracer(), collector == MARK_COMPACTOR
- ? GCTracer::Scope::MC_EXTERNAL_PROLOGUE
- : GCTracer::Scope::SCAVENGER_EXTERNAL_PROLOGUE);
+ TRACE_GC(tracer(), GCTracer::Scope::EXTERNAL_PROLOGUE);
VMState<EXTERNAL> state(isolate_);
HandleScope handle_scope(isolate_);
CallGCPrologueCallbacks(gc_type, kNoGCCallbackFlags);
@@ -1308,18 +1314,25 @@ bool Heap::PerformGarbageCollection(
{
Heap::PretenuringScope pretenuring_scope(this);
- if (collector == MARK_COMPACTOR) {
- UpdateOldGenerationAllocationCounter();
- // Perform mark-sweep with optional compaction.
- MarkCompact();
- old_generation_size_configured_ = true;
- // This should be updated before PostGarbageCollectionProcessing, which
- // can cause another GC. Take into account the objects promoted during GC.
- old_generation_allocation_counter_at_last_gc_ +=
- static_cast<size_t>(promoted_objects_size_);
- old_generation_size_at_last_gc_ = PromotedSpaceSizeOfObjects();
- } else {
- Scavenge();
+ switch (collector) {
+ case MARK_COMPACTOR:
+ UpdateOldGenerationAllocationCounter();
+ // Perform mark-sweep with optional compaction.
+ MarkCompact();
+ old_generation_size_configured_ = true;
+ // This should be updated before PostGarbageCollectionProcessing, which
+ // can cause another GC. Take into account the objects promoted during
+ // GC.
+ old_generation_allocation_counter_at_last_gc_ +=
+ static_cast<size_t>(promoted_objects_size_);
+ old_generation_size_at_last_gc_ = PromotedSpaceSizeOfObjects();
+ break;
+ case MINOR_MARK_COMPACTOR:
+ MinorMarkCompact();
+ break;
+ case SCAVENGER:
+ Scavenge();
+ break;
}
ProcessPretenuringFeedback();
@@ -1348,7 +1361,7 @@ bool Heap::PerformGarbageCollection(
double gc_speed = tracer()->CombinedMarkCompactSpeedInBytesPerMillisecond();
double mutator_speed =
tracer()->CurrentOldGenerationAllocationThroughputInBytesPerMillisecond();
- intptr_t old_gen_size = PromotedSpaceSizeOfObjects();
+ size_t old_gen_size = PromotedSpaceSizeOfObjects();
if (collector == MARK_COMPACTOR) {
// Register the amount of external allocated memory.
external_memory_at_last_mark_compact_ = external_memory_;
@@ -1363,9 +1376,7 @@ bool Heap::PerformGarbageCollection(
GCCallbacksScope scope(this);
if (scope.CheckReenter()) {
AllowHeapAllocation allow_allocation;
- TRACE_GC(tracer(), collector == MARK_COMPACTOR
- ? GCTracer::Scope::MC_EXTERNAL_EPILOGUE
- : GCTracer::Scope::SCAVENGER_EXTERNAL_EPILOGUE);
+ TRACE_GC(tracer(), GCTracer::Scope::EXTERNAL_EPILOGUE);
VMState<EXTERNAL> state(isolate_);
HandleScope handle_scope(isolate_);
CallGCEpilogueCallbacks(gc_type, gc_callback_flags);
@@ -1444,8 +1455,10 @@ void Heap::MarkCompact() {
}
}
+void Heap::MinorMarkCompact() { UNREACHABLE(); }
void Heap::MarkCompactEpilogue() {
+ TRACE_GC(tracer(), GCTracer::Scope::MC_EPILOGUE);
gc_state_ = NOT_IN_GC;
isolate_->counters()->objs_since_last_full()->Set(0);
@@ -1455,18 +1468,12 @@ void Heap::MarkCompactEpilogue() {
PreprocessStackTraces();
DCHECK(incremental_marking()->IsStopped());
- // We finished a marking cycle. We can uncommit the marking deque until
- // we start marking again.
- mark_compact_collector()->marking_deque()->Uninitialize();
- mark_compact_collector()->EnsureMarkingDequeIsCommitted(
- MarkCompactCollector::kMinMarkingDequeSize);
+ mark_compact_collector()->marking_deque()->StopUsing();
}
void Heap::MarkCompactPrologue() {
- // At any old GC clear the keyed lookup cache to enable collection of unused
- // maps.
- isolate_->keyed_lookup_cache()->Clear();
+ TRACE_GC(tracer(), GCTracer::Scope::MC_PROLOGUE);
isolate_->context_slot_cache()->Clear();
isolate_->descriptor_lookup_cache()->Clear();
RegExpResultsCache::Clear(string_split_cache());
@@ -1604,7 +1611,7 @@ void Heap::Scavenge() {
LOG(isolate_, ResourceEvent("scavenge", "begin"));
// Used for updating survived_since_last_expansion_ at function end.
- intptr_t survived_watermark = PromotedSpaceSizeOfObjects();
+ size_t survived_watermark = PromotedSpaceSizeOfObjects();
scavenge_collector_->SelectScavengingVisitorsTable();
@@ -1640,13 +1647,10 @@ void Heap::Scavenge() {
Address new_space_front = new_space_->ToSpaceStart();
promotion_queue_.Initialize();
- PromotionMode promotion_mode = CurrentPromotionMode();
ScavengeVisitor scavenge_visitor(this);
- if (FLAG_scavenge_reclaim_unmodified_objects) {
- isolate()->global_handles()->IdentifyWeakUnmodifiedObjects(
- &IsUnmodifiedHeapObject);
- }
+ isolate()->global_handles()->IdentifyWeakUnmodifiedObjects(
+ &IsUnmodifiedHeapObject);
{
// Copy roots.
@@ -1678,8 +1682,6 @@ void Heap::Scavenge() {
TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_WEAK);
// Copy objects reachable from the encountered weak collections list.
scavenge_visitor.VisitPointer(&encountered_weak_collections_);
- // Copy objects reachable from the encountered weak cells.
- scavenge_visitor.VisitPointer(&encountered_weak_cells_);
}
{
@@ -1693,36 +1695,15 @@ void Heap::Scavenge() {
{
TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_SEMISPACE);
- new_space_front =
- DoScavenge(&scavenge_visitor, new_space_front, promotion_mode);
+ new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
}
- if (FLAG_scavenge_reclaim_unmodified_objects) {
- isolate()->global_handles()->MarkNewSpaceWeakUnmodifiedObjectsPending(
- &IsUnscavengedHeapObject);
-
- isolate()->global_handles()->IterateNewSpaceWeakUnmodifiedRoots(
- &scavenge_visitor);
- new_space_front =
- DoScavenge(&scavenge_visitor, new_space_front, promotion_mode);
- } else {
- TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_OBJECT_GROUPS);
- while (isolate()->global_handles()->IterateObjectGroups(
- &scavenge_visitor, &IsUnscavengedHeapObject)) {
- new_space_front =
- DoScavenge(&scavenge_visitor, new_space_front, promotion_mode);
- }
- isolate()->global_handles()->RemoveObjectGroups();
- isolate()->global_handles()->RemoveImplicitRefGroups();
-
- isolate()->global_handles()->IdentifyNewSpaceWeakIndependentHandles(
- &IsUnscavengedHeapObject);
+ isolate()->global_handles()->MarkNewSpaceWeakUnmodifiedObjectsPending(
+ &IsUnscavengedHeapObject);
- isolate()->global_handles()->IterateNewSpaceWeakIndependentRoots(
- &scavenge_visitor);
- new_space_front =
- DoScavenge(&scavenge_visitor, new_space_front, promotion_mode);
- }
+ isolate()->global_handles()->IterateNewSpaceWeakUnmodifiedRoots(
+ &scavenge_visitor);
+ new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
UpdateNewSpaceReferencesInExternalStringTable(
&UpdateNewSpaceReferenceInExternalStringTableEntry);
@@ -1742,9 +1723,9 @@ void Heap::Scavenge() {
ArrayBufferTracker::FreeDeadInNewSpace(this);
// Update how much has survived scavenge.
- IncrementYoungSurvivorsCounter(
- static_cast<int>((PromotedSpaceSizeOfObjects() - survived_watermark) +
- new_space_->Size()));
+ DCHECK_GE(PromotedSpaceSizeOfObjects(), survived_watermark);
+ IncrementYoungSurvivorsCounter(PromotedSpaceSizeOfObjects() +
+ new_space_->Size() - survived_watermark);
LOG(isolate_, ResourceEvent("scavenge", "end"));
@@ -1905,8 +1886,7 @@ void Heap::VisitExternalResources(v8::ExternalResourceVisitor* visitor) {
}
Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
- Address new_space_front,
- PromotionMode promotion_mode) {
+ Address new_space_front) {
do {
SemiSpace::AssertValidRange(new_space_front, new_space_->top());
// The addresses new_space_front and new_space_.top() define a
@@ -1915,14 +1895,8 @@ Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
while (new_space_front != new_space_->top()) {
if (!Page::IsAlignedToPageSize(new_space_front)) {
HeapObject* object = HeapObject::FromAddress(new_space_front);
- if (promotion_mode == PROMOTE_MARKED) {
- new_space_front += StaticScavengeVisitor<PROMOTE_MARKED>::IterateBody(
- object->map(), object);
- } else {
- new_space_front +=
- StaticScavengeVisitor<DEFAULT_PROMOTION>::IterateBody(
- object->map(), object);
- }
+ new_space_front +=
+ StaticScavengeVisitor::IterateBody(object->map(), object);
} else {
new_space_front = Page::FromAllocationAreaAddress(new_space_front)
->next_page()
@@ -1944,8 +1918,8 @@ Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
// to new space.
DCHECK(!target->IsMap());
- IteratePromotedObject(target, static_cast<int>(size), was_marked_black,
- &Scavenger::ScavengeObject);
+ IterateAndScavengePromotedObject(target, static_cast<int>(size),
+ was_marked_black);
}
}
@@ -2039,7 +2013,7 @@ void Heap::ConfigureInitialOldGenerationSize() {
if (!old_generation_size_configured_ && tracer()->SurvivalEventsRecorded()) {
old_generation_allocation_limit_ =
Max(MinimumAllocationLimitGrowingStep(),
- static_cast<intptr_t>(
+ static_cast<size_t>(
static_cast<double>(old_generation_allocation_limit_) *
(tracer()->AverageSurvivalRatio() / 100)));
}
@@ -2074,7 +2048,7 @@ AllocationResult Heap::AllocatePartialMap(InstanceType instance_type,
Map::OwnsDescriptors::encode(true) |
Map::ConstructionCounter::encode(Map::kNoSlackTracking);
reinterpret_cast<Map*>(result)->set_bit_field3(bit_field3);
- reinterpret_cast<Map*>(result)->set_weak_cell_cache(Smi::FromInt(0));
+ reinterpret_cast<Map*>(result)->set_weak_cell_cache(Smi::kZero);
return result;
}
@@ -2098,8 +2072,8 @@ AllocationResult Heap::AllocateMap(InstanceType instance_type,
map->set_code_cache(empty_fixed_array(), SKIP_WRITE_BARRIER);
map->set_dependent_code(DependentCode::cast(empty_fixed_array()),
SKIP_WRITE_BARRIER);
- map->set_weak_cell_cache(Smi::FromInt(0));
- map->set_raw_transitions(Smi::FromInt(0));
+ map->set_weak_cell_cache(Smi::kZero);
+ map->set_raw_transitions(Smi::kZero);
map->set_unused_property_fields(0);
map->set_instance_descriptors(empty_descriptor_array());
if (FLAG_unbox_double_fields) {
@@ -2171,7 +2145,7 @@ namespace {
void FinalizePartialMap(Heap* heap, Map* map) {
map->set_code_cache(heap->empty_fixed_array());
map->set_dependent_code(DependentCode::cast(heap->empty_fixed_array()));
- map->set_raw_transitions(Smi::FromInt(0));
+ map->set_raw_transitions(Smi::kZero);
map->set_instance_descriptors(heap->empty_descriptor_array());
if (FLAG_unbox_double_fields) {
map->set_layout_descriptor(LayoutDescriptor::FastPointerLayout());
@@ -2281,7 +2255,6 @@ bool Heap::CreateInitialMaps() {
DCHECK_NE(fixed_array_map(), fixed_cow_array_map());
ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, scope_info)
- ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, module_info_entry)
ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, module_info)
ALLOCATE_PRIMITIVE_MAP(HEAP_NUMBER_TYPE, HeapNumber::kSize, heap_number,
Context::NUMBER_FUNCTION_INDEX)
@@ -2506,7 +2479,7 @@ AllocationResult Heap::AllocatePropertyCell() {
PropertyCell* cell = PropertyCell::cast(result);
cell->set_dependent_code(DependentCode::cast(empty_fixed_array()),
SKIP_WRITE_BARRIER);
- cell->set_property_details(PropertyDetails(Smi::FromInt(0)));
+ cell->set_property_details(PropertyDetails(Smi::kZero));
cell->set_value(the_hole_value());
return result;
}
@@ -2553,16 +2526,6 @@ AllocationResult Heap::AllocateTransitionArray(int capacity) {
void Heap::CreateApiObjects() {
HandleScope scope(isolate());
- Factory* factory = isolate()->factory();
- Handle<Map> new_neander_map =
- factory->NewMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
-
- // Don't use Smi-only elements optimizations for objects with the neander
- // map. There are too many cases where element values are set directly with a
- // bottleneck to trap the Smi-only -> fast elements transition, and there
- // appears to be no benefit for optimize this case.
- new_neander_map->set_elements_kind(TERMINAL_FAST_ELEMENTS_KIND);
- set_neander_map(*new_neander_map);
set_message_listeners(*TemplateList::New(isolate(), 2));
}
@@ -2636,8 +2599,7 @@ void Heap::CreateInitialObjects() {
// Initialize the null_value.
Oddball::Initialize(isolate(), factory->null_value(), "null",
- handle(Smi::FromInt(0), isolate()), "object",
- Oddball::kNull);
+ handle(Smi::kZero, isolate()), "object", Oddball::kNull);
// Initialize the_hole_value.
Oddball::Initialize(isolate(), factory->the_hole_value(), "hole",
@@ -2651,7 +2613,7 @@ void Heap::CreateInitialObjects() {
// Initialize the false_value.
Oddball::Initialize(isolate(), factory->false_value(), "false",
- handle(Smi::FromInt(0), isolate()), "boolean",
+ handle(Smi::kZero, isolate()), "boolean",
Oddball::kFalse);
set_uninitialized_value(
@@ -2697,9 +2659,9 @@ void Heap::CreateInitialObjects() {
// expanding the dictionary during bootstrapping.
set_code_stubs(*UnseededNumberDictionary::New(isolate(), 128));
- set_instanceof_cache_function(Smi::FromInt(0));
- set_instanceof_cache_map(Smi::FromInt(0));
- set_instanceof_cache_answer(Smi::FromInt(0));
+ set_instanceof_cache_function(Smi::kZero);
+ set_instanceof_cache_map(Smi::kZero);
+ set_instanceof_cache_answer(Smi::kZero);
{
HandleScope scope(isolate());
@@ -2768,7 +2730,7 @@ void Heap::CreateInitialObjects() {
set_undefined_cell(*factory->NewCell(factory->undefined_value()));
// The symbol registry is initialized lazily.
- set_symbol_registry(Smi::FromInt(0));
+ set_symbol_registry(Smi::kZero);
// Microtask queue uses the empty fixed array as a sentinel for "empty".
// Number of queued microtasks stored in Isolate::pending_microtask_count().
@@ -2816,7 +2778,7 @@ void Heap::CreateInitialObjects() {
empty_type_feedback_vector->set(TypeFeedbackVector::kMetadataIndex,
empty_fixed_array());
empty_type_feedback_vector->set(TypeFeedbackVector::kInvocationCountIndex,
- Smi::FromInt(0));
+ Smi::kZero);
set_empty_type_feedback_vector(*empty_type_feedback_vector);
// We use a canonical empty LiteralsArray for all functions that neither
@@ -2839,14 +2801,6 @@ void Heap::CreateInitialObjects() {
Handle<WeakCell> cell = factory->NewWeakCell(factory->undefined_value());
set_empty_weak_cell(*cell);
cell->clear();
-
- Handle<FixedArray> cleared_optimized_code_map =
- factory->NewFixedArray(SharedFunctionInfo::kEntriesStart, TENURED);
- cleared_optimized_code_map->set(SharedFunctionInfo::kSharedCodeIndex,
- *cell);
- STATIC_ASSERT(SharedFunctionInfo::kEntriesStart == 1 &&
- SharedFunctionInfo::kSharedCodeIndex == 0);
- set_cleared_optimized_code_map(*cleared_optimized_code_map);
}
set_detached_contexts(empty_fixed_array());
@@ -2860,7 +2814,7 @@ void Heap::CreateInitialObjects() {
ArrayList::cast(*(factory->NewFixedArray(16, TENURED))));
weak_new_space_object_to_code_list()->SetLength(0);
- set_script_list(Smi::FromInt(0));
+ set_script_list(Smi::kZero);
Handle<SeededNumberDictionary> slow_element_dictionary =
SeededNumberDictionary::New(isolate(), 0, TENURED);
@@ -2871,7 +2825,7 @@ void Heap::CreateInitialObjects() {
// Handling of script id generation is in Heap::NextScriptId().
set_last_script_id(Smi::FromInt(v8::UnboundScript::kNoScriptId));
- set_next_template_serial_number(Smi::FromInt(0));
+ set_next_template_serial_number(Smi::kZero);
// Allocate the empty script.
Handle<Script> script = factory->NewScript(factory->empty_string());
@@ -2879,7 +2833,7 @@ void Heap::CreateInitialObjects() {
set_empty_script(*script);
Handle<PropertyCell> cell = factory->NewPropertyCell();
- cell->set_value(Smi::FromInt(Isolate::kArrayProtectorValid));
+ cell->set_value(Smi::FromInt(Isolate::kProtectorValid));
set_array_protector(*cell);
cell = factory->NewPropertyCell();
@@ -2887,29 +2841,34 @@ void Heap::CreateInitialObjects() {
set_empty_property_cell(*cell);
cell = factory->NewPropertyCell();
- cell->set_value(Smi::FromInt(Isolate::kArrayProtectorValid));
+ cell->set_value(Smi::FromInt(Isolate::kProtectorValid));
set_has_instance_protector(*cell);
Handle<Cell> is_concat_spreadable_cell = factory->NewCell(
- handle(Smi::FromInt(Isolate::kArrayProtectorValid), isolate()));
+ handle(Smi::FromInt(Isolate::kProtectorValid), isolate()));
set_is_concat_spreadable_protector(*is_concat_spreadable_cell);
Handle<Cell> species_cell = factory->NewCell(
- handle(Smi::FromInt(Isolate::kArrayProtectorValid), isolate()));
+ handle(Smi::FromInt(Isolate::kProtectorValid), isolate()));
set_species_protector(*species_cell);
cell = factory->NewPropertyCell();
- cell->set_value(Smi::FromInt(Isolate::kArrayProtectorValid));
+ cell->set_value(Smi::FromInt(Isolate::kProtectorValid));
set_string_length_protector(*cell);
- set_serialized_templates(empty_fixed_array());
+ Handle<Cell> fast_array_iteration_cell = factory->NewCell(
+ handle(Smi::FromInt(Isolate::kProtectorValid), isolate()));
+ set_fast_array_iteration_protector(*fast_array_iteration_cell);
- set_weak_stack_trace_list(Smi::FromInt(0));
+ Handle<Cell> array_iterator_cell = factory->NewCell(
+ handle(Smi::FromInt(Isolate::kProtectorValid), isolate()));
+ set_array_iterator_protector(*array_iterator_cell);
- set_noscript_shared_function_infos(Smi::FromInt(0));
+ set_serialized_templates(empty_fixed_array());
- // Initialize keyed lookup cache.
- isolate_->keyed_lookup_cache()->Clear();
+ set_weak_stack_trace_list(Smi::kZero);
+
+ set_noscript_shared_function_infos(Smi::kZero);
// Initialize context slot cache.
isolate_->context_slot_cache()->Clear();
@@ -2964,12 +2923,13 @@ int Heap::FullSizeNumberStringCacheLength() {
// Compute the size of the number string cache based on the max newspace size.
// The number string cache has a minimum size based on twice the initial cache
// size to ensure that it is bigger after being made 'full size'.
- int number_string_cache_size = max_semi_space_size_ / 512;
- number_string_cache_size = Max(kInitialNumberStringCacheSize * 2,
- Min(0x4000, number_string_cache_size));
+ size_t number_string_cache_size = max_semi_space_size_ / 512;
+ number_string_cache_size =
+ Max(static_cast<size_t>(kInitialNumberStringCacheSize * 2),
+ Min<size_t>(0x4000u, number_string_cache_size));
// There is a string and a number per entry so the length is twice the number
// of entries.
- return number_string_cache_size * 2;
+ return static_cast<int>(number_string_cache_size * 2);
}
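
Illustrative sketch, not part of the upstream diff: the cache sizing above restated as standalone C++; the real kInitialNumberStringCacheSize constant is defined elsewhere in heap.h, so it is passed in here as a parameter.

#include <algorithm>
#include <cstddef>

// entries = clamp(max_semi_space_size / 512, 2 * initial_cache_size, 0x4000);
// the returned length is twice that, since each entry holds a string and a
// number (hypothetical helper, mirrors the hunk above).
int NumberStringCacheLengthSketch(size_t max_semi_space_size,
                                  size_t initial_cache_size) {
  size_t entries = max_semi_space_size / 512;
  entries = std::max(initial_cache_size * 2,
                     std::min<size_t>(0x4000u, entries));
  return static_cast<int>(entries * 2);
}
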
@@ -3308,7 +3268,7 @@ AllocationResult Heap::AllocateFixedTypedArrayWithExternalPointer(
result->set_map_no_write_barrier(MapForFixedTypedArray(array_type));
FixedTypedArrayBase* elements = FixedTypedArrayBase::cast(result);
- elements->set_base_pointer(Smi::FromInt(0), SKIP_WRITE_BARRIER);
+ elements->set_base_pointer(Smi::kZero, SKIP_WRITE_BARRIER);
elements->set_external_pointer(external_pointer, SKIP_WRITE_BARRIER);
elements->set_length(length);
return elements;
@@ -3392,7 +3352,7 @@ AllocationResult Heap::AllocateCode(int object_size, bool immovable) {
DCHECK(!memory_allocator()->code_range()->valid() ||
memory_allocator()->code_range()->contains(code->address()) ||
object_size <= code_space()->AreaSize());
- code->set_gc_metadata(Smi::FromInt(0));
+ code->set_gc_metadata(Smi::kZero);
code->set_ic_age(global_ic_age_);
return code;
}
@@ -3489,7 +3449,7 @@ void Heap::InitializeJSObjectFromMap(JSObject* obj, FixedArray* properties,
// TODO(1240798): Initialize the object's body using valid initial values
// according to the object's initial map. For example, if the map's
// instance type is JS_ARRAY_TYPE, the length field should be initialized
- // to a number (e.g. Smi::FromInt(0)) and the elements initialized to a
+ // to a number (e.g. Smi::kZero) and the elements initialized to a
// fixed array (e.g. Heap::empty_fixed_array()). Currently, the object
// verification code has to cope with (temporarily) invalid objects. See
// for example, JSArray::JSArrayVerify).
@@ -4036,13 +3996,7 @@ AllocationResult Heap::AllocateSymbol() {
result->set_map_no_write_barrier(symbol_map());
// Generate a random hash value.
- int hash;
- int attempts = 0;
- do {
- hash = isolate()->random_number_generator()->NextInt() & Name::kHashBitMask;
- attempts++;
- } while (hash == 0 && attempts < 30);
- if (hash == 0) hash = 1; // never return 0
+ int hash = isolate()->GenerateIdentityHash(Name::kHashBitMask);
Symbol::cast(result)
->set_hash_field(Name::kIsNotArrayIndexMask | (hash << Name::kHashShift));
@@ -4165,16 +4119,16 @@ bool Heap::HasLowAllocationRate() {
bool Heap::HasHighFragmentation() {
- intptr_t used = PromotedSpaceSizeOfObjects();
- intptr_t committed = CommittedOldGenerationMemory();
+ size_t used = PromotedSpaceSizeOfObjects();
+ size_t committed = CommittedOldGenerationMemory();
return HasHighFragmentation(used, committed);
}
-
-bool Heap::HasHighFragmentation(intptr_t used, intptr_t committed) {
- const intptr_t kSlack = 16 * MB;
+bool Heap::HasHighFragmentation(size_t used, size_t committed) {
+ const size_t kSlack = 16 * MB;
// Fragmentation is high if committed > 2 * used + kSlack.
  // Rewrite the expression to avoid overflow.
+ DCHECK_GE(committed, used);
return committed - used > used + kSlack;
}
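
Illustrative sketch, not part of the upstream diff: the rearranged fragmentation test as self-contained C++, with the DCHECK replaced by an explicit guard.

#include <cstddef>

// Fragmentation counts as high when committed > 2 * used + kSlack. The
// subtraction form below avoids computing the doubled term and relies on
// committed >= used, as asserted in the hunk above.
bool HasHighFragmentationSketch(size_t used, size_t committed) {
  const size_t kSlack = 16u * 1024 * 1024;  // 16 MB, same slack as the diff
  if (committed < used) return false;       // guard in place of DCHECK_GE
  return committed - used > used + kSlack;
}
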
@@ -4229,8 +4183,7 @@ void Heap::FinalizeIncrementalMarkingIfComplete(
(!incremental_marking()->finalize_marking_completed() &&
MarkingDequesAreEmpty()))) {
FinalizeIncrementalMarking(gc_reason);
- } else if (incremental_marking()->IsComplete() ||
- (mark_compact_collector()->marking_deque()->IsEmpty())) {
+ } else if (incremental_marking()->IsComplete() || MarkingDequesAreEmpty()) {
CollectAllGarbage(current_gc_flags_, gc_reason);
}
}
@@ -4457,7 +4410,8 @@ void Heap::CheckMemoryPressure() {
if (isolate()->concurrent_recompilation_enabled()) {
// The optimizing compiler may be unnecessarily holding on to memory.
DisallowHeapAllocation no_recursive_gc;
- isolate()->optimizing_compile_dispatcher()->Flush();
+ isolate()->optimizing_compile_dispatcher()->Flush(
+ OptimizingCompileDispatcher::BlockingBehavior::kDontBlock);
}
}
if (memory_pressure_level_.Value() == MemoryPressureLevel::kCritical) {
@@ -4784,51 +4738,44 @@ void Heap::ZapFromSpace() {
}
}
-void Heap::IteratePromotedObjectPointers(HeapObject* object, Address start,
- Address end, bool record_slots,
- ObjectSlotCallback callback) {
- Address slot_address = start;
- Page* page = Page::FromAddress(start);
-
- while (slot_address < end) {
- Object** slot = reinterpret_cast<Object**>(slot_address);
- Object* target = *slot;
- if (target->IsHeapObject()) {
- if (Heap::InFromSpace(target)) {
- callback(reinterpret_cast<HeapObject**>(slot),
- HeapObject::cast(target));
- Object* new_target = *slot;
- if (InNewSpace(new_target)) {
- SLOW_DCHECK(Heap::InToSpace(new_target));
- SLOW_DCHECK(new_target->IsHeapObject());
- RememberedSet<OLD_TO_NEW>::Insert(page, slot_address);
+class IterateAndScavengePromotedObjectsVisitor final : public ObjectVisitor {
+ public:
+ IterateAndScavengePromotedObjectsVisitor(Heap* heap, HeapObject* target,
+ bool record_slots)
+ : heap_(heap), target_(target), record_slots_(record_slots) {}
+
+ inline void VisitPointers(Object** start, Object** end) override {
+ Address slot_address = reinterpret_cast<Address>(start);
+ Page* page = Page::FromAddress(slot_address);
+
+ while (slot_address < reinterpret_cast<Address>(end)) {
+ Object** slot = reinterpret_cast<Object**>(slot_address);
+ Object* target = *slot;
+
+ if (target->IsHeapObject()) {
+ if (heap_->InFromSpace(target)) {
+ Scavenger::ScavengeObject(reinterpret_cast<HeapObject**>(slot),
+ HeapObject::cast(target));
+ target = *slot;
+ if (heap_->InNewSpace(target)) {
+ SLOW_DCHECK(heap_->InToSpace(target));
+ SLOW_DCHECK(target->IsHeapObject());
+ RememberedSet<OLD_TO_NEW>::Insert(page, slot_address);
+ }
+ SLOW_DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(
+ HeapObject::cast(target)));
+ } else if (record_slots_ &&
+ MarkCompactCollector::IsOnEvacuationCandidate(
+ HeapObject::cast(target))) {
+ heap_->mark_compact_collector()->RecordSlot(target_, slot, target);
}
- SLOW_DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(new_target));
- } else if (record_slots &&
- MarkCompactCollector::IsOnEvacuationCandidate(target)) {
- mark_compact_collector()->RecordSlot(object, slot, target);
}
- }
- slot_address += kPointerSize;
- }
-}
-
-class IteratePromotedObjectsVisitor final : public ObjectVisitor {
- public:
- IteratePromotedObjectsVisitor(Heap* heap, HeapObject* target,
- bool record_slots, ObjectSlotCallback callback)
- : heap_(heap),
- target_(target),
- record_slots_(record_slots),
- callback_(callback) {}
- V8_INLINE void VisitPointers(Object** start, Object** end) override {
- heap_->IteratePromotedObjectPointers(
- target_, reinterpret_cast<Address>(start),
- reinterpret_cast<Address>(end), record_slots_, callback_);
+ slot_address += kPointerSize;
+ }
}
- V8_INLINE void VisitCodeEntry(Address code_entry_slot) override {
+ inline void VisitCodeEntry(Address code_entry_slot) override {
// Black allocation requires us to process objects referenced by
// promoted objects.
if (heap_->incremental_marking()->black_allocation()) {
@@ -4841,12 +4788,10 @@ class IteratePromotedObjectsVisitor final : public ObjectVisitor {
Heap* heap_;
HeapObject* target_;
bool record_slots_;
- ObjectSlotCallback callback_;
};
-void Heap::IteratePromotedObject(HeapObject* target, int size,
- bool was_marked_black,
- ObjectSlotCallback callback) {
+void Heap::IterateAndScavengePromotedObject(HeapObject* target, int size,
+ bool was_marked_black) {
// We are not collecting slots on new space objects during mutation
// thus we have to scan for pointers to evacuation candidates when we
// promote objects. But we should not record any slots in non-black
@@ -4859,8 +4804,14 @@ void Heap::IteratePromotedObject(HeapObject* target, int size,
record_slots = Marking::IsBlack(mark_bit);
}
- IteratePromotedObjectsVisitor visitor(this, target, record_slots, callback);
- target->IterateBody(target->map()->instance_type(), size, &visitor);
+ IterateAndScavengePromotedObjectsVisitor visitor(this, target, record_slots);
+ if (target->IsJSFunction()) {
+ // JSFunctions reachable through kNextFunctionLinkOffset are weak. Slots for
+  // these links are recorded during processing of weak lists.
+ JSFunction::BodyDescriptorWeakCode::IterateBody(target, size, &visitor);
+ } else {
+ target->IterateBody(target->map()->instance_type(), size, &visitor);
+ }
  // When black allocation is on, we have to visit not already marked black
// objects (in new space) promoted to black pages to keep their references
@@ -5032,31 +4983,31 @@ void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
// TODO(1236194): Since the heap size is configurable on the command line
// and through the API, we should gracefully handle the case that the heap
// size is not big enough to fit all the initial objects.
-bool Heap::ConfigureHeap(int max_semi_space_size, int max_old_space_size,
- int max_executable_size, size_t code_range_size) {
+bool Heap::ConfigureHeap(size_t max_semi_space_size, size_t max_old_space_size,
+ size_t max_executable_size, size_t code_range_size) {
if (HasBeenSetUp()) return false;
// Overwrite default configuration.
- if (max_semi_space_size > 0) {
+ if (max_semi_space_size != 0) {
max_semi_space_size_ = max_semi_space_size * MB;
}
- if (max_old_space_size > 0) {
- max_old_generation_size_ = static_cast<intptr_t>(max_old_space_size) * MB;
+ if (max_old_space_size != 0) {
+ max_old_generation_size_ = max_old_space_size * MB;
}
- if (max_executable_size > 0) {
- max_executable_size_ = static_cast<intptr_t>(max_executable_size) * MB;
+ if (max_executable_size != 0) {
+ max_executable_size_ = max_executable_size * MB;
}
// If max space size flags are specified overwrite the configuration.
if (FLAG_max_semi_space_size > 0) {
- max_semi_space_size_ = FLAG_max_semi_space_size * MB;
+ max_semi_space_size_ = static_cast<size_t>(FLAG_max_semi_space_size) * MB;
}
if (FLAG_max_old_space_size > 0) {
max_old_generation_size_ =
- static_cast<intptr_t>(FLAG_max_old_space_size) * MB;
+ static_cast<size_t>(FLAG_max_old_space_size) * MB;
}
if (FLAG_max_executable_size > 0) {
- max_executable_size_ = static_cast<intptr_t>(FLAG_max_executable_size) * MB;
+ max_executable_size_ = static_cast<size_t>(FLAG_max_executable_size) * MB;
}
if (Page::kPageSize > MB) {
@@ -5073,17 +5024,18 @@ bool Heap::ConfigureHeap(int max_semi_space_size, int max_old_space_size,
// The new space size must be a power of two to support single-bit testing
// for containment.
- max_semi_space_size_ =
- base::bits::RoundUpToPowerOfTwo32(max_semi_space_size_);
+ max_semi_space_size_ = base::bits::RoundUpToPowerOfTwo32(
+ static_cast<uint32_t>(max_semi_space_size_));
if (FLAG_min_semi_space_size > 0) {
- int initial_semispace_size = FLAG_min_semi_space_size * MB;
+ size_t initial_semispace_size =
+ static_cast<size_t>(FLAG_min_semi_space_size) * MB;
if (initial_semispace_size > max_semi_space_size_) {
initial_semispace_size_ = max_semi_space_size_;
if (FLAG_trace_gc) {
PrintIsolate(isolate_,
"Min semi-space size cannot be more than the maximum "
- "semi-space size of %d MB\n",
+ "semi-space size of %" PRIuS " MB\n",
max_semi_space_size_ / MB);
}
} else {
@@ -5101,7 +5053,7 @@ bool Heap::ConfigureHeap(int max_semi_space_size, int max_old_space_size,
// The old generation is paged and needs at least one page for each space.
int paged_space_count = LAST_PAGED_SPACE - FIRST_PAGED_SPACE + 1;
max_old_generation_size_ =
- Max(static_cast<intptr_t>(paged_space_count * Page::kPageSize),
+ Max(static_cast<size_t>(paged_space_count * Page::kPageSize),
max_old_generation_size_);
// The max executable size must be less than or equal to the max old
@@ -5200,16 +5152,15 @@ void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
}
}
-
-intptr_t Heap::PromotedSpaceSizeOfObjects() {
+size_t Heap::PromotedSpaceSizeOfObjects() {
return old_space_->SizeOfObjects() + code_space_->SizeOfObjects() +
map_space_->SizeOfObjects() + lo_space_->SizeOfObjects();
}
-
-int64_t Heap::PromotedExternalMemorySize() {
+uint64_t Heap::PromotedExternalMemorySize() {
if (external_memory_ <= external_memory_at_last_mark_compact_) return 0;
- return external_memory_ - external_memory_at_last_mark_compact_;
+ return static_cast<uint64_t>(external_memory_ -
+ external_memory_at_last_mark_compact_);
}
@@ -5277,29 +5228,29 @@ double Heap::HeapGrowingFactor(double gc_speed, double mutator_speed) {
return factor;
}
-
-intptr_t Heap::CalculateOldGenerationAllocationLimit(double factor,
- intptr_t old_gen_size) {
+size_t Heap::CalculateOldGenerationAllocationLimit(double factor,
+ size_t old_gen_size) {
CHECK(factor > 1.0);
CHECK(old_gen_size > 0);
- intptr_t limit = static_cast<intptr_t>(old_gen_size * factor);
- limit = Max(limit, old_gen_size + MinimumAllocationLimitGrowingStep());
+ uint64_t limit = static_cast<uint64_t>(old_gen_size * factor);
+ limit = Max(limit, static_cast<uint64_t>(old_gen_size) +
+ MinimumAllocationLimitGrowingStep());
limit += new_space_->Capacity();
- intptr_t halfway_to_the_max = (old_gen_size + max_old_generation_size_) / 2;
- return Min(limit, halfway_to_the_max);
+ uint64_t halfway_to_the_max =
+ (static_cast<uint64_t>(old_gen_size) + max_old_generation_size_) / 2;
+ return static_cast<size_t>(Min(limit, halfway_to_the_max));
}
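
Illustrative sketch, not part of the upstream diff: the allocation-limit arithmetic above with the new-space capacity, minimum growing step, and old-generation maximum passed in as plain numbers.

#include <algorithm>
#include <cstdint>

// limit = max(old_gen_size * factor, old_gen_size + min_growing_step)
//           + new_space_capacity,
// capped at halfway between old_gen_size and the old-generation maximum;
// computed in uint64_t so the intermediate values cannot overflow size_t.
uint64_t OldGenAllocationLimitSketch(double factor, uint64_t old_gen_size,
                                     uint64_t min_growing_step,
                                     uint64_t new_space_capacity,
                                     uint64_t max_old_generation_size) {
  uint64_t limit = static_cast<uint64_t>(old_gen_size * factor);
  limit = std::max(limit, old_gen_size + min_growing_step);
  limit += new_space_capacity;
  const uint64_t halfway_to_the_max =
      (old_gen_size + max_old_generation_size) / 2;
  return std::min(limit, halfway_to_the_max);
}
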
-intptr_t Heap::MinimumAllocationLimitGrowingStep() {
- const double kRegularAllocationLimitGrowingStep = 8;
- const double kLowMemoryAllocationLimitGrowingStep = 2;
- intptr_t limit = (Page::kPageSize > MB ? Page::kPageSize : MB);
+size_t Heap::MinimumAllocationLimitGrowingStep() {
+ const size_t kRegularAllocationLimitGrowingStep = 8;
+ const size_t kLowMemoryAllocationLimitGrowingStep = 2;
+ size_t limit = (Page::kPageSize > MB ? Page::kPageSize : MB);
return limit * (ShouldOptimizeForMemoryUsage()
? kLowMemoryAllocationLimitGrowingStep
: kRegularAllocationLimitGrowingStep);
}
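
Illustrative sketch, not part of the upstream diff: the growing step above, with the page size passed in.

#include <cstddef>

// One page or 1 MB, whichever is larger, scaled by 8 normally and by 2 when
// the heap is optimizing for memory usage.
size_t MinimumGrowingStepSketch(size_t page_size, bool optimize_for_memory) {
  const size_t kMB = 1u * 1024 * 1024;
  const size_t base = page_size > kMB ? page_size : kMB;
  return base * (optimize_for_memory ? 2 : 8);
}
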
-void Heap::SetOldGenerationAllocationLimit(intptr_t old_gen_size,
- double gc_speed,
+void Heap::SetOldGenerationAllocationLimit(size_t old_gen_size, double gc_speed,
double mutator_speed) {
double factor = HeapGrowingFactor(gc_speed, mutator_speed);
@@ -5332,24 +5283,23 @@ void Heap::SetOldGenerationAllocationLimit(intptr_t old_gen_size,
CalculateOldGenerationAllocationLimit(factor, old_gen_size);
if (FLAG_trace_gc_verbose) {
- isolate_->PrintWithTimestamp("Grow: old size: %" V8PRIdPTR
- " KB, new limit: %" V8PRIdPTR " KB (%.1f)\n",
- old_gen_size / KB,
- old_generation_allocation_limit_ / KB, factor);
+ isolate_->PrintWithTimestamp(
+ "Grow: old size: %" PRIuS " KB, new limit: %" PRIuS " KB (%.1f)\n",
+ old_gen_size / KB, old_generation_allocation_limit_ / KB, factor);
}
}
-void Heap::DampenOldGenerationAllocationLimit(intptr_t old_gen_size,
+void Heap::DampenOldGenerationAllocationLimit(size_t old_gen_size,
double gc_speed,
double mutator_speed) {
double factor = HeapGrowingFactor(gc_speed, mutator_speed);
- intptr_t limit = CalculateOldGenerationAllocationLimit(factor, old_gen_size);
+ size_t limit = CalculateOldGenerationAllocationLimit(factor, old_gen_size);
if (limit < old_generation_allocation_limit_) {
if (FLAG_trace_gc_verbose) {
isolate_->PrintWithTimestamp(
- "Dampen: old size: %" V8PRIdPTR " KB, old limit: %" V8PRIdPTR
+ "Dampen: old size: %" PRIuS " KB, old limit: %" PRIuS
" KB, "
- "new limit: %" V8PRIdPTR " KB (%.1f)\n",
+ "new limit: %" PRIuS " KB (%.1f)\n",
old_gen_size / KB, old_generation_allocation_limit_ / KB, limit / KB,
factor);
}
@@ -5362,12 +5312,16 @@ void Heap::DampenOldGenerationAllocationLimit(intptr_t old_gen_size,
// major GC. It happens when the old generation allocation limit is reached and
// - either we need to optimize for memory usage,
// - or the incremental marking is not in progress and we cannot start it.
-bool Heap::ShouldExpandOldGenerationOnAllocationFailure() {
+bool Heap::ShouldExpandOldGenerationOnSlowAllocation() {
if (always_allocate() || OldGenerationSpaceAvailable() > 0) return true;
// We reached the old generation allocation limit.
if (ShouldOptimizeForMemoryUsage()) return false;
+ if (incremental_marking()->NeedsFinalization()) {
+ return !AllocationLimitOvershotByLargeMargin();
+ }
+
if (incremental_marking()->IsStopped() &&
IncrementalMarkingLimitReached() == IncrementalMarkingLimit::kNoLimit) {
// We cannot start incremental marking.
@@ -5383,7 +5337,8 @@ bool Heap::ShouldExpandOldGenerationOnAllocationFailure() {
// The kHardLimit means that incremental marking should be started immediately.
Heap::IncrementalMarkingLimit Heap::IncrementalMarkingLimitReached() {
if (!incremental_marking()->CanBeActivated() ||
- PromotedSpaceSizeOfObjects() < IncrementalMarking::kActivationThreshold) {
+ PromotedSpaceSizeOfObjects() <=
+ IncrementalMarking::kActivationThreshold) {
// Incremental marking is disabled or it is too early to start.
return IncrementalMarkingLimit::kNoLimit;
}
@@ -5393,13 +5348,13 @@ Heap::IncrementalMarkingLimit Heap::IncrementalMarkingLimitReached() {
// start marking immediately.
return IncrementalMarkingLimit::kHardLimit;
}
- intptr_t old_generation_space_available = OldGenerationSpaceAvailable();
+ size_t old_generation_space_available = OldGenerationSpaceAvailable();
if (old_generation_space_available > new_space_->Capacity()) {
return IncrementalMarkingLimit::kNoLimit;
}
// We are close to the allocation limit.
// Choose between the hard and the soft limits.
- if (old_generation_space_available <= 0 || ShouldOptimizeForMemoryUsage()) {
+ if (old_generation_space_available == 0 || ShouldOptimizeForMemoryUsage()) {
return IncrementalMarkingLimit::kHardLimit;
}
return IncrementalMarkingLimit::kSoftLimit;
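
Illustrative sketch, not part of the upstream diff: the tail of the decision above (reached once incremental marking can be activated), taking the already-computed sizes and the memory-optimization flag as inputs.

#include <cstddef>

enum class MarkingLimitSketch { kNoLimit, kSoftLimit, kHardLimit };

// Plenty of old-generation headroom: no limit reached. No headroom left (or a
// memory-saving mode): hard limit, start marking immediately. Anything in
// between: soft limit.
MarkingLimitSketch IncrementalMarkingLimitSketch(size_t old_gen_available,
                                                 size_t new_space_capacity,
                                                 bool optimize_for_memory) {
  if (old_gen_available > new_space_capacity)
    return MarkingLimitSketch::kNoLimit;
  if (old_gen_available == 0 || optimize_for_memory)
    return MarkingLimitSketch::kHardLimit;
  return MarkingLimitSketch::kSoftLimit;
}
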
@@ -5434,8 +5389,7 @@ V8_DECLARE_ONCE(initialize_gc_once);
static void InitializeGCOnce() {
Scavenger::Initialize();
- StaticScavengeVisitor<DEFAULT_PROMOTION>::Initialize();
- StaticScavengeVisitor<PROMOTE_MARKED>::Initialize();
+ StaticScavengeVisitor::Initialize();
MarkCompactCollector::Initialize();
}
@@ -5518,7 +5472,7 @@ bool Heap::SetUp() {
mark_compact_collector_ = new MarkCompactCollector(this);
gc_idle_time_handler_ = new GCIdleTimeHandler();
memory_reducer_ = new MemoryReducer(this);
- if (FLAG_track_gc_object_stats) {
+ if (V8_UNLIKELY(FLAG_gc_stats)) {
live_object_stats_ = new ObjectStats(this);
dead_object_stats_ = new ObjectStats(this);
}
@@ -5570,8 +5524,8 @@ void Heap::SetStackLimits() {
}
void Heap::ClearStackLimits() {
- roots_[kStackLimitRootIndex] = Smi::FromInt(0);
- roots_[kRealStackLimitRootIndex] = Smi::FromInt(0);
+ roots_[kStackLimitRootIndex] = Smi::kZero;
+ roots_[kRealStackLimitRootIndex] = Smi::kZero;
}
void Heap::PrintAlloctionsHash() {
@@ -5597,8 +5551,7 @@ void Heap::NotifyDeserializationComplete() {
}
void Heap::SetEmbedderHeapTracer(EmbedderHeapTracer* tracer) {
- DCHECK_NOT_NULL(tracer);
- CHECK_NULL(embedder_heap_tracer_);
+ DCHECK_EQ(gc_state_, HeapState::NOT_IN_GC);
embedder_heap_tracer_ = tracer;
}
@@ -5756,9 +5709,6 @@ void Heap::TearDown() {
delete memory_allocator_;
memory_allocator_ = nullptr;
-
- delete embedder_reference_reporter_;
- embedder_reference_reporter_ = nullptr;
}
@@ -5951,11 +5901,10 @@ void Heap::CheckHandleCount() {
void Heap::ClearRecordedSlot(HeapObject* object, Object** slot) {
if (!InNewSpace(object)) {
- store_buffer()->MoveEntriesToRememberedSet();
Address slot_addr = reinterpret_cast<Address>(slot);
Page* page = Page::FromAddress(slot_addr);
DCHECK_EQ(page->owner()->identity(), OLD_SPACE);
- RememberedSet<OLD_TO_NEW>::Remove(page, slot_addr);
+ store_buffer()->DeleteEntry(slot_addr);
RememberedSet<OLD_TO_OLD>::Remove(page, slot_addr);
}
}
@@ -5963,10 +5912,10 @@ void Heap::ClearRecordedSlot(HeapObject* object, Object** slot) {
void Heap::ClearRecordedSlotRange(Address start, Address end) {
Page* page = Page::FromAddress(start);
if (!page->InNewSpace()) {
- store_buffer()->MoveEntriesToRememberedSet();
DCHECK_EQ(page->owner()->identity(), OLD_SPACE);
- RememberedSet<OLD_TO_NEW>::RemoveRange(page, start, end);
- RememberedSet<OLD_TO_OLD>::RemoveRange(page, start, end);
+ store_buffer()->DeleteEntry(start, end);
+ RememberedSet<OLD_TO_OLD>::RemoveRange(page, start, end,
+ SlotSet::FREE_EMPTY_BUCKETS);
}
}
diff --git a/deps/v8/src/heap/heap.h b/deps/v8/src/heap/heap.h
index ba89686672..013cd9a8fe 100644
--- a/deps/v8/src/heap/heap.h
+++ b/deps/v8/src/heap/heap.h
@@ -48,8 +48,6 @@ using v8::MemoryPressureLevel;
V(Map, one_byte_string_map, OneByteStringMap) \
V(Map, one_byte_internalized_string_map, OneByteInternalizedStringMap) \
V(Map, scope_info_map, ScopeInfoMap) \
- V(Map, module_info_entry_map, ModuleInfoEntryMap) \
- V(Map, module_info_map, ModuleInfoMap) \
V(Map, shared_function_info_map, SharedFunctionInfoMap) \
V(Map, code_map, CodeMap) \
V(Map, function_context_map, FunctionContextMap) \
@@ -62,13 +60,13 @@ using v8::MemoryPressureLevel;
V(FixedArray, empty_literals_array, EmptyLiteralsArray) \
V(FixedArray, empty_type_feedback_vector, EmptyTypeFeedbackVector) \
V(FixedArray, empty_fixed_array, EmptyFixedArray) \
- V(ScopeInfo, empty_scope_info, EmptyScopeInfo) \
- V(FixedArray, cleared_optimized_code_map, ClearedOptimizedCodeMap) \
V(DescriptorArray, empty_descriptor_array, EmptyDescriptorArray) \
/* Entries beyond the first 32 */ \
/* The roots above this line should be boring from a GC point of view. */ \
/* This means they are never in new space and never on a page that is */ \
/* being compacted. */ \
+ /* Empty scope info */ \
+ V(ScopeInfo, empty_scope_info, EmptyScopeInfo) \
/* Oddballs */ \
V(Oddball, no_interceptor_result_sentinel, NoInterceptorResultSentinel) \
V(Oddball, arguments_marker, ArgumentsMarker) \
@@ -92,9 +90,9 @@ using v8::MemoryPressureLevel;
V(Map, unseeded_number_dictionary_map, UnseededNumberDictionaryMap) \
V(Map, sloppy_arguments_elements_map, SloppyArgumentsElementsMap) \
V(Map, message_object_map, JSMessageObjectMap) \
- V(Map, neander_map, NeanderMap) \
V(Map, external_map, ExternalMap) \
V(Map, bytecode_array_map, BytecodeArrayMap) \
+ V(Map, module_info_map, ModuleInfoMap) \
/* String maps */ \
V(Map, native_source_string_map, NativeSourceStringMap) \
V(Map, string_map, StringMap) \
@@ -168,6 +166,8 @@ using v8::MemoryPressureLevel;
V(PropertyCell, has_instance_protector, HasInstanceProtector) \
V(Cell, species_protector, SpeciesProtector) \
V(PropertyCell, string_length_protector, StringLengthProtector) \
+ V(Cell, fast_array_iteration_protector, FastArrayIterationProtector) \
+ V(Cell, array_iterator_protector, ArrayIteratorProtector) \
/* Special numbers */ \
V(HeapNumber, nan_value, NanValue) \
V(HeapNumber, hole_nan_value, HoleNanValue) \
@@ -278,7 +278,6 @@ using v8::MemoryPressureLevel;
V(FixedArrayMap) \
V(CodeMap) \
V(ScopeInfoMap) \
- V(ModuleInfoEntryMap) \
V(ModuleInfoMap) \
V(FixedCOWArrayMap) \
V(FixedDoubleArrayMap) \
@@ -307,7 +306,6 @@ using v8::MemoryPressureLevel;
V(ArgumentsMarkerMap) \
V(JSMessageObjectMap) \
V(ForeignMap) \
- V(NeanderMap) \
V(NanValue) \
V(InfinityValue) \
V(MinusZeroValue) \
@@ -342,8 +340,6 @@ class WeakObjectRetainer;
typedef void (*ObjectSlotCallback)(HeapObject** from, HeapObject* to);
-enum PromotionMode { PROMOTE_MARKED, DEFAULT_PROMOTION };
-
enum ArrayStorageAllocationMode {
DONT_INITIALIZE_ARRAY_ELEMENTS,
INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE
@@ -626,8 +622,8 @@ class Heap {
static const int kTraceRingBufferSize = 512;
static const int kStacktraceBufferSize = 512;
- static const double kMinHeapGrowingFactor;
- static const double kMaxHeapGrowingFactor;
+ V8_EXPORT_PRIVATE static const double kMinHeapGrowingFactor;
+ V8_EXPORT_PRIVATE static const double kMaxHeapGrowingFactor;
static const double kMaxHeapGrowingFactorMemoryConstrained;
static const double kMaxHeapGrowingFactorIdle;
static const double kConservativeHeapGrowingFactor;
@@ -690,7 +686,28 @@ class Heap {
#endif
}
- static double HeapGrowingFactor(double gc_speed, double mutator_speed);
+ static inline bool IsYoungGenerationCollector(GarbageCollector collector) {
+ return collector == SCAVENGER || collector == MINOR_MARK_COMPACTOR;
+ }
+
+ static inline GarbageCollector YoungGenerationCollector() {
+ return (FLAG_minor_mc) ? MINOR_MARK_COMPACTOR : SCAVENGER;
+ }
+
+ static inline const char* CollectorName(GarbageCollector collector) {
+ switch (collector) {
+ case SCAVENGER:
+ return "Scavenger";
+ case MARK_COMPACTOR:
+ return "Mark-Compact";
+ case MINOR_MARK_COMPACTOR:
+ return "Minor Mark-Compact";
+ }
+ return "Unknown collector";
+ }
+
+ V8_EXPORT_PRIVATE static double HeapGrowingFactor(double gc_speed,
+ double mutator_speed);
// Copy block of memory from src to dst. Size of block should be aligned
// by pointer size.
@@ -835,11 +852,8 @@ class Heap {
// An object should be promoted if the object has survived a
// scavenge operation.
- template <PromotionMode promotion_mode>
inline bool ShouldBePromoted(Address old_address, int object_size);
- inline PromotionMode CurrentPromotionMode();
-
void ClearNormalizedMapCaches();
void IncrementDeferredCount(v8::Isolate::UseCounterFeature feature);
@@ -916,7 +930,7 @@ class Heap {
bool HasLowAllocationRate();
bool HasHighFragmentation();
- bool HasHighFragmentation(intptr_t used, intptr_t committed);
+ bool HasHighFragmentation(size_t used, size_t committed);
void ActivateMemoryReducerIfNeeded();
@@ -940,8 +954,8 @@ class Heap {
// Configure heap size in MB before setup. Return false if the heap has been
// set up already.
- bool ConfigureHeap(int max_semi_space_size, int max_old_space_size,
- int max_executable_size, size_t code_range_size);
+ bool ConfigureHeap(size_t max_semi_space_size, size_t max_old_space_size,
+ size_t max_executable_size, size_t code_range_size);
bool ConfigureHeapDefault();
// Prepares the heap, setting up memory areas that are needed in the isolate
@@ -952,6 +966,9 @@ class Heap {
// Returns whether it succeeded.
bool CreateHeapObjects();
+ // Create ObjectStats if live_object_stats_ or dead_object_stats_ are nullptr.
+ V8_INLINE void CreateObjectStats();
+
// Destroys all memory allocated by the heap.
void TearDown();
@@ -1023,6 +1040,14 @@ class Heap {
Handle<Object> root_handle(RootListIndex index) {
return Handle<Object>(&roots_[index]);
}
+ template <typename T>
+ bool IsRootHandle(Handle<T> handle, RootListIndex* index) const {
+ Object** const handle_location = bit_cast<Object**>(handle.address());
+ if (handle_location >= &roots_[kRootListLength]) return false;
+ if (handle_location < &roots_[0]) return false;
+ *index = static_cast<RootListIndex>(handle_location - &roots_[0]);
+ return true;
+ }
// Generated code can embed this address to get access to the roots.
Object** roots_array_start() { return roots_; }
@@ -1127,13 +1152,8 @@ class Heap {
void IterateWeakRoots(ObjectVisitor* v, VisitMode mode);
// Iterate pointers of promoted objects.
- void IteratePromotedObject(HeapObject* target, int size,
- bool was_marked_black,
- ObjectSlotCallback callback);
-
- void IteratePromotedObjectPointers(HeapObject* object, Address start,
- Address end, bool record_slots,
- ObjectSlotCallback callback);
+ void IterateAndScavengePromotedObject(HeapObject* target, int size,
+ bool was_marked_black);
// ===========================================================================
// Store buffer API. =========================================================
@@ -1199,10 +1219,6 @@ class Heap {
EmbedderHeapTracer* embedder_heap_tracer() { return embedder_heap_tracer_; }
- EmbedderReachableReferenceReporter* embedder_reachable_reference_reporter() {
- return embedder_reference_reporter_;
- }
-
size_t wrappers_to_trace() { return wrappers_to_trace_.size(); }
// ===========================================================================
@@ -1274,20 +1290,20 @@ class Heap {
// ===========================================================================
// Returns the maximum amount of memory reserved for the heap.
- intptr_t MaxReserved() {
+ size_t MaxReserved() {
return 2 * max_semi_space_size_ + max_old_generation_size_;
}
- int MaxSemiSpaceSize() { return max_semi_space_size_; }
- int InitialSemiSpaceSize() { return initial_semispace_size_; }
- intptr_t MaxOldGenerationSize() { return max_old_generation_size_; }
- intptr_t MaxExecutableSize() { return max_executable_size_; }
+ size_t MaxSemiSpaceSize() { return max_semi_space_size_; }
+ size_t InitialSemiSpaceSize() { return initial_semispace_size_; }
+ size_t MaxOldGenerationSize() { return max_old_generation_size_; }
+ size_t MaxExecutableSize() { return max_executable_size_; }
// Returns the capacity of the heap in bytes w/o growing. Heap grows when
// more spaces are needed until it reaches the limit.
- intptr_t Capacity();
+ size_t Capacity();
// Returns the capacity of the old generation.
- intptr_t OldGenerationCapacity();
+ size_t OldGenerationCapacity();
// Returns the amount of memory currently committed for the heap.
size_t CommittedMemory();
@@ -1311,28 +1327,26 @@ class Heap {
// Returns the available bytes in space w/o growing.
// Heap doesn't guarantee that it can allocate an object that requires
// all available bytes. Check MaxHeapObjectSize() instead.
- intptr_t Available();
+ size_t Available();
// Returns of size of all objects residing in the heap.
- intptr_t SizeOfObjects();
+ size_t SizeOfObjects();
void UpdateSurvivalStatistics(int start_new_space_size);
- inline void IncrementPromotedObjectsSize(intptr_t object_size) {
- DCHECK_GE(object_size, 0);
+ inline void IncrementPromotedObjectsSize(size_t object_size) {
promoted_objects_size_ += object_size;
}
- inline intptr_t promoted_objects_size() { return promoted_objects_size_; }
+ inline size_t promoted_objects_size() { return promoted_objects_size_; }
- inline void IncrementSemiSpaceCopiedObjectSize(intptr_t object_size) {
- DCHECK_GE(object_size, 0);
+ inline void IncrementSemiSpaceCopiedObjectSize(size_t object_size) {
semi_space_copied_object_size_ += object_size;
}
- inline intptr_t semi_space_copied_object_size() {
+ inline size_t semi_space_copied_object_size() {
return semi_space_copied_object_size_;
}
- inline intptr_t SurvivedNewSpaceObjectSize() {
+ inline size_t SurvivedNewSpaceObjectSize() {
return promoted_objects_size_ + semi_space_copied_object_size_;
}
@@ -1342,20 +1356,13 @@ class Heap {
inline void IncrementNodesPromoted() { nodes_promoted_++; }
- inline void IncrementYoungSurvivorsCounter(intptr_t survived) {
- DCHECK_GE(survived, 0);
+ inline void IncrementYoungSurvivorsCounter(size_t survived) {
survived_last_scavenge_ = survived;
survived_since_last_expansion_ += survived;
}
- inline intptr_t PromotedTotalSize() {
- int64_t total = PromotedSpaceSizeOfObjects() + PromotedExternalMemorySize();
- if (total > std::numeric_limits<intptr_t>::max()) {
- // TODO(erikcorry): Use uintptr_t everywhere we do heap size calculations.
- return std::numeric_limits<intptr_t>::max();
- }
- if (total < 0) return 0;
- return static_cast<intptr_t>(total);
+ inline uint64_t PromotedTotalSize() {
+ return PromotedSpaceSizeOfObjects() + PromotedExternalMemorySize();
}
inline void UpdateNewSpaceAllocationCounter();
@@ -1389,7 +1396,7 @@ class Heap {
int gc_count() const { return gc_count_; }
// Returns the size of objects residing in non new spaces.
- intptr_t PromotedSpaceSizeOfObjects();
+ size_t PromotedSpaceSizeOfObjects();
double total_regexp_code_generated() { return total_regexp_code_generated_; }
void IncreaseTotalRegexpCodeGenerated(int size) {
@@ -1439,6 +1446,10 @@ class Heap {
// ArrayBuffer tracking. =====================================================
// ===========================================================================
+ // TODO(gc): API usability: encapsulate mutation of JSArrayBuffer::is_external
+ // in the registration/unregistration APIs. Consider dropping the "New" from
+ // "RegisterNewArrayBuffer" because one can re-register a previously
+ // unregistered buffer, too, and the name is confusing.
void RegisterNewArrayBuffer(JSArrayBuffer* buffer);
void UnregisterArrayBuffer(JSArrayBuffer* buffer);
@@ -1770,6 +1781,8 @@ class Heap {
// Performs a major collection in the whole heap.
void MarkCompact();
+ // Performs a minor collection of just the young generation.
+ void MinorMarkCompact();
// Code to be run before and after mark-compact.
void MarkCompactPrologue();
@@ -1778,8 +1791,7 @@ class Heap {
// Performs a minor collection in new generation.
void Scavenge();
- Address DoScavenge(ObjectVisitor* scavenge_visitor, Address new_space_front,
- PromotionMode promotion_mode);
+ Address DoScavenge(ObjectVisitor* scavenge_visitor, Address new_space_front);
void UpdateNewSpaceReferencesInExternalStringTable(
ExternalStringTableUpdaterCallback updater_func);
@@ -1797,8 +1809,27 @@ class Heap {
// GC statistics. ============================================================
// ===========================================================================
- inline intptr_t OldGenerationSpaceAvailable() {
- return old_generation_allocation_limit_ - PromotedTotalSize();
+ inline size_t OldGenerationSpaceAvailable() {
+ if (old_generation_allocation_limit_ <= PromotedTotalSize()) return 0;
+ return old_generation_allocation_limit_ -
+ static_cast<size_t>(PromotedTotalSize());
+ }
+
+ // We allow incremental marking to overshoot the allocation limit for
+  // performance reasons. If the overshoot is too large, then we are more
+ // eager to finalize incremental marking.
+ inline bool AllocationLimitOvershotByLargeMargin() {
+ // This guards against too eager finalization in small heaps.
+ // The number is chosen based on v8.browsing_mobile on Nexus 7v2.
+ size_t kMarginForSmallHeaps = 32u * MB;
+ if (old_generation_allocation_limit_ >= PromotedTotalSize()) return false;
+ uint64_t overshoot = PromotedTotalSize() - old_generation_allocation_limit_;
+ // Overshoot margin is 50% of allocation limit or half-way to the max heap
+ // with special handling of small heaps.
+ uint64_t margin =
+ Min(Max(old_generation_allocation_limit_ / 2, kMarginForSmallHeaps),
+ (max_old_generation_size_ - old_generation_allocation_limit_) / 2);
+ return overshoot >= margin;
}
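
Illustrative sketch, not part of the upstream diff: the overshoot test above as standalone C++, keeping the 32 MB small-heap margin.

#include <algorithm>
#include <cstdint>

// Finalize marking eagerly once the overshoot past the limit exceeds
// min(max(limit / 2, 32 MB), (max_old_gen - limit) / 2).
bool OvershotByLargeMarginSketch(uint64_t promoted_total_size,
                                 uint64_t allocation_limit,
                                 uint64_t max_old_generation_size) {
  const uint64_t kMarginForSmallHeaps = 32u * 1024 * 1024;
  if (allocation_limit >= promoted_total_size) return false;
  const uint64_t overshoot = promoted_total_size - allocation_limit;
  const uint64_t margin =
      std::min(std::max(allocation_limit / 2, kMarginForSmallHeaps),
               (max_old_generation_size - allocation_limit) / 2);
  return overshoot >= margin;
}
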
void UpdateTotalGCTime(double duration);
@@ -1811,23 +1842,21 @@ class Heap {
// Decrease the allocation limit if the new limit based on the given
// parameters is lower than the current limit.
- void DampenOldGenerationAllocationLimit(intptr_t old_gen_size,
- double gc_speed,
+ void DampenOldGenerationAllocationLimit(size_t old_gen_size, double gc_speed,
double mutator_speed);
-
// Calculates the allocation limit based on a given growing factor and a
// given old generation size.
- intptr_t CalculateOldGenerationAllocationLimit(double factor,
- intptr_t old_gen_size);
+ size_t CalculateOldGenerationAllocationLimit(double factor,
+ size_t old_gen_size);
// Sets the allocation limit to trigger the next full garbage collection.
- void SetOldGenerationAllocationLimit(intptr_t old_gen_size, double gc_speed,
+ void SetOldGenerationAllocationLimit(size_t old_gen_size, double gc_speed,
double mutator_speed);
- intptr_t MinimumAllocationLimitGrowingStep();
+ size_t MinimumAllocationLimitGrowingStep();
- intptr_t old_generation_allocation_limit() const {
+ size_t old_generation_allocation_limit() const {
return old_generation_allocation_limit_;
}
@@ -1842,7 +1871,7 @@ class Heap {
return OldGenerationCapacity() + slack >= MaxOldGenerationSize();
}
- bool ShouldExpandOldGenerationOnAllocationFailure();
+ bool ShouldExpandOldGenerationOnSlowAllocation();
enum class IncrementalMarkingLimit { kNoLimit, kSoftLimit, kHardLimit };
IncrementalMarkingLimit IncrementalMarkingLimitReached();
@@ -2096,20 +2125,20 @@ class Heap {
Object* roots_[kRootListLength];
size_t code_range_size_;
- int max_semi_space_size_;
- int initial_semispace_size_;
- intptr_t max_old_generation_size_;
- intptr_t initial_old_generation_size_;
+ size_t max_semi_space_size_;
+ size_t initial_semispace_size_;
+ size_t max_old_generation_size_;
+ size_t initial_old_generation_size_;
bool old_generation_size_configured_;
- intptr_t max_executable_size_;
+ size_t max_executable_size_;
size_t maximum_committed_;
// For keeping track of how much data has survived
// scavenge since last new space expansion.
- intptr_t survived_since_last_expansion_;
+ size_t survived_since_last_expansion_;
// ... and since the last scavenge.
- intptr_t survived_last_scavenge_;
+ size_t survived_last_scavenge_;
// This is not the depth of nested AlwaysAllocateScope's but rather a single
// count, as scopes can be acquired from multiple tasks (read: threads).
@@ -2141,7 +2170,7 @@ class Heap {
Address new_space_top_after_last_gc_;
// Returns the amount of external memory registered since last global gc.
- int64_t PromotedExternalMemorySize();
+ uint64_t PromotedExternalMemorySize();
// How many "runtime allocations" happened.
uint32_t allocations_count_;
@@ -2170,7 +2199,7 @@ class Heap {
// is checked when we have already decided to do a GC to help determine
// which collector to invoke, before expanding a paged space in the old
// generation and on every allocation in large object space.
- intptr_t old_generation_allocation_limit_;
+ size_t old_generation_allocation_limit_;
// Indicates that inline bump-pointer allocation has been globally disabled
// for all spaces. This is used to disable allocations in generated code.
@@ -2200,11 +2229,11 @@ class Heap {
GCTracer* tracer_;
- intptr_t promoted_objects_size_;
+ size_t promoted_objects_size_;
double promotion_ratio_;
double promotion_rate_;
- intptr_t semi_space_copied_object_size_;
- intptr_t previous_semi_space_copied_object_size_;
+ size_t semi_space_copied_object_size_;
+ size_t previous_semi_space_copied_object_size_;
double semi_space_copied_rate_;
int nodes_died_in_new_space_;
int nodes_copied_in_new_space_;
@@ -2310,11 +2339,11 @@ class Heap {
int heap_iterator_depth_;
EmbedderHeapTracer* embedder_heap_tracer_;
- EmbedderReachableReferenceReporter* embedder_reference_reporter_;
std::vector<std::pair<void*, void*>> wrappers_to_trace_;
// Used for testing purposes.
bool force_oom_;
+ bool delay_sweeper_tasks_for_testing_;
// Classes in "heap" can be friends.
friend class AlwaysAllocateScope;
@@ -2324,7 +2353,6 @@ class Heap {
friend class IdleScavengeObserver;
friend class IncrementalMarking;
friend class IncrementalMarkingJob;
- friend class IteratePromotedObjectsVisitor;
friend class LargeObjectSpace;
friend class MarkCompactCollector;
friend class MarkCompactMarkingVisitor;
@@ -2633,18 +2661,6 @@ class AllocationObserver {
DISALLOW_COPY_AND_ASSIGN(AllocationObserver);
};
-class TracePossibleWrapperReporter : public EmbedderReachableReferenceReporter {
- public:
- explicit TracePossibleWrapperReporter(Heap* heap) : heap_(heap) {}
- void ReportExternalReference(Value* object) override {
- heap_->RegisterExternallyReferencedObject(
- reinterpret_cast<Object**>(object));
- }
-
- private:
- Heap* heap_;
-};
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/heap/incremental-marking.cc b/deps/v8/src/heap/incremental-marking.cc
index 99be9d0123..4b1d7712a7 100644
--- a/deps/v8/src/heap/incremental-marking.cc
+++ b/deps/v8/src/heap/incremental-marking.cc
@@ -519,17 +519,15 @@ void IncrementalMarking::StartMarking() {
"[IncrementalMarking] Start marking\n");
}
- is_compacting_ = !FLAG_never_compact &&
- heap_->mark_compact_collector()->StartCompaction(
- MarkCompactCollector::INCREMENTAL_COMPACTION);
+ is_compacting_ =
+ !FLAG_never_compact && heap_->mark_compact_collector()->StartCompaction();
state_ = MARKING;
if (heap_->UsingEmbedderHeapTracer()) {
TRACE_GC(heap()->tracer(),
GCTracer::Scope::MC_INCREMENTAL_WRAPPER_PROLOGUE);
- heap_->embedder_heap_tracer()->TracePrologue(
- heap_->embedder_reachable_reference_reporter());
+ heap_->embedder_heap_tracer()->TracePrologue();
}
RecordWriteStub::Mode mode = is_compacting_
@@ -538,8 +536,7 @@ void IncrementalMarking::StartMarking() {
PatchIncrementalMarkingRecordWriteStubs(heap_, mode);
- heap_->mark_compact_collector()->EnsureMarkingDequeIsCommittedAndInitialize(
- MarkCompactCollector::kMaxMarkingDequeSize);
+ heap_->mark_compact_collector()->marking_deque()->StartUsing();
ActivateIncrementalWriteBarrier();
@@ -587,9 +584,6 @@ void IncrementalMarking::FinishBlackAllocation() {
}
void IncrementalMarking::AbortBlackAllocation() {
- for (Page* page : *heap()->old_space()) {
- page->ReleaseBlackAreaEndMarkerMap();
- }
if (FLAG_trace_incremental_marking) {
heap()->isolate()->PrintWithTimestamp(
"[IncrementalMarking] Black allocation aborted\n");
@@ -628,9 +622,9 @@ void IncrementalMarking::ProcessWeakCells() {
Object* the_hole_value = heap()->the_hole_value();
Object* weak_cell_obj = heap()->encountered_weak_cells();
- Object* weak_cell_head = Smi::FromInt(0);
+ Object* weak_cell_head = Smi::kZero;
WeakCell* prev_weak_cell_obj = NULL;
- while (weak_cell_obj != Smi::FromInt(0)) {
+ while (weak_cell_obj != Smi::kZero) {
WeakCell* weak_cell = reinterpret_cast<WeakCell*>(weak_cell_obj);
// We do not insert cleared weak cells into the list, so the value
// cannot be a Smi here.
@@ -648,7 +642,7 @@ void IncrementalMarking::ProcessWeakCells() {
weak_cell_obj = weak_cell->next();
weak_cell->clear_next(the_hole_value);
} else {
- if (weak_cell_head == Smi::FromInt(0)) {
+ if (weak_cell_head == Smi::kZero) {
weak_cell_head = weak_cell;
}
prev_weak_cell_obj = weak_cell;
@@ -1053,7 +1047,7 @@ void IncrementalMarking::FinalizeSweeping() {
DCHECK(state_ == SWEEPING);
if (heap_->mark_compact_collector()->sweeping_in_progress() &&
(!FLAG_concurrent_sweeping ||
- heap_->mark_compact_collector()->sweeper().IsSweepingCompleted())) {
+ !heap_->mark_compact_collector()->sweeper().AreSweeperTasksRunning())) {
heap_->mark_compact_collector()->EnsureSweepingCompleted();
}
if (!heap_->mark_compact_collector()->sweeping_in_progress()) {
diff --git a/deps/v8/src/heap/incremental-marking.h b/deps/v8/src/heap/incremental-marking.h
index c2290c4d82..7ce0ae2379 100644
--- a/deps/v8/src/heap/incremental-marking.h
+++ b/deps/v8/src/heap/incremental-marking.h
@@ -66,6 +66,11 @@ class IncrementalMarking {
return request_type_ == FINALIZATION && !finalize_marking_completed_;
}
+ inline bool NeedsFinalization() {
+ return IsMarking() &&
+ (request_type_ == FINALIZATION || request_type_ == COMPLETE_MARKING);
+ }
+
GCRequestType request_type() const { return request_type_; }
void reset_request_type() { request_type_ = NONE; }
diff --git a/deps/v8/src/heap/mark-compact-inl.h b/deps/v8/src/heap/mark-compact-inl.h
index fe71fb1177..784a76f8bd 100644
--- a/deps/v8/src/heap/mark-compact-inl.h
+++ b/deps/v8/src/heap/mark-compact-inl.h
@@ -163,12 +163,14 @@ HeapObject* LiveObjectIterator<T>::Next() {
current_cell_ = *it_.CurrentCell();
}
+ Map* map = nullptr;
if (current_cell_ & second_bit_index) {
// We found a black object. If the black object is within a black area,
// make sure that we skip all set bits in the black area until the
// object ends.
HeapObject* black_object = HeapObject::FromAddress(addr);
- Address end = addr + black_object->Size() - kPointerSize;
+ map = base::NoBarrierAtomicValue<Map*>::FromAddress(addr)->Value();
+ Address end = addr + black_object->SizeFromMap(map) - kPointerSize;
// One word filler objects do not borrow the second mark bit. We have
// to jump over the advancing and clearing part.
// Note that we know that we are at a one word filler when
@@ -198,9 +200,9 @@ HeapObject* LiveObjectIterator<T>::Next() {
// We found a live object.
if (object != nullptr) {
- if (object->IsFiller()) {
- // Black areas together with slack tracking may result in black filler
- // objects. We filter these objects out in the iterator.
+ if (map != nullptr && map == heap()->one_pointer_filler_map()) {
+ // Black areas together with slack tracking may result in black one
+ // word filler objects. We filter these objects out in the iterator.
object = nullptr;
} else {
break;
diff --git a/deps/v8/src/heap/mark-compact.cc b/deps/v8/src/heap/mark-compact.cc
index 7e5ef96fc9..88e6983035 100644
--- a/deps/v8/src/heap/mark-compact.cc
+++ b/deps/v8/src/heap/mark-compact.cc
@@ -25,6 +25,7 @@
#include "src/heap/spaces-inl.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
+#include "src/tracing/tracing-category-observer.h"
#include "src/utils-inl.h"
#include "src/v8.h"
@@ -58,8 +59,7 @@ MarkCompactCollector::MarkCompactCollector(Heap* heap)
compacting_(false),
black_allocation_(false),
have_code_to_deoptimize_(false),
- marking_deque_memory_(NULL),
- marking_deque_memory_committed_(0),
+ marking_deque_(heap),
code_flusher_(nullptr),
sweeper_(heap) {
}
@@ -240,9 +240,7 @@ void MarkCompactCollector::SetUp() {
DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
DCHECK(strcmp(Marking::kGreyBitPattern, "10") == 0);
DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
-
- EnsureMarkingDequeIsReserved();
- EnsureMarkingDequeIsCommitted(kMinMarkingDequeSize);
+ marking_deque()->SetUp();
if (FLAG_flush_code) {
code_flusher_ = new CodeFlusher(isolate());
@@ -255,7 +253,7 @@ void MarkCompactCollector::SetUp() {
void MarkCompactCollector::TearDown() {
AbortCompaction();
- delete marking_deque_memory_;
+ marking_deque()->TearDown();
delete code_flusher_;
}
@@ -276,8 +274,7 @@ static void TraceFragmentation(PagedSpace* space) {
static_cast<int>(free), static_cast<double>(free) * 100 / reserved);
}
-
-bool MarkCompactCollector::StartCompaction(CompactionMode mode) {
+bool MarkCompactCollector::StartCompaction() {
if (!compacting_) {
DCHECK(evacuation_candidates_.length() == 0);
@@ -293,33 +290,12 @@ bool MarkCompactCollector::StartCompaction(CompactionMode mode) {
TraceFragmentation(heap()->map_space());
}
- heap()->old_space()->EvictEvacuationCandidatesFromLinearAllocationArea();
- heap()->code_space()->EvictEvacuationCandidatesFromLinearAllocationArea();
-
compacting_ = evacuation_candidates_.length() > 0;
}
return compacting_;
}
-void MarkCompactCollector::ClearInvalidRememberedSetSlots() {
- {
- TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_STORE_BUFFER);
- RememberedSet<OLD_TO_NEW>::ClearInvalidSlots(heap());
- }
-// There is not need to filter the old to old set because
-// it is completely cleared after the mark-compact GC.
-// The slots that become invalid due to runtime transitions are
-// cleared eagerly immediately after the transition.
-
-#ifdef VERIFY_HEAP
- if (FLAG_verify_heap) {
- RememberedSet<OLD_TO_NEW>::VerifyValidSlots(heap());
- RememberedSet<OLD_TO_OLD>::VerifyValidSlots(heap());
- }
-#endif
-}
-
void MarkCompactCollector::CollectGarbage() {
// Make sure that Prepare() has been called. The individual steps below will
// update the state as they proceed.
@@ -339,7 +315,7 @@ void MarkCompactCollector::CollectGarbage() {
}
#endif
- SweepSpaces();
+ StartSweepSpaces();
EvacuateNewSpaceAndCandidates();
@@ -469,22 +445,20 @@ void MarkCompactCollector::Sweeper::StartSweeping() {
std::sort(sweeping_list_[space].begin(), sweeping_list_[space].end(),
[](Page* a, Page* b) { return a->LiveBytes() < b->LiveBytes(); });
});
- if (FLAG_concurrent_sweeping) {
+}
+
+void MarkCompactCollector::Sweeper::StartSweeperTasks() {
+ if (FLAG_concurrent_sweeping && sweeping_in_progress_) {
ForAllSweepingSpaces([this](AllocationSpace space) {
if (space == NEW_SPACE) return;
- StartSweepingHelper(space);
+ num_sweeping_tasks_.Increment(1);
+ V8::GetCurrentPlatform()->CallOnBackgroundThread(
+ new SweeperTask(this, &pending_sweeper_tasks_semaphore_, space),
+ v8::Platform::kShortRunningTask);
});
}
}
-void MarkCompactCollector::Sweeper::StartSweepingHelper(
- AllocationSpace space_to_start) {
- num_sweeping_tasks_.Increment(1);
- V8::GetCurrentPlatform()->CallOnBackgroundThread(
- new SweeperTask(this, &pending_sweeper_tasks_semaphore_, space_to_start),
- v8::Platform::kShortRunningTask);
-}
-
void MarkCompactCollector::Sweeper::SweepOrWaitUntilSweepingCompleted(
Page* page) {
if (!page->SweepingDone()) {
@@ -499,7 +473,8 @@ void MarkCompactCollector::Sweeper::SweepOrWaitUntilSweepingCompleted(
}
void MarkCompactCollector::SweepAndRefill(CompactionSpace* space) {
- if (FLAG_concurrent_sweeping && !sweeper().IsSweepingCompleted()) {
+ if (FLAG_concurrent_sweeping &&
+ !sweeper().IsSweepingCompleted(space->identity())) {
sweeper().ParallelSweepSpace(space->identity(), 0);
space->RefillFreeList();
}
@@ -519,10 +494,11 @@ void MarkCompactCollector::Sweeper::EnsureCompleted() {
// If sweeping is not completed or not running at all, we try to complete it
// here.
- if (!FLAG_concurrent_sweeping || !IsSweepingCompleted()) {
- ForAllSweepingSpaces(
- [this](AllocationSpace space) { ParallelSweepSpace(space, 0); });
- }
+ ForAllSweepingSpaces([this](AllocationSpace space) {
+ if (!FLAG_concurrent_sweeping || !this->IsSweepingCompleted(space)) {
+ ParallelSweepSpace(space, 0);
+ }
+ });
if (FLAG_concurrent_sweeping) {
while (num_sweeping_tasks_.Value() > 0) {
@@ -537,13 +513,12 @@ void MarkCompactCollector::Sweeper::EnsureCompleted() {
}
DCHECK(sweeping_list_[space].empty());
});
- late_pages_ = false;
sweeping_in_progress_ = false;
}
void MarkCompactCollector::Sweeper::EnsureNewSpaceCompleted() {
if (!sweeping_in_progress_) return;
- if (!FLAG_concurrent_sweeping || !IsSweepingCompleted()) {
+ if (!FLAG_concurrent_sweeping || !IsSweepingCompleted(NEW_SPACE)) {
for (Page* p : *heap_->new_space()) {
SweepOrWaitUntilSweepingCompleted(p);
}
@@ -565,13 +540,20 @@ void MarkCompactCollector::EnsureSweepingCompleted() {
#endif
}
-bool MarkCompactCollector::Sweeper::IsSweepingCompleted() {
+bool MarkCompactCollector::Sweeper::AreSweeperTasksRunning() {
DCHECK(FLAG_concurrent_sweeping);
while (pending_sweeper_tasks_semaphore_.WaitFor(
base::TimeDelta::FromSeconds(0))) {
num_sweeping_tasks_.Increment(-1);
}
- return num_sweeping_tasks_.Value() == 0;
+ return num_sweeping_tasks_.Value() != 0;
+}
+
+bool MarkCompactCollector::Sweeper::IsSweepingCompleted(AllocationSpace space) {
+ DCHECK(FLAG_concurrent_sweeping);
+ if (AreSweeperTasksRunning()) return false;
+ base::LockGuard<base::Mutex> guard(&mutex_);
+ return sweeping_list_[space].empty();
}
const char* AllocationSpaceName(AllocationSpace space) {
@@ -593,22 +575,21 @@ const char* AllocationSpaceName(AllocationSpace space) {
return NULL;
}
-
void MarkCompactCollector::ComputeEvacuationHeuristics(
- int area_size, int* target_fragmentation_percent,
- int* max_evacuated_bytes) {
+ size_t area_size, int* target_fragmentation_percent,
+ size_t* max_evacuated_bytes) {
// For memory reducing and optimize for memory mode we directly define both
// constants.
const int kTargetFragmentationPercentForReduceMemory = 20;
- const int kMaxEvacuatedBytesForReduceMemory = 12 * MB;
+ const size_t kMaxEvacuatedBytesForReduceMemory = 12 * MB;
const int kTargetFragmentationPercentForOptimizeMemory = 20;
- const int kMaxEvacuatedBytesForOptimizeMemory = 6 * MB;
+ const size_t kMaxEvacuatedBytesForOptimizeMemory = 6 * MB;
// For regular mode (which is latency critical) we define less aggressive
// defaults to start and switch to a trace-based (using compaction speed)
// approach as soon as we have enough samples.
const int kTargetFragmentationPercent = 70;
- const int kMaxEvacuatedBytes = 4 * MB;
+ const size_t kMaxEvacuatedBytes = 4 * MB;
// Time to take for a single area (=payload of page). Used as soon as there
// exist enough compaction speed samples.
const float kTargetMsPerArea = .5;
@@ -647,15 +628,22 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
DCHECK(space->identity() == OLD_SPACE || space->identity() == CODE_SPACE);
int number_of_pages = space->CountTotalPages();
- int area_size = space->AreaSize();
+ size_t area_size = space->AreaSize();
// Pairs of (live_bytes_in_page, page).
- typedef std::pair<int, Page*> LiveBytesPagePair;
+ typedef std::pair<size_t, Page*> LiveBytesPagePair;
std::vector<LiveBytesPagePair> pages;
pages.reserve(number_of_pages);
+ DCHECK(!sweeping_in_progress());
+ DCHECK(!FLAG_concurrent_sweeping ||
+ sweeper().IsSweepingCompleted(space->identity()));
+ Page* owner_of_linear_allocation_area =
+ space->top() == space->limit()
+ ? nullptr
+ : Page::FromAllocationAreaAddress(space->top());
for (Page* p : *space) {
- if (p->NeverEvacuate()) continue;
+ if (p->NeverEvacuate() || p == owner_of_linear_allocation_area) continue;
// Invariant: Evacuation candidates are just created when marking is
// started. This means that sweeping has finished. Furthermore, at the end
// of a GC all evacuation candidates are cleared and their slot buffers are
@@ -669,7 +657,7 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
}
int candidate_count = 0;
- int total_live_bytes = 0;
+ size_t total_live_bytes = 0;
const bool reduce_memory = heap()->ShouldReduceMemory();
if (FLAG_manual_evacuation_candidates_selection) {
@@ -705,12 +693,12 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
// them starting with the page with the most free memory, adding them to the
// set of evacuation candidates as long as both conditions (fragmentation
// and quota) hold.
- int max_evacuated_bytes;
+ size_t max_evacuated_bytes;
int target_fragmentation_percent;
ComputeEvacuationHeuristics(area_size, &target_fragmentation_percent,
&max_evacuated_bytes);
- const intptr_t free_bytes_threshold =
+ const size_t free_bytes_threshold =
target_fragmentation_percent * (area_size / 100);
// Sort pages from the most free to the least free, then select
@@ -723,8 +711,9 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
return a.first < b.first;
});
for (size_t i = 0; i < pages.size(); i++) {
- int live_bytes = pages[i].first;
- int free_bytes = area_size - live_bytes;
+ size_t live_bytes = pages[i].first;
+ DCHECK_GE(area_size, live_bytes);
+ size_t free_bytes = area_size - live_bytes;
if (FLAG_always_compact ||
((free_bytes >= free_bytes_threshold) &&
((total_live_bytes + live_bytes) <= max_evacuated_bytes))) {
@@ -733,10 +722,10 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
}
if (FLAG_trace_fragmentation_verbose) {
PrintIsolate(isolate(),
- "compaction-selection-page: space=%s free_bytes_page=%d "
- "fragmentation_limit_kb=%" V8PRIdPTR
- " fragmentation_limit_percent=%d sum_compaction_kb=%d "
- "compaction_limit_kb=%d\n",
+ "compaction-selection-page: space=%s free_bytes_page=%zu "
+ "fragmentation_limit_kb=%" PRIuS
+ " fragmentation_limit_percent=%d sum_compaction_kb=%zu "
+ "compaction_limit_kb=%zu\n",
AllocationSpaceName(space->identity()), free_bytes / KB,
free_bytes_threshold / KB, target_fragmentation_percent,
total_live_bytes / KB, max_evacuated_bytes / KB);
@@ -744,7 +733,8 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
}
  // How many pages we will allocate for the evacuated objects
// in the worst case: ceil(total_live_bytes / area_size)
- int estimated_new_pages = (total_live_bytes + area_size - 1) / area_size;
+ int estimated_new_pages =
+ static_cast<int>((total_live_bytes + area_size - 1) / area_size);
DCHECK_LE(estimated_new_pages, candidate_count);
int estimated_released_pages = candidate_count - estimated_new_pages;
// Avoid (compact -> expand) cycles.
@@ -759,7 +749,7 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
if (FLAG_trace_fragmentation) {
PrintIsolate(isolate(),
"compaction-selection: space=%s reduce_memory=%d pages=%d "
- "total_live_bytes=%d\n",
+ "total_live_bytes=%zu\n",
AllocationSpaceName(space->identity()), reduce_memory,
candidate_count, total_live_bytes / KB);
}
@@ -794,6 +784,10 @@ void MarkCompactCollector::Prepare() {
EnsureSweepingCompleted();
}
+ if (heap()->incremental_marking()->IsSweeping()) {
+ heap()->incremental_marking()->Stop();
+ }
+
// If concurrent unmapping tasks are still running, we should wait for
// them here.
heap()->memory_allocator()->unmapper()->WaitUntilCompleted();
@@ -810,14 +804,14 @@ void MarkCompactCollector::Prepare() {
if (heap_->UsingEmbedderHeapTracer()) {
heap_->embedder_heap_tracer()->AbortTracing();
}
+ marking_deque()->Clear();
was_marked_incrementally_ = false;
}
if (!was_marked_incrementally_) {
if (heap_->UsingEmbedderHeapTracer()) {
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WRAPPER_PROLOGUE);
- heap_->embedder_heap_tracer()->TracePrologue(
- heap_->embedder_reachable_reference_reporter());
+ heap_->embedder_heap_tracer()->TracePrologue();
}
}
@@ -828,7 +822,7 @@ void MarkCompactCollector::Prepare() {
  // Don't start compaction if we are in the middle of an incremental
// marking cycle. We did not collect any slots.
if (!FLAG_never_compact && !was_marked_incrementally_) {
- StartCompaction(NON_INCREMENTAL_COMPACTION);
+ StartCompaction();
}
PagedSpaces spaces(heap());
@@ -849,10 +843,8 @@ void MarkCompactCollector::Prepare() {
void MarkCompactCollector::Finish() {
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_FINISH);
- if (sweeper().contains_late_pages() && FLAG_concurrent_sweeping) {
- // If we added some more pages during MC, we need to start at least one
- // more task as all other tasks might already be finished.
- sweeper().StartSweepingHelper(OLD_SPACE);
+ if (!heap()->delay_sweeper_tasks_for_testing_) {
+ sweeper().StartSweeperTasks();
}
// The hashing of weak_object_to_code_table is no longer valid.
@@ -1647,7 +1639,7 @@ class MarkCompactCollector::EvacuateVisitorBase
DCHECK_OBJECT_SIZE(size);
DCHECK(IsAligned(size, kPointerSize));
heap_->CopyBlock(dst_addr, src_addr, size);
- if ((mode == kProfiled) && FLAG_ignition && dst->IsBytecodeArray()) {
+ if ((mode == kProfiled) && dst->IsBytecodeArray()) {
PROFILE(heap_->isolate(),
CodeMoveEvent(AbstractCode::cast(src), dst_addr));
}
@@ -1720,7 +1712,7 @@ class MarkCompactCollector::EvacuateNewSpaceVisitor final
local_pretenuring_feedback_);
int size = object->Size();
HeapObject* target_object = nullptr;
- if (heap_->ShouldBePromoted<DEFAULT_PROMOTION>(object->address(), size) &&
+ if (heap_->ShouldBePromoted(object->address(), size) &&
TryEvacuateObject(compaction_spaces_->Get(OLD_SPACE), object,
&target_object)) {
promoted_size_ += size;
@@ -1841,41 +1833,48 @@ class MarkCompactCollector::EvacuateNewSpaceVisitor final
base::HashMap* local_pretenuring_feedback_;
};
+template <PageEvacuationMode mode>
class MarkCompactCollector::EvacuateNewSpacePageVisitor final
: public MarkCompactCollector::HeapObjectVisitor {
public:
- explicit EvacuateNewSpacePageVisitor(Heap* heap)
- : heap_(heap), promoted_size_(0), semispace_copied_size_(0) {}
-
- static void MoveToOldSpace(Page* page, PagedSpace* owner) {
- page->Unlink();
- Page* new_page = Page::ConvertNewToOld(page, owner);
- new_page->SetFlag(Page::PAGE_NEW_OLD_PROMOTION);
- }
+ explicit EvacuateNewSpacePageVisitor(
+ Heap* heap, base::HashMap* local_pretenuring_feedback)
+ : heap_(heap),
+ moved_bytes_(0),
+ local_pretenuring_feedback_(local_pretenuring_feedback) {}
- static void MoveToToSpace(Page* page) {
- page->heap()->new_space()->MovePageFromSpaceToSpace(page);
- page->SetFlag(Page::PAGE_NEW_NEW_PROMOTION);
+ static void Move(Page* page) {
+ switch (mode) {
+ case NEW_TO_NEW:
+ page->heap()->new_space()->MovePageFromSpaceToSpace(page);
+ page->SetFlag(Page::PAGE_NEW_NEW_PROMOTION);
+ break;
+ case NEW_TO_OLD: {
+ page->Unlink();
+ Page* new_page = Page::ConvertNewToOld(page);
+ new_page->SetFlag(Page::PAGE_NEW_OLD_PROMOTION);
+ break;
+ }
+ }
}
inline bool Visit(HeapObject* object) {
- RecordMigratedSlotVisitor visitor(heap_->mark_compact_collector());
- object->IterateBodyFast(&visitor);
- promoted_size_ += object->Size();
+ heap_->UpdateAllocationSite<Heap::kCached>(object,
+ local_pretenuring_feedback_);
+ if (mode == NEW_TO_OLD) {
+ RecordMigratedSlotVisitor visitor(heap_->mark_compact_collector());
+ object->IterateBodyFast(&visitor);
+ }
return true;
}
- intptr_t promoted_size() { return promoted_size_; }
- intptr_t semispace_copied_size() { return semispace_copied_size_; }
-
- void account_semispace_copied(intptr_t copied) {
- semispace_copied_size_ += copied;
- }
+ intptr_t moved_bytes() { return moved_bytes_; }
+ void account_moved_bytes(intptr_t bytes) { moved_bytes_ += bytes; }
private:
Heap* heap_;
- intptr_t promoted_size_;
- intptr_t semispace_copied_size_;
+ intptr_t moved_bytes_;
+ base::HashMap* local_pretenuring_feedback_;
};
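
The class above becomes a template over PageEvacuationMode, so NEW_TO_NEW and NEW_TO_OLD share one visitor while the mode is resolved at compile time. A reduced, self-contained sketch of that pattern; the Page type and its methods here are stand-ins, not the V8 ones.

// Compile-time dispatch on an evacuation mode, mirroring the template above.
#include <cstdio>

enum PageEvacuationMode { NEW_TO_NEW, NEW_TO_OLD };

struct Page {  // stand-in for the sketch
  void MoveWithinNewSpace() { std::puts("page stays in new space"); }
  void PromoteToOldSpace() { std::puts("page promoted to old space"); }
};

template <PageEvacuationMode mode>
struct EvacuatePageVisitor {
  static void Move(Page* page) {
    switch (mode) {  // mode is a template parameter; the dead branch is dropped
      case NEW_TO_NEW:
        page->MoveWithinNewSpace();
        break;
      case NEW_TO_OLD:
        page->PromoteToOldSpace();
        break;
    }
  }
};

int main() {
  Page p;
  EvacuatePageVisitor<NEW_TO_OLD>::Move(&p);
  EvacuatePageVisitor<NEW_TO_NEW>::Move(&p);
}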
class MarkCompactCollector::EvacuateOldSpaceVisitor final
@@ -2121,85 +2120,87 @@ void MarkCompactCollector::ProcessTopOptimizedFrame(ObjectVisitor* visitor) {
}
}
-
-void MarkCompactCollector::EnsureMarkingDequeIsReserved() {
- DCHECK(!marking_deque()->in_use());
- if (marking_deque_memory_ == NULL) {
- marking_deque_memory_ = new base::VirtualMemory(kMaxMarkingDequeSize);
- marking_deque_memory_committed_ = 0;
- }
- if (marking_deque_memory_ == NULL) {
- V8::FatalProcessOutOfMemory("EnsureMarkingDequeIsReserved");
+void MarkingDeque::SetUp() {
+ backing_store_ = new base::VirtualMemory(kMaxSize);
+ backing_store_committed_size_ = 0;
+ if (backing_store_ == nullptr) {
+ V8::FatalProcessOutOfMemory("MarkingDeque::SetUp");
}
}
+void MarkingDeque::TearDown() {
+ delete backing_store_;
+}
-void MarkCompactCollector::EnsureMarkingDequeIsCommitted(size_t max_size) {
- // If the marking deque is too small, we try to allocate a bigger one.
- // If that fails, make do with a smaller one.
- CHECK(!marking_deque()->in_use());
- for (size_t size = max_size; size >= kMinMarkingDequeSize; size >>= 1) {
- base::VirtualMemory* memory = marking_deque_memory_;
- size_t currently_committed = marking_deque_memory_committed_;
-
- if (currently_committed == size) return;
-
- if (currently_committed > size) {
- bool success = marking_deque_memory_->Uncommit(
- reinterpret_cast<Address>(marking_deque_memory_->address()) + size,
- currently_committed - size);
- if (success) {
- marking_deque_memory_committed_ = size;
- return;
- }
- UNREACHABLE();
- }
-
- bool success = memory->Commit(
- reinterpret_cast<Address>(memory->address()) + currently_committed,
- size - currently_committed,
- false); // Not executable.
- if (success) {
- marking_deque_memory_committed_ = size;
- return;
- }
+void MarkingDeque::StartUsing() {
+ base::LockGuard<base::Mutex> guard(&mutex_);
+ if (in_use_) {
+ // This can happen in mark-compact GC if the incremental marker already
+ // started using the marking deque.
+ return;
}
- V8::FatalProcessOutOfMemory("EnsureMarkingDequeIsCommitted");
+ in_use_ = true;
+ EnsureCommitted();
+ array_ = reinterpret_cast<HeapObject**>(backing_store_->address());
+ size_t size = FLAG_force_marking_deque_overflows
+ ? 64 * kPointerSize
+ : backing_store_committed_size_;
+ DCHECK(
+ base::bits::IsPowerOfTwo32(static_cast<uint32_t>(size / kPointerSize)));
+ mask_ = static_cast<int>((size / kPointerSize) - 1);
+ top_ = bottom_ = 0;
+ overflowed_ = false;
}
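
StartUsing() commits the backing store, sizes the deque to a power of two, and stores mask_ = entries - 1, so index wrap-around is a bitwise AND rather than a modulo. A reduced sketch of that indexing scheme (fixed capacity, push and pop at the top only, no overflow handling), not the actual MarkingDeque.

// Power-of-two ring buffer indexing, as set up by StartUsing() above.
#include <cassert>
#include <cstdint>
#include <vector>

class RingDeque {
 public:
  explicit RingDeque(size_t entries) : slots_(entries), mask_(entries - 1) {
    assert((entries & mask_) == 0 && "capacity must be a power of two");
  }

  bool IsEmpty() const { return top_ == bottom_; }
  bool IsFull() const { return ((top_ + 1) & mask_) == bottom_; }

  void Push(uintptr_t value) {
    assert(!IsFull());
    slots_[top_] = value;
    top_ = (top_ + 1) & mask_;  // wrap with a mask instead of %
  }

  uintptr_t Pop() {
    assert(!IsEmpty());
    top_ = (top_ - 1) & mask_;  // wraps correctly even when top_ is 0
    return slots_[top_];
  }

 private:
  std::vector<uintptr_t> slots_;
  size_t mask_;
  size_t top_ = 0;
  size_t bottom_ = 0;  // only consulted for the empty/full checks here
};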
-
-void MarkCompactCollector::InitializeMarkingDeque() {
- DCHECK(!marking_deque()->in_use());
- DCHECK(marking_deque_memory_committed_ > 0);
- Address addr = static_cast<Address>(marking_deque_memory_->address());
- size_t size = marking_deque_memory_committed_;
- if (FLAG_force_marking_deque_overflows) size = 64 * kPointerSize;
- marking_deque()->Initialize(addr, addr + size);
+void MarkingDeque::StopUsing() {
+ base::LockGuard<base::Mutex> guard(&mutex_);
+ DCHECK(IsEmpty());
+ DCHECK(!overflowed_);
+ top_ = bottom_ = mask_ = 0;
+ in_use_ = false;
+ if (FLAG_concurrent_sweeping) {
+ StartUncommitTask();
+ } else {
+ Uncommit();
+ }
}
-
-void MarkingDeque::Initialize(Address low, Address high) {
- DCHECK(!in_use_);
- HeapObject** obj_low = reinterpret_cast<HeapObject**>(low);
- HeapObject** obj_high = reinterpret_cast<HeapObject**>(high);
- array_ = obj_low;
- mask_ = base::bits::RoundDownToPowerOfTwo32(
- static_cast<uint32_t>(obj_high - obj_low)) -
- 1;
+void MarkingDeque::Clear() {
+ DCHECK(in_use_);
top_ = bottom_ = 0;
overflowed_ = false;
- in_use_ = true;
}
+void MarkingDeque::Uncommit() {
+ DCHECK(!in_use_);
+ bool success = backing_store_->Uncommit(backing_store_->address(),
+ backing_store_committed_size_);
+ backing_store_committed_size_ = 0;
+ CHECK(success);
+}
-void MarkingDeque::Uninitialize(bool aborting) {
- if (!aborting) {
- DCHECK(IsEmpty());
- DCHECK(!overflowed_);
- }
+void MarkingDeque::EnsureCommitted() {
DCHECK(in_use_);
- top_ = bottom_ = 0xdecbad;
- in_use_ = false;
+ if (backing_store_committed_size_ > 0) return;
+
+ for (size_t size = kMaxSize; size >= kMinSize; size /= 2) {
+ if (backing_store_->Commit(backing_store_->address(), size, false)) {
+ backing_store_committed_size_ = size;
+ break;
+ }
+ }
+ if (backing_store_committed_size_ == 0) {
+ V8::FatalProcessOutOfMemory("MarkingDeque::EnsureCommitted");
+ }
+}
+
+void MarkingDeque::StartUncommitTask() {
+ if (!uncommit_task_pending_) {
+ uncommit_task_pending_ = true;
+ UncommitTask* task = new UncommitTask(heap_->isolate(), this);
+ V8::GetCurrentPlatform()->CallOnBackgroundThread(
+ task, v8::Platform::kShortRunningTask);
+ }
}
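
StopUsing() defers the actual uncommit to a background task and guards it with uncommit_task_pending_ under the deque mutex, so at most one task is queued and the memory is only released if nobody called StartUsing() again in the meantime. A hedged sketch of that pattern, with std::thread standing in for V8's platform task runner.

// One-shot background "uncommit" with a pending flag under a mutex.
#include <mutex>
#include <thread>

class LazyBuffer {
 public:
  void StopUsing() {
    std::lock_guard<std::mutex> guard(mutex_);
    in_use_ = false;
    if (!uncommit_pending_) {
      uncommit_pending_ = true;
      // Detached thread stands in for posting a short-running background task.
      std::thread([this] { RunUncommitTask(); }).detach();
    }
  }

  void StartUsing() {
    std::lock_guard<std::mutex> guard(mutex_);
    in_use_ = true;  // a pending task will observe this and do nothing
  }

 private:
  void RunUncommitTask() {
    std::lock_guard<std::mutex> guard(mutex_);
    if (!in_use_) Uncommit();
    uncommit_pending_ = false;
  }

  void Uncommit() { /* release the committed memory here */ }

  std::mutex mutex_;
  bool in_use_ = false;
  bool uncommit_pending_ = false;
};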
class MarkCompactCollector::ObjectStatsVisitor
@@ -2242,17 +2243,21 @@ void MarkCompactCollector::VisitAllObjects(HeapObjectVisitor* visitor) {
}
void MarkCompactCollector::RecordObjectStats() {
- if (FLAG_track_gc_object_stats) {
+ if (V8_UNLIKELY(FLAG_gc_stats)) {
+ heap()->CreateObjectStats();
ObjectStatsVisitor visitor(heap(), heap()->live_object_stats_,
heap()->dead_object_stats_);
VisitAllObjects(&visitor);
- std::stringstream live, dead;
- heap()->live_object_stats_->Dump(live);
- heap()->dead_object_stats_->Dump(dead);
- TRACE_EVENT_INSTANT2(TRACE_DISABLED_BY_DEFAULT("v8.gc_stats"),
- "V8.GC_Objects_Stats", TRACE_EVENT_SCOPE_THREAD,
- "live", TRACE_STR_COPY(live.str().c_str()), "dead",
- TRACE_STR_COPY(dead.str().c_str()));
+ if (V8_UNLIKELY(FLAG_gc_stats &
+ v8::tracing::TracingCategoryObserver::ENABLED_BY_TRACING)) {
+ std::stringstream live, dead;
+ heap()->live_object_stats_->Dump(live);
+ heap()->dead_object_stats_->Dump(dead);
+ TRACE_EVENT_INSTANT2(TRACE_DISABLED_BY_DEFAULT("v8.gc_stats"),
+ "V8.GC_Objects_Stats", TRACE_EVENT_SCOPE_THREAD,
+ "live", TRACE_STR_COPY(live.str().c_str()), "dead",
+ TRACE_STR_COPY(dead.str().c_str()));
+ }
if (FLAG_trace_gc_object_stats) {
heap()->live_object_stats_->PrintJSON("live");
heap()->dead_object_stats_->PrintJSON("dead");
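
In the hunk above the stats flag is now a bitfield rather than a bool: any set bit triggers collection of the per-type counters, while the trace-event dump is emitted only when the bit installed by the tracing observer is present. A small illustration of that two-level check; the bit names are made up for the sketch.

// Bitfield-gated stats: cheap collection if any bit is set, expensive trace
// output only for the tracing-controlled bit.
#include <cstdio>

enum StatsBits : unsigned {
  kEnabledByFlag = 1 << 0,     // e.g. a command-line flag
  kEnabledByTracing = 1 << 1,  // set while a tracing session is recording
};

void MaybeRecordStats(unsigned gc_stats_bits) {
  if (gc_stats_bits == 0) return;  // feature fully off
  std::puts("collect object stats");
  if (gc_stats_bits & kEnabledByTracing) {
    std::puts("emit trace event with JSON dump");  // only when tracing asked
  }
}

int main() {
  MaybeRecordStats(0);
  MaybeRecordStats(kEnabledByFlag);
  MaybeRecordStats(kEnabledByFlag | kEnabledByTracing);
}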
@@ -2275,11 +2280,7 @@ void MarkCompactCollector::MarkLiveObjects() {
if (was_marked_incrementally_) {
incremental_marking->Finalize();
} else {
- // Abort any pending incremental activities e.g. incremental sweeping.
- incremental_marking->Stop();
- if (marking_deque()->in_use()) {
- marking_deque()->Uninitialize(true);
- }
+ CHECK(incremental_marking->IsStopped());
}
}
@@ -2288,8 +2289,7 @@ void MarkCompactCollector::MarkLiveObjects() {
state_ = MARK_LIVE_OBJECTS;
#endif
- EnsureMarkingDequeIsCommittedAndInitialize(
- MarkCompactCollector::kMaxMarkingDequeSize);
+ marking_deque()->StartUsing();
{
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_PREPARE_CODE_FLUSH);
@@ -2410,8 +2410,6 @@ void MarkCompactCollector::ClearNonLiveReferences() {
MarkDependentCodeForDeoptimization(dependent_code_list);
ClearWeakCollections();
-
- ClearInvalidRememberedSetSlots();
}
@@ -2480,7 +2478,7 @@ void MarkCompactCollector::ClearSimpleMapTransitions(
Object* non_live_map_list) {
Object* the_hole_value = heap()->the_hole_value();
Object* weak_cell_obj = non_live_map_list;
- while (weak_cell_obj != Smi::FromInt(0)) {
+ while (weak_cell_obj != Smi::kZero) {
WeakCell* weak_cell = WeakCell::cast(weak_cell_obj);
Map* map = Map::cast(weak_cell->value());
DCHECK(Marking::IsWhite(ObjectMarking::MarkBitFrom(map)));
@@ -2504,7 +2502,7 @@ void MarkCompactCollector::ClearSimpleMapTransition(Map* map,
// A previously existing simple transition (stored in a WeakCell) is going
// to be cleared. Clear the useless cell pointer, and take ownership
// of the descriptor array.
- map->set_raw_transitions(Smi::FromInt(0));
+ map->set_raw_transitions(Smi::kZero);
int number_of_own_descriptors = map->NumberOfOwnDescriptors();
DescriptorArray* descriptors = map->instance_descriptors();
if (descriptors == dead_transition->instance_descriptors() &&
@@ -2519,7 +2517,7 @@ void MarkCompactCollector::ClearSimpleMapTransition(Map* map,
void MarkCompactCollector::ClearFullMapTransitions() {
HeapObject* undefined = heap()->undefined_value();
Object* obj = heap()->encountered_transition_arrays();
- while (obj != Smi::FromInt(0)) {
+ while (obj != Smi::kZero) {
TransitionArray* array = TransitionArray::cast(obj);
int num_transitions = array->number_of_entries();
DCHECK_EQ(TransitionArray::NumberOfTransitions(array), num_transitions);
@@ -2539,7 +2537,7 @@ void MarkCompactCollector::ClearFullMapTransitions() {
obj = array->next_link();
array->set_next_link(undefined, SKIP_WRITE_BARRIER);
}
- heap()->set_encountered_transition_arrays(Smi::FromInt(0));
+ heap()->set_encountered_transition_arrays(Smi::kZero);
}
@@ -2643,7 +2641,7 @@ void MarkCompactCollector::TrimEnumCache(Map* map,
void MarkCompactCollector::ProcessWeakCollections() {
Object* weak_collection_obj = heap()->encountered_weak_collections();
- while (weak_collection_obj != Smi::FromInt(0)) {
+ while (weak_collection_obj != Smi::kZero) {
JSWeakCollection* weak_collection =
reinterpret_cast<JSWeakCollection*>(weak_collection_obj);
DCHECK(MarkCompactCollector::IsMarked(weak_collection));
@@ -2669,7 +2667,7 @@ void MarkCompactCollector::ProcessWeakCollections() {
void MarkCompactCollector::ClearWeakCollections() {
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_WEAK_COLLECTIONS);
Object* weak_collection_obj = heap()->encountered_weak_collections();
- while (weak_collection_obj != Smi::FromInt(0)) {
+ while (weak_collection_obj != Smi::kZero) {
JSWeakCollection* weak_collection =
reinterpret_cast<JSWeakCollection*>(weak_collection_obj);
DCHECK(MarkCompactCollector::IsMarked(weak_collection));
@@ -2685,19 +2683,19 @@ void MarkCompactCollector::ClearWeakCollections() {
weak_collection_obj = weak_collection->next();
weak_collection->set_next(heap()->undefined_value());
}
- heap()->set_encountered_weak_collections(Smi::FromInt(0));
+ heap()->set_encountered_weak_collections(Smi::kZero);
}
void MarkCompactCollector::AbortWeakCollections() {
Object* weak_collection_obj = heap()->encountered_weak_collections();
- while (weak_collection_obj != Smi::FromInt(0)) {
+ while (weak_collection_obj != Smi::kZero) {
JSWeakCollection* weak_collection =
reinterpret_cast<JSWeakCollection*>(weak_collection_obj);
weak_collection_obj = weak_collection->next();
weak_collection->set_next(heap()->undefined_value());
}
- heap()->set_encountered_weak_collections(Smi::FromInt(0));
+ heap()->set_encountered_weak_collections(Smi::kZero);
}
@@ -2709,8 +2707,8 @@ void MarkCompactCollector::ClearWeakCells(Object** non_live_map_list,
Object* the_hole_value = heap->the_hole_value();
DependentCode* dependent_code_head =
DependentCode::cast(heap->empty_fixed_array());
- Object* non_live_map_head = Smi::FromInt(0);
- while (weak_cell_obj != Smi::FromInt(0)) {
+ Object* non_live_map_head = Smi::kZero;
+ while (weak_cell_obj != Smi::kZero) {
WeakCell* weak_cell = reinterpret_cast<WeakCell*>(weak_cell_obj);
Object* next_weak_cell = weak_cell->next();
bool clear_value = true;
@@ -2770,7 +2768,7 @@ void MarkCompactCollector::ClearWeakCells(Object** non_live_map_list,
}
weak_cell_obj = next_weak_cell;
}
- heap->set_encountered_weak_cells(Smi::FromInt(0));
+ heap->set_encountered_weak_cells(Smi::kZero);
*non_live_map_list = non_live_map_head;
*dependent_code_list = dependent_code_head;
}
@@ -2779,24 +2777,24 @@ void MarkCompactCollector::ClearWeakCells(Object** non_live_map_list,
void MarkCompactCollector::AbortWeakCells() {
Object* the_hole_value = heap()->the_hole_value();
Object* weak_cell_obj = heap()->encountered_weak_cells();
- while (weak_cell_obj != Smi::FromInt(0)) {
+ while (weak_cell_obj != Smi::kZero) {
WeakCell* weak_cell = reinterpret_cast<WeakCell*>(weak_cell_obj);
weak_cell_obj = weak_cell->next();
weak_cell->clear_next(the_hole_value);
}
- heap()->set_encountered_weak_cells(Smi::FromInt(0));
+ heap()->set_encountered_weak_cells(Smi::kZero);
}
void MarkCompactCollector::AbortTransitionArrays() {
HeapObject* undefined = heap()->undefined_value();
Object* obj = heap()->encountered_transition_arrays();
- while (obj != Smi::FromInt(0)) {
+ while (obj != Smi::kZero) {
TransitionArray* array = TransitionArray::cast(obj);
obj = array->next_link();
array->set_next_link(undefined, SKIP_WRITE_BARRIER);
}
- heap()->set_encountered_transition_arrays(Smi::FromInt(0));
+ heap()->set_encountered_transition_arrays(Smi::kZero);
}
void MarkCompactCollector::RecordRelocSlot(Code* host, RelocInfo* rinfo,
@@ -2889,128 +2887,6 @@ static String* UpdateReferenceInExternalStringTableEntry(Heap* heap,
return String::cast(*p);
}
-bool MarkCompactCollector::IsSlotInBlackObject(MemoryChunk* p, Address slot) {
- Space* owner = p->owner();
- DCHECK(owner != heap_->lo_space() && owner != nullptr);
- USE(owner);
-
- // We may be part of a black area.
- if (Marking::IsBlackOrGrey(ObjectMarking::MarkBitFrom(slot))) {
- return true;
- }
-
- uint32_t mark_bit_index = p->AddressToMarkbitIndex(slot);
- unsigned int cell_index = mark_bit_index >> Bitmap::kBitsPerCellLog2;
- MarkBit::CellType index_mask = 1u << Bitmap::IndexInCell(mark_bit_index);
- MarkBit::CellType* cells = p->markbits()->cells();
- Address base_address = p->area_start();
- unsigned int base_address_cell_index = Bitmap::IndexToCell(
- Bitmap::CellAlignIndex(p->AddressToMarkbitIndex(base_address)));
-
- // Check if the slot points to the start of an object. This can happen e.g.
- // when we left trim a fixed array. Such slots are invalid and we can remove
- // them.
- if (index_mask > 1) {
- if ((cells[cell_index] & index_mask) != 0 &&
- (cells[cell_index] & (index_mask >> 1)) == 0) {
- return false;
- }
- } else {
- // Left trimming moves the mark bits so we cannot be in the very first cell.
- DCHECK(cell_index != base_address_cell_index);
- if ((cells[cell_index] & index_mask) != 0 &&
- (cells[cell_index - 1] & (1u << Bitmap::kBitIndexMask)) == 0) {
- return false;
- }
- }
-
- // Check if the object is in the current cell.
- MarkBit::CellType slot_mask;
- if ((cells[cell_index] == 0) ||
- (base::bits::CountTrailingZeros32(cells[cell_index]) >
- base::bits::CountTrailingZeros32(cells[cell_index] | index_mask))) {
- // If we are already in the first cell, there is no live object.
- if (cell_index == base_address_cell_index) return false;
-
- // If not, find a cell in a preceding cell slot that has a mark bit set.
- do {
- cell_index--;
- } while (cell_index > base_address_cell_index && cells[cell_index] == 0);
-
- // The slot must be in a dead object if there are no preceding cells that
- // have mark bits set.
- if (cells[cell_index] == 0) {
- return false;
- }
-
- // The object is in a preceding cell. Set the mask to find any object.
- slot_mask = ~0u;
- } else {
- // We are interested in object mark bits right before the slot.
- slot_mask = index_mask + (index_mask - 1);
- }
-
- MarkBit::CellType current_cell = cells[cell_index];
- CHECK(current_cell != 0);
-
- // Find the last live object in the cell.
- unsigned int leading_zeros =
- base::bits::CountLeadingZeros32(current_cell & slot_mask);
- CHECK(leading_zeros != Bitmap::kBitsPerCell);
- int offset = static_cast<int>(Bitmap::kBitIndexMask - leading_zeros) - 1;
-
- base_address += (cell_index - base_address_cell_index) *
- Bitmap::kBitsPerCell * kPointerSize;
- Address address = base_address + offset * kPointerSize;
-
- // If the found mark bit is part of a black area, the slot cannot be part
- // of a live object since it is not marked.
- if (p->IsBlackAreaEndMarker(address + kPointerSize)) return false;
-
- HeapObject* object = HeapObject::FromAddress(address);
- CHECK(Marking::IsBlack(ObjectMarking::MarkBitFrom(object)));
- CHECK(object->address() < reinterpret_cast<Address>(slot));
- if ((object->address() + kPointerSize) <= slot &&
- (object->address() + object->Size()) > slot) {
- // If the slot is within the last found object in the cell, the slot is
- // in a live object.
- // Slots pointing to the first word of an object are invalid and removed.
- // This can happen when we move the object header while left trimming.
- return true;
- }
- return false;
-}
-
-HeapObject* MarkCompactCollector::FindBlackObjectBySlotSlow(Address slot) {
- Page* p = Page::FromAddress(slot);
- Space* owner = p->owner();
- if (owner == heap_->lo_space() || owner == nullptr) {
- Object* large_object = heap_->lo_space()->FindObject(slot);
- // This object has to exist, otherwise we would not have recorded a slot
- // for it.
- CHECK(large_object->IsHeapObject());
- HeapObject* large_heap_object = HeapObject::cast(large_object);
-
- if (IsMarked(large_heap_object)) {
- return large_heap_object;
- }
- return nullptr;
- }
-
- LiveObjectIterator<kBlackObjects> it(p);
- HeapObject* object = nullptr;
- while ((object = it.Next()) != nullptr) {
- int size = object->Size();
- if (object->address() > slot) return nullptr;
- if (object->address() <= slot && slot < (object->address() + size)) {
- return object;
- }
- }
-
- return nullptr;
-}
-
-
void MarkCompactCollector::EvacuateNewSpacePrologue() {
NewSpace* new_space = heap()->new_space();
// Append the list of new space pages to be processed.
@@ -3055,7 +2931,11 @@ class MarkCompactCollector::Evacuator : public Malloced {
local_pretenuring_feedback_(kInitialLocalPretenuringFeedbackCapacity),
new_space_visitor_(collector->heap(), &compaction_spaces_,
&local_pretenuring_feedback_),
- new_space_page_visitor(collector->heap()),
+ new_to_new_page_visitor_(collector->heap(),
+ &local_pretenuring_feedback_),
+ new_to_old_page_visitor_(collector->heap(),
+ &local_pretenuring_feedback_),
+
old_space_visitor_(collector->heap(), &compaction_spaces_),
duration_(0.0),
bytes_compacted_(0) {}
@@ -3086,7 +2966,10 @@ class MarkCompactCollector::Evacuator : public Malloced {
// Visitors for the corresponding spaces.
EvacuateNewSpaceVisitor new_space_visitor_;
- EvacuateNewSpacePageVisitor new_space_page_visitor;
+ EvacuateNewSpacePageVisitor<PageEvacuationMode::NEW_TO_NEW>
+ new_to_new_page_visitor_;
+ EvacuateNewSpacePageVisitor<PageEvacuationMode::NEW_TO_OLD>
+ new_to_old_page_visitor_;
EvacuateOldSpaceVisitor old_space_visitor_;
// Book keeping info.
@@ -3107,20 +2990,23 @@ bool MarkCompactCollector::Evacuator::EvacuatePage(Page* page) {
case kObjectsNewToOld:
success = collector_->VisitLiveObjects(page, &new_space_visitor_,
kClearMarkbits);
+ DCHECK(success);
ArrayBufferTracker::ProcessBuffers(
page, ArrayBufferTracker::kUpdateForwardedRemoveOthers);
- DCHECK(success);
break;
case kPageNewToOld:
- success = collector_->VisitLiveObjects(page, &new_space_page_visitor,
+ success = collector_->VisitLiveObjects(page, &new_to_old_page_visitor_,
kKeepMarking);
- // ArrayBufferTracker will be updated during sweeping.
DCHECK(success);
+ new_to_old_page_visitor_.account_moved_bytes(page->LiveBytes());
+ // ArrayBufferTracker will be updated during sweeping.
break;
case kPageNewToNew:
- new_space_page_visitor.account_semispace_copied(page->LiveBytes());
+ success = collector_->VisitLiveObjects(page, &new_to_new_page_visitor_,
+ kKeepMarking);
+ DCHECK(success);
+ new_to_new_page_visitor_.account_moved_bytes(page->LiveBytes());
// ArrayBufferTracker will be updated during sweeping.
- success = true;
break;
case kObjectsOldToOld:
success = collector_->VisitLiveObjects(page, &old_space_visitor_,
@@ -3145,8 +3031,6 @@ bool MarkCompactCollector::Evacuator::EvacuatePage(Page* page) {
page, ArrayBufferTracker::kUpdateForwardedRemoveOthers);
}
break;
- default:
- UNREACHABLE();
}
}
ReportCompactionProgress(evacuation_time, saved_live_bytes);
@@ -3172,15 +3056,15 @@ void MarkCompactCollector::Evacuator::Finalize() {
compaction_spaces_.Get(CODE_SPACE));
heap()->tracer()->AddCompactionEvent(duration_, bytes_compacted_);
heap()->IncrementPromotedObjectsSize(new_space_visitor_.promoted_size() +
- new_space_page_visitor.promoted_size());
+ new_to_old_page_visitor_.moved_bytes());
heap()->IncrementSemiSpaceCopiedObjectSize(
new_space_visitor_.semispace_copied_size() +
- new_space_page_visitor.semispace_copied_size());
+ new_to_new_page_visitor_.moved_bytes());
heap()->IncrementYoungSurvivorsCounter(
new_space_visitor_.promoted_size() +
new_space_visitor_.semispace_copied_size() +
- new_space_page_visitor.promoted_size() +
- new_space_page_visitor.semispace_copied_size());
+ new_to_old_page_visitor_.moved_bytes() +
+ new_to_new_page_visitor_.moved_bytes());
heap()->MergeAllocationSitePretenuringFeedback(local_pretenuring_feedback_);
}
@@ -3192,17 +3076,15 @@ int MarkCompactCollector::NumberOfParallelCompactionTasks(int pages,
//
// The number of parallel compaction tasks is limited by:
// - #evacuation pages
- // - (#cores - 1)
+ // - #cores
const double kTargetCompactionTimeInMs = .5;
- const int kNumSweepingTasks = 3;
double compaction_speed =
heap()->tracer()->CompactionSpeedInBytesPerMillisecond();
const int available_cores = Max(
1, static_cast<int>(
- V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads()) -
- kNumSweepingTasks - 1);
+ V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads()));
int tasks;
if (compaction_speed > 0) {
tasks = 1 + static_cast<int>(live_bytes / compaction_speed /
@@ -3279,9 +3161,9 @@ void MarkCompactCollector::EvacuatePagesInParallel() {
(page->LiveBytes() > Evacuator::PageEvacuationThreshold()) &&
!page->Contains(age_mark)) {
if (page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK)) {
- EvacuateNewSpacePageVisitor::MoveToOldSpace(page, heap()->old_space());
+ EvacuateNewSpacePageVisitor<NEW_TO_OLD>::Move(page);
} else {
- EvacuateNewSpacePageVisitor::MoveToToSpace(page);
+ EvacuateNewSpacePageVisitor<NEW_TO_NEW>::Move(page);
}
}
@@ -3335,6 +3217,18 @@ class EvacuationWeakObjectRetainer : public WeakObjectRetainer {
}
};
+MarkCompactCollector::Sweeper::ClearOldToNewSlotsMode
+MarkCompactCollector::Sweeper::GetClearOldToNewSlotsMode(Page* p) {
+ AllocationSpace identity = p->owner()->identity();
+ if (p->old_to_new_slots() &&
+ (identity == OLD_SPACE || identity == MAP_SPACE)) {
+ return MarkCompactCollector::Sweeper::CLEAR_REGULAR_SLOTS;
+ } else if (p->typed_old_to_new_slots() && identity == CODE_SPACE) {
+ return MarkCompactCollector::Sweeper::CLEAR_TYPED_SLOTS;
+ }
+ return MarkCompactCollector::Sweeper::DO_NOT_CLEAR;
+}
+
int MarkCompactCollector::Sweeper::RawSweep(
Page* p, FreeListRebuildingMode free_list_mode,
FreeSpaceTreatmentMode free_space_mode) {
@@ -3344,13 +3238,17 @@ int MarkCompactCollector::Sweeper::RawSweep(
space->identity() == CODE_SPACE || space->identity() == MAP_SPACE);
DCHECK(!p->IsEvacuationCandidate() && !p->SweepingDone());
+ // If there are old-to-new slots in that page, we have to filter out slots
+ // that are in dead memory which is freed by the sweeper.
+ ClearOldToNewSlotsMode slots_clearing_mode = GetClearOldToNewSlotsMode(p);
+
+ // The free ranges map is used for filtering typed slots.
+ std::map<uint32_t, uint32_t> free_ranges;
+
// Before we sweep objects on the page, we free dead array buffers which
// requires valid mark bits.
ArrayBufferTracker::FreeDead(p);
- // We also release the black area markers here.
- p->ReleaseBlackAreaEndMarkerMap();
-
Address free_start = p->area_start();
DCHECK(reinterpret_cast<intptr_t>(free_start) % (32 * kPointerSize) == 0);
@@ -3370,11 +3268,13 @@ int MarkCompactCollector::Sweeper::RawSweep(
LiveObjectIterator<kBlackObjects> it(p);
HeapObject* object = NULL;
+
while ((object = it.Next()) != NULL) {
DCHECK(Marking::IsBlack(ObjectMarking::MarkBitFrom(object)));
Address free_end = object->address();
if (free_end != free_start) {
- int size = static_cast<int>(free_end - free_start);
+ CHECK_GT(free_end, free_start);
+ size_t size = static_cast<size_t>(free_end - free_start);
if (free_space_mode == ZAP_FREE_SPACE) {
memset(free_start, 0xcc, size);
}
@@ -3383,9 +3283,18 @@ int MarkCompactCollector::Sweeper::RawSweep(
free_start, size);
max_freed_bytes = Max(freed_bytes, max_freed_bytes);
} else {
- p->heap()->CreateFillerObjectAt(free_start, size,
+ p->heap()->CreateFillerObjectAt(free_start, static_cast<int>(size),
ClearRecordedSlots::kNo);
}
+
+ if (slots_clearing_mode == CLEAR_REGULAR_SLOTS) {
+ RememberedSet<OLD_TO_NEW>::RemoveRange(p, free_start, free_end,
+ SlotSet::KEEP_EMPTY_BUCKETS);
+ } else if (slots_clearing_mode == CLEAR_TYPED_SLOTS) {
+ free_ranges.insert(std::pair<uint32_t, uint32_t>(
+ static_cast<uint32_t>(free_start - p->address()),
+ static_cast<uint32_t>(free_end - p->address())));
+ }
}
Map* map = object->synchronized_map();
int size = object->SizeFromMap(map);
@@ -3401,11 +3310,9 @@ int MarkCompactCollector::Sweeper::RawSweep(
free_start = free_end + size;
}
- // Clear the mark bits of that page and reset live bytes count.
- p->ClearLiveness();
-
if (free_start != p->area_end()) {
- int size = static_cast<int>(p->area_end() - free_start);
+ CHECK_GT(p->area_end(), free_start);
+ size_t size = static_cast<size_t>(p->area_end() - free_start);
if (free_space_mode == ZAP_FREE_SPACE) {
memset(free_start, 0xcc, size);
}
@@ -3414,13 +3321,31 @@ int MarkCompactCollector::Sweeper::RawSweep(
free_start, size);
max_freed_bytes = Max(freed_bytes, max_freed_bytes);
} else {
- p->heap()->CreateFillerObjectAt(free_start, size,
+ p->heap()->CreateFillerObjectAt(free_start, static_cast<int>(size),
ClearRecordedSlots::kNo);
}
+
+ if (slots_clearing_mode == CLEAR_REGULAR_SLOTS) {
+ RememberedSet<OLD_TO_NEW>::RemoveRange(p, free_start, p->area_end(),
+ SlotSet::KEEP_EMPTY_BUCKETS);
+ } else if (slots_clearing_mode == CLEAR_TYPED_SLOTS) {
+ free_ranges.insert(std::pair<uint32_t, uint32_t>(
+ static_cast<uint32_t>(free_start - p->address()),
+ static_cast<uint32_t>(p->area_end() - p->address())));
+ }
}
+
+  // Clear invalid typed slots after collecting all free ranges.
+ if (slots_clearing_mode == CLEAR_TYPED_SLOTS) {
+ p->typed_old_to_new_slots()->RemoveInvaldSlots(free_ranges);
+ }
+
+ // Clear the mark bits of that page and reset live bytes count.
+ p->ClearLiveness();
+
p->concurrent_sweeping_state().SetValue(Page::kSweepingDone);
if (free_list_mode == IGNORE_FREE_LIST) return 0;
- return FreeList::GuaranteedAllocatable(static_cast<int>(max_freed_bytes));
+ return static_cast<int>(FreeList::GuaranteedAllocatable(max_freed_bytes));
}
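
RawSweep now records every freed region as an (offset_start, offset_end) pair in a std::map and, on code-space pages, drops typed old-to-new slots that fall into any of those ranges. The V8 call for that is RemoveInvaldSlots(); the standalone sketch below shows the same filtering idea against a plain vector of slot offsets.

// Filtering slot offsets against the free ranges collected during sweeping.
#include <algorithm>
#include <cstdint>
#include <map>
#include <vector>

// Returns true if `offset` lies inside one of the [start, end) free ranges.
bool InFreeRange(const std::map<uint32_t, uint32_t>& free_ranges,
                 uint32_t offset) {
  auto it = free_ranges.upper_bound(offset);  // first range starting after it
  if (it == free_ranges.begin()) return false;
  --it;                                       // candidate containing range
  return offset >= it->first && offset < it->second;
}

// Keep only slots that still point into live memory.
void RemoveInvalidSlots(std::vector<uint32_t>* slot_offsets,
                        const std::map<uint32_t, uint32_t>& free_ranges) {
  auto dead = [&](uint32_t offset) { return InFreeRange(free_ranges, offset); };
  slot_offsets->erase(
      std::remove_if(slot_offsets->begin(), slot_offsets->end(), dead),
      slot_offsets->end());
}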
void MarkCompactCollector::InvalidateCode(Code* code) {
@@ -3480,7 +3405,8 @@ bool MarkCompactCollector::VisitLiveObjects(MemoryChunk* page, Visitor* visitor,
page->AddressToMarkbitIndex(object->address()));
if (page->old_to_new_slots() != nullptr) {
page->old_to_new_slots()->RemoveRange(
- 0, static_cast<int>(object->address() - page->address()));
+ 0, static_cast<int>(object->address() - page->address()),
+ SlotSet::PREFREE_EMPTY_BUCKETS);
}
if (page->typed_old_to_new_slots() != nullptr) {
RememberedSet<OLD_TO_NEW>::RemoveRangeTyped(page, page->address(),
@@ -3545,12 +3471,12 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
for (Page* p : newspace_evacuation_candidates_) {
if (p->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION)) {
p->ClearFlag(Page::PAGE_NEW_NEW_PROMOTION);
- sweeper().AddLatePage(p->owner()->identity(), p);
+ sweeper().AddPage(p->owner()->identity(), p);
} else if (p->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION)) {
p->ClearFlag(Page::PAGE_NEW_OLD_PROMOTION);
p->ForAllFreeListCategories(
[](FreeListCategory* category) { DCHECK(!category->is_linked()); });
- sweeper().AddLatePage(p->owner()->identity(), p);
+ sweeper().AddPage(p->owner()->identity(), p);
}
}
newspace_evacuation_candidates_.Rewind(0);
@@ -3562,7 +3488,7 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
SkipList* list = p->skip_list();
if (list != NULL) list->Clear();
if (p->IsFlagSet(Page::COMPACTION_WAS_ABORTED)) {
- sweeper().AddLatePage(p->owner()->identity(), p);
+ sweeper().AddPage(p->owner()->identity(), p);
p->ClearFlag(Page::COMPACTION_WAS_ABORTED);
}
}
@@ -3631,34 +3557,48 @@ class PointerUpdateJobTraits {
static SlotCallbackResult CheckAndUpdateOldToNewSlot(Heap* heap,
Address slot_address) {
- Object** slot = reinterpret_cast<Object**>(slot_address);
- if (heap->InFromSpace(*slot)) {
- HeapObject* heap_object = reinterpret_cast<HeapObject*>(*slot);
+ // There may be concurrent action on slots in dead objects. Concurrent
+ // sweeper threads may overwrite the slot content with a free space object.
+ // Moreover, the pointed-to object may also get concurrently overwritten
+ // with a free space object. The sweeper always gets priority performing
+ // these writes.
+ base::NoBarrierAtomicValue<Object*>* slot =
+ base::NoBarrierAtomicValue<Object*>::FromAddress(slot_address);
+ Object* slot_reference = slot->Value();
+ if (heap->InFromSpace(slot_reference)) {
+ HeapObject* heap_object = reinterpret_cast<HeapObject*>(slot_reference);
DCHECK(heap_object->IsHeapObject());
MapWord map_word = heap_object->map_word();
// There could still be stale pointers in large object space, map space,
// and old space for pages that have been promoted.
if (map_word.IsForwardingAddress()) {
- // Update the corresponding slot.
- *slot = map_word.ToForwardingAddress();
+ // A sweeper thread may concurrently write a size value which looks like
+ // a forwarding pointer. We have to ignore these values.
+ if (map_word.ToRawValue() < Page::kPageSize) {
+ return REMOVE_SLOT;
+ }
+ // Update the corresponding slot only if the slot content did not
+ // change in the meantime. This may happen when a concurrent sweeper
+ // thread stored a free space object at that memory location.
+ slot->TrySetValue(slot_reference, map_word.ToForwardingAddress());
}
// If the object was in from space before and is after executing the
// callback in to space, the object is still live.
// Unfortunately, we do not know about the slot. It could be in a
// just freed free space object.
- if (heap->InToSpace(*slot)) {
+ if (heap->InToSpace(slot->Value())) {
return KEEP_SLOT;
}
- } else if (heap->InToSpace(*slot)) {
+ } else if (heap->InToSpace(slot_reference)) {
// Slots can point to "to" space if the page has been moved, or if the
// slot has been recorded multiple times in the remembered set. Since
// there is no forwarding information present we need to check the
// markbits to determine liveness.
- if (Marking::IsBlack(
- ObjectMarking::MarkBitFrom(reinterpret_cast<HeapObject*>(*slot))))
+ if (Marking::IsBlack(ObjectMarking::MarkBitFrom(
+ reinterpret_cast<HeapObject*>(slot_reference))))
return KEEP_SLOT;
} else {
- DCHECK(!heap->InNewSpace(*slot));
+ DCHECK(!heap->InNewSpace(slot_reference));
}
return REMOVE_SLOT;
}
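
The rewritten CheckAndUpdateOldToNewSlot reads the slot once, treats a small raw map-word value as a size written by the concurrent sweeper, and installs the forwarding address with a compare-and-swap so the sweeper's write always wins the race. A reduced sketch of the compare-and-swap step using std::atomic in place of V8's NoBarrierAtomicValue; the forwarding lookup is abstracted into a callback.

// "Update the slot only if it did not change in the meantime."
#include <atomic>
#include <cstdint>

enum SlotCallbackResult { KEEP_SLOT, REMOVE_SLOT };

// `forwarded` maps an old reference to its new location, or 0 if the object
// is dead or was overwritten concurrently.
SlotCallbackResult UpdateSlot(std::atomic<uintptr_t>* slot,
                              uintptr_t (*forwarded)(uintptr_t)) {
  uintptr_t old_ref = slot->load(std::memory_order_relaxed);
  uintptr_t new_ref = forwarded(old_ref);
  if (new_ref == 0) return REMOVE_SLOT;
  // Install the forwarding address only if the slot still holds old_ref; a
  // concurrent sweeper that already stored free-space filler wins the race.
  slot->compare_exchange_strong(old_ref, new_ref, std::memory_order_relaxed);
  return KEEP_SLOT;
}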
@@ -3666,9 +3606,11 @@ class PointerUpdateJobTraits {
int NumberOfPointerUpdateTasks(int pages) {
if (!FLAG_parallel_pointer_update) return 1;
- const int kMaxTasks = 4;
+ const int available_cores = Max(
+ 1, static_cast<int>(
+ V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads()));
const int kPagesPerTask = 4;
- return Min(kMaxTasks, (pages + kPagesPerTask - 1) / kPagesPerTask);
+ return Min(available_cores, (pages + kPagesPerTask - 1) / kPagesPerTask);
}
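
NumberOfPointerUpdateTasks now scales with the number of available background cores rather than a fixed cap of four, at roughly one task per four pages. The same formula as a standalone function, with std::thread::hardware_concurrency standing in for the platform's background-thread count.

// Task-count formula from the hunk above, using the standard library.
#include <algorithm>
#include <thread>

int NumberOfPointerUpdateTasks(int pages, bool parallel_enabled) {
  if (!parallel_enabled) return 1;
  const int available_cores =
      std::max(1, static_cast<int>(std::thread::hardware_concurrency()));
  const int kPagesPerTask = 4;
  return std::min(available_cores,
                  (pages + kPagesPerTask - 1) / kPagesPerTask);
}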
template <PointerDirection direction>
@@ -3813,24 +3755,21 @@ int MarkCompactCollector::Sweeper::ParallelSweepSpace(AllocationSpace identity,
int MarkCompactCollector::Sweeper::ParallelSweepPage(Page* page,
AllocationSpace identity) {
int max_freed = 0;
- if (page->mutex()->TryLock()) {
+ {
+ base::LockGuard<base::Mutex> guard(page->mutex());
// If this page was already swept in the meantime, we can return here.
- if (page->concurrent_sweeping_state().Value() != Page::kSweepingPending) {
- page->mutex()->Unlock();
- return 0;
- }
+ if (page->SweepingDone()) return 0;
+ DCHECK_EQ(Page::kSweepingPending,
+ page->concurrent_sweeping_state().Value());
page->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress);
const Sweeper::FreeSpaceTreatmentMode free_space_mode =
Heap::ShouldZapGarbage() ? ZAP_FREE_SPACE : IGNORE_FREE_SPACE;
if (identity == NEW_SPACE) {
RawSweep(page, IGNORE_FREE_LIST, free_space_mode);
- } else if (identity == OLD_SPACE) {
- max_freed = RawSweep(page, REBUILD_FREE_LIST, free_space_mode);
- } else if (identity == CODE_SPACE) {
- max_freed = RawSweep(page, REBUILD_FREE_LIST, free_space_mode);
} else {
max_freed = RawSweep(page, REBUILD_FREE_LIST, free_space_mode);
}
+ DCHECK(page->SweepingDone());
// After finishing sweeping of a page we clean up its remembered set.
if (page->typed_old_to_new_slots()) {
@@ -3839,35 +3778,26 @@ int MarkCompactCollector::Sweeper::ParallelSweepPage(Page* page,
if (page->old_to_new_slots()) {
page->old_to_new_slots()->FreeToBeFreedBuckets();
}
+ }
- {
- base::LockGuard<base::Mutex> guard(&mutex_);
- swept_list_[identity].Add(page);
- }
- page->concurrent_sweeping_state().SetValue(Page::kSweepingDone);
- page->mutex()->Unlock();
+ {
+ base::LockGuard<base::Mutex> guard(&mutex_);
+ swept_list_[identity].Add(page);
}
return max_freed;
}
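
ParallelSweepPage now takes the page mutex unconditionally and treats SweepingDone as the already-swept signal, so a thread that loses the race simply observes the done state and returns 0. A generic sketch of that do-the-work-once-under-a-lock shape.

// First thread in does the work; later threads see it is already done.
#include <mutex>

enum class SweepState { kPending, kInProgress, kDone };

struct PageLike {
  std::mutex mutex;
  SweepState state = SweepState::kPending;
};

int SweepOnce(PageLike* page) {
  std::lock_guard<std::mutex> guard(page->mutex);
  if (page->state == SweepState::kDone) return 0;  // someone else swept it
  page->state = SweepState::kInProgress;
  int freed = 0;
  // ... actual sweeping of the page would happen here ...
  page->state = SweepState::kDone;
  return freed;
}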
void MarkCompactCollector::Sweeper::AddPage(AllocationSpace space, Page* page) {
- DCHECK(!sweeping_in_progress_);
+ DCHECK(!FLAG_concurrent_sweeping || !AreSweeperTasksRunning());
PrepareToBeSweptPage(space, page);
sweeping_list_[space].push_back(page);
}
-void MarkCompactCollector::Sweeper::AddLatePage(AllocationSpace space,
- Page* page) {
- DCHECK(sweeping_in_progress_);
- PrepareToBeSweptPage(space, page);
- late_pages_ = true;
- AddSweepingPageSafe(space, page);
-}
-
void MarkCompactCollector::Sweeper::PrepareToBeSweptPage(AllocationSpace space,
Page* page) {
page->concurrent_sweeping_state().SetValue(Page::kSweepingPending);
- int to_sweep = page->area_size() - page->LiveBytes();
+ DCHECK_GE(page->area_size(), static_cast<size_t>(page->LiveBytes()));
+ size_t to_sweep = page->area_size() - page->LiveBytes();
if (space != NEW_SPACE)
heap_->paged_space(space)->accounting_stats_.ShrinkSpace(to_sweep);
}
@@ -3903,7 +3833,6 @@ void MarkCompactCollector::StartSweepSpace(PagedSpace* space) {
if (p->IsEvacuationCandidate()) {
// Will be processed in EvacuateNewSpaceAndCandidates.
DCHECK(evacuation_candidates_.length() > 0);
- DCHECK(!p->HasBlackAreas());
continue;
}
@@ -3943,8 +3872,7 @@ void MarkCompactCollector::StartSweepSpace(PagedSpace* space) {
}
}
-
-void MarkCompactCollector::SweepSpaces() {
+void MarkCompactCollector::StartSweepSpaces() {
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_SWEEP);
#ifdef DEBUG
state_ = SWEEP_SPACES;
diff --git a/deps/v8/src/heap/mark-compact.h b/deps/v8/src/heap/mark-compact.h
index 2cbb369f76..de182073ea 100644
--- a/deps/v8/src/heap/mark-compact.h
+++ b/deps/v8/src/heap/mark-compact.h
@@ -8,6 +8,8 @@
#include <deque>
#include "src/base/bits.h"
+#include "src/base/platform/condition-variable.h"
+#include "src/cancelable-task.h"
#include "src/heap/marking.h"
#include "src/heap/spaces.h"
#include "src/heap/store-buffer.h"
@@ -52,16 +54,26 @@ class ObjectMarking : public AllStatic {
// Marking deque for tracing live objects.
class MarkingDeque {
public:
- MarkingDeque()
- : array_(NULL),
+ explicit MarkingDeque(Heap* heap)
+ : backing_store_(nullptr),
+ backing_store_committed_size_(0),
+ array_(nullptr),
top_(0),
bottom_(0),
mask_(0),
overflowed_(false),
- in_use_(false) {}
+ in_use_(false),
+ uncommit_task_pending_(false),
+ heap_(heap) {}
- void Initialize(Address low, Address high);
- void Uninitialize(bool aborting = false);
+ void SetUp();
+ void TearDown();
+
+ // Ensures that the marking deque is committed and will stay committed until
+ // StopUsing() is called.
+ void StartUsing();
+ void StopUsing();
+ void Clear();
inline bool IsFull() { return ((top_ + 1) & mask_) == bottom_; }
@@ -69,8 +81,6 @@ class MarkingDeque {
bool overflowed() const { return overflowed_; }
- bool in_use() const { return in_use_; }
-
void ClearOverflowed() { overflowed_ = false; }
void SetOverflowed() { overflowed_ = true; }
@@ -118,6 +128,43 @@ class MarkingDeque {
void set_top(int top) { top_ = top; }
private:
+ // This task uncommits the marking_deque backing store if
+  // marking_deque->in_use_ is false.
+ class UncommitTask : public CancelableTask {
+ public:
+ explicit UncommitTask(Isolate* isolate, MarkingDeque* marking_deque)
+ : CancelableTask(isolate), marking_deque_(marking_deque) {}
+
+ private:
+ // CancelableTask override.
+ void RunInternal() override {
+ base::LockGuard<base::Mutex> guard(&marking_deque_->mutex_);
+ if (!marking_deque_->in_use_) {
+ marking_deque_->Uncommit();
+ }
+ marking_deque_->uncommit_task_pending_ = false;
+ }
+
+ MarkingDeque* marking_deque_;
+ DISALLOW_COPY_AND_ASSIGN(UncommitTask);
+ };
+
+ static const size_t kMaxSize = 4 * MB;
+ static const size_t kMinSize = 256 * KB;
+
+ // Must be called with mutex lock.
+ void EnsureCommitted();
+
+ // Must be called with mutex lock.
+ void Uncommit();
+
+ // Must be called with mutex lock.
+ void StartUncommitTask();
+
+ base::Mutex mutex_;
+
+ base::VirtualMemory* backing_store_;
+ size_t backing_store_committed_size_;
HeapObject** array_;
// array_[(top - 1) & mask_] is the top element in the deque. The Deque is
// empty when top_ == bottom_. It is full when top_ + 1 == bottom
@@ -126,7 +173,11 @@ class MarkingDeque {
int bottom_;
int mask_;
bool overflowed_;
+  // in_use_ == true after taking the mutex lock implies that the marking deque is
+ // committed and will stay committed at least until in_use_ == false.
bool in_use_;
+ bool uncommit_task_pending_;
+ Heap* heap_;
DISALLOW_COPY_AND_ASSIGN(MarkingDeque);
};
@@ -270,12 +321,16 @@ class LiveObjectIterator BASE_EMBEDDED {
HeapObject* Next();
private:
+ inline Heap* heap() { return chunk_->heap(); }
+
MemoryChunk* chunk_;
MarkBitCellIterator it_;
Address cell_base_;
MarkBit::CellType current_cell_;
};
+enum PageEvacuationMode { NEW_TO_NEW, NEW_TO_OLD };
+
// -------------------------------------------------------------------------
// Mark-Compact collector
class MarkCompactCollector {
@@ -288,6 +343,11 @@ class MarkCompactCollector {
enum FreeListRebuildingMode { REBUILD_FREE_LIST, IGNORE_FREE_LIST };
enum FreeSpaceTreatmentMode { IGNORE_FREE_SPACE, ZAP_FREE_SPACE };
+ enum ClearOldToNewSlotsMode {
+ DO_NOT_CLEAR,
+ CLEAR_REGULAR_SLOTS,
+ CLEAR_TYPED_SLOTS
+ };
typedef std::deque<Page*> SweepingList;
typedef List<Page*> SweptList;
@@ -299,24 +359,25 @@ class MarkCompactCollector {
: heap_(heap),
pending_sweeper_tasks_semaphore_(0),
sweeping_in_progress_(false),
- late_pages_(false),
num_sweeping_tasks_(0) {}
bool sweeping_in_progress() { return sweeping_in_progress_; }
- bool contains_late_pages() { return late_pages_; }
void AddPage(AllocationSpace space, Page* page);
- void AddLatePage(AllocationSpace space, Page* page);
int ParallelSweepSpace(AllocationSpace identity, int required_freed_bytes,
int max_pages = 0);
int ParallelSweepPage(Page* page, AllocationSpace identity);
+ // After calling this function sweeping is considered to be in progress
+ // and the main thread can sweep lazily, but the background sweeper tasks
+ // are not running yet.
void StartSweeping();
- void StartSweepingHelper(AllocationSpace space_to_start);
+ void StartSweeperTasks();
void EnsureCompleted();
void EnsureNewSpaceCompleted();
- bool IsSweepingCompleted();
+ bool AreSweeperTasksRunning();
+ bool IsSweepingCompleted(AllocationSpace space);
void SweepOrWaitUntilSweepingCompleted(Page* page);
void AddSweptPageSafe(PagedSpace* space, Page* page);
@@ -325,6 +386,8 @@ class MarkCompactCollector {
private:
static const int kAllocationSpaces = LAST_PAGED_SPACE + 1;
+ static ClearOldToNewSlotsMode GetClearOldToNewSlotsMode(Page* p);
+
template <typename Callback>
void ForAllSweepingSpaces(Callback callback) {
for (int i = 0; i < kAllocationSpaces; i++) {
@@ -343,7 +406,6 @@ class MarkCompactCollector {
SweptList swept_list_[kAllocationSpaces];
SweepingList sweeping_list_[kAllocationSpaces];
bool sweeping_in_progress_;
- bool late_pages_;
base::AtomicNumber<intptr_t> num_sweeping_tasks_;
};
@@ -369,9 +431,7 @@ class MarkCompactCollector {
// Performs a global garbage collection.
void CollectGarbage();
- enum CompactionMode { INCREMENTAL_COMPACTION, NON_INCREMENTAL_COMPACTION };
-
- bool StartCompaction(CompactionMode mode);
+ bool StartCompaction();
void AbortCompaction();
@@ -412,7 +472,7 @@ class MarkCompactCollector {
->ShouldSkipEvacuationSlotRecording();
}
- INLINE(static bool IsOnEvacuationCandidate(Object* obj)) {
+ static inline bool IsOnEvacuationCandidate(HeapObject* obj) {
return Page::FromAddress(reinterpret_cast<Address>(obj))
->IsEvacuationCandidate();
}
@@ -463,34 +523,10 @@ class MarkCompactCollector {
MarkingDeque* marking_deque() { return &marking_deque_; }
- static const size_t kMaxMarkingDequeSize = 4 * MB;
- static const size_t kMinMarkingDequeSize = 256 * KB;
-
- void EnsureMarkingDequeIsCommittedAndInitialize(size_t max_size) {
- if (!marking_deque()->in_use()) {
- EnsureMarkingDequeIsCommitted(max_size);
- InitializeMarkingDeque();
- }
- }
-
- void EnsureMarkingDequeIsCommitted(size_t max_size);
- void EnsureMarkingDequeIsReserved();
-
- void InitializeMarkingDeque();
-
- // The following two methods can just be called after marking, when the
- // whole transitive closure is known. They must be called before sweeping
- // when mark bits are still intact.
- bool IsSlotInBlackObject(MemoryChunk* p, Address slot);
- HeapObject* FindBlackObjectBySlotSlow(Address slot);
-
- // Removes all the slots in the slot buffers that are within the given
- // address range.
- void RemoveObjectSlots(Address start_slot, Address end_slot);
-
Sweeper& sweeper() { return sweeper_; }
private:
+ template <PageEvacuationMode mode>
class EvacuateNewSpacePageVisitor;
class EvacuateNewSpaceVisitor;
class EvacuateOldSpaceVisitor;
@@ -502,11 +538,10 @@ class MarkCompactCollector {
explicit MarkCompactCollector(Heap* heap);
bool WillBeDeoptimized(Code* code);
- void ClearInvalidRememberedSetSlots();
- void ComputeEvacuationHeuristics(int area_size,
+ void ComputeEvacuationHeuristics(size_t area_size,
int* target_fragmentation_percent,
- int* max_evacuated_bytes);
+ size_t* max_evacuated_bytes);
void VisitAllObjects(HeapObjectVisitor* visitor);
@@ -644,21 +679,10 @@ class MarkCompactCollector {
void AbortTransitionArrays();
- // -----------------------------------------------------------------------
- // Phase 2: Sweeping to clear mark bits and free non-live objects for
- // a non-compacting collection.
- //
- // Before: Live objects are marked and non-live objects are unmarked.
- //
- // After: Live objects are unmarked, non-live regions have been added to
- // their space's free list. Active eden semispace is compacted by
- // evacuation.
- //
-
- // If we are not compacting the heap, we simply sweep the spaces except
- // for the large object space, clearing mark bits and adding unmarked
- // regions to each space's free list.
- void SweepSpaces();
+ // Starts sweeping of spaces by contributing on the main thread and setting
+ // up other pages for sweeping. Does not start sweeper tasks.
+ void StartSweepSpaces();
+ void StartSweepSpace(PagedSpace* space);
void EvacuateNewSpacePrologue();
@@ -681,9 +705,6 @@ class MarkCompactCollector {
void ReleaseEvacuationCandidates();
- // Starts sweeping of a space by contributing on the main thread and setting
- // up other pages for sweeping.
- void StartSweepSpace(PagedSpace* space);
#ifdef DEBUG
friend class MarkObjectVisitor;
@@ -726,8 +747,6 @@ class MarkCompactCollector {
bool have_code_to_deoptimize_;
- base::VirtualMemory* marking_deque_memory_;
- size_t marking_deque_memory_committed_;
MarkingDeque marking_deque_;
CodeFlusher* code_flusher_;
diff --git a/deps/v8/src/heap/memory-reducer.cc b/deps/v8/src/heap/memory-reducer.cc
index ba9010e7bc..2aed4c714a 100644
--- a/deps/v8/src/heap/memory-reducer.cc
+++ b/deps/v8/src/heap/memory-reducer.cc
@@ -24,27 +24,26 @@ MemoryReducer::TimerTask::TimerTask(MemoryReducer* memory_reducer)
void MemoryReducer::TimerTask::RunInternal() {
- const double kJsCallsPerMsThreshold = 0.5;
Heap* heap = memory_reducer_->heap();
Event event;
double time_ms = heap->MonotonicallyIncreasingTimeInMs();
heap->tracer()->SampleAllocation(time_ms, heap->NewSpaceAllocationCounter(),
heap->OldGenerationAllocationCounter());
- double js_call_rate = memory_reducer_->SampleAndGetJsCallsPerMs(time_ms);
bool low_allocation_rate = heap->HasLowAllocationRate();
- bool is_idle = js_call_rate < kJsCallsPerMsThreshold && low_allocation_rate;
bool optimize_for_memory = heap->ShouldOptimizeForMemoryUsage();
if (FLAG_trace_gc_verbose) {
- PrintIsolate(heap->isolate(), "Memory reducer: call rate %.3lf, %s, %s\n",
- js_call_rate, low_allocation_rate ? "low alloc" : "high alloc",
- optimize_for_memory ? "background" : "foreground");
+ heap->isolate()->PrintWithTimestamp(
+ "Memory reducer: %s, %s\n",
+ low_allocation_rate ? "low alloc" : "high alloc",
+ optimize_for_memory ? "background" : "foreground");
}
event.type = kTimer;
event.time_ms = time_ms;
  // The memory reducer will start incremental marking if
// 1) mutator is likely idle: js call rate is low and allocation rate is low.
// 2) mutator is in background: optimize for memory flag is set.
- event.should_start_incremental_gc = is_idle || optimize_for_memory;
+ event.should_start_incremental_gc =
+ low_allocation_rate || optimize_for_memory;
event.can_start_incremental_gc =
heap->incremental_marking()->IsStopped() &&
(heap->incremental_marking()->CanBeActivated() || optimize_for_memory);
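
With the JS-call-rate sampling removed, the timer event requests an incremental GC purely from a low allocation rate or the optimize-for-memory signal, and permits it only while incremental marking is stopped. A compact restatement of that decision with plain booleans in place of the Heap queries.

// Decision logic of the memory reducer timer event after this change.
struct TimerEvent {
  bool should_start_incremental_gc;
  bool can_start_incremental_gc;
};

TimerEvent MakeTimerEvent(bool low_allocation_rate, bool optimize_for_memory,
                          bool marking_stopped,
                          bool marking_can_be_activated) {
  TimerEvent event;
  event.should_start_incremental_gc =
      low_allocation_rate || optimize_for_memory;
  event.can_start_incremental_gc =
      marking_stopped && (marking_can_be_activated || optimize_for_memory);
  return event;
}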
@@ -52,16 +51,6 @@ void MemoryReducer::TimerTask::RunInternal() {
}
-double MemoryReducer::SampleAndGetJsCallsPerMs(double time_ms) {
- unsigned int counter = heap()->isolate()->js_calls_from_api_counter();
- unsigned int call_delta = counter - js_calls_counter_;
- double time_delta_ms = time_ms - js_calls_sample_time_ms_;
- js_calls_counter_ = counter;
- js_calls_sample_time_ms_ = time_ms;
- return time_delta_ms > 0 ? call_delta / time_delta_ms : 0;
-}
-
-
void MemoryReducer::NotifyTimer(const Event& event) {
DCHECK_EQ(kTimer, event.type);
DCHECK_EQ(kWait, state_.action);
@@ -70,8 +59,8 @@ void MemoryReducer::NotifyTimer(const Event& event) {
DCHECK(heap()->incremental_marking()->IsStopped());
DCHECK(FLAG_incremental_marking);
if (FLAG_trace_gc_verbose) {
- PrintIsolate(heap()->isolate(), "Memory reducer: started GC #%d\n",
- state_.started_gcs);
+ heap()->isolate()->PrintWithTimestamp("Memory reducer: started GC #%d\n",
+ state_.started_gcs);
}
heap()->StartIdleIncrementalMarking(
GarbageCollectionReason::kMemoryReducer);
@@ -93,8 +82,9 @@ void MemoryReducer::NotifyTimer(const Event& event) {
// Re-schedule the timer.
ScheduleTimer(event.time_ms, state_.next_gc_start_ms - event.time_ms);
if (FLAG_trace_gc_verbose) {
- PrintIsolate(heap()->isolate(), "Memory reducer: waiting for %.f ms\n",
- state_.next_gc_start_ms - event.time_ms);
+ heap()->isolate()->PrintWithTimestamp(
+ "Memory reducer: waiting for %.f ms\n",
+ state_.next_gc_start_ms - event.time_ms);
}
}
}
@@ -110,9 +100,9 @@ void MemoryReducer::NotifyMarkCompact(const Event& event) {
}
if (old_action == kRun) {
if (FLAG_trace_gc_verbose) {
- PrintIsolate(heap()->isolate(), "Memory reducer: finished GC #%d (%s)\n",
- state_.started_gcs,
- state_.action == kWait ? "will do more" : "done");
+ heap()->isolate()->PrintWithTimestamp(
+ "Memory reducer: finished GC #%d (%s)\n", state_.started_gcs,
+ state_.action == kWait ? "will do more" : "done");
}
}
}
@@ -194,8 +184,6 @@ MemoryReducer::State MemoryReducer::Step(const State& state,
void MemoryReducer::ScheduleTimer(double time_ms, double delay_ms) {
DCHECK(delay_ms > 0);
- // Record the time and the js call counter.
- SampleAndGetJsCallsPerMs(time_ms);
// Leave some room for precision error in task scheduler.
const double kSlackMs = 100;
v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(heap()->isolate());
@@ -204,7 +192,6 @@ void MemoryReducer::ScheduleTimer(double time_ms, double delay_ms) {
isolate, timer_task, (delay_ms + kSlackMs) / 1000.0);
}
-
void MemoryReducer::TearDown() { state_ = State(kDone, 0, 0, 0.0); }
} // namespace internal
diff --git a/deps/v8/src/heap/memory-reducer.h b/deps/v8/src/heap/memory-reducer.h
index 0fe53e5fea..0421987a3c 100644
--- a/deps/v8/src/heap/memory-reducer.h
+++ b/deps/v8/src/heap/memory-reducer.h
@@ -8,6 +8,7 @@
#include "include/v8-platform.h"
#include "src/base/macros.h"
#include "src/cancelable-task.h"
+#include "src/globals.h"
namespace v8 {
namespace internal {
@@ -79,7 +80,7 @@ class Heap;
// now_ms is the current time,
// t' is t if the current event is not a GC event and is now_ms otherwise,
// long_delay_ms, short_delay_ms, and watchdog_delay_ms are constants.
-class MemoryReducer {
+class V8_EXPORT_PRIVATE MemoryReducer {
public:
enum Action { kDone, kWait, kRun };
@@ -148,9 +149,6 @@ class MemoryReducer {
static bool WatchdogGC(const State& state, const Event& event);
- // Returns the rate of JS calls initiated from the API.
- double SampleAndGetJsCallsPerMs(double time_ms);
-
Heap* heap_;
State state_;
unsigned int js_calls_counter_;
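
memory-reducer.h now includes src/globals.h and marks the class V8_EXPORT_PRIVATE, which typically exports the symbol from the v8 component so unit tests can link against it in shared-library builds. A hedged sketch of what such an export macro commonly expands to (the real definition lives in V8's headers and may differ):

#if defined(_WIN32)
#define DEMO_EXPORT_PRIVATE __declspec(dllexport)
#else
#define DEMO_EXPORT_PRIVATE __attribute__((visibility("default")))
#endif

// Hypothetical class standing in for MemoryReducer; only the annotation
// pattern is the point here.
class DEMO_EXPORT_PRIVATE DemoReducer {
 public:
  void NotifyTimer() {}
};
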
diff --git a/deps/v8/src/heap/object-stats.cc b/deps/v8/src/heap/object-stats.cc
index 6e4b50ec24..ef5f65734e 100644
--- a/deps/v8/src/heap/object-stats.cc
+++ b/deps/v8/src/heap/object-stats.cc
@@ -52,55 +52,59 @@ V8_NOINLINE static void DumpJSONArray(std::stringstream& stream, size_t* array,
stream << "]";
}
+void ObjectStats::PrintKeyAndId(const char* key, int gc_count) {
+ PrintF("\"isolate\": \"%p\", \"id\": %d, \"key\": \"%s\", ",
+ reinterpret_cast<void*>(isolate()), gc_count, key);
+}
+
+void ObjectStats::PrintInstanceTypeJSON(const char* key, int gc_count,
+ const char* name, int index) {
+ PrintF("{ ");
+ PrintKeyAndId(key, gc_count);
+ PrintF("\"type\": \"instance_type_data\", ");
+ PrintF("\"instance_type\": %d, ", index);
+ PrintF("\"instance_type_name\": \"%s\", ", name);
+ PrintF("\"overall\": %zu, ", object_sizes_[index]);
+ PrintF("\"count\": %zu, ", object_counts_[index]);
+ PrintF("\"over_allocated\": %zu, ", over_allocated_[index]);
+ PrintF("\"histogram\": ");
+ PrintJSONArray(size_histogram_[index], kNumberOfBuckets);
+ PrintF(",");
+ PrintF("\"over_allocated_histogram\": ");
+ PrintJSONArray(over_allocated_histogram_[index], kNumberOfBuckets);
+ PrintF(" }\n");
+}
+
void ObjectStats::PrintJSON(const char* key) {
double time = isolate()->time_millis_since_init();
int gc_count = heap()->gc_count();
-#define PRINT_KEY_AND_ID() \
- PrintF("\"isolate\": \"%p\", \"id\": %d, \"key\": \"%s\", ", \
- reinterpret_cast<void*>(isolate()), gc_count, key);
-
// gc_descriptor
PrintF("{ ");
- PRINT_KEY_AND_ID();
+ PrintKeyAndId(key, gc_count);
PrintF("\"type\": \"gc_descriptor\", \"time\": %f }\n", time);
// bucket_sizes
PrintF("{ ");
- PRINT_KEY_AND_ID();
+ PrintKeyAndId(key, gc_count);
PrintF("\"type\": \"bucket_sizes\", \"sizes\": [ ");
for (int i = 0; i < kNumberOfBuckets; i++) {
PrintF("%d", 1 << (kFirstBucketShift + i));
if (i != (kNumberOfBuckets - 1)) PrintF(", ");
}
PrintF(" ] }\n");
-// instance_type_data
-#define PRINT_INSTANCE_TYPE_DATA(name, index) \
- PrintF("{ "); \
- PRINT_KEY_AND_ID(); \
- PrintF("\"type\": \"instance_type_data\", "); \
- PrintF("\"instance_type\": %d, ", index); \
- PrintF("\"instance_type_name\": \"%s\", ", name); \
- PrintF("\"overall\": %zu, ", object_sizes_[index]); \
- PrintF("\"count\": %zu, ", object_counts_[index]); \
- PrintF("\"over_allocated\": %zu, ", over_allocated_[index]); \
- PrintF("\"histogram\": "); \
- PrintJSONArray(size_histogram_[index], kNumberOfBuckets); \
- PrintF(","); \
- PrintF("\"over_allocated_histogram\": "); \
- PrintJSONArray(over_allocated_histogram_[index], kNumberOfBuckets); \
- PrintF(" }\n");
-#define INSTANCE_TYPE_WRAPPER(name) PRINT_INSTANCE_TYPE_DATA(#name, name)
-#define CODE_KIND_WRAPPER(name) \
- PRINT_INSTANCE_TYPE_DATA("*CODE_" #name, \
- FIRST_CODE_KIND_SUB_TYPE + Code::name)
-#define FIXED_ARRAY_SUB_INSTANCE_TYPE_WRAPPER(name) \
- PRINT_INSTANCE_TYPE_DATA("*FIXED_ARRAY_" #name, \
- FIRST_FIXED_ARRAY_SUB_TYPE + name)
-#define CODE_AGE_WRAPPER(name) \
- PRINT_INSTANCE_TYPE_DATA( \
- "*CODE_AGE_" #name, \
- FIRST_CODE_AGE_SUB_TYPE + Code::k##name##CodeAge - Code::kFirstCodeAge)
+#define INSTANCE_TYPE_WRAPPER(name) \
+ PrintInstanceTypeJSON(key, gc_count, #name, name);
+#define CODE_KIND_WRAPPER(name) \
+ PrintInstanceTypeJSON(key, gc_count, "*CODE_" #name, \
+ FIRST_CODE_KIND_SUB_TYPE + Code::name);
+#define FIXED_ARRAY_SUB_INSTANCE_TYPE_WRAPPER(name) \
+ PrintInstanceTypeJSON(key, gc_count, "*FIXED_ARRAY_" #name, \
+ FIRST_FIXED_ARRAY_SUB_TYPE + name);
+#define CODE_AGE_WRAPPER(name) \
+ PrintInstanceTypeJSON( \
+ key, gc_count, "*CODE_AGE_" #name, \
+ FIRST_CODE_AGE_SUB_TYPE + Code::k##name##CodeAge - Code::kFirstCodeAge);
INSTANCE_TYPE_LIST(INSTANCE_TYPE_WRAPPER)
CODE_KIND_LIST(CODE_KIND_WRAPPER)
@@ -115,6 +119,20 @@ void ObjectStats::PrintJSON(const char* key) {
#undef PRINT_KEY_AND_ID
}
+void ObjectStats::DumpInstanceTypeData(std::stringstream& stream,
+ const char* name, int index) {
+ stream << "\"" << name << "\":{";
+ stream << "\"type\":" << static_cast<int>(index) << ",";
+ stream << "\"overall\":" << object_sizes_[index] << ",";
+ stream << "\"count\":" << object_counts_[index] << ",";
+ stream << "\"over_allocated\":" << over_allocated_[index] << ",";
+ stream << "\"histogram\":";
+ DumpJSONArray(stream, size_histogram_[index], kNumberOfBuckets);
+ stream << ",\"over_allocated_histogram\":";
+ DumpJSONArray(stream, over_allocated_histogram_[index], kNumberOfBuckets);
+ stream << "},";
+}
+
void ObjectStats::Dump(std::stringstream& stream) {
double time = isolate()->time_millis_since_init();
int gc_count = heap()->gc_count();
@@ -131,29 +149,19 @@ void ObjectStats::Dump(std::stringstream& stream) {
stream << "],";
stream << "\"type_data\":{";
-#define PRINT_INSTANCE_TYPE_DATA(name, index) \
- stream << "\"" << name << "\":{"; \
- stream << "\"type\":" << static_cast<int>(index) << ","; \
- stream << "\"overall\":" << object_sizes_[index] << ","; \
- stream << "\"count\":" << object_counts_[index] << ","; \
- stream << "\"over_allocated\":" << over_allocated_[index] << ","; \
- stream << "\"histogram\":"; \
- DumpJSONArray(stream, size_histogram_[index], kNumberOfBuckets); \
- stream << ",\"over_allocated_histogram\":"; \
- DumpJSONArray(stream, over_allocated_histogram_[index], kNumberOfBuckets); \
- stream << "},";
+#define INSTANCE_TYPE_WRAPPER(name) DumpInstanceTypeData(stream, #name, name);
+#define CODE_KIND_WRAPPER(name) \
+ DumpInstanceTypeData(stream, "*CODE_" #name, \
+ FIRST_CODE_KIND_SUB_TYPE + Code::name);
+
+#define FIXED_ARRAY_SUB_INSTANCE_TYPE_WRAPPER(name) \
+ DumpInstanceTypeData(stream, "*FIXED_ARRAY_" #name, \
+ FIRST_FIXED_ARRAY_SUB_TYPE + name);
-#define INSTANCE_TYPE_WRAPPER(name) PRINT_INSTANCE_TYPE_DATA(#name, name)
-#define CODE_KIND_WRAPPER(name) \
- PRINT_INSTANCE_TYPE_DATA("*CODE_" #name, \
- FIRST_CODE_KIND_SUB_TYPE + Code::name)
-#define FIXED_ARRAY_SUB_INSTANCE_TYPE_WRAPPER(name) \
- PRINT_INSTANCE_TYPE_DATA("*FIXED_ARRAY_" #name, \
- FIRST_FIXED_ARRAY_SUB_TYPE + name)
-#define CODE_AGE_WRAPPER(name) \
- PRINT_INSTANCE_TYPE_DATA( \
- "*CODE_AGE_" #name, \
- FIRST_CODE_AGE_SUB_TYPE + Code::k##name##CodeAge - Code::kFirstCodeAge)
+#define CODE_AGE_WRAPPER(name) \
+ DumpInstanceTypeData( \
+ stream, "*CODE_AGE_" #name, \
+ FIRST_CODE_AGE_SUB_TYPE + Code::k##name##CodeAge - Code::kFirstCodeAge);
INSTANCE_TYPE_LIST(INSTANCE_TYPE_WRAPPER);
CODE_KIND_LIST(CODE_KIND_WRAPPER);
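
The object-stats.cc change replaces the large PRINT_INSTANCE_TYPE_DATA macro with out-of-line helpers (PrintInstanceTypeJSON, DumpInstanceTypeData), so each list expansion becomes one call instead of a copy of the whole body. A self-contained sketch of that pattern with a made-up type list (names are illustrative; the noinline attribute is the GCC/Clang spelling):

#include <cstdio>

// Stand-in for V8's INSTANCE_TYPE_LIST X-macro.
#define DEMO_TYPE_LIST(V) V(FIXED_ARRAY) V(MAP) V(CODE)

// Out-of-line helper: the body exists once in the binary instead of being
// stamped out by the preprocessor at every expansion site.
__attribute__((noinline)) static void PrintTypeJSON(const char* name,
                                                    int index) {
  std::printf("{ \"instance_type_name\": \"%s\", \"instance_type\": %d }\n",
              name, index);
}

int main() {
  int index = 0;
#define DEMO_WRAPPER(name) PrintTypeJSON(#name, index++);
  DEMO_TYPE_LIST(DEMO_WRAPPER)
#undef DEMO_WRAPPER
  return 0;
}
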
diff --git a/deps/v8/src/heap/object-stats.h b/deps/v8/src/heap/object-stats.h
index add5a12b04..7d0cfb5a69 100644
--- a/deps/v8/src/heap/object-stats.h
+++ b/deps/v8/src/heap/object-stats.h
@@ -75,6 +75,9 @@ class ObjectStats {
over_allocated;
over_allocated_histogram_[FIRST_FIXED_ARRAY_SUB_TYPE + array_sub_type]
[HistogramIndexFromSize(over_allocated)]++;
+ over_allocated_[InstanceType::FIXED_ARRAY_TYPE] += over_allocated;
+ over_allocated_histogram_[InstanceType::FIXED_ARRAY_TYPE]
+ [HistogramIndexFromSize(over_allocated)]++;
}
return true;
}
@@ -97,6 +100,14 @@ class ObjectStats {
static const int kLastBucket = 1 << kLastBucketShift;
static const int kNumberOfBuckets = kLastBucketShift - kFirstBucketShift + 1;
+ void PrintKeyAndId(const char* key, int gc_count);
+  // The following functions are excluded from inlining to reduce the overall
+  // binary size of V8. On x64 this saves around 80KB.
+ V8_NOINLINE void PrintInstanceTypeJSON(const char* key, int gc_count,
+ const char* name, int index);
+ V8_NOINLINE void DumpInstanceTypeData(std::stringstream& stream,
+ const char* name, int index);
+
int HistogramIndexFromSize(size_t size) {
if (size == 0) return 0;
int idx = static_cast<int>(base::ieee754::log2(static_cast<double>(size))) -
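
object-stats.h also carries the histogram machinery: sizes are bucketed by powers of two, with bucket i covering roughly 1 << (kFirstBucketShift + i) and kNumberOfBuckets spanning kFirstBucketShift through kLastBucketShift. A rough, self-contained sketch of that bucketing (the constants, the function name, and the clamping are illustrative assumptions, not the exact V8 values):

#include <cmath>
#include <cstddef>
#include <cstdio>

// Illustrative constants; the real kFirstBucketShift/kLastBucketShift are
// defined in object-stats.h and may differ.
constexpr int kFirstBucketShift = 5;   // assumed smallest bucket: 32 bytes
constexpr int kLastBucketShift = 20;   // assumed largest bucket: 1 MB
constexpr int kNumberOfBuckets = kLastBucketShift - kFirstBucketShift + 1;

int BucketIndexFromSize(std::size_t size) {
  if (size == 0) return 0;
  int idx = static_cast<int>(std::log2(static_cast<double>(size))) -
            kFirstBucketShift;
  if (idx < 0) idx = 0;
  if (idx >= kNumberOfBuckets) idx = kNumberOfBuckets - 1;
  return idx;
}

int main() {
  std::printf("size 64 -> bucket %d\n", BucketIndexFromSize(64));
  return 0;
}
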
diff --git a/deps/v8/src/heap/objects-visiting-inl.h b/deps/v8/src/heap/objects-visiting-inl.h
index 252b2fe5e2..f3502568d6 100644
--- a/deps/v8/src/heap/objects-visiting-inl.h
+++ b/deps/v8/src/heap/objects-visiting-inl.h
@@ -84,7 +84,10 @@ void StaticNewSpaceVisitor<StaticVisitor>::Initialize() {
table_.Register(kVisitFreeSpace, &VisitFreeSpace);
- table_.Register(kVisitJSWeakCollection, &JSObjectVisitor::Visit);
+ table_.Register(
+ kVisitJSWeakCollection,
+ &FlexibleBodyVisitor<StaticVisitor, JSWeakCollection::BodyDescriptor,
+ int>::Visit);
table_.Register(kVisitJSRegExp, &JSObjectVisitor::Visit);
diff --git a/deps/v8/src/heap/objects-visiting.cc b/deps/v8/src/heap/objects-visiting.cc
index 9393fcc615..d4aa8b2f00 100644
--- a/deps/v8/src/heap/objects-visiting.cc
+++ b/deps/v8/src/heap/objects-visiting.cc
@@ -107,6 +107,8 @@ StaticVisitorBase::VisitorId StaticVisitorBase::GetVisitorId(
case JS_ARGUMENTS_TYPE:
case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
case JS_GENERATOR_OBJECT_TYPE:
+ case JS_FIXED_ARRAY_ITERATOR_TYPE:
+ case JS_MODULE_NAMESPACE_TYPE:
case JS_VALUE_TYPE:
case JS_DATE_TYPE:
case JS_ARRAY_TYPE:
@@ -120,6 +122,43 @@ StaticVisitorBase::VisitorId StaticVisitorBase::GetVisitorId(
case JS_SET_ITERATOR_TYPE:
case JS_MAP_ITERATOR_TYPE:
case JS_STRING_ITERATOR_TYPE:
+
+ case JS_TYPED_ARRAY_KEY_ITERATOR_TYPE:
+ case JS_FAST_ARRAY_KEY_ITERATOR_TYPE:
+ case JS_GENERIC_ARRAY_KEY_ITERATOR_TYPE:
+ case JS_UINT8_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_INT8_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_UINT16_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_INT16_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_UINT32_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_INT32_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_FLOAT32_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_FLOAT64_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_UINT8_CLAMPED_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_FAST_SMI_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_FAST_HOLEY_SMI_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_FAST_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_FAST_HOLEY_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_FAST_DOUBLE_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_FAST_HOLEY_DOUBLE_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_GENERIC_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_UINT8_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_INT8_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_UINT16_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_INT16_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_UINT32_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_INT32_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_FLOAT32_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_FLOAT64_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_UINT8_CLAMPED_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_FAST_SMI_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_FAST_HOLEY_SMI_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_FAST_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_FAST_HOLEY_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_FAST_DOUBLE_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_FAST_HOLEY_DOUBLE_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_GENERIC_ARRAY_VALUE_ITERATOR_TYPE:
+
case JS_PROMISE_TYPE:
case JS_BOUND_FUNCTION_TYPE:
return GetVisitorIdForSize(kVisitJSObject, kVisitJSObjectGeneric,
diff --git a/deps/v8/src/heap/page-parallel-job.h b/deps/v8/src/heap/page-parallel-job.h
index 440c440b7e..ad1d9b3e30 100644
--- a/deps/v8/src/heap/page-parallel-job.h
+++ b/deps/v8/src/heap/page-parallel-job.h
@@ -103,7 +103,8 @@ class PageParallelJob {
delete main_task;
// Wait for background tasks.
for (int i = 0; i < num_tasks_; i++) {
- if (!cancelable_task_manager_->TryAbort(task_ids[i])) {
+ if (cancelable_task_manager_->TryAbort(task_ids[i]) !=
+ CancelableTaskManager::kTaskAborted) {
pending_tasks_->Wait();
}
}
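
page-parallel-job.h now compares TryAbort's result against CancelableTaskManager::kTaskAborted instead of treating it as a bool, and only waits on the semaphore when the abort did not succeed. A small sketch of that control flow with a hypothetical status enum (only kTaskAborted is taken from the diff; the other value and the fake TryAbort are assumptions):

#include <cstdio>

enum class TryAbortResult { kTaskAborted, kTaskRunning };

// Fake manager: pretend even task ids were already picked up by a worker.
static TryAbortResult TryAbort(int task_id) {
  return (task_id % 2 == 0) ? TryAbortResult::kTaskRunning
                            : TryAbortResult::kTaskAborted;
}

int main() {
  for (int i = 0; i < 4; i++) {
    if (TryAbort(i) != TryAbortResult::kTaskAborted) {
      // This is where PageParallelJob would block on pending_tasks_->Wait()
      // until the running task signals completion.
      std::printf("task %d still running: would wait\n", i);
    }
  }
  return 0;
}
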
diff --git a/deps/v8/src/heap/remembered-set.cc b/deps/v8/src/heap/remembered-set.cc
deleted file mode 100644
index c5dab90515..0000000000
--- a/deps/v8/src/heap/remembered-set.cc
+++ /dev/null
@@ -1,107 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/heap/remembered-set.h"
-#include "src/heap/heap-inl.h"
-#include "src/heap/heap.h"
-#include "src/heap/mark-compact.h"
-#include "src/heap/slot-set.h"
-#include "src/heap/spaces.h"
-#include "src/heap/store-buffer.h"
-#include "src/macro-assembler.h"
-
-namespace v8 {
-namespace internal {
-
-template <PointerDirection direction>
-void RememberedSet<direction>::ClearInvalidSlots(Heap* heap) {
- STATIC_ASSERT(direction == OLD_TO_NEW);
- for (MemoryChunk* chunk : *heap->old_space()) {
- SlotSet* slots = GetSlotSet(chunk);
- if (slots != nullptr) {
- slots->Iterate(
- [heap, chunk](Address addr) {
- Object** slot = reinterpret_cast<Object**>(addr);
- return IsValidSlot(heap, chunk, slot) ? KEEP_SLOT : REMOVE_SLOT;
- },
- SlotSet::PREFREE_EMPTY_BUCKETS);
- }
- }
- for (MemoryChunk* chunk : *heap->code_space()) {
- TypedSlotSet* slots = GetTypedSlotSet(chunk);
- if (slots != nullptr) {
- slots->Iterate(
- [heap, chunk](SlotType type, Address host_addr, Address addr) {
- if (Marking::IsBlack(ObjectMarking::MarkBitFrom(host_addr))) {
- return KEEP_SLOT;
- } else {
- return REMOVE_SLOT;
- }
- },
- TypedSlotSet::PREFREE_EMPTY_CHUNKS);
- }
- }
- for (MemoryChunk* chunk : *heap->map_space()) {
- SlotSet* slots = GetSlotSet(chunk);
- if (slots != nullptr) {
- slots->Iterate(
- [heap, chunk](Address addr) {
- Object** slot = reinterpret_cast<Object**>(addr);
- // TODO(mlippautz): In map space all allocations would ideally be
- // map
- // aligned. After establishing this invariant IsValidSlot could just
- // refer to the containing object using alignment and check the mark
- // bits.
- return IsValidSlot(heap, chunk, slot) ? KEEP_SLOT : REMOVE_SLOT;
- },
- SlotSet::PREFREE_EMPTY_BUCKETS);
- }
- }
-}
-
-template <PointerDirection direction>
-void RememberedSet<direction>::VerifyValidSlots(Heap* heap) {
- Iterate(heap, [heap](Address addr) {
- HeapObject* obj =
- heap->mark_compact_collector()->FindBlackObjectBySlotSlow(addr);
- if (obj == nullptr) {
- // The slot is in dead object.
- MemoryChunk* chunk = MemoryChunk::FromAnyPointerAddress(heap, addr);
- AllocationSpace owner = chunk->owner()->identity();
- // The old to old remembered set should not have dead slots.
- CHECK_NE(direction, OLD_TO_OLD);
- // The old to new remembered set is allowed to have slots in dead
- // objects only in map and large object space because these space
- // cannot have raw untagged pointers.
- CHECK(owner == MAP_SPACE || owner == LO_SPACE);
- } else {
- int offset = static_cast<int>(addr - obj->address());
- CHECK(obj->IsValidSlot(offset));
- }
- return KEEP_SLOT;
- });
-}
-
-template <PointerDirection direction>
-bool RememberedSet<direction>::IsValidSlot(Heap* heap, MemoryChunk* chunk,
- Object** slot) {
- STATIC_ASSERT(direction == OLD_TO_NEW);
- Object* object = *slot;
- if (!heap->InNewSpace(object)) {
- return false;
- }
- HeapObject* heap_object = HeapObject::cast(object);
- // If the target object is not black, the source slot must be part
- // of a non-black (dead) object.
- return Marking::IsBlack(ObjectMarking::MarkBitFrom(heap_object)) &&
- heap->mark_compact_collector()->IsSlotInBlackObject(
- chunk, reinterpret_cast<Address>(slot));
-}
-
-template void RememberedSet<OLD_TO_NEW>::ClearInvalidSlots(Heap* heap);
-template void RememberedSet<OLD_TO_NEW>::VerifyValidSlots(Heap* heap);
-template void RememberedSet<OLD_TO_OLD>::VerifyValidSlots(Heap* heap);
-
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/heap/remembered-set.h b/deps/v8/src/heap/remembered-set.h
index 74791b926b..a625b13dbf 100644
--- a/deps/v8/src/heap/remembered-set.h
+++ b/deps/v8/src/heap/remembered-set.h
@@ -45,7 +45,8 @@ class RememberedSet {
// Given a page and a range of slots in that page, this function removes the
// slots from the remembered set.
- static void RemoveRange(MemoryChunk* chunk, Address start, Address end) {
+ static void RemoveRange(MemoryChunk* chunk, Address start, Address end,
+ SlotSet::EmptyBucketMode mode) {
SlotSet* slot_set = GetSlotSet(chunk);
if (slot_set != nullptr) {
uintptr_t start_offset = start - chunk->address();
@@ -53,7 +54,7 @@ class RememberedSet {
DCHECK_LT(start_offset, end_offset);
if (end_offset < static_cast<uintptr_t>(Page::kPageSize)) {
slot_set->RemoveRange(static_cast<int>(start_offset),
- static_cast<int>(end_offset));
+ static_cast<int>(end_offset), mode);
} else {
// The large page has multiple slot sets.
      // Compute slot set indices for the range [start_offset, end_offset).
@@ -67,17 +68,17 @@ class RememberedSet {
end_offset - static_cast<uintptr_t>(end_chunk) * Page::kPageSize);
if (start_chunk == end_chunk) {
slot_set[start_chunk].RemoveRange(offset_in_start_chunk,
- offset_in_end_chunk);
+ offset_in_end_chunk, mode);
} else {
// Clear all slots from start_offset to the end of first chunk.
slot_set[start_chunk].RemoveRange(offset_in_start_chunk,
- Page::kPageSize);
+ Page::kPageSize, mode);
// Clear all slots in intermediate chunks.
for (int i = start_chunk + 1; i < end_chunk; i++) {
- slot_set[i].RemoveRange(0, Page::kPageSize);
+ slot_set[i].RemoveRange(0, Page::kPageSize, mode);
}
// Clear slots from the beginning of the last page to end_offset.
- slot_set[end_chunk].RemoveRange(0, offset_in_end_chunk);
+ slot_set[end_chunk].RemoveRange(0, offset_in_end_chunk, mode);
}
}
}
@@ -201,9 +202,7 @@ class RememberedSet {
// slots that are not part of live objects anymore. This method must be
// called after marking, when the whole transitive closure is known and
// must be called before sweeping when mark bits are still intact.
- static void ClearInvalidSlots(Heap* heap);
-
- static void VerifyValidSlots(Heap* heap);
+ static void ClearInvalidTypedSlots(Heap* heap, MemoryChunk* chunk);
private:
static SlotSet* GetSlotSet(MemoryChunk* chunk) {
diff --git a/deps/v8/src/heap/scavenge-job.h b/deps/v8/src/heap/scavenge-job.h
index fadfccdcc4..f7fbfc1480 100644
--- a/deps/v8/src/heap/scavenge-job.h
+++ b/deps/v8/src/heap/scavenge-job.h
@@ -6,6 +6,7 @@
#define V8_HEAP_SCAVENGE_JOB_H_
#include "src/cancelable-task.h"
+#include "src/globals.h"
#include "src/heap/gc-tracer.h"
namespace v8 {
@@ -16,7 +17,7 @@ class Isolate;
// This class posts idle tasks and performs scavenges in the idle tasks.
-class ScavengeJob {
+class V8_EXPORT_PRIVATE ScavengeJob {
public:
class IdleTask : public CancelableIdleTask {
public:
diff --git a/deps/v8/src/heap/scavenger-inl.h b/deps/v8/src/heap/scavenger-inl.h
index 9671f3615f..4cc215a83e 100644
--- a/deps/v8/src/heap/scavenger-inl.h
+++ b/deps/v8/src/heap/scavenger-inl.h
@@ -62,10 +62,8 @@ SlotCallbackResult Scavenger::CheckAndScavengeObject(Heap* heap,
}
// static
-template <PromotionMode promotion_mode>
-void StaticScavengeVisitor<promotion_mode>::VisitPointer(Heap* heap,
- HeapObject* obj,
- Object** p) {
+void StaticScavengeVisitor::VisitPointer(Heap* heap, HeapObject* obj,
+ Object** p) {
Object* object = *p;
if (!heap->InNewSpace(object)) return;
Scavenger::ScavengeObject(reinterpret_cast<HeapObject**>(p),
diff --git a/deps/v8/src/heap/scavenger.cc b/deps/v8/src/heap/scavenger.cc
index 59d04300e6..cad0e8af25 100644
--- a/deps/v8/src/heap/scavenger.cc
+++ b/deps/v8/src/heap/scavenger.cc
@@ -22,7 +22,7 @@ enum LoggingAndProfiling {
enum MarksHandling { TRANSFER_MARKS, IGNORE_MARKS };
-template <MarksHandling marks_handling, PromotionMode promotion_mode,
+template <MarksHandling marks_handling,
LoggingAndProfiling logging_and_profiling_mode>
class ScavengingVisitor : public StaticVisitorBase {
public:
@@ -185,8 +185,12 @@ class ScavengingVisitor : public StaticVisitorBase {
if (allocation.To(&target)) {
MigrateObject(heap, object, target, object_size);
- // Update slot to new target.
- *slot = target;
+      // Update slot to new target using CAS. A concurrent sweeper thread may
+ // filter the slot concurrently.
+ HeapObject* old = *slot;
+ base::Release_CompareAndSwap(reinterpret_cast<base::AtomicWord*>(slot),
+ reinterpret_cast<base::AtomicWord>(old),
+ reinterpret_cast<base::AtomicWord>(target));
if (object_contents == POINTER_OBJECT) {
heap->promotion_queue()->insert(
@@ -206,8 +210,7 @@ class ScavengingVisitor : public StaticVisitorBase {
SLOW_DCHECK(object->Size() == object_size);
Heap* heap = map->GetHeap();
- if (!heap->ShouldBePromoted<promotion_mode>(object->address(),
- object_size)) {
+ if (!heap->ShouldBePromoted(object->address(), object_size)) {
// A semi-space copy may fail due to fragmentation. In that case, we
// try to promote the object.
if (SemiSpaceCopyObject<alignment>(map, slot, object, object_size)) {
@@ -219,9 +222,7 @@ class ScavengingVisitor : public StaticVisitorBase {
object_size)) {
return;
}
- if (promotion_mode == PROMOTE_MARKED) {
- FatalProcessOutOfMemory("Scavenger: promoting marked\n");
- }
+
// If promotion failed, we try to copy the object to the other semi-space
if (SemiSpaceCopyObject<alignment>(map, slot, object, object_size)) return;
@@ -358,21 +359,19 @@ class ScavengingVisitor : public StaticVisitorBase {
static VisitorDispatchTable<ScavengingCallback> table_;
};
-template <MarksHandling marks_handling, PromotionMode promotion_mode,
+template <MarksHandling marks_handling,
LoggingAndProfiling logging_and_profiling_mode>
-VisitorDispatchTable<ScavengingCallback> ScavengingVisitor<
- marks_handling, promotion_mode, logging_and_profiling_mode>::table_;
+VisitorDispatchTable<ScavengingCallback>
+ ScavengingVisitor<marks_handling, logging_and_profiling_mode>::table_;
// static
void Scavenger::Initialize() {
- ScavengingVisitor<TRANSFER_MARKS, PROMOTE_MARKED,
- LOGGING_AND_PROFILING_DISABLED>::Initialize();
- ScavengingVisitor<IGNORE_MARKS, DEFAULT_PROMOTION,
+ ScavengingVisitor<TRANSFER_MARKS,
LOGGING_AND_PROFILING_DISABLED>::Initialize();
- ScavengingVisitor<TRANSFER_MARKS, PROMOTE_MARKED,
- LOGGING_AND_PROFILING_ENABLED>::Initialize();
- ScavengingVisitor<IGNORE_MARKS, DEFAULT_PROMOTION,
+ ScavengingVisitor<IGNORE_MARKS, LOGGING_AND_PROFILING_DISABLED>::Initialize();
+ ScavengingVisitor<TRANSFER_MARKS,
LOGGING_AND_PROFILING_ENABLED>::Initialize();
+ ScavengingVisitor<IGNORE_MARKS, LOGGING_AND_PROFILING_ENABLED>::Initialize();
}
@@ -397,21 +396,21 @@ void Scavenger::SelectScavengingVisitorsTable() {
if (!heap()->incremental_marking()->IsMarking()) {
if (!logging_and_profiling) {
scavenging_visitors_table_.CopyFrom(
- ScavengingVisitor<IGNORE_MARKS, DEFAULT_PROMOTION,
+ ScavengingVisitor<IGNORE_MARKS,
LOGGING_AND_PROFILING_DISABLED>::GetTable());
} else {
scavenging_visitors_table_.CopyFrom(
- ScavengingVisitor<IGNORE_MARKS, DEFAULT_PROMOTION,
+ ScavengingVisitor<IGNORE_MARKS,
LOGGING_AND_PROFILING_ENABLED>::GetTable());
}
} else {
if (!logging_and_profiling) {
scavenging_visitors_table_.CopyFrom(
- ScavengingVisitor<TRANSFER_MARKS, PROMOTE_MARKED,
+ ScavengingVisitor<TRANSFER_MARKS,
LOGGING_AND_PROFILING_DISABLED>::GetTable());
} else {
scavenging_visitors_table_.CopyFrom(
- ScavengingVisitor<TRANSFER_MARKS, PROMOTE_MARKED,
+ ScavengingVisitor<TRANSFER_MARKS,
LOGGING_AND_PROFILING_ENABLED>::GetTable());
}
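
The scavenger change above replaces the plain slot store with a release compare-and-swap, because a concurrent sweeper may filter (clear) the slot at the same time; if the CAS loses, the sweeper's update is kept. A sketch of that publish pattern using std::atomic in place of V8's base::Release_CompareAndSwap (the std::atomic spelling is an assumption for illustration):

#include <atomic>
#include <cstdint>

// Publish the forwarded target only if the slot still holds the value we
// read; a concurrent thread that already cleared the slot is not overwritten.
void PublishForwardedTarget(std::atomic<std::uintptr_t>* slot,
                            std::uintptr_t target) {
  std::uintptr_t old_value = slot->load(std::memory_order_relaxed);
  slot->compare_exchange_strong(old_value, target, std::memory_order_release,
                                std::memory_order_relaxed);
}

int main() {
  std::atomic<std::uintptr_t> slot{0x1000};
  PublishForwardedTarget(&slot, 0x2000);
  return 0;
}
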
diff --git a/deps/v8/src/heap/scavenger.h b/deps/v8/src/heap/scavenger.h
index f2213b8a36..54fe6ffdf9 100644
--- a/deps/v8/src/heap/scavenger.h
+++ b/deps/v8/src/heap/scavenger.h
@@ -63,9 +63,8 @@ class ScavengeVisitor : public ObjectVisitor {
// Helper class for turning the scavenger into an object visitor that is also
// filtering out non-HeapObjects and objects which do not reside in new space.
-template <PromotionMode promotion_mode>
class StaticScavengeVisitor
- : public StaticNewSpaceVisitor<StaticScavengeVisitor<promotion_mode>> {
+ : public StaticNewSpaceVisitor<StaticScavengeVisitor> {
public:
static inline void VisitPointer(Heap* heap, HeapObject* object, Object** p);
};
diff --git a/deps/v8/src/heap/slot-set.h b/deps/v8/src/heap/slot-set.h
index 017667b482..da61052b8a 100644
--- a/deps/v8/src/heap/slot-set.h
+++ b/deps/v8/src/heap/slot-set.h
@@ -5,6 +5,7 @@
#ifndef V8_SLOT_SET_H
#define V8_SLOT_SET_H
+#include <map>
#include <stack>
#include "src/allocation.h"
@@ -25,7 +26,13 @@ enum SlotCallbackResult { KEEP_SLOT, REMOVE_SLOT };
// Each bucket is a bitmap with a bit corresponding to a single slot offset.
class SlotSet : public Malloced {
public:
- enum IterationMode { PREFREE_EMPTY_BUCKETS, KEEP_EMPTY_BUCKETS };
+ enum EmptyBucketMode {
+ FREE_EMPTY_BUCKETS, // An empty bucket will be deallocated immediately.
+ PREFREE_EMPTY_BUCKETS, // An empty bucket will be unlinked from the slot
+ // set, but deallocated on demand by a sweeper
+ // thread.
+ KEEP_EMPTY_BUCKETS // An empty bucket will be kept.
+ };
SlotSet() {
for (int i = 0; i < kBuckets; i++) {
@@ -76,7 +83,7 @@ class SlotSet : public Malloced {
// The slot offsets specify a range of slots at addresses:
// [page_start_ + start_offset ... page_start_ + end_offset).
- void RemoveRange(int start_offset, int end_offset) {
+ void RemoveRange(int start_offset, int end_offset, EmptyBucketMode mode) {
CHECK_LE(end_offset, 1 << kPageSizeBits);
DCHECK_LE(start_offset, end_offset);
int start_bucket, start_cell, start_bit;
@@ -93,12 +100,10 @@ class SlotSet : public Malloced {
int current_cell = start_cell;
ClearCell(current_bucket, current_cell, ~start_mask);
current_cell++;
+ base::AtomicValue<uint32_t>* bucket_ptr = bucket[current_bucket].Value();
if (current_bucket < end_bucket) {
- if (bucket[current_bucket].Value() != nullptr) {
- while (current_cell < kCellsPerBucket) {
- bucket[current_bucket].Value()[current_cell].SetValue(0);
- current_cell++;
- }
+ if (bucket_ptr != nullptr) {
+ ClearBucket(bucket_ptr, current_cell, kCellsPerBucket);
}
// The rest of the current bucket is cleared.
// Move on to the next bucket.
@@ -108,17 +113,27 @@ class SlotSet : public Malloced {
DCHECK(current_bucket == end_bucket ||
(current_bucket < end_bucket && current_cell == 0));
while (current_bucket < end_bucket) {
- ReleaseBucket(current_bucket);
+ if (mode == PREFREE_EMPTY_BUCKETS) {
+ PreFreeEmptyBucket(current_bucket);
+ } else if (mode == FREE_EMPTY_BUCKETS) {
+ ReleaseBucket(current_bucket);
+ } else {
+ DCHECK(mode == KEEP_EMPTY_BUCKETS);
+ bucket_ptr = bucket[current_bucket].Value();
+ if (bucket_ptr) {
+ ClearBucket(bucket_ptr, 0, kCellsPerBucket);
+ }
+ }
current_bucket++;
}
// All buckets between start_bucket and end_bucket are cleared.
+ bucket_ptr = bucket[current_bucket].Value();
DCHECK(current_bucket == end_bucket && current_cell <= end_cell);
- if (current_bucket == kBuckets ||
- bucket[current_bucket].Value() == nullptr) {
+ if (current_bucket == kBuckets || bucket_ptr == nullptr) {
return;
}
while (current_cell < end_cell) {
- bucket[current_bucket].Value()[current_cell].SetValue(0);
+ bucket_ptr[current_cell].SetValue(0);
current_cell++;
}
// All cells between start_cell and end_cell are cleared.
@@ -148,19 +163,19 @@ class SlotSet : public Malloced {
// else return REMOVE_SLOT;
// });
template <typename Callback>
- int Iterate(Callback callback, IterationMode mode) {
+ int Iterate(Callback callback, EmptyBucketMode mode) {
int new_count = 0;
for (int bucket_index = 0; bucket_index < kBuckets; bucket_index++) {
- if (bucket[bucket_index].Value() != nullptr) {
+ base::AtomicValue<uint32_t>* current_bucket =
+ bucket[bucket_index].Value();
+ if (current_bucket != nullptr) {
int in_bucket_count = 0;
- base::AtomicValue<uint32_t>* current_bucket =
- bucket[bucket_index].Value();
int cell_offset = bucket_index * kBitsPerBucket;
for (int i = 0; i < kCellsPerBucket; i++, cell_offset += kBitsPerCell) {
if (current_bucket[i].Value()) {
uint32_t cell = current_bucket[i].Value();
uint32_t old_cell = cell;
- uint32_t new_cell = cell;
+ uint32_t mask = 0;
while (cell) {
int bit_offset = base::bits::CountTrailingZeros32(cell);
uint32_t bit_mask = 1u << bit_offset;
@@ -168,10 +183,11 @@ class SlotSet : public Malloced {
if (callback(page_start_ + slot) == KEEP_SLOT) {
++in_bucket_count;
} else {
- new_cell ^= bit_mask;
+ mask |= bit_mask;
}
cell ^= bit_mask;
}
+ uint32_t new_cell = old_cell & ~mask;
if (old_cell != new_cell) {
while (!current_bucket[i].TrySetValue(old_cell, new_cell)) {
// If TrySetValue fails, the cell must have changed. We just
@@ -180,17 +196,13 @@ class SlotSet : public Malloced {
// method will only be called on the main thread and filtering
// threads will only remove slots.
old_cell = current_bucket[i].Value();
- new_cell &= old_cell;
+ new_cell = old_cell & ~mask;
}
}
}
}
if (mode == PREFREE_EMPTY_BUCKETS && in_bucket_count == 0) {
- base::LockGuard<base::Mutex> guard(&to_be_freed_buckets_mutex_);
- base::AtomicValue<uint32_t>* bucket_ptr =
- bucket[bucket_index].Value();
- to_be_freed_buckets_.push(bucket_ptr);
- bucket[bucket_index].SetValue(nullptr);
+ PreFreeEmptyBucket(bucket_index);
}
new_count += in_bucket_count;
}
@@ -226,6 +238,26 @@ class SlotSet : public Malloced {
return result;
}
+ void ClearBucket(base::AtomicValue<uint32_t>* bucket, int start_cell,
+ int end_cell) {
+ DCHECK_GE(start_cell, 0);
+ DCHECK_LE(end_cell, kCellsPerBucket);
+ int current_cell = start_cell;
+ while (current_cell < kCellsPerBucket) {
+ bucket[current_cell].SetValue(0);
+ current_cell++;
+ }
+ }
+
+ void PreFreeEmptyBucket(int bucket_index) {
+ base::AtomicValue<uint32_t>* bucket_ptr = bucket[bucket_index].Value();
+ if (bucket_ptr != nullptr) {
+ base::LockGuard<base::Mutex> guard(&to_be_freed_buckets_mutex_);
+ to_be_freed_buckets_.push(bucket_ptr);
+ bucket[bucket_index].SetValue(nullptr);
+ }
+ }
+
void ReleaseBucket(int bucket_index) {
DeleteArray<base::AtomicValue<uint32_t>>(bucket[bucket_index].Value());
bucket[bucket_index].SetValue(nullptr);
@@ -429,6 +461,28 @@ class TypedSlotSet {
}
}
+ void RemoveInvaldSlots(std::map<uint32_t, uint32_t>& invalid_ranges) {
+ Chunk* chunk = chunk_.Value();
+ while (chunk != nullptr) {
+ TypedSlot* buffer = chunk->buffer.Value();
+ int count = chunk->count.Value();
+ for (int i = 0; i < count; i++) {
+ uint32_t host_offset = buffer[i].host_offset();
+ std::map<uint32_t, uint32_t>::iterator upper_bound =
+ invalid_ranges.upper_bound(host_offset);
+ if (upper_bound == invalid_ranges.begin()) continue;
+      // upper_bound points to the invalid range after the given slot. Hence,
+      // we have to go to the previous element.
+ upper_bound--;
+ DCHECK_LE(upper_bound->first, host_offset);
+ if (upper_bound->second > host_offset) {
+ buffer[i].Clear();
+ }
+ }
+ chunk = chunk->next.Value();
+ }
+ }
+
private:
static const int kInitialBufferSize = 100;
static const int kMaxBufferSize = 16 * KB;
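
Two patterns in the slot-set.h hunks are worth spelling out: RemoveRange and Iterate now take an EmptyBucketMode that decides whether emptied buckets are freed immediately, queued for a sweeper thread (pre-freed), or kept; and the Iterate retry loop rebuilds the new cell from the freshly loaded value and an accumulated removal mask, so removals done concurrently by filtering threads are never resurrected. A sketch of that cell update with std::atomic standing in for base::AtomicValue (an assumption):

#include <atomic>
#include <cstdint>
#include <cstdio>

// 'mask' holds the bits the callback asked to remove. On contention we
// reload the cell and reapply the mask, mirroring the diff's
// new_cell = old_cell & ~mask.
void RemoveBits(std::atomic<std::uint32_t>* cell, std::uint32_t mask) {
  std::uint32_t old_cell = cell->load(std::memory_order_relaxed);
  std::uint32_t new_cell = old_cell & ~mask;
  while (old_cell != new_cell &&
         !cell->compare_exchange_weak(old_cell, new_cell)) {
    // compare_exchange_weak refreshed old_cell with the current value.
    new_cell = old_cell & ~mask;
  }
}

int main() {
  std::atomic<std::uint32_t> cell{0xF};
  RemoveBits(&cell, 0x5);
  std::printf("cell = 0x%x\n", cell.load());  // prints 0xa
  return 0;
}
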
diff --git a/deps/v8/src/heap/spaces-inl.h b/deps/v8/src/heap/spaces-inl.h
index 314d22f9a6..f3f9215f3d 100644
--- a/deps/v8/src/heap/spaces-inl.h
+++ b/deps/v8/src/heap/spaces-inl.h
@@ -203,14 +203,15 @@ Page* Page::Initialize(Heap* heap, MemoryChunk* chunk, Executability executable,
return page;
}
-Page* Page::ConvertNewToOld(Page* old_page, PagedSpace* new_owner) {
+Page* Page::ConvertNewToOld(Page* old_page) {
+ OldSpace* old_space = old_page->heap()->old_space();
DCHECK(old_page->InNewSpace());
- old_page->set_owner(new_owner);
+ old_page->set_owner(old_space);
old_page->SetFlags(0, ~0);
- new_owner->AccountCommitted(old_page->size());
+ old_space->AccountCommitted(old_page->size());
Page* new_page = Page::Initialize<kDoNotFreeMemory>(
- old_page->heap(), old_page, NOT_EXECUTABLE, new_owner);
- new_page->InsertAfter(new_owner->anchor()->prev_page());
+ old_page->heap(), old_page, NOT_EXECUTABLE, old_space);
+ new_page->InsertAfter(old_space->anchor()->prev_page());
return new_page;
}
@@ -279,6 +280,7 @@ intptr_t PagedSpace::RelinkFreeListCategories(Page* page) {
added += category->available();
category->Relink();
});
+ DCHECK_EQ(page->AvailableInFreeList(), page->available_in_free_list());
return added;
}
@@ -597,8 +599,7 @@ LargePage* LargePage::Initialize(Heap* heap, MemoryChunk* chunk,
return static_cast<LargePage*>(chunk);
}
-
-intptr_t LargeObjectSpace::Available() {
+size_t LargeObjectSpace::Available() {
return ObjectSizeFor(heap()->memory_allocator()->Available());
}
diff --git a/deps/v8/src/heap/spaces.cc b/deps/v8/src/heap/spaces.cc
index c2043ed902..e0e6d12fda 100644
--- a/deps/v8/src/heap/spaces.cc
+++ b/deps/v8/src/heap/spaces.cc
@@ -107,7 +107,7 @@ bool CodeRange::SetUp(size_t requested) {
}
const size_t reserved_area =
- kReservedCodeRangePages * base::OS::CommitPageSize();
+ kReservedCodeRangePages * MemoryAllocator::GetCommitPageSize();
if (requested < (kMaximalCodeRangeSize - reserved_area))
requested += reserved_area;
@@ -294,8 +294,8 @@ MemoryAllocator::MemoryAllocator(Isolate* isolate)
highest_ever_allocated_(reinterpret_cast<void*>(0)),
unmapper_(this) {}
-bool MemoryAllocator::SetUp(intptr_t capacity, intptr_t capacity_executable,
- intptr_t code_range_size) {
+bool MemoryAllocator::SetUp(size_t capacity, size_t capacity_executable,
+ size_t code_range_size) {
capacity_ = RoundUp(capacity, Page::kPageSize);
capacity_executable_ = RoundUp(capacity_executable, Page::kPageSize);
DCHECK_GE(capacity_, capacity_executable_);
@@ -304,23 +304,17 @@ bool MemoryAllocator::SetUp(intptr_t capacity, intptr_t capacity_executable,
size_executable_ = 0;
code_range_ = new CodeRange(isolate_);
- if (!code_range_->SetUp(static_cast<size_t>(code_range_size))) return false;
+ if (!code_range_->SetUp(code_range_size)) return false;
return true;
}
void MemoryAllocator::TearDown() {
- unmapper()->WaitUntilCompleted();
-
- MemoryChunk* chunk = nullptr;
- while ((chunk = unmapper()->TryGetPooledMemoryChunkSafe()) != nullptr) {
- FreeMemory(reinterpret_cast<Address>(chunk), MemoryChunk::kPageSize,
- NOT_EXECUTABLE);
- }
+ unmapper()->TearDown();
// Check that spaces were torn down before MemoryAllocator.
- DCHECK_EQ(size_.Value(), 0);
+ DCHECK_EQ(size_.Value(), 0u);
// TODO(gc) this will be true again when we fix FreeMemory.
// DCHECK(size_executable_ == 0);
capacity_ = 0;
@@ -384,6 +378,13 @@ void MemoryAllocator::Unmapper::PerformFreeMemoryOnQueuedChunks() {
}
}
+void MemoryAllocator::Unmapper::TearDown() {
+ WaitUntilCompleted();
+ ReconsiderDelayedChunks();
+ CHECK(delayed_regular_chunks_.empty());
+ PerformFreeMemoryOnQueuedChunks();
+}
+
void MemoryAllocator::Unmapper::ReconsiderDelayedChunks() {
std::list<MemoryChunk*> delayed_chunks(std::move(delayed_regular_chunks_));
// Move constructed, so the permanent list should be empty.
@@ -395,11 +396,12 @@ void MemoryAllocator::Unmapper::ReconsiderDelayedChunks() {
bool MemoryAllocator::CanFreeMemoryChunk(MemoryChunk* chunk) {
MarkCompactCollector* mc = isolate_->heap()->mark_compact_collector();
- // We cannot free memory chunks in new space while the sweeper is running
- // since a sweeper thread might be stuck right before trying to lock the
- // corresponding page.
- return !chunk->InNewSpace() || (mc == nullptr) || !FLAG_concurrent_sweeping ||
- mc->sweeper().IsSweepingCompleted();
+ // We cannot free a memory chunk in new space while the sweeper is running
+ // because the memory chunk can be in the queue of a sweeper task.
+ // Chunks in old generation are unmapped if they are empty.
+ DCHECK(chunk->InNewSpace() || chunk->SweepingDone());
+ return !chunk->InNewSpace() || mc == nullptr || !FLAG_concurrent_sweeping ||
+ mc->sweeper().IsSweepingCompleted(NEW_SPACE);
}
bool MemoryAllocator::CommitMemory(Address base, size_t size,
@@ -478,6 +480,7 @@ Address MemoryAllocator::AllocateAlignedMemory(
// Failed to commit the body. Release the mapping and any partially
// commited regions inside it.
reservation.Release();
+ size_.Decrement(reserve_size);
return NULL;
}
@@ -513,7 +516,6 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
chunk->typed_old_to_new_slots_.SetValue(nullptr);
chunk->typed_old_to_old_slots_ = nullptr;
chunk->skip_list_ = nullptr;
- chunk->write_barrier_counter_ = kWriteBarrierCounterGranularity;
chunk->progress_bar_ = 0;
chunk->high_water_mark_.SetValue(static_cast<intptr_t>(area_start - base));
chunk->concurrent_sweeping_state().SetValue(kSweepingDone);
@@ -525,7 +527,6 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
chunk->set_next_chunk(nullptr);
chunk->set_prev_chunk(nullptr);
chunk->local_tracker_ = nullptr;
- chunk->black_area_end_marker_map_ = nullptr;
DCHECK(OFFSET_OF(MemoryChunk, flags_) == kFlagsOffset);
@@ -547,9 +548,9 @@ bool MemoryChunk::CommitArea(size_t requested) {
IsFlagSet(IS_EXECUTABLE) ? MemoryAllocator::CodePageGuardSize() : 0;
size_t header_size = area_start() - address() - guard_size;
size_t commit_size =
- RoundUp(header_size + requested, base::OS::CommitPageSize());
+ RoundUp(header_size + requested, MemoryAllocator::GetCommitPageSize());
size_t committed_size = RoundUp(header_size + (area_end() - area_start()),
- base::OS::CommitPageSize());
+ MemoryAllocator::GetCommitPageSize());
if (commit_size > committed_size) {
// Commit size should be less or equal than the reserved size.
@@ -617,8 +618,8 @@ void MemoryChunk::Unlink() {
}
void MemoryAllocator::ShrinkChunk(MemoryChunk* chunk, size_t bytes_to_shrink) {
- DCHECK_GE(bytes_to_shrink, static_cast<size_t>(base::OS::CommitPageSize()));
- DCHECK_EQ(0, bytes_to_shrink % base::OS::CommitPageSize());
+ DCHECK_GE(bytes_to_shrink, static_cast<size_t>(GetCommitPageSize()));
+ DCHECK_EQ(0u, bytes_to_shrink % GetCommitPageSize());
Address free_start = chunk->area_end_ - bytes_to_shrink;
  // Don't adjust the size of the page. The area is just uncommitted but not
// released.
@@ -628,22 +629,22 @@ void MemoryAllocator::ShrinkChunk(MemoryChunk* chunk, size_t bytes_to_shrink) {
if (chunk->reservation_.IsReserved())
chunk->reservation_.Guard(chunk->area_end_);
else
- base::OS::Guard(chunk->area_end_, base::OS::CommitPageSize());
+ base::OS::Guard(chunk->area_end_, GetCommitPageSize());
}
}
-MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t reserve_area_size,
- intptr_t commit_area_size,
+MemoryChunk* MemoryAllocator::AllocateChunk(size_t reserve_area_size,
+ size_t commit_area_size,
Executability executable,
Space* owner) {
- DCHECK(commit_area_size <= reserve_area_size);
+ DCHECK_LE(commit_area_size, reserve_area_size);
size_t chunk_size;
Heap* heap = isolate_->heap();
- Address base = NULL;
+ Address base = nullptr;
base::VirtualMemory reservation;
- Address area_start = NULL;
- Address area_end = NULL;
+ Address area_start = nullptr;
+ Address area_end = nullptr;
//
// MemoryChunk layout:
@@ -677,7 +678,7 @@ MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t reserve_area_size,
if (executable == EXECUTABLE) {
chunk_size = RoundUp(CodePageAreaStartOffset() + reserve_area_size,
- base::OS::CommitPageSize()) +
+ GetCommitPageSize()) +
CodePageGuardSize();
// Check executable memory limit.
@@ -689,7 +690,7 @@ MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t reserve_area_size,
// Size of header (not executable) plus area (executable).
size_t commit_size = RoundUp(CodePageGuardStartOffset() + commit_area_size,
- base::OS::CommitPageSize());
+ GetCommitPageSize());
// Allocate executable memory either from code range or from the
// OS.
#ifdef V8_TARGET_ARCH_MIPS64
@@ -725,10 +726,10 @@ MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t reserve_area_size,
area_end = area_start + commit_area_size;
} else {
chunk_size = RoundUp(MemoryChunk::kObjectStartOffset + reserve_area_size,
- base::OS::CommitPageSize());
+ GetCommitPageSize());
size_t commit_size =
RoundUp(MemoryChunk::kObjectStartOffset + commit_area_size,
- base::OS::CommitPageSize());
+ GetCommitPageSize());
base =
AllocateAlignedMemory(chunk_size, commit_size, MemoryChunk::kAlignment,
executable, &reservation);
@@ -777,6 +778,14 @@ void Page::ResetFreeListStatistics() {
available_in_free_list_ = 0;
}
+size_t Page::AvailableInFreeList() {
+ size_t sum = 0;
+ ForAllFreeListCategories([this, &sum](FreeListCategory* category) {
+ sum += category->available();
+ });
+ return sum;
+}
+
size_t Page::ShrinkToHighWaterMark() {
// Shrink pages to high water mark. The water mark points either to a filler
// or the area_end.
@@ -805,7 +814,7 @@ size_t Page::ShrinkToHighWaterMark() {
size_t unused = RoundDown(
static_cast<size_t>(area_end() - filler->address() - FreeSpace::kSize),
- base::OS::CommitPageSize());
+ MemoryAllocator::GetCommitPageSize());
if (unused > 0) {
if (FLAG_trace_gc_verbose) {
PrintIsolate(heap()->isolate(), "Shrinking page %p: end %p -> %p\n",
@@ -914,11 +923,11 @@ template void MemoryAllocator::Free<MemoryAllocator::kPooledAndQueue>(
MemoryChunk* chunk);
template <MemoryAllocator::AllocationMode alloc_mode, typename SpaceType>
-Page* MemoryAllocator::AllocatePage(intptr_t size, SpaceType* owner,
+Page* MemoryAllocator::AllocatePage(size_t size, SpaceType* owner,
Executability executable) {
MemoryChunk* chunk = nullptr;
if (alloc_mode == kPooled) {
- DCHECK_EQ(size, static_cast<intptr_t>(MemoryChunk::kAllocatableMemory));
+ DCHECK_EQ(size, static_cast<size_t>(MemoryChunk::kAllocatableMemory));
DCHECK_EQ(executable, NOT_EXECUTABLE);
chunk = AllocatePagePooled(owner);
}
@@ -931,15 +940,15 @@ Page* MemoryAllocator::AllocatePage(intptr_t size, SpaceType* owner,
template Page*
MemoryAllocator::AllocatePage<MemoryAllocator::kRegular, PagedSpace>(
- intptr_t size, PagedSpace* owner, Executability executable);
+ size_t size, PagedSpace* owner, Executability executable);
template Page*
MemoryAllocator::AllocatePage<MemoryAllocator::kRegular, SemiSpace>(
- intptr_t size, SemiSpace* owner, Executability executable);
+ size_t size, SemiSpace* owner, Executability executable);
template Page*
MemoryAllocator::AllocatePage<MemoryAllocator::kPooled, SemiSpace>(
- intptr_t size, SemiSpace* owner, Executability executable);
+ size_t size, SemiSpace* owner, Executability executable);
-LargePage* MemoryAllocator::AllocateLargePage(intptr_t size,
+LargePage* MemoryAllocator::AllocateLargePage(size_t size,
LargeObjectSpace* owner,
Executability executable) {
MemoryChunk* chunk = AllocateChunk(size, size, executable, owner);
@@ -1000,30 +1009,35 @@ void MemoryAllocator::ReportStatistics() {
}
#endif
-
-int MemoryAllocator::CodePageGuardStartOffset() {
+size_t MemoryAllocator::CodePageGuardStartOffset() {
// We are guarding code pages: the first OS page after the header
// will be protected as non-writable.
- return RoundUp(Page::kObjectStartOffset, base::OS::CommitPageSize());
+ return RoundUp(Page::kObjectStartOffset, GetCommitPageSize());
}
-
-int MemoryAllocator::CodePageGuardSize() {
- return static_cast<int>(base::OS::CommitPageSize());
+size_t MemoryAllocator::CodePageGuardSize() {
+ return static_cast<int>(GetCommitPageSize());
}
-
-int MemoryAllocator::CodePageAreaStartOffset() {
+size_t MemoryAllocator::CodePageAreaStartOffset() {
// We are guarding code pages: the first OS page after the header
// will be protected as non-writable.
return CodePageGuardStartOffset() + CodePageGuardSize();
}
-
-int MemoryAllocator::CodePageAreaEndOffset() {
+size_t MemoryAllocator::CodePageAreaEndOffset() {
// We are guarding code pages: the last OS page will be protected as
// non-writable.
- return Page::kPageSize - static_cast<int>(base::OS::CommitPageSize());
+ return Page::kPageSize - static_cast<int>(GetCommitPageSize());
+}
+
+intptr_t MemoryAllocator::GetCommitPageSize() {
+ if (FLAG_v8_os_page_size != 0) {
+ DCHECK(base::bits::IsPowerOfTwo32(FLAG_v8_os_page_size));
+ return FLAG_v8_os_page_size * KB;
+ } else {
+ return base::OS::CommitPageSize();
+ }
}
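
GetCommitPageSize, added above, lets --v8-os-page-size override the OS commit page size: the flag is given in KB and must be a power of two. A standalone sketch of the same logic (kDefaultOsPageSize stands in for base::OS::CommitPageSize() and is an assumption):

#include <cassert>
#include <cstddef>
#include <cstdio>

constexpr std::size_t kDefaultOsPageSize = 4096;  // assumed fallback

std::size_t CommitPageSize(unsigned flag_page_size_kb) {
  if (flag_page_size_kb != 0) {
    // Power-of-two check, mirroring the DCHECK in the diff.
    assert((flag_page_size_kb & (flag_page_size_kb - 1)) == 0);
    return static_cast<std::size_t>(flag_page_size_kb) * 1024;
  }
  return kDefaultOsPageSize;
}

int main() {
  std::printf("%zu\n", CommitPageSize(0));   // 4096
  std::printf("%zu\n", CommitPageSize(64));  // 65536
  return 0;
}
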
@@ -1250,6 +1264,7 @@ void PagedSpace::MergeCompactionSpace(CompactionSpace* other) {
p->set_owner(this);
p->InsertAfter(anchor_.prev_page());
RelinkFreeListCategories(p);
+ DCHECK_EQ(p->AvailableInFreeList(), p->available_in_free_list());
}
}
@@ -1277,7 +1292,7 @@ Object* PagedSpace::FindObject(Address addr) {
// Note: this function can only be called on iterable spaces.
DCHECK(!heap()->mark_compact_collector()->in_use());
- if (!Contains(addr)) return Smi::FromInt(0); // Signaling not found.
+ if (!Contains(addr)) return Smi::kZero; // Signaling not found.
Page* p = Page::FromAddress(addr);
HeapObjectIterator it(p);
@@ -1288,7 +1303,7 @@ Object* PagedSpace::FindObject(Address addr) {
}
UNREACHABLE();
- return Smi::FromInt(0);
+ return Smi::kZero;
}
void PagedSpace::ShrinkImmortalImmovablePages() {
@@ -1378,12 +1393,6 @@ void PagedSpace::EmptyAllocationInfo() {
if (heap()->incremental_marking()->black_allocation()) {
Page* page = Page::FromAllocationAreaAddress(current_top);
- // We have to remember the end of the current black allocation area if
- // something was allocated in the current bump pointer range.
- if (allocation_info_.original_top() != current_top) {
- Address end_black_area = current_top - kPointerSize;
- page->AddBlackAreaEndMarker(end_black_area);
- }
// Clear the bits in the unused black area.
if (current_top != current_limit) {
@@ -1394,7 +1403,8 @@ void PagedSpace::EmptyAllocationInfo() {
}
SetTopAndLimit(NULL, NULL);
- Free(current_top, static_cast<int>(current_limit - current_top));
+ DCHECK_GE(current_limit, current_top);
+ Free(current_top, current_limit - current_top);
}
void PagedSpace::IncreaseCapacity(size_t bytes) {
@@ -1408,8 +1418,6 @@ void PagedSpace::ReleasePage(Page* page) {
free_list_.EvictFreeListItems(page);
DCHECK(!free_list_.ContainsPageFreeListItems(page));
- page->ReleaseBlackAreaEndMarkerMap();
-
if (Page::FromAllocationAreaAddress(allocation_info_.top()) == page) {
allocation_info_.Reset(nullptr, nullptr);
}
@@ -1481,10 +1489,11 @@ void PagedSpace::Verify(ObjectVisitor* visitor) {
// -----------------------------------------------------------------------------
// NewSpace implementation
-bool NewSpace::SetUp(int initial_semispace_capacity,
- int maximum_semispace_capacity) {
+bool NewSpace::SetUp(size_t initial_semispace_capacity,
+ size_t maximum_semispace_capacity) {
DCHECK(initial_semispace_capacity <= maximum_semispace_capacity);
- DCHECK(base::bits::IsPowerOfTwo32(maximum_semispace_capacity));
+ DCHECK(base::bits::IsPowerOfTwo32(
+ static_cast<uint32_t>(maximum_semispace_capacity)));
to_space_.SetUp(initial_semispace_capacity, maximum_semispace_capacity);
from_space_.SetUp(initial_semispace_capacity, maximum_semispace_capacity);
@@ -1529,9 +1538,9 @@ void NewSpace::Flip() { SemiSpace::Swap(&from_space_, &to_space_); }
void NewSpace::Grow() {
// Double the semispace size but only up to maximum capacity.
DCHECK(TotalCapacity() < MaximumCapacity());
- int new_capacity =
+ size_t new_capacity =
Min(MaximumCapacity(),
- FLAG_semi_space_growth_factor * static_cast<int>(TotalCapacity()));
+ static_cast<size_t>(FLAG_semi_space_growth_factor) * TotalCapacity());
if (to_space_.GrowTo(new_capacity)) {
// Only grow from space if we managed to grow to-space.
if (!from_space_.GrowTo(new_capacity)) {
@@ -1549,8 +1558,8 @@ void NewSpace::Grow() {
void NewSpace::Shrink() {
- int new_capacity = Max(InitialTotalCapacity(), 2 * static_cast<int>(Size()));
- int rounded_new_capacity = RoundUp(new_capacity, Page::kPageSize);
+ size_t new_capacity = Max(InitialTotalCapacity(), 2 * Size());
+ size_t rounded_new_capacity = RoundUp(new_capacity, Page::kPageSize);
if (rounded_new_capacity < TotalCapacity() &&
to_space_.ShrinkTo(rounded_new_capacity)) {
// Only shrink from-space if we managed to shrink to-space.
@@ -1577,7 +1586,8 @@ bool NewSpace::Rebalance() {
bool SemiSpace::EnsureCurrentCapacity() {
if (is_committed()) {
- const int expected_pages = current_capacity_ / Page::kPageSize;
+ const int expected_pages =
+ static_cast<int>(current_capacity_ / Page::kPageSize);
int actual_pages = 0;
Page* current_page = anchor()->next_page();
while (current_page != anchor()) {
@@ -1604,7 +1614,7 @@ bool SemiSpace::EnsureCurrentCapacity() {
current_page->SetFlags(anchor()->prev_page()->GetFlags(),
Page::kCopyAllFlags);
heap()->CreateFillerObjectAt(current_page->area_start(),
- current_page->area_size(),
+ static_cast<int>(current_page->area_size()),
ClearRecordedSlots::kNo);
}
}
@@ -1878,8 +1888,8 @@ void NewSpace::Verify() {
// -----------------------------------------------------------------------------
// SemiSpace implementation
-void SemiSpace::SetUp(int initial_capacity, int maximum_capacity) {
- DCHECK_GE(maximum_capacity, Page::kPageSize);
+void SemiSpace::SetUp(size_t initial_capacity, size_t maximum_capacity) {
+ DCHECK_GE(maximum_capacity, static_cast<size_t>(Page::kPageSize));
minimum_capacity_ = RoundDown(initial_capacity, Page::kPageSize);
current_capacity_ = minimum_capacity_;
maximum_capacity_ = RoundDown(maximum_capacity, Page::kPageSize);
@@ -1902,7 +1912,7 @@ void SemiSpace::TearDown() {
bool SemiSpace::Commit() {
DCHECK(!is_committed());
Page* current = anchor();
- const int num_pages = current_capacity_ / Page::kPageSize;
+ const int num_pages = static_cast<int>(current_capacity_ / Page::kPageSize);
for (int pages_added = 0; pages_added < num_pages; pages_added++) {
Page* new_page =
heap()->memory_allocator()->AllocatePage<MemoryAllocator::kPooled>(
@@ -1948,17 +1958,16 @@ size_t SemiSpace::CommittedPhysicalMemory() {
return size;
}
-
-bool SemiSpace::GrowTo(int new_capacity) {
+bool SemiSpace::GrowTo(size_t new_capacity) {
if (!is_committed()) {
if (!Commit()) return false;
}
- DCHECK_EQ(new_capacity & Page::kPageAlignmentMask, 0);
+ DCHECK_EQ(new_capacity & Page::kPageAlignmentMask, 0u);
DCHECK_LE(new_capacity, maximum_capacity_);
DCHECK_GT(new_capacity, current_capacity_);
- const int delta = new_capacity - current_capacity_;
+ const size_t delta = new_capacity - current_capacity_;
DCHECK(IsAligned(delta, base::OS::AllocateAlignment()));
- const int delta_pages = delta / Page::kPageSize;
+ const int delta_pages = static_cast<int>(delta / Page::kPageSize);
Page* last_page = anchor()->prev_page();
DCHECK_NE(last_page, anchor());
for (int pages_added = 0; pages_added < delta_pages; pages_added++) {
@@ -1993,14 +2002,14 @@ void SemiSpace::RewindPages(Page* start, int num_pages) {
}
}
-bool SemiSpace::ShrinkTo(int new_capacity) {
- DCHECK_EQ(new_capacity & Page::kPageAlignmentMask, 0);
+bool SemiSpace::ShrinkTo(size_t new_capacity) {
+ DCHECK_EQ(new_capacity & Page::kPageAlignmentMask, 0u);
DCHECK_GE(new_capacity, minimum_capacity_);
DCHECK_LT(new_capacity, current_capacity_);
if (is_committed()) {
- const int delta = current_capacity_ - new_capacity;
+ const size_t delta = current_capacity_ - new_capacity;
DCHECK(IsAligned(delta, base::OS::AllocateAlignment()));
- int delta_pages = delta / Page::kPageSize;
+ int delta_pages = static_cast<int>(delta / Page::kPageSize);
Page* new_last_page;
Page* last_page;
while (delta_pages > 0) {
@@ -2343,7 +2352,7 @@ void FreeListCategory::Reset() {
available_ = 0;
}
-FreeSpace* FreeListCategory::PickNodeFromList(int* node_size) {
+FreeSpace* FreeListCategory::PickNodeFromList(size_t* node_size) {
DCHECK(page()->CanAllocate());
FreeSpace* node = top();
@@ -2354,8 +2363,8 @@ FreeSpace* FreeListCategory::PickNodeFromList(int* node_size) {
return node;
}
-FreeSpace* FreeListCategory::TryPickNodeFromList(int minimum_size,
- int* node_size) {
+FreeSpace* FreeListCategory::TryPickNodeFromList(size_t minimum_size,
+ size_t* node_size) {
DCHECK(page()->CanAllocate());
FreeSpace* node = PickNodeFromList(node_size);
@@ -2367,15 +2376,16 @@ FreeSpace* FreeListCategory::TryPickNodeFromList(int minimum_size,
return node;
}
-FreeSpace* FreeListCategory::SearchForNodeInList(int minimum_size,
- int* node_size) {
+FreeSpace* FreeListCategory::SearchForNodeInList(size_t minimum_size,
+ size_t* node_size) {
DCHECK(page()->CanAllocate());
FreeSpace* prev_non_evac_node = nullptr;
for (FreeSpace* cur_node = top(); cur_node != nullptr;
cur_node = cur_node->next()) {
- int size = cur_node->size();
+ size_t size = cur_node->size();
if (size >= minimum_size) {
+ DCHECK_GE(available_, size);
available_ -= size;
if (cur_node == top()) {
set_top(cur_node->next());
@@ -2392,7 +2402,7 @@ FreeSpace* FreeListCategory::SearchForNodeInList(int minimum_size,
return nullptr;
}
-bool FreeListCategory::Free(FreeSpace* free_space, int size_in_bytes,
+bool FreeListCategory::Free(FreeSpace* free_space, size_t size_in_bytes,
FreeMode mode) {
if (!page()->CanAllocate()) return false;
@@ -2425,7 +2435,7 @@ void FreeListCategory::Relink() {
}
void FreeListCategory::Invalidate() {
- page()->add_available_in_free_list(-available());
+ page()->remove_available_in_free_list(available());
Reset();
type_ = kInvalidCategory;
}
@@ -2447,10 +2457,10 @@ void FreeList::Reset() {
ResetStats();
}
-int FreeList::Free(Address start, int size_in_bytes, FreeMode mode) {
+size_t FreeList::Free(Address start, size_t size_in_bytes, FreeMode mode) {
if (size_in_bytes == 0) return 0;
- owner()->heap()->CreateFillerObjectAt(start, size_in_bytes,
+ owner()->heap()->CreateFillerObjectAt(start, static_cast<int>(size_in_bytes),
ClearRecordedSlots::kNo);
Page* page = Page::FromAddress(start);
@@ -2469,10 +2479,11 @@ int FreeList::Free(Address start, int size_in_bytes, FreeMode mode) {
if (page->free_list_category(type)->Free(free_space, size_in_bytes, mode)) {
page->add_available_in_free_list(size_in_bytes);
}
+ DCHECK_EQ(page->AvailableInFreeList(), page->available_in_free_list());
return 0;
}
-FreeSpace* FreeList::FindNodeIn(FreeListCategoryType type, int* node_size) {
+FreeSpace* FreeList::FindNodeIn(FreeListCategoryType type, size_t* node_size) {
FreeListCategoryIterator it(this, type);
FreeSpace* node = nullptr;
while (it.HasNext()) {
@@ -2480,7 +2491,7 @@ FreeSpace* FreeList::FindNodeIn(FreeListCategoryType type, int* node_size) {
node = current->PickNodeFromList(node_size);
if (node != nullptr) {
Page::FromAddress(node->address())
- ->add_available_in_free_list(-(*node_size));
+ ->remove_available_in_free_list(*node_size);
DCHECK(IsVeryLong() || Available() == SumFreeLists());
return node;
}
@@ -2489,21 +2500,22 @@ FreeSpace* FreeList::FindNodeIn(FreeListCategoryType type, int* node_size) {
return node;
}
-FreeSpace* FreeList::TryFindNodeIn(FreeListCategoryType type, int* node_size,
- int minimum_size) {
+FreeSpace* FreeList::TryFindNodeIn(FreeListCategoryType type, size_t* node_size,
+ size_t minimum_size) {
if (categories_[type] == nullptr) return nullptr;
FreeSpace* node =
categories_[type]->TryPickNodeFromList(minimum_size, node_size);
if (node != nullptr) {
Page::FromAddress(node->address())
- ->add_available_in_free_list(-(*node_size));
+ ->remove_available_in_free_list(*node_size);
DCHECK(IsVeryLong() || Available() == SumFreeLists());
}
return node;
}
FreeSpace* FreeList::SearchForNodeInList(FreeListCategoryType type,
- int* node_size, int minimum_size) {
+ size_t* node_size,
+ size_t minimum_size) {
FreeListCategoryIterator it(this, type);
FreeSpace* node = nullptr;
while (it.HasNext()) {
@@ -2511,7 +2523,7 @@ FreeSpace* FreeList::SearchForNodeInList(FreeListCategoryType type,
node = current->SearchForNodeInList(minimum_size, node_size);
if (node != nullptr) {
Page::FromAddress(node->address())
- ->add_available_in_free_list(-(*node_size));
+ ->remove_available_in_free_list(*node_size);
DCHECK(IsVeryLong() || Available() == SumFreeLists());
return node;
}
@@ -2522,7 +2534,7 @@ FreeSpace* FreeList::SearchForNodeInList(FreeListCategoryType type,
return node;
}
-FreeSpace* FreeList::FindNodeFor(int size_in_bytes, int* node_size) {
+FreeSpace* FreeList::FindNodeFor(size_t size_in_bytes, size_t* node_size) {
FreeSpace* node = nullptr;
// First try the allocation fast path: try to allocate the minimum element
@@ -2559,12 +2571,19 @@ FreeSpace* FreeList::FindNodeFor(int size_in_bytes, int* node_size) {
// allocation space has been set up with the top and limit of the space. If
// the allocation fails then NULL is returned, and the caller can perform a GC
// or allocate a new page before retrying.
-HeapObject* FreeList::Allocate(int size_in_bytes) {
- DCHECK(0 < size_in_bytes);
+HeapObject* FreeList::Allocate(size_t size_in_bytes) {
DCHECK(size_in_bytes <= kMaxBlockSize);
DCHECK(IsAligned(size_in_bytes, kPointerSize));
+ DCHECK_LE(owner_->top(), owner_->limit());
+#ifdef DEBUG
+ if (owner_->top() != owner_->limit()) {
+ DCHECK_EQ(Page::FromAddress(owner_->top()),
+ Page::FromAddress(owner_->limit() - 1));
+ }
+#endif
// Don't free list allocate if there is linear space available.
- DCHECK(owner_->limit() - owner_->top() < size_in_bytes);
+ DCHECK_LT(static_cast<size_t>(owner_->limit() - owner_->top()),
+ size_in_bytes);
// Mark the old linear allocation area with a free space map so it can be
// skipped when scanning the heap. This also puts it back in the free list
@@ -2574,15 +2593,15 @@ HeapObject* FreeList::Allocate(int size_in_bytes) {
owner_->heap()->StartIncrementalMarkingIfAllocationLimitIsReached(
Heap::kNoGCFlags, kNoGCCallbackFlags);
- int new_node_size = 0;
+ size_t new_node_size = 0;
FreeSpace* new_node = FindNodeFor(size_in_bytes, &new_node_size);
if (new_node == nullptr) return nullptr;
- int bytes_left = new_node_size - size_in_bytes;
- DCHECK(bytes_left >= 0);
+ DCHECK_GE(new_node_size, size_in_bytes);
+ size_t bytes_left = new_node_size - size_in_bytes;
#ifdef DEBUG
- for (int i = 0; i < size_in_bytes / kPointerSize; i++) {
+ for (size_t i = 0; i < size_in_bytes / kPointerSize; i++) {
reinterpret_cast<Object**>(new_node->address())[i] =
Smi::FromInt(kCodeZapValue);
}
@@ -2593,11 +2612,11 @@ HeapObject* FreeList::Allocate(int size_in_bytes) {
// candidate.
DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(new_node));
- const int kThreshold = IncrementalMarking::kAllocatedThreshold;
+ const size_t kThreshold = IncrementalMarking::kAllocatedThreshold;
// Memory in the linear allocation area is counted as allocated. We may free
// a little of this again immediately - see below.
- owner_->Allocate(new_node_size);
+ owner_->Allocate(static_cast<int>(new_node_size));
if (owner_->heap()->inline_allocation_disabled()) {
// Keep the linear allocation area empty if requested to do so, just
@@ -2608,17 +2627,17 @@ HeapObject* FreeList::Allocate(int size_in_bytes) {
} else if (bytes_left > kThreshold &&
owner_->heap()->incremental_marking()->IsMarkingIncomplete() &&
FLAG_incremental_marking) {
- int linear_size = owner_->RoundSizeDownToObjectAlignment(kThreshold);
+ size_t linear_size = owner_->RoundSizeDownToObjectAlignment(kThreshold);
// We don't want to give too large linear areas to the allocator while
// incremental marking is going on, because we won't check again whether
// we want to do another increment until the linear area is used up.
+ DCHECK_GE(new_node_size, size_in_bytes + linear_size);
owner_->Free(new_node->address() + size_in_bytes + linear_size,
new_node_size - size_in_bytes - linear_size);
owner_->SetAllocationInfo(
new_node->address() + size_in_bytes,
new_node->address() + size_in_bytes + linear_size);
} else {
- DCHECK(bytes_left >= 0);
// Normally we give the rest of the node to the allocator as its new
// linear allocation area.
owner_->SetAllocationInfo(new_node->address() + size_in_bytes,
@@ -2628,8 +2647,8 @@ HeapObject* FreeList::Allocate(int size_in_bytes) {
return new_node;
}
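
The incremental-marking branch above splits the node returned by FindNodeFor three ways: the requested object, a linear allocation area capped at the rounded-down kThreshold, and a remainder that is freed back immediately. A minimal compilable sketch of that arithmetic (the struct and function names are placeholders, not V8 symbols):

#include <cassert>
#include <cstddef>

// Illustrative sketch of the split performed in FreeList::Allocate while
// incremental marking is incomplete.
struct AllocateSplit {
  size_t object_bytes;       // handed out as the allocation result
  size_t linear_area_bytes;  // becomes the new top/limit window
  size_t freed_bytes;        // returned to the free list right away
};

AllocateSplit SplitFoundNode(size_t new_node_size, size_t size_in_bytes,
                             size_t linear_size) {
  assert(new_node_size >= size_in_bytes + linear_size);
  return {size_in_bytes, linear_size,
          new_node_size - size_in_bytes - linear_size};
}

Capping the linear area matters because, as the comment above notes, the marker only re-evaluates its step budget once that area is used up.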
-intptr_t FreeList::EvictFreeListItems(Page* page) {
- intptr_t sum = 0;
+size_t FreeList::EvictFreeListItems(Page* page) {
+ size_t sum = 0;
page->ForAllFreeListCategories(
[this, &sum, page](FreeListCategory* category) {
DCHECK_EQ(this, category->owner());
@@ -2703,8 +2722,8 @@ void FreeList::PrintCategories(FreeListCategoryType type) {
#ifdef DEBUG
-intptr_t FreeListCategory::SumFreeList() {
- intptr_t sum = 0;
+size_t FreeListCategory::SumFreeList() {
+ size_t sum = 0;
FreeSpace* cur = top();
while (cur != NULL) {
DCHECK(cur->map() == cur->GetHeap()->root(Heap::kFreeSpaceMapRootIndex));
@@ -2741,8 +2760,8 @@ bool FreeList::IsVeryLong() {
// This can take a very long time because it is linear in the number of entries
// on the free list, so it should not be called if FreeListLength returns
// kVeryLongFreeList.
-intptr_t FreeList::SumFreeLists() {
- intptr_t sum = 0;
+size_t FreeList::SumFreeLists() {
+ size_t sum = 0;
ForAllFreeListCategories(
[&sum](FreeListCategory* category) { sum += category->SumFreeList(); });
return sum;
@@ -2762,13 +2781,10 @@ void PagedSpace::PrepareForMarkCompact() {
free_list_.Reset();
}
-
-intptr_t PagedSpace::SizeOfObjects() {
- const intptr_t size = Size() - (limit() - top());
+size_t PagedSpace::SizeOfObjects() {
CHECK_GE(limit(), top());
- CHECK_GE(size, 0);
- USE(size);
- return size;
+ DCHECK_GE(Size(), static_cast<size_t>(limit() - top()));
+ return Size() - (limit() - top());
}
@@ -2781,24 +2797,12 @@ void PagedSpace::RepairFreeListsAfterDeserialization() {
// Each page may have a small free space that is not tracked by a free list.
// Update the maps for those free space objects.
for (Page* page : *this) {
- int size = static_cast<int>(page->wasted_memory());
+ size_t size = page->wasted_memory();
if (size == 0) continue;
+ DCHECK_GE(static_cast<size_t>(Page::kPageSize), size);
Address address = page->OffsetToAddress(Page::kPageSize - size);
- heap()->CreateFillerObjectAt(address, size, ClearRecordedSlots::kNo);
- }
-}
-
-
-void PagedSpace::EvictEvacuationCandidatesFromLinearAllocationArea() {
- if (allocation_info_.top() >= allocation_info_.limit()) return;
-
- if (!Page::FromAllocationAreaAddress(allocation_info_.top())->CanAllocate()) {
- // Create filler object to keep page iterable if it was iterable.
- int remaining =
- static_cast<int>(allocation_info_.limit() - allocation_info_.top());
- heap()->CreateFillerObjectAt(allocation_info_.top(), remaining,
+ heap()->CreateFillerObjectAt(address, static_cast<int>(size),
ClearRecordedSlots::kNo);
- allocation_info_.Reset(nullptr, nullptr);
}
}
@@ -2826,8 +2830,8 @@ HeapObject* CompactionSpace::SweepAndRetryAllocation(int size_in_bytes) {
return nullptr;
}
-
HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
+ DCHECK_GE(size_in_bytes, 0);
const int kMaxPagesToSweep = 1;
// Allocation in this space has failed.
@@ -2840,7 +2844,8 @@ HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
RefillFreeList();
// Retry the free list allocation.
- HeapObject* object = free_list_.Allocate(size_in_bytes);
+ HeapObject* object =
+ free_list_.Allocate(static_cast<size_t>(size_in_bytes));
if (object != NULL) return object;
// If sweeping is still in progress try to sweep pages on the main thread.
@@ -2848,15 +2853,15 @@ HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
identity(), size_in_bytes, kMaxPagesToSweep);
RefillFreeList();
if (max_freed >= size_in_bytes) {
- object = free_list_.Allocate(size_in_bytes);
+ object = free_list_.Allocate(static_cast<size_t>(size_in_bytes));
if (object != nullptr) return object;
}
}
- if (heap()->ShouldExpandOldGenerationOnAllocationFailure() && Expand()) {
+ if (heap()->ShouldExpandOldGenerationOnSlowAllocation() && Expand()) {
DCHECK((CountTotalPages() > 1) ||
- (size_in_bytes <= free_list_.Available()));
- return free_list_.Allocate(size_in_bytes);
+ (static_cast<size_t>(size_in_bytes) <= free_list_.Available()));
+ return free_list_.Allocate(static_cast<size_t>(size_in_bytes));
}
// If sweeper threads are active, wait for them at that point and steal
@@ -2897,7 +2902,7 @@ Address LargePage::GetAddressToShrink() {
return 0;
}
size_t used_size = RoundUp((object->address() - address()) + object->Size(),
- base::OS::CommitPageSize());
+ MemoryAllocator::GetCommitPageSize());
if (used_size < CommittedPhysicalMemory()) {
return address() + used_size;
}
@@ -2905,8 +2910,10 @@ Address LargePage::GetAddressToShrink() {
}
void LargePage::ClearOutOfLiveRangeSlots(Address free_start) {
- RememberedSet<OLD_TO_NEW>::RemoveRange(this, free_start, area_end());
- RememberedSet<OLD_TO_OLD>::RemoveRange(this, free_start, area_end());
+ RememberedSet<OLD_TO_NEW>::RemoveRange(this, free_start, area_end(),
+ SlotSet::FREE_EMPTY_BUCKETS);
+ RememberedSet<OLD_TO_OLD>::RemoveRange(this, free_start, area_end(),
+ SlotSet::FREE_EMPTY_BUCKETS);
RememberedSet<OLD_TO_NEW>::RemoveRangeTyped(this, free_start, area_end());
RememberedSet<OLD_TO_OLD>::RemoveRangeTyped(this, free_start, area_end());
}
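
GetAddressToShrink above rounds the end of the single live object up to the commit page granularity before deciding whether the tail of a large page can be released; ClearOutOfLiveRangeSlots then drops any recorded slots in the released range. A small sketch of the rounding decision, with an assumed 4 KiB commit page size (all names here are illustrative):

#include <cstddef>

// Round a byte count up to the next multiple of the commit granularity.
static size_t RoundUpTo(size_t value, size_t granularity) {
  return ((value + granularity - 1) / granularity) * granularity;
}

// Returns how many bytes at the end of the page could be uncommitted, or 0
// if the rounded-up used size already covers the committed memory.
size_t ShrinkableTail(size_t object_end_offset, size_t committed_bytes,
                      size_t commit_page_size /* e.g. 4096 */) {
  size_t used = RoundUpTo(object_end_offset, commit_page_size);
  return used < committed_bytes ? committed_bytes - used : 0;
}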
@@ -2967,14 +2974,15 @@ AllocationResult LargeObjectSpace::AllocateRaw(int object_size,
Executability executable) {
// Check if we want to force a GC before growing the old space further.
// If so, fail the allocation.
- if (!heap()->CanExpandOldGeneration(object_size)) {
+ if (!heap()->CanExpandOldGeneration(object_size) ||
+ !heap()->ShouldExpandOldGenerationOnSlowAllocation()) {
return AllocationResult::Retry(identity());
}
LargePage* page = heap()->memory_allocator()->AllocateLargePage(
object_size, this, executable);
if (page == NULL) return AllocationResult::Retry(identity());
- DCHECK(page->area_size() >= object_size);
+ DCHECK_GE(page->area_size(), static_cast<size_t>(object_size));
size_ += static_cast<int>(page->size());
AccountCommitted(page->size());
@@ -2993,7 +3001,7 @@ AllocationResult LargeObjectSpace::AllocateRaw(int object_size,
// We only need to do this in debug builds or if verify_heap is on.
reinterpret_cast<Object**>(object->address())[0] =
heap()->fixed_array_map();
- reinterpret_cast<Object**>(object->address())[1] = Smi::FromInt(0);
+ reinterpret_cast<Object**>(object->address())[1] = Smi::kZero;
}
heap()->StartIncrementalMarkingIfAllocationLimitIsReached(Heap::kNoGCFlags,
@@ -3022,7 +3030,7 @@ Object* LargeObjectSpace::FindObject(Address a) {
if (page != NULL) {
return page->GetObject();
}
- return Smi::FromInt(0); // Signaling not found.
+ return Smi::kZero; // Signaling not found.
}
diff --git a/deps/v8/src/heap/spaces.h b/deps/v8/src/heap/spaces.h
index 732ba7ead5..f5701adc69 100644
--- a/deps/v8/src/heap/spaces.h
+++ b/deps/v8/src/heap/spaces.h
@@ -135,7 +135,8 @@ enum FreeMode { kLinkCategory, kDoNotLinkCategory };
class FreeListCategory {
public:
static const int kSize = kIntSize + // FreeListCategoryType type_
- kIntSize + // int available_
+ kIntSize + // padding for type_
+ kSizetSize + // size_t available_
kPointerSize + // FreeSpace* top_
kPointerSize + // FreeListCategory* prev_
kPointerSize; // FreeListCategory* next_
@@ -167,28 +168,28 @@ class FreeListCategory {
// category is currently unlinked.
void Relink();
- bool Free(FreeSpace* node, int size_in_bytes, FreeMode mode);
+ bool Free(FreeSpace* node, size_t size_in_bytes, FreeMode mode);
// Picks a node from the list and stores its size in |node_size|. Returns
// nullptr if the category is empty.
- FreeSpace* PickNodeFromList(int* node_size);
+ FreeSpace* PickNodeFromList(size_t* node_size);
// Performs a single try to pick a node of at least |minimum_size| from the
// category. Stores the actual size in |node_size|. Returns nullptr if no
// node is found.
- FreeSpace* TryPickNodeFromList(int minimum_size, int* node_size);
+ FreeSpace* TryPickNodeFromList(size_t minimum_size, size_t* node_size);
// Picks a node of at least |minimum_size| from the category. Stores the
// actual size in |node_size|. Returns nullptr if no node is found.
- FreeSpace* SearchForNodeInList(int minimum_size, int* node_size);
+ FreeSpace* SearchForNodeInList(size_t minimum_size, size_t* node_size);
inline FreeList* owner();
inline bool is_linked();
bool is_empty() { return top() == nullptr; }
- int available() const { return available_; }
+ size_t available() const { return available_; }
#ifdef DEBUG
- intptr_t SumFreeList();
+ size_t SumFreeList();
int FreeListLength();
#endif
@@ -211,7 +212,7 @@ class FreeListCategory {
// |available_|: Total available bytes in all blocks of this free list
// category.
- int available_;
+ size_t available_;
// |top_|: Points to the top FreeSpace* in the free list category.
FreeSpace* top_;
@@ -310,11 +311,6 @@ class MemoryChunk {
kSweepingInProgress,
};
- // Every n write barrier invocations we go to runtime even though
- // we could have handled it in generated code. This lets us check
- // whether we have hit the limit and should do some more marking.
- static const int kWriteBarrierCounterGranularity = 500;
-
static const intptr_t kAlignment =
(static_cast<uintptr_t>(1) << kPageSizeBits);
@@ -324,36 +320,30 @@ class MemoryChunk {
static const intptr_t kFlagsOffset = kSizeOffset + kPointerSize;
- static const size_t kWriteBarrierCounterOffset =
- kSizeOffset + kPointerSize // size_t size
- + kIntptrSize // Flags flags_
- + kPointerSize // Address area_start_
- + kPointerSize // Address area_end_
- + 2 * kPointerSize // base::VirtualMemory reservation_
- + kPointerSize // Address owner_
- + kPointerSize // Heap* heap_
- + kIntSize // int progress_bar_
- + kIntSize // int live_bytes_count_
- + kPointerSize // SlotSet* old_to_new_slots_;
- + kPointerSize // SlotSet* old_to_old_slots_;
- + kPointerSize // TypedSlotSet* typed_old_to_new_slots_;
- + kPointerSize // TypedSlotSet* typed_old_to_old_slots_;
- + kPointerSize; // SkipList* skip_list_;
-
static const size_t kMinHeaderSize =
- kWriteBarrierCounterOffset +
- kIntptrSize // intptr_t write_barrier_counter_
- + kPointerSize // AtomicValue high_water_mark_
- + kPointerSize // base::Mutex* mutex_
- + kPointerSize // base::AtomicWord concurrent_sweeping_
- + 2 * kPointerSize // AtomicNumber free-list statistics
- + kPointerSize // AtomicValue next_chunk_
- + kPointerSize // AtomicValue prev_chunk_
+ kSizeOffset + kSizetSize // size_t size
+ + kIntptrSize // Flags flags_
+ + kPointerSize // Address area_start_
+ + kPointerSize // Address area_end_
+ + 2 * kPointerSize // base::VirtualMemory reservation_
+ + kPointerSize // Address owner_
+ + kPointerSize // Heap* heap_
+ + kIntSize // int progress_bar_
+ + kIntSize // int live_bytes_count_
+ + kPointerSize // SlotSet* old_to_new_slots_
+ + kPointerSize // SlotSet* old_to_old_slots_
+ + kPointerSize // TypedSlotSet* typed_old_to_new_slots_
+ + kPointerSize // TypedSlotSet* typed_old_to_old_slots_
+ + kPointerSize // SkipList* skip_list_
+ + kPointerSize // AtomicValue high_water_mark_
+ + kPointerSize // base::Mutex* mutex_
+ + kPointerSize // base::AtomicWord concurrent_sweeping_
+ + 2 * kSizetSize // AtomicNumber free-list statistics
+ + kPointerSize // AtomicValue next_chunk_
+ + kPointerSize // AtomicValue prev_chunk_
// FreeListCategory categories_[kNumberOfCategories]
+ FreeListCategory::kSize * kNumberOfCategories +
- kPointerSize // LocalArrayBufferTracker* local_tracker_
- // std::unordered_set<Address>* black_area_end_marker_map_
- + kPointerSize;
+ kPointerSize; // LocalArrayBufferTracker* local_tracker_
// We add some more space to the computed header size to amount for missing
// alignment requirements in our computation.
@@ -421,6 +411,10 @@ class MemoryChunk {
return concurrent_sweeping_;
}
+ bool SweepingDone() {
+ return concurrent_sweeping_state().Value() == kSweepingDone;
+ }
+
// Manage live byte count, i.e., count of bytes in black objects.
inline void ResetLiveBytes();
inline void IncrementLiveBytes(int by);
@@ -436,14 +430,6 @@ class MemoryChunk {
live_byte_count_ = live_bytes;
}
- int write_barrier_counter() {
- return static_cast<int>(write_barrier_counter_);
- }
-
- void set_write_barrier_counter(int counter) {
- write_barrier_counter_ = counter;
- }
-
size_t size() const { return size_; }
void set_size(size_t size) { size_ = size; }
@@ -465,7 +451,7 @@ class MemoryChunk {
V8_EXPORT_PRIVATE void AllocateOldToNewSlots();
void ReleaseOldToNewSlots();
- void AllocateOldToOldSlots();
+ V8_EXPORT_PRIVATE void AllocateOldToOldSlots();
void ReleaseOldToOldSlots();
void AllocateTypedOldToNewSlots();
void ReleaseTypedOldToNewSlots();
@@ -476,7 +462,7 @@ class MemoryChunk {
Address area_start() { return area_start_; }
Address area_end() { return area_end_; }
- int area_size() { return static_cast<int>(area_end() - area_start()); }
+ size_t area_size() { return static_cast<size_t>(area_end() - area_start()); }
bool CommitArea(size_t requested);
@@ -588,33 +574,6 @@ class MemoryChunk {
void InsertAfter(MemoryChunk* other);
void Unlink();
- void ReleaseBlackAreaEndMarkerMap() {
- if (black_area_end_marker_map_) {
- delete black_area_end_marker_map_;
- black_area_end_marker_map_ = nullptr;
- }
- }
-
- bool IsBlackAreaEndMarker(Address address) {
- if (black_area_end_marker_map_) {
- return black_area_end_marker_map_->find(address) !=
- black_area_end_marker_map_->end();
- }
- return false;
- }
-
- void AddBlackAreaEndMarker(Address address) {
- if (!black_area_end_marker_map_) {
- black_area_end_marker_map_ = new std::unordered_set<Address>();
- }
- auto ret = black_area_end_marker_map_->insert(address);
- USE(ret);
- // Check that we inserted a new black area end marker.
- DCHECK(ret.second);
- }
-
- bool HasBlackAreas() { return black_area_end_marker_map_ != nullptr; }
-
protected:
static MemoryChunk* Initialize(Heap* heap, Address base, size_t size,
Address area_start, Address area_end,
@@ -660,8 +619,6 @@ class MemoryChunk {
SkipList* skip_list_;
- intptr_t write_barrier_counter_;
-
// Assuming the initial allocation on a page is sequential,
// count highest number of bytes ever allocated on the page.
base::AtomicValue<intptr_t> high_water_mark_;
@@ -683,9 +640,6 @@ class MemoryChunk {
LocalArrayBufferTracker* local_tracker_;
- // Stores the end addresses of black areas.
- std::unordered_set<Address>* black_area_end_marker_map_;
-
private:
void InitializeReservedMemory() { reservation_.Reset(); }
@@ -713,7 +667,7 @@ class Page : public MemoryChunk {
static_cast<intptr_t>(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING) |
static_cast<intptr_t>(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
- static inline Page* ConvertNewToOld(Page* old_page, PagedSpace* new_owner);
+ static inline Page* ConvertNewToOld(Page* old_page);
// Returns the page containing a given address. The address ranges
// from [page_addr .. page_addr + kPageSize[. This only works if the object
@@ -768,13 +722,10 @@ class Page : public MemoryChunk {
}
// Returns the offset of a given address to this page.
- inline int Offset(Address a) {
- int offset = static_cast<int>(a - address());
- return offset;
- }
+ inline size_t Offset(Address a) { return static_cast<size_t>(a - address()); }
   // Returns the address for a given offset to this page.
- Address OffsetToAddress(int offset) {
+ Address OffsetToAddress(size_t offset) {
DCHECK_PAGE_OFFSET(offset);
return address() + offset;
}
@@ -788,15 +739,13 @@ class Page : public MemoryChunk {
DCHECK(SweepingDone());
}
- bool SweepingDone() {
- return concurrent_sweeping_state().Value() == kSweepingDone;
- }
-
void ResetFreeListStatistics();
- int LiveBytesFromFreeList() {
- return static_cast<int>(area_size() - wasted_memory() -
- available_in_free_list());
+ size_t AvailableInFreeList();
+
+ size_t LiveBytesFromFreeList() {
+ DCHECK_GE(area_size(), wasted_memory() + available_in_free_list());
+ return area_size() - wasted_memory() - available_in_free_list();
}
FreeListCategory* free_list_category(FreeListCategoryType type) {
@@ -805,12 +754,18 @@ class Page : public MemoryChunk {
bool is_anchor() { return IsFlagSet(Page::ANCHOR); }
- intptr_t wasted_memory() { return wasted_memory_.Value(); }
- void add_wasted_memory(intptr_t waste) { wasted_memory_.Increment(waste); }
- intptr_t available_in_free_list() { return available_in_free_list_.Value(); }
- void add_available_in_free_list(intptr_t available) {
+ size_t wasted_memory() { return wasted_memory_.Value(); }
+ void add_wasted_memory(size_t waste) { wasted_memory_.Increment(waste); }
+ size_t available_in_free_list() { return available_in_free_list_.Value(); }
+ void add_available_in_free_list(size_t available) {
+ DCHECK_LE(available, area_size());
available_in_free_list_.Increment(available);
}
+ void remove_available_in_free_list(size_t available) {
+ DCHECK_LE(available, area_size());
+ DCHECK_GE(available_in_free_list(), available);
+ available_in_free_list_.Decrement(available);
+ }
size_t ShrinkToHighWaterMark();
@@ -914,17 +869,17 @@ class Space : public Malloced {
virtual size_t MaximumCommittedMemory() { return max_committed_; }
// Returns allocated size.
- virtual intptr_t Size() = 0;
+ virtual size_t Size() = 0;
// Returns size of objects. Can differ from the allocated size
// (e.g. see LargeObjectSpace).
- virtual intptr_t SizeOfObjects() { return Size(); }
+ virtual size_t SizeOfObjects() { return Size(); }
// Approximate amount of physical memory committed for this space.
virtual size_t CommittedPhysicalMemory() = 0;
// Return the available bytes without growing.
- virtual intptr_t Available() = 0;
+ virtual size_t Available() = 0;
virtual int RoundSizeDownToObjectAlignment(int size) {
if (id_ == CODE_SPACE) {
@@ -973,8 +928,6 @@ class Space : public Malloced {
class MemoryChunkValidator {
// Computed offsets should match the compiler generated ones.
STATIC_ASSERT(MemoryChunk::kSizeOffset == offsetof(MemoryChunk, size_));
- STATIC_ASSERT(MemoryChunk::kWriteBarrierCounterOffset ==
- offsetof(MemoryChunk, write_barrier_counter_));
// Validate our estimates on the header size.
STATIC_ASSERT(sizeof(MemoryChunk) <= MemoryChunk::kHeaderSize);
@@ -1137,7 +1090,7 @@ class SkipList {
// ----------------------------------------------------------------------------
// A space acquires chunks of memory from the operating system. The memory
-// allocator allocated and deallocates pages for the paged heap spaces and large
+// allocator allocates and deallocates pages for the paged heap spaces and large
// pages for large object space.
class MemoryAllocator {
public:
@@ -1180,6 +1133,7 @@ class MemoryAllocator {
void FreeQueuedChunks();
bool WaitUntilCompleted();
+ void TearDown();
private:
enum ChunkQueueType {
@@ -1237,30 +1191,32 @@ class MemoryAllocator {
kPooledAndQueue,
};
- static int CodePageGuardStartOffset();
+ static size_t CodePageGuardStartOffset();
- static int CodePageGuardSize();
+ static size_t CodePageGuardSize();
- static int CodePageAreaStartOffset();
+ static size_t CodePageAreaStartOffset();
- static int CodePageAreaEndOffset();
+ static size_t CodePageAreaEndOffset();
- static int CodePageAreaSize() {
+ static size_t CodePageAreaSize() {
return CodePageAreaEndOffset() - CodePageAreaStartOffset();
}
- static int PageAreaSize(AllocationSpace space) {
+ static size_t PageAreaSize(AllocationSpace space) {
DCHECK_NE(LO_SPACE, space);
return (space == CODE_SPACE) ? CodePageAreaSize()
: Page::kAllocatableMemory;
}
+ static intptr_t GetCommitPageSize();
+
explicit MemoryAllocator(Isolate* isolate);
// Initializes its internal bookkeeping structures.
// Max capacity of the total space and executable memory limit.
- bool SetUp(intptr_t max_capacity, intptr_t capacity_executable,
- intptr_t code_range_size);
+ bool SetUp(size_t max_capacity, size_t capacity_executable,
+ size_t code_range_size);
void TearDown();
@@ -1269,9 +1225,9 @@ class MemoryAllocator {
// should be tried first.
template <MemoryAllocator::AllocationMode alloc_mode = kRegular,
typename SpaceType>
- Page* AllocatePage(intptr_t size, SpaceType* owner, Executability executable);
+ Page* AllocatePage(size_t size, SpaceType* owner, Executability executable);
- LargePage* AllocateLargePage(intptr_t size, LargeObjectSpace* owner,
+ LargePage* AllocateLargePage(size_t size, LargeObjectSpace* owner,
Executability executable);
template <MemoryAllocator::FreeMode mode = kFull>
@@ -1313,8 +1269,7 @@ class MemoryAllocator {
// Returns a MemoryChunk in which the memory region from commit_area_size to
// reserve_area_size of the chunk area is reserved but not committed, it
// could be committed later by calling MemoryChunk::CommitArea.
- MemoryChunk* AllocateChunk(intptr_t reserve_area_size,
- intptr_t commit_area_size,
+ MemoryChunk* AllocateChunk(size_t reserve_area_size, size_t commit_area_size,
Executability executable, Space* space);
void ShrinkChunk(MemoryChunk* chunk, size_t bytes_to_shrink);
@@ -1690,7 +1645,7 @@ class FreeList {
public:
// This method returns how much memory can be allocated after freeing
// maximum_freed memory.
- static inline int GuaranteedAllocatable(int maximum_freed) {
+ static inline size_t GuaranteedAllocatable(size_t maximum_freed) {
if (maximum_freed <= kTiniestListMax) {
// Since we are not iterating over all list entries, we cannot guarantee
// that we can find the maximum freed block in that free list.
@@ -1715,12 +1670,12 @@ class FreeList {
// was too small. Bookkeeping information will be written to the block, i.e.,
// its contents will be destroyed. The start address should be word aligned,
// and the size should be a non-zero multiple of the word size.
- int Free(Address start, int size_in_bytes, FreeMode mode);
+ size_t Free(Address start, size_t size_in_bytes, FreeMode mode);
// Allocate a block of size {size_in_bytes} from the free list. The block is
   // uninitialized. A failure is returned if no block is available. The size
// should be a non-zero multiple of the word size.
- MUST_USE_RESULT HeapObject* Allocate(int size_in_bytes);
+ MUST_USE_RESULT HeapObject* Allocate(size_t size_in_bytes);
// Clear the free list.
void Reset();
@@ -1732,8 +1687,8 @@ class FreeList {
}
// Return the number of bytes available on the free list.
- intptr_t Available() {
- intptr_t available = 0;
+ size_t Available() {
+ size_t available = 0;
ForAllFreeListCategories([&available](FreeListCategory* category) {
available += category->available();
});
@@ -1751,11 +1706,11 @@ class FreeList {
// Used after booting the VM.
void RepairLists(Heap* heap);
- intptr_t EvictFreeListItems(Page* page);
+ size_t EvictFreeListItems(Page* page);
bool ContainsPageFreeListItems(Page* page);
PagedSpace* owner() { return owner_; }
- intptr_t wasted_bytes() { return wasted_bytes_.Value(); }
+ size_t wasted_bytes() { return wasted_bytes_.Value(); }
template <typename Callback>
void ForAllFreeListCategories(FreeListCategoryType type, Callback callback) {
@@ -1779,7 +1734,7 @@ class FreeList {
void PrintCategories(FreeListCategoryType type);
#ifdef DEBUG
- intptr_t SumFreeLists();
+ size_t SumFreeLists();
bool IsVeryLong();
#endif
@@ -1803,33 +1758,33 @@ class FreeList {
};
// The size range of blocks, in bytes.
- static const int kMinBlockSize = 3 * kPointerSize;
- static const int kMaxBlockSize = Page::kAllocatableMemory;
+ static const size_t kMinBlockSize = 3 * kPointerSize;
+ static const size_t kMaxBlockSize = Page::kAllocatableMemory;
- static const int kTiniestListMax = 0xa * kPointerSize;
- static const int kTinyListMax = 0x1f * kPointerSize;
- static const int kSmallListMax = 0xff * kPointerSize;
- static const int kMediumListMax = 0x7ff * kPointerSize;
- static const int kLargeListMax = 0x3fff * kPointerSize;
- static const int kTinyAllocationMax = kTiniestListMax;
- static const int kSmallAllocationMax = kTinyListMax;
- static const int kMediumAllocationMax = kSmallListMax;
- static const int kLargeAllocationMax = kMediumListMax;
+ static const size_t kTiniestListMax = 0xa * kPointerSize;
+ static const size_t kTinyListMax = 0x1f * kPointerSize;
+ static const size_t kSmallListMax = 0xff * kPointerSize;
+ static const size_t kMediumListMax = 0x7ff * kPointerSize;
+ static const size_t kLargeListMax = 0x3fff * kPointerSize;
+ static const size_t kTinyAllocationMax = kTiniestListMax;
+ static const size_t kSmallAllocationMax = kTinyListMax;
+ static const size_t kMediumAllocationMax = kSmallListMax;
+ static const size_t kLargeAllocationMax = kMediumListMax;
- FreeSpace* FindNodeFor(int size_in_bytes, int* node_size);
+ FreeSpace* FindNodeFor(size_t size_in_bytes, size_t* node_size);
// Walks all available categories for a given |type| and tries to retrieve
// a node. Returns nullptr if the category is empty.
- FreeSpace* FindNodeIn(FreeListCategoryType type, int* node_size);
+ FreeSpace* FindNodeIn(FreeListCategoryType type, size_t* node_size);
// Tries to retrieve a node from the first category in a given |type|.
// Returns nullptr if the category is empty.
- FreeSpace* TryFindNodeIn(FreeListCategoryType type, int* node_size,
- int minimum_size);
+ FreeSpace* TryFindNodeIn(FreeListCategoryType type, size_t* node_size,
+ size_t minimum_size);
// Searches a given |type| for a node of at least |minimum_size|.
- FreeSpace* SearchForNodeInList(FreeListCategoryType type, int* node_size,
- int minimum_size);
+ FreeSpace* SearchForNodeInList(FreeListCategoryType type, size_t* node_size,
+ size_t minimum_size);
FreeListCategoryType SelectFreeListCategoryType(size_t size_in_bytes) {
if (size_in_bytes <= kTiniestListMax) {
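
The hunk cuts SelectFreeListCategoryType off after its first comparison; the function simply walks the thresholds declared above from smallest to largest. A sketch of that ladder with placeholder enum names (the thresholds mirror the constants above, assuming 8-byte pointers):

#include <cstddef>

// Illustrative mapping from request size to free-list bucket. The enum names
// are placeholders, not the V8 FreeListCategoryType values.
enum class Bucket { Tiniest, Tiny, Small, Medium, Large, Huge };

Bucket SelectBucket(size_t size_in_bytes) {
  const size_t kPointerSize = 8;  // assumed pointer width
  if (size_in_bytes <= 0xa * kPointerSize) return Bucket::Tiniest;
  if (size_in_bytes <= 0x1f * kPointerSize) return Bucket::Tiny;
  if (size_in_bytes <= 0xff * kPointerSize) return Bucket::Small;
  if (size_in_bytes <= 0x7ff * kPointerSize) return Bucket::Medium;
  if (size_in_bytes <= 0x3fff * kPointerSize) return Bucket::Large;
  return Bucket::Huge;
}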
@@ -1862,7 +1817,7 @@ class FreeList {
FreeListCategory* top(FreeListCategoryType type) { return categories_[type]; }
PagedSpace* owner_;
- base::AtomicNumber<intptr_t> wasted_bytes_;
+ base::AtomicNumber<size_t> wasted_bytes_;
FreeListCategory* categories_[kNumberOfCategories];
friend class FreeListCategory;
@@ -1974,7 +1929,7 @@ class PagedSpace : public Space {
void PrepareForMarkCompact();
// Current capacity without growing (Size() + Available()).
- intptr_t Capacity() { return accounting_stats_.Capacity(); }
+ size_t Capacity() { return accounting_stats_.Capacity(); }
// Approximate amount of physical memory committed for this space.
size_t CommittedPhysicalMemory() override;
@@ -1996,21 +1951,21 @@ class PagedSpace : public Space {
// The bytes in the linear allocation area are not included in this total
// because updating the stats would slow down allocation. New pages are
// immediately added to the free list so they show up here.
- intptr_t Available() override { return free_list_.Available(); }
+ size_t Available() override { return free_list_.Available(); }
// Allocated bytes in this space. Garbage bytes that were not found due to
// concurrent sweeping are counted as being allocated! The bytes in the
// current linear allocation area (between top and limit) are also counted
// here.
- intptr_t Size() override { return accounting_stats_.Size(); }
+ size_t Size() override { return accounting_stats_.Size(); }
// As size, but the bytes in lazily swept pages are estimated and the bytes
// in the current linear allocation area are not included.
- intptr_t SizeOfObjects() override;
+ size_t SizeOfObjects() override;
// Wasted bytes in this space. These are just the bytes that were thrown away
// due to being too small to use for allocation.
- virtual intptr_t Waste() { return free_list_.wasted_bytes(); }
+ virtual size_t Waste() { return free_list_.wasted_bytes(); }
// Returns the allocation pointer in this space.
Address top() { return allocation_info_.top(); }
@@ -2049,14 +2004,16 @@ class PagedSpace : public Space {
// the free list or accounted as waste.
// If add_to_freelist is false then just accounting stats are updated and
// no attempt to add area to free list is made.
- int Free(Address start, int size_in_bytes) {
- int wasted = free_list_.Free(start, size_in_bytes, kLinkCategory);
+ size_t Free(Address start, size_t size_in_bytes) {
+ size_t wasted = free_list_.Free(start, size_in_bytes, kLinkCategory);
accounting_stats_.DeallocateBytes(size_in_bytes);
+ DCHECK_GE(size_in_bytes, wasted);
return size_in_bytes - wasted;
}
- int UnaccountedFree(Address start, int size_in_bytes) {
- int wasted = free_list_.Free(start, size_in_bytes, kDoNotLinkCategory);
+ size_t UnaccountedFree(Address start, size_t size_in_bytes) {
+ size_t wasted = free_list_.Free(start, size_in_bytes, kDoNotLinkCategory);
+ DCHECK_GE(size_in_bytes, wasted);
return size_in_bytes - wasted;
}
@@ -2112,15 +2069,13 @@ class PagedSpace : public Space {
Page* FirstPage() { return anchor_.next_page(); }
Page* LastPage() { return anchor_.prev_page(); }
- void EvictEvacuationCandidatesFromLinearAllocationArea();
-
bool CanExpand(size_t size);
// Returns the number of total pages in this space.
int CountTotalPages();
// Return size of allocatable area on a page in this space.
- inline int AreaSize() { return area_size_; }
+ inline int AreaSize() { return static_cast<int>(area_size_); }
virtual bool is_local() { return false; }
@@ -2183,7 +2138,7 @@ class PagedSpace : public Space {
// Slow path of AllocateRaw. This function is space-dependent.
MUST_USE_RESULT HeapObject* SlowAllocateRaw(int size_in_bytes);
- int area_size_;
+ size_t area_size_;
// Accounting information for this space.
AllocationStats accounting_stats_;
@@ -2237,7 +2192,7 @@ class SemiSpace : public Space {
inline bool Contains(Object* o);
inline bool ContainsSlow(Address a);
- void SetUp(int initial_capacity, int maximum_capacity);
+ void SetUp(size_t initial_capacity, size_t maximum_capacity);
void TearDown();
bool HasBeenSetUp() { return maximum_capacity_ != 0; }
@@ -2247,12 +2202,12 @@ class SemiSpace : public Space {
// Grow the semispace to the new capacity. The new capacity requested must
// be larger than the current capacity and less than the maximum capacity.
- bool GrowTo(int new_capacity);
+ bool GrowTo(size_t new_capacity);
// Shrinks the semispace to the new capacity. The new capacity requested
// must be more than the amount of used memory in the semispace and less
// than the current capacity.
- bool ShrinkTo(int new_capacity);
+ bool ShrinkTo(size_t new_capacity);
bool EnsureCurrentCapacity();
@@ -2300,13 +2255,13 @@ class SemiSpace : public Space {
void set_age_mark(Address mark);
// Returns the current capacity of the semispace.
- int current_capacity() { return current_capacity_; }
+ size_t current_capacity() { return current_capacity_; }
// Returns the maximum capacity of the semispace.
- int maximum_capacity() { return maximum_capacity_; }
+ size_t maximum_capacity() { return maximum_capacity_; }
// Returns the initial capacity of the semispace.
- int minimum_capacity() { return minimum_capacity_; }
+ size_t minimum_capacity() { return minimum_capacity_; }
SemiSpaceId id() { return id_; }
@@ -2316,14 +2271,14 @@ class SemiSpace : public Space {
// If we don't have these here then SemiSpace will be abstract. However
// they should never be called:
- intptr_t Size() override {
+ size_t Size() override {
UNREACHABLE();
return 0;
}
- intptr_t SizeOfObjects() override { return Size(); }
+ size_t SizeOfObjects() override { return Size(); }
- intptr_t Available() override {
+ size_t Available() override {
UNREACHABLE();
return 0;
}
@@ -2352,20 +2307,22 @@ class SemiSpace : public Space {
void RewindPages(Page* start, int num_pages);
inline Page* anchor() { return &anchor_; }
- inline int max_pages() { return current_capacity_ / Page::kPageSize; }
+ inline int max_pages() {
+ return static_cast<int>(current_capacity_ / Page::kPageSize);
+ }
// Copies the flags into the masked positions on all pages in the space.
void FixPagesFlags(intptr_t flags, intptr_t flag_mask);
// The currently committed space capacity.
- int current_capacity_;
+ size_t current_capacity_;
// The maximum capacity that can be used by this space. A space cannot grow
// beyond that size.
- int maximum_capacity_;
+ size_t maximum_capacity_;
// The minimum capacity for the space. A space cannot shrink below this size.
- int minimum_capacity_;
+ size_t minimum_capacity_;
// Used to govern object promotion during mark-compact collection.
Address age_mark_;
@@ -2426,7 +2383,7 @@ class NewSpace : public Space {
inline bool ContainsSlow(Address a);
inline bool Contains(Object* o);
- bool SetUp(int initial_semispace_capacity, int max_semispace_capacity);
+ bool SetUp(size_t initial_semispace_capacity, size_t max_semispace_capacity);
// Tears down the space. Heap memory was not allocated by the space, so it
// is not deallocated here.
@@ -2448,15 +2405,16 @@ class NewSpace : public Space {
void Shrink();
// Return the allocated bytes in the active semispace.
- intptr_t Size() override {
+ size_t Size() override {
+ DCHECK_GE(top(), to_space_.page_low());
return to_space_.pages_used() * Page::kAllocatableMemory +
- static_cast<int>(top() - to_space_.page_low());
+ static_cast<size_t>(top() - to_space_.page_low());
}
- intptr_t SizeOfObjects() override { return Size(); }
+ size_t SizeOfObjects() override { return Size(); }
// Return the allocatable capacity of a semispace.
- intptr_t Capacity() {
+ size_t Capacity() {
SLOW_DCHECK(to_space_.current_capacity() == from_space_.current_capacity());
return (to_space_.current_capacity() / Page::kPageSize) *
Page::kAllocatableMemory;
@@ -2464,7 +2422,7 @@ class NewSpace : public Space {
// Return the current size of a semispace, allocatable and non-allocatable
// memory.
- intptr_t TotalCapacity() {
+ size_t TotalCapacity() {
DCHECK(to_space_.current_capacity() == from_space_.current_capacity());
return to_space_.current_capacity();
}
@@ -2484,7 +2442,10 @@ class NewSpace : public Space {
size_t CommittedPhysicalMemory() override;
// Return the available bytes without growing.
- intptr_t Available() override { return Capacity() - Size(); }
+ size_t Available() override {
+ DCHECK_GE(Capacity(), Size());
+ return Capacity() - Size();
+ }
size_t AllocatedSinceLastGC() {
bool seen_age_mark = false;
@@ -2510,17 +2471,18 @@ class NewSpace : public Space {
// Top was reset at some point, invalidating this metric.
return 0;
}
- intptr_t allocated = age_mark_page->area_end() - age_mark;
+ DCHECK_GE(age_mark_page->area_end(), age_mark);
+ size_t allocated = age_mark_page->area_end() - age_mark;
DCHECK_EQ(current_page, age_mark_page);
current_page = age_mark_page->next_page();
while (current_page != last_page) {
allocated += Page::kAllocatableMemory;
current_page = current_page->next_page();
}
+ DCHECK_GE(top(), current_page->area_start());
allocated += top() - current_page->area_start();
- DCHECK_LE(0, allocated);
DCHECK_LE(allocated, Size());
- return static_cast<size_t>(allocated);
+ return allocated;
}
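
AllocatedSinceLastGC sums three pieces: the tail of the age-mark page after the age mark, every fully used page between it and the page containing top, and the used prefix of top's page. A worked example with invented sizes:

#include <cstddef>

// Worked example of the AllocatedSinceLastGC accounting. The per-page budget
// and offsets below are invented for illustration only.
int main() {
  const size_t kAllocatableMemory = 16 * 1024;   // assumed per-page budget
  const size_t tail_after_age_mark = 1 * 1024;   // area_end - age_mark on the age-mark page
  const size_t full_pages_between = 2;           // pages fully allocated since then
  const size_t used_on_current_page = 3 * 1024;  // top - area_start on top's page

  size_t allocated = tail_after_age_mark +
                     full_pages_between * kAllocatableMemory +
                     used_on_current_page;
  (void)allocated;  // 1 KiB + 32 KiB + 3 KiB = 36 KiB in this example
  return 0;
}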
void MovePageFromSpaceToSpace(Page* page) {
@@ -2532,7 +2494,7 @@ class NewSpace : public Space {
bool Rebalance();
// Return the maximum capacity of a semispace.
- int MaximumCapacity() {
+ size_t MaximumCapacity() {
DCHECK(to_space_.maximum_capacity() == from_space_.maximum_capacity());
return to_space_.maximum_capacity();
}
@@ -2540,7 +2502,7 @@ class NewSpace : public Space {
bool IsAtMaximumCapacity() { return TotalCapacity() == MaximumCapacity(); }
// Returns the initial capacity of a semispace.
- int InitialTotalCapacity() {
+ size_t InitialTotalCapacity() {
DCHECK(to_space_.minimum_capacity() == from_space_.minimum_capacity());
return to_space_.minimum_capacity();
}
@@ -2832,7 +2794,7 @@ class LargeObjectSpace : public Space {
// Releases internal resources, frees objects in this space.
void TearDown();
- static intptr_t ObjectSizeFor(intptr_t chunk_size) {
+ static size_t ObjectSizeFor(size_t chunk_size) {
if (chunk_size <= (Page::kPageSize + Page::kObjectStartOffset)) return 0;
return chunk_size - Page::kPageSize - Page::kObjectStartOffset;
}
@@ -2843,11 +2805,11 @@ class LargeObjectSpace : public Space {
AllocateRaw(int object_size, Executability executable);
// Available bytes for objects in this space.
- inline intptr_t Available() override;
+ inline size_t Available() override;
- intptr_t Size() override { return size_; }
+ size_t Size() override { return size_; }
- intptr_t SizeOfObjects() override { return objects_size_; }
+ size_t SizeOfObjects() override { return objects_size_; }
// Approximate amount of physical memory committed for this space.
size_t CommittedPhysicalMemory() override;
@@ -2905,9 +2867,9 @@ class LargeObjectSpace : public Space {
private:
// The head of the linked list of large object chunks.
LargePage* first_page_;
- intptr_t size_; // allocated bytes
+ size_t size_; // allocated bytes
int page_count_; // number of chunks
- intptr_t objects_size_; // size of objects
+ size_t objects_size_; // size of objects
// Map MemoryChunk::kAlignment-aligned chunks to large pages covering them
base::HashMap chunk_map_;
diff --git a/deps/v8/src/heap/store-buffer.cc b/deps/v8/src/heap/store-buffer.cc
index a982eb3c40..974b85e1c8 100644
--- a/deps/v8/src/heap/store-buffer.cc
+++ b/deps/v8/src/heap/store-buffer.cc
@@ -16,63 +16,150 @@ namespace v8 {
namespace internal {
StoreBuffer::StoreBuffer(Heap* heap)
- : heap_(heap),
- top_(nullptr),
- start_(nullptr),
- limit_(nullptr),
- virtual_memory_(nullptr) {}
+ : heap_(heap), top_(nullptr), current_(0), virtual_memory_(nullptr) {
+ for (int i = 0; i < kStoreBuffers; i++) {
+ start_[i] = nullptr;
+ limit_[i] = nullptr;
+ lazy_top_[i] = nullptr;
+ }
+ task_running_ = false;
+}
void StoreBuffer::SetUp() {
// Allocate 3x the buffer size, so that we can start the new store buffer
// aligned to 2x the size. This lets us use a bit test to detect the end of
// the area.
- virtual_memory_ = new base::VirtualMemory(kStoreBufferSize * 2);
+ virtual_memory_ = new base::VirtualMemory(kStoreBufferSize * 3);
uintptr_t start_as_int =
reinterpret_cast<uintptr_t>(virtual_memory_->address());
- start_ = reinterpret_cast<Address*>(RoundUp(start_as_int, kStoreBufferSize));
- limit_ = start_ + (kStoreBufferSize / kPointerSize);
+ start_[0] =
+ reinterpret_cast<Address*>(RoundUp(start_as_int, kStoreBufferSize));
+ limit_[0] = start_[0] + (kStoreBufferSize / kPointerSize);
+ start_[1] = limit_[0];
+ limit_[1] = start_[1] + (kStoreBufferSize / kPointerSize);
- DCHECK(reinterpret_cast<Address>(start_) >= virtual_memory_->address());
- DCHECK(reinterpret_cast<Address>(limit_) >= virtual_memory_->address());
Address* vm_limit = reinterpret_cast<Address*>(
reinterpret_cast<char*>(virtual_memory_->address()) +
virtual_memory_->size());
- DCHECK(start_ <= vm_limit);
- DCHECK(limit_ <= vm_limit);
+
USE(vm_limit);
- DCHECK((reinterpret_cast<uintptr_t>(limit_) & kStoreBufferMask) == 0);
+ for (int i = 0; i < kStoreBuffers; i++) {
+ DCHECK(reinterpret_cast<Address>(start_[i]) >= virtual_memory_->address());
+ DCHECK(reinterpret_cast<Address>(limit_[i]) >= virtual_memory_->address());
+ DCHECK(start_[i] <= vm_limit);
+ DCHECK(limit_[i] <= vm_limit);
+ DCHECK((reinterpret_cast<uintptr_t>(limit_[i]) & kStoreBufferMask) == 0);
+ }
- if (!virtual_memory_->Commit(reinterpret_cast<Address>(start_),
- kStoreBufferSize,
+ if (!virtual_memory_->Commit(reinterpret_cast<Address>(start_[0]),
+ kStoreBufferSize * kStoreBuffers,
false)) { // Not executable.
V8::FatalProcessOutOfMemory("StoreBuffer::SetUp");
}
- top_ = start_;
+ current_ = 0;
+ top_ = start_[current_];
}
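
SetUp now reserves three times kStoreBufferSize and rounds the first buffer up to a kStoreBufferSize boundary, so both buffers fit inside the reservation and every limit_ ends on an aligned boundary; that alignment is what the DCHECK on kStoreBufferMask above verifies and what lets generated code detect the end of a buffer with a single bit test. A compilable sketch of the layout arithmetic (the size and base address are made up):

#include <cassert>
#include <cstdint>

int main() {
  const uintptr_t kStoreBufferSize = 1 << 17;  // illustrative power of two
  const uintptr_t kStoreBufferMask = kStoreBufferSize - 1;

  // Pretend the OS handed us an unaligned reservation of 3x the size.
  uintptr_t reservation = 0x12345678;  // assumed base address
  uintptr_t start0 = (reservation + kStoreBufferMask) & ~kStoreBufferMask;
  uintptr_t limit0 = start0 + kStoreBufferSize;
  uintptr_t start1 = limit0;
  uintptr_t limit1 = start1 + kStoreBufferSize;

  // Both buffers fit in the 3x reservation and both limits are aligned,
  // so "top reached limit" can be detected with a mask test.
  assert(limit1 <= reservation + 3 * kStoreBufferSize);
  assert((limit0 & kStoreBufferMask) == 0 && (limit1 & kStoreBufferMask) == 0);
  return 0;
}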
void StoreBuffer::TearDown() {
delete virtual_memory_;
- top_ = start_ = limit_ = nullptr;
+ top_ = nullptr;
+ for (int i = 0; i < kStoreBuffers; i++) {
+ start_[i] = nullptr;
+ limit_[i] = nullptr;
+ lazy_top_[i] = nullptr;
+ }
}
void StoreBuffer::StoreBufferOverflow(Isolate* isolate) {
- isolate->heap()->store_buffer()->MoveEntriesToRememberedSet();
+ isolate->heap()->store_buffer()->FlipStoreBuffers();
isolate->counters()->store_buffer_overflows()->Increment();
}
-void StoreBuffer::MoveEntriesToRememberedSet() {
- if (top_ == start_) return;
- DCHECK(top_ <= limit_);
- for (Address* current = start_; current < top_; current++) {
+void StoreBuffer::FlipStoreBuffers() {
+ base::LockGuard<base::Mutex> guard(&mutex_);
+ int other = (current_ + 1) % kStoreBuffers;
+ MoveEntriesToRememberedSet(other);
+ lazy_top_[current_] = top_;
+ current_ = other;
+ top_ = start_[current_];
+
+ if (!task_running_) {
+ task_running_ = true;
+ Task* task = new Task(heap_->isolate(), this);
+ V8::GetCurrentPlatform()->CallOnBackgroundThread(
+ task, v8::Platform::kShortRunningTask);
+ }
+}
+
+void StoreBuffer::MoveEntriesToRememberedSet(int index) {
+ if (!lazy_top_[index]) return;
+ DCHECK_GE(index, 0);
+ DCHECK_LT(index, kStoreBuffers);
+ for (Address* current = start_[index]; current < lazy_top_[index];
+ current++) {
DCHECK(!heap_->code_space()->Contains(*current));
Address addr = *current;
Page* page = Page::FromAnyPointerAddress(heap_, addr);
- RememberedSet<OLD_TO_NEW>::Insert(page, addr);
+ if (IsDeletionAddress(addr)) {
+ current++;
+ Address end = *current;
+ DCHECK(!IsDeletionAddress(end));
+ addr = UnmarkDeletionAddress(addr);
+ if (end) {
+ RememberedSet<OLD_TO_NEW>::RemoveRange(page, addr, end,
+ SlotSet::PREFREE_EMPTY_BUCKETS);
+ } else {
+ RememberedSet<OLD_TO_NEW>::Remove(page, addr);
+ }
+ } else {
+ DCHECK(!IsDeletionAddress(addr));
+ RememberedSet<OLD_TO_NEW>::Insert(page, addr);
+ }
}
- top_ = start_;
+ lazy_top_[index] = nullptr;
+}
+
+void StoreBuffer::MoveAllEntriesToRememberedSet() {
+ base::LockGuard<base::Mutex> guard(&mutex_);
+ int other = (current_ + 1) % kStoreBuffers;
+ MoveEntriesToRememberedSet(other);
+ lazy_top_[current_] = top_;
+ MoveEntriesToRememberedSet(current_);
+ top_ = start_[current_];
}
+void StoreBuffer::ConcurrentlyProcessStoreBuffer() {
+ base::LockGuard<base::Mutex> guard(&mutex_);
+ int other = (current_ + 1) % kStoreBuffers;
+ MoveEntriesToRememberedSet(other);
+ task_running_ = false;
+}
+
+void StoreBuffer::DeleteEntry(Address start, Address end) {
+ // Deletions coming from the GC are directly deleted from the remembered
+ // set. Deletions coming from the runtime are added to the store buffer
+ // to allow concurrent processing.
+ if (heap_->gc_state() == Heap::NOT_IN_GC) {
+ if (top_ + sizeof(Address) * 2 > limit_[current_]) {
+ StoreBufferOverflow(heap_->isolate());
+ }
+ *top_ = MarkDeletionAddress(start);
+ top_++;
+ *top_ = end;
+ top_++;
+ } else {
+ // In GC the store buffer has to be empty at any time.
+ DCHECK(Empty());
+ Page* page = Page::FromAddress(start);
+ if (end) {
+ RememberedSet<OLD_TO_NEW>::RemoveRange(page, start, end,
+ SlotSet::PREFREE_EMPTY_BUCKETS);
+ } else {
+ RememberedSet<OLD_TO_NEW>::Remove(page, start);
+ }
+ }
+}
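
DeleteEntry records a removal as a pair of buffer slots: the range start with kDeletionTag set in its low bit, followed by the range end (or null for a single slot). MoveEntriesToRememberedSet recognizes the tag, consumes both entries, and calls RemoveRange or Remove accordingly. A standalone sketch of just the tagging scheme (plain integers stand in for Address):

#include <cassert>
#include <cstdint>

// The deletion tag lives in the low bit, which is always clear for
// word-aligned slot addresses.
constexpr uintptr_t kDeletionTag = 1;

uintptr_t MarkDeletion(uintptr_t addr) { return addr | kDeletionTag; }
bool IsDeletion(uintptr_t addr) { return (addr & kDeletionTag) != 0; }
uintptr_t UnmarkDeletion(uintptr_t addr) { return addr & ~kDeletionTag; }

int main() {
  uintptr_t start = 0x1000;  // assumed word-aligned slot address
  uintptr_t end = 0x1040;    // end of the invalid range (0 for one slot)

  // The store buffer would hold the pair {MarkDeletion(start), end}.
  uintptr_t first = MarkDeletion(start);
  assert(IsDeletion(first) && !IsDeletion(end));
  assert(UnmarkDeletion(first) == start);
  return 0;
}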
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/heap/store-buffer.h b/deps/v8/src/heap/store-buffer.h
index 1b3fcb0a98..09faf4dcbd 100644
--- a/deps/v8/src/heap/store-buffer.h
+++ b/deps/v8/src/heap/store-buffer.h
@@ -8,20 +8,28 @@
#include "src/allocation.h"
#include "src/base/logging.h"
#include "src/base/platform/platform.h"
+#include "src/cancelable-task.h"
#include "src/globals.h"
+#include "src/heap/remembered-set.h"
#include "src/heap/slot-set.h"
namespace v8 {
namespace internal {
// Intermediate buffer that accumulates old-to-new stores from the generated
-// code. On buffer overflow the slots are moved to the remembered set.
+// code. Moreover, it stores invalid old-to-new slots with two entries.
+// The first is a tagged address of the start of the invalid range, the second
+// one is the end address of the invalid range or null if there is just one slot
+// that needs to be removed from the remembered set. On buffer overflow the
+// slots are moved to the remembered set.
class StoreBuffer {
public:
static const int kStoreBufferSize = 1 << (14 + kPointerSizeLog2);
static const int kStoreBufferMask = kStoreBufferSize - 1;
+ static const int kStoreBuffers = 2;
+ static const intptr_t kDeletionTag = 1;
- static void StoreBufferOverflow(Isolate* isolate);
+ V8_EXPORT_PRIVATE static void StoreBufferOverflow(Isolate* isolate);
explicit StoreBuffer(Heap* heap);
void SetUp();
@@ -30,17 +38,109 @@ class StoreBuffer {
// Used to add entries from generated code.
inline Address* top_address() { return reinterpret_cast<Address*>(&top_); }
- void MoveEntriesToRememberedSet();
+  // Moves entries from a specific store buffer to the remembered set. The
+  // caller must hold the mutex.
+ void MoveEntriesToRememberedSet(int index);
+
+  // This method ensures that all used store buffer entries are transferred to
+ // the remembered set.
+ void MoveAllEntriesToRememberedSet();
+
+ inline bool IsDeletionAddress(Address address) const {
+ return reinterpret_cast<intptr_t>(address) & kDeletionTag;
+ }
+
+ inline Address MarkDeletionAddress(Address address) {
+ return reinterpret_cast<Address>(reinterpret_cast<intptr_t>(address) |
+ kDeletionTag);
+ }
+
+ inline Address UnmarkDeletionAddress(Address address) {
+ return reinterpret_cast<Address>(reinterpret_cast<intptr_t>(address) &
+ ~kDeletionTag);
+ }
+
+ // If we only want to delete a single slot, end should be set to null which
+ // will be written into the second field. When processing the store buffer
+ // the more efficient Remove method will be called in this case.
+ void DeleteEntry(Address start, Address end = nullptr);
+
+ void InsertEntry(Address slot) {
+ // Insertions coming from the GC are directly inserted into the remembered
+ // set. Insertions coming from the runtime are added to the store buffer to
+ // allow concurrent processing.
+ if (heap_->gc_state() == Heap::NOT_IN_GC) {
+ if (top_ + sizeof(Address) > limit_[current_]) {
+ StoreBufferOverflow(heap_->isolate());
+ }
+ *top_ = slot;
+ top_++;
+ } else {
+ // In GC the store buffer has to be empty at any time.
+ DCHECK(Empty());
+ RememberedSet<OLD_TO_NEW>::Insert(Page::FromAddress(slot), slot);
+ }
+ }
+
+ // Used by the concurrent processing thread to transfer entries from the
+ // store buffer to the remembered set.
+ void ConcurrentlyProcessStoreBuffer();
+
+ bool Empty() {
+ for (int i = 0; i < kStoreBuffers; i++) {
+ if (lazy_top_[i]) {
+ return false;
+ }
+ }
+ return top_ == start_[current_];
+ }
private:
+ // There are two store buffers. If one store buffer fills up, the main thread
+ // publishes the top pointer of the store buffer that needs processing in its
+  // global lazy_top_ field. After that it starts the concurrent processing
+ // thread. The concurrent processing thread uses the pointer in lazy_top_.
+ // It will grab the given mutex and transfer its entries to the remembered
+ // set. If the concurrent thread does not make progress, the main thread will
+ // perform the work.
+  // Important: there is an ordering constraint. The store buffer with the
+ // older entries has to be processed first.
+ class Task : public CancelableTask {
+ public:
+ Task(Isolate* isolate, StoreBuffer* store_buffer)
+ : CancelableTask(isolate), store_buffer_(store_buffer) {}
+ virtual ~Task() {}
+
+ private:
+ void RunInternal() override {
+ store_buffer_->ConcurrentlyProcessStoreBuffer();
+ }
+ StoreBuffer* store_buffer_;
+ DISALLOW_COPY_AND_ASSIGN(Task);
+ };
+
+ void FlipStoreBuffers();
+
Heap* heap_;
Address* top_;
// The start and the limit of the buffer that contains store slots
- // added from the generated code.
- Address* start_;
- Address* limit_;
+ // added from the generated code. We have two chunks of store buffers.
+ // Whenever one fills up, we notify a concurrent processing thread and
+ // use the other empty one in the meantime.
+ Address* start_[kStoreBuffers];
+ Address* limit_[kStoreBuffers];
+
+ // At most one lazy_top_ pointer is set at any time.
+ Address* lazy_top_[kStoreBuffers];
+ base::Mutex mutex_;
+
+  // We only want to have at most one concurrent processing task running.
+ bool task_running_;
+
+ // Points to the current buffer in use.
+ int current_;
base::VirtualMemory* virtual_memory_;
};