author    Michaël Zasso <targos@protonmail.com>  2017-03-21 10:16:54 +0100
committer Michaël Zasso <targos@protonmail.com>  2017-03-25 09:44:10 +0100
commit    c459d8ea5d402c702948c860d9497b2230ff7e8a (patch)
tree      56c282fc4d40e5cb613b47cf7be3ea0526ed5b6f /deps/v8/src/heap
parent    e0bc5a7361b1d29c3ed034155fd779ce6f44fb13 (diff)
deps: update V8 to 5.7.492.69
PR-URL: https://github.com/nodejs/node/pull/11752
Reviewed-By: Ben Noordhuis <info@bnoordhuis.nl>
Reviewed-By: Franziska Hinkelmann <franziska.hinkelmann@gmail.com>
Diffstat (limited to 'deps/v8/src/heap')
-rw-r--r--  deps/v8/src/heap/array-buffer-tracker.cc   |   4
-rw-r--r--  deps/v8/src/heap/embedder-tracing.cc       |  72
-rw-r--r--  deps/v8/src/heap/embedder-tracing.h        |  67
-rw-r--r--  deps/v8/src/heap/gc-idle-time-handler.cc   |   1
-rw-r--r--  deps/v8/src/heap/gc-idle-time-handler.h    |   2
-rw-r--r--  deps/v8/src/heap/gc-tracer.cc              |  14
-rw-r--r--  deps/v8/src/heap/gc-tracer.h               |   6
-rw-r--r--  deps/v8/src/heap/heap-inl.h                |  15
-rw-r--r--  deps/v8/src/heap/heap.cc                   | 397
-rw-r--r--  deps/v8/src/heap/heap.h                    | 129
-rw-r--r--  deps/v8/src/heap/incremental-marking.cc    | 126
-rw-r--r--  deps/v8/src/heap/incremental-marking.h     |   8
-rw-r--r--  deps/v8/src/heap/mark-compact-inl.h        |   9
-rw-r--r--  deps/v8/src/heap/mark-compact.cc           | 341
-rw-r--r--  deps/v8/src/heap/mark-compact.h            |  62
-rw-r--r--  deps/v8/src/heap/memory-reducer.cc         |  40
-rw-r--r--  deps/v8/src/heap/memory-reducer.h          |  15
-rw-r--r--  deps/v8/src/heap/object-stats.cc           |  13
-rw-r--r--  deps/v8/src/heap/objects-visiting-inl.h    |  38
-rw-r--r--  deps/v8/src/heap/objects-visiting.cc       |   2
-rw-r--r--  deps/v8/src/heap/objects-visiting.h        |  15
-rw-r--r--  deps/v8/src/heap/remembered-set.h          |  15
-rw-r--r--  deps/v8/src/heap/scavenger.cc              |   2
-rw-r--r--  deps/v8/src/heap/slot-set.h                |  12
-rw-r--r--  deps/v8/src/heap/spaces-inl.h              |  67
-rw-r--r--  deps/v8/src/heap/spaces.cc                 |  88
-rw-r--r--  deps/v8/src/heap/spaces.h                  |  72
-rw-r--r--  deps/v8/src/heap/store-buffer.cc           |  34
-rw-r--r--  deps/v8/src/heap/store-buffer.h            |  93
29 files changed, 1190 insertions(+), 569 deletions(-)
diff --git a/deps/v8/src/heap/array-buffer-tracker.cc b/deps/v8/src/heap/array-buffer-tracker.cc
index 62b848ef70..def84572b6 100644
--- a/deps/v8/src/heap/array-buffer-tracker.cc
+++ b/deps/v8/src/heap/array-buffer-tracker.cc
@@ -78,8 +78,8 @@ void LocalArrayBufferTracker::Process(Callback callback) {
void ArrayBufferTracker::FreeDeadInNewSpace(Heap* heap) {
DCHECK_EQ(heap->gc_state(), Heap::HeapState::SCAVENGE);
- for (Page* page : NewSpacePageRange(heap->new_space()->FromSpaceStart(),
- heap->new_space()->FromSpaceEnd())) {
+ for (Page* page : PageRange(heap->new_space()->FromSpaceStart(),
+ heap->new_space()->FromSpaceEnd())) {
bool empty = ProcessBuffers(page, kUpdateForwardedRemoveOthers);
CHECK(empty);
}
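
For readers unfamiliar with the helper swapped in here: PageRange (like the NewSpacePageRange it replaces) only needs to expose begin()/end() iterators yielding Page* for the range-based for loop above to work. The standalone mock below shows that shape; MockPage and MockPageRange are hypothetical stand-ins, not V8's actual Page/PageRange types.

    // Illustrative only: a minimal page-range-like helper usable with range-for.
    #include <cstdio>

    struct MockPage {
      int id;
      MockPage* next;
    };

    class MockPageRange {
     public:
      class iterator {
       public:
        explicit iterator(MockPage* p) : p_(p) {}
        MockPage* operator*() const { return p_; }
        iterator& operator++() { p_ = p_->next; return *this; }
        bool operator!=(const iterator& other) const { return p_ != other.p_; }
       private:
        MockPage* p_;
      };
      MockPageRange(MockPage* first, MockPage* one_past_last)
          : first_(first), one_past_last_(one_past_last) {}
      iterator begin() const { return iterator(first_); }
      iterator end() const { return iterator(one_past_last_); }
     private:
      MockPage* first_;
      MockPage* one_past_last_;
    };

    int main() {
      MockPage pages[3] = {{0, nullptr}, {1, nullptr}, {2, nullptr}};
      pages[0].next = &pages[1];
      pages[1].next = &pages[2];
      // Walks pages 0, 1, 2 -- the same pattern as the loop in the hunk above.
      for (MockPage* page : MockPageRange(&pages[0], nullptr)) {
        std::printf("visiting page %d\n", page->id);
      }
      return 0;
    }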
diff --git a/deps/v8/src/heap/embedder-tracing.cc b/deps/v8/src/heap/embedder-tracing.cc
new file mode 100644
index 0000000000..2d11724181
--- /dev/null
+++ b/deps/v8/src/heap/embedder-tracing.cc
@@ -0,0 +1,72 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/embedder-tracing.h"
+
+#include "src/base/logging.h"
+
+namespace v8 {
+namespace internal {
+
+void LocalEmbedderHeapTracer::TracePrologue() {
+ if (!InUse()) return;
+
+ CHECK(cached_wrappers_to_trace_.empty());
+ num_v8_marking_deque_was_empty_ = 0;
+ remote_tracer_->TracePrologue();
+}
+
+void LocalEmbedderHeapTracer::TraceEpilogue() {
+ if (!InUse()) return;
+
+ CHECK(cached_wrappers_to_trace_.empty());
+ remote_tracer_->TraceEpilogue();
+}
+
+void LocalEmbedderHeapTracer::AbortTracing() {
+ if (!InUse()) return;
+
+ cached_wrappers_to_trace_.clear();
+ remote_tracer_->AbortTracing();
+}
+
+void LocalEmbedderHeapTracer::EnterFinalPause() {
+ if (!InUse()) return;
+
+ remote_tracer_->EnterFinalPause();
+}
+
+bool LocalEmbedderHeapTracer::Trace(
+ double deadline, EmbedderHeapTracer::AdvanceTracingActions actions) {
+ if (!InUse()) return false;
+
+ DCHECK_EQ(0, NumberOfCachedWrappersToTrace());
+ return remote_tracer_->AdvanceTracing(deadline, actions);
+}
+
+size_t LocalEmbedderHeapTracer::NumberOfWrappersToTrace() {
+ return (InUse())
+ ? cached_wrappers_to_trace_.size() +
+ remote_tracer_->NumberOfWrappersToTrace()
+ : 0;
+}
+
+void LocalEmbedderHeapTracer::RegisterWrappersWithRemoteTracer() {
+ if (!InUse()) return;
+
+ if (cached_wrappers_to_trace_.empty()) {
+ return;
+ }
+
+ remote_tracer_->RegisterV8References(cached_wrappers_to_trace_);
+ cached_wrappers_to_trace_.clear();
+}
+
+bool LocalEmbedderHeapTracer::RequiresImmediateWrapperProcessing() {
+ const size_t kTooManyWrappers = 16000;
+ return cached_wrappers_to_trace_.size() > kTooManyWrappers;
+}
+
+} // namespace internal
+} // namespace v8
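
Taken together, these hooks are driven by the collector in a fixed order. The sketch below is illustrative pseudocode only, built from the methods defined in this file; it is not the actual mark-compact or incremental-marking call site, and the deadline values are placeholders.

    // Illustrative driver (not V8 code): one wrapper-tracing cycle through the
    // LocalEmbedderHeapTracer API defined above.
    #include <limits>
    #include "src/heap/embedder-tracing.h"

    void SketchWrapperTracingCycle(
        v8::internal::LocalEmbedderHeapTracer* tracer,
        v8::EmbedderHeapTracer::AdvanceTracingActions actions) {
      tracer->TracePrologue();                   // marking starts
      // While marking, V8 caches <field0, field1> pairs for wrappers it finds:
      //   tracer->AddWrapperToTrace({field0, field1});
      // Periodically, or once RequiresImmediateWrapperProcessing() is true, the
      // cache is flushed to the embedder; Trace() DCHECKs the cache is empty.
      tracer->RegisterWrappersWithRemoteTracer();
      tracer->Trace(/*deadline=*/1.0, actions);  // embedder marks within a budget
      // At the final pause the embedder is asked to finish its marking.
      tracer->EnterFinalPause();
      tracer->Trace(std::numeric_limits<double>::infinity(), actions);
      tracer->TraceEpilogue();                   // marking finished
      // AbortTracing() is the cancellation path; it drops the cached wrappers.
    }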
diff --git a/deps/v8/src/heap/embedder-tracing.h b/deps/v8/src/heap/embedder-tracing.h
new file mode 100644
index 0000000000..5e10d6e2e8
--- /dev/null
+++ b/deps/v8/src/heap/embedder-tracing.h
@@ -0,0 +1,67 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_EMBEDDER_TRACING_H_
+#define V8_HEAP_EMBEDDER_TRACING_H_
+
+#include "include/v8.h"
+#include "src/flags.h"
+#include "src/globals.h"
+
+namespace v8 {
+namespace internal {
+
+class Heap;
+
+class V8_EXPORT_PRIVATE LocalEmbedderHeapTracer final {
+ public:
+ typedef std::pair<void*, void*> WrapperInfo;
+
+ LocalEmbedderHeapTracer()
+ : remote_tracer_(nullptr), num_v8_marking_deque_was_empty_(0) {}
+
+ void SetRemoteTracer(EmbedderHeapTracer* tracer) { remote_tracer_ = tracer; }
+ bool InUse() { return remote_tracer_ != nullptr; }
+
+ void TracePrologue();
+ void TraceEpilogue();
+ void AbortTracing();
+ void EnterFinalPause();
+ bool Trace(double deadline,
+ EmbedderHeapTracer::AdvanceTracingActions actions);
+
+ size_t NumberOfWrappersToTrace();
+ size_t NumberOfCachedWrappersToTrace() {
+ return cached_wrappers_to_trace_.size();
+ }
+ void AddWrapperToTrace(WrapperInfo entry) {
+ cached_wrappers_to_trace_.push_back(entry);
+ }
+ void ClearCachedWrappersToTrace() { cached_wrappers_to_trace_.clear(); }
+ void RegisterWrappersWithRemoteTracer();
+
+ // In order to avoid running out of memory we force tracing wrappers if there
+ // are too many of them.
+ bool RequiresImmediateWrapperProcessing();
+
+ void NotifyV8MarkingDequeWasEmpty() { num_v8_marking_deque_was_empty_++; }
+ bool ShouldFinalizeIncrementalMarking() {
+ static const size_t kMaxIncrementalFixpointRounds = 3;
+ return !FLAG_incremental_marking_wrappers || !InUse() ||
+ NumberOfWrappersToTrace() == 0 ||
+ num_v8_marking_deque_was_empty_ > kMaxIncrementalFixpointRounds;
+ }
+
+ private:
+ typedef std::vector<WrapperInfo> WrapperCache;
+
+ EmbedderHeapTracer* remote_tracer_;
+ WrapperCache cached_wrappers_to_trace_;
+ size_t num_v8_marking_deque_was_empty_;
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_HEAP_EMBEDDER_TRACING_H_
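
On the other side of remote_tracer_ sits an embedder-provided v8::EmbedderHeapTracer. The sketch below shows what such a tracer might look like; it is not part of this patch, the class and field names are hypothetical, and the method signatures are inferred from the calls in embedder-tracing.cc, so they should be checked against include/v8.h of this exact V8 version.

    // Hypothetical embedder-side tracer; illustration only.
    #include <utility>
    #include <vector>
    #include "v8.h"  // V8's public include/v8.h

    class SketchEmbedderTracer : public v8::EmbedderHeapTracer {
     public:
      // V8 hands over the <internal field 0, internal field 1> pairs collected
      // by Heap::TracePossibleWrapper() via RegisterWrappersWithRemoteTracer().
      void RegisterV8References(
          const std::vector<std::pair<void*, void*>>& wrappers) override {
        for (const auto& pair : wrappers) to_trace_.push_back(pair);
      }
      void TracePrologue() override { /* a marking cycle begins */ }
      bool AdvanceTracing(double deadline_in_ms,
                          AdvanceTracingActions actions) override {
        // Mark embedder objects reachable from the registered wrappers until
        // the deadline; the return value reports progress back to V8 (see
        // include/v8.h for the exact contract). Work elided in this sketch.
        to_trace_.clear();
        return false;
      }
      void EnterFinalPause() override { /* finish marking synchronously */ }
      void TraceEpilogue() override { /* marking cycle done */ }
      void AbortTracing() override { to_trace_.clear(); }
      size_t NumberOfWrappersToTrace() override { return to_trace_.size(); }

     private:
      std::vector<std::pair<void*, void*>> to_trace_;
    };

    // Assumed registration path (reaches Heap::SetEmbedderHeapTracer, which now
    // forwards to LocalEmbedderHeapTracer::SetRemoteTracer):
    //   SketchEmbedderTracer tracer;
    //   isolate->SetEmbedderHeapTracer(&tracer);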
diff --git a/deps/v8/src/heap/gc-idle-time-handler.cc b/deps/v8/src/heap/gc-idle-time-handler.cc
index 0c411f7b4c..905514c4bf 100644
--- a/deps/v8/src/heap/gc-idle-time-handler.cc
+++ b/deps/v8/src/heap/gc-idle-time-handler.cc
@@ -146,6 +146,7 @@ GCIdleTimeAction GCIdleTimeHandler::Compute(double idle_time_in_ms,
return GCIdleTimeAction::IncrementalStep();
}
+bool GCIdleTimeHandler::Enabled() { return FLAG_incremental_marking; }
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/heap/gc-idle-time-handler.h b/deps/v8/src/heap/gc-idle-time-handler.h
index 7ce0c1a2f6..b730a7bbba 100644
--- a/deps/v8/src/heap/gc-idle-time-handler.h
+++ b/deps/v8/src/heap/gc-idle-time-handler.h
@@ -125,6 +125,8 @@ class V8_EXPORT_PRIVATE GCIdleTimeHandler {
GCIdleTimeAction Compute(double idle_time_in_ms,
GCIdleTimeHeapState heap_state);
+ bool Enabled();
+
void ResetNoProgressCounter() { idle_times_which_made_no_progress_ = 0; }
static size_t EstimateMarkingStepSize(double idle_time_in_ms,
diff --git a/deps/v8/src/heap/gc-tracer.cc b/deps/v8/src/heap/gc-tracer.cc
index dcd319fdae..cf881c473b 100644
--- a/deps/v8/src/heap/gc-tracer.cc
+++ b/deps/v8/src/heap/gc-tracer.cc
@@ -174,8 +174,7 @@ void GCTracer::Start(GarbageCollector collector,
current_.start_object_size = heap_->SizeOfObjects();
current_.start_memory_size = heap_->memory_allocator()->Size();
current_.start_holes_size = CountTotalHolesSize(heap_);
- current_.new_space_object_size =
- heap_->new_space()->top() - heap_->new_space()->bottom();
+ current_.new_space_object_size = heap_->new_space()->Size();
current_.incremental_marking_bytes = 0;
current_.incremental_marking_duration = 0;
@@ -510,9 +509,14 @@ void GCTracer::PrintNVP() const {
"pause=%.1f "
"mutator=%.1f "
"gc=%s "
- "reduce_memory=%d\n",
- duration, spent_in_mutator, current_.TypeName(true),
- current_.reduce_memory);
+ "reduce_memory=%d "
+ "mark=%.2f "
+ "mark.roots=%.2f "
+ "mark.old_to_new=%.2f\n",
+ duration, spent_in_mutator, "mmc", current_.reduce_memory,
+ current_.scopes[Scope::MINOR_MC_MARK],
+ current_.scopes[Scope::MINOR_MC_MARK_ROOTS],
+ current_.scopes[Scope::MINOR_MC_MARK_OLD_TO_NEW_POINTERS]);
break;
case Event::MARK_COMPACTOR:
case Event::INCREMENTAL_MARK_COMPACTOR:
diff --git a/deps/v8/src/heap/gc-tracer.h b/deps/v8/src/heap/gc-tracer.h
index ed62dee5f1..7aff1cf59d 100644
--- a/deps/v8/src/heap/gc-tracer.h
+++ b/deps/v8/src/heap/gc-tracer.h
@@ -79,6 +79,12 @@ enum ScavengeSpeedMode { kForAllObjects, kForSurvivedObjects };
F(MC_SWEEP_CODE) \
F(MC_SWEEP_MAP) \
F(MC_SWEEP_OLD) \
+ F(MINOR_MC_MARK) \
+ F(MINOR_MC_MARK_CODE_FLUSH_CANDIDATES) \
+ F(MINOR_MC_MARK_GLOBAL_HANDLES) \
+ F(MINOR_MC_MARK_OLD_TO_NEW_POINTERS) \
+ F(MINOR_MC_MARK_ROOTS) \
+ F(MINOR_MC_MARK_WEAK) \
F(SCAVENGER_CODE_FLUSH_CANDIDATES) \
F(SCAVENGER_OLD_TO_NEW_POINTERS) \
F(SCAVENGER_ROOTS) \
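
The scope list above is an X-macro: each F(Name) entry can be expanded into an enum value, a name string, or both, wherever the list macro is instantiated. A minimal standalone sketch of the pattern, using a hypothetical SKETCH_SCOPE_LIST with a few of the new MINOR_MC entries:

    // Illustrative only: how an F(...) scope list expands into ids and names.
    #include <cstdio>

    #define SKETCH_SCOPE_LIST(F)            \
      F(MINOR_MC_MARK)                      \
      F(MINOR_MC_MARK_ROOTS)                \
      F(MINOR_MC_MARK_OLD_TO_NEW_POINTERS)

    enum class SketchScope {
    #define DEFINE_SCOPE_ENUM(name) name,
      SKETCH_SCOPE_LIST(DEFINE_SCOPE_ENUM)
    #undef DEFINE_SCOPE_ENUM
    };

    static const char* const kSketchScopeNames[] = {
    #define DEFINE_SCOPE_NAME(name) #name,
        SKETCH_SCOPE_LIST(DEFINE_SCOPE_NAME)
    #undef DEFINE_SCOPE_NAME
    };

    int main() {
      // Prints "MINOR_MC_MARK_ROOTS".
      std::printf("%s\n", kSketchScopeNames[static_cast<int>(
                              SketchScope::MINOR_MC_MARK_ROOTS)]);
      return 0;
    }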
diff --git a/deps/v8/src/heap/heap-inl.h b/deps/v8/src/heap/heap-inl.h
index 7d0d241289..4d060f8e43 100644
--- a/deps/v8/src/heap/heap-inl.h
+++ b/deps/v8/src/heap/heap-inl.h
@@ -21,6 +21,7 @@
#include "src/log.h"
#include "src/msan.h"
#include "src/objects-inl.h"
+#include "src/objects/scope-info.h"
#include "src/type-feedback-vector-inl.h"
namespace v8 {
@@ -698,12 +699,15 @@ void Heap::ExternalStringTable::AddString(String* string) {
}
}
-
-void Heap::ExternalStringTable::Iterate(ObjectVisitor* v) {
+void Heap::ExternalStringTable::IterateNewSpaceStrings(ObjectVisitor* v) {
if (!new_space_strings_.is_empty()) {
Object** start = &new_space_strings_[0];
v->VisitPointers(start, start + new_space_strings_.length());
}
+}
+
+void Heap::ExternalStringTable::IterateAll(ObjectVisitor* v) {
+ IterateNewSpaceStrings(v);
if (!old_space_strings_.is_empty()) {
Object** start = &old_space_strings_[0];
v->VisitPointers(start, start + old_space_strings_.length());
@@ -809,9 +813,16 @@ int Heap::GetNextTemplateSerialNumber() {
void Heap::SetSerializedTemplates(FixedArray* templates) {
DCHECK_EQ(empty_fixed_array(), serialized_templates());
+ DCHECK(isolate()->serializer_enabled());
set_serialized_templates(templates);
}
+void Heap::SetSerializedGlobalProxySizes(FixedArray* sizes) {
+ DCHECK_EQ(empty_fixed_array(), serialized_global_proxy_sizes());
+ DCHECK(isolate()->serializer_enabled());
+ set_serialized_global_proxy_sizes(sizes);
+}
+
void Heap::CreateObjectStats() {
if (V8_LIKELY(FLAG_gc_stats == 0)) return;
if (!live_object_stats_) {
diff --git a/deps/v8/src/heap/heap.cc b/deps/v8/src/heap/heap.cc
index 2059dae6b7..478be1f03a 100644
--- a/deps/v8/src/heap/heap.cc
+++ b/deps/v8/src/heap/heap.cc
@@ -20,6 +20,7 @@
#include "src/global-handles.h"
#include "src/heap/array-buffer-tracker-inl.h"
#include "src/heap/code-stats.h"
+#include "src/heap/embedder-tracing.h"
#include "src/heap/gc-idle-time-handler.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/incremental-marking.h"
@@ -80,6 +81,7 @@ Heap::Heap()
max_semi_space_size_(8 * (kPointerSize / 4) * MB),
initial_semispace_size_(MB),
max_old_generation_size_(700ul * (kPointerSize / 4) * MB),
+ initial_max_old_generation_size_(max_old_generation_size_),
initial_old_generation_size_(max_old_generation_size_ /
kInitalOldGenerationLimitFactor),
old_generation_size_configured_(false),
@@ -93,6 +95,8 @@ Heap::Heap()
survived_last_scavenge_(0),
always_allocate_scope_count_(0),
memory_pressure_level_(MemoryPressureLevel::kNone),
+ out_of_memory_callback_(nullptr),
+ out_of_memory_callback_data_(nullptr),
contexts_disposed_(0),
number_of_disposed_maps_(0),
global_ic_age_(0),
@@ -155,7 +159,7 @@ Heap::Heap()
deserialization_complete_(false),
strong_roots_list_(NULL),
heap_iterator_depth_(0),
- embedder_heap_tracer_(nullptr),
+ local_embedder_heap_tracer_(nullptr),
force_oom_(false),
delay_sweeper_tasks_for_testing_(false) {
// Allow build-time customization of the max semispace size. Building
@@ -292,6 +296,9 @@ GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space,
return YoungGenerationCollector();
}
+void Heap::SetGCState(HeapState state) {
+ gc_state_ = state;
+}
// TODO(1238405): Combine the infrastructure for --heap-stats and
// --log-gc to avoid the complicated preprocessor and flag testing.
@@ -442,7 +449,6 @@ void Heap::GarbageCollectionPrologue() {
}
CheckNewSpaceExpansionCriteria();
UpdateNewSpaceAllocationCounter();
- store_buffer()->MoveAllEntriesToRememberedSet();
}
size_t Heap::SizeOfObjects() {
@@ -510,6 +516,22 @@ void Heap::MergeAllocationSitePretenuringFeedback(
}
}
+class Heap::SkipStoreBufferScope {
+ public:
+ explicit SkipStoreBufferScope(StoreBuffer* store_buffer)
+ : store_buffer_(store_buffer) {
+ store_buffer_->MoveAllEntriesToRememberedSet();
+ store_buffer_->SetMode(StoreBuffer::IN_GC);
+ }
+
+ ~SkipStoreBufferScope() {
+ DCHECK(store_buffer_->Empty());
+ store_buffer_->SetMode(StoreBuffer::NOT_IN_GC);
+ }
+
+ private:
+ StoreBuffer* store_buffer_;
+};
class Heap::PretenuringScope {
public:
@@ -861,6 +883,10 @@ void Heap::CollectAllAvailableGarbage(GarbageCollectionReason gc_reason) {
// Note: as weak callbacks can execute arbitrary code, we cannot
// hope that eventually there will be no weak callbacks invocations.
// Therefore stop recollecting after several attempts.
+ if (gc_reason == GarbageCollectionReason::kLastResort) {
+ InvokeOutOfMemoryCallback();
+ }
+ RuntimeCallTimerScope(isolate(), &RuntimeCallStats::GC_AllAvailableGarbage);
if (isolate()->concurrent_recompilation_enabled()) {
// The optimizing compiler may be unnecessarily holding on to memory.
DisallowHeapAllocation no_recursive_gc;
@@ -943,6 +969,7 @@ bool Heap::CollectGarbage(GarbageCollector collector,
const v8::GCCallbackFlags gc_callback_flags) {
// The VM is in the GC state until exiting this function.
VMState<GC> state(isolate_);
+ RuntimeCallTimerScope(isolate(), &RuntimeCallStats::GC);
#ifdef DEBUG
// Reset the allocation timeout to the GC interval, but make sure to
@@ -1022,6 +1049,7 @@ bool Heap::CollectGarbage(GarbageCollector collector,
(committed_memory_before > committed_memory_after + MB) ||
HasHighFragmentation(used_memory_after, committed_memory_after) ||
(detached_contexts()->length() > 0);
+ event.committed_memory = committed_memory_after;
if (deserialization_complete_) {
memory_reducer_->NotifyMarkCompact(event);
}
@@ -1164,7 +1192,7 @@ bool Heap::ReserveSpace(Reservation* reservations, List<Address>* maps) {
// deserializing.
Address free_space_address = free_space->address();
CreateFillerObjectAt(free_space_address, Map::kSize,
- ClearRecordedSlots::kNo, ClearBlackArea::kNo);
+ ClearRecordedSlots::kNo);
maps->Add(free_space_address);
} else {
perform_gc = true;
@@ -1195,7 +1223,7 @@ bool Heap::ReserveSpace(Reservation* reservations, List<Address>* maps) {
// deserializing.
Address free_space_address = free_space->address();
CreateFillerObjectAt(free_space_address, size,
- ClearRecordedSlots::kNo, ClearBlackArea::kNo);
+ ClearRecordedSlots::kNo);
DCHECK(space < SerializerDeserializer::kNumberOfPreallocatedSpaces);
chunk.start = free_space_address;
chunk.end = free_space_address + size;
@@ -1313,6 +1341,7 @@ bool Heap::PerformGarbageCollection(
{
Heap::PretenuringScope pretenuring_scope(this);
+ Heap::SkipStoreBufferScope skip_store_buffer_scope(store_buffer_);
switch (collector) {
case MARK_COMPACTOR:
@@ -1394,6 +1423,7 @@ bool Heap::PerformGarbageCollection(
void Heap::CallGCPrologueCallbacks(GCType gc_type, GCCallbackFlags flags) {
+ RuntimeCallTimerScope(isolate(), &RuntimeCallStats::GCPrologueCallback);
for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
if (gc_type & gc_prologue_callbacks_[i].gc_type) {
if (!gc_prologue_callbacks_[i].pass_isolate) {
@@ -1415,6 +1445,7 @@ void Heap::CallGCPrologueCallbacks(GCType gc_type, GCCallbackFlags flags) {
void Heap::CallGCEpilogueCallbacks(GCType gc_type,
GCCallbackFlags gc_callback_flags) {
+ RuntimeCallTimerScope(isolate(), &RuntimeCallStats::GCEpilogueCallback);
for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
if (gc_type & gc_epilogue_callbacks_[i].gc_type) {
if (!gc_epilogue_callbacks_[i].pass_isolate) {
@@ -1433,7 +1464,8 @@ void Heap::CallGCEpilogueCallbacks(GCType gc_type,
void Heap::MarkCompact() {
PauseAllocationObserversScope pause_observers(this);
- gc_state_ = MARK_COMPACT;
+ SetGCState(MARK_COMPACT);
+
LOG(isolate_, ResourceEvent("markcompact", "begin"));
uint64_t size_of_objects_before_gc = SizeOfObjects();
@@ -1459,7 +1491,7 @@ void Heap::MinorMarkCompact() { UNREACHABLE(); }
void Heap::MarkCompactEpilogue() {
TRACE_GC(tracer(), GCTracer::Scope::MC_EPILOGUE);
- gc_state_ = NOT_IN_GC;
+ SetGCState(NOT_IN_GC);
isolate_->counters()->objs_since_last_full()->Set(0);
@@ -1512,21 +1544,6 @@ static bool IsUnscavengedHeapObject(Heap* heap, Object** p) {
!HeapObject::cast(*p)->map_word().IsForwardingAddress();
}
-
-static bool IsUnmodifiedHeapObject(Object** p) {
- Object* object = *p;
- if (object->IsSmi()) return false;
- HeapObject* heap_object = HeapObject::cast(object);
- if (!object->IsJSObject()) return false;
- JSObject* js_object = JSObject::cast(object);
- if (!js_object->WasConstructedFromApiFunction()) return false;
- JSFunction* constructor =
- JSFunction::cast(js_object->map()->GetConstructor());
-
- return constructor->initial_map() == heap_object->map();
-}
-
-
void PromotionQueue::Initialize() {
// The last to-space page may be used for promotion queue. On promotion
// conflict, we use the emergency stack.
@@ -1605,7 +1622,7 @@ void Heap::Scavenge() {
mark_compact_collector()->sweeper().EnsureNewSpaceCompleted();
- gc_state_ = SCAVENGE;
+ SetGCState(SCAVENGE);
// Implements Cheney's copying algorithm
LOG(isolate_, ResourceEvent("scavenge", "begin"));
@@ -1615,13 +1632,6 @@ void Heap::Scavenge() {
scavenge_collector_->SelectScavengingVisitorsTable();
- if (UsingEmbedderHeapTracer()) {
- // Register found wrappers with embedder so it can add them to its marking
- // deque and correctly manage the case when v8 scavenger collects the
- // wrappers by either keeping wrappables alive, or cleaning marking deque.
- RegisterWrappersWithEmbedderHeapTracer();
- }
-
// Flip the semispaces. After flipping, to space is empty, from space has
// live objects.
new_space_->Flip();
@@ -1701,8 +1711,10 @@ void Heap::Scavenge() {
isolate()->global_handles()->MarkNewSpaceWeakUnmodifiedObjectsPending(
&IsUnscavengedHeapObject);
- isolate()->global_handles()->IterateNewSpaceWeakUnmodifiedRoots(
- &scavenge_visitor);
+ isolate()
+ ->global_handles()
+ ->IterateNewSpaceWeakUnmodifiedRoots<
+ GlobalHandles::HANDLE_PHANTOM_NODES_VISIT_OTHERS>(&scavenge_visitor);
new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
UpdateNewSpaceReferencesInExternalStringTable(
@@ -1727,9 +1739,13 @@ void Heap::Scavenge() {
IncrementYoungSurvivorsCounter(PromotedSpaceSizeOfObjects() +
new_space_->Size() - survived_watermark);
+ // Scavenger may find new wrappers by iterating objects promoted onto a black
+ // page.
+ local_embedder_heap_tracer()->RegisterWrappersWithRemoteTracer();
+
LOG(isolate_, ResourceEvent("scavenge", "end"));
- gc_state_ = NOT_IN_GC;
+ SetGCState(NOT_IN_GC);
}
@@ -1882,7 +1898,7 @@ void Heap::VisitExternalResources(v8::ExternalResourceVisitor* visitor) {
v8::ExternalResourceVisitor* visitor_;
} external_string_table_visitor(visitor);
- external_string_table_.Iterate(&external_string_table_visitor);
+ external_string_table_.IterateAll(&external_string_table_visitor);
}
Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
@@ -2008,7 +2024,6 @@ void Heap::UnregisterArrayBuffer(JSArrayBuffer* buffer) {
ArrayBufferTracker::Unregister(this, buffer);
}
-
void Heap::ConfigureInitialOldGenerationSize() {
if (!old_generation_size_configured_ && tracer()->SurvivalEventsRecorded()) {
old_generation_allocation_limit_ =
@@ -2019,7 +2034,6 @@ void Heap::ConfigureInitialOldGenerationSize() {
}
}
-
AllocationResult Heap::AllocatePartialMap(InstanceType instance_type,
int instance_size) {
Object* result = nullptr;
@@ -2107,8 +2121,7 @@ AllocationResult Heap::AllocateFillerObject(int size, bool double_align,
MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
DCHECK(chunk->owner()->identity() == space);
#endif
- CreateFillerObjectAt(obj->address(), size, ClearRecordedSlots::kNo,
- ClearBlackArea::kNo);
+ CreateFillerObjectAt(obj->address(), size, ClearRecordedSlots::kNo);
return obj;
}
@@ -2256,6 +2269,7 @@ bool Heap::CreateInitialMaps() {
ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, scope_info)
ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, module_info)
+ ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, type_feedback_vector)
ALLOCATE_PRIMITIVE_MAP(HEAP_NUMBER_TYPE, HeapNumber::kSize, heap_number,
Context::NUMBER_FUNCTION_INDEX)
ALLOCATE_MAP(MUTABLE_HEAP_NUMBER_TYPE, HeapNumber::kSize,
@@ -2279,6 +2293,9 @@ bool Heap::CreateInitialMaps() {
ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, optimized_out);
ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, stale_register);
+ ALLOCATE_MAP(JS_PROMISE_CAPABILITY_TYPE, JSPromiseCapability::kSize,
+ js_promise_capability);
+
for (unsigned i = 0; i < arraysize(string_type_table); i++) {
const StringTypeTable& entry = string_type_table[i];
{
@@ -2344,6 +2361,7 @@ bool Heap::CreateInitialMaps() {
ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, debug_evaluate_context)
ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, block_context)
ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, module_context)
+ ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, eval_context)
ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, script_context)
ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, script_context_table)
@@ -2523,10 +2541,18 @@ AllocationResult Heap::AllocateTransitionArray(int capacity) {
return array;
}
-
-void Heap::CreateApiObjects() {
+bool Heap::CreateApiObjects() {
HandleScope scope(isolate());
set_message_listeners(*TemplateList::New(isolate(), 2));
+ HeapObject* obj = nullptr;
+ {
+ AllocationResult allocation = AllocateStruct(INTERCEPTOR_INFO_TYPE);
+ if (!allocation.To(&obj)) return false;
+ }
+ InterceptorInfo* info = InterceptorInfo::cast(obj);
+ info->set_flags(0);
+ set_noop_interceptor_info(info);
+ return true;
}
@@ -2697,10 +2723,14 @@ void Heap::CreateInitialObjects() {
}
Handle<NameDictionary> empty_properties_dictionary =
- NameDictionary::New(isolate(), 0, TENURED);
+ NameDictionary::NewEmpty(isolate(), TENURED);
empty_properties_dictionary->SetRequiresCopyOnCapacityChange();
set_empty_properties_dictionary(*empty_properties_dictionary);
+ set_public_symbol_table(*empty_properties_dictionary);
+ set_api_symbol_table(*empty_properties_dictionary);
+ set_api_private_symbol_table(*empty_properties_dictionary);
+
set_number_string_cache(
*factory->NewFixedArray(kInitialNumberStringCacheSize * 2, TENURED));
@@ -2729,9 +2759,6 @@ void Heap::CreateInitialObjects() {
set_undefined_cell(*factory->NewCell(factory->undefined_value()));
- // The symbol registry is initialized lazily.
- set_symbol_registry(Smi::kZero);
-
// Microtask queue uses the empty fixed array as a sentinel for "empty".
// Number of queued microtasks stored in Isolate::pending_microtask_count().
set_microtask_queue(empty_fixed_array());
@@ -2779,6 +2806,7 @@ void Heap::CreateInitialObjects() {
empty_fixed_array());
empty_type_feedback_vector->set(TypeFeedbackVector::kInvocationCountIndex,
Smi::kZero);
+ empty_type_feedback_vector->set_map(type_feedback_vector_map());
set_empty_type_feedback_vector(*empty_type_feedback_vector);
// We use a canonical empty LiteralsArray for all functions that neither
@@ -2817,7 +2845,7 @@ void Heap::CreateInitialObjects() {
set_script_list(Smi::kZero);
Handle<SeededNumberDictionary> slow_element_dictionary =
- SeededNumberDictionary::New(isolate(), 0, TENURED);
+ SeededNumberDictionary::NewEmpty(isolate(), TENURED);
slow_element_dictionary->set_requires_slow_elements();
set_empty_slow_element_dictionary(*slow_element_dictionary);
@@ -2864,7 +2892,12 @@ void Heap::CreateInitialObjects() {
handle(Smi::FromInt(Isolate::kProtectorValid), isolate()));
set_array_iterator_protector(*array_iterator_cell);
+ cell = factory->NewPropertyCell();
+ cell->set_value(Smi::FromInt(Isolate::kProtectorValid));
+ set_array_buffer_neutering_protector(*cell);
+
set_serialized_templates(empty_fixed_array());
+ set_serialized_global_proxy_sizes(empty_fixed_array());
set_weak_stack_trace_list(Smi::kZero);
@@ -2878,6 +2911,42 @@ void Heap::CreateInitialObjects() {
// Initialize compilation cache.
isolate_->compilation_cache()->Clear();
+
+ // Finish creating JSPromiseCapabilityMap
+ {
+ // TODO(caitp): This initialization can be removed once PromiseCapability
+ // object is no longer used by builtins implemented in javascript.
+ Handle<Map> map = factory->js_promise_capability_map();
+ map->set_inobject_properties_or_constructor_function_index(3);
+
+ Map::EnsureDescriptorSlack(map, 3);
+
+ PropertyAttributes attrs =
+ static_cast<PropertyAttributes>(READ_ONLY | DONT_DELETE);
+ { // promise
+ Descriptor d = Descriptor::DataField(factory->promise_string(),
+ JSPromiseCapability::kPromiseIndex,
+ attrs, Representation::Tagged());
+ map->AppendDescriptor(&d);
+ }
+
+ { // resolve
+ Descriptor d = Descriptor::DataField(factory->resolve_string(),
+ JSPromiseCapability::kResolveIndex,
+ attrs, Representation::Tagged());
+ map->AppendDescriptor(&d);
+ }
+
+ { // reject
+ Descriptor d = Descriptor::DataField(factory->reject_string(),
+ JSPromiseCapability::kRejectIndex,
+ attrs, Representation::Tagged());
+ map->AppendDescriptor(&d);
+ }
+
+ map->set_is_extensible(false);
+ set_js_promise_capability_map(*map);
+ }
}
bool Heap::RootCanBeWrittenAfterInitialization(Heap::RootListIndex root_index) {
@@ -2888,7 +2957,6 @@ bool Heap::RootCanBeWrittenAfterInitialization(Heap::RootListIndex root_index) {
case kInstanceofCacheAnswerRootIndex:
case kCodeStubsRootIndex:
case kEmptyScriptRootIndex:
- case kSymbolRegistryRootIndex:
case kScriptListRootIndex:
case kMaterializedObjectsRootIndex:
case kMicrotaskQueueRootIndex:
@@ -2899,6 +2967,10 @@ bool Heap::RootCanBeWrittenAfterInitialization(Heap::RootListIndex root_index) {
case kNoScriptSharedFunctionInfosRootIndex:
case kWeakStackTraceListRootIndex:
case kSerializedTemplatesRootIndex:
+ case kSerializedGlobalProxySizesRootIndex:
+ case kPublicSymbolTableRootIndex:
+ case kApiSymbolTableRootIndex:
+ case kApiPrivateSymbolTableRootIndex:
// Smi values
#define SMI_ENTRY(type, name, Name) case k##Name##RootIndex:
SMI_ROOT_LIST(SMI_ENTRY)
@@ -2918,6 +2990,18 @@ bool Heap::RootCanBeTreatedAsConstant(RootListIndex root_index) {
!InNewSpace(root(root_index));
}
+bool Heap::IsUnmodifiedHeapObject(Object** p) {
+ Object* object = *p;
+ if (object->IsSmi()) return false;
+ HeapObject* heap_object = HeapObject::cast(object);
+ if (!object->IsJSObject()) return false;
+ JSObject* js_object = JSObject::cast(object);
+ if (!js_object->WasConstructedFromApiFunction()) return false;
+ JSFunction* constructor =
+ JSFunction::cast(js_object->map()->GetConstructor());
+
+ return constructor->initial_map() == heap_object->map();
+}
int Heap::FullSizeNumberStringCacheLength() {
// Compute the size of the number string cache based on the max newspace size.
@@ -3042,6 +3126,7 @@ AllocationResult Heap::AllocateBytecodeArray(int length,
instance->set_parameter_count(parameter_count);
instance->set_interrupt_budget(interpreter::Interpreter::InterruptBudget());
instance->set_osr_loop_nesting_level(0);
+ instance->set_bytecode_age(BytecodeArray::kNoAgeBytecodeAge);
instance->set_constant_pool(constant_pool);
instance->set_handler_table(empty_fixed_array());
instance->set_source_position_table(empty_byte_array());
@@ -3050,9 +3135,9 @@ AllocationResult Heap::AllocateBytecodeArray(int length,
return result;
}
-void Heap::CreateFillerObjectAt(Address addr, int size, ClearRecordedSlots mode,
- ClearBlackArea black_area_mode) {
- if (size == 0) return;
+HeapObject* Heap::CreateFillerObjectAt(Address addr, int size,
+ ClearRecordedSlots mode) {
+ if (size == 0) return nullptr;
HeapObject* filler = HeapObject::FromAddress(addr);
if (size == kPointerSize) {
filler->set_map_no_write_barrier(
@@ -3070,20 +3155,11 @@ void Heap::CreateFillerObjectAt(Address addr, int size, ClearRecordedSlots mode,
ClearRecordedSlotRange(addr, addr + size);
}
- // If the location where the filler is created is within a black area we have
- // to clear the mark bits of the filler space.
- if (black_area_mode == ClearBlackArea::kYes &&
- incremental_marking()->black_allocation() &&
- Marking::IsBlackOrGrey(ObjectMarking::MarkBitFrom(addr))) {
- Page* page = Page::FromAddress(addr);
- page->markbits()->ClearRange(page->AddressToMarkbitIndex(addr),
- page->AddressToMarkbitIndex(addr + size));
- }
-
// At this point, we may be deserializing the heap from a snapshot, and
// none of the maps have been created yet and are NULL.
DCHECK((filler->map() == NULL && !deserialization_complete_) ||
filler->map()->IsMap());
+ return filler;
}
@@ -3101,8 +3177,12 @@ bool Heap::CanMoveObjectStart(HeapObject* object) {
return Page::FromAddress(address)->SweepingDone();
}
+bool Heap::IsImmovable(HeapObject* object) {
+ MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
+ return chunk->NeverEvacuate() || chunk->owner()->identity() == LO_SPACE;
+}
-void Heap::AdjustLiveBytes(HeapObject* object, int by, InvocationMode mode) {
+void Heap::AdjustLiveBytes(HeapObject* object, int by) {
// As long as the inspected object is black and we are currently not iterating
// the heap using HeapIterator, we can update the live byte count. We cannot
// update while using HeapIterator because the iterator is temporarily
@@ -3111,12 +3191,9 @@ void Heap::AdjustLiveBytes(HeapObject* object, int by, InvocationMode mode) {
lo_space()->AdjustLiveBytes(by);
} else if (!in_heap_iterator() &&
!mark_compact_collector()->sweeping_in_progress() &&
- Marking::IsBlack(ObjectMarking::MarkBitFrom(object->address()))) {
- if (mode == SEQUENTIAL_TO_SWEEPER) {
- MemoryChunk::IncrementLiveBytesFromGC(object, by);
- } else {
- MemoryChunk::IncrementLiveBytesFromMutator(object, by);
- }
+ Marking::IsBlack(ObjectMarking::MarkBitFrom(object))) {
+ DCHECK(MemoryChunk::FromAddress(object->address())->SweepingDone());
+ MemoryChunk::IncrementLiveBytes(object, by);
}
}
@@ -3150,14 +3227,27 @@ FixedArrayBase* Heap::LeftTrimFixedArray(FixedArrayBase* object,
// Transfer the mark bits to their new location if the object is not within
// a black area.
if (!incremental_marking()->black_allocation() ||
- !Marking::IsBlack(ObjectMarking::MarkBitFrom(new_start))) {
- IncrementalMarking::TransferMark(this, old_start, new_start);
+ !Marking::IsBlack(
+ ObjectMarking::MarkBitFrom(HeapObject::FromAddress(new_start)))) {
+ IncrementalMarking::TransferMark(this, object,
+ HeapObject::FromAddress(new_start));
}
// Technically in new space this write might be omitted (except for
// debug mode which iterates through the heap), but to play safer
// we still do it.
CreateFillerObjectAt(old_start, bytes_to_trim, ClearRecordedSlots::kYes);
+
+ // Clear the mark bits of the black area that belongs now to the filler.
+ // This is an optimization. The sweeper will release black fillers anyway.
+ if (incremental_marking()->black_allocation() &&
+ Marking::IsBlackOrGrey(ObjectMarking::MarkBitFrom(object))) {
+ Page* page = Page::FromAddress(old_start);
+ page->markbits()->ClearRange(
+ page->AddressToMarkbitIndex(old_start),
+ page->AddressToMarkbitIndex(old_start + bytes_to_trim));
+ }
+
// Initialize header of the trimmed array. Since left trimming is only
// performed on pages which are not concurrently swept creating a filler
// object does not require synchronization.
@@ -3171,7 +3261,7 @@ FixedArrayBase* Heap::LeftTrimFixedArray(FixedArrayBase* object,
FixedArrayBase::cast(HeapObject::FromAddress(new_start));
// Maintain consistency of live bytes during incremental marking
- AdjustLiveBytes(new_object, -bytes_to_trim, Heap::CONCURRENT_TO_SWEEPER);
+ AdjustLiveBytes(new_object, -bytes_to_trim);
// Remove recorded slots for the new map and length offset.
ClearRecordedSlot(new_object, HeapObject::RawField(new_object, 0));
@@ -3183,15 +3273,6 @@ FixedArrayBase* Heap::LeftTrimFixedArray(FixedArrayBase* object,
return new_object;
}
-
-// Force instantiation of templatized method.
-template void Heap::RightTrimFixedArray<Heap::SEQUENTIAL_TO_SWEEPER>(
- FixedArrayBase*, int);
-template void Heap::RightTrimFixedArray<Heap::CONCURRENT_TO_SWEEPER>(
- FixedArrayBase*, int);
-
-
-template<Heap::InvocationMode mode>
void Heap::RightTrimFixedArray(FixedArrayBase* object, int elements_to_trim) {
const int len = object->length();
DCHECK_LE(elements_to_trim, len);
@@ -3235,7 +3316,18 @@ void Heap::RightTrimFixedArray(FixedArrayBase* object, int elements_to_trim) {
// TODO(hpayer): We should shrink the large object page if the size
// of the object changed significantly.
if (!lo_space()->Contains(object)) {
- CreateFillerObjectAt(new_end, bytes_to_trim, ClearRecordedSlots::kYes);
+ HeapObject* filler =
+ CreateFillerObjectAt(new_end, bytes_to_trim, ClearRecordedSlots::kYes);
+ DCHECK_NOT_NULL(filler);
+ // Clear the mark bits of the black area that belongs now to the filler.
+ // This is an optimization. The sweeper will release black fillers anyway.
+ if (incremental_marking()->black_allocation() &&
+ Marking::IsBlackOrGrey(ObjectMarking::MarkBitFrom(filler))) {
+ Page* page = Page::FromAddress(new_end);
+ page->markbits()->ClearRange(
+ page->AddressToMarkbitIndex(new_end),
+ page->AddressToMarkbitIndex(new_end + bytes_to_trim));
+ }
}
// Initialize header of the trimmed array. We are storing the new length
@@ -3244,7 +3336,7 @@ void Heap::RightTrimFixedArray(FixedArrayBase* object, int elements_to_trim) {
object->synchronized_set_length(len - elements_to_trim);
// Maintain consistency of live bytes during incremental marking
- AdjustLiveBytes(object, -bytes_to_trim, mode);
+ AdjustLiveBytes(object, -bytes_to_trim);
// Notify the heap profiler of change in object layout. The array may not be
// moved during GC, and size has to be adjusted nevertheless.
@@ -3331,18 +3423,24 @@ AllocationResult Heap::AllocateCode(int object_size, bool immovable) {
if (!allocation.To(&result)) return allocation;
if (immovable) {
Address address = result->address();
+ MemoryChunk* chunk = MemoryChunk::FromAddress(address);
// Code objects which should stay at a fixed address are allocated either
// in the first page of code space (objects on the first page of each space
- // are never moved) or in large object space.
- if (!code_space_->FirstPage()->Contains(address) &&
- MemoryChunk::FromAddress(address)->owner()->identity() != LO_SPACE) {
- // Discard the first code allocation, which was on a page where it could
- // be moved.
- CreateFillerObjectAt(result->address(), object_size,
- ClearRecordedSlots::kNo);
- allocation = lo_space_->AllocateRaw(object_size, EXECUTABLE);
- if (!allocation.To(&result)) return allocation;
- OnAllocationEvent(result, object_size);
+ // are never moved), in large object space, or (during snapshot creation)
+ // the containing page is marked as immovable.
+ if (!Heap::IsImmovable(result) &&
+ !code_space_->FirstPage()->Contains(address)) {
+ if (isolate()->serializer_enabled()) {
+ chunk->MarkNeverEvacuate();
+ } else {
+ // Discard the first code allocation, which was on a page where it could
+ // be moved.
+ CreateFillerObjectAt(result->address(), object_size,
+ ClearRecordedSlots::kNo);
+ allocation = lo_space_->AllocateRaw(object_size, EXECUTABLE);
+ if (!allocation.To(&result)) return allocation;
+ OnAllocationEvent(result, object_size);
+ }
}
}
@@ -3405,6 +3503,7 @@ AllocationResult Heap::CopyBytecodeArray(BytecodeArray* bytecode_array) {
copy->set_source_position_table(bytecode_array->source_position_table());
copy->set_interrupt_budget(bytecode_array->interrupt_budget());
copy->set_osr_loop_nesting_level(bytecode_array->osr_loop_nesting_level());
+ copy->set_bytecode_age(bytecode_array->bytecode_age());
bytecode_array->CopyBytecodesTo(copy);
return copy;
}
@@ -4045,9 +4144,7 @@ void Heap::MakeHeapIterable() {
CollectAllGarbage(kMakeHeapIterableMask,
GarbageCollectionReason::kMakeHeapIterable);
}
- if (mark_compact_collector()->sweeping_in_progress()) {
- mark_compact_collector()->EnsureSweepingCompleted();
- }
+ mark_compact_collector()->EnsureSweepingCompleted();
DCHECK(IsHeapIterable());
}
@@ -4169,21 +4266,18 @@ void Heap::ReduceNewSpaceSize() {
}
}
-bool Heap::MarkingDequesAreEmpty() {
- return mark_compact_collector()->marking_deque()->IsEmpty() &&
- (!UsingEmbedderHeapTracer() ||
- (wrappers_to_trace() == 0 &&
- embedder_heap_tracer()->NumberOfWrappersToTrace() == 0));
-}
-
void Heap::FinalizeIncrementalMarkingIfComplete(
GarbageCollectionReason gc_reason) {
if (incremental_marking()->IsMarking() &&
(incremental_marking()->IsReadyToOverApproximateWeakClosure() ||
(!incremental_marking()->finalize_marking_completed() &&
- MarkingDequesAreEmpty()))) {
+ mark_compact_collector()->marking_deque()->IsEmpty() &&
+ local_embedder_heap_tracer()->ShouldFinalizeIncrementalMarking()))) {
FinalizeIncrementalMarking(gc_reason);
- } else if (incremental_marking()->IsComplete() || MarkingDequesAreEmpty()) {
+ } else if (incremental_marking()->IsComplete() ||
+ (mark_compact_collector()->marking_deque()->IsEmpty() &&
+ local_embedder_heap_tracer()
+ ->ShouldFinalizeIncrementalMarking())) {
CollectAllGarbage(current_gc_flags_, gc_reason);
}
}
@@ -4195,13 +4289,16 @@ bool Heap::TryFinalizeIdleIncrementalMarking(
tracer()->FinalIncrementalMarkCompactSpeedInBytesPerMillisecond();
if (incremental_marking()->IsReadyToOverApproximateWeakClosure() ||
(!incremental_marking()->finalize_marking_completed() &&
- MarkingDequesAreEmpty() &&
+ mark_compact_collector()->marking_deque()->IsEmpty() &&
+ local_embedder_heap_tracer()->ShouldFinalizeIncrementalMarking() &&
gc_idle_time_handler_->ShouldDoOverApproximateWeakClosure(
idle_time_in_ms))) {
FinalizeIncrementalMarking(gc_reason);
return true;
} else if (incremental_marking()->IsComplete() ||
- (MarkingDequesAreEmpty() &&
+ (mark_compact_collector()->marking_deque()->IsEmpty() &&
+ local_embedder_heap_tracer()
+ ->ShouldFinalizeIncrementalMarking() &&
gc_idle_time_handler_->ShouldDoFinalIncrementalMarkCompact(
idle_time_in_ms, size_of_objects,
final_incremental_mark_compact_speed_in_bytes_per_ms))) {
@@ -4484,6 +4581,18 @@ void Heap::MemoryPressureNotification(MemoryPressureLevel level,
}
}
+void Heap::SetOutOfMemoryCallback(v8::debug::OutOfMemoryCallback callback,
+ void* data) {
+ out_of_memory_callback_ = callback;
+ out_of_memory_callback_data_ = data;
+}
+
+void Heap::InvokeOutOfMemoryCallback() {
+ if (out_of_memory_callback_) {
+ out_of_memory_callback_(out_of_memory_callback_data_);
+ }
+}
+
void Heap::CollectCodeStatistics() {
CodeStatistics::ResetCodeAndMetadataStatistics(isolate());
// We do not look for code in new space, or map space. If code
@@ -4698,10 +4807,8 @@ void Heap::Verify() {
CHECK(HasBeenSetUp());
HandleScope scope(isolate());
- if (mark_compact_collector()->sweeping_in_progress()) {
- // We have to wait here for the sweeper threads to have an iterable heap.
- mark_compact_collector()->EnsureSweepingCompleted();
- }
+ // We have to wait here for the sweeper threads to have an iterable heap.
+ mark_compact_collector()->EnsureSweepingCompleted();
VerifyPointersVisitor visitor;
IterateRoots(&visitor, VISIT_ONLY_STRONG);
@@ -4729,8 +4836,8 @@ void Heap::Verify() {
void Heap::ZapFromSpace() {
if (!new_space_->IsFromSpaceCommitted()) return;
- for (Page* page : NewSpacePageRange(new_space_->FromSpaceStart(),
- new_space_->FromSpaceEnd())) {
+ for (Page* page :
+ PageRange(new_space_->FromSpaceStart(), new_space_->FromSpaceEnd())) {
for (Address cursor = page->area_start(), limit = page->area_end();
cursor < limit; cursor += kPointerSize) {
Memory::Address_at(cursor) = kFromSpaceZapValue;
@@ -4838,7 +4945,7 @@ void Heap::IterateWeakRoots(ObjectVisitor* v, VisitMode mode) {
v->Synchronize(VisitorSynchronization::kStringTable);
if (mode != VISIT_ALL_IN_SCAVENGE && mode != VISIT_ALL_IN_SWEEP_NEWSPACE) {
// Scavenge collections have special processing for this.
- external_string_table_.Iterate(v);
+ external_string_table_.IterateAll(v);
}
v->Synchronize(VisitorSynchronization::kExternalStringsTable);
}
@@ -4937,8 +5044,9 @@ void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
case VISIT_ONLY_STRONG_ROOT_LIST:
UNREACHABLE();
break;
- case VISIT_ONLY_STRONG:
case VISIT_ONLY_STRONG_FOR_SERIALIZATION:
+ break;
+ case VISIT_ONLY_STRONG:
isolate_->global_handles()->IterateStrongRoots(v);
break;
case VISIT_ALL_IN_SCAVENGE:
@@ -5052,7 +5160,7 @@ bool Heap::ConfigureHeap(size_t max_semi_space_size, size_t max_old_space_size,
// The old generation is paged and needs at least one page for each space.
int paged_space_count = LAST_PAGED_SPACE - FIRST_PAGED_SPACE + 1;
- max_old_generation_size_ =
+ initial_max_old_generation_size_ = max_old_generation_size_ =
Max(static_cast<size_t>(paged_space_count * Page::kPageSize),
max_old_generation_size_);
@@ -5307,6 +5415,13 @@ void Heap::DampenOldGenerationAllocationLimit(size_t old_gen_size,
}
}
+bool Heap::ShouldOptimizeForLoadTime() {
+ return isolate()->rail_mode() == PERFORMANCE_LOAD &&
+ !AllocationLimitOvershotByLargeMargin() &&
+ MonotonicallyIncreasingTimeInMs() <
+ isolate()->LoadStartTimeMs() + kMaxLoadTimeMs;
+}
+
// This predicate is called when an old generation space cannot allocated from
// the free list and is about to add a new page. Returning false will cause a
// major GC. It happens when the old generation allocation limit is reached and
@@ -5318,6 +5433,8 @@ bool Heap::ShouldExpandOldGenerationOnSlowAllocation() {
if (ShouldOptimizeForMemoryUsage()) return false;
+ if (ShouldOptimizeForLoadTime()) return true;
+
if (incremental_marking()->NeedsFinalization()) {
return !AllocationLimitOvershotByLargeMargin();
}
@@ -5352,9 +5469,13 @@ Heap::IncrementalMarkingLimit Heap::IncrementalMarkingLimitReached() {
if (old_generation_space_available > new_space_->Capacity()) {
return IncrementalMarkingLimit::kNoLimit;
}
- // We are close to the allocation limit.
- // Choose between the hard and the soft limits.
- if (old_generation_space_available == 0 || ShouldOptimizeForMemoryUsage()) {
+ if (ShouldOptimizeForMemoryUsage()) {
+ return IncrementalMarkingLimit::kHardLimit;
+ }
+ if (ShouldOptimizeForLoadTime()) {
+ return IncrementalMarkingLimit::kNoLimit;
+ }
+ if (old_generation_space_available == 0) {
return IncrementalMarkingLimit::kHardLimit;
}
return IncrementalMarkingLimit::kSoftLimit;
@@ -5477,6 +5598,7 @@ bool Heap::SetUp() {
dead_object_stats_ = new ObjectStats(this);
}
scavenge_job_ = new ScavengeJob();
+ local_embedder_heap_tracer_ = new LocalEmbedderHeapTracer();
LOG(isolate_, IntPtrTEvent("heap-capacity", Capacity()));
LOG(isolate_, IntPtrTEvent("heap-available", Available()));
@@ -5496,7 +5618,7 @@ bool Heap::SetUp() {
bool Heap::CreateHeapObjects() {
// Create initial maps.
if (!CreateInitialMaps()) return false;
- CreateApiObjects();
+ if (!CreateApiObjects()) return false;
// Create initial objects
CreateInitialObjects();
@@ -5552,16 +5674,7 @@ void Heap::NotifyDeserializationComplete() {
void Heap::SetEmbedderHeapTracer(EmbedderHeapTracer* tracer) {
DCHECK_EQ(gc_state_, HeapState::NOT_IN_GC);
- embedder_heap_tracer_ = tracer;
-}
-
-void Heap::RegisterWrappersWithEmbedderHeapTracer() {
- DCHECK(UsingEmbedderHeapTracer());
- if (wrappers_to_trace_.empty()) {
- return;
- }
- embedder_heap_tracer()->RegisterV8References(wrappers_to_trace_);
- wrappers_to_trace_.clear();
+ local_embedder_heap_tracer()->SetRemoteTracer(tracer);
}
void Heap::TracePossibleWrapper(JSObject* js_object) {
@@ -5571,17 +5684,12 @@ void Heap::TracePossibleWrapper(JSObject* js_object) {
js_object->GetInternalField(0) != undefined_value() &&
js_object->GetInternalField(1) != undefined_value()) {
DCHECK(reinterpret_cast<intptr_t>(js_object->GetInternalField(0)) % 2 == 0);
- wrappers_to_trace_.push_back(std::pair<void*, void*>(
+ local_embedder_heap_tracer()->AddWrapperToTrace(std::pair<void*, void*>(
reinterpret_cast<void*>(js_object->GetInternalField(0)),
reinterpret_cast<void*>(js_object->GetInternalField(1))));
}
}
-bool Heap::RequiresImmediateWrapperProcessing() {
- const size_t kTooManyWrappers = 16000;
- return wrappers_to_trace_.size() > kTooManyWrappers;
-}
-
void Heap::RegisterExternallyReferencedObject(Object** object) {
HeapObject* heap_object = HeapObject::cast(*object);
DCHECK(Contains(heap_object));
@@ -5658,6 +5766,9 @@ void Heap::TearDown() {
dead_object_stats_ = nullptr;
}
+ delete local_embedder_heap_tracer_;
+ local_embedder_heap_tracer_ = nullptr;
+
delete scavenge_job_;
scavenge_job_ = nullptr;
@@ -5803,8 +5914,6 @@ void Heap::CompactWeakFixedArrays() {
WeakFixedArray* array = WeakFixedArray::cast(prototype_users);
array->Compact<JSObject::PrototypeRegistryCompactionCallback>();
}
- } else if (o->IsScript()) {
- CompactWeakFixedArray(Script::cast(o)->shared_function_infos());
}
}
CompactWeakFixedArray(noscript_shared_function_infos());
@@ -5909,6 +6018,18 @@ void Heap::ClearRecordedSlot(HeapObject* object, Object** slot) {
}
}
+bool Heap::HasRecordedSlot(HeapObject* object, Object** slot) {
+ if (InNewSpace(object)) {
+ return false;
+ }
+ Address slot_addr = reinterpret_cast<Address>(slot);
+ Page* page = Page::FromAddress(slot_addr);
+ DCHECK_EQ(page->owner()->identity(), OLD_SPACE);
+ store_buffer()->MoveAllEntriesToRememberedSet();
+ return RememberedSet<OLD_TO_NEW>::Contains(page, slot_addr) ||
+ RememberedSet<OLD_TO_OLD>::Contains(page, slot_addr);
+}
+
void Heap::ClearRecordedSlotRange(Address start, Address end) {
Page* page = Page::FromAddress(start);
if (!page->InNewSpace()) {
@@ -6330,7 +6451,7 @@ void Heap::UpdateTotalGCTime(double duration) {
}
}
-void Heap::ExternalStringTable::CleanUp() {
+void Heap::ExternalStringTable::CleanUpNewSpaceStrings() {
int last = 0;
Isolate* isolate = heap_->isolate();
for (int i = 0; i < new_space_strings_.length(); ++i) {
@@ -6346,8 +6467,12 @@ void Heap::ExternalStringTable::CleanUp() {
}
new_space_strings_.Rewind(last);
new_space_strings_.Trim();
+}
- last = 0;
+void Heap::ExternalStringTable::CleanUpAll() {
+ CleanUpNewSpaceStrings();
+ int last = 0;
+ Isolate* isolate = heap_->isolate();
for (int i = 0; i < old_space_strings_.length(); ++i) {
if (old_space_strings_[i]->IsTheHole(isolate)) {
continue;
diff --git a/deps/v8/src/heap/heap.h b/deps/v8/src/heap/heap.h
index 013cd9a8fe..d8034891fc 100644
--- a/deps/v8/src/heap/heap.h
+++ b/deps/v8/src/heap/heap.h
@@ -14,6 +14,7 @@
#include "src/allocation.h"
#include "src/assert-scope.h"
#include "src/base/atomic-utils.h"
+#include "src/debug/debug-interface.h"
#include "src/globals.h"
#include "src/heap-symbols.h"
#include "src/list.h"
@@ -77,6 +78,7 @@ using v8::MemoryPressureLevel;
/* Context maps */ \
V(Map, native_context_map, NativeContextMap) \
V(Map, module_context_map, ModuleContextMap) \
+ V(Map, eval_context_map, EvalContextMap) \
V(Map, script_context_map, ScriptContextMap) \
V(Map, block_context_map, BlockContextMap) \
V(Map, catch_context_map, CatchContextMap) \
@@ -93,6 +95,7 @@ using v8::MemoryPressureLevel;
V(Map, external_map, ExternalMap) \
V(Map, bytecode_array_map, BytecodeArrayMap) \
V(Map, module_info_map, ModuleInfoMap) \
+ V(Map, type_feedback_vector_map, TypeFeedbackVectorMap) \
/* String maps */ \
V(Map, native_source_string_map, NativeSourceStringMap) \
V(Map, string_map, StringMap) \
@@ -168,6 +171,8 @@ using v8::MemoryPressureLevel;
V(PropertyCell, string_length_protector, StringLengthProtector) \
V(Cell, fast_array_iteration_protector, FastArrayIterationProtector) \
V(Cell, array_iterator_protector, ArrayIteratorProtector) \
+ V(PropertyCell, array_buffer_neutering_protector, \
+ ArrayBufferNeuteringProtector) \
/* Special numbers */ \
V(HeapNumber, nan_value, NanValue) \
V(HeapNumber, hole_nan_value, HoleNanValue) \
@@ -190,7 +195,9 @@ using v8::MemoryPressureLevel;
ExperimentalExtraNativesSourceCache) \
/* Lists and dictionaries */ \
V(NameDictionary, empty_properties_dictionary, EmptyPropertiesDictionary) \
- V(Object, symbol_registry, SymbolRegistry) \
+ V(NameDictionary, public_symbol_table, PublicSymbolTable) \
+ V(NameDictionary, api_symbol_table, ApiSymbolTable) \
+ V(NameDictionary, api_private_symbol_table, ApiPrivateSymbolTable) \
V(Object, script_list, ScriptList) \
V(UnseededNumberDictionary, code_stubs, CodeStubs) \
V(FixedArray, materialized_objects, MaterializedObjects) \
@@ -206,8 +213,10 @@ using v8::MemoryPressureLevel;
V(Object, weak_stack_trace_list, WeakStackTraceList) \
V(Object, noscript_shared_function_infos, NoScriptSharedFunctionInfos) \
V(FixedArray, serialized_templates, SerializedTemplates) \
+ V(FixedArray, serialized_global_proxy_sizes, SerializedGlobalProxySizes) \
/* Configured values */ \
V(TemplateList, message_listeners, MessageListeners) \
+ V(InterceptorInfo, noop_interceptor_info, NoOpInterceptorInfo) \
V(Code, js_entry_code, JsEntryCode) \
V(Code, js_construct_entry_code, JsConstructEntryCode) \
/* Oddball maps */ \
@@ -221,7 +230,10 @@ using v8::MemoryPressureLevel;
V(Map, exception_map, ExceptionMap) \
V(Map, termination_exception_map, TerminationExceptionMap) \
V(Map, optimized_out_map, OptimizedOutMap) \
- V(Map, stale_register_map, StaleRegisterMap)
+ V(Map, stale_register_map, StaleRegisterMap) \
+ /* per-Isolate map for JSPromiseCapability. */ \
+ /* TODO(caitp): Make this a Struct */ \
+ V(Map, js_promise_capability_map, JSPromiseCapabilityMap)
// Entries in this list are limited to Smis and are not visited during GC.
#define SMI_ROOT_LIST(V) \
@@ -297,6 +309,7 @@ using v8::MemoryPressureLevel;
V(WithContextMap) \
V(BlockContextMap) \
V(ModuleContextMap) \
+ V(EvalContextMap) \
V(ScriptContextMap) \
V(UndefinedMap) \
V(TheHoleMap) \
@@ -325,6 +338,7 @@ class HeapObjectsFilter;
class HeapStats;
class HistogramTimer;
class Isolate;
+class LocalEmbedderHeapTracer;
class MemoryAllocator;
class MemoryReducer;
class ObjectIterator;
@@ -347,8 +361,6 @@ enum ArrayStorageAllocationMode {
enum class ClearRecordedSlots { kYes, kNo };
-enum class ClearBlackArea { kYes, kNo };
-
enum class GarbageCollectionReason {
kUnknown = 0,
kAllocationFailure = 1,
@@ -554,12 +566,6 @@ class Heap {
enum HeapState { NOT_IN_GC, SCAVENGE, MARK_COMPACT };
- // Indicates whether live bytes adjustment is triggered
- // - from within the GC code before sweeping started (SEQUENTIAL_TO_SWEEPER),
- // - or from within GC (CONCURRENT_TO_SWEEPER),
- // - or mutator code (CONCURRENT_TO_SWEEPER).
- enum InvocationMode { SEQUENTIAL_TO_SWEEPER, CONCURRENT_TO_SWEEPER };
-
enum UpdateAllocationSiteMode { kGlobal, kCached };
// Taking this lock prevents the GC from entering a phase that relocates
@@ -607,7 +613,7 @@ class Heap {
static const int kMaxOldSpaceSizeMediumMemoryDevice =
256 * kPointerMultiplier;
static const int kMaxOldSpaceSizeHighMemoryDevice = 512 * kPointerMultiplier;
- static const int kMaxOldSpaceSizeHugeMemoryDevice = 700 * kPointerMultiplier;
+ static const int kMaxOldSpaceSizeHugeMemoryDevice = 1024 * kPointerMultiplier;
// The executable size has to be a multiple of Page::kPageSize.
// Sizes are in MB.
@@ -673,6 +679,8 @@ class Heap {
// they are in new space.
static bool RootCanBeWrittenAfterInitialization(RootListIndex root_index);
+ static bool IsUnmodifiedHeapObject(Object** p);
+
// Zapping is needed for verify heap, and always done in debug builds.
static inline bool ShouldZapGarbage() {
#ifdef DEBUG
@@ -739,24 +747,22 @@ class Heap {
// Initialize a filler object to keep the ability to iterate over the heap
// when introducing gaps within pages. If slots could have been recorded in
// the freed area, then pass ClearRecordedSlots::kYes as the mode. Otherwise,
- // pass ClearRecordedSlots::kNo. If the filler was created in a black area
- // we may want to clear the corresponding mark bits with ClearBlackArea::kYes,
- // which is the default. ClearBlackArea::kNo does not clear the mark bits.
- void CreateFillerObjectAt(
- Address addr, int size, ClearRecordedSlots mode,
- ClearBlackArea black_area_mode = ClearBlackArea::kYes);
+ // pass ClearRecordedSlots::kNo.
+ HeapObject* CreateFillerObjectAt(Address addr, int size,
+ ClearRecordedSlots mode);
bool CanMoveObjectStart(HeapObject* object);
+ static bool IsImmovable(HeapObject* object);
+
// Maintain consistency of live bytes during incremental marking.
- void AdjustLiveBytes(HeapObject* object, int by, InvocationMode mode);
+ void AdjustLiveBytes(HeapObject* object, int by);
// Trim the given array from the left. Note that this relocates the object
// start and hence is only valid if there is only a single reference to it.
FixedArrayBase* LeftTrimFixedArray(FixedArrayBase* obj, int elements_to_trim);
// Trim the given array from the right.
- template<Heap::InvocationMode mode>
void RightTrimFixedArray(FixedArrayBase* obj, int elements_to_trim);
// Converts the given boolean condition to JavaScript boolean value.
@@ -787,6 +793,9 @@ class Heap {
Object* encountered_weak_collections() const {
return encountered_weak_collections_;
}
+ void VisitEncounteredWeakCollections(ObjectVisitor* visitor) {
+ visitor->VisitPointer(&encountered_weak_collections_);
+ }
void set_encountered_weak_cells(Object* weak_cell) {
encountered_weak_cells_ = weak_cell;
@@ -816,6 +825,7 @@ class Heap {
void PrintShortHeapStatistics();
inline HeapState gc_state() { return gc_state_; }
+ void SetGCState(HeapState state);
inline bool IsInGCPostProcessing() { return gc_post_processing_depth_ > 0; }
@@ -831,7 +841,7 @@ class Heap {
// Support for the API.
//
- void CreateApiObjects();
+ bool CreateApiObjects();
// Implements the corresponding V8 API function.
bool IdleNotification(double deadline_in_seconds);
@@ -841,6 +851,9 @@ class Heap {
bool is_isolate_locked);
void CheckMemoryPressure();
+ void SetOutOfMemoryCallback(v8::debug::OutOfMemoryCallback callback,
+ void* data);
+
double MonotonicallyIncreasingTimeInMs();
void RecordStats(HeapStats* stats, bool take_snapshot = false);
@@ -874,6 +887,7 @@ class Heap {
inline int GetNextTemplateSerialNumber();
inline void SetSerializedTemplates(FixedArray* templates);
+ inline void SetSerializedGlobalProxySizes(FixedArray* sizes);
// For post mortem debugging.
void RememberUnmappedPage(Address page, bool compacted);
@@ -948,6 +962,30 @@ class Heap {
return memory_pressure_level_.Value() != MemoryPressureLevel::kNone;
}
+ size_t HeapLimitForDebugging() {
+ const size_t kDebugHeapSizeFactor = 4;
+ size_t max_limit = std::numeric_limits<size_t>::max() / 4;
+ return Min(max_limit,
+ initial_max_old_generation_size_ * kDebugHeapSizeFactor);
+ }
+
+ void IncreaseHeapLimitForDebugging() {
+ max_old_generation_size_ =
+ Max(max_old_generation_size_, HeapLimitForDebugging());
+ }
+
+ void RestoreOriginalHeapLimit() {
+ // Do not set the limit lower than the live size + some slack.
+ size_t min_limit = SizeOfObjects() + SizeOfObjects() / 4;
+ max_old_generation_size_ =
+ Min(max_old_generation_size_,
+ Max(initial_max_old_generation_size_, min_limit));
+ }
+
+ bool IsHeapLimitIncreasedForDebugging() {
+ return max_old_generation_size_ == HeapLimitForDebugging();
+ }
+
// ===========================================================================
// Initialization. ===========================================================
// ===========================================================================
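
The interplay of these three helpers is easiest to see with concrete numbers. Below is a standalone sketch of the arithmetic, using an assumed 512 MB initial old-generation limit and a 600 MB live size; both figures are hypothetical, and std::min/std::max stand in for V8's Min/Max helpers.

    // Illustrative only: the debugging heap-limit arithmetic from the hunk above.
    #include <algorithm>
    #include <cstddef>
    #include <cstdio>
    #include <limits>

    int main() {
      const size_t kMB = size_t{1} << 20;
      const size_t kDebugHeapSizeFactor = 4;
      size_t initial_max_old_generation_size = 512 * kMB;  // assumed config
      size_t max_old_generation_size = initial_max_old_generation_size;

      // HeapLimitForDebugging(): scale the initial limit, capped at SIZE_MAX/4.
      size_t max_limit = std::numeric_limits<size_t>::max() / 4;
      size_t debug_limit = std::min(
          max_limit, initial_max_old_generation_size * kDebugHeapSizeFactor);

      // IncreaseHeapLimitForDebugging(): never lower the current limit.
      max_old_generation_size = std::max(max_old_generation_size, debug_limit);
      std::printf("debug limit: %zu MB\n", max_old_generation_size / kMB);   // 2048

      // RestoreOriginalHeapLimit() with an assumed 600 MB of live objects:
      // keep at least live size + 25% slack, but no more than the current limit.
      size_t size_of_objects = 600 * kMB;
      size_t min_limit = size_of_objects + size_of_objects / 4;
      max_old_generation_size =
          std::min(max_old_generation_size,
                   std::max(initial_max_old_generation_size, min_limit));
      std::printf("restored limit: %zu MB\n", max_old_generation_size / kMB);  // 750
      return 0;
    }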
@@ -1172,6 +1210,8 @@ class Heap {
void ClearRecordedSlot(HeapObject* object, Object** slot);
void ClearRecordedSlotRange(Address start, Address end);
+ bool HasRecordedSlot(HeapObject* object, Object** slot);
+
// ===========================================================================
// Incremental marking API. ==================================================
// ===========================================================================
@@ -1203,24 +1243,13 @@ class Heap {
// Embedder heap tracer support. =============================================
// ===========================================================================
+ LocalEmbedderHeapTracer* local_embedder_heap_tracer() {
+ return local_embedder_heap_tracer_;
+ }
void SetEmbedderHeapTracer(EmbedderHeapTracer* tracer);
-
- bool UsingEmbedderHeapTracer() { return embedder_heap_tracer() != nullptr; }
-
void TracePossibleWrapper(JSObject* js_object);
-
void RegisterExternallyReferencedObject(Object** object);
- void RegisterWrappersWithEmbedderHeapTracer();
-
- // In order to avoid running out of memory we force tracing wrappers if there
- // are too many of them.
- bool RequiresImmediateWrapperProcessing();
-
- EmbedderHeapTracer* embedder_heap_tracer() { return embedder_heap_tracer_; }
-
- size_t wrappers_to_trace() { return wrappers_to_trace_.size(); }
-
// ===========================================================================
// External string table API. ================================================
// ===========================================================================
@@ -1501,6 +1530,7 @@ class Heap {
GarbageCollectionReason gc_reason);
private:
+ class SkipStoreBufferScope;
class PretenuringScope;
// External strings table is a place where all external strings are
@@ -1511,11 +1541,13 @@ class Heap {
// Registers an external string.
inline void AddString(String* string);
- inline void Iterate(ObjectVisitor* v);
+ inline void IterateAll(ObjectVisitor* v);
+ inline void IterateNewSpaceStrings(ObjectVisitor* v);
- // Restores internal invariant and gets rid of collected strings.
- // Must be called after each Iterate() that modified the strings.
- void CleanUp();
+ // Restores internal invariant and gets rid of collected strings. Must be
+ // called after each Iterate*() that modified the strings.
+ void CleanUpAll();
+ void CleanUpNewSpaceStrings();
// Destroys all allocated memory.
void TearDown();
@@ -1632,10 +1664,6 @@ class Heap {
return current_gc_flags_ & kFinalizeIncrementalMarkingMask;
}
- // Checks whether both, the internal marking deque, and the embedder provided
- // one are empty. Avoid in fast path as it potentially calls through the API.
- bool MarkingDequesAreEmpty();
-
void PreprocessStackTraces();
// Checks whether a global GC is necessary
@@ -1747,6 +1775,8 @@ class Heap {
void CollectGarbageOnMemoryPressure();
+ void InvokeOutOfMemoryCallback();
+
// Attempt to over-approximate the weak closure by marking object groups and
// implicit references from global handles, but don't atomically complete
// marking. If we continue to mark incrementally, we might have marked
@@ -1840,6 +1870,14 @@ class Heap {
// Growing strategy. =========================================================
// ===========================================================================
+ // For some webpages RAIL mode does not switch from PERFORMANCE_LOAD.
+ // This constant limits the effect of load RAIL mode on GC.
+ // The value is arbitrary and chosen as the largest load time observed in
+ // v8 browsing benchmarks.
+ static const int kMaxLoadTimeMs = 7000;
+
+ bool ShouldOptimizeForLoadTime();
+
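// One plausible shape of the check the declaration above suggests, as a
// hedged sketch only: defer GC work while the page is loading, but never
// for longer than kMaxLoadTimeMs (7000 ms), so a page stuck in
// PERFORMANCE_LOAD cannot suppress GC indefinitely. The RailMode enum and
// the memory_pressure parameter are hypothetical stand-ins.
enum class RailModeSketch { kPerformanceLoad, kPerformanceAnimation, kOther };

bool ShouldOptimizeForLoadTimeSketch(RailModeSketch mode, double now_ms,
                                     double load_start_ms,
                                     bool memory_pressure) {
  const int kMaxLoadTimeMs = 7000;
  return !memory_pressure && mode == RailModeSketch::kPerformanceLoad &&
         now_ms - load_start_ms <= kMaxLoadTimeMs;
}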
// Decrease the allocation limit if the new limit based on the given
// parameters is lower than the current limit.
void DampenOldGenerationAllocationLimit(size_t old_gen_size, double gc_speed,
@@ -2128,6 +2166,7 @@ class Heap {
size_t max_semi_space_size_;
size_t initial_semispace_size_;
size_t max_old_generation_size_;
+ size_t initial_max_old_generation_size_;
size_t initial_old_generation_size_;
bool old_generation_size_configured_;
size_t max_executable_size_;
@@ -2148,6 +2187,9 @@ class Heap {
// and reset by a mark-compact garbage collection.
base::AtomicValue<MemoryPressureLevel> memory_pressure_level_;
+ v8::debug::OutOfMemoryCallback out_of_memory_callback_;
+ void* out_of_memory_callback_data_;
+
// For keeping track of context disposals.
int contexts_disposed_;
@@ -2338,8 +2380,7 @@ class Heap {
// The depth of HeapIterator nestings.
int heap_iterator_depth_;
- EmbedderHeapTracer* embedder_heap_tracer_;
- std::vector<std::pair<void*, void*>> wrappers_to_trace_;
+ LocalEmbedderHeapTracer* local_embedder_heap_tracer_;
// Used for testing purposes.
bool force_oom_;
diff --git a/deps/v8/src/heap/incremental-marking.cc b/deps/v8/src/heap/incremental-marking.cc
index 4b1d7712a7..b0418686bf 100644
--- a/deps/v8/src/heap/incremental-marking.cc
+++ b/deps/v8/src/heap/incremental-marking.cc
@@ -32,6 +32,7 @@ IncrementalMarking::IncrementalMarking(Heap* heap)
was_activated_(false),
black_allocation_(false),
finalize_marking_completed_(false),
+ trace_wrappers_toggle_(false),
request_type_(NONE),
new_generation_observer_(*this, kAllocatedThreshold),
old_generation_observer_(*this, kAllocatedThreshold) {}
@@ -129,27 +130,27 @@ static void MarkObjectGreyDoNotEnqueue(Object* obj) {
HeapObject* heap_obj = HeapObject::cast(obj);
MarkBit mark_bit = ObjectMarking::MarkBitFrom(HeapObject::cast(obj));
if (Marking::IsBlack(mark_bit)) {
- MemoryChunk::IncrementLiveBytesFromGC(heap_obj, -heap_obj->Size());
+ MemoryChunk::IncrementLiveBytes(heap_obj, -heap_obj->Size());
}
Marking::AnyToGrey(mark_bit);
}
}
-void IncrementalMarking::TransferMark(Heap* heap, Address old_start,
- Address new_start) {
+void IncrementalMarking::TransferMark(Heap* heap, HeapObject* from,
+ HeapObject* to) {
// This is only used when resizing an object.
- DCHECK(MemoryChunk::FromAddress(old_start) ==
- MemoryChunk::FromAddress(new_start));
+ DCHECK(MemoryChunk::FromAddress(from->address()) ==
+ MemoryChunk::FromAddress(to->address()));
if (!heap->incremental_marking()->IsMarking()) return;
// If the mark doesn't move, we don't check the color of the object.
// It doesn't matter whether the object is black, since it hasn't changed
// size, so the adjustment to the live data count will be zero anyway.
- if (old_start == new_start) return;
+ if (from == to) return;
- MarkBit new_mark_bit = ObjectMarking::MarkBitFrom(new_start);
- MarkBit old_mark_bit = ObjectMarking::MarkBitFrom(old_start);
+ MarkBit new_mark_bit = ObjectMarking::MarkBitFrom(to);
+ MarkBit old_mark_bit = ObjectMarking::MarkBitFrom(from);
#ifdef DEBUG
Marking::ObjectColor old_color = Marking::Color(old_mark_bit);
@@ -161,8 +162,7 @@ void IncrementalMarking::TransferMark(Heap* heap, Address old_start,
return;
} else if (Marking::IsGrey(old_mark_bit)) {
Marking::GreyToWhite(old_mark_bit);
- heap->incremental_marking()->WhiteToGreyAndPush(
- HeapObject::FromAddress(new_start), new_mark_bit);
+ heap->incremental_marking()->WhiteToGreyAndPush(to, new_mark_bit);
heap->incremental_marking()->RestartIfNotMarking();
}
@@ -268,7 +268,7 @@ class IncrementalMarkingMarkingVisitor
MarkBit mark_bit = ObjectMarking::MarkBitFrom(heap_object);
if (Marking::IsWhite(mark_bit)) {
Marking::MarkBlack(mark_bit);
- MemoryChunk::IncrementLiveBytesFromGC(heap_object, heap_object->Size());
+ MemoryChunk::IncrementLiveBytes(heap_object, heap_object->Size());
return true;
}
return false;
@@ -524,10 +524,10 @@ void IncrementalMarking::StartMarking() {
state_ = MARKING;
- if (heap_->UsingEmbedderHeapTracer()) {
+ {
TRACE_GC(heap()->tracer(),
GCTracer::Scope::MC_INCREMENTAL_WRAPPER_PROLOGUE);
- heap_->embedder_heap_tracer()->TracePrologue();
+ heap_->local_embedder_heap_tracer()->TracePrologue();
}
RecordWriteStub::Mode mode = is_compacting_
@@ -603,7 +603,7 @@ void IncrementalMarking::MarkObjectGroups() {
TRACE_GC(heap_->tracer(),
GCTracer::Scope::MC_INCREMENTAL_FINALIZE_OBJECT_GROUPING);
- DCHECK(!heap_->UsingEmbedderHeapTracer());
+ DCHECK(!heap_->local_embedder_heap_tracer()->InUse());
DCHECK(!finalize_marking_completed_);
DCHECK(IsMarking());
@@ -736,7 +736,8 @@ void IncrementalMarking::FinalizeIncrementally() {
// 4) Remove weak cell with live values from the list of weak cells, they
// do not need processing during GC.
MarkRoots();
- if (!heap_->UsingEmbedderHeapTracer()) {
+ if (!heap_->local_embedder_heap_tracer()->InUse() &&
+ FLAG_object_grouping_in_incremental_finalization) {
MarkObjectGroups();
}
if (incremental_marking_finalization_rounds_ == 0) {
@@ -750,7 +751,8 @@ void IncrementalMarking::FinalizeIncrementally() {
abs(old_marking_deque_top -
heap_->mark_compact_collector()->marking_deque()->top());
- marking_progress += static_cast<int>(heap_->wrappers_to_trace());
+ marking_progress += static_cast<int>(
+ heap_->local_embedder_heap_tracer()->NumberOfCachedWrappersToTrace());
double end = heap_->MonotonicallyIncreasingTimeInMs();
double delta = end - start;
@@ -806,8 +808,7 @@ void IncrementalMarking::UpdateMarkingDequeAfterScavenge() {
// them.
if (map_word.IsForwardingAddress()) {
HeapObject* dest = map_word.ToForwardingAddress();
- if (Marking::IsBlack(ObjectMarking::MarkBitFrom(dest->address())))
- continue;
+ if (Marking::IsBlack(ObjectMarking::MarkBitFrom(dest))) continue;
array[new_top] = dest;
new_top = ((new_top + 1) & mask);
DCHECK(new_top != marking_deque->bottom());
@@ -864,7 +865,7 @@ void IncrementalMarking::MarkBlack(HeapObject* obj, int size) {
MarkBit mark_bit = ObjectMarking::MarkBitFrom(obj);
if (Marking::IsBlack(mark_bit)) return;
Marking::GreyToBlack(mark_bit);
- MemoryChunk::IncrementLiveBytesFromGC(obj, size);
+ MemoryChunk::IncrementLiveBytes(obj, size);
}
intptr_t IncrementalMarking::ProcessMarkingDeque(
@@ -890,6 +891,11 @@ intptr_t IncrementalMarking::ProcessMarkingDeque(
VisitObject(map, obj, size);
bytes_processed += size - unscanned_bytes_of_large_object_;
}
+ // Report all found wrappers to the embedder. This is necessary as the
+ // embedder could potentially invalidate wrappers as soon as V8 is done
+ // with its incremental marking processing. Any cached wrappers could
+ // result in broken pointers at this point.
+ heap_->local_embedder_heap_tracer()->RegisterWrappersWithRemoteTracer();
return bytes_processed;
}
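// The comment above describes the contract behind
// RegisterWrappersWithRemoteTracer(): wrappers discovered during a marking
// round are cached locally and handed to the embedder before the round
// returns, so the embedder never holds pointers V8 might invalidate
// afterwards. A minimal sketch of that pattern; the RemoteTracerSketch
// interface is a hypothetical stand-in for the embedder tracer:
#include <utility>
#include <vector>

struct RemoteTracerSketch {
  virtual ~RemoteTracerSketch() = default;
  virtual void RegisterV8References(
      const std::vector<std::pair<void*, void*>>& wrappers) = 0;
};

class WrapperCacheSketch {
 public:
  explicit WrapperCacheSketch(RemoteTracerSketch* tracer) : tracer_(tracer) {}

  void CacheWrapper(void* type_ptr, void* instance) {
    cached_.emplace_back(type_ptr, instance);
  }

  // Called at the end of each marking round.
  void FlushToRemoteTracer() {
    if (cached_.empty()) return;
    tracer_->RegisterV8References(cached_);
    cached_.clear();
  }

 private:
  RemoteTracerSketch* tracer_;
  std::vector<std::pair<void*, void*>> cached_;
};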
@@ -933,7 +939,7 @@ void IncrementalMarking::Hurry() {
MarkBit mark_bit = ObjectMarking::MarkBitFrom(cache);
if (Marking::IsGrey(mark_bit)) {
Marking::GreyToBlack(mark_bit);
- MemoryChunk::IncrementLiveBytesFromGC(cache, cache->Size());
+ MemoryChunk::IncrementLiveBytes(cache, cache->Size());
}
}
context = Context::cast(context)->next_context_link();
@@ -1026,15 +1032,40 @@ void IncrementalMarking::Epilogue() {
double IncrementalMarking::AdvanceIncrementalMarking(
double deadline_in_ms, CompletionAction completion_action,
ForceCompletionAction force_completion, StepOrigin step_origin) {
+ HistogramTimerScope incremental_marking_scope(
+ heap_->isolate()->counters()->gc_incremental_marking());
+ TRACE_EVENT0("v8", "V8.GCIncrementalMarking");
+ TRACE_GC(heap_->tracer(), GCTracer::Scope::MC_INCREMENTAL);
DCHECK(!IsStopped());
+ DCHECK_EQ(
+ 0, heap_->local_embedder_heap_tracer()->NumberOfCachedWrappersToTrace());
double remaining_time_in_ms = 0.0;
intptr_t step_size_in_bytes = GCIdleTimeHandler::EstimateMarkingStepSize(
kStepSizeInMs,
heap()->tracer()->IncrementalMarkingSpeedInBytesPerMillisecond());
+ const bool incremental_wrapper_tracing =
+ state_ == MARKING && FLAG_incremental_marking_wrappers &&
+ heap_->local_embedder_heap_tracer()->InUse();
do {
- Step(step_size_in_bytes, completion_action, force_completion, step_origin);
+ if (incremental_wrapper_tracing && trace_wrappers_toggle_) {
+ TRACE_GC(heap()->tracer(),
+ GCTracer::Scope::MC_INCREMENTAL_WRAPPER_TRACING);
+ const double wrapper_deadline =
+ heap_->MonotonicallyIncreasingTimeInMs() + kStepSizeInMs;
+ if (!heap_->local_embedder_heap_tracer()
+ ->ShouldFinalizeIncrementalMarking()) {
+ heap_->local_embedder_heap_tracer()->Trace(
+ wrapper_deadline, EmbedderHeapTracer::AdvanceTracingActions(
+ EmbedderHeapTracer::ForceCompletionAction::
+ DO_NOT_FORCE_COMPLETION));
+ }
+ } else {
+ Step(step_size_in_bytes, completion_action, force_completion,
+ step_origin);
+ }
+ trace_wrappers_toggle_ = !trace_wrappers_toggle_;
remaining_time_in_ms =
deadline_in_ms - heap()->MonotonicallyIncreasingTimeInMs();
} while (remaining_time_in_ms >= kStepSizeInMs && !IsComplete() &&
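// A standalone sketch of the alternation driven by trace_wrappers_toggle_
// above: within the deadline, alternating iterations give the embedder a
// bounded slice of wrapper tracing or run a V8 marking step. The real loop
// also stops on completion; the callbacks here are hypothetical stand-ins.
#include <functional>

struct AdvanceLoopDepsSketch {
  std::function<double()> now_ms;
  std::function<void(double deadline_ms)> trace_wrappers;  // embedder slice
  std::function<void()> v8_marking_step;
};

void AdvanceWithWrapperToggleSketch(const AdvanceLoopDepsSketch& deps,
                                    double deadline_ms, double step_ms,
                                    bool embedder_in_use) {
  bool trace_wrappers_toggle = false;
  do {
    if (embedder_in_use && trace_wrappers_toggle) {
      deps.trace_wrappers(deps.now_ms() + step_ms);
    } else {
      deps.v8_marking_step();
    }
    trace_wrappers_toggle = !trace_wrappers_toggle;
  } while (deadline_ms - deps.now_ms() >= step_ms);
}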
@@ -1109,6 +1140,10 @@ void IncrementalMarking::AdvanceIncrementalMarkingOnAllocation() {
bytes_marked_ahead_of_schedule_ -= bytes_to_process;
bytes_processed = bytes_to_process;
} else {
+ HistogramTimerScope incremental_marking_scope(
+ heap_->isolate()->counters()->gc_incremental_marking());
+ TRACE_EVENT0("v8", "V8.GCIncrementalMarking");
+ TRACE_GC(heap_->tracer(), GCTracer::Scope::MC_INCREMENTAL);
bytes_processed = Step(bytes_to_process, GC_VIA_STACK_GUARD,
FORCE_COMPLETION, StepOrigin::kV8);
}
@@ -1120,10 +1155,6 @@ size_t IncrementalMarking::Step(size_t bytes_to_process,
CompletionAction action,
ForceCompletionAction completion,
StepOrigin step_origin) {
- HistogramTimerScope incremental_marking_scope(
- heap_->isolate()->counters()->gc_incremental_marking());
- TRACE_EVENT0("v8", "V8.GCIncrementalMarking");
- TRACE_GC(heap_->tracer(), GCTracer::Scope::MC_INCREMENTAL);
double start = heap_->MonotonicallyIncreasingTimeInMs();
if (state_ == SWEEPING) {
@@ -1133,41 +1164,26 @@ size_t IncrementalMarking::Step(size_t bytes_to_process,
size_t bytes_processed = 0;
if (state_ == MARKING) {
- const bool incremental_wrapper_tracing =
- FLAG_incremental_marking_wrappers && heap_->UsingEmbedderHeapTracer();
- const bool process_wrappers =
- incremental_wrapper_tracing &&
- (heap_->RequiresImmediateWrapperProcessing() ||
- heap_->mark_compact_collector()->marking_deque()->IsEmpty());
- bool wrapper_work_left = incremental_wrapper_tracing;
- if (!process_wrappers) {
- bytes_processed = ProcessMarkingDeque(bytes_to_process);
- if (step_origin == StepOrigin::kTask) {
- bytes_marked_ahead_of_schedule_ += bytes_processed;
- }
- } else {
- const double wrapper_deadline =
- heap_->MonotonicallyIncreasingTimeInMs() + kStepSizeInMs;
- TRACE_GC(heap()->tracer(),
- GCTracer::Scope::MC_INCREMENTAL_WRAPPER_TRACING);
- heap_->RegisterWrappersWithEmbedderHeapTracer();
- wrapper_work_left = heap_->embedder_heap_tracer()->AdvanceTracing(
- wrapper_deadline, EmbedderHeapTracer::AdvanceTracingActions(
- EmbedderHeapTracer::ForceCompletionAction::
- DO_NOT_FORCE_COMPLETION));
+ bytes_processed = ProcessMarkingDeque(bytes_to_process);
+ if (step_origin == StepOrigin::kTask) {
+ bytes_marked_ahead_of_schedule_ += bytes_processed;
}
- if (heap_->mark_compact_collector()->marking_deque()->IsEmpty() &&
- !wrapper_work_left) {
- if (completion == FORCE_COMPLETION ||
- IsIdleMarkingDelayCounterLimitReached()) {
- if (!finalize_marking_completed_) {
- FinalizeMarking(action);
+ if (heap_->mark_compact_collector()->marking_deque()->IsEmpty()) {
+ if (heap_->local_embedder_heap_tracer()
+ ->ShouldFinalizeIncrementalMarking()) {
+ if (completion == FORCE_COMPLETION ||
+ IsIdleMarkingDelayCounterLimitReached()) {
+ if (!finalize_marking_completed_) {
+ FinalizeMarking(action);
+ } else {
+ MarkingComplete(action);
+ }
} else {
- MarkingComplete(action);
+ IncrementIdleMarkingDelayCounter();
}
} else {
- IncrementIdleMarkingDelayCounter();
+ heap_->local_embedder_heap_tracer()->NotifyV8MarkingDequeWasEmpty();
}
}
}
diff --git a/deps/v8/src/heap/incremental-marking.h b/deps/v8/src/heap/incremental-marking.h
index 7ce0ae2379..5464f129a7 100644
--- a/deps/v8/src/heap/incremental-marking.h
+++ b/deps/v8/src/heap/incremental-marking.h
@@ -22,7 +22,7 @@ class PagedSpace;
enum class StepOrigin { kV8, kTask };
-class IncrementalMarking {
+class V8_EXPORT_PRIVATE IncrementalMarking {
public:
enum State { STOPPED, SWEEPING, MARKING, COMPLETE };
@@ -151,8 +151,7 @@ class IncrementalMarking {
INLINE(void RecordWriteOfCodeEntry(JSFunction* host, Object** slot,
Code* value));
- V8_EXPORT_PRIVATE void RecordWriteSlow(HeapObject* obj, Object** slot,
- Object* value);
+ void RecordWriteSlow(HeapObject* obj, Object** slot, Object* value);
void RecordWriteIntoCodeSlow(Code* host, RelocInfo* rinfo, Object* value);
void RecordWriteOfCodeEntrySlow(JSFunction* host, Object** slot, Code* value);
void RecordCodeTargetPatch(Code* host, Address pc, HeapObject* value);
@@ -184,7 +183,7 @@ class IncrementalMarking {
static void MarkBlack(HeapObject* object, int size);
- static void TransferMark(Heap* heap, Address old_start, Address new_start);
+ static void TransferMark(Heap* heap, HeapObject* from, HeapObject* to);
// Returns true if the color transfer requires live bytes updating.
INLINE(static bool TransferColor(HeapObject* from, HeapObject* to,
@@ -298,6 +297,7 @@ class IncrementalMarking {
bool was_activated_;
bool black_allocation_;
bool finalize_marking_completed_;
+ bool trace_wrappers_toggle_;
GCRequestType request_type_;
diff --git a/deps/v8/src/heap/mark-compact-inl.h b/deps/v8/src/heap/mark-compact-inl.h
index 784a76f8bd..1973753b0c 100644
--- a/deps/v8/src/heap/mark-compact-inl.h
+++ b/deps/v8/src/heap/mark-compact-inl.h
@@ -15,7 +15,7 @@ namespace internal {
void MarkCompactCollector::PushBlack(HeapObject* obj) {
DCHECK(Marking::IsBlack(ObjectMarking::MarkBitFrom(obj)));
if (marking_deque()->Push(obj)) {
- MemoryChunk::IncrementLiveBytesFromGC(obj, obj->Size());
+ MemoryChunk::IncrementLiveBytes(obj, obj->Size());
} else {
MarkBit mark_bit = ObjectMarking::MarkBitFrom(obj);
Marking::BlackToGrey(mark_bit);
@@ -26,7 +26,7 @@ void MarkCompactCollector::PushBlack(HeapObject* obj) {
void MarkCompactCollector::UnshiftBlack(HeapObject* obj) {
DCHECK(Marking::IsBlack(ObjectMarking::MarkBitFrom(obj)));
if (!marking_deque()->Unshift(obj)) {
- MemoryChunk::IncrementLiveBytesFromGC(obj, -obj->Size());
+ MemoryChunk::IncrementLiveBytes(obj, -obj->Size());
MarkBit mark_bit = ObjectMarking::MarkBitFrom(obj);
Marking::BlackToGrey(mark_bit);
}
@@ -47,7 +47,7 @@ void MarkCompactCollector::SetMark(HeapObject* obj, MarkBit mark_bit) {
DCHECK(Marking::IsWhite(mark_bit));
DCHECK(ObjectMarking::MarkBitFrom(obj) == mark_bit);
Marking::WhiteToBlack(mark_bit);
- MemoryChunk::IncrementLiveBytesFromGC(obj, obj->Size());
+ MemoryChunk::IncrementLiveBytes(obj, obj->Size());
}
@@ -195,12 +195,13 @@ HeapObject* LiveObjectIterator<T>::Next() {
object = black_object;
}
} else if ((T == kGreyObjects || T == kAllLiveObjects)) {
+ map = base::NoBarrierAtomicValue<Map*>::FromAddress(addr)->Value();
object = HeapObject::FromAddress(addr);
}
// We found a live object.
if (object != nullptr) {
- if (map != nullptr && map == heap()->one_pointer_filler_map()) {
+ if (map == heap()->one_pointer_filler_map()) {
// Black areas together with slack tracking may result in black one
// word filler objects. We filter these objects out in the iterator.
object = nullptr;
diff --git a/deps/v8/src/heap/mark-compact.cc b/deps/v8/src/heap/mark-compact.cc
index 88e6983035..c931f520b7 100644
--- a/deps/v8/src/heap/mark-compact.cc
+++ b/deps/v8/src/heap/mark-compact.cc
@@ -53,7 +53,6 @@ MarkCompactCollector::MarkCompactCollector(Heap* heap)
#ifdef DEBUG
state_(IDLE),
#endif
- marking_parity_(ODD_MARKING_PARITY),
was_marked_incrementally_(false),
evacuation_(false),
compacting_(false),
@@ -105,7 +104,9 @@ static void VerifyMarking(Heap* heap, Address bottom, Address top) {
Address next_object_must_be_here_or_later = bottom;
for (Address current = bottom; current < top;) {
object = HeapObject::FromAddress(current);
- if (MarkCompactCollector::IsMarked(object)) {
+ // One word fillers at the end of a black area can be grey.
+ if (MarkCompactCollector::IsMarked(object) &&
+ object->map() != heap->one_pointer_filler_map()) {
CHECK(Marking::IsBlack(ObjectMarking::MarkBitFrom(object)));
CHECK(current >= next_object_must_be_here_or_later);
object->Iterate(&visitor);
@@ -133,7 +134,7 @@ static void VerifyMarking(NewSpace* space) {
// page->area_start() as start of range on all pages.
CHECK_EQ(space->bottom(), Page::FromAddress(space->bottom())->area_start());
- NewSpacePageRange range(space->bottom(), end);
+ PageRange range(space->bottom(), end);
for (auto it = range.begin(); it != range.end();) {
Page* page = *(it++);
Address limit = it != range.end() ? page->area_end() : end;
@@ -197,7 +198,7 @@ static void VerifyEvacuation(Page* page) {
static void VerifyEvacuation(NewSpace* space) {
VerifyEvacuationVisitor visitor;
- NewSpacePageRange range(space->bottom(), space->top());
+ PageRange range(space->bottom(), space->top());
for (auto it = range.begin(); it != range.end();) {
Page* page = *(it++);
Address current = page->area_start();
@@ -322,7 +323,6 @@ void MarkCompactCollector::CollectGarbage() {
Finish();
}
-
#ifdef VERIFY_HEAP
void MarkCompactCollector::VerifyMarkbitsAreClean(PagedSpace* space) {
for (Page* p : *space) {
@@ -333,7 +333,7 @@ void MarkCompactCollector::VerifyMarkbitsAreClean(PagedSpace* space) {
void MarkCompactCollector::VerifyMarkbitsAreClean(NewSpace* space) {
- for (Page* p : NewSpacePageRange(space->bottom(), space->top())) {
+ for (Page* p : PageRange(space->bottom(), space->top())) {
CHECK(p->markbits()->IsClean());
CHECK_EQ(0, p->LiveBytes());
}
@@ -354,7 +354,6 @@ void MarkCompactCollector::VerifyMarkbitsAreClean() {
}
}
-
void MarkCompactCollector::VerifyWeakEmbeddedObjectsInCode() {
HeapObjectIterator code_iterator(heap()->code_space());
for (HeapObject* obj = code_iterator.Next(); obj != NULL;
@@ -779,10 +778,8 @@ void MarkCompactCollector::Prepare() {
DCHECK(!FLAG_never_compact || !FLAG_always_compact);
- if (sweeping_in_progress()) {
- // Instead of waiting we could also abort the sweeper threads here.
- EnsureSweepingCompleted();
- }
+ // Instead of waiting we could also abort the sweeper threads here.
+ EnsureSweepingCompleted();
if (heap()->incremental_marking()->IsSweeping()) {
heap()->incremental_marking()->Stop();
@@ -801,22 +798,14 @@ void MarkCompactCollector::Prepare() {
AbortWeakCells();
AbortTransitionArrays();
AbortCompaction();
- if (heap_->UsingEmbedderHeapTracer()) {
- heap_->embedder_heap_tracer()->AbortTracing();
- }
+ heap_->local_embedder_heap_tracer()->AbortTracing();
marking_deque()->Clear();
was_marked_incrementally_ = false;
}
if (!was_marked_incrementally_) {
- if (heap_->UsingEmbedderHeapTracer()) {
- TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WRAPPER_PROLOGUE);
- heap_->embedder_heap_tracer()->TracePrologue();
- }
- }
-
- if (heap_->UsingEmbedderHeapTracer()) {
- heap_->embedder_heap_tracer()->EnterFinalPause();
+ TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WRAPPER_PROLOGUE);
+ heap_->local_embedder_heap_tracer()->TracePrologue();
}
// Don't start compaction if we are in the middle of incremental
@@ -874,13 +863,6 @@ void MarkCompactCollector::Finish() {
}
heap_->incremental_marking()->ClearIdleMarkingDelayCounter();
-
- if (marking_parity_ == EVEN_MARKING_PARITY) {
- marking_parity_ = ODD_MARKING_PARITY;
- } else {
- DCHECK(marking_parity_ == ODD_MARKING_PARITY);
- marking_parity_ = EVEN_MARKING_PARITY;
- }
}
@@ -914,6 +896,8 @@ void MarkCompactCollector::Finish() {
void CodeFlusher::ProcessJSFunctionCandidates() {
Code* lazy_compile = isolate_->builtins()->builtin(Builtins::kCompileLazy);
+ Code* interpreter_entry_trampoline =
+ isolate_->builtins()->builtin(Builtins::kInterpreterEntryTrampoline);
Object* undefined = isolate_->heap()->undefined_value();
JSFunction* candidate = jsfunction_candidates_head_;
@@ -936,8 +920,13 @@ void CodeFlusher::ProcessJSFunctionCandidates() {
if (!shared->OptimizedCodeMapIsCleared()) {
shared->ClearOptimizedCodeMap();
}
- shared->set_code(lazy_compile);
- candidate->set_code(lazy_compile);
+ if (shared->HasBytecodeArray()) {
+ shared->set_code(interpreter_entry_trampoline);
+ candidate->set_code(interpreter_entry_trampoline);
+ } else {
+ shared->set_code(lazy_compile);
+ candidate->set_code(lazy_compile);
+ }
} else {
DCHECK(Marking::IsBlack(code_mark));
candidate->set_code(code);
@@ -964,7 +953,8 @@ void CodeFlusher::ProcessJSFunctionCandidates() {
void CodeFlusher::ProcessSharedFunctionInfoCandidates() {
Code* lazy_compile = isolate_->builtins()->builtin(Builtins::kCompileLazy);
-
+ Code* interpreter_entry_trampoline =
+ isolate_->builtins()->builtin(Builtins::kInterpreterEntryTrampoline);
SharedFunctionInfo* candidate = shared_function_info_candidates_head_;
SharedFunctionInfo* next_candidate;
while (candidate != NULL) {
@@ -983,7 +973,11 @@ void CodeFlusher::ProcessSharedFunctionInfoCandidates() {
if (!candidate->OptimizedCodeMapIsCleared()) {
candidate->ClearOptimizedCodeMap();
}
- candidate->set_code(lazy_compile);
+ if (candidate->HasBytecodeArray()) {
+ candidate->set_code(interpreter_entry_trampoline);
+ } else {
+ candidate->set_code(lazy_compile);
+ }
}
Object** code_slot =
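// Both candidate-processing loops in this file now pick the replacement code
// by the same rule: functions backed by a BytecodeArray are reset to the
// interpreter entry trampoline instead of CompileLazy. A sketch of that
// selection, with hypothetical types standing in for Code and
// SharedFunctionInfo:
struct CodeStubSketch {
  const char* name;
};

struct FlushableFunctionSketch {
  bool has_bytecode_array;
};

const CodeStubSketch* ReplacementCodeSketch(
    const FlushableFunctionSketch& f, const CodeStubSketch* lazy_compile,
    const CodeStubSketch* interpreter_trampoline) {
  // Interpreted functions keep a way back into the interpreter; everything
  // else falls back to lazy compilation.
  return f.has_bytecode_array ? interpreter_trampoline : lazy_compile;
}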
@@ -1083,6 +1077,39 @@ void CodeFlusher::IteratePointersToFromSpace(ObjectVisitor* v) {
}
}
+class StaticYoungGenerationMarkingVisitor
+ : public StaticNewSpaceVisitor<StaticYoungGenerationMarkingVisitor> {
+ public:
+ static void Initialize(Heap* heap) {
+ StaticNewSpaceVisitor<StaticYoungGenerationMarkingVisitor>::Initialize();
+ }
+
+ inline static void VisitPointer(Heap* heap, HeapObject* object, Object** p) {
+ Object* target = *p;
+ if (heap->InNewSpace(target)) {
+ if (MarkRecursively(heap, HeapObject::cast(target))) return;
+ PushOnMarkingDeque(heap, target);
+ }
+ }
+
+ protected:
+ inline static void PushOnMarkingDeque(Heap* heap, Object* obj) {
+ HeapObject* object = HeapObject::cast(obj);
+ MarkBit mark_bit = ObjectMarking::MarkBitFrom(object);
+ heap->mark_compact_collector()->MarkObject(object, mark_bit);
+ }
+
+ inline static bool MarkRecursively(Heap* heap, HeapObject* object) {
+ StackLimitCheck check(heap->isolate());
+ if (check.HasOverflowed()) return false;
+
+ MarkBit mark = ObjectMarking::MarkBitFrom(object);
+ if (Marking::IsBlackOrGrey(mark)) return true;
+ heap->mark_compact_collector()->SetMark(object, mark);
+ IterateBody(object->map(), object);
+ return true;
+ }
+};
class MarkCompactMarkingVisitor
: public StaticMarkingVisitor<MarkCompactMarkingVisitor> {
@@ -1336,11 +1363,12 @@ void MarkCompactCollector::PrepareForCodeFlushing() {
heap()->isolate()->compilation_cache()->IterateFunctions(&visitor);
heap()->isolate()->handle_scope_implementer()->Iterate(&visitor);
- ProcessMarkingDeque();
+ ProcessMarkingDeque<MarkCompactMode::FULL>();
}
// Visitor class for marking heap roots.
+template <MarkCompactMode mode>
class RootMarkingVisitor : public ObjectVisitor {
public:
explicit RootMarkingVisitor(Heap* heap)
@@ -1362,6 +1390,10 @@ class RootMarkingVisitor : public ObjectVisitor {
HeapObject* object = HeapObject::cast(*p);
+ if (mode == MarkCompactMode::YOUNG_GENERATION &&
+ !collector_->heap()->InNewSpace(object))
+ return;
+
MarkBit mark_bit = ObjectMarking::MarkBitFrom(object);
if (Marking::IsBlackOrGrey(mark_bit)) return;
@@ -1369,14 +1401,21 @@ class RootMarkingVisitor : public ObjectVisitor {
// Mark the object.
collector_->SetMark(object, mark_bit);
- // Mark the map pointer and body, and push them on the marking stack.
- MarkBit map_mark = ObjectMarking::MarkBitFrom(map);
- collector_->MarkObject(map, map_mark);
- MarkCompactMarkingVisitor::IterateBody(map, object);
+ switch (mode) {
+ case MarkCompactMode::FULL: {
+ // Mark the map pointer and body, and push them on the marking stack.
+ MarkBit map_mark = ObjectMarking::MarkBitFrom(map);
+ collector_->MarkObject(map, map_mark);
+ MarkCompactMarkingVisitor::IterateBody(map, object);
+ } break;
+ case MarkCompactMode::YOUNG_GENERATION:
+ StaticYoungGenerationMarkingVisitor::IterateBody(map, object);
+ break;
+ }
// Mark all the objects reachable from the map and body. May leave
// overflowed objects in the heap.
- collector_->EmptyMarkingDeque();
+ collector_->EmptyMarkingDeque<mode>();
}
MarkCompactCollector* collector_;
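// RootMarkingVisitor is now parameterized on MarkCompactMode so the same
// root-iteration code serves both full and young-generation marking; the
// YOUNG_GENERATION instantiation simply ignores roots outside new space and
// uses the young-generation body visitor. A minimal, hypothetical sketch of
// that compile-time dispatch:
enum class ModeSketch { FULL, YOUNG_GENERATION };

template <ModeSketch mode>
void VisitRootSketch(bool in_new_space, void (*mark_full)(),
                     void (*mark_young)()) {
  if (mode == ModeSketch::YOUNG_GENERATION && !in_new_space) return;
  if (mode == ModeSketch::FULL) {
    mark_full();
  } else {
    mark_young();
  }
}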
@@ -1921,7 +1960,7 @@ void MarkCompactCollector::DiscoverGreyObjectsInSpace(PagedSpace* space) {
void MarkCompactCollector::DiscoverGreyObjectsInNewSpace() {
NewSpace* space = heap()->new_space();
- for (Page* page : NewSpacePageRange(space->bottom(), space->top())) {
+ for (Page* page : PageRange(space->bottom(), space->top())) {
DiscoverGreyObjectsOnPage(page);
if (marking_deque()->IsFull()) return;
}
@@ -1946,8 +1985,8 @@ bool MarkCompactCollector::IsUnmarkedHeapObjectWithHeap(Heap* heap,
return Marking::IsWhite(mark);
}
-
-void MarkCompactCollector::MarkStringTable(RootMarkingVisitor* visitor) {
+void MarkCompactCollector::MarkStringTable(
+ RootMarkingVisitor<MarkCompactMode::FULL>* visitor) {
StringTable* string_table = heap()->string_table();
// Mark the string table itself.
MarkBit string_table_mark = ObjectMarking::MarkBitFrom(string_table);
@@ -1957,7 +1996,7 @@ void MarkCompactCollector::MarkStringTable(RootMarkingVisitor* visitor) {
}
// Explicitly mark the prefix.
string_table->IteratePrefix(visitor);
- ProcessMarkingDeque();
+ ProcessMarkingDeque<MarkCompactMode::FULL>();
}
@@ -1966,8 +2005,8 @@ void MarkCompactCollector::MarkAllocationSite(AllocationSite* site) {
SetMark(site, mark_bit);
}
-
-void MarkCompactCollector::MarkRoots(RootMarkingVisitor* visitor) {
+void MarkCompactCollector::MarkRoots(
+ RootMarkingVisitor<MarkCompactMode::FULL>* visitor) {
// Mark the heap roots including global variables, stack variables,
// etc., and all objects reachable from them.
heap()->IterateStrongRoots(visitor, VISIT_ONLY_STRONG);
@@ -1977,8 +2016,8 @@ void MarkCompactCollector::MarkRoots(RootMarkingVisitor* visitor) {
// There may be overflowed objects in the heap. Visit them now.
while (marking_deque()->overflowed()) {
- RefillMarkingDeque();
- EmptyMarkingDeque();
+ RefillMarkingDeque<MarkCompactMode::FULL>();
+ EmptyMarkingDeque<MarkCompactMode::FULL>();
}
}
@@ -2018,6 +2057,7 @@ void MarkCompactCollector::MarkImplicitRefGroups(
// Before: the marking stack contains zero or more heap object pointers.
// After: the marking stack is empty, and all objects reachable from the
// marking stack have been marked, or are overflowed in the heap.
+template <MarkCompactMode mode>
void MarkCompactCollector::EmptyMarkingDeque() {
while (!marking_deque()->IsEmpty()) {
HeapObject* object = marking_deque()->Pop();
@@ -2028,10 +2068,17 @@ void MarkCompactCollector::EmptyMarkingDeque() {
DCHECK(!Marking::IsWhite(ObjectMarking::MarkBitFrom(object)));
Map* map = object->map();
- MarkBit map_mark = ObjectMarking::MarkBitFrom(map);
- MarkObject(map, map_mark);
-
- MarkCompactMarkingVisitor::IterateBody(map, object);
+ switch (mode) {
+ case MarkCompactMode::FULL: {
+ MarkBit map_mark = ObjectMarking::MarkBitFrom(map);
+ MarkObject(map, map_mark);
+ MarkCompactMarkingVisitor::IterateBody(map, object);
+ } break;
+ case MarkCompactMode::YOUNG_GENERATION: {
+ DCHECK(Marking::IsBlack(ObjectMarking::MarkBitFrom(object)));
+ StaticYoungGenerationMarkingVisitor::IterateBody(map, object);
+ } break;
+ }
}
}
@@ -2041,6 +2088,7 @@ void MarkCompactCollector::EmptyMarkingDeque() {
// before sweeping completes. If sweeping completes, there are no remaining
// overflowed objects in the heap so the overflow flag on the markings stack
// is cleared.
+template <MarkCompactMode mode>
void MarkCompactCollector::RefillMarkingDeque() {
isolate()->CountUsage(v8::Isolate::UseCounterFeature::kMarkDequeOverflow);
DCHECK(marking_deque()->overflowed());
@@ -2048,18 +2096,17 @@ void MarkCompactCollector::RefillMarkingDeque() {
DiscoverGreyObjectsInNewSpace();
if (marking_deque()->IsFull()) return;
- DiscoverGreyObjectsInSpace(heap()->old_space());
- if (marking_deque()->IsFull()) return;
-
- DiscoverGreyObjectsInSpace(heap()->code_space());
- if (marking_deque()->IsFull()) return;
-
- DiscoverGreyObjectsInSpace(heap()->map_space());
- if (marking_deque()->IsFull()) return;
-
- LargeObjectIterator lo_it(heap()->lo_space());
- DiscoverGreyObjectsWithIterator(&lo_it);
- if (marking_deque()->IsFull()) return;
+ if (mode == MarkCompactMode::FULL) {
+ DiscoverGreyObjectsInSpace(heap()->old_space());
+ if (marking_deque()->IsFull()) return;
+ DiscoverGreyObjectsInSpace(heap()->code_space());
+ if (marking_deque()->IsFull()) return;
+ DiscoverGreyObjectsInSpace(heap()->map_space());
+ if (marking_deque()->IsFull()) return;
+ LargeObjectIterator lo_it(heap()->lo_space());
+ DiscoverGreyObjectsWithIterator(&lo_it);
+ if (marking_deque()->IsFull()) return;
+ }
marking_deque()->ClearOverflowed();
}
@@ -2069,12 +2116,14 @@ void MarkCompactCollector::RefillMarkingDeque() {
// stack. Before: the marking stack contains zero or more heap object
// pointers. After: the marking stack is empty and there are no overflowed
// objects in the heap.
+template <MarkCompactMode mode>
void MarkCompactCollector::ProcessMarkingDeque() {
- EmptyMarkingDeque();
+ EmptyMarkingDeque<mode>();
while (marking_deque()->overflowed()) {
- RefillMarkingDeque();
- EmptyMarkingDeque();
+ RefillMarkingDeque<mode>();
+ EmptyMarkingDeque<mode>();
}
+ DCHECK(marking_deque()->IsEmpty());
}
// Mark all objects reachable (transitively) from objects on the marking
@@ -2084,23 +2133,33 @@ void MarkCompactCollector::ProcessEphemeralMarking(
DCHECK(marking_deque()->IsEmpty() && !marking_deque()->overflowed());
bool work_to_do = true;
while (work_to_do) {
- if (heap_->UsingEmbedderHeapTracer()) {
- TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WRAPPER_TRACING);
- heap_->RegisterWrappersWithEmbedderHeapTracer();
- heap_->embedder_heap_tracer()->AdvanceTracing(
- 0, EmbedderHeapTracer::AdvanceTracingActions(
- EmbedderHeapTracer::ForceCompletionAction::FORCE_COMPLETION));
- }
if (!only_process_harmony_weak_collections) {
- TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_OBJECT_GROUPING);
- isolate()->global_handles()->IterateObjectGroups(
- visitor, &IsUnmarkedHeapObjectWithHeap);
- MarkImplicitRefGroups(&MarkCompactMarkingVisitor::MarkObject);
+ if (heap_->local_embedder_heap_tracer()->InUse()) {
+ TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WRAPPER_TRACING);
+ heap_->local_embedder_heap_tracer()->RegisterWrappersWithRemoteTracer();
+ heap_->local_embedder_heap_tracer()->Trace(
+ 0,
+ EmbedderHeapTracer::AdvanceTracingActions(
+ EmbedderHeapTracer::ForceCompletionAction::FORCE_COMPLETION));
+ } else {
+ TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_OBJECT_GROUPING);
+ isolate()->global_handles()->IterateObjectGroups(
+ visitor, &IsUnmarkedHeapObjectWithHeap);
+ MarkImplicitRefGroups(&MarkCompactMarkingVisitor::MarkObject);
+ }
+ } else {
+ // TODO(mlippautz): We currently do not trace through blink when
+ // discovering new objects reachable from weak roots (that have been made
+ // strong). This is a limitation of not having a separate handle type
+ // that doesn't require zapping before this phase. See crbug.com/668060.
+ heap_->local_embedder_heap_tracer()->ClearCachedWrappersToTrace();
}
ProcessWeakCollections();
work_to_do = !marking_deque()->IsEmpty();
- ProcessMarkingDeque();
+ ProcessMarkingDeque<MarkCompactMode::FULL>();
}
+ CHECK(marking_deque()->IsEmpty());
+ CHECK_EQ(0, heap()->local_embedder_heap_tracer()->NumberOfWrappersToTrace());
}
void MarkCompactCollector::ProcessTopOptimizedFrame(ObjectVisitor* visitor) {
@@ -2114,7 +2173,7 @@ void MarkCompactCollector::ProcessTopOptimizedFrame(ObjectVisitor* visitor) {
if (!code->CanDeoptAt(it.frame()->pc())) {
Code::BodyDescriptor::IterateBody(code, visitor);
}
- ProcessMarkingDeque();
+ ProcessMarkingDeque<MarkCompactMode::FULL>();
return;
}
}
@@ -2154,6 +2213,7 @@ void MarkingDeque::StartUsing() {
void MarkingDeque::StopUsing() {
base::LockGuard<base::Mutex> guard(&mutex_);
+ if (!in_use_) return;
DCHECK(IsEmpty());
DCHECK(!overflowed_);
top_ = bottom_ = mask_ = 0;
@@ -2267,6 +2327,95 @@ void MarkCompactCollector::RecordObjectStats() {
}
}
+SlotCallbackResult MarkCompactCollector::CheckAndMarkObject(
+ Heap* heap, Address slot_address) {
+ Object* object = *reinterpret_cast<Object**>(slot_address);
+ if (heap->InNewSpace(object)) {
+ // Marking happens before flipping the young generation, so the object
+ // has to be in ToSpace.
+ DCHECK(heap->InToSpace(object));
+ HeapObject* heap_object = reinterpret_cast<HeapObject*>(object);
+ MarkBit mark_bit = ObjectMarking::MarkBitFrom(heap_object);
+ if (Marking::IsBlackOrGrey(mark_bit)) {
+ return KEEP_SLOT;
+ }
+ heap->mark_compact_collector()->SetMark(heap_object, mark_bit);
+ StaticYoungGenerationMarkingVisitor::IterateBody(heap_object->map(),
+ heap_object);
+ return KEEP_SLOT;
+ }
+ return REMOVE_SLOT;
+}
+
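// The KEEP_SLOT / REMOVE_SLOT contract used above when visiting old-to-new
// remembered-set entries during young-generation marking: slots still
// pointing into the young generation are kept (and their targets marked),
// everything else is dropped from the set. A sketch with hypothetical types
// in place of the V8 ones:
enum SlotCallbackResultSketch { KEEP_SLOT_SKETCH, REMOVE_SLOT_SKETCH };

struct YoungObjectSketch {
  bool in_new_space;
  bool marked;
};

SlotCallbackResultSketch CheckAndMarkSketch(YoungObjectSketch** slot) {
  YoungObjectSketch* target = *slot;
  if (target == nullptr || !target->in_new_space) {
    return REMOVE_SLOT_SKETCH;  // Slot no longer interesting for minor GC.
  }
  if (!target->marked) {
    target->marked = true;  // Would also push the object for body visiting.
  }
  return KEEP_SLOT_SKETCH;
}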
+static bool IsUnmarkedObject(Heap* heap, Object** p) {
+ DCHECK_IMPLIES(heap->InNewSpace(*p), heap->InToSpace(*p));
+ return heap->InNewSpace(*p) &&
+ !Marking::IsBlack(ObjectMarking::MarkBitFrom(HeapObject::cast(*p)));
+}
+
+void MarkCompactCollector::MarkLiveObjectsInYoungGeneration() {
+ TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK);
+
+ PostponeInterruptsScope postpone(isolate());
+
+ StaticYoungGenerationMarkingVisitor::Initialize(heap());
+ RootMarkingVisitor<MarkCompactMode::YOUNG_GENERATION> root_visitor(heap());
+
+ marking_deque()->StartUsing();
+
+ isolate()->global_handles()->IdentifyWeakUnmodifiedObjects(
+ &Heap::IsUnmodifiedHeapObject);
+
+ {
+ TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK_ROOTS);
+ heap()->IterateRoots(&root_visitor, VISIT_ALL_IN_SCAVENGE);
+ ProcessMarkingDeque<MarkCompactMode::YOUNG_GENERATION>();
+ }
+
+ {
+ TRACE_GC(heap()->tracer(),
+ GCTracer::Scope::MINOR_MC_MARK_OLD_TO_NEW_POINTERS);
+ RememberedSet<OLD_TO_NEW>::Iterate(heap(), [this](Address addr) {
+ return CheckAndMarkObject(heap(), addr);
+ });
+ RememberedSet<OLD_TO_NEW>::IterateTyped(
+ heap(), [this](SlotType type, Address host_addr, Address addr) {
+ return UpdateTypedSlotHelper::UpdateTypedSlot(
+ isolate(), type, addr, [this](Object** addr) {
+ return CheckAndMarkObject(heap(),
+ reinterpret_cast<Address>(addr));
+ });
+ });
+ ProcessMarkingDeque<MarkCompactMode::YOUNG_GENERATION>();
+ }
+
+ {
+ TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK_WEAK);
+ heap()->VisitEncounteredWeakCollections(&root_visitor);
+ ProcessMarkingDeque<MarkCompactMode::YOUNG_GENERATION>();
+ }
+
+ if (is_code_flushing_enabled()) {
+ TRACE_GC(heap()->tracer(),
+ GCTracer::Scope::MINOR_MC_MARK_CODE_FLUSH_CANDIDATES);
+ code_flusher()->IteratePointersToFromSpace(&root_visitor);
+ ProcessMarkingDeque<MarkCompactMode::YOUNG_GENERATION>();
+ }
+
+ {
+ TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK_GLOBAL_HANDLES);
+ isolate()->global_handles()->MarkNewSpaceWeakUnmodifiedObjectsPending(
+ &IsUnmarkedObject);
+ isolate()
+ ->global_handles()
+ ->IterateNewSpaceWeakUnmodifiedRoots<GlobalHandles::VISIT_OTHERS>(
+ &root_visitor);
+ ProcessMarkingDeque<MarkCompactMode::YOUNG_GENERATION>();
+ }
+
+ marking_deque()->StopUsing();
+}
+
void MarkCompactCollector::MarkLiveObjects() {
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK);
// The recursive GC marker detects when it is nearing stack overflow,
@@ -2291,12 +2440,14 @@ void MarkCompactCollector::MarkLiveObjects() {
marking_deque()->StartUsing();
+ heap_->local_embedder_heap_tracer()->EnterFinalPause();
+
{
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_PREPARE_CODE_FLUSH);
PrepareForCodeFlushing();
}
- RootMarkingVisitor root_visitor(heap());
+ RootMarkingVisitor<MarkCompactMode::FULL> root_visitor(heap());
{
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_ROOTS);
@@ -2328,7 +2479,7 @@ void MarkCompactCollector::MarkLiveObjects() {
GCTracer::Scope::MC_MARK_WEAK_CLOSURE_WEAK_HANDLES);
heap()->isolate()->global_handles()->IdentifyWeakHandles(
&IsUnmarkedHeapObject);
- ProcessMarkingDeque();
+ ProcessMarkingDeque<MarkCompactMode::FULL>();
}
// Then we mark the objects.
@@ -2336,7 +2487,7 @@ void MarkCompactCollector::MarkLiveObjects() {
TRACE_GC(heap()->tracer(),
GCTracer::Scope::MC_MARK_WEAK_CLOSURE_WEAK_ROOTS);
heap()->isolate()->global_handles()->IterateWeakRoots(&root_visitor);
- ProcessMarkingDeque();
+ ProcessMarkingDeque<MarkCompactMode::FULL>();
}
// Repeat Harmony weak maps marking to mark unmarked objects reachable from
@@ -2347,9 +2498,9 @@ void MarkCompactCollector::MarkLiveObjects() {
{
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WEAK_CLOSURE_HARMONY);
ProcessEphemeralMarking(&root_visitor, true);
- if (heap_->UsingEmbedderHeapTracer()) {
+ {
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WRAPPER_EPILOGUE);
- heap()->embedder_heap_tracer()->TraceEpilogue();
+ heap()->local_embedder_heap_tracer()->TraceEpilogue();
}
}
}
@@ -2371,8 +2522,8 @@ void MarkCompactCollector::ClearNonLiveReferences() {
string_table->ElementsRemoved(internalized_visitor.PointersRemoved());
ExternalStringTableCleaner external_visitor(heap(), nullptr);
- heap()->external_string_table_.Iterate(&external_visitor);
- heap()->external_string_table_.CleanUp();
+ heap()->external_string_table_.IterateAll(&external_visitor);
+ heap()->external_string_table_.CleanUpAll();
}
{
@@ -2578,8 +2729,8 @@ bool MarkCompactCollector::CompactTransitionArray(
// array disappeared during GC.
int trim = TransitionArray::Capacity(transitions) - transition_index;
if (trim > 0) {
- heap_->RightTrimFixedArray<Heap::SEQUENTIAL_TO_SWEEPER>(
- transitions, trim * TransitionArray::kTransitionSize);
+ heap_->RightTrimFixedArray(transitions,
+ trim * TransitionArray::kTransitionSize);
transitions->SetNumberOfTransitions(transition_index);
}
return descriptors_owner_died;
@@ -2597,8 +2748,8 @@ void MarkCompactCollector::TrimDescriptorArray(Map* map,
int number_of_descriptors = descriptors->number_of_descriptors_storage();
int to_trim = number_of_descriptors - number_of_own_descriptors;
if (to_trim > 0) {
- heap_->RightTrimFixedArray<Heap::SEQUENTIAL_TO_SWEEPER>(
- descriptors, to_trim * DescriptorArray::kDescriptorSize);
+ heap_->RightTrimFixedArray(descriptors,
+ to_trim * DescriptorArray::kDescriptorSize);
descriptors->SetNumberOfDescriptors(number_of_own_descriptors);
if (descriptors->HasEnumCache()) TrimEnumCache(map, descriptors);
@@ -2629,13 +2780,11 @@ void MarkCompactCollector::TrimEnumCache(Map* map,
int to_trim = enum_cache->length() - live_enum;
if (to_trim <= 0) return;
- heap_->RightTrimFixedArray<Heap::SEQUENTIAL_TO_SWEEPER>(
- descriptors->GetEnumCache(), to_trim);
+ heap_->RightTrimFixedArray(descriptors->GetEnumCache(), to_trim);
if (!descriptors->HasEnumIndicesCache()) return;
FixedArray* enum_indices_cache = descriptors->GetEnumIndicesCache();
- heap_->RightTrimFixedArray<Heap::SEQUENTIAL_TO_SWEEPER>(enum_indices_cache,
- to_trim);
+ heap_->RightTrimFixedArray(enum_indices_cache, to_trim);
}
@@ -2890,7 +3039,7 @@ static String* UpdateReferenceInExternalStringTableEntry(Heap* heap,
void MarkCompactCollector::EvacuateNewSpacePrologue() {
NewSpace* new_space = heap()->new_space();
// Append the list of new space pages to be processed.
- for (Page* p : NewSpacePageRange(new_space->bottom(), new_space->top())) {
+ for (Page* p : PageRange(new_space->bottom(), new_space->top())) {
newspace_evacuation_candidates_.Add(p);
}
new_space->Flip();
@@ -3676,7 +3825,7 @@ void UpdateToSpacePointersInParallel(Heap* heap, base::Semaphore* semaphore) {
heap, heap->isolate()->cancelable_task_manager(), semaphore);
Address space_start = heap->new_space()->bottom();
Address space_end = heap->new_space()->top();
- for (Page* page : NewSpacePageRange(space_start, space_end)) {
+ for (Page* page : PageRange(space_start, space_end)) {
Address start =
page->Contains(space_start) ? space_start : page->area_start();
Address end = page->Contains(space_end) ? space_end : page->area_end();
diff --git a/deps/v8/src/heap/mark-compact.h b/deps/v8/src/heap/mark-compact.h
index de182073ea..9952b7953d 100644
--- a/deps/v8/src/heap/mark-compact.h
+++ b/deps/v8/src/heap/mark-compact.h
@@ -17,6 +17,8 @@
namespace v8 {
namespace internal {
+enum class MarkCompactMode { FULL, YOUNG_GENERATION };
+
// Callback function, returns whether an object is alive. The heap size
// of the object is returned in size. It optionally updates the offset
// to the first live object in the page (only used for old and map objects).
@@ -29,17 +31,15 @@ typedef void (*MarkObjectFunction)(Heap* heap, HeapObject* object);
class CodeFlusher;
class MarkCompactCollector;
class MarkingVisitor;
+template <MarkCompactMode mode>
class RootMarkingVisitor;
class ObjectMarking : public AllStatic {
public:
- INLINE(static MarkBit MarkBitFrom(Address addr)) {
- MemoryChunk* p = MemoryChunk::FromAddress(addr);
- return p->markbits()->MarkBitFromIndex(p->AddressToMarkbitIndex(addr));
- }
-
- INLINE(static MarkBit MarkBitFrom(HeapObject* obj)) {
- return MarkBitFrom(reinterpret_cast<Address>(obj));
+ V8_INLINE static MarkBit MarkBitFrom(HeapObject* obj) {
+ const Address address = obj->address();
+ MemoryChunk* p = MemoryChunk::FromAddress(address);
+ return p->markbits()->MarkBitFromIndex(p->AddressToMarkbitIndex(address));
}
static Marking::ObjectColor Color(HeapObject* obj) {
@@ -416,6 +416,9 @@ class MarkCompactCollector {
static void Initialize();
+ static SlotCallbackResult CheckAndMarkObject(Heap* heap,
+ Address slot_address);
+
void SetUp();
void TearDown();
@@ -435,12 +438,6 @@ class MarkCompactCollector {
void AbortCompaction();
-#ifdef DEBUG
- // Checks whether performing mark-compact collection.
- bool in_use() { return state_ > PREPARE_GC; }
- bool are_map_pointers_encoded() { return state_ == UPDATE_POINTERS; }
-#endif
-
// Determine type of object and emit deletion log event.
static void ReportDeleteIfNeeded(HeapObject* obj, Isolate* isolate);
@@ -458,15 +455,6 @@ class MarkCompactCollector {
CodeFlusher* code_flusher() { return code_flusher_; }
inline bool is_code_flushing_enabled() const { return code_flusher_ != NULL; }
-#ifdef VERIFY_HEAP
- void VerifyValidStoreAndSlotsBufferEntries();
- void VerifyMarkbitsAreClean();
- static void VerifyMarkbitsAreClean(PagedSpace* space);
- static void VerifyMarkbitsAreClean(NewSpace* space);
- void VerifyWeakEmbeddedObjectsInCode();
- void VerifyOmittedMapChecks();
-#endif
-
INLINE(static bool ShouldSkipEvacuationSlotRecording(Object* host)) {
return Page::FromAddress(reinterpret_cast<Address>(host))
->ShouldSkipEvacuationSlotRecording();
@@ -493,8 +481,6 @@ class MarkCompactCollector {
bool is_compacting() const { return compacting_; }
- MarkingParity marking_parity() { return marking_parity_; }
-
// Ensures that sweeping is finished.
//
// Note: Can only be called safely from main thread.
@@ -525,6 +511,21 @@ class MarkCompactCollector {
Sweeper& sweeper() { return sweeper_; }
+#ifdef DEBUG
+ // Checks whether performing mark-compact collection.
+ bool in_use() { return state_ > PREPARE_GC; }
+ bool are_map_pointers_encoded() { return state_ == UPDATE_POINTERS; }
+#endif
+
+#ifdef VERIFY_HEAP
+ void VerifyValidStoreAndSlotsBufferEntries();
+ void VerifyMarkbitsAreClean();
+ static void VerifyMarkbitsAreClean(PagedSpace* space);
+ static void VerifyMarkbitsAreClean(NewSpace* space);
+ void VerifyWeakEmbeddedObjectsInCode();
+ void VerifyOmittedMapChecks();
+#endif
+
private:
template <PageEvacuationMode mode>
class EvacuateNewSpacePageVisitor;
@@ -564,8 +565,10 @@ class MarkCompactCollector {
friend class MarkCompactMarkingVisitor;
friend class MarkingVisitor;
friend class RecordMigratedSlotVisitor;
+ template <MarkCompactMode mode>
friend class RootMarkingVisitor;
friend class SharedFunctionInfoMarkingVisitor;
+ friend class StaticYoungGenerationMarkingVisitor;
// Mark code objects that are active on the stack to prevent them
// from being flushed.
@@ -575,6 +578,8 @@ class MarkCompactCollector {
// Marking operations for objects reachable from roots.
void MarkLiveObjects();
+ // Mark the young generation.
+ void MarkLiveObjectsInYoungGeneration();
// Pushes a black object onto the marking stack and accounts for live bytes.
// Note that this assumes live bytes have not yet been counted.
@@ -593,14 +598,15 @@ class MarkCompactCollector {
INLINE(void SetMark(HeapObject* obj, MarkBit mark_bit));
// Mark the heap roots and all objects reachable from them.
- void MarkRoots(RootMarkingVisitor* visitor);
+ void MarkRoots(RootMarkingVisitor<MarkCompactMode::FULL>* visitor);
// Mark the string table specially. References to internalized strings from
// the string table are weak.
- void MarkStringTable(RootMarkingVisitor* visitor);
+ void MarkStringTable(RootMarkingVisitor<MarkCompactMode::FULL>* visitor);
// Mark objects reachable (transitively) from objects in the marking stack
// or overflowed in the heap.
+ template <MarkCompactMode mode>
void ProcessMarkingDeque();
// Mark objects reachable (transitively) from objects in the marking stack
@@ -624,11 +630,13 @@ class MarkCompactCollector {
// stack. This function empties the marking stack, but may leave
// overflowed objects in the heap, in which case the marking stack's
// overflow flag will be set.
+ template <MarkCompactMode mode>
void EmptyMarkingDeque();
// Refill the marking stack with overflowed objects from the heap. This
// function either leaves the marking stack full or clears the overflow
// flag on the marking stack.
+ template <MarkCompactMode mode>
void RefillMarkingDeque();
// Helper methods for refilling the marking stack by discovering grey objects
@@ -733,8 +741,6 @@ class MarkCompactCollector {
CollectorState state_;
#endif
- MarkingParity marking_parity_;
-
bool was_marked_incrementally_;
bool evacuation_;
diff --git a/deps/v8/src/heap/memory-reducer.cc b/deps/v8/src/heap/memory-reducer.cc
index 2aed4c714a..3645547ef5 100644
--- a/deps/v8/src/heap/memory-reducer.cc
+++ b/deps/v8/src/heap/memory-reducer.cc
@@ -17,6 +17,8 @@ const int MemoryReducer::kLongDelayMs = 8000;
const int MemoryReducer::kShortDelayMs = 500;
const int MemoryReducer::kWatchdogDelayMs = 100000;
const int MemoryReducer::kMaxNumberOfGCs = 3;
+const double MemoryReducer::kCommittedMemoryFactor = 1.1;
+const size_t MemoryReducer::kCommittedMemoryDelta = 10 * MB;
MemoryReducer::TimerTask::TimerTask(MemoryReducer* memory_reducer)
: CancelableTask(memory_reducer->heap()->isolate()),
@@ -47,6 +49,7 @@ void MemoryReducer::TimerTask::RunInternal() {
event.can_start_incremental_gc =
heap->incremental_marking()->IsStopped() &&
(heap->incremental_marking()->CanBeActivated() || optimize_for_memory);
+ event.committed_memory = heap->CommittedOldGenerationMemory();
memory_reducer_->NotifyTimer(event);
}
@@ -128,17 +131,30 @@ bool MemoryReducer::WatchdogGC(const State& state, const Event& event) {
MemoryReducer::State MemoryReducer::Step(const State& state,
const Event& event) {
if (!FLAG_incremental_marking || !FLAG_memory_reducer) {
- return State(kDone, 0, 0, state.last_gc_time_ms);
+ return State(kDone, 0, 0, state.last_gc_time_ms, 0);
}
switch (state.action) {
case kDone:
if (event.type == kTimer) {
return state;
+ } else if (event.type == kMarkCompact) {
+ if (event.committed_memory <
+ Max(static_cast<size_t>(state.committed_memory_at_last_run *
+ kCommittedMemoryFactor),
+ state.committed_memory_at_last_run + kCommittedMemoryDelta)) {
+ return state;
+ } else {
+ return State(kWait, 0, event.time_ms + kLongDelayMs,
+ event.type == kMarkCompact ? event.time_ms
+ : state.last_gc_time_ms,
+ 0);
+ }
} else {
- DCHECK(event.type == kPossibleGarbage || event.type == kMarkCompact);
+ DCHECK_EQ(kPossibleGarbage, event.type);
return State(
kWait, 0, event.time_ms + kLongDelayMs,
- event.type == kMarkCompact ? event.time_ms : state.last_gc_time_ms);
+ event.type == kMarkCompact ? event.time_ms : state.last_gc_time_ms,
+ 0);
}
case kWait:
switch (event.type) {
@@ -146,23 +162,24 @@ MemoryReducer::State MemoryReducer::Step(const State& state,
return state;
case kTimer:
if (state.started_gcs >= kMaxNumberOfGCs) {
- return State(kDone, kMaxNumberOfGCs, 0.0, state.last_gc_time_ms);
+ return State(kDone, kMaxNumberOfGCs, 0.0, state.last_gc_time_ms,
+ event.committed_memory);
} else if (event.can_start_incremental_gc &&
(event.should_start_incremental_gc ||
WatchdogGC(state, event))) {
if (state.next_gc_start_ms <= event.time_ms) {
return State(kRun, state.started_gcs + 1, 0.0,
- state.last_gc_time_ms);
+ state.last_gc_time_ms, 0);
} else {
return state;
}
} else {
return State(kWait, state.started_gcs, event.time_ms + kLongDelayMs,
- state.last_gc_time_ms);
+ state.last_gc_time_ms, 0);
}
case kMarkCompact:
return State(kWait, state.started_gcs, event.time_ms + kLongDelayMs,
- event.time_ms);
+ event.time_ms, 0);
}
case kRun:
if (event.type != kMarkCompact) {
@@ -171,14 +188,15 @@ MemoryReducer::State MemoryReducer::Step(const State& state,
if (state.started_gcs < kMaxNumberOfGCs &&
(event.next_gc_likely_to_collect_more || state.started_gcs == 1)) {
return State(kWait, state.started_gcs, event.time_ms + kShortDelayMs,
- event.time_ms);
+ event.time_ms, 0);
} else {
- return State(kDone, kMaxNumberOfGCs, 0.0, event.time_ms);
+ return State(kDone, kMaxNumberOfGCs, 0.0, event.time_ms,
+ event.committed_memory);
}
}
}
UNREACHABLE();
- return State(kDone, 0, 0, 0.0); // Make the compiler happy.
+ return State(kDone, 0, 0, 0.0, 0); // Make the compiler happy.
}
@@ -192,7 +210,7 @@ void MemoryReducer::ScheduleTimer(double time_ms, double delay_ms) {
isolate, timer_task, (delay_ms + kSlackMs) / 1000.0);
}
-void MemoryReducer::TearDown() { state_ = State(kDone, 0, 0, 0.0); }
+void MemoryReducer::TearDown() { state_ = State(kDone, 0, 0, 0.0, 0); }
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/heap/memory-reducer.h b/deps/v8/src/heap/memory-reducer.h
index 0421987a3c..0f0ad6eaa0 100644
--- a/deps/v8/src/heap/memory-reducer.h
+++ b/deps/v8/src/heap/memory-reducer.h
@@ -86,15 +86,17 @@ class V8_EXPORT_PRIVATE MemoryReducer {
struct State {
State(Action action, int started_gcs, double next_gc_start_ms,
- double last_gc_time_ms)
+ double last_gc_time_ms, size_t committed_memory_at_last_run)
: action(action),
started_gcs(started_gcs),
next_gc_start_ms(next_gc_start_ms),
- last_gc_time_ms(last_gc_time_ms) {}
+ last_gc_time_ms(last_gc_time_ms),
+ committed_memory_at_last_run(committed_memory_at_last_run) {}
Action action;
int started_gcs;
double next_gc_start_ms;
double last_gc_time_ms;
+ size_t committed_memory_at_last_run;
};
enum EventType { kTimer, kMarkCompact, kPossibleGarbage };
@@ -102,6 +104,7 @@ class V8_EXPORT_PRIVATE MemoryReducer {
struct Event {
EventType type;
double time_ms;
+ size_t committed_memory;
bool next_gc_likely_to_collect_more;
bool should_start_incremental_gc;
bool can_start_incremental_gc;
@@ -109,7 +112,7 @@ class V8_EXPORT_PRIVATE MemoryReducer {
explicit MemoryReducer(Heap* heap)
: heap_(heap),
- state_(kDone, 0, 0.0, 0.0),
+ state_(kDone, 0, 0.0, 0.0, 0),
js_calls_counter_(0),
js_calls_sample_time_ms_(0.0) {}
// Callbacks.
@@ -126,6 +129,12 @@ class V8_EXPORT_PRIVATE MemoryReducer {
static const int kShortDelayMs;
static const int kWatchdogDelayMs;
static const int kMaxNumberOfGCs;
+ // The committed memory has to increase by at least this factor since the
+ // last run in order to trigger a new run after mark-compact.
+ static const double kCommittedMemoryFactor;
+ // The committed memory has to increase by at least this amount since the
+ // last run in order to trigger a new run after mark-compact.
+ static const size_t kCommittedMemoryDelta;
Heap* heap() { return heap_; }
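// The two constants above combine into the retrigger predicate used in the
// kDone/kMarkCompact case of MemoryReducer::Step earlier in this patch: a
// new reducer cycle starts only if committed memory grew by at least 10%
// and by at least 10 MB since the last run. A standalone sketch of that
// predicate:
#include <algorithm>
#include <cstddef>

bool ShouldRetriggerMemoryReducerSketch(std::size_t committed_now,
                                        std::size_t committed_at_last_run) {
  const double kCommittedMemoryFactor = 1.1;
  const std::size_t kCommittedMemoryDelta = 10 * 1024 * 1024;  // 10 MB
  std::size_t threshold = std::max(
      static_cast<std::size_t>(committed_at_last_run * kCommittedMemoryFactor),
      committed_at_last_run + kCommittedMemoryDelta);
  return committed_now >= threshold;
}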
diff --git a/deps/v8/src/heap/object-stats.cc b/deps/v8/src/heap/object-stats.cc
index ef5f65734e..9f534a20e4 100644
--- a/deps/v8/src/heap/object-stats.cc
+++ b/deps/v8/src/heap/object-stats.cc
@@ -441,10 +441,8 @@ void ObjectStatsCollector::RecordJSCollectionDetails(JSObject* obj) {
}
void ObjectStatsCollector::RecordScriptDetails(Script* obj) {
- Object* infos = WeakFixedArray::cast(obj->shared_function_infos());
- if (infos->IsWeakFixedArray())
- RecordFixedArrayHelper(obj, WeakFixedArray::cast(infos),
- SHARED_FUNCTION_INFOS_SUB_TYPE, 0);
+ FixedArray* infos = FixedArray::cast(obj->shared_function_infos());
+ RecordFixedArrayHelper(obj, infos, SHARED_FUNCTION_INFOS_SUB_TYPE, 0);
}
void ObjectStatsCollector::RecordMapDetails(Map* map_obj) {
@@ -546,13 +544,6 @@ void ObjectStatsCollector::RecordSharedFunctionInfoDetails(
if (!feedback_metadata->is_empty()) {
RecordFixedArrayHelper(sfi, feedback_metadata,
TYPE_FEEDBACK_METADATA_SUB_TYPE, 0);
- Object* names =
- feedback_metadata->get(TypeFeedbackMetadata::kNamesTableIndex);
- if (!names->IsSmi()) {
- UnseededNumberDictionary* names = UnseededNumberDictionary::cast(
- feedback_metadata->get(TypeFeedbackMetadata::kNamesTableIndex));
- RecordHashTableHelper(sfi, names, TYPE_FEEDBACK_METADATA_SUB_TYPE);
- }
}
if (!sfi->OptimizedCodeMapIsCleared()) {
diff --git a/deps/v8/src/heap/objects-visiting-inl.h b/deps/v8/src/heap/objects-visiting-inl.h
index f3502568d6..d86406bf5f 100644
--- a/deps/v8/src/heap/objects-visiting-inl.h
+++ b/deps/v8/src/heap/objects-visiting-inl.h
@@ -60,7 +60,6 @@ void StaticNewSpaceVisitor<StaticVisitor>::Initialize() {
int>::Visit);
table_.Register(kVisitByteArray, &VisitByteArray);
- table_.Register(kVisitBytecodeArray, &VisitBytecodeArray);
table_.Register(
kVisitSharedFunctionInfo,
@@ -103,19 +102,11 @@ void StaticNewSpaceVisitor<StaticVisitor>::Initialize() {
table_.template RegisterSpecializations<StructVisitor, kVisitStruct,
kVisitStructGeneric>();
-}
-template <typename StaticVisitor>
-int StaticNewSpaceVisitor<StaticVisitor>::VisitBytecodeArray(
- Map* map, HeapObject* object) {
- VisitPointers(
- map->GetHeap(), object,
- HeapObject::RawField(object, BytecodeArray::kConstantPoolOffset),
- HeapObject::RawField(object, BytecodeArray::kFrameSizeOffset));
- return reinterpret_cast<BytecodeArray*>(object)->BytecodeArraySize();
+ table_.Register(kVisitBytecodeArray, &UnreachableVisitor);
+ table_.Register(kVisitSharedFunctionInfo, &UnreachableVisitor);
}
-
template <typename StaticVisitor>
void StaticMarkingVisitor<StaticVisitor>::Initialize() {
table_.Register(kVisitShortcutCandidate,
@@ -157,10 +148,7 @@ void StaticMarkingVisitor<StaticVisitor>::Initialize() {
table_.Register(kVisitByteArray, &DataObjectVisitor::Visit);
- table_.Register(
- kVisitBytecodeArray,
- &FixedBodyVisitor<StaticVisitor, BytecodeArray::MarkingBodyDescriptor,
- void>::Visit);
+ table_.Register(kVisitBytecodeArray, &VisitBytecodeArray);
table_.Register(kVisitFreeSpace, &DataObjectVisitor::Visit);
@@ -286,7 +274,6 @@ void StaticMarkingVisitor<StaticVisitor>::VisitCodeTarget(Heap* heap,
StaticVisitor::MarkObject(heap, target);
}
-
template <typename StaticVisitor>
void StaticMarkingVisitor<StaticVisitor>::VisitCodeAgeSequence(
Heap* heap, RelocInfo* rinfo) {
@@ -298,6 +285,13 @@ void StaticMarkingVisitor<StaticVisitor>::VisitCodeAgeSequence(
StaticVisitor::MarkObject(heap, target);
}
+template <typename StaticVisitor>
+void StaticMarkingVisitor<StaticVisitor>::VisitBytecodeArray(
+ Map* map, HeapObject* object) {
+ FixedBodyVisitor<StaticVisitor, BytecodeArray::MarkingBodyDescriptor,
+ void>::Visit(map, object);
+ BytecodeArray::cast(object)->MakeOlder();
+}
template <typename StaticVisitor>
void StaticMarkingVisitor<StaticVisitor>::VisitNativeContext(
@@ -421,7 +415,7 @@ void StaticMarkingVisitor<StaticVisitor>::VisitCode(Map* map,
Heap* heap = map->GetHeap();
Code* code = Code::cast(object);
if (FLAG_age_code && !heap->isolate()->serializer_enabled()) {
- code->MakeOlder(heap->mark_compact_collector()->marking_parity());
+ code->MakeOlder();
}
CodeBodyVisitor::Visit(map, object);
}
@@ -435,12 +429,6 @@ void StaticMarkingVisitor<StaticVisitor>::VisitSharedFunctionInfo(
if (shared->ic_age() != heap->global_ic_age()) {
shared->ResetForNewContext(heap->global_ic_age());
}
- if (FLAG_flush_optimized_code_cache) {
- if (!shared->OptimizedCodeMapIsCleared()) {
- // Always flush the optimized code map if requested by flag.
- shared->ClearOptimizedCodeMap();
- }
- }
MarkCompactCollector* collector = heap->mark_compact_collector();
if (collector->is_code_flushing_enabled()) {
if (IsFlushable(heap, shared)) {
@@ -600,8 +588,8 @@ bool StaticMarkingVisitor<StaticVisitor>::IsFlushable(
return false;
}
- // The function must not be a builtin.
- if (shared_info->IsBuiltin()) {
+ // The function must be user code.
+ if (!shared_info->IsUserJavaScript()) {
return false;
}
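
The marking visitor in the hunks above now calls MakeOlder() on every bytecode array it marks, mirroring the aging path used for Code objects. A minimal sketch of that aging idea, with an assumed field name and threshold (the real BytecodeArray keeps its own age and limit):

// Hypothetical stand-in for the counter that MakeOlder() advances.
class AgedBytecode {
 public:
  static constexpr int kMaxAge = 6;  // assumed number of GCs before "old"

  // Called once per full mark-compact while the bytecode is still reachable.
  void MakeOlder() {
    if (age_ < kMaxAge) ++age_;
  }
  // A flushing heuristic could ask whether the bytecode has aged out.
  bool IsOld() const { return age_ == kMaxAge; }
  // Executing the bytecode would reset the age.
  void MarkUsed() { age_ = 0; }

 private:
  int age_ = 0;
};
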
diff --git a/deps/v8/src/heap/objects-visiting.cc b/deps/v8/src/heap/objects-visiting.cc
index d4aa8b2f00..146aa58675 100644
--- a/deps/v8/src/heap/objects-visiting.cc
+++ b/deps/v8/src/heap/objects-visiting.cc
@@ -107,7 +107,6 @@ StaticVisitorBase::VisitorId StaticVisitorBase::GetVisitorId(
case JS_ARGUMENTS_TYPE:
case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
case JS_GENERATOR_OBJECT_TYPE:
- case JS_FIXED_ARRAY_ITERATOR_TYPE:
case JS_MODULE_NAMESPACE_TYPE:
case JS_VALUE_TYPE:
case JS_DATE_TYPE:
@@ -159,6 +158,7 @@ StaticVisitorBase::VisitorId StaticVisitorBase::GetVisitorId(
case JS_FAST_HOLEY_DOUBLE_ARRAY_VALUE_ITERATOR_TYPE:
case JS_GENERIC_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_PROMISE_CAPABILITY_TYPE:
case JS_PROMISE_TYPE:
case JS_BOUND_FUNCTION_TYPE:
return GetVisitorIdForSize(kVisitJSObject, kVisitJSObjectGeneric,
diff --git a/deps/v8/src/heap/objects-visiting.h b/deps/v8/src/heap/objects-visiting.h
index 633c277eb0..e35e47c3aa 100644
--- a/deps/v8/src/heap/objects-visiting.h
+++ b/deps/v8/src/heap/objects-visiting.h
@@ -6,6 +6,7 @@
#define V8_OBJECTS_VISITING_H_
#include "src/allocation.h"
+#include "src/heap/embedder-tracing.h"
#include "src/heap/heap.h"
#include "src/heap/spaces.h"
#include "src/layout-descriptor.h"
@@ -267,12 +268,17 @@ class StaticNewSpaceVisitor : public StaticVisitorBase {
// Although we are using the JSFunction body descriptor which does not
// visit the code entry, the compiler wants it to be accessible.

// See JSFunction::BodyDescriptorImpl.
- INLINE(static void VisitCodeEntry(Heap* heap, HeapObject* object,
- Address entry_address)) {
+ inline static void VisitCodeEntry(Heap* heap, HeapObject* object,
+ Address entry_address) {
UNREACHABLE();
}
private:
+ inline static int UnreachableVisitor(Map* map, HeapObject* object) {
+ UNREACHABLE();
+ return 0;
+ }
+
INLINE(static int VisitByteArray(Map* map, HeapObject* object)) {
return reinterpret_cast<ByteArray*>(object)->ByteArraySize();
}
@@ -300,8 +306,6 @@ class StaticNewSpaceVisitor : public StaticVisitorBase {
return FreeSpace::cast(object)->size();
}
- INLINE(static int VisitBytecodeArray(Map* map, HeapObject* object));
-
class DataObjectVisitor {
public:
template <int object_size>
@@ -372,6 +376,7 @@ class StaticMarkingVisitor : public StaticVisitorBase {
protected:
INLINE(static void VisitMap(Map* map, HeapObject* object));
INLINE(static void VisitCode(Map* map, HeapObject* object));
+ INLINE(static void VisitBytecodeArray(Map* map, HeapObject* object));
INLINE(static void VisitSharedFunctionInfo(Map* map, HeapObject* object));
INLINE(static void VisitWeakCollection(Map* map, HeapObject* object));
INLINE(static void VisitJSFunction(Map* map, HeapObject* object));
@@ -420,7 +425,7 @@ class StaticMarkingVisitor : public StaticVisitorBase {
private:
INLINE(static void TracePossibleWrapper(HeapObject* object)) {
- if (object->GetHeap()->UsingEmbedderHeapTracer()) {
+ if (object->GetHeap()->local_embedder_heap_tracer()->InUse()) {
DCHECK(object->IsJSObject());
object->GetHeap()->TracePossibleWrapper(JSObject::cast(object));
}
diff --git a/deps/v8/src/heap/remembered-set.h b/deps/v8/src/heap/remembered-set.h
index a625b13dbf..cf17a46821 100644
--- a/deps/v8/src/heap/remembered-set.h
+++ b/deps/v8/src/heap/remembered-set.h
@@ -17,7 +17,7 @@ enum PointerDirection { OLD_TO_OLD, OLD_TO_NEW };
// TODO(ulan): Investigate performance of de-templatizing this class.
template <PointerDirection direction>
-class RememberedSet {
+class RememberedSet : public AllStatic {
public:
// Given a page and a slot in that page, this function adds the slot to the
// remembered set.
@@ -31,6 +31,19 @@ class RememberedSet {
slot_set[offset / Page::kPageSize].Insert(offset % Page::kPageSize);
}
+ // Given a page and a slot in that page, this function returns true if
+ // the remembered set contains the slot.
+ static bool Contains(MemoryChunk* chunk, Address slot_addr) {
+ DCHECK(chunk->Contains(slot_addr));
+ SlotSet* slot_set = GetSlotSet(chunk);
+ if (slot_set == nullptr) {
+ return false;
+ }
+ uintptr_t offset = slot_addr - chunk->address();
+ return slot_set[offset / Page::kPageSize].Contains(offset %
+ Page::kPageSize);
+ }
+
// Given a page and a slot in that page, this function removes the slot from
// the remembered set.
// If the slot was never added, then the function does nothing.
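
The new Contains() helper above makes it possible to check slot membership without mutating the set. A short usage sketch, assuming the V8 heap headers are available and that chunk and slot_addr are already valid; the wrapper name is hypothetical:

// Hypothetical caller: only record the slot if it is not in the set yet.
void RecordOldToNewSlotOnce(MemoryChunk* chunk, Address slot_addr) {
  if (!RememberedSet<OLD_TO_NEW>::Contains(chunk, slot_addr)) {
    RememberedSet<OLD_TO_NEW>::Insert(chunk, slot_addr);
  }
}
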
diff --git a/deps/v8/src/heap/scavenger.cc b/deps/v8/src/heap/scavenger.cc
index cad0e8af25..f2722e81de 100644
--- a/deps/v8/src/heap/scavenger.cc
+++ b/deps/v8/src/heap/scavenger.cc
@@ -139,7 +139,7 @@ class ScavengingVisitor : public StaticVisitorBase {
if (marks_handling == TRANSFER_MARKS) {
if (IncrementalMarking::TransferColor(source, target, size)) {
- MemoryChunk::IncrementLiveBytesFromGC(target, size);
+ MemoryChunk::IncrementLiveBytes(target, size);
}
}
}
diff --git a/deps/v8/src/heap/slot-set.h b/deps/v8/src/heap/slot-set.h
index da61052b8a..7612199c3c 100644
--- a/deps/v8/src/heap/slot-set.h
+++ b/deps/v8/src/heap/slot-set.h
@@ -66,6 +66,18 @@ class SlotSet : public Malloced {
}
// The slot offset specifies a slot at address page_start_ + slot_offset.
+ // Returns true if the set contains the slot.
+ bool Contains(int slot_offset) {
+ int bucket_index, cell_index, bit_index;
+ SlotToIndices(slot_offset, &bucket_index, &cell_index, &bit_index);
+ base::AtomicValue<uint32_t>* current_bucket = bucket[bucket_index].Value();
+ if (current_bucket == nullptr) {
+ return false;
+ }
+ return (current_bucket[cell_index].Value() & (1u << bit_index)) != 0;
+ }
+
+ // The slot offset specifies a slot at address page_start_ + slot_offset.
void Remove(int slot_offset) {
int bucket_index, cell_index, bit_index;
SlotToIndices(slot_offset, &bucket_index, &cell_index, &bit_index);
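
Contains() reuses the same offset decomposition as Remove() above. A self-contained sketch of how a byte offset can map to (bucket, cell, bit) indices in such a bitmap; the bucket and cell sizes are assumptions, the real SlotSet defines its own constants:

#include <cstdint>

constexpr int kPointerSizeLog2 = 3;  // assumes 8-byte slots (64-bit build)
constexpr int kBitsPerCell = 32;     // one uint32_t cell covers 32 slots
constexpr int kCellsPerBucket = 32;  // assumed bucket size
constexpr int kBitsPerBucket = kBitsPerCell * kCellsPerBucket;

// Maps a byte offset within the page to bucket/cell/bit in the bitmap.
void SlotOffsetToIndices(int slot_offset, int* bucket, int* cell, int* bit) {
  int slot = slot_offset >> kPointerSizeLog2;  // one bit per pointer-sized slot
  *bucket = slot / kBitsPerBucket;
  *cell = (slot % kBitsPerBucket) / kBitsPerCell;
  *bit = slot % kBitsPerCell;
}
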
diff --git a/deps/v8/src/heap/spaces-inl.h b/deps/v8/src/heap/spaces-inl.h
index f3f9215f3d..2079a80a0b 100644
--- a/deps/v8/src/heap/spaces-inl.h
+++ b/deps/v8/src/heap/spaces-inl.h
@@ -28,10 +28,14 @@ PageIteratorImpl<PAGE_TYPE> PageIteratorImpl<PAGE_TYPE>::operator++(int) {
return tmp;
}
-NewSpacePageRange::NewSpacePageRange(Address start, Address limit)
- : range_(Page::FromAddress(start),
- Page::FromAllocationAreaAddress(limit)->next_page()) {
- SemiSpace::AssertValidRange(start, limit);
+PageRange::PageRange(Address start, Address limit)
+ : begin_(Page::FromAddress(start)),
+ end_(Page::FromAllocationAreaAddress(limit)->next_page()) {
+#ifdef DEBUG
+ if (begin_->InNewSpace()) {
+ SemiSpace::AssertValidRange(start, limit);
+ }
+#endif // DEBUG
}
// -----------------------------------------------------------------------------
@@ -221,7 +225,7 @@ void Page::InitializeFreeListCategories() {
}
}
-void MemoryChunk::IncrementLiveBytesFromGC(HeapObject* object, int by) {
+void MemoryChunk::IncrementLiveBytes(HeapObject* object, int by) {
MemoryChunk::FromAddress(object->address())->IncrementLiveBytes(by);
}
@@ -244,18 +248,8 @@ void MemoryChunk::IncrementLiveBytes(int by) {
DCHECK_LE(static_cast<size_t>(live_byte_count_), size_);
}
-void MemoryChunk::IncrementLiveBytesFromMutator(HeapObject* object, int by) {
- MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
- if (!chunk->InNewSpace() && !static_cast<Page*>(chunk)->SweepingDone()) {
- static_cast<PagedSpace*>(chunk->owner())->Allocate(by);
- }
- chunk->IncrementLiveBytes(by);
-}
-
bool PagedSpace::Contains(Address addr) {
- Page* p = Page::FromAddress(addr);
- if (!Page::IsValid(p)) return false;
- return p->owner() == this;
+ return MemoryChunk::FromAnyPointerAddress(heap(), addr)->owner() == this;
}
bool PagedSpace::Contains(Object* o) {
@@ -288,7 +282,7 @@ MemoryChunk* MemoryChunk::FromAnyPointerAddress(Heap* heap, Address addr) {
MemoryChunk* chunk = MemoryChunk::FromAddress(addr);
uintptr_t offset = addr - chunk->address();
if (offset < MemoryChunk::kHeaderSize || !chunk->HasPageHeader()) {
- chunk = heap->lo_space()->FindPage(addr);
+ chunk = heap->lo_space()->FindPageThreadSafe(addr);
}
return chunk;
}
@@ -436,11 +430,10 @@ AllocationResult PagedSpace::AllocateRawUnaligned(
if (object == NULL) {
object = SlowAllocateRaw(size_in_bytes);
}
- if (object != NULL) {
- if (heap()->incremental_marking()->black_allocation()) {
- Marking::MarkBlack(ObjectMarking::MarkBitFrom(object));
- MemoryChunk::IncrementLiveBytesFromGC(object, size_in_bytes);
- }
+ if (object != NULL && heap()->incremental_marking()->black_allocation()) {
+ Address start = object->address();
+ Address end = object->address() + size_in_bytes;
+ Page::FromAllocationAreaAddress(start)->CreateBlackArea(start, end);
}
}
@@ -479,12 +472,19 @@ AllocationResult PagedSpace::AllocateRawAligned(int size_in_bytes,
if (object == NULL) {
object = SlowAllocateRaw(allocation_size);
}
- if (object != NULL && filler_size != 0) {
- object = heap()->AlignWithFiller(object, size_in_bytes, allocation_size,
- alignment);
- // Filler objects are initialized, so mark only the aligned object memory
- // as uninitialized.
- allocation_size = size_in_bytes;
+ if (object != NULL) {
+ if (heap()->incremental_marking()->black_allocation()) {
+ Address start = object->address();
+ Address end = object->address() + allocation_size;
+ Page::FromAllocationAreaAddress(start)->CreateBlackArea(start, end);
+ }
+ if (filler_size != 0) {
+ object = heap()->AlignWithFiller(object, size_in_bytes, allocation_size,
+ alignment);
+ // Filler objects are initialized, so mark only the aligned object
+ // memory as uninitialized.
+ allocation_size = size_in_bytes;
+ }
}
}
@@ -596,6 +596,17 @@ LargePage* LargePage::Initialize(Heap* heap, MemoryChunk* chunk,
FATAL("Code page is too large.");
}
heap->incremental_marking()->SetOldSpacePageFlags(chunk);
+
+ MSAN_ALLOCATED_UNINITIALIZED_MEMORY(chunk->area_start(), chunk->area_size());
+
+ // Initialize the owner field for each contained page (except the first, which
+ // is initialized by MemoryChunk::Initialize).
+ for (Address addr = chunk->address() + Page::kPageSize + Page::kOwnerOffset;
+ addr < chunk->area_end(); addr += Page::kPageSize) {
+ // Clear out kPageHeaderTag.
+ Memory::Address_at(addr) = 0;
+ }
+
return static_cast<LargePage*>(chunk);
}
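
The spaces-inl.h changes above generalize page iteration: PageRange now accepts arbitrary address ranges and only asserts semi-space validity for new-space pages, which is what lets callers drop NewSpacePageRange. A usage sketch, with range_start/range_limit standing in for whatever addresses a caller has:

// Hypothetical caller: visit every page overlapping [range_start, range_limit).
void ForAllPagesInRange(Address range_start, Address range_limit) {
  for (Page* page : PageRange(range_start, range_limit)) {
    // e.g. set or clear a per-page flag here
    USE(page);
  }
}
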
diff --git a/deps/v8/src/heap/spaces.cc b/deps/v8/src/heap/spaces.cc
index e0e6d12fda..8d98520d43 100644
--- a/deps/v8/src/heap/spaces.cc
+++ b/deps/v8/src/heap/spaces.cc
@@ -335,7 +335,7 @@ class MemoryAllocator::Unmapper::UnmapFreeMemoryTask : public v8::Task {
private:
// v8::Task overrides.
void Run() override {
- unmapper_->PerformFreeMemoryOnQueuedChunks();
+ unmapper_->PerformFreeMemoryOnQueuedChunks<FreeMode::kUncommitPooled>();
unmapper_->pending_unmapping_tasks_semaphore_.Signal();
}
@@ -350,7 +350,7 @@ void MemoryAllocator::Unmapper::FreeQueuedChunks() {
new UnmapFreeMemoryTask(this), v8::Platform::kShortRunningTask);
concurrent_unmapping_tasks_active_++;
} else {
- PerformFreeMemoryOnQueuedChunks();
+ PerformFreeMemoryOnQueuedChunks<FreeMode::kUncommitPooled>();
}
}
@@ -364,6 +364,7 @@ bool MemoryAllocator::Unmapper::WaitUntilCompleted() {
return waited;
}
+template <MemoryAllocator::Unmapper::FreeMode mode>
void MemoryAllocator::Unmapper::PerformFreeMemoryOnQueuedChunks() {
MemoryChunk* chunk = nullptr;
// Regular chunks.
@@ -372,6 +373,14 @@ void MemoryAllocator::Unmapper::PerformFreeMemoryOnQueuedChunks() {
allocator_->PerformFreeMemory(chunk);
if (pooled) AddMemoryChunkSafe<kPooled>(chunk);
}
+ if (mode == MemoryAllocator::Unmapper::FreeMode::kReleasePooled) {
+ // The previous loop uncommitted any pages marked as pooled and added them
+ // to the pooled list. In kReleasePooled mode these pages must actually be
+ // freed as well.
+ while ((chunk = GetMemoryChunkSafe<kPooled>()) != nullptr) {
+ allocator_->Free<MemoryAllocator::kAlreadyPooled>(chunk);
+ }
+ }
// Non-regular chunks.
while ((chunk = GetMemoryChunkSafe<kNonRegular>()) != nullptr) {
allocator_->PerformFreeMemory(chunk);
@@ -382,7 +391,10 @@ void MemoryAllocator::Unmapper::TearDown() {
WaitUntilCompleted();
ReconsiderDelayedChunks();
CHECK(delayed_regular_chunks_.empty());
- PerformFreeMemoryOnQueuedChunks();
+ PerformFreeMemoryOnQueuedChunks<FreeMode::kReleasePooled>();
+ for (int i = 0; i < kNumberOfChunkQueues; i++) {
+ DCHECK(chunks_[i].empty());
+ }
}
void MemoryAllocator::Unmapper::ReconsiderDelayedChunks() {
@@ -833,6 +845,16 @@ size_t Page::ShrinkToHighWaterMark() {
return unused;
}
+void Page::CreateBlackArea(Address start, Address end) {
+ DCHECK(heap()->incremental_marking()->black_allocation());
+ DCHECK_EQ(Page::FromAddress(start), this);
+ DCHECK_NE(start, end);
+ DCHECK_EQ(Page::FromAddress(end - 1), this);
+ markbits()->SetRange(AddressToMarkbitIndex(start),
+ AddressToMarkbitIndex(end));
+ IncrementLiveBytes(static_cast<int>(end - start));
+}
+
void MemoryAllocator::PartialFreeMemory(MemoryChunk* chunk,
Address start_free) {
// We do not allow partial shrink for code.
@@ -899,6 +921,11 @@ void MemoryAllocator::Free(MemoryChunk* chunk) {
PreFreeMemory(chunk);
PerformFreeMemory(chunk);
break;
+ case kAlreadyPooled:
+ // Pooled pages cannot be touched anymore as their memory is uncommitted.
+ FreeMemory(chunk->address(), static_cast<size_t>(MemoryChunk::kPageSize),
+ Executability::NOT_EXECUTABLE);
+ break;
case kPooledAndQueue:
DCHECK_EQ(chunk->size(), static_cast<size_t>(MemoryChunk::kPageSize));
DCHECK_EQ(chunk->executable(), NOT_EXECUTABLE);
@@ -909,13 +936,14 @@ void MemoryAllocator::Free(MemoryChunk* chunk) {
// The chunks added to this queue will be freed by a concurrent thread.
unmapper()->AddMemoryChunkSafe(chunk);
break;
- default:
- UNREACHABLE();
}
}
template void MemoryAllocator::Free<MemoryAllocator::kFull>(MemoryChunk* chunk);
+template void MemoryAllocator::Free<MemoryAllocator::kAlreadyPooled>(
+ MemoryChunk* chunk);
+
template void MemoryAllocator::Free<MemoryAllocator::kPreFreeAndQueue>(
MemoryChunk* chunk);
@@ -1287,25 +1315,6 @@ bool PagedSpace::ContainsSlow(Address addr) {
return false;
}
-
-Object* PagedSpace::FindObject(Address addr) {
- // Note: this function can only be called on iterable spaces.
- DCHECK(!heap()->mark_compact_collector()->in_use());
-
- if (!Contains(addr)) return Smi::kZero; // Signaling not found.
-
- Page* p = Page::FromAddress(addr);
- HeapObjectIterator it(p);
- for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
- Address cur = obj->address();
- Address next = cur + obj->Size();
- if ((cur <= addr) && (addr < next)) return obj;
- }
-
- UNREACHABLE();
- return Smi::kZero;
-}
-
void PagedSpace::ShrinkImmortalImmovablePages() {
DCHECK(!heap()->deserialization_complete());
MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
@@ -1361,10 +1370,7 @@ void PagedSpace::SetAllocationInfo(Address top, Address limit) {
SetTopAndLimit(top, limit);
if (top != nullptr && top != limit &&
heap()->incremental_marking()->black_allocation()) {
- Page* page = Page::FromAllocationAreaAddress(top);
- page->markbits()->SetRange(page->AddressToMarkbitIndex(top),
- page->AddressToMarkbitIndex(limit));
- page->IncrementLiveBytes(static_cast<int>(limit - top));
+ Page::FromAllocationAreaAddress(top)->CreateBlackArea(top, limit);
}
}
@@ -1373,10 +1379,8 @@ void PagedSpace::MarkAllocationInfoBlack() {
Address current_top = top();
Address current_limit = limit();
if (current_top != nullptr && current_top != current_limit) {
- Page* page = Page::FromAllocationAreaAddress(current_top);
- page->markbits()->SetRange(page->AddressToMarkbitIndex(current_top),
- page->AddressToMarkbitIndex(current_limit));
- page->IncrementLiveBytes(static_cast<int>(current_limit - current_top));
+ Page::FromAllocationAreaAddress(current_top)
+ ->CreateBlackArea(current_top, current_limit);
}
}
@@ -2095,7 +2099,7 @@ void SemiSpace::set_age_mark(Address mark) {
DCHECK_EQ(Page::FromAllocationAreaAddress(mark)->owner(), this);
age_mark_ = mark;
// Mark all pages up to the one containing mark.
- for (Page* p : NewSpacePageRange(space_start(), mark)) {
+ for (Page* p : PageRange(space_start(), mark)) {
p->SetFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK);
}
}
@@ -2616,7 +2620,7 @@ HeapObject* FreeList::Allocate(size_t size_in_bytes) {
// Memory in the linear allocation area is counted as allocated. We may free
// a little of this again immediately - see below.
- owner_->Allocate(static_cast<int>(new_node_size));
+ owner_->AccountAllocatedBytes(new_node_size);
if (owner_->heap()->inline_allocation_disabled()) {
// Keep the linear allocation area empty if requested to do so, just
@@ -2806,7 +2810,6 @@ void PagedSpace::RepairFreeListsAfterDeserialization() {
}
}
-
HeapObject* PagedSpace::SweepAndRetryAllocation(int size_in_bytes) {
MarkCompactCollector* collector = heap()->mark_compact_collector();
if (collector->sweeping_in_progress()) {
@@ -2820,7 +2823,6 @@ HeapObject* PagedSpace::SweepAndRetryAllocation(int size_in_bytes) {
return nullptr;
}
-
HeapObject* CompactionSpace::SweepAndRetryAllocation(int size_in_bytes) {
MarkCompactCollector* collector = heap()->mark_compact_collector();
if (collector->sweeping_in_progress()) {
@@ -2877,9 +2879,7 @@ void PagedSpace::ReportStatistics() {
", available: %" V8PRIdPTR ", %%%d\n",
Capacity(), Waste(), Available(), pct);
- if (heap()->mark_compact_collector()->sweeping_in_progress()) {
- heap()->mark_compact_collector()->EnsureSweepingCompleted();
- }
+ heap()->mark_compact_collector()->EnsureSweepingCompleted();
ClearHistograms(heap()->isolate());
HeapObjectIterator obj_it(this);
for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next())
@@ -2994,7 +2994,6 @@ AllocationResult LargeObjectSpace::AllocateRaw(int object_size,
InsertChunkMapEntries(page);
HeapObject* object = page->GetObject();
- MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object->address(), object_size);
if (Heap::ShouldZapGarbage()) {
// Make the object consistent so the heap can be verified in OldSpaceStep.
@@ -3010,7 +3009,7 @@ AllocationResult LargeObjectSpace::AllocateRaw(int object_size,
if (heap()->incremental_marking()->black_allocation()) {
Marking::MarkBlack(ObjectMarking::MarkBitFrom(object));
- MemoryChunk::IncrementLiveBytesFromGC(object, object_size);
+ MemoryChunk::IncrementLiveBytes(object, object_size);
}
return object;
}
@@ -3033,6 +3032,10 @@ Object* LargeObjectSpace::FindObject(Address a) {
return Smi::kZero; // Signaling not found.
}
+LargePage* LargeObjectSpace::FindPageThreadSafe(Address a) {
+ base::LockGuard<base::Mutex> guard(&chunk_map_mutex_);
+ return FindPage(a);
+}
LargePage* LargeObjectSpace::FindPage(Address a) {
uintptr_t key = reinterpret_cast<uintptr_t>(a) / MemoryChunk::kAlignment;
@@ -3069,6 +3072,9 @@ void LargeObjectSpace::InsertChunkMapEntries(LargePage* page) {
uintptr_t start = reinterpret_cast<uintptr_t>(page) / MemoryChunk::kAlignment;
uintptr_t limit = (reinterpret_cast<uintptr_t>(page) + (page->size() - 1)) /
MemoryChunk::kAlignment;
+ // There may be concurrent access to the chunk map, so the lock has to be
+ // taken here.
+ base::LockGuard<base::Mutex> guard(&chunk_map_mutex_);
for (uintptr_t key = start; key <= limit; key++) {
base::HashMap::Entry* entry = chunk_map_.InsertNew(
reinterpret_cast<void*>(key), static_cast<uint32_t>(key));
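
FindPageThreadSafe() and InsertChunkMapEntries() above serialize access to the large-object chunk map with chunk_map_mutex_. The same pattern in a standalone sketch using standard-library types; the names here are generic stand-ins, not V8 API:

#include <cstdint>
#include <mutex>
#include <unordered_map>

class ChunkIndex {
 public:
  void Insert(uintptr_t key, void* page) {
    std::lock_guard<std::mutex> guard(mutex_);  // writers take the lock
    map_[key] = page;
  }
  void* FindThreadSafe(uintptr_t key) {
    std::lock_guard<std::mutex> guard(mutex_);  // readers take it as well
    auto it = map_.find(key);
    return it == map_.end() ? nullptr : it->second;
  }

 private:
  std::mutex mutex_;
  std::unordered_map<uintptr_t, void*> map_;
};
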
diff --git a/deps/v8/src/heap/spaces.h b/deps/v8/src/heap/spaces.h
index f5701adc69..48551fa264 100644
--- a/deps/v8/src/heap/spaces.h
+++ b/deps/v8/src/heap/spaces.h
@@ -317,8 +317,11 @@ class MemoryChunk {
static const intptr_t kAlignmentMask = kAlignment - 1;
static const intptr_t kSizeOffset = 0;
-
- static const intptr_t kFlagsOffset = kSizeOffset + kPointerSize;
+ static const intptr_t kFlagsOffset = kSizeOffset + kSizetSize;
+ static const intptr_t kAreaStartOffset = kFlagsOffset + kIntptrSize;
+ static const intptr_t kAreaEndOffset = kAreaStartOffset + kPointerSize;
+ static const intptr_t kReservationOffset = kAreaEndOffset + kPointerSize;
+ static const intptr_t kOwnerOffset = kReservationOffset + 2 * kPointerSize;
static const size_t kMinHeaderSize =
kSizeOffset + kSizetSize // size_t size
@@ -367,8 +370,7 @@ class MemoryChunk {
static const int kAllocatableMemory = kPageSize - kObjectStartOffset;
- static inline void IncrementLiveBytesFromMutator(HeapObject* object, int by);
- static inline void IncrementLiveBytesFromGC(HeapObject* object, int by);
+ static inline void IncrementLiveBytes(HeapObject* object, int by);
// Only works if the pointer is in the first kPageSize of the MemoryChunk.
static MemoryChunk* FromAddress(Address a) {
@@ -553,10 +555,11 @@ class MemoryChunk {
void set_prev_chunk(MemoryChunk* prev) { prev_chunk_.SetValue(prev); }
Space* owner() const {
- if ((reinterpret_cast<intptr_t>(owner_) & kPageHeaderTagMask) ==
- kPageHeaderTag) {
- return reinterpret_cast<Space*>(reinterpret_cast<intptr_t>(owner_) -
- kPageHeaderTag);
+ intptr_t owner_value = base::NoBarrierAtomicValue<intptr_t>::FromAddress(
+ const_cast<Address*>(&owner_))
+ ->Value();
+ if ((owner_value & kPageHeaderTagMask) == kPageHeaderTag) {
+ return reinterpret_cast<Space*>(owner_value - kPageHeaderTag);
} else {
return nullptr;
}
@@ -769,6 +772,8 @@ class Page : public MemoryChunk {
size_t ShrinkToHighWaterMark();
+ void CreateBlackArea(Address start, Address end);
+
#ifdef DEBUG
void Print();
#endif // DEBUG
@@ -1092,7 +1097,7 @@ class SkipList {
// A space acquires chunks of memory from the operating system. The memory
// allocator allocates and deallocates pages for the paged heap spaces and large
// pages for large object space.
-class MemoryAllocator {
+class V8_EXPORT_PRIVATE MemoryAllocator {
public:
// Unmapper takes care of concurrently unmapping and uncommitting memory
// chunks.
@@ -1144,6 +1149,11 @@ class MemoryAllocator {
kNumberOfChunkQueues,
};
+ enum class FreeMode {
+ kUncommitPooled,
+ kReleasePooled,
+ };
+
template <ChunkQueueType type>
void AddMemoryChunkSafe(MemoryChunk* chunk) {
base::LockGuard<base::Mutex> guard(&mutex_);
@@ -1165,6 +1175,7 @@ class MemoryAllocator {
}
void ReconsiderDelayedChunks();
+ template <FreeMode mode>
void PerformFreeMemoryOnQueuedChunks();
base::Mutex mutex_;
@@ -1187,6 +1198,7 @@ class MemoryAllocator {
enum FreeMode {
kFull,
+ kAlreadyPooled,
kPreFreeAndQueue,
kPooledAndQueue,
};
@@ -1376,6 +1388,15 @@ class MemoryAllocator {
DISALLOW_IMPLICIT_CONSTRUCTORS(MemoryAllocator);
};
+extern template Page*
+MemoryAllocator::AllocatePage<MemoryAllocator::kRegular, PagedSpace>(
+ size_t size, PagedSpace* owner, Executability executable);
+extern template Page*
+MemoryAllocator::AllocatePage<MemoryAllocator::kRegular, SemiSpace>(
+ size_t size, SemiSpace* owner, Executability executable);
+extern template Page*
+MemoryAllocator::AllocatePage<MemoryAllocator::kPooled, SemiSpace>(
+ size_t size, SemiSpace* owner, Executability executable);
// -----------------------------------------------------------------------------
// Interface for heap object iterator to be implemented by all object space
@@ -1419,6 +1440,8 @@ class PageRange {
typedef PageIterator iterator;
PageRange(Page* begin, Page* end) : begin_(begin), end_(end) {}
explicit PageRange(Page* page) : PageRange(page, page->next_page()) {}
+ inline PageRange(Address start, Address limit);
+
iterator begin() { return iterator(begin_); }
iterator end() { return iterator(end_); }
@@ -1641,7 +1664,7 @@ class AllocationStats BASE_EMBEDDED {
// words in size.
// At least 16384 words (huge): This list is for objects of 2048 words or
// larger. Empty pages are also added to this list.
-class FreeList {
+class V8_EXPORT_PRIVATE FreeList {
public:
// This method returns how much memory can be allocated after freeing
// maximum_freed memory.
@@ -1878,18 +1901,7 @@ class LocalAllocationBuffer {
AllocationInfo allocation_info_;
};
-class NewSpacePageRange {
- public:
- typedef PageRange::iterator iterator;
- inline NewSpacePageRange(Address start, Address limit);
- iterator begin() { return range_.begin(); }
- iterator end() { return range_.end(); }
-
- private:
- PageRange range_;
-};
-
-class PagedSpace : public Space {
+class V8_EXPORT_PRIVATE PagedSpace : NON_EXPORTED_BASE(public Space) {
public:
typedef PageIterator iterator;
@@ -1915,12 +1927,6 @@ class PagedSpace : public Space {
inline bool Contains(Object* o);
bool ContainsSlow(Address addr);
- // Given an address occupied by a live object, return that object if it is
- // in this space, or a Smi if it is not. The implementation iterates over
- // objects in the page containing the address, the cost is linear in the
- // number of objects in the page. It may be slow.
- Object* FindObject(Address addr);
-
// During boot the free_space_map is created, and afterwards we may need
// to write it into the free list nodes that were already created.
void RepairFreeListsAfterDeserialization();
@@ -2034,7 +2040,9 @@ class PagedSpace : public Space {
void MarkAllocationInfoBlack();
- void Allocate(int bytes) { accounting_stats_.AllocateBytes(bytes); }
+ void AccountAllocatedBytes(size_t bytes) {
+ accounting_stats_.AllocateBytes(bytes);
+ }
void IncreaseCapacity(size_t bytes);
@@ -2820,6 +2828,9 @@ class LargeObjectSpace : public Space {
// The function iterates through all objects in this space, may be slow.
Object* FindObject(Address a);
+ // Takes the chunk_map_mutex_ and then calls FindPage.
+ LargePage* FindPageThreadSafe(Address a);
+
// Finds a large object page containing the given address, returns NULL
// if such a page doesn't exist.
LargePage* FindPage(Address a);
@@ -2870,6 +2881,9 @@ class LargeObjectSpace : public Space {
size_t size_; // allocated bytes
int page_count_; // number of chunks
size_t objects_size_; // size of objects
+ // The chunk_map_mutex_ has to be used when the chunk map is accessed
+ // concurrently.
+ base::Mutex chunk_map_mutex_;
// Map MemoryChunk::kAlignment-aligned chunks to large pages covering them
base::HashMap chunk_map_;
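
The owner() accessor in the spaces.h hunk above now reads the tagged owner word through a no-barrier atomic value so concurrent readers see a consistent word. A sketch of the equivalent idea with std::atomic; the tag and mask values are assumptions, not the real constants:

#include <atomic>
#include <cstdint>

constexpr intptr_t kTagAssumed = 3;   // assumed page-header tag
constexpr intptr_t kMaskAssumed = 3;  // assumed tag mask

// Relaxed load of a tagged pointer word; returns nullptr if the tag is absent.
void* OwnerFromTaggedWord(const std::atomic<intptr_t>& owner_word) {
  intptr_t value = owner_word.load(std::memory_order_relaxed);
  if ((value & kMaskAssumed) == kTagAssumed) {
    return reinterpret_cast<void*>(value - kTagAssumed);
  }
  return nullptr;
}
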
diff --git a/deps/v8/src/heap/store-buffer.cc b/deps/v8/src/heap/store-buffer.cc
index 974b85e1c8..94a8ca81b7 100644
--- a/deps/v8/src/heap/store-buffer.cc
+++ b/deps/v8/src/heap/store-buffer.cc
@@ -16,13 +16,19 @@ namespace v8 {
namespace internal {
StoreBuffer::StoreBuffer(Heap* heap)
- : heap_(heap), top_(nullptr), current_(0), virtual_memory_(nullptr) {
+ : heap_(heap),
+ top_(nullptr),
+ current_(0),
+ mode_(NOT_IN_GC),
+ virtual_memory_(nullptr) {
for (int i = 0; i < kStoreBuffers; i++) {
start_[i] = nullptr;
limit_[i] = nullptr;
lazy_top_[i] = nullptr;
}
task_running_ = false;
+ insertion_callback = &InsertDuringRuntime;
+ deletion_callback = &DeleteDuringRuntime;
}
void StoreBuffer::SetUp() {
@@ -85,7 +91,7 @@ void StoreBuffer::FlipStoreBuffers() {
current_ = other;
top_ = start_[current_];
- if (!task_running_) {
+ if (!task_running_ && FLAG_concurrent_sweeping) {
task_running_ = true;
Task* task = new Task(heap_->isolate(), this);
V8::GetCurrentPlatform()->CallOnBackgroundThread(
@@ -137,29 +143,5 @@ void StoreBuffer::ConcurrentlyProcessStoreBuffer() {
task_running_ = false;
}
-void StoreBuffer::DeleteEntry(Address start, Address end) {
- // Deletions coming from the GC are directly deleted from the remembered
- // set. Deletions coming from the runtime are added to the store buffer
- // to allow concurrent processing.
- if (heap_->gc_state() == Heap::NOT_IN_GC) {
- if (top_ + sizeof(Address) * 2 > limit_[current_]) {
- StoreBufferOverflow(heap_->isolate());
- }
- *top_ = MarkDeletionAddress(start);
- top_++;
- *top_ = end;
- top_++;
- } else {
- // In GC the store buffer has to be empty at any time.
- DCHECK(Empty());
- Page* page = Page::FromAddress(start);
- if (end) {
- RememberedSet<OLD_TO_NEW>::RemoveRange(page, start, end,
- SlotSet::PREFREE_EMPTY_BUCKETS);
- } else {
- RememberedSet<OLD_TO_NEW>::Remove(page, start);
- }
- }
-}
} // namespace internal
} // namespace v8
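
The store-buffer changes here and in the header below replace a per-call gc_state check with callbacks that are swapped once per phase via SetMode. A simplified, self-contained sketch of that dispatch pattern; the class and member names are stand-ins, not the real StoreBuffer API:

#include <functional>
#include <vector>

class TinyStoreBuffer {
 public:
  void SetInGC(bool in_gc) {
    // Swap the callback once instead of branching on GC state per insertion.
    insertion_callback_ = in_gc ? &InsertDuringGC : &InsertDuringRuntime;
  }
  void Insert(void* slot) { insertion_callback_(this, slot); }

 private:
  static void InsertDuringRuntime(TinyStoreBuffer* buffer, void* slot) {
    buffer->buffered_.push_back(slot);  // queue for concurrent processing
  }
  static void InsertDuringGC(TinyStoreBuffer*, void*) {
    // Would write straight into the remembered set during GC.
  }

  std::function<void(TinyStoreBuffer*, void*)> insertion_callback_ =
      &InsertDuringRuntime;
  std::vector<void*> buffered_;
};
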
diff --git a/deps/v8/src/heap/store-buffer.h b/deps/v8/src/heap/store-buffer.h
index 09faf4dcbd..be46cb3242 100644
--- a/deps/v8/src/heap/store-buffer.h
+++ b/deps/v8/src/heap/store-buffer.h
@@ -24,7 +24,9 @@ namespace internal {
// slots are moved to the remembered set.
class StoreBuffer {
public:
- static const int kStoreBufferSize = 1 << (14 + kPointerSizeLog2);
+ enum StoreBufferMode { IN_GC, NOT_IN_GC };
+
+ static const int kStoreBufferSize = 1 << (11 + kPointerSizeLog2);
static const int kStoreBufferMask = kStoreBufferSize - 1;
static const int kStoreBuffers = 2;
static const intptr_t kDeletionTag = 1;
@@ -63,22 +65,77 @@ class StoreBuffer {
// If we only want to delete a single slot, end should be set to null which
// will be written into the second field. When processing the store buffer
// the more efficient Remove method will be called in this case.
- void DeleteEntry(Address start, Address end = nullptr);
+ void DeleteEntry(Address start, Address end = nullptr) {
+ // Deletions coming from the GC are applied directly to the remembered
+ // set. Deletions coming from the runtime are added to the store buffer
+ // to allow concurrent processing.
+ deletion_callback(this, start, end);
+ }
+
+ static void DeleteDuringGarbageCollection(StoreBuffer* store_buffer,
+ Address start, Address end) {
+ // During GC the store buffer has to be empty at all times.
+ DCHECK(store_buffer->Empty());
+ DCHECK(store_buffer->mode() != StoreBuffer::NOT_IN_GC);
+ Page* page = Page::FromAddress(start);
+ if (end) {
+ RememberedSet<OLD_TO_NEW>::RemoveRange(page, start, end,
+ SlotSet::PREFREE_EMPTY_BUCKETS);
+ } else {
+ RememberedSet<OLD_TO_NEW>::Remove(page, start);
+ }
+ }
+
+ static void DeleteDuringRuntime(StoreBuffer* store_buffer, Address start,
+ Address end) {
+ DCHECK(store_buffer->mode() == StoreBuffer::NOT_IN_GC);
+ store_buffer->InsertDeletionIntoStoreBuffer(start, end);
+ }
+
+ void InsertDeletionIntoStoreBuffer(Address start, Address end) {
+ if (top_ + sizeof(Address) * 2 > limit_[current_]) {
+ StoreBufferOverflow(heap_->isolate());
+ }
+ *top_ = MarkDeletionAddress(start);
+ top_++;
+ *top_ = end;
+ top_++;
+ }
+
+ static void InsertDuringGarbageCollection(StoreBuffer* store_buffer,
+ Address slot) {
+ DCHECK(store_buffer->mode() != StoreBuffer::NOT_IN_GC);
+ RememberedSet<OLD_TO_NEW>::Insert(Page::FromAddress(slot), slot);
+ }
+
+ static void InsertDuringRuntime(StoreBuffer* store_buffer, Address slot) {
+ DCHECK(store_buffer->mode() == StoreBuffer::NOT_IN_GC);
+ store_buffer->InsertIntoStoreBuffer(slot);
+ }
+
+ void InsertIntoStoreBuffer(Address slot) {
+ if (top_ + sizeof(Address) > limit_[current_]) {
+ StoreBufferOverflow(heap_->isolate());
+ }
+ *top_ = slot;
+ top_++;
+ }
void InsertEntry(Address slot) {
// Insertions coming from the GC are directly inserted into the remembered
// set. Insertions coming from the runtime are added to the store buffer to
// allow concurrent processing.
- if (heap_->gc_state() == Heap::NOT_IN_GC) {
- if (top_ + sizeof(Address) > limit_[current_]) {
- StoreBufferOverflow(heap_->isolate());
- }
- *top_ = slot;
- top_++;
+ insertion_callback(this, slot);
+ }
+
+ void SetMode(StoreBufferMode mode) {
+ mode_ = mode;
+ if (mode == NOT_IN_GC) {
+ insertion_callback = &InsertDuringRuntime;
+ deletion_callback = &DeleteDuringRuntime;
} else {
- // In GC the store buffer has to be empty at any time.
- DCHECK(Empty());
- RememberedSet<OLD_TO_NEW>::Insert(Page::FromAddress(slot), slot);
+ insertion_callback = &InsertDuringGarbageCollection;
+ deletion_callback = &DeleteDuringGarbageCollection;
}
}
@@ -95,6 +152,8 @@ class StoreBuffer {
return top_ == start_[current_];
}
+ Heap* heap() { return heap_; }
+
private:
// There are two store buffers. If one store buffer fills up, the main thread
// publishes the top pointer of the store buffer that needs processing in its
@@ -119,6 +178,8 @@ class StoreBuffer {
DISALLOW_COPY_AND_ASSIGN(Task);
};
+ StoreBufferMode mode() const { return mode_; }
+
void FlipStoreBuffers();
Heap* heap_;
@@ -142,7 +203,17 @@ class StoreBuffer {
// Points to the current buffer in use.
int current_;
+ // During GC, entries are directly added to the remembered set without
+ // going through the store buffer. This is signaled by a special
+ // IN_GC mode.
+ StoreBufferMode mode_;
+
base::VirtualMemory* virtual_memory_;
+
+ // Callbacks are more efficient than checking the GC state on every store
+ // buffer operation.
+ std::function<void(StoreBuffer*, Address)> insertion_callback;
+ std::function<void(StoreBuffer*, Address, Address)> deletion_callback;
};
} // namespace internal