Diffstat (limited to 'deps/v8/src/heap/heap.cc')
-rw-r--r--  deps/v8/src/heap/heap.cc  703
1 file changed, 173 insertions, 530 deletions
diff --git a/deps/v8/src/heap/heap.cc b/deps/v8/src/heap/heap.cc
index 2ec30635be..b509d21142 100644
--- a/deps/v8/src/heap/heap.cc
+++ b/deps/v8/src/heap/heap.cc
@@ -33,7 +33,6 @@
#include "src/heap/heap-controller.h"
#include "src/heap/heap-write-barrier-inl.h"
#include "src/heap/incremental-marking.h"
-#include "src/heap/item-parallel-job.h"
#include "src/heap/mark-compact-inl.h"
#include "src/heap/mark-compact.h"
#include "src/heap/memory-reducer.h"
@@ -108,12 +107,7 @@ bool Heap::GCCallbackTuple::operator==(
}
Heap::GCCallbackTuple& Heap::GCCallbackTuple::operator=(
- const Heap::GCCallbackTuple& other) {
- callback = other.callback;
- gc_type = other.gc_type;
- data = other.data;
- return *this;
-}
+ const Heap::GCCallbackTuple& other) = default;
struct Heap::StrongRootsList {
Object** start;
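The hunk above replaces a hand-written copy-assignment operator with a defaulted one. A defaulted copy assignment performs the same memberwise copy, so the behaviour is unchanged. A minimal standalone sketch (illustration only, not V8 code; the member names are stand-ins):

    #include <cassert>

    struct CallbackTuple {
      void (*callback)(void*) = nullptr;
      int gc_type = 0;
      void* data = nullptr;
      // Stands in for: callback = other.callback; gc_type = other.gc_type;
      //                data = other.data; return *this;
      CallbackTuple& operator=(const CallbackTuple& other) = default;
    };

    int main() {
      CallbackTuple a, b;
      a.gc_type = 7;
      b = a;  // memberwise copy, same as the removed hand-written version
      assert(b.gc_type == 7);
      return 0;
    }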
@@ -135,112 +129,17 @@ class IdleScavengeObserver : public AllocationObserver {
};
Heap::Heap()
- : external_memory_(0),
- external_memory_limit_(kExternalAllocationSoftLimit),
- external_memory_at_last_mark_compact_(0),
- external_memory_concurrently_freed_(0),
- isolate_(nullptr),
- code_range_size_(0),
- // semispace_size_ should be a power of 2 and old_generation_size_ should
- // be a multiple of Page::kPageSize.
- max_semi_space_size_(8 * (kPointerSize / 4) * MB),
- initial_semispace_size_(kMinSemiSpaceSizeInKB * KB),
- max_old_generation_size_(700ul * (kPointerSize / 4) * MB),
- initial_max_old_generation_size_(max_old_generation_size_),
+ : initial_max_old_generation_size_(max_old_generation_size_),
initial_old_generation_size_(max_old_generation_size_ /
kInitalOldGenerationLimitFactor),
- old_generation_size_configured_(false),
- // Variables set based on semispace_size_ and old_generation_size_ in
- // ConfigureHeap.
- // Will be 4 * reserved_semispace_size_ to ensure that young
- // generation can be aligned to its size.
- maximum_committed_(0),
- survived_since_last_expansion_(0),
- survived_last_scavenge_(0),
- always_allocate_scope_count_(0),
memory_pressure_level_(MemoryPressureLevel::kNone),
- contexts_disposed_(0),
- number_of_disposed_maps_(0),
- new_space_(nullptr),
- old_space_(nullptr),
- code_space_(nullptr),
- map_space_(nullptr),
- lo_space_(nullptr),
- new_lo_space_(nullptr),
- read_only_space_(nullptr),
- write_protect_code_memory_(false),
- code_space_memory_modification_scope_depth_(0),
- gc_state_(NOT_IN_GC),
- gc_post_processing_depth_(0),
- allocations_count_(0),
- raw_allocations_hash_(0),
- stress_marking_observer_(nullptr),
- stress_scavenge_observer_(nullptr),
- allocation_step_in_progress_(false),
- max_marking_limit_reached_(0.0),
- ms_count_(0),
- gc_count_(0),
- consecutive_ineffective_mark_compacts_(0),
- mmap_region_base_(0),
- remembered_unmapped_pages_index_(0),
old_generation_allocation_limit_(initial_old_generation_size_),
- inline_allocation_disabled_(false),
- tracer_(nullptr),
- promoted_objects_size_(0),
- promotion_ratio_(0),
- semi_space_copied_object_size_(0),
- previous_semi_space_copied_object_size_(0),
- semi_space_copied_rate_(0),
- nodes_died_in_new_space_(0),
- nodes_copied_in_new_space_(0),
- nodes_promoted_(0),
- maximum_size_scavenges_(0),
- last_idle_notification_time_(0.0),
- last_gc_time_(0.0),
- mark_compact_collector_(nullptr),
- minor_mark_compact_collector_(nullptr),
- array_buffer_collector_(nullptr),
- memory_allocator_(nullptr),
- store_buffer_(nullptr),
- incremental_marking_(nullptr),
- concurrent_marking_(nullptr),
- gc_idle_time_handler_(nullptr),
- memory_reducer_(nullptr),
- live_object_stats_(nullptr),
- dead_object_stats_(nullptr),
- scavenge_job_(nullptr),
- parallel_scavenge_semaphore_(0),
- idle_scavenge_observer_(nullptr),
- new_space_allocation_counter_(0),
- old_generation_allocation_counter_at_last_gc_(0),
- old_generation_size_at_last_gc_(0),
global_pretenuring_feedback_(kInitialFeedbackCapacity),
- is_marking_flag_(false),
- ring_buffer_full_(false),
- ring_buffer_end_(0),
- configured_(false),
- current_gc_flags_(Heap::kNoGCFlags),
current_gc_callback_flags_(GCCallbackFlags::kNoGCCallbackFlags),
- external_string_table_(this),
- gc_callbacks_depth_(0),
- deserialization_complete_(false),
- strong_roots_list_(nullptr),
- heap_iterator_depth_(0),
- local_embedder_heap_tracer_(nullptr),
- fast_promotion_mode_(false),
- force_oom_(false),
- delay_sweeper_tasks_for_testing_(false),
- pending_layout_change_object_(nullptr),
- unprotected_memory_chunks_registry_enabled_(false)
-#ifdef V8_ENABLE_ALLOCATION_TIMEOUT
- ,
- allocation_timeout_(0)
-#endif // V8_ENABLE_ALLOCATION_TIMEOUT
-{
+ external_string_table_(this) {
// Ensure old_generation_size_ is a multiple of kPageSize.
DCHECK_EQ(0, max_old_generation_size_ & (Page::kPageSize - 1));
- memset(roots_, 0, sizeof(roots_[0]) * kRootListLength);
set_native_contexts_list(nullptr);
set_allocation_sites_list(Smi::kZero);
// Put a dummy entry in the remembered pages so we can find the list the
@@ -259,8 +158,8 @@ size_t Heap::ComputeMaxOldGenerationSize(uint64_t physical_memory) {
size_t computed_size = static_cast<size_t>(physical_memory / i::MB /
old_space_physical_memory_factor *
kPointerMultiplier);
- return Max(Min(computed_size, HeapController::kMaxHeapSize),
- HeapController::kMinHeapSize);
+ return Max(Min(computed_size, HeapController::kMaxSize),
+ HeapController::kMinSize);
}
size_t Heap::Capacity() {
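The renamed constants still bound the computed size the same way: divide physical memory by a per-platform factor, scale by the pointer multiplier, then clamp the result into [kMinSize, kMaxSize]. A standalone sketch of that clamp with assumed placeholder values (the real constants live in HeapController and differ per build):

    #include <algorithm>
    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    // Placeholders only; not the values HeapController actually uses.
    constexpr size_t kMinSizeMB = 128;   // stand-in for HeapController::kMinSize
    constexpr size_t kMaxSizeMB = 1024;  // stand-in for HeapController::kMaxSize

    size_t ComputeMaxOldGenerationSizeMB(uint64_t physical_memory,
                                         size_t physical_memory_factor,
                                         size_t pointer_multiplier) {
      size_t computed = static_cast<size_t>(
          physical_memory / (1024 * 1024) / physical_memory_factor *
          pointer_multiplier);
      return std::max(std::min(computed, kMaxSizeMB), kMinSizeMB);
    }

    int main() {
      // 8 GiB of RAM, factor 4, multiplier 2: 8192 / 4 * 2 = 4096 MB,
      // clamped down to the 1024 MB placeholder maximum.
      printf("%zu MB\n", ComputeMaxOldGenerationSizeMB(8ull << 30, 4, 2));
      return 0;
    }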
@@ -477,6 +376,8 @@ void Heap::PrintShortHeapStatistics() {
CommittedMemoryOfHeapAndUnmapper() / KB);
PrintIsolate(isolate_, "External memory reported: %6" PRId64 " KB\n",
external_memory_ / KB);
+ PrintIsolate(isolate_, "Backing store memory: %6" PRIuS " KB\n",
+ backing_store_bytes_ / KB);
PrintIsolate(isolate_, "External memory global %zu KB\n",
external_memory_callback_() / KB);
PrintIsolate(isolate_, "Total time spent in GC : %.1f ms\n",
@@ -531,7 +432,7 @@ bool Heap::IsRetainingPathTarget(HeapObject* object,
MaybeObject* object_to_check = HeapObjectReference::Weak(object);
for (int i = 0; i < length; i++) {
MaybeObject* target = targets->Get(i);
- DCHECK(target->IsWeakOrClearedHeapObject());
+ DCHECK(target->IsWeakOrCleared());
if (target == object_to_check) {
DCHECK(retaining_path_target_option_.count(i));
*option = retaining_path_target_option_[i];
@@ -705,7 +606,7 @@ const char* Heap::GetSpaceName(int idx) {
}
void Heap::SetRootCodeStubs(SimpleNumberDictionary* value) {
- roots_[kCodeStubsRootIndex] = value;
+ roots_[RootIndex::kCodeStubs] = value;
}
void Heap::RepairFreeListsAfterDeserialization() {
@@ -1219,13 +1120,16 @@ void Heap::CollectAllAvailableGarbage(GarbageCollectionReason gc_reason) {
// The optimizing compiler may be unnecessarily holding on to memory.
isolate()->AbortConcurrentOptimization(BlockingBehavior::kDontBlock);
isolate()->ClearSerializerData();
- set_current_gc_flags(kMakeHeapIterableMask | kReduceMemoryFootprintMask);
+ set_current_gc_flags(kReduceMemoryFootprintMask);
isolate_->compilation_cache()->Clear();
const int kMaxNumberOfAttempts = 7;
const int kMinNumberOfAttempts = 2;
+ const v8::GCCallbackFlags callback_flags =
+ gc_reason == GarbageCollectionReason::kLowMemoryNotification
+ ? v8::kGCCallbackFlagForced
+ : v8::kGCCallbackFlagCollectAllAvailableGarbage;
for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) {
- if (!CollectGarbage(OLD_SPACE, gc_reason,
- v8::kGCCallbackFlagCollectAllAvailableGarbage) &&
+ if (!CollectGarbage(OLD_SPACE, gc_reason, callback_flags) &&
attempt + 1 >= kMinNumberOfAttempts) {
break;
}
@@ -1234,7 +1138,7 @@ void Heap::CollectAllAvailableGarbage(GarbageCollectionReason gc_reason) {
set_current_gc_flags(kNoGCFlags);
new_space_->Shrink();
UncommitFromSpace();
- memory_allocator()->unmapper()->EnsureUnmappingCompleted();
+ EagerlyFreeExternalMemory();
if (FLAG_trace_duplicate_threshold_kb) {
std::map<int, std::vector<HeapObject*>> objects_by_size;
@@ -1259,6 +1163,15 @@ void Heap::CollectAllAvailableGarbage(GarbageCollectionReason gc_reason) {
}
}
+void Heap::PreciseCollectAllGarbage(int flags,
+ GarbageCollectionReason gc_reason,
+ const GCCallbackFlags gc_callback_flags) {
+ if (!incremental_marking()->IsStopped()) {
+ FinalizeIncrementalMarkingAtomically(gc_reason);
+ }
+ CollectAllGarbage(flags, gc_reason, gc_callback_flags);
+}
+
void Heap::ReportExternalMemoryPressure() {
const GCCallbackFlags kGCCallbackFlagsForExternalMemory =
static_cast<GCCallbackFlags>(
@@ -1267,7 +1180,7 @@ void Heap::ReportExternalMemoryPressure() {
if (external_memory_ >
(external_memory_at_last_mark_compact_ + external_memory_hard_limit())) {
CollectAllGarbage(
- kReduceMemoryFootprintMask | kFinalizeIncrementalMarkingMask,
+ kReduceMemoryFootprintMask,
GarbageCollectionReason::kExternalMemoryPressure,
static_cast<GCCallbackFlags>(kGCCallbackFlagCollectAllAvailableGarbage |
kGCCallbackFlagsForExternalMemory));
@@ -1377,8 +1290,8 @@ bool Heap::CollectGarbage(AllocationSpace space,
next_gc_likely_to_collect_more =
PerformGarbageCollection(collector, gc_callback_flags);
- if (collector == MARK_COMPACTOR) {
- tracer()->RecordMarkCompactHistograms(gc_type_timer);
+ if (collector == MARK_COMPACTOR || collector == SCAVENGER) {
+ tracer()->RecordGCPhasesHistograms(gc_type_timer);
}
}
@@ -1416,12 +1329,9 @@ bool Heap::CollectGarbage(AllocationSpace space,
isolate()->CountUsage(v8::Isolate::kForcedGC);
}
- // Start incremental marking for the next cycle. The heap snapshot
- // generator needs incremental marking to stay off after it aborted.
- // We do this only for scavenger to avoid a loop where mark-compact
- // causes another mark-compact.
- if (IsYoungGenerationCollector(collector) &&
- !ShouldAbortIncrementalMarking()) {
+ // Start incremental marking for the next cycle. We do this only for scavenger
+ // to avoid a loop where mark-compact causes another mark-compact.
+ if (IsYoungGenerationCollector(collector)) {
StartIncrementalMarkingIfAllocationLimitIsReached(
GCFlagsForIncrementalMarking(),
kGCCallbackScheduleIdleGarbageCollection);
@@ -1627,11 +1537,10 @@ bool Heap::ReserveSpace(Reservation* reservations, std::vector<Address>* maps) {
CollectGarbage(NEW_SPACE, GarbageCollectionReason::kDeserializer);
} else {
if (counter > 1) {
- CollectAllGarbage(
- kReduceMemoryFootprintMask | kAbortIncrementalMarkingMask,
- GarbageCollectionReason::kDeserializer);
+ CollectAllGarbage(kReduceMemoryFootprintMask,
+ GarbageCollectionReason::kDeserializer);
} else {
- CollectAllGarbage(kAbortIncrementalMarkingMask,
+ CollectAllGarbage(kNoGCFlags,
GarbageCollectionReason::kDeserializer);
}
}
@@ -1785,18 +1694,22 @@ bool Heap::PerformGarbageCollection(
external_memory_at_last_mark_compact_ = external_memory_;
external_memory_limit_ = external_memory_ + kExternalAllocationSoftLimit;
+ double max_factor =
+ heap_controller()->MaxGrowingFactor(max_old_generation_size_);
size_t new_limit = heap_controller()->CalculateAllocationLimit(
- old_gen_size, max_old_generation_size_, gc_speed, mutator_speed,
- new_space()->Capacity(), CurrentHeapGrowingMode());
+ old_gen_size, max_old_generation_size_, max_factor, gc_speed,
+ mutator_speed, new_space()->Capacity(), CurrentHeapGrowingMode());
old_generation_allocation_limit_ = new_limit;
CheckIneffectiveMarkCompact(
old_gen_size, tracer()->AverageMarkCompactMutatorUtilization());
} else if (HasLowYoungGenerationAllocationRate() &&
old_generation_size_configured_) {
+ double max_factor =
+ heap_controller()->MaxGrowingFactor(max_old_generation_size_);
size_t new_limit = heap_controller()->CalculateAllocationLimit(
- old_gen_size, max_old_generation_size_, gc_speed, mutator_speed,
- new_space()->Capacity(), CurrentHeapGrowingMode());
+ old_gen_size, max_old_generation_size_, max_factor, gc_speed,
+ mutator_speed, new_space()->Capacity(), CurrentHeapGrowingMode());
if (new_limit < old_generation_allocation_limit_) {
old_generation_allocation_limit_ = new_limit;
}
@@ -1940,26 +1853,6 @@ void Heap::CheckNewSpaceExpansionCriteria() {
}
}
-static bool IsUnscavengedHeapObject(Heap* heap, Object** p) {
- return Heap::InFromSpace(*p) &&
- !HeapObject::cast(*p)->map_word().IsForwardingAddress();
-}
-
-class ScavengeWeakObjectRetainer : public WeakObjectRetainer {
- public:
- virtual Object* RetainAs(Object* object) {
- if (!Heap::InFromSpace(object)) {
- return object;
- }
-
- MapWord map_word = HeapObject::cast(object)->map_word();
- if (map_word.IsForwardingAddress()) {
- return map_word.ToForwardingAddress();
- }
- return nullptr;
- }
-};
-
void Heap::EvacuateYoungGeneration() {
TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_FAST_PROMOTE);
base::LockGuard<base::Mutex> guard(relocation_mutex());
@@ -2003,79 +1896,6 @@ void Heap::EvacuateYoungGeneration() {
SetGCState(NOT_IN_GC);
}
-static bool IsLogging(Isolate* isolate) {
- return FLAG_verify_predictable || isolate->logger()->is_logging() ||
- isolate->is_profiling() ||
- (isolate->heap_profiler() != nullptr &&
- isolate->heap_profiler()->is_tracking_object_moves()) ||
- isolate->heap()->has_heap_object_allocation_tracker();
-}
-
-class PageScavengingItem final : public ItemParallelJob::Item {
- public:
- explicit PageScavengingItem(MemoryChunk* chunk) : chunk_(chunk) {}
- virtual ~PageScavengingItem() {}
-
- void Process(Scavenger* scavenger) { scavenger->ScavengePage(chunk_); }
-
- private:
- MemoryChunk* const chunk_;
-};
-
-class ScavengingTask final : public ItemParallelJob::Task {
- public:
- ScavengingTask(Heap* heap, Scavenger* scavenger, OneshotBarrier* barrier)
- : ItemParallelJob::Task(heap->isolate()),
- heap_(heap),
- scavenger_(scavenger),
- barrier_(barrier) {}
-
- void RunInParallel() final {
- TRACE_BACKGROUND_GC(
- heap_->tracer(),
- GCTracer::BackgroundScope::SCAVENGER_BACKGROUND_SCAVENGE_PARALLEL);
- double scavenging_time = 0.0;
- {
- barrier_->Start();
- TimedScope scope(&scavenging_time);
- PageScavengingItem* item = nullptr;
- while ((item = GetItem<PageScavengingItem>()) != nullptr) {
- item->Process(scavenger_);
- item->MarkFinished();
- }
- do {
- scavenger_->Process(barrier_);
- } while (!barrier_->Wait());
- scavenger_->Process();
- }
- if (FLAG_trace_parallel_scavenge) {
- PrintIsolate(heap_->isolate(),
- "scavenge[%p]: time=%.2f copied=%zu promoted=%zu\n",
- static_cast<void*>(this), scavenging_time,
- scavenger_->bytes_copied(), scavenger_->bytes_promoted());
- }
- };
-
- private:
- Heap* const heap_;
- Scavenger* const scavenger_;
- OneshotBarrier* const barrier_;
-};
-
-int Heap::NumberOfScavengeTasks() {
- if (!FLAG_parallel_scavenge) return 1;
- const int num_scavenge_tasks =
- static_cast<int>(new_space()->TotalCapacity()) / MB;
- static int num_cores = V8::GetCurrentPlatform()->NumberOfWorkerThreads() + 1;
- int tasks =
- Max(1, Min(Min(num_scavenge_tasks, kMaxScavengerTasks), num_cores));
- if (!CanExpandOldGeneration(static_cast<size_t>(tasks * Page::kPageSize))) {
- // Optimize for memory usage near the heap limit.
- tasks = 1;
- }
- return tasks;
-}
-
void Heap::Scavenge() {
TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_SCAVENGE);
base::LockGuard<base::Mutex> guard(relocation_mutex());
@@ -2088,7 +1908,6 @@ void Heap::Scavenge() {
// Bump-pointer allocations done during scavenge are not real allocations.
// Pause the inline allocation steps.
PauseAllocationObserversScope pause_observers(this);
-
IncrementalMarking::PauseBlackAllocationScope pause_black_allocation(
incremental_marking());
@@ -2097,137 +1916,19 @@ void Heap::Scavenge() {
SetGCState(SCAVENGE);
- // Implements Cheney's copying algorithm
- LOG(isolate_, ResourceEvent("scavenge", "begin"));
-
// Flip the semispaces. After flipping, to space is empty, from space has
// live objects.
- new_space_->Flip();
- new_space_->ResetLinearAllocationArea();
-
- ItemParallelJob job(isolate()->cancelable_task_manager(),
- &parallel_scavenge_semaphore_);
- const int kMainThreadId = 0;
- Scavenger* scavengers[kMaxScavengerTasks];
- const bool is_logging = IsLogging(isolate());
- const int num_scavenge_tasks = NumberOfScavengeTasks();
- OneshotBarrier barrier;
- Scavenger::CopiedList copied_list(num_scavenge_tasks);
- Scavenger::PromotionList promotion_list(num_scavenge_tasks);
- for (int i = 0; i < num_scavenge_tasks; i++) {
- scavengers[i] =
- new Scavenger(this, is_logging, &copied_list, &promotion_list, i);
- job.AddTask(new ScavengingTask(this, scavengers[i], &barrier));
- }
-
- {
- Sweeper* sweeper = mark_compact_collector()->sweeper();
- // Pause the concurrent sweeper.
- Sweeper::PauseOrCompleteScope pause_scope(sweeper);
- // Filter out pages from the sweeper that need to be processed for old to
- // new slots by the Scavenger. After processing, the Scavenger adds back
- // pages that are still unsweeped. This way the Scavenger has exclusive
- // access to the slots of a page and can completely avoid any locks on
- // the page itself.
- Sweeper::FilterSweepingPagesScope filter_scope(sweeper, pause_scope);
- filter_scope.FilterOldSpaceSweepingPages(
- [](Page* page) { return !page->ContainsSlots<OLD_TO_NEW>(); });
- RememberedSet<OLD_TO_NEW>::IterateMemoryChunks(
- this, [&job](MemoryChunk* chunk) {
- job.AddItem(new PageScavengingItem(chunk));
- });
-
- RootScavengeVisitor root_scavenge_visitor(scavengers[kMainThreadId]);
-
- {
- // Identify weak unmodified handles. Requires an unmodified graph.
- TRACE_GC(
- tracer(),
- GCTracer::Scope::SCAVENGER_SCAVENGE_WEAK_GLOBAL_HANDLES_IDENTIFY);
- isolate()->global_handles()->IdentifyWeakUnmodifiedObjects(
- &JSObject::IsUnmodifiedApiObject);
- }
- {
- // Copy roots.
- TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_SCAVENGE_ROOTS);
- IterateRoots(&root_scavenge_visitor, VISIT_ALL_IN_SCAVENGE);
- }
- {
- // Parallel phase scavenging all copied and promoted objects.
- TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_SCAVENGE_PARALLEL);
- job.Run(isolate()->async_counters());
- DCHECK(copied_list.IsEmpty());
- DCHECK(promotion_list.IsEmpty());
- }
- {
- // Scavenge weak global handles.
- TRACE_GC(tracer(),
- GCTracer::Scope::SCAVENGER_SCAVENGE_WEAK_GLOBAL_HANDLES_PROCESS);
- isolate()->global_handles()->MarkNewSpaceWeakUnmodifiedObjectsPending(
- &IsUnscavengedHeapObject);
- isolate()
- ->global_handles()
- ->IterateNewSpaceWeakUnmodifiedRootsForFinalizers(
- &root_scavenge_visitor);
- scavengers[kMainThreadId]->Process();
-
- DCHECK(copied_list.IsEmpty());
- DCHECK(promotion_list.IsEmpty());
- isolate()
- ->global_handles()
- ->IterateNewSpaceWeakUnmodifiedRootsForPhantomHandles(
- &root_scavenge_visitor, &IsUnscavengedHeapObject);
- }
-
- for (int i = 0; i < num_scavenge_tasks; i++) {
- scavengers[i]->Finalize();
- delete scavengers[i];
- }
- }
-
- {
- // Update references into new space
- TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_SCAVENGE_UPDATE_REFS);
- UpdateNewSpaceReferencesInExternalStringTable(
- &UpdateNewSpaceReferenceInExternalStringTableEntry);
-
- incremental_marking()->UpdateMarkingWorklistAfterScavenge();
- }
-
- if (FLAG_concurrent_marking) {
- // Ensure that concurrent marker does not track pages that are
- // going to be unmapped.
- for (Page* p : PageRange(new_space()->from_space().first_page(), nullptr)) {
- concurrent_marking()->ClearLiveness(p);
- }
- }
-
- ScavengeWeakObjectRetainer weak_object_retainer;
- ProcessYoungWeakReferences(&weak_object_retainer);
-
- // Set age mark.
- new_space_->set_age_mark(new_space_->top());
-
- {
- TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_PROCESS_ARRAY_BUFFERS);
- ArrayBufferTracker::PrepareToFreeDeadInNewSpace(this);
- }
- array_buffer_collector()->FreeAllocationsOnBackgroundThread();
+ new_space()->Flip();
+ new_space()->ResetLinearAllocationArea();
- RememberedSet<OLD_TO_NEW>::IterateMemoryChunks(this, [](MemoryChunk* chunk) {
- if (chunk->SweepingDone()) {
- RememberedSet<OLD_TO_NEW>::FreeEmptyBuckets(chunk);
- } else {
- RememberedSet<OLD_TO_NEW>::PreFreeEmptyBuckets(chunk);
- }
- });
+ // We also flip the young generation large object space. All large objects
+ // will be in the from space.
+ new_lo_space()->Flip();
- // Update how much has survived scavenge.
- IncrementYoungSurvivorsCounter(SurvivedNewSpaceObjectSize());
+ // Implements Cheney's copying algorithm
+ LOG(isolate_, ResourceEvent("scavenge", "begin"));
- // Scavenger may find new wrappers by iterating objects promoted onto a black
- // page.
- local_embedder_heap_tracer()->RegisterWrappersWithRemoteTracer();
+ scavenger_collector_->CollectGarbage();
LOG(isolate_, ResourceEvent("scavenge", "end"));
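The comment retained above still describes the scavenger as Cheney's copying algorithm; it now runs behind ScavengerCollector::CollectGarbage() instead of inline here. As a refresher, a deliberately simplified standalone sketch of the algorithm itself (nothing like V8's parallel scavenger): evacuate the roots into to-space, advance a scan pointer through to-space so reachable objects are copied breadth-first, forward each object at most once, then flip which buffer is treated as from-space.

    #include <cstddef>
    #include <cstdio>
    #include <utility>
    #include <vector>

    struct Obj {
      Obj* fields[2] = {nullptr, nullptr};
      Obj* forwarded = nullptr;  // forwarding pointer, set once evacuated
    };

    class SemiSpaceHeap {
     public:
      explicit SemiSpaceHeap(size_t capacity)
          : from_(capacity), to_(capacity), top_(0) {}

      Obj* Allocate() { return &from_[top_++]; }  // bump allocation, no bounds check

      void Scavenge(std::vector<Obj*>& roots) {
        size_t scan = 0, free_slot = 0;
        for (Obj*& r : roots) r = Copy(r, &free_slot);  // evacuate the roots
        while (scan < free_slot) {                      // Cheney scan pointer
          for (Obj*& f : to_[scan].fields) f = Copy(f, &free_slot);
          ++scan;
        }
        std::swap(from_, to_);  // flip: survivors now live in the new from-space
        top_ = free_slot;
      }

     private:
      Obj* Copy(Obj* o, size_t* free_slot) {
        if (o == nullptr) return nullptr;
        if (o->forwarded != nullptr) return o->forwarded;  // already evacuated
        Obj* target = &to_[(*free_slot)++];
        *target = *o;
        target->forwarded = nullptr;
        o->forwarded = target;
        return target;
      }

      std::vector<Obj> from_, to_;
      size_t top_;
    };

    int main() {
      SemiSpaceHeap heap(16);
      Obj* a = heap.Allocate();
      Obj* b = heap.Allocate();
      heap.Allocate();  // unreachable; dies in the scavenge
      a->fields[0] = b;
      std::vector<Obj*> roots = {a};
      heap.Scavenge(roots);
      printf("2 of 3 objects survived the scavenge\n");
      return 0;
    }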
@@ -2285,15 +1986,6 @@ bool Heap::ExternalStringTable::Contains(HeapObject* obj) {
return false;
}
-void Heap::ProcessMovedExternalString(Page* old_page, Page* new_page,
- ExternalString* string) {
- size_t size = string->ExternalPayloadSize();
- new_page->IncrementExternalBackingStoreBytes(
- ExternalBackingStoreType::kExternalString, size);
- old_page->DecrementExternalBackingStoreBytes(
- ExternalBackingStoreType::kExternalString, size);
-}
-
String* Heap::UpdateNewSpaceReferenceInExternalStringTableEntry(Heap* heap,
Object** p) {
MapWord first_word = HeapObject::cast(*p)->map_word();
@@ -2312,18 +2004,15 @@ String* Heap::UpdateNewSpaceReferenceInExternalStringTableEntry(Heap* heap,
// String is still reachable.
String* new_string = String::cast(first_word.ToForwardingAddress());
- String* original_string = reinterpret_cast<String*>(*p);
- // The length of the original string is used to disambiguate the scenario
- // of a ThingString being forwarded to an ExternalString (which already exists
- // in the OLD space), and an ExternalString being forwarded to its promoted
- // copy. See Scavenger::EvacuateThinString.
- if (new_string->IsThinString() || original_string->length() == 0) {
+ if (new_string->IsThinString()) {
// Filtering Thin strings out of the external string table.
return nullptr;
} else if (new_string->IsExternalString()) {
- heap->ProcessMovedExternalString(
+ MemoryChunk::MoveExternalBackingStoreBytes(
+ ExternalBackingStoreType::kExternalString,
Page::FromAddress(reinterpret_cast<Address>(*p)),
- Page::FromHeapObject(new_string), ExternalString::cast(new_string));
+ Page::FromHeapObject(new_string),
+ ExternalString::cast(new_string)->ExternalPayloadSize());
return new_string;
}
@@ -2488,8 +2177,8 @@ void Heap::ProcessWeakListRoots(WeakObjectRetainer* retainer) {
set_allocation_sites_list(retainer->RetainAs(allocation_sites_list()));
}
-void Heap::ForeachAllocationSite(Object* list,
- std::function<void(AllocationSite*)> visitor) {
+void Heap::ForeachAllocationSite(
+ Object* list, const std::function<void(AllocationSite*)>& visitor) {
DisallowHeapAllocation disallow_heap_allocation;
Object* current = list;
while (current->IsAllocationSite()) {
@@ -2555,8 +2244,8 @@ void Heap::VisitExternalResources(v8::ExternalResourceVisitor* visitor) {
explicit ExternalStringTableVisitorAdapter(
Isolate* isolate, v8::ExternalResourceVisitor* visitor)
: isolate_(isolate), visitor_(visitor) {}
- virtual void VisitRootPointers(Root root, const char* description,
- Object** start, Object** end) {
+ void VisitRootPointers(Root root, const char* description,
+ Object** start, Object** end) override {
for (Object** p = start; p < end; p++) {
DCHECK((*p)->IsExternalString());
visitor_->VisitExternalString(
@@ -2597,10 +2286,9 @@ int Heap::GetMaximumFillToAlign(AllocationAlignment alignment) {
int Heap::GetFillToAlign(Address address, AllocationAlignment alignment) {
- intptr_t offset = OffsetFrom(address);
- if (alignment == kDoubleAligned && (offset & kDoubleAlignmentMask) != 0)
+ if (alignment == kDoubleAligned && (address & kDoubleAlignmentMask) != 0)
return kPointerSize;
- if (alignment == kDoubleUnaligned && (offset & kDoubleAlignmentMask) == 0)
+ if (alignment == kDoubleUnaligned && (address & kDoubleAlignmentMask) == 0)
return kDoubleSize - kPointerSize; // No fill if double is always aligned.
return 0;
}
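With Address now an unsigned integer type, the alignment mask can be applied to it directly instead of going through OffsetFrom(). A standalone sketch of the fill computation using 32-bit-style constants (kPointerSize of 4 and kDoubleSize of 8 are assumed here, not taken from V8 headers):

    #include <cstdint>
    #include <cstdio>

    using Address = uintptr_t;
    constexpr int kPointerSize = 4;  // assumed 32-bit-style values
    constexpr int kDoubleSize = 8;
    constexpr Address kDoubleAlignmentMask = kDoubleSize - 1;

    enum AllocationAlignment { kWordAligned, kDoubleAligned, kDoubleUnaligned };

    int GetFillToAlign(Address address, AllocationAlignment alignment) {
      if (alignment == kDoubleAligned && (address & kDoubleAlignmentMask) != 0)
        return kPointerSize;  // pad so the next word is 8-byte aligned
      if (alignment == kDoubleUnaligned && (address & kDoubleAlignmentMask) == 0)
        return kDoubleSize - kPointerSize;  // push the address off 8-byte alignment
      return 0;
    }

    int main() {
      printf("%d\n", GetFillToAlign(0x1004, kDoubleAligned));    // 4: 0x1008 is aligned
      printf("%d\n", GetFillToAlign(0x1008, kDoubleUnaligned));  // 4: 0x100c is unaligned
      return 0;
    }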
@@ -2693,33 +2381,29 @@ void Heap::CreateFixedStubs() {
Heap::CreateJSRunMicrotasksEntryStub();
}
-bool Heap::RootCanBeWrittenAfterInitialization(Heap::RootListIndex root_index) {
+bool Heap::RootCanBeWrittenAfterInitialization(RootIndex root_index) {
switch (root_index) {
- case kNumberStringCacheRootIndex:
- case kCodeStubsRootIndex:
- case kScriptListRootIndex:
- case kMaterializedObjectsRootIndex:
- case kMicrotaskQueueRootIndex:
- case kDetachedContextsRootIndex:
- case kRetainedMapsRootIndex:
- case kRetainingPathTargetsRootIndex:
- case kFeedbackVectorsForProfilingToolsRootIndex:
- case kNoScriptSharedFunctionInfosRootIndex:
- case kSerializedObjectsRootIndex:
- case kSerializedGlobalProxySizesRootIndex:
- case kPublicSymbolTableRootIndex:
- case kApiSymbolTableRootIndex:
- case kApiPrivateSymbolTableRootIndex:
- case kMessageListenersRootIndex:
- case kDeserializeLazyHandlerRootIndex:
- case kDeserializeLazyHandlerWideRootIndex:
- case kDeserializeLazyHandlerExtraWideRootIndex:
+ case RootIndex::kNumberStringCache:
+ case RootIndex::kCodeStubs:
+ case RootIndex::kScriptList:
+ case RootIndex::kMaterializedObjects:
+ case RootIndex::kDetachedContexts:
+ case RootIndex::kRetainedMaps:
+ case RootIndex::kRetainingPathTargets:
+ case RootIndex::kFeedbackVectorsForProfilingTools:
+ case RootIndex::kNoScriptSharedFunctionInfos:
+ case RootIndex::kSerializedObjects:
+ case RootIndex::kSerializedGlobalProxySizes:
+ case RootIndex::kPublicSymbolTable:
+ case RootIndex::kApiSymbolTable:
+ case RootIndex::kApiPrivateSymbolTable:
+ case RootIndex::kMessageListeners:
// Smi values
-#define SMI_ENTRY(type, name, Name) case k##Name##RootIndex:
+#define SMI_ENTRY(type, name, Name) case RootIndex::k##Name:
SMI_ROOT_LIST(SMI_ENTRY)
#undef SMI_ENTRY
// String table
- case kStringTableRootIndex:
+ case RootIndex::kStringTable:
return true;
default:
@@ -2727,7 +2411,7 @@ bool Heap::RootCanBeWrittenAfterInitialization(Heap::RootListIndex root_index) {
}
}
-bool Heap::RootCanBeTreatedAsConstant(RootListIndex root_index) {
+bool Heap::RootCanBeTreatedAsConstant(RootIndex root_index) {
bool can_be = !RootCanBeWrittenAfterInitialization(root_index) &&
!InNewSpace(root(root_index));
DCHECK_IMPLIES(can_be, IsImmovable(HeapObject::cast(root(root_index))));
@@ -2743,61 +2427,6 @@ void Heap::FlushNumberStringCache() {
}
}
-namespace {
-
-Heap::RootListIndex RootIndexForFixedTypedArray(ExternalArrayType array_type) {
- switch (array_type) {
-#define ARRAY_TYPE_TO_ROOT_INDEX(Type, type, TYPE, ctype) \
- case kExternal##Type##Array: \
- return Heap::kFixed##Type##ArrayMapRootIndex;
-
- TYPED_ARRAYS(ARRAY_TYPE_TO_ROOT_INDEX)
-#undef ARRAY_TYPE_TO_ROOT_INDEX
- }
- UNREACHABLE();
-}
-
-Heap::RootListIndex RootIndexForFixedTypedArray(ElementsKind elements_kind) {
- switch (elements_kind) {
-#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype) \
- case TYPE##_ELEMENTS: \
- return Heap::kFixed##Type##ArrayMapRootIndex;
- TYPED_ARRAYS(TYPED_ARRAY_CASE)
- default:
- UNREACHABLE();
-#undef TYPED_ARRAY_CASE
- }
-}
-
-Heap::RootListIndex RootIndexForEmptyFixedTypedArray(
- ElementsKind elements_kind) {
- switch (elements_kind) {
-#define ELEMENT_KIND_TO_ROOT_INDEX(Type, type, TYPE, ctype) \
- case TYPE##_ELEMENTS: \
- return Heap::kEmptyFixed##Type##ArrayRootIndex;
-
- TYPED_ARRAYS(ELEMENT_KIND_TO_ROOT_INDEX)
-#undef ELEMENT_KIND_TO_ROOT_INDEX
- default:
- UNREACHABLE();
- }
-}
-
-} // namespace
-
-Map* Heap::MapForFixedTypedArray(ExternalArrayType array_type) {
- return Map::cast(roots_[RootIndexForFixedTypedArray(array_type)]);
-}
-
-Map* Heap::MapForFixedTypedArray(ElementsKind elements_kind) {
- return Map::cast(roots_[RootIndexForFixedTypedArray(elements_kind)]);
-}
-
-FixedTypedArrayBase* Heap::EmptyFixedTypedArrayForMap(const Map* map) {
- return FixedTypedArrayBase::cast(
- roots_[RootIndexForEmptyFixedTypedArray(map->elements_kind())]);
-}
-
HeapObject* Heap::CreateFillerObjectAt(Address addr, int size,
ClearRecordedSlots clear_slots_mode,
ClearFreedMemoryMode clear_memory_mode) {
@@ -2805,11 +2434,11 @@ HeapObject* Heap::CreateFillerObjectAt(Address addr, int size,
HeapObject* filler = HeapObject::FromAddress(addr);
if (size == kPointerSize) {
filler->set_map_after_allocation(
- reinterpret_cast<Map*>(root(kOnePointerFillerMapRootIndex)),
+ reinterpret_cast<Map*>(root(RootIndex::kOnePointerFillerMap)),
SKIP_WRITE_BARRIER);
} else if (size == 2 * kPointerSize) {
filler->set_map_after_allocation(
- reinterpret_cast<Map*>(root(kTwoPointerFillerMapRootIndex)),
+ reinterpret_cast<Map*>(root(RootIndex::kTwoPointerFillerMap)),
SKIP_WRITE_BARRIER);
if (clear_memory_mode == ClearFreedMemoryMode::kClearFreedMemory) {
Memory<Address>(addr + kPointerSize) =
@@ -2818,7 +2447,7 @@ HeapObject* Heap::CreateFillerObjectAt(Address addr, int size,
} else {
DCHECK_GT(size, 2 * kPointerSize);
filler->set_map_after_allocation(
- reinterpret_cast<Map*>(root(kFreeSpaceMapRootIndex)),
+ reinterpret_cast<Map*>(root(RootIndex::kFreeSpaceMap)),
SKIP_WRITE_BARRIER);
FreeSpace::cast(filler)->relaxed_write_size(size);
if (clear_memory_mode == ClearFreedMemoryMode::kClearFreedMemory) {
@@ -2865,8 +2494,8 @@ class LeftTrimmerVerifierRootVisitor : public RootVisitor {
explicit LeftTrimmerVerifierRootVisitor(FixedArrayBase* to_check)
: to_check_(to_check) {}
- virtual void VisitRootPointers(Root root, const char* description,
- Object** start, Object** end) {
+ void VisitRootPointers(Root root, const char* description,
+ Object** start, Object** end) override {
for (Object** p = start; p < end; ++p) {
DCHECK_NE(*p, to_check_);
}
@@ -3532,7 +3161,7 @@ class MemoryPressureInterruptTask : public CancelableTask {
explicit MemoryPressureInterruptTask(Heap* heap)
: CancelableTask(heap->isolate()), heap_(heap) {}
- virtual ~MemoryPressureInterruptTask() {}
+ ~MemoryPressureInterruptTask() override = default;
private:
// v8::internal::CancelableTask overrides.
@@ -3575,9 +3204,10 @@ void Heap::CollectGarbageOnMemoryPressure() {
const double kMaxMemoryPressurePauseMs = 100;
double start = MonotonicallyIncreasingTimeInMs();
- CollectAllGarbage(kReduceMemoryFootprintMask | kAbortIncrementalMarkingMask,
+ CollectAllGarbage(kReduceMemoryFootprintMask,
GarbageCollectionReason::kMemoryPressure,
kGCCallbackFlagCollectAllAvailableGarbage);
+ EagerlyFreeExternalMemory();
double end = MonotonicallyIncreasingTimeInMs();
// Estimate how much memory we can free.
@@ -3591,10 +3221,9 @@ void Heap::CollectGarbageOnMemoryPressure() {
// If we spent less than half of the time budget, then perform full GC
// Otherwise, start incremental marking.
if (end - start < kMaxMemoryPressurePauseMs / 2) {
- CollectAllGarbage(
- kReduceMemoryFootprintMask | kAbortIncrementalMarkingMask,
- GarbageCollectionReason::kMemoryPressure,
- kGCCallbackFlagCollectAllAvailableGarbage);
+ CollectAllGarbage(kReduceMemoryFootprintMask,
+ GarbageCollectionReason::kMemoryPressure,
+ kGCCallbackFlagCollectAllAvailableGarbage);
} else {
if (FLAG_incremental_marking && incremental_marking()->IsStopped()) {
StartIncrementalMarking(kReduceMemoryFootprintMask,
@@ -3617,13 +3246,27 @@ void Heap::MemoryPressureNotification(MemoryPressureLevel level,
} else {
ExecutionAccess access(isolate());
isolate()->stack_guard()->RequestGC();
- V8::GetCurrentPlatform()->CallOnForegroundThread(
- reinterpret_cast<v8::Isolate*>(isolate()),
- new MemoryPressureInterruptTask(this));
+ auto taskrunner = V8::GetCurrentPlatform()->GetForegroundTaskRunner(
+ reinterpret_cast<v8::Isolate*>(isolate()));
+ taskrunner->PostTask(
+ base::make_unique<MemoryPressureInterruptTask>(this));
}
}
}
+void Heap::EagerlyFreeExternalMemory() {
+ for (Page* page : *old_space()) {
+ if (!page->SweepingDone()) {
+ base::LockGuard<base::Mutex> guard(page->mutex());
+ if (!page->SweepingDone()) {
+ ArrayBufferTracker::FreeDead(
+ page, mark_compact_collector()->non_atomic_marking_state());
+ }
+ }
+ }
+ memory_allocator()->unmapper()->EnsureUnmappingCompleted();
+}
+
void Heap::AddNearHeapLimitCallback(v8::NearHeapLimitCallback callback,
void* data) {
const size_t kMaxCallbacks = 100;
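The new EagerlyFreeExternalMemory() above uses a check-lock-recheck pattern: the unlocked SweepingDone() test skips pages that are already swept, and the second test under the page mutex guards against a concurrent sweeper finishing between the first check and the lock acquisition. A generic standalone sketch of that pattern (std::mutex and std::atomic stand-ins, not V8's Page or marking-state types):

    #include <atomic>
    #include <mutex>

    struct Page {
      std::mutex mutex;
      std::atomic<bool> sweeping_done{false};
    };

    void FreeDeadBuffers(Page*) { /* placeholder for the real cleanup */ }

    void MaybeFreeDead(Page* page) {
      if (!page->sweeping_done.load()) {    // cheap unlocked check
        std::lock_guard<std::mutex> guard(page->mutex);
        if (!page->sweeping_done.load()) {  // recheck while holding the lock
          FreeDeadBuffers(page);
        }
      }
    }

    int main() {
      Page page;
      MaybeFreeDead(&page);
      return 0;
    }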
@@ -3833,16 +3476,15 @@ bool Heap::IsValidAllocationSpace(AllocationSpace space) {
}
}
-
-bool Heap::RootIsImmortalImmovable(int root_index) {
+bool Heap::RootIsImmortalImmovable(RootIndex root_index) {
switch (root_index) {
-#define IMMORTAL_IMMOVABLE_ROOT(name) case Heap::k##name##RootIndex:
+#define IMMORTAL_IMMOVABLE_ROOT(name) case RootIndex::k##name:
IMMORTAL_IMMOVABLE_ROOT_LIST(IMMORTAL_IMMOVABLE_ROOT)
#undef IMMORTAL_IMMOVABLE_ROOT
-#define INTERNALIZED_STRING(name, value) case Heap::k##name##RootIndex:
- INTERNALIZED_STRING_LIST(INTERNALIZED_STRING)
+#define INTERNALIZED_STRING(_, name, value) case RootIndex::k##name:
+ INTERNALIZED_STRING_LIST_GENERATOR(INTERNALIZED_STRING, /* not used */)
#undef INTERNALIZED_STRING
-#define STRING_TYPE(NAME, size, name, Name) case Heap::k##Name##MapRootIndex:
+#define STRING_TYPE(NAME, size, name, Name) case RootIndex::k##Name##Map:
STRING_TYPE_LIST(STRING_TYPE)
#undef STRING_TYPE
return true;
@@ -3867,7 +3509,7 @@ class VerifyReadOnlyPointersVisitor : public VerifyPointersVisitor {
for (MaybeObject** current = start; current < end; current++) {
HeapObject* object;
- if ((*current)->ToStrongOrWeakHeapObject(&object)) {
+ if ((*current)->GetHeapObject(&object)) {
CHECK(heap_->InReadOnlySpace(object));
}
}
@@ -3963,10 +3605,9 @@ class OldToNewSlotVerifyingVisitor : public SlotVerifyingVisitor {
: SlotVerifyingVisitor(untyped, typed) {}
bool ShouldHaveBeenRecorded(HeapObject* host, MaybeObject* target) override {
- DCHECK_IMPLIES(
- target->IsStrongOrWeakHeapObject() && Heap::InNewSpace(target),
- Heap::InToSpace(target));
- return target->IsStrongOrWeakHeapObject() && Heap::InNewSpace(target) &&
+ DCHECK_IMPLIES(target->IsStrongOrWeak() && Heap::InNewSpace(target),
+ Heap::InToSpace(target));
+ return target->IsStrongOrWeak() && Heap::InNewSpace(target) &&
!Heap::InNewSpace(host);
}
};
@@ -4077,9 +3718,8 @@ void Heap::IterateWeakRoots(RootVisitor* v, VisitMode mode) {
const bool isMinorGC = mode == VISIT_ALL_IN_SCAVENGE ||
mode == VISIT_ALL_IN_MINOR_MC_MARK ||
mode == VISIT_ALL_IN_MINOR_MC_UPDATE;
- v->VisitRootPointer(
- Root::kStringTable, nullptr,
- reinterpret_cast<Object**>(&roots_[kStringTableRootIndex]));
+ v->VisitRootPointer(Root::kStringTable, nullptr,
+ &roots_[RootIndex::kStringTable]);
v->Synchronize(VisitorSynchronization::kStringTable);
if (!isMinorGC && mode != VISIT_ALL_IN_SWEEP_NEWSPACE &&
mode != VISIT_FOR_SERIALIZATION) {
@@ -4094,8 +3734,8 @@ void Heap::IterateWeakRoots(RootVisitor* v, VisitMode mode) {
void Heap::IterateSmiRoots(RootVisitor* v) {
// Acquire execution access since we are going to read stack limit values.
ExecutionAccess access(isolate());
- v->VisitRootPointers(Root::kSmiRootList, nullptr, &roots_[kSmiRootsStart],
- &roots_[kRootListLength]);
+ v->VisitRootPointers(Root::kSmiRootList, nullptr, roots_.smi_roots_begin(),
+ roots_.smi_roots_end());
v->Synchronize(VisitorSynchronization::kSmiRootList);
}
@@ -4152,8 +3792,13 @@ void Heap::IterateStrongRoots(RootVisitor* v, VisitMode mode) {
const bool isMinorGC = mode == VISIT_ALL_IN_SCAVENGE ||
mode == VISIT_ALL_IN_MINOR_MC_MARK ||
mode == VISIT_ALL_IN_MINOR_MC_UPDATE;
- v->VisitRootPointers(Root::kStrongRootList, nullptr, &roots_[0],
- &roots_[kStrongRootListLength]);
+ // Garbage collection can skip over the read-only roots.
+ const bool isGC = mode != VISIT_ALL && mode != VISIT_FOR_SERIALIZATION &&
+ mode != VISIT_ONLY_STRONG_FOR_SERIALIZATION;
+ Object** start =
+ isGC ? roots_.read_only_roots_end() : roots_.strong_roots_begin();
+ v->VisitRootPointers(Root::kStrongRootList, nullptr, start,
+ roots_.strong_roots_end());
v->Synchronize(VisitorSynchronization::kStrongRootList);
isolate_->bootstrapper()->Iterate(v);
@@ -4192,17 +3837,19 @@ void Heap::IterateStrongRoots(RootVisitor* v, VisitMode mode) {
// global handles need to be added manually.
break;
case VISIT_ONLY_STRONG:
+ case VISIT_ONLY_STRONG_FOR_SERIALIZATION:
isolate_->global_handles()->IterateStrongRoots(v);
break;
case VISIT_ALL_IN_SCAVENGE:
isolate_->global_handles()->IterateNewSpaceStrongAndDependentRoots(v);
break;
case VISIT_ALL_IN_MINOR_MC_MARK:
- // Global handles are processed manually be the minor MC.
+ // Global handles are processed manually by the minor MC.
break;
case VISIT_ALL_IN_MINOR_MC_UPDATE:
- // Global handles are processed manually be the minor MC.
+ // Global handles are processed manually by the minor MC.
break;
+ case VISIT_ALL_BUT_READ_ONLY:
case VISIT_ALL_IN_SWEEP_NEWSPACE:
case VISIT_ALL:
isolate_->global_handles()->IterateAllRoots(v);
@@ -4706,6 +4353,9 @@ void Heap::SetUp() {
heap_controller_ = new HeapController(this);
mark_compact_collector_ = new MarkCompactCollector(this);
+
+ scavenger_collector_ = new ScavengerCollector(this);
+
incremental_marking_ =
new IncrementalMarking(this, mark_compact_collector_->marking_worklist(),
mark_compact_collector_->weak_objects());
@@ -4715,10 +4365,11 @@ void Heap::SetUp() {
mark_compact_collector_->marking_worklist();
concurrent_marking_ = new ConcurrentMarking(
this, marking_worklist->shared(), marking_worklist->bailout(),
- marking_worklist->on_hold(), mark_compact_collector_->weak_objects());
+ marking_worklist->on_hold(), mark_compact_collector_->weak_objects(),
+ marking_worklist->embedder());
} else {
- concurrent_marking_ =
- new ConcurrentMarking(this, nullptr, nullptr, nullptr, nullptr);
+ concurrent_marking_ = new ConcurrentMarking(this, nullptr, nullptr, nullptr,
+ nullptr, nullptr);
}
for (int i = FIRST_SPACE; i <= LAST_SPACE; i++) {
@@ -4727,7 +4378,8 @@ void Heap::SetUp() {
space_[RO_SPACE] = read_only_space_ = new ReadOnlySpace(this);
space_[NEW_SPACE] = new_space_ =
- new NewSpace(this, initial_semispace_size_, max_semi_space_size_);
+ new NewSpace(this, memory_allocator_->data_page_allocator(),
+ initial_semispace_size_, max_semi_space_size_);
space_[OLD_SPACE] = old_space_ = new OldSpace(this);
space_[CODE_SPACE] = code_space_ = new CodeSpace(this);
space_[MAP_SPACE] = map_space_ = new MapSpace(this);
@@ -4809,15 +4461,15 @@ void Heap::SetStackLimits() {
// Set up the special root array entries containing the stack limits.
// These are actually addresses, but the tag makes the GC ignore it.
- roots_[kStackLimitRootIndex] = reinterpret_cast<Object*>(
+ roots_[RootIndex::kStackLimit] = reinterpret_cast<Object*>(
(isolate_->stack_guard()->jslimit() & ~kSmiTagMask) | kSmiTag);
- roots_[kRealStackLimitRootIndex] = reinterpret_cast<Object*>(
+ roots_[RootIndex::kRealStackLimit] = reinterpret_cast<Object*>(
(isolate_->stack_guard()->real_jslimit() & ~kSmiTagMask) | kSmiTag);
}
void Heap::ClearStackLimits() {
- roots_[kStackLimitRootIndex] = Smi::kZero;
- roots_[kRealStackLimitRootIndex] = Smi::kZero;
+ roots_[RootIndex::kStackLimit] = Smi::kZero;
+ roots_[RootIndex::kRealStackLimit] = Smi::kZero;
}
int Heap::NextAllocationTimeout(int current_timeout) {
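The stack-limit roots keep storing raw addresses; clearing the low bits and OR-ing in the Smi tag merely makes the value look like an immediate, so the GC never tries to follow it as a heap pointer. A tiny standalone sketch (tag constants assumed here: one tag bit, Smis tagged as zero):

    #include <cassert>
    #include <cstdint>

    constexpr uintptr_t kSmiTag = 0;      // assumed: Smis carry a zero tag bit
    constexpr uintptr_t kSmiTagMask = 1;  // assumed: one tag bit

    uintptr_t DisguiseAsSmi(uintptr_t address) {
      return (address & ~kSmiTagMask) | kSmiTag;
    }

    int main() {
      uintptr_t jslimit = 0x7f1234cf;  // arbitrary stack-limit address
      uintptr_t disguised = DisguiseAsSmi(jslimit);
      assert((disguised & kSmiTagMask) == kSmiTag);  // reads as a Smi to the GC
      return 0;
    }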
@@ -4873,6 +4525,10 @@ void Heap::SetEmbedderHeapTracer(EmbedderHeapTracer* tracer) {
local_embedder_heap_tracer()->SetRemoteTracer(tracer);
}
+EmbedderHeapTracer* Heap::GetEmbedderHeapTracer() const {
+ return local_embedder_heap_tracer()->remote_tracer();
+}
+
void Heap::TracePossibleWrapper(JSObject* js_object) {
DCHECK(js_object->IsApiWrapper());
if (js_object->GetEmbedderFieldCount() >= 2 &&
@@ -4961,6 +4617,11 @@ void Heap::TearDown() {
}
#endif // ENABLE_MINOR_MC
+ if (scavenger_collector_ != nullptr) {
+ delete scavenger_collector_;
+ scavenger_collector_ = nullptr;
+ }
+
if (array_buffer_collector_ != nullptr) {
delete array_buffer_collector_;
array_buffer_collector_ = nullptr;
@@ -5100,7 +4761,7 @@ Handle<WeakArrayList> CompactWeakArrayList(Heap* heap,
int copy_to = 0;
for (int i = 0; i < array->length(); i++) {
MaybeObject* element = array->Get(i);
- if (element->IsClearedWeakHeapObject()) continue;
+ if (element->IsCleared()) continue;
new_array->Set(copy_to++, element);
}
new_array->set_length(copy_to);
@@ -5174,11 +4835,11 @@ void Heap::CompactRetainedMaps(WeakArrayList* retained_maps) {
// This loop compacts the array by removing cleared weak cells.
for (int i = 0; i < length; i += 2) {
MaybeObject* maybe_object = retained_maps->Get(i);
- if (maybe_object->IsClearedWeakHeapObject()) {
+ if (maybe_object->IsCleared()) {
continue;
}
- DCHECK(maybe_object->IsWeakHeapObject());
+ DCHECK(maybe_object->IsWeak());
MaybeObject* age = retained_maps->Get(i + 1);
DCHECK(age->IsSmi());
@@ -5268,17 +4929,19 @@ void Heap::ClearRecordedSlot(HeapObject* object, Object** slot) {
}
}
-bool Heap::HasRecordedSlot(HeapObject* object, Object** slot) {
- if (InNewSpace(object)) {
- return false;
- }
+#ifdef DEBUG
+void Heap::VerifyClearedSlot(HeapObject* object, Object** slot) {
+ if (InNewSpace(object)) return;
Address slot_addr = reinterpret_cast<Address>(slot);
Page* page = Page::FromAddress(slot_addr);
DCHECK_EQ(page->owner()->identity(), OLD_SPACE);
store_buffer()->MoveAllEntriesToRememberedSet();
- return RememberedSet<OLD_TO_NEW>::Contains(page, slot_addr) ||
- RememberedSet<OLD_TO_OLD>::Contains(page, slot_addr);
+ CHECK(!RememberedSet<OLD_TO_NEW>::Contains(page, slot_addr));
+ // Old to old slots are filtered with invalidated slots.
+ CHECK_IMPLIES(RememberedSet<OLD_TO_OLD>::Contains(page, slot_addr),
+ page->RegisteredObjectWithInvalidatedSlots(object));
}
+#endif
void Heap::ClearRecordedSlotRange(Address start, Address end) {
Page* page = Page::FromAddress(start);
@@ -5308,9 +4971,7 @@ PagedSpace* PagedSpaces::next() {
SpaceIterator::SpaceIterator(Heap* heap)
: heap_(heap), current_space_(FIRST_SPACE - 1) {}
-SpaceIterator::~SpaceIterator() {
-}
-
+SpaceIterator::~SpaceIterator() = default;
bool SpaceIterator::has_next() {
// Iterate until no more spaces.
@@ -5325,7 +4986,7 @@ Space* SpaceIterator::next() {
class HeapObjectsFilter {
public:
- virtual ~HeapObjectsFilter() {}
+ virtual ~HeapObjectsFilter() = default;
virtual bool SkipObject(HeapObject* object) = 0;
};
@@ -5336,14 +4997,14 @@ class UnreachableObjectsFilter : public HeapObjectsFilter {
MarkReachableObjects();
}
- ~UnreachableObjectsFilter() {
+ ~UnreachableObjectsFilter() override {
for (auto it : reachable_) {
delete it.second;
it.second = nullptr;
}
}
- bool SkipObject(HeapObject* object) {
+ bool SkipObject(HeapObject* object) override {
if (object->IsFiller()) return true;
MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
if (reachable_.count(chunk) == 0) return true;
@@ -5396,7 +5057,7 @@ class UnreachableObjectsFilter : public HeapObjectsFilter {
// Treat weak references as strong.
for (MaybeObject** p = start; p < end; p++) {
HeapObject* heap_object;
- if ((*p)->ToStrongOrWeakHeapObject(&heap_object)) {
+ if ((*p)->GetHeapObject(&heap_object)) {
if (filter_->MarkAsReachable(heap_object)) {
marking_stack_.push_back(heap_object);
}
@@ -5411,7 +5072,7 @@ class UnreachableObjectsFilter : public HeapObjectsFilter {
void MarkReachableObjects() {
MarkingVisitor visitor(this);
- heap_->IterateRoots(&visitor, VISIT_ALL);
+ heap_->IterateRoots(&visitor, VISIT_ALL_BUT_READ_ONLY);
visitor.TransitiveClosure();
}
@@ -5597,24 +5258,6 @@ void Heap::UnregisterStrongRoots(Object** start) {
}
}
-bool Heap::IsDeserializeLazyHandler(Code* code) {
- return (code == deserialize_lazy_handler() ||
- code == deserialize_lazy_handler_wide() ||
- code == deserialize_lazy_handler_extra_wide());
-}
-
-void Heap::SetDeserializeLazyHandler(Code* code) {
- set_deserialize_lazy_handler(code);
-}
-
-void Heap::SetDeserializeLazyHandlerWide(Code* code) {
- set_deserialize_lazy_handler_wide(code);
-}
-
-void Heap::SetDeserializeLazyHandlerExtraWide(Code* code) {
- set_deserialize_lazy_handler_extra_wide(code);
-}
-
void Heap::SetBuiltinsConstantsTable(FixedArray* cache) {
set_builtins_constants_table(cache);
}
@@ -5723,11 +5366,11 @@ void VerifyPointersVisitor::VerifyPointers(HeapObject* host,
MaybeObject** end) {
for (MaybeObject** current = start; current < end; current++) {
HeapObject* object;
- if ((*current)->ToStrongOrWeakHeapObject(&object)) {
+ if ((*current)->GetHeapObject(&object)) {
CHECK(heap_->Contains(object));
CHECK(object->map()->IsMap());
} else {
- CHECK((*current)->IsSmi() || (*current)->IsClearedWeakHeapObject());
+ CHECK((*current)->IsSmi() || (*current)->IsCleared());
}
}
}