Diffstat (limited to 'deps/v8/src/heap/heap.cc')
-rw-r--r-- | deps/v8/src/heap/heap.cc | 912
1 file changed, 471 insertions(+), 441 deletions(-)
diff --git a/deps/v8/src/heap/heap.cc b/deps/v8/src/heap/heap.cc
index 7eb5af3b6a..d823232ac7 100644
--- a/deps/v8/src/heap/heap.cc
+++ b/deps/v8/src/heap/heap.cc
@@ -71,14 +71,14 @@ class IdleScavengeObserver : public AllocationObserver {
 Heap::Heap()
     : external_memory_(0),
-      external_memory_limit_(kExternalAllocationLimit),
+      external_memory_limit_(kExternalAllocationSoftLimit),
       external_memory_at_last_mark_compact_(0),
       isolate_(nullptr),
       code_range_size_(0),
       // semispace_size_ should be a power of 2 and old_generation_size_ should
       // be a multiple of Page::kPageSize.
       max_semi_space_size_(8 * (kPointerSize / 4) * MB),
-      initial_semispace_size_(Page::kPageSize),
+      initial_semispace_size_(MB),
       max_old_generation_size_(700ul * (kPointerSize / 4) * MB),
       initial_old_generation_size_(max_old_generation_size_ /
                                    kInitalOldGenerationLimitFactor),
@@ -96,7 +96,7 @@ Heap::Heap()
       contexts_disposed_(0),
       number_of_disposed_maps_(0),
       global_ic_age_(0),
-      new_space_(this),
+      new_space_(nullptr),
       old_space_(NULL),
       code_space_(NULL),
       map_space_(NULL),
@@ -112,11 +112,9 @@ Heap::Heap()
       allocation_timeout_(0),
#endif  // DEBUG
       old_generation_allocation_limit_(initial_old_generation_size_),
-      old_gen_exhausted_(false),
       inline_allocation_disabled_(false),
       total_regexp_code_generated_(0),
       tracer_(nullptr),
-      high_survival_rate_period_length_(0),
       promoted_objects_size_(0),
       promotion_ratio_(0),
       semi_space_copied_object_size_(0),
@@ -126,12 +124,6 @@ Heap::Heap()
       nodes_copied_in_new_space_(0),
       nodes_promoted_(0),
       maximum_size_scavenges_(0),
-      max_gc_pause_(0.0),
-      total_gc_time_ms_(0.0),
-      max_alive_after_gc_(0),
-      min_in_mutator_(kMaxInt),
-      marking_time_(0.0),
-      sweeping_time_(0.0),
       last_idle_notification_time_(0.0),
       last_gc_time_(0.0),
       scavenge_collector_(nullptr),
@@ -148,7 +140,7 @@ Heap::Heap()
       full_codegen_bytes_generated_(0),
       crankshaft_codegen_bytes_generated_(0),
       new_space_allocation_counter_(0),
-      old_generation_allocation_counter_(0),
+      old_generation_allocation_counter_at_last_gc_(0),
       old_generation_size_at_last_gc_(0),
       gcs_since_last_deopt_(0),
       global_pretenuring_feedback_(nullptr),
@@ -163,6 +155,8 @@ Heap::Heap()
       deserialization_complete_(false),
       strong_roots_list_(NULL),
       heap_iterator_depth_(0),
+      embedder_heap_tracer_(nullptr),
+      embedder_reference_reporter_(new TracePossibleWrapperReporter(this)),
       force_oom_(false) {
  // Allow build-time customization of the max semispace size. Building
  // V8 with snapshots and a non-default max semispace size is much
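The constructor changes above rename the external-memory threshold to a soft limit and turn new_space_ into a heap-allocated pointer (it is created in Heap::SetUp() later in this diff). A minimal sketch of the bookkeeping the soft limit feeds, using the field names from this patch; OnExternalMemoryAllocated is a hypothetical hook, and the limit is re-armed after each mark-compact as shown in the PerformGarbageCollection hunk below:

    // Sketch only, not part of the patch: how external_memory_limit_ is
    // consulted when the embedder reports external allocations.
    void OnExternalMemoryAllocated(int64_t delta) {  // hypothetical hook
      external_memory_ += delta;
      if (external_memory_ > external_memory_limit_) {
        // Crossing the soft limit hands control to the reworked
        // ReportExternalMemoryPressure() further down in this diff.
        ReportExternalMemoryPressure();
      }
    }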
@@ -189,7 +183,7 @@ Heap::Heap()
 intptr_t Heap::Capacity() {
   if (!HasBeenSetUp()) return 0;

-  return new_space_.Capacity() + OldGenerationCapacity();
+  return new_space_->Capacity() + OldGenerationCapacity();
 }

 intptr_t Heap::OldGenerationCapacity() {
@@ -199,44 +193,41 @@ intptr_t Heap::OldGenerationCapacity() {
          map_space_->Capacity() + lo_space_->SizeOfObjects();
 }

-
-intptr_t Heap::CommittedOldGenerationMemory() {
+size_t Heap::CommittedOldGenerationMemory() {
   if (!HasBeenSetUp()) return 0;

   return old_space_->CommittedMemory() + code_space_->CommittedMemory() +
          map_space_->CommittedMemory() + lo_space_->Size();
 }

-
-intptr_t Heap::CommittedMemory() {
+size_t Heap::CommittedMemory() {
   if (!HasBeenSetUp()) return 0;

-  return new_space_.CommittedMemory() + CommittedOldGenerationMemory();
+  return new_space_->CommittedMemory() + CommittedOldGenerationMemory();
 }

 size_t Heap::CommittedPhysicalMemory() {
   if (!HasBeenSetUp()) return 0;

-  return new_space_.CommittedPhysicalMemory() +
+  return new_space_->CommittedPhysicalMemory() +
          old_space_->CommittedPhysicalMemory() +
          code_space_->CommittedPhysicalMemory() +
          map_space_->CommittedPhysicalMemory() +
          lo_space_->CommittedPhysicalMemory();
 }

-
-intptr_t Heap::CommittedMemoryExecutable() {
+size_t Heap::CommittedMemoryExecutable() {
   if (!HasBeenSetUp()) return 0;

-  return memory_allocator()->SizeExecutable();
+  return static_cast<size_t>(memory_allocator()->SizeExecutable());
 }

 void Heap::UpdateMaximumCommitted() {
   if (!HasBeenSetUp()) return;

-  intptr_t current_committed_memory = CommittedMemory();
+  const size_t current_committed_memory = CommittedMemory();
   if (current_committed_memory > maximum_committed_) {
     maximum_committed_ = current_committed_memory;
   }
@@ -275,22 +266,6 @@ GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space,
     return MARK_COMPACTOR;
   }

-  // Is enough data promoted to justify a global GC?
-  if (OldGenerationAllocationLimitReached()) {
-    isolate_->counters()->gc_compactor_caused_by_promoted_data()->Increment();
-    *reason = "promotion limit reached";
-    return MARK_COMPACTOR;
-  }
-
-  // Have allocation in OLD and LO failed?
-  if (old_gen_exhausted_) {
-    isolate_->counters()
-        ->gc_compactor_caused_by_oldspace_exhaustion()
-        ->Increment();
-    *reason = "old generations exhausted";
-    return MARK_COMPACTOR;
-  }
-
   // Is there enough space left in OLD to guarantee that a scavenge can
   // succeed?
   //
@@ -300,7 +275,8 @@ GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space,
   // and does not count available bytes already in the old space or code
   // space.  Undercounting is safe---we may get an unrequested full GC when
   // a scavenge would have succeeded.
-  if (memory_allocator()->MaxAvailable() <= new_space_.Size()) {
+  if (static_cast<intptr_t>(memory_allocator()->MaxAvailable()) <=
+      new_space_->Size()) {
     isolate_->counters()
         ->gc_compactor_caused_by_oldspace_exhaustion()
         ->Increment();
@@ -321,18 +297,18 @@ void Heap::ReportStatisticsBeforeGC() {
   // compiled --log-gc is set.  The following logic is used to avoid
   // double logging.
#ifdef DEBUG
-  if (FLAG_heap_stats || FLAG_log_gc) new_space_.CollectStatistics();
+  if (FLAG_heap_stats || FLAG_log_gc) new_space_->CollectStatistics();
   if (FLAG_heap_stats) {
     ReportHeapStatistics("Before GC");
   } else if (FLAG_log_gc) {
-    new_space_.ReportStatistics();
+    new_space_->ReportStatistics();
   }
-  if (FLAG_heap_stats || FLAG_log_gc) new_space_.ClearHistograms();
+  if (FLAG_heap_stats || FLAG_log_gc) new_space_->ClearHistograms();
#else
   if (FLAG_log_gc) {
-    new_space_.CollectStatistics();
-    new_space_.ReportStatistics();
-    new_space_.ClearHistograms();
+    new_space_->CollectStatistics();
+    new_space_->ReportStatistics();
+    new_space_->ClearHistograms();
   }
#endif  // DEBUG
 }
@@ -340,50 +316,51 @@ void Heap::ReportStatisticsBeforeGC() {
 void Heap::PrintShortHeapStatistics() {
   if (!FLAG_trace_gc_verbose) return;
-  PrintIsolate(isolate_, "Memory allocator, used: %6" V8PRIdPTR
-                         " KB, available: %6" V8PRIdPTR " KB\n",
+  PrintIsolate(isolate_,
+               "Memory allocator, used: %6zu KB,"
+               " available: %6zu KB\n",
               memory_allocator()->Size() / KB,
               memory_allocator()->Available() / KB);
   PrintIsolate(isolate_, "New space, used: %6" V8PRIdPTR
                          " KB"
                          ", available: %6" V8PRIdPTR " KB"
-                         ", committed: %6" V8PRIdPTR " KB\n",
-               new_space_.Size() / KB, new_space_.Available() / KB,
-               new_space_.CommittedMemory() / KB);
+                         ", committed: %6zu KB\n",
+               new_space_->Size() / KB, new_space_->Available() / KB,
+               new_space_->CommittedMemory() / KB);
   PrintIsolate(isolate_, "Old space, used: %6" V8PRIdPTR
                          " KB"
                          ", available: %6" V8PRIdPTR " KB"
-                         ", committed: %6" V8PRIdPTR " KB\n",
+                         ", committed: %6zu KB\n",
               old_space_->SizeOfObjects() / KB, old_space_->Available() / KB,
               old_space_->CommittedMemory() / KB);
   PrintIsolate(isolate_, "Code space, used: %6" V8PRIdPTR
                          " KB"
                          ", available: %6" V8PRIdPTR " KB"
-                         ", committed: %6" V8PRIdPTR " KB\n",
+                         ", committed: %6zu KB\n",
               code_space_->SizeOfObjects() / KB, code_space_->Available() / KB,
               code_space_->CommittedMemory() / KB);
   PrintIsolate(isolate_, "Map space, used: %6" V8PRIdPTR
                          " KB"
                          ", available: %6" V8PRIdPTR " KB"
-                         ", committed: %6" V8PRIdPTR " KB\n",
+                         ", committed: %6zu KB\n",
               map_space_->SizeOfObjects() / KB, map_space_->Available() / KB,
               map_space_->CommittedMemory() / KB);
   PrintIsolate(isolate_, "Large object space, used: %6" V8PRIdPTR
                          " KB"
                          ", available: %6" V8PRIdPTR " KB"
-                         ", committed: %6" V8PRIdPTR " KB\n",
+                         ", committed: %6zu KB\n",
               lo_space_->SizeOfObjects() / KB, lo_space_->Available() / KB,
               lo_space_->CommittedMemory() / KB);
   PrintIsolate(isolate_, "All spaces, used: %6" V8PRIdPTR
                          " KB"
                          ", available: %6" V8PRIdPTR " KB"
-                         ", committed: %6" V8PRIdPTR " KB\n",
+                         ", committed: %6zu KB\n",
               this->SizeOfObjects() / KB, this->Available() / KB,
               this->CommittedMemory() / KB);
   PrintIsolate(isolate_, "External memory reported: %6" V8PRIdPTR " KB\n",
@@ -399,13 +376,13 @@ void Heap::ReportStatisticsAfterGC() {
   // NewSpace statistics are logged exactly once when --log-gc is turned on.
#if defined(DEBUG)
   if (FLAG_heap_stats) {
-    new_space_.CollectStatistics();
+    new_space_->CollectStatistics();
     ReportHeapStatistics("After GC");
   } else if (FLAG_log_gc) {
-    new_space_.ReportStatistics();
+    new_space_->ReportStatistics();
   }
#else
-  if (FLAG_log_gc) new_space_.ReportStatistics();
+  if (FLAG_log_gc) new_space_->ReportStatistics();
#endif  // DEBUG
   for (int i = 0; i < static_cast<int>(v8::Isolate::kUseCounterFeatureCount);
        ++i) {
@@ -423,6 +400,7 @@ void Heap::IncrementDeferredCount(v8::Isolate::UseCounterFeature feature) {
   deferred_counters_[feature]++;
 }

+bool Heap::UncommitFromSpace() { return new_space_->UncommitFromSpace(); }

 void Heap::GarbageCollectionPrologue() {
   {
@@ -454,7 +432,7 @@ void Heap::GarbageCollectionPrologue() {
   ReportStatisticsBeforeGC();
#endif  // DEBUG

-  if (new_space_.IsAtMaximumCapacity()) {
+  if (new_space_->IsAtMaximumCapacity()) {
     maximum_size_scavenges_++;
   } else {
     maximum_size_scavenges_ = 0;
@@ -534,8 +512,8 @@ void Heap::MergeAllocationSitePretenuringFeedback(
 class Heap::PretenuringScope {
  public:
   explicit PretenuringScope(Heap* heap) : heap_(heap) {
-    heap_->global_pretenuring_feedback_ = new base::HashMap(
-        base::HashMap::PointersMatch, kInitialFeedbackCapacity);
+    heap_->global_pretenuring_feedback_ =
+        new base::HashMap(kInitialFeedbackCapacity);
   }

   ~PretenuringScope() {
@@ -789,14 +767,16 @@ void Heap::HandleGCRequest() {
   } else if (incremental_marking()->request_type() ==
              IncrementalMarking::COMPLETE_MARKING) {
     incremental_marking()->reset_request_type();
-    CollectAllGarbage(current_gc_flags_, "GC interrupt",
+    CollectAllGarbage(current_gc_flags_,
+                      GarbageCollectionReason::kFinalizeMarkingViaStackGuard,
                       current_gc_callback_flags_);
   } else if (incremental_marking()->request_type() ==
                  IncrementalMarking::FINALIZATION &&
              incremental_marking()->IsMarking() &&
              !incremental_marking()->finalize_marking_completed()) {
     incremental_marking()->reset_request_type();
-    FinalizeIncrementalMarking("GC interrupt: finalize incremental marking");
+    FinalizeIncrementalMarking(
+        GarbageCollectionReason::kFinalizeMarkingViaStackGuard);
   }
 }

@@ -805,10 +785,11 @@ void Heap::ScheduleIdleScavengeIfNeeded(int bytes_allocated) {
   scavenge_job_->ScheduleIdleTaskIfNeeded(this, bytes_allocated);
 }

-
-void Heap::FinalizeIncrementalMarking(const char* gc_reason) {
+void Heap::FinalizeIncrementalMarking(GarbageCollectionReason gc_reason) {
   if (FLAG_trace_incremental_marking) {
-    PrintF("[IncrementalMarking] (%s).\n", gc_reason);
+    isolate()->PrintWithTimestamp(
+        "[IncrementalMarking] (%s).\n",
+        Heap::GarbageCollectionReasonToString(gc_reason));
   }

   HistogramTimerScope incremental_marking_scope(
@@ -856,7 +837,7 @@ HistogramTimer* Heap::GCTypeTimer(GarbageCollector collector) {
   }
 }

-void Heap::CollectAllGarbage(int flags, const char* gc_reason,
+void Heap::CollectAllGarbage(int flags, GarbageCollectionReason gc_reason,
                              const v8::GCCallbackFlags gc_callback_flags) {
   // Since we are ignoring the return value, the exact choice of space does
   // not matter, so long as we do not specify NEW_SPACE, which would not
@@ -866,8 +847,7 @@ void Heap::CollectAllGarbage(int flags, const char* gc_reason,
   set_current_gc_flags(kNoGCFlags);
 }

-
-void Heap::CollectAllAvailableGarbage(const char* gc_reason) {
+void Heap::CollectAllAvailableGarbage(GarbageCollectionReason gc_reason) {
   // Since we are ignoring the return value, the exact choice of space does
   // not matter, so long as we do not specify NEW_SPACE, which would not
   // cause a full GC.
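These hunks switch every collection entry point from a free-form const char* reason to the typed GarbageCollectionReason enum; the human-readable strings reappear only in GarbageCollectionReasonToString(), added near the end of this diff. A hedged before/after sketch of a call site (the flag argument is unchanged; the chosen reason value is just one of the enum members shown later in this patch):

    // Before this patch (illustrative call site):
    //   heap->CollectAllGarbage(Heap::kNoGCFlags, "low memory notification");
    // After this patch:
    heap->CollectAllGarbage(Heap::kNoGCFlags,
                            GarbageCollectionReason::kLowMemoryNotification);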
@@ -897,36 +877,46 @@ void Heap::CollectAllAvailableGarbage(const char* gc_reason) {
     }
   }
   set_current_gc_flags(kNoGCFlags);
-  new_space_.Shrink();
+  new_space_->Shrink();
   UncommitFromSpace();
 }

-
-void Heap::ReportExternalMemoryPressure(const char* gc_reason) {
+void Heap::ReportExternalMemoryPressure() {
+  if (external_memory_ >
+      (external_memory_at_last_mark_compact_ + external_memory_hard_limit())) {
+    CollectAllGarbage(
+        kReduceMemoryFootprintMask | kFinalizeIncrementalMarkingMask,
+        GarbageCollectionReason::kExternalMemoryPressure,
+        static_cast<GCCallbackFlags>(kGCCallbackFlagCollectAllAvailableGarbage |
+                                     kGCCallbackFlagCollectAllExternalMemory));
+    return;
+  }
   if (incremental_marking()->IsStopped()) {
     if (incremental_marking()->CanBeActivated()) {
       StartIncrementalMarking(
-          i::Heap::kNoGCFlags,
+          i::Heap::kNoGCFlags, GarbageCollectionReason::kExternalMemoryPressure,
           static_cast<GCCallbackFlags>(
               kGCCallbackFlagSynchronousPhantomCallbackProcessing |
-              kGCCallbackFlagCollectAllExternalMemory),
-          gc_reason);
+              kGCCallbackFlagCollectAllExternalMemory));
     } else {
-      CollectAllGarbage(i::Heap::kNoGCFlags, gc_reason,
+      CollectAllGarbage(i::Heap::kNoGCFlags,
+                        GarbageCollectionReason::kExternalMemoryPressure,
                         kGCCallbackFlagSynchronousPhantomCallbackProcessing);
     }
   } else {
     // Incremental marking is turned on an has already been started.
-
-    // TODO(mlippautz): Compute the time slice for incremental marking based on
-    // memory pressure.
-    double deadline = MonotonicallyIncreasingTimeInMs() +
-                      FLAG_external_allocation_limit_incremental_time;
+    const double pressure =
+        static_cast<double>(external_memory_ -
+                            external_memory_at_last_mark_compact_ -
+                            kExternalAllocationSoftLimit) /
+        external_memory_hard_limit();
+    DCHECK_GE(1, pressure);
+    const double kMaxStepSizeOnExternalLimit = 25;
+    const double deadline = MonotonicallyIncreasingTimeInMs() +
+                            pressure * kMaxStepSizeOnExternalLimit;
     incremental_marking()->AdvanceIncrementalMarking(
-        deadline,
-        IncrementalMarking::StepActions(IncrementalMarking::GC_VIA_STACK_GUARD,
-                                        IncrementalMarking::FORCE_MARKING,
-                                        IncrementalMarking::FORCE_COMPLETION));
+        deadline, IncrementalMarking::GC_VIA_STACK_GUARD,
+        IncrementalMarking::FORCE_COMPLETION, StepOrigin::kV8);
   }
 }

@@ -936,7 +926,7 @@ void Heap::EnsureFillerObjectAtTop() {
   // evacuation of a non-full new space (or if we are on the last page) there
   // may be uninitialized memory behind top. We fill the remainder of the page
   // with a filler.
-  Address to_top = new_space_.top();
+  Address to_top = new_space_->top();
   Page* page = Page::FromAddress(to_top - kPointerSize);
   if (page->Contains(to_top)) {
     int remaining_in_page = static_cast<int>(page->area_end() - to_top);
@@ -944,8 +934,8 @@ void Heap::EnsureFillerObjectAtTop() {
   }
 }

-
-bool Heap::CollectGarbage(GarbageCollector collector, const char* gc_reason,
+bool Heap::CollectGarbage(GarbageCollector collector,
+                          GarbageCollectionReason gc_reason,
                           const char* collector_reason,
                           const v8::GCCallbackFlags gc_callback_flags) {
   // The VM is in the GC state until exiting this function.
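In the reworked ReportExternalMemoryPressure() above, exceeding the hard limit forces an immediate full GC, while the band between the soft and hard limits scales an incremental-marking deadline linearly with the overshoot. A worked example of that arithmetic with illustrative sizes (the real limits come from heap.h and are not shown in this diff):

    // Suppose 40 MB of external memory accrued past the soft limit since the
    // last mark-compact, against a 200 MB hard limit (made-up numbers):
    //   pressure = 40.0 / 200.0 = 0.2
    //   deadline = now + 0.2 * kMaxStepSizeOnExternalLimit = now + 5 ms
    // The closer external allocation gets to the hard limit, the longer the
    // marking step it is charged for, capped at 25 ms per report.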
@@ -964,19 +954,22 @@ bool Heap::CollectGarbage(GarbageCollector collector, const char* gc_reason,

   if (collector == SCAVENGER && !incremental_marking()->IsStopped()) {
     if (FLAG_trace_incremental_marking) {
-      PrintF("[IncrementalMarking] Scavenge during marking.\n");
+      isolate()->PrintWithTimestamp(
+          "[IncrementalMarking] Scavenge during marking.\n");
     }
   }

-  if (collector == MARK_COMPACTOR && !ShouldFinalizeIncrementalMarking() &&
-      !ShouldAbortIncrementalMarking() && !incremental_marking()->IsStopped() &&
-      !incremental_marking()->should_hurry() && FLAG_incremental_marking &&
-      OldGenerationAllocationLimitReached()) {
+  if (collector == MARK_COMPACTOR && FLAG_incremental_marking &&
+      !ShouldFinalizeIncrementalMarking() && !ShouldAbortIncrementalMarking() &&
+      !incremental_marking()->IsStopped() &&
+      !incremental_marking()->should_hurry() &&
+      !IsCloseToOutOfMemory(new_space_->Capacity())) {
     if (!incremental_marking()->IsComplete() &&
-        !mark_compact_collector()->marking_deque_.IsEmpty() &&
+        !mark_compact_collector()->marking_deque()->IsEmpty() &&
         !FLAG_gc_global) {
       if (FLAG_trace_incremental_marking) {
-        PrintF("[IncrementalMarking] Delaying MarkSweep.\n");
+        isolate()->PrintWithTimestamp(
+            "[IncrementalMarking] Delaying MarkSweep.\n");
       }
       collector = SCAVENGER;
       collector_reason = "incremental marking delaying mark-sweep";
@@ -1041,9 +1034,11 @@ bool Heap::CollectGarbage(GarbageCollector collector, const char* gc_reason,

   // Start incremental marking for the next cycle. The heap snapshot
   // generator needs incremental marking to stay off after it aborted.
-  if (!ShouldAbortIncrementalMarking() && incremental_marking()->IsStopped() &&
-      incremental_marking()->ShouldActivateEvenWithoutIdleNotification()) {
-    StartIncrementalMarking(kNoGCFlags, kNoGCCallbackFlags, "GC epilogue");
+  // We do this only for scavenger to avoid a loop where mark-compact
+  // causes another mark-compact.
+  if (collector == SCAVENGER && !ShouldAbortIncrementalMarking()) {
+    StartIncrementalMarkingIfAllocationLimitIsReached(kNoGCFlags,
+                                                      kNoGCCallbackFlags);
   }

   return next_gc_likely_to_collect_more;
@@ -1069,21 +1064,33 @@ int Heap::NotifyContextDisposed(bool dependant_context) {
   return ++contexts_disposed_;
 }

-
 void Heap::StartIncrementalMarking(int gc_flags,
-                                   const GCCallbackFlags gc_callback_flags,
-                                   const char* reason) {
+                                   GarbageCollectionReason gc_reason,
+                                   GCCallbackFlags gc_callback_flags) {
   DCHECK(incremental_marking()->IsStopped());
   set_current_gc_flags(gc_flags);
   current_gc_callback_flags_ = gc_callback_flags;
-  incremental_marking()->Start(reason);
+  incremental_marking()->Start(gc_reason);
 }

+void Heap::StartIncrementalMarkingIfAllocationLimitIsReached(
+    int gc_flags, const GCCallbackFlags gc_callback_flags) {
+  if (incremental_marking()->IsStopped()) {
+    IncrementalMarkingLimit reached_limit = IncrementalMarkingLimitReached();
+    if (reached_limit == IncrementalMarkingLimit::kSoftLimit) {
+      incremental_marking()->incremental_marking_job()->ScheduleTask(this);
+    } else if (reached_limit == IncrementalMarkingLimit::kHardLimit) {
+      StartIncrementalMarking(gc_flags,
+                              GarbageCollectionReason::kAllocationLimit,
+                              gc_callback_flags);
+    }
+  }
+}

-void Heap::StartIdleIncrementalMarking() {
+void Heap::StartIdleIncrementalMarking(GarbageCollectionReason gc_reason) {
   gc_idle_time_handler_->ResetNoProgressCounter();
-  StartIncrementalMarking(kReduceMemoryFootprintMask, kNoGCCallbackFlags,
-                          "idle");
+  StartIncrementalMarking(kReduceMemoryFootprintMask, gc_reason,
+                          kNoGCCallbackFlags);
 }


@@ -1192,17 +1199,15 @@ bool Heap::ReserveSpace(Reservation* reservations, List<Address>* maps) {
       }
       if (perform_gc) {
         if (space == NEW_SPACE) {
-          CollectGarbage(NEW_SPACE, "failed to reserve space in the new space");
+          CollectGarbage(NEW_SPACE, GarbageCollectionReason::kDeserializer);
         } else {
           if (counter > 1) {
             CollectAllGarbage(
                 kReduceMemoryFootprintMask | kAbortIncrementalMarkingMask,
-                "failed to reserve space in paged or large "
-                "object space, trying to reduce memory footprint");
+                GarbageCollectionReason::kDeserializer);
           } else {
-            CollectAllGarbage(
-                kAbortIncrementalMarkingMask,
-                "failed to reserve space in paged or large object space");
+            CollectAllGarbage(kAbortIncrementalMarkingMask,
+                              GarbageCollectionReason::kDeserializer);
           }
         }
         gc_performed = true;
@@ -1216,7 +1221,7 @@ bool Heap::ReserveSpace(Reservation* reservations, List<Address>* maps) {

 void Heap::EnsureFromSpaceIsCommitted() {
-  if (new_space_.CommitFromSpaceIfNeeded()) return;
+  if (new_space_->CommitFromSpaceIfNeeded()) return;

   // Committing memory to from space failed.
   // Memory is exhausted and we will die.
@@ -1264,11 +1269,6 @@ void Heap::UpdateSurvivalStatistics(int start_new_space_size) {

   double survival_rate = promotion_ratio_ + semi_space_copied_rate_;
   tracer()->AddSurvivalRatio(survival_rate);
-  if (survival_rate > kYoungSurvivalRateHighThreshold) {
-    high_survival_rate_period_length_++;
-  } else {
-    high_survival_rate_period_length_ = 0;
-  }
 }
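The new StartIncrementalMarkingIfAllocationLimitIsReached() separates two urgency levels: at the soft limit marking is only scheduled through the incremental-marking job (a posted task), while the hard limit starts marking synchronously with kAllocationLimit as the recorded reason. The same decision written as a switch, purely as a reading aid and not part of the patch:

    // Sketch of the control flow added above.
    switch (IncrementalMarkingLimitReached()) {
      case IncrementalMarkingLimit::kSoftLimit:
        // Deferred start: a posted task will begin marking shortly.
        incremental_marking()->incremental_marking_job()->ScheduleTask(this);
        break;
      case IncrementalMarkingLimit::kHardLimit:
        // Urgent start: begin marking in the current allocation path.
        StartIncrementalMarking(kNoGCFlags,
                                GarbageCollectionReason::kAllocationLimit,
                                kNoGCCallbackFlags);
        break;
      case IncrementalMarkingLimit::kNoLimit:
        break;  // Nothing to do yet.
    }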

 bool Heap::PerformGarbageCollection(
@@ -1303,14 +1303,7 @@ bool Heap::PerformGarbageCollection(

   EnsureFromSpaceIsCommitted();

-  int start_new_space_size = Heap::new_space()->SizeAsInt();
-
-  if (IsHighSurvivalRate()) {
-    // We speed up the incremental marker if it is running so that it
-    // does not fall behind the rate of promotion, which would cause a
-    // constantly growing old space.
-    incremental_marking()->NotifyOfHighPromotionRate();
-  }
+  int start_new_space_size = static_cast<int>(Heap::new_space()->Size());

   {
     Heap::PretenuringScope pretenuring_scope(this);
@@ -1319,11 +1312,10 @@ bool Heap::PerformGarbageCollection(
       UpdateOldGenerationAllocationCounter();
       // Perform mark-sweep with optional compaction.
       MarkCompact();
-      old_gen_exhausted_ = false;
       old_generation_size_configured_ = true;
       // This should be updated before PostGarbageCollectionProcessing, which
       // can cause another GC. Take into account the objects promoted during GC.
-      old_generation_allocation_counter_ +=
+      old_generation_allocation_counter_at_last_gc_ +=
           static_cast<size_t>(promoted_objects_size_);
       old_generation_size_at_last_gc_ = PromotedSpaceSizeOfObjects();
     } else {
@@ -1360,7 +1352,7 @@ bool Heap::PerformGarbageCollection(
   if (collector == MARK_COMPACTOR) {
     // Register the amount of external allocated memory.
     external_memory_at_last_mark_compact_ = external_memory_;
-    external_memory_limit_ = external_memory_ + kExternalAllocationLimit;
+    external_memory_limit_ = external_memory_ + kExternalAllocationSoftLimit;
     SetOldGenerationAllocationLimit(old_gen_size, gc_speed, mutator_speed);
   } else if (HasLowYoungGenerationAllocationRate() &&
              old_generation_size_configured_) {
@@ -1491,18 +1483,18 @@ void Heap::MarkCompactPrologue() {

 void Heap::CheckNewSpaceExpansionCriteria() {
   if (FLAG_experimental_new_space_growth_heuristic) {
-    if (new_space_.TotalCapacity() < new_space_.MaximumCapacity() &&
-        survived_last_scavenge_ * 100 / new_space_.TotalCapacity() >= 10) {
+    if (new_space_->TotalCapacity() < new_space_->MaximumCapacity() &&
+        survived_last_scavenge_ * 100 / new_space_->TotalCapacity() >= 10) {
       // Grow the size of new space if there is room to grow, and more than 10%
       // have survived the last scavenge.
-      new_space_.Grow();
+      new_space_->Grow();
       survived_since_last_expansion_ = 0;
     }
-  } else if (new_space_.TotalCapacity() < new_space_.MaximumCapacity() &&
-             survived_since_last_expansion_ > new_space_.TotalCapacity()) {
+  } else if (new_space_->TotalCapacity() < new_space_->MaximumCapacity() &&
+             survived_since_last_expansion_ > new_space_->TotalCapacity()) {
     // Grow the size of new space if there is room to grow, and enough data
     // has survived scavenge since the last expansion.
-    new_space_.Grow();
+    new_space_->Grow();
     survived_since_last_expansion_ = 0;
   }
 }
@@ -1541,6 +1533,11 @@ void PromotionQueue::Initialize() {
   emergency_stack_ = NULL;
 }

+void PromotionQueue::Destroy() {
+  DCHECK(is_empty());
+  delete emergency_stack_;
+  emergency_stack_ = NULL;
+}

 void PromotionQueue::RelocateQueueHead() {
   DCHECK(emergency_stack_ == NULL);
@@ -1615,13 +1612,13 @@ void Heap::Scavenge() {
     // Register found wrappers with embedder so it can add them to its marking
     // deque and correctly manage the case when v8 scavenger collects the
     // wrappers by either keeping wrappables alive, or cleaning marking deque.
-    mark_compact_collector()->RegisterWrappersWithEmbedderHeapTracer();
+    RegisterWrappersWithEmbedderHeapTracer();
   }

   // Flip the semispaces.  After flipping, to space is empty, from space has
   // live objects.
-  new_space_.Flip();
-  new_space_.ResetAllocationInfo();
+  new_space_->Flip();
+  new_space_->ResetAllocationInfo();

   // We need to sweep newly copied objects which can be either in the
   // to space or promoted to the old generation.  For to-space
@@ -1640,7 +1637,7 @@ void Heap::Scavenge() {
   // for the addresses of promoted objects: every object promoted
   // frees up its size in bytes from the top of the new space, and
   // objects are at least one pointer in size.
-  Address new_space_front = new_space_.ToSpaceStart();
+  Address new_space_front = new_space_->ToSpaceStart();
   promotion_queue_.Initialize();

   PromotionMode promotion_mode = CurrentPromotionMode();
@@ -1737,16 +1734,17 @@ void Heap::Scavenge() {
   ScavengeWeakObjectRetainer weak_object_retainer(this);
   ProcessYoungWeakReferences(&weak_object_retainer);

-  DCHECK(new_space_front == new_space_.top());
+  DCHECK(new_space_front == new_space_->top());

   // Set age mark.
-  new_space_.set_age_mark(new_space_.top());
+  new_space_->set_age_mark(new_space_->top());

   ArrayBufferTracker::FreeDeadInNewSpace(this);

   // Update how much has survived scavenge.
-  IncrementYoungSurvivorsCounter(static_cast<int>(
-      (PromotedSpaceSizeOfObjects() - survived_watermark) + new_space_.Size()));
+  IncrementYoungSurvivorsCounter(
+      static_cast<int>((PromotedSpaceSizeOfObjects() - survived_watermark) +
+                       new_space_->Size()));

   LOG(isolate_, ResourceEvent("scavenge", "end"));

@@ -1910,11 +1908,11 @@ Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
                          Address new_space_front,
                          PromotionMode promotion_mode) {
   do {
-    SemiSpace::AssertValidRange(new_space_front, new_space_.top());
+    SemiSpace::AssertValidRange(new_space_front, new_space_->top());
     // The addresses new_space_front and new_space_.top() define a
     // queue of unprocessed copied objects.  Process them until the
     // queue is empty.
-    while (new_space_front != new_space_.top()) {
+    while (new_space_front != new_space_->top()) {
       if (!Page::IsAlignedToPageSize(new_space_front)) {
         HeapObject* object = HeapObject::FromAddress(new_space_front);
         if (promotion_mode == PROMOTE_MARKED) {
@@ -1953,7 +1951,7 @@ Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,

     // Take another spin if there are now unswept objects in new space
     // (there are currently no more unswept promoted objects).
-  } while (new_space_front != new_space_.top());
+  } while (new_space_front != new_space_->top());

   return new_space_front;
 }

@@ -2283,6 +2281,8 @@ bool Heap::CreateInitialMaps() {
     DCHECK_NE(fixed_array_map(), fixed_cow_array_map());

     ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, scope_info)
+    ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, module_info_entry)
+    ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, module_info)
     ALLOCATE_PRIMITIVE_MAP(HEAP_NUMBER_TYPE, HeapNumber::kSize, heap_number,
                            Context::NUMBER_FUNCTION_INDEX)
     ALLOCATE_MAP(MUTABLE_HEAP_NUMBER_TYPE, HeapNumber::kSize,
@@ -2391,6 +2391,12 @@ bool Heap::CreateInitialMaps() {
   }

   {
+    AllocationResult allocation = AllocateEmptyScopeInfo();
+    if (!allocation.To(&obj)) return false;
+  }
+
+  set_empty_scope_info(ScopeInfo::cast(obj));
+  {
     AllocationResult allocation = Allocate(boolean_map(), OLD_SPACE);
     if (!allocation.To(&obj)) return false;
   }
@@ -2432,7 +2438,7 @@ AllocationResult Heap::AllocateHeapNumber(double value, MutableMode mode,
   // Statically ensure that it is safe to allocate heap numbers in paged
   // spaces.
   int size = HeapNumber::kSize;
-  STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxRegularHeapObjectSize);
+  STATIC_ASSERT(HeapNumber::kSize <= kMaxRegularHeapObjectSize);

   AllocationSpace space = SelectSpace(pretenure);

@@ -2452,7 +2458,7 @@ AllocationResult Heap::AllocateHeapNumber(double value, MutableMode mode,
   AllocationResult Heap::Allocate##Type(lane_type lanes[lane_count],   \
                                         PretenureFlag pretenure) {     \
     int size = Type::kSize;                                            \
-    STATIC_ASSERT(Type::kSize <= Page::kMaxRegularHeapObjectSize);     \
+    STATIC_ASSERT(Type::kSize <= kMaxRegularHeapObjectSize);           \
                                                                        \
     AllocationSpace space = SelectSpace(pretenure);                    \
                                                                        \
@@ -2476,7 +2482,7 @@ SIMD128_TYPES(SIMD_ALLOCATE_DEFINITION)

 AllocationResult Heap::AllocateCell(Object* value) {
   int size = Cell::kSize;
-  STATIC_ASSERT(Cell::kSize <= Page::kMaxRegularHeapObjectSize);
+  STATIC_ASSERT(Cell::kSize <= kMaxRegularHeapObjectSize);

   HeapObject* result = nullptr;
   {
@@ -2488,10 +2494,9 @@ AllocationResult Heap::AllocateCell(Object* value) {
   return result;
 }

-
 AllocationResult Heap::AllocatePropertyCell() {
   int size = PropertyCell::kSize;
-  STATIC_ASSERT(PropertyCell::kSize <= Page::kMaxRegularHeapObjectSize);
+  STATIC_ASSERT(PropertyCell::kSize <= kMaxRegularHeapObjectSize);

   HeapObject* result = nullptr;
   AllocationResult allocation = AllocateRaw(size, OLD_SPACE);
@@ -2509,7 +2514,7 @@ AllocationResult Heap::AllocatePropertyCell() {

 AllocationResult Heap::AllocateWeakCell(HeapObject* value) {
   int size = WeakCell::kSize;
-  STATIC_ASSERT(WeakCell::kSize <= Page::kMaxRegularHeapObjectSize);
+  STATIC_ASSERT(WeakCell::kSize <= kMaxRegularHeapObjectSize);

   HeapObject* result = nullptr;
   {
     AllocationResult allocation = AllocateRaw(size, OLD_SPACE);
@@ -2729,12 +2734,6 @@ void Heap::CreateInitialObjects() {
#undef SYMBOL_INIT
   }

-  // Allocate the dictionary of intrinsic function names.
-  Handle<NameDictionary> intrinsic_names =
-      NameDictionary::New(isolate(), Runtime::kNumFunctions, TENURED);
-  Runtime::InitializeIntrinsicFunctionNames(isolate(), intrinsic_names);
-  set_intrinsic_function_names(*intrinsic_names);
-
   Handle<NameDictionary> empty_properties_dictionary =
       NameDictionary::New(isolate(), 0, TENURED);
   empty_properties_dictionary->SetRequiresCopyOnCapacityChange();
@@ -2777,18 +2776,18 @@ void Heap::CreateInitialObjects() {

   {
     StaticFeedbackVectorSpec spec;
-    FeedbackVectorSlot load_ic_slot = spec.AddLoadICSlot();
-    FeedbackVectorSlot keyed_load_ic_slot = spec.AddKeyedLoadICSlot();
-    FeedbackVectorSlot store_ic_slot = spec.AddStoreICSlot();
-    FeedbackVectorSlot keyed_store_ic_slot = spec.AddKeyedStoreICSlot();
-
-    DCHECK_EQ(load_ic_slot,
-              FeedbackVectorSlot(TypeFeedbackVector::kDummyLoadICSlot));
-    DCHECK_EQ(keyed_load_ic_slot,
+    FeedbackVectorSlot slot = spec.AddLoadICSlot();
+    DCHECK_EQ(slot, FeedbackVectorSlot(TypeFeedbackVector::kDummyLoadICSlot));
+
+    slot = spec.AddKeyedLoadICSlot();
+    DCHECK_EQ(slot,
               FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedLoadICSlot));
-    DCHECK_EQ(store_ic_slot,
-              FeedbackVectorSlot(TypeFeedbackVector::kDummyStoreICSlot));
-    DCHECK_EQ(keyed_store_ic_slot,
+
+    slot = spec.AddStoreICSlot();
+    DCHECK_EQ(slot, FeedbackVectorSlot(TypeFeedbackVector::kDummyStoreICSlot));
+
+    slot = spec.AddKeyedStoreICSlot();
+    DCHECK_EQ(slot,
               FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedStoreICSlot));

     Handle<TypeFeedbackMetadata> dummy_metadata =
@@ -2796,19 +2795,36 @@ void Heap::CreateInitialObjects() {
     Handle<TypeFeedbackVector> dummy_vector =
         TypeFeedbackVector::New(isolate(), dummy_metadata);

-    Object* megamorphic = *TypeFeedbackVector::MegamorphicSentinel(isolate());
-    dummy_vector->Set(load_ic_slot, megamorphic, SKIP_WRITE_BARRIER);
-    dummy_vector->Set(keyed_load_ic_slot, megamorphic, SKIP_WRITE_BARRIER);
-    dummy_vector->Set(store_ic_slot, megamorphic, SKIP_WRITE_BARRIER);
-    dummy_vector->Set(keyed_store_ic_slot, megamorphic, SKIP_WRITE_BARRIER);
-
     set_dummy_vector(*dummy_vector);
+
+    // Now initialize dummy vector's entries.
+    LoadICNexus(isolate()).ConfigureMegamorphic();
+    StoreICNexus(isolate()).ConfigureMegamorphic();
+    KeyedLoadICNexus(isolate()).ConfigureMegamorphicKeyed(PROPERTY);
+    KeyedStoreICNexus(isolate()).ConfigureMegamorphicKeyed(PROPERTY);
   }

   {
+    // Create a canonical empty TypeFeedbackVector, which is shared by all
+    // functions that don't need actual type feedback slots. Note however
+    // that all these functions will share the same invocation count, but
+    // that shouldn't matter since we only use the invocation count to
+    // relativize the absolute call counts, but we can only have call counts
+    // if we have actual feedback slots.
+    Handle<FixedArray> empty_type_feedback_vector = factory->NewFixedArray(
+        TypeFeedbackVector::kReservedIndexCount, TENURED);
+    empty_type_feedback_vector->set(TypeFeedbackVector::kMetadataIndex,
+                                    empty_fixed_array());
+    empty_type_feedback_vector->set(TypeFeedbackVector::kInvocationCountIndex,
+                                    Smi::FromInt(0));
+    set_empty_type_feedback_vector(*empty_type_feedback_vector);
+
+    // We use a canonical empty LiteralsArray for all functions that neither
+    // have literals nor need a TypeFeedbackVector (besides the invocation
+    // count special slot).
     Handle<FixedArray> empty_literals_array =
         factory->NewFixedArray(1, TENURED);
-    empty_literals_array->set(0, *factory->empty_fixed_array());
+    empty_literals_array->set(0, *empty_type_feedback_vector);
     set_empty_literals_array(*empty_literals_array);
   }

@@ -2882,6 +2898,10 @@ void Heap::CreateInitialObjects() {
       handle(Smi::FromInt(Isolate::kArrayProtectorValid), isolate()));
   set_species_protector(*species_cell);

+  cell = factory->NewPropertyCell();
+  cell->set_value(Smi::FromInt(Isolate::kArrayProtectorValid));
+  set_string_length_protector(*cell);
+
   set_serialized_templates(empty_fixed_array());

   set_weak_stack_trace_list(Smi::FromInt(0));
@@ -3009,7 +3029,7 @@ FixedTypedArrayBase* Heap::EmptyFixedTypedArrayForMap(Map* map) {
 AllocationResult Heap::AllocateForeign(Address address,
                                        PretenureFlag pretenure) {
   // Statically ensure that it is safe to allocate foreigns in paged spaces.
-  STATIC_ASSERT(Foreign::kSize <= Page::kMaxRegularHeapObjectSize);
+  STATIC_ASSERT(Foreign::kSize <= kMaxRegularHeapObjectSize);
   AllocationSpace space = (pretenure == TENURED) ? OLD_SPACE : NEW_SPACE;
   Foreign* result = nullptr;
   AllocationResult allocation = Allocate(foreign_map(), space);
@@ -3776,6 +3796,18 @@ AllocationResult Heap::AllocateEmptyFixedArray() {
   return result;
 }

+AllocationResult Heap::AllocateEmptyScopeInfo() {
+  int size = FixedArray::SizeFor(0);
+  HeapObject* result = nullptr;
+  {
+    AllocationResult allocation = AllocateRaw(size, OLD_SPACE);
+    if (!allocation.To(&result)) return allocation;
+  }
+  // Initialize the object.
+  result->set_map_no_write_barrier(scope_info_map());
+  FixedArray::cast(result)->set_length(0);
+  return result;
+}

 AllocationResult Heap::CopyAndTenureFixedCOWArray(FixedArray* src) {
   if (!InNewSpace(src)) {
@@ -3908,7 +3940,14 @@ AllocationResult Heap::AllocateRawFixedArray(int length,
   int size = FixedArray::SizeFor(length);
   AllocationSpace space = SelectSpace(pretenure);

-  return AllocateRaw(size, space);
+  AllocationResult result = AllocateRaw(size, space);
+  if (!result.IsRetry() && size > kMaxRegularHeapObjectSize &&
+      FLAG_use_marking_progress_bar) {
+    MemoryChunk* chunk =
+        MemoryChunk::FromAddress(result.ToObjectChecked()->address());
+    chunk->SetFlag(MemoryChunk::HAS_PROGRESS_BAR);
+  }
+  return result;
 }
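AllocateRawFixedArray() now tags arrays larger than kMaxRegularHeapObjectSize with MemoryChunk::HAS_PROGRESS_BAR (behind --use-marking-progress-bar), so incremental marking can scan such an array across several steps rather than in one pause. A sketch of the idea, under the assumption that the chunk stores a resume offset; VisitPointersIn and kStepBytes are hypothetical names used only for illustration:

    // Sketch only: resumable scanning of one large object during a marking step.
    intptr_t scanned = chunk->progress_bar();             // bytes already visited
    intptr_t end = Min(scanned + kStepBytes, object_size);  // kStepBytes: assumed budget
    VisitPointersIn(object, scanned, end);                // hypothetical helper
    chunk->set_progress_bar(end);                         // next step resumes here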
@@ -3988,7 +4027,7 @@ AllocationResult Heap::AllocateRawFixedDoubleArray(int length,

 AllocationResult Heap::AllocateSymbol() {
   // Statically ensure that it is safe to allocate symbols in paged spaces.
-  STATIC_ASSERT(Symbol::kSize <= Page::kMaxRegularHeapObjectSize);
+  STATIC_ASSERT(Symbol::kSize <= kMaxRegularHeapObjectSize);

   HeapObject* result = nullptr;
   AllocationResult allocation = AllocateRaw(Symbol::kSize, OLD_SPACE);
@@ -4049,7 +4088,8 @@ bool Heap::IsHeapIterable() {
 void Heap::MakeHeapIterable() {
   DCHECK(AllowHeapAllocation::IsAllowed());
   if (!IsHeapIterable()) {
-    CollectAllGarbage(kMakeHeapIterableMask, "Heap::MakeHeapIterable");
+    CollectAllGarbage(kMakeHeapIterableMask,
+                      GarbageCollectionReason::kMakeHeapIterable);
   }
   if (mark_compact_collector()->sweeping_in_progress()) {
     mark_compact_collector()->EnsureSweepingCompleted();
@@ -4081,10 +4121,10 @@ double Heap::YoungGenerationMutatorUtilization() {
       tracer()->ScavengeSpeedInBytesPerMillisecond(kForSurvivedObjects);
   double result = ComputeMutatorUtilization(mutator_speed, gc_speed);
   if (FLAG_trace_mutator_utilization) {
-    PrintIsolate(isolate(),
-                 "Young generation mutator utilization = %.3f ("
-                 "mutator_speed=%.f, gc_speed=%.f)\n",
-                 result, mutator_speed, gc_speed);
+    isolate()->PrintWithTimestamp(
+        "Young generation mutator utilization = %.3f ("
+        "mutator_speed=%.f, gc_speed=%.f)\n",
+        result, mutator_speed, gc_speed);
   }
   return result;
 }
@@ -4097,10 +4137,10 @@ double Heap::OldGenerationMutatorUtilization() {
       tracer()->CombinedMarkCompactSpeedInBytesPerMillisecond());
   double result = ComputeMutatorUtilization(mutator_speed, gc_speed);
   if (FLAG_trace_mutator_utilization) {
-    PrintIsolate(isolate(),
-                 "Old generation mutator utilization = %.3f ("
-                 "mutator_speed=%.f, gc_speed=%.f)\n",
-                 result, mutator_speed, gc_speed);
+    isolate()->PrintWithTimestamp(
+        "Old generation mutator utilization = %.3f ("
+        "mutator_speed=%.f, gc_speed=%.f)\n",
+        result, mutator_speed, gc_speed);
   }
   return result;
 }
@@ -4170,44 +4210,49 @@ void Heap::ReduceNewSpaceSize() {
   if (ShouldReduceMemory() ||
       ((allocation_throughput != 0) &&
        (allocation_throughput < kLowAllocationThroughput))) {
-    new_space_.Shrink();
+    new_space_->Shrink();
     UncommitFromSpace();
   }
 }

+bool Heap::MarkingDequesAreEmpty() {
+  return mark_compact_collector()->marking_deque()->IsEmpty() &&
+         (!UsingEmbedderHeapTracer() ||
+          (wrappers_to_trace() == 0 &&
+           embedder_heap_tracer()->NumberOfWrappersToTrace() == 0));
+}

-void Heap::FinalizeIncrementalMarkingIfComplete(const char* comment) {
+void Heap::FinalizeIncrementalMarkingIfComplete(
+    GarbageCollectionReason gc_reason) {
   if (incremental_marking()->IsMarking() &&
       (incremental_marking()->IsReadyToOverApproximateWeakClosure() ||
        (!incremental_marking()->finalize_marking_completed() &&
-        mark_compact_collector()->marking_deque()->IsEmpty()))) {
-    FinalizeIncrementalMarking(comment);
+        MarkingDequesAreEmpty()))) {
+    FinalizeIncrementalMarking(gc_reason);
   } else if (incremental_marking()->IsComplete() ||
              (mark_compact_collector()->marking_deque()->IsEmpty())) {
-    CollectAllGarbage(current_gc_flags_, comment);
+    CollectAllGarbage(current_gc_flags_, gc_reason);
   }
 }

-
-bool Heap::TryFinalizeIdleIncrementalMarking(double idle_time_in_ms) {
+bool Heap::TryFinalizeIdleIncrementalMarking(
+    double idle_time_in_ms, GarbageCollectionReason gc_reason) {
   size_t size_of_objects = static_cast<size_t>(SizeOfObjects());
   double final_incremental_mark_compact_speed_in_bytes_per_ms =
       tracer()->FinalIncrementalMarkCompactSpeedInBytesPerMillisecond();
   if (incremental_marking()->IsReadyToOverApproximateWeakClosure() ||
       (!incremental_marking()->finalize_marking_completed() &&
-        mark_compact_collector()->marking_deque()->IsEmpty() &&
+        MarkingDequesAreEmpty() &&
        gc_idle_time_handler_->ShouldDoOverApproximateWeakClosure(
            idle_time_in_ms))) {
-    FinalizeIncrementalMarking(
-        "Idle notification: finalize incremental marking");
+    FinalizeIncrementalMarking(gc_reason);
     return true;
   } else if (incremental_marking()->IsComplete() ||
-             (mark_compact_collector()->marking_deque()->IsEmpty() &&
+             (MarkingDequesAreEmpty() &&
               gc_idle_time_handler_->ShouldDoFinalIncrementalMarkCompact(
                   idle_time_in_ms, size_of_objects,
                   final_incremental_mark_compact_speed_in_bytes_per_ms))) {
-    CollectAllGarbage(current_gc_flags_,
-                      "idle notification: finalize incremental marking");
+    CollectAllGarbage(current_gc_flags_, gc_reason);
     return true;
   }
   return false;
@@ -4267,22 +4312,23 @@ bool Heap::PerformIdleTimeAction(GCIdleTimeAction action,
       result = true;
       break;
     case DO_INCREMENTAL_STEP: {
-      if (incremental_marking()->incremental_marking_job()->IdleTaskPending()) {
-        result = true;
-      } else {
-        incremental_marking()
-            ->incremental_marking_job()
-            ->NotifyIdleTaskProgress();
-        result = IncrementalMarkingJob::IdleTask::Step(this, deadline_in_ms) ==
-                 IncrementalMarkingJob::IdleTask::kDone;
+      const double remaining_idle_time_in_ms =
+          incremental_marking()->AdvanceIncrementalMarking(
+              deadline_in_ms, IncrementalMarking::NO_GC_VIA_STACK_GUARD,
+              IncrementalMarking::FORCE_COMPLETION, StepOrigin::kTask);
+      if (remaining_idle_time_in_ms > 0.0) {
+        TryFinalizeIdleIncrementalMarking(
+            remaining_idle_time_in_ms,
+            GarbageCollectionReason::kFinalizeMarkingViaTask);
       }
+      result = incremental_marking()->IsStopped();
       break;
     }
     case DO_FULL_GC: {
       DCHECK(contexts_disposed_ > 0);
       HistogramTimerScope scope(isolate_->counters()->gc_context());
       TRACE_EVENT0("v8", "V8.GCContext");
-      CollectAllGarbage(kNoGCFlags, "idle notification: contexts disposed");
+      CollectAllGarbage(kNoGCFlags, GarbageCollectionReason::kContextDisposal);
       break;
     }
     case DO_NOTHING:
@@ -4328,8 +4374,7 @@ void Heap::IdleNotificationEpilogue(GCIdleTimeAction action,

   if ((FLAG_trace_idle_notification && action.type > DO_NOTHING) ||
       FLAG_trace_idle_notification_verbose) {
-    PrintIsolate(isolate_, "%8.0f ms: ", isolate()->time_millis_since_init());
-    PrintF(
+    isolate_->PrintWithTimestamp(
        "Idle notification: requested idle time %.2f ms, used idle time %.2f "
        "ms, deadline usage %.2f ms [",
        idle_time_in_ms, idle_time_in_ms - deadline_difference,
@@ -4416,10 +4461,11 @@ void Heap::CheckMemoryPressure() {
     }
   }
   if (memory_pressure_level_.Value() == MemoryPressureLevel::kCritical) {
-    CollectGarbageOnMemoryPressure("memory pressure");
+    CollectGarbageOnMemoryPressure();
   } else if (memory_pressure_level_.Value() == MemoryPressureLevel::kModerate) {
     if (FLAG_incremental_marking && incremental_marking()->IsStopped()) {
-      StartIdleIncrementalMarking();
+      StartIncrementalMarking(kReduceMemoryFootprintMask,
+                              GarbageCollectionReason::kMemoryPressure);
     }
   }
   MemoryReducer::Event event;
@@ -4428,7 +4474,7 @@ void Heap::CheckMemoryPressure() {
   memory_reducer_->NotifyPossibleGarbage(event);
 }

-void Heap::CollectGarbageOnMemoryPressure(const char* source) {
+void Heap::CollectGarbageOnMemoryPressure() {
   const int kGarbageThresholdInBytes = 8 * MB;
   const double kGarbageThresholdAsFractionOfTotalMemory = 0.1;
   // This constant is the maximum response time in RAIL performance model.
@@ -4436,7 +4482,8 @@ void Heap::CollectGarbageOnMemoryPressure(const char* source) {

   double start = MonotonicallyIncreasingTimeInMs();
   CollectAllGarbage(kReduceMemoryFootprintMask | kAbortIncrementalMarkingMask,
-                    source, kGCCallbackFlagCollectAllAvailableGarbage);
+                    GarbageCollectionReason::kMemoryPressure,
+                    kGCCallbackFlagCollectAllAvailableGarbage);
   double end = MonotonicallyIncreasingTimeInMs();

   // Estimate how much memory we can free.
@@ -4451,11 +4498,13 @@ void Heap::CollectGarbageOnMemoryPressure(const char* source) {
     // Otherwise, start incremental marking.
     if (end - start < kMaxMemoryPressurePauseMs / 2) {
       CollectAllGarbage(
-          kReduceMemoryFootprintMask | kAbortIncrementalMarkingMask, source,
+          kReduceMemoryFootprintMask | kAbortIncrementalMarkingMask,
+          GarbageCollectionReason::kMemoryPressure,
           kGCCallbackFlagCollectAllAvailableGarbage);
     } else {
       if (FLAG_incremental_marking && incremental_marking()->IsStopped()) {
-        StartIdleIncrementalMarking();
+        StartIncrementalMarking(kReduceMemoryFootprintMask,
+                                GarbageCollectionReason::kMemoryPressure);
       }
     }
   }
 }
@@ -4527,7 +4576,7 @@ void Heap::ReportHeapStatistics(const char* title) {
   PrintF("Heap statistics : ");
   memory_allocator()->ReportStatistics();
   PrintF("To space : ");
-  new_space_.ReportStatistics();
+  new_space_->ReportStatistics();
   PrintF("Old space : ");
   old_space_->ReportStatistics();
   PrintF("Code space : ");
@@ -4541,12 +4590,64 @@ void Heap::ReportHeapStatistics(const char* title) {

#endif  // DEBUG

+const char* Heap::GarbageCollectionReasonToString(
+    GarbageCollectionReason gc_reason) {
+  switch (gc_reason) {
+    case GarbageCollectionReason::kAllocationFailure:
+      return "allocation failure";
+    case GarbageCollectionReason::kAllocationLimit:
+      return "allocation limit";
+    case GarbageCollectionReason::kContextDisposal:
+      return "context disposal";
+    case GarbageCollectionReason::kCountersExtension:
+      return "counters extension";
+    case GarbageCollectionReason::kDebugger:
+      return "debugger";
+    case GarbageCollectionReason::kDeserializer:
+      return "deserialize";
+    case GarbageCollectionReason::kExternalMemoryPressure:
+      return "external memory pressure";
+    case GarbageCollectionReason::kFinalizeMarkingViaStackGuard:
+      return "finalize incremental marking via stack guard";
+    case GarbageCollectionReason::kFinalizeMarkingViaTask:
+      return "finalize incremental marking via task";
+    case GarbageCollectionReason::kFullHashtable:
+      return "full hash-table";
+    case GarbageCollectionReason::kHeapProfiler:
+      return "heap profiler";
+    case GarbageCollectionReason::kIdleTask:
+      return "idle task";
+    case GarbageCollectionReason::kLastResort:
+      return "last resort";
+    case GarbageCollectionReason::kLowMemoryNotification:
+      return "low memory notification";
+    case GarbageCollectionReason::kMakeHeapIterable:
+      return "make heap iterable";
+    case GarbageCollectionReason::kMemoryPressure:
+      return "memory pressure";
+    case GarbageCollectionReason::kMemoryReducer:
+      return "memory reducer";
+    case GarbageCollectionReason::kRuntime:
+      return "runtime";
+    case GarbageCollectionReason::kSamplingProfiler:
+      return "sampling profiler";
+    case GarbageCollectionReason::kSnapshotCreator:
+      return "snapshot creator";
+    case GarbageCollectionReason::kTesting:
+      return "testing";
+    case GarbageCollectionReason::kUnknown:
+      return "unknown";
+  }
+  UNREACHABLE();
+  return "";
+}
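CollectGarbageOnMemoryPressure() above budgets its work against kMaxMemoryPressurePauseMs (the RAIL response-time constant referenced in the surrounding code but elided by the hunk boundary): if a meaningful amount of memory is still reclaimable and the first full GC finished within half the budget, a second compacting GC is affordable; otherwise the remainder is handed to incremental marking. Illustrative timings, assuming for the sake of the example a 100 ms budget:

    // end - start == 30 ms  ->  30 < 100 / 2, run the second full GC now.
    // end - start == 70 ms  ->  over half the budget spent; start incremental
    //                           marking and spread the rest over future steps.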

 bool Heap::Contains(HeapObject* value) {
   if (memory_allocator()->IsOutsideAllocatedSpace(value->address())) {
     return false;
   }
   return HasBeenSetUp() &&
-         (new_space_.ToSpaceContains(value) || old_space_->Contains(value) ||
+         (new_space_->ToSpaceContains(value) || old_space_->Contains(value) ||
          code_space_->Contains(value) || map_space_->Contains(value) ||
          lo_space_->Contains(value));
 }
@@ -4556,7 +4657,7 @@ bool Heap::ContainsSlow(Address addr) {
     return false;
   }
   return HasBeenSetUp() &&
-         (new_space_.ToSpaceContainsSlow(addr) ||
+         (new_space_->ToSpaceContainsSlow(addr) ||
          old_space_->ContainsSlow(addr) || code_space_->ContainsSlow(addr) ||
          map_space_->ContainsSlow(addr) || lo_space_->ContainsSlow(addr));
 }
@@ -4569,7 +4670,7 @@ bool Heap::InSpace(HeapObject* value, AllocationSpace space) {

   switch (space) {
     case NEW_SPACE:
-      return new_space_.ToSpaceContains(value);
+      return new_space_->ToSpaceContains(value);
     case OLD_SPACE:
       return old_space_->Contains(value);
     case CODE_SPACE:
@@ -4591,7 +4692,7 @@ bool Heap::InSpaceSlow(Address addr, AllocationSpace space) {

   switch (space) {
     case NEW_SPACE:
-      return new_space_.ToSpaceContainsSlow(addr);
+      return new_space_->ToSpaceContainsSlow(addr);
     case OLD_SPACE:
       return old_space_->ContainsSlow(addr);
     case CODE_SPACE:
@@ -4654,7 +4755,7 @@ void Heap::Verify() {
   VerifySmisVisitor smis_visitor;
   IterateSmiRoots(&smis_visitor);

-  new_space_.Verify();
+  new_space_->Verify();

   old_space_->Verify(&visitor);
   map_space_->Verify(&visitor);
@@ -4673,9 +4774,9 @@ void Heap::Verify() {

 void Heap::ZapFromSpace() {
-  if (!new_space_.IsFromSpaceCommitted()) return;
-  for (Page* page : NewSpacePageRange(new_space_.FromSpaceStart(),
-                                      new_space_.FromSpaceEnd())) {
+  if (!new_space_->IsFromSpaceCommitted()) return;
+  for (Page* page : NewSpacePageRange(new_space_->FromSpaceStart(),
+                                      new_space_->FromSpaceEnd())) {
     for (Address cursor = page->area_start(), limit = page->area_end();
          cursor < limit; cursor += kPointerSize) {
       Memory::Address_at(cursor) = kFromSpaceZapValue;
@@ -4967,7 +5068,7 @@ bool Heap::ConfigureHeap(int max_semi_space_size, int max_old_space_size,

   if (FLAG_stress_compaction) {
     // This will cause more frequent GCs when stressing.
-    max_semi_space_size_ = Page::kPageSize;
+    max_semi_space_size_ = MB;
   }

   // The new space size must be a power of two to support single-bit testing
@@ -5018,7 +5119,7 @@ bool Heap::ConfigureHeap(int max_semi_space_size, int max_old_space_size,
   old_generation_allocation_limit_ = initial_old_generation_size_;

   // We rely on being able to allocate new arrays in paged spaces.
-  DCHECK(Page::kMaxRegularHeapObjectSize >=
+  DCHECK(kMaxRegularHeapObjectSize >=
         (JSArray::kSize +
          FixedArray::SizeFor(JSArray::kInitialMaxFastElementArray) +
          AllocationMemento::kSize));
@@ -5060,8 +5161,8 @@ bool Heap::ConfigureHeapDefault() { return ConfigureHeap(0, 0, 0, 0); }
 void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
   *stats->start_marker = HeapStats::kStartMarker;
   *stats->end_marker = HeapStats::kEndMarker;
-  *stats->new_space_size = new_space_.SizeAsInt();
-  *stats->new_space_capacity = new_space_.Capacity();
+  *stats->new_space_size = new_space_->Size();
+  *stats->new_space_capacity = new_space_->Capacity();
   *stats->old_space_size = old_space_->SizeOfObjects();
   *stats->old_space_capacity = old_space_->Capacity();
   *stats->code_space_size = code_space_->SizeOfObjects();
@@ -5183,11 +5284,19 @@ intptr_t Heap::CalculateOldGenerationAllocationLimit(double factor,
   CHECK(old_gen_size > 0);
   intptr_t limit = static_cast<intptr_t>(old_gen_size * factor);
   limit = Max(limit, old_gen_size + MinimumAllocationLimitGrowingStep());
-  limit += new_space_.Capacity();
+  limit += new_space_->Capacity();
   intptr_t halfway_to_the_max = (old_gen_size + max_old_generation_size_) / 2;
   return Min(limit, halfway_to_the_max);
 }

+intptr_t Heap::MinimumAllocationLimitGrowingStep() {
+  const double kRegularAllocationLimitGrowingStep = 8;
+  const double kLowMemoryAllocationLimitGrowingStep = 2;
+  intptr_t limit = (Page::kPageSize > MB ? Page::kPageSize : MB);
+  return limit * (ShouldOptimizeForMemoryUsage()
+                      ? kLowMemoryAllocationLimitGrowingStep
+                      : kRegularAllocationLimitGrowingStep);
+}

 void Heap::SetOldGenerationAllocationLimit(intptr_t old_gen_size,
                                            double gc_speed,
@@ -5195,11 +5304,11 @@ void Heap::SetOldGenerationAllocationLimit(intptr_t old_gen_size,
   double factor = HeapGrowingFactor(gc_speed, mutator_speed);

   if (FLAG_trace_gc_verbose) {
-    PrintIsolate(isolate_,
-                 "Heap growing factor %.1f based on mu=%.3f, speed_ratio=%.f "
-                 "(gc=%.f, mutator=%.f)\n",
-                 factor, kTargetMutatorUtilization, gc_speed / mutator_speed,
-                 gc_speed, mutator_speed);
+    isolate_->PrintWithTimestamp(
+        "Heap growing factor %.1f based on mu=%.3f, speed_ratio=%.f "
+        "(gc=%.f, mutator=%.f)\n",
+        factor, kTargetMutatorUtilization, gc_speed / mutator_speed, gc_speed,
+        mutator_speed);
   }

   if (IsMemoryConstrainedDevice()) {
@@ -5223,14 +5332,13 @@ void Heap::SetOldGenerationAllocationLimit(intptr_t old_gen_size,
       CalculateOldGenerationAllocationLimit(factor, old_gen_size);

   if (FLAG_trace_gc_verbose) {
-    PrintIsolate(isolate_, "Grow: old size: %" V8PRIdPTR
-                           " KB, new limit: %" V8PRIdPTR " KB (%.1f)\n",
-                 old_gen_size / KB, old_generation_allocation_limit_ / KB,
-                 factor);
+    isolate_->PrintWithTimestamp("Grow: old size: %" V8PRIdPTR
+                                 " KB, new limit: %" V8PRIdPTR " KB (%.1f)\n",
+                                 old_gen_size / KB,
+                                 old_generation_allocation_limit_ / KB, factor);
   }
 }

-
 void Heap::DampenOldGenerationAllocationLimit(intptr_t old_gen_size,
                                               double gc_speed,
                                               double mutator_speed) {
@@ -5238,17 +5346,64 @@ void Heap::DampenOldGenerationAllocationLimit(intptr_t old_gen_size,
   intptr_t limit = CalculateOldGenerationAllocationLimit(factor, old_gen_size);
   if (limit < old_generation_allocation_limit_) {
     if (FLAG_trace_gc_verbose) {
-      PrintIsolate(isolate_,
-                   "Dampen: old size: %" V8PRIdPTR " KB, old limit: %" V8PRIdPTR
+      isolate_->PrintWithTimestamp(
+          "Dampen: old size: %" V8PRIdPTR " KB, old limit: %" V8PRIdPTR
+ " KB, " + "new limit: %" V8PRIdPTR " KB (%.1f)\n", + old_gen_size / KB, old_generation_allocation_limit_ / KB, limit / KB, + factor); } old_generation_allocation_limit_ = limit; } } +// This predicate is called when an old generation space cannot allocated from +// the free list and is about to add a new page. Returning false will cause a +// major GC. It happens when the old generation allocation limit is reached and +// - either we need to optimize for memory usage, +// - or the incremental marking is not in progress and we cannot start it. +bool Heap::ShouldExpandOldGenerationOnAllocationFailure() { + if (always_allocate() || OldGenerationSpaceAvailable() > 0) return true; + // We reached the old generation allocation limit. + + if (ShouldOptimizeForMemoryUsage()) return false; + + if (incremental_marking()->IsStopped() && + IncrementalMarkingLimitReached() == IncrementalMarkingLimit::kNoLimit) { + // We cannot start incremental marking. + return false; + } + return true; +} + +// This function returns either kNoLimit, kSoftLimit, or kHardLimit. +// The kNoLimit means that either incremental marking is disabled or it is too +// early to start incremental marking. +// The kSoftLimit means that incremental marking should be started soon. +// The kHardLimit means that incremental marking should be started immediately. +Heap::IncrementalMarkingLimit Heap::IncrementalMarkingLimitReached() { + if (!incremental_marking()->CanBeActivated() || + PromotedSpaceSizeOfObjects() < IncrementalMarking::kActivationThreshold) { + // Incremental marking is disabled or it is too early to start. + return IncrementalMarkingLimit::kNoLimit; + } + if ((FLAG_stress_compaction && (gc_count_ & 1) != 0) || + HighMemoryPressure()) { + // If there is high memory pressure or stress testing is enabled, then + // start marking immediately. + return IncrementalMarkingLimit::kHardLimit; + } + intptr_t old_generation_space_available = OldGenerationSpaceAvailable(); + if (old_generation_space_available > new_space_->Capacity()) { + return IncrementalMarkingLimit::kNoLimit; + } + // We are close to the allocation limit. + // Choose between the hard and the soft limits. + if (old_generation_space_available <= 0 || ShouldOptimizeForMemoryUsage()) { + return IncrementalMarkingLimit::kHardLimit; + } + return IncrementalMarkingLimit::kSoftLimit; +} void Heap::EnableInlineAllocation() { if (!inline_allocation_disabled_) return; @@ -5316,33 +5471,30 @@ bool Heap::SetUp() { // Initialize incremental marking. incremental_marking_ = new IncrementalMarking(this); - // Set up new space. - if (!new_space_.SetUp(initial_semispace_size_, max_semi_space_size_)) { + for (int i = 0; i <= LAST_SPACE; i++) { + space_[i] = nullptr; + } + + space_[NEW_SPACE] = new_space_ = new NewSpace(this); + if (!new_space_->SetUp(initial_semispace_size_, max_semi_space_size_)) { return false; } new_space_top_after_last_gc_ = new_space()->top(); - // Initialize old space. - old_space_ = new OldSpace(this, OLD_SPACE, NOT_EXECUTABLE); - if (old_space_ == NULL) return false; + space_[OLD_SPACE] = old_space_ = + new OldSpace(this, OLD_SPACE, NOT_EXECUTABLE); if (!old_space_->SetUp()) return false; - // Initialize the code space, set its maximum capacity to the old - // generation size. It needs executable memory. - code_space_ = new OldSpace(this, CODE_SPACE, EXECUTABLE); - if (code_space_ == NULL) return false; + space_[CODE_SPACE] = code_space_ = new OldSpace(this, CODE_SPACE, EXECUTABLE); if (!code_space_->SetUp()) return false; - // Initialize map space. 

 void Heap::EnableInlineAllocation() {
   if (!inline_allocation_disabled_) return;
@@ -5316,33 +5471,30 @@ bool Heap::SetUp() {

   // Initialize incremental marking.
   incremental_marking_ = new IncrementalMarking(this);

-  // Set up new space.
-  if (!new_space_.SetUp(initial_semispace_size_, max_semi_space_size_)) {
+  for (int i = 0; i <= LAST_SPACE; i++) {
+    space_[i] = nullptr;
+  }
+
+  space_[NEW_SPACE] = new_space_ = new NewSpace(this);
+  if (!new_space_->SetUp(initial_semispace_size_, max_semi_space_size_)) {
     return false;
   }
   new_space_top_after_last_gc_ = new_space()->top();

-  // Initialize old space.
-  old_space_ = new OldSpace(this, OLD_SPACE, NOT_EXECUTABLE);
-  if (old_space_ == NULL) return false;
+  space_[OLD_SPACE] = old_space_ =
+      new OldSpace(this, OLD_SPACE, NOT_EXECUTABLE);
   if (!old_space_->SetUp()) return false;

-  // Initialize the code space, set its maximum capacity to the old
-  // generation size. It needs executable memory.
-  code_space_ = new OldSpace(this, CODE_SPACE, EXECUTABLE);
-  if (code_space_ == NULL) return false;
+  space_[CODE_SPACE] = code_space_ = new OldSpace(this, CODE_SPACE, EXECUTABLE);
   if (!code_space_->SetUp()) return false;

-  // Initialize map space.
-  map_space_ = new MapSpace(this, MAP_SPACE);
-  if (map_space_ == NULL) return false;
+  space_[MAP_SPACE] = map_space_ = new MapSpace(this, MAP_SPACE);
   if (!map_space_->SetUp()) return false;

   // The large object code space may contain code or data.  We set the memory
   // to be non-executable here for safety, but this means we need to enable it
   // explicitly when allocating large code objects.
-  lo_space_ = new LargeObjectSpace(this, LO_SPACE);
-  if (lo_space_ == NULL) return false;
+  space_[LO_SPACE] = lo_space_ = new LargeObjectSpace(this, LO_SPACE);
   if (!lo_space_->SetUp()) return false;

   // Set up the seed that is used to randomize the string hash function.
@@ -5362,20 +5514,14 @@ bool Heap::SetUp() {
   }

   tracer_ = new GCTracer(this);
-
   scavenge_collector_ = new Scavenger(this);
-
   mark_compact_collector_ = new MarkCompactCollector(this);
-
   gc_idle_time_handler_ = new GCIdleTimeHandler();
-
   memory_reducer_ = new MemoryReducer(this);
-
   if (FLAG_track_gc_object_stats) {
     live_object_stats_ = new ObjectStats(this);
     dead_object_stats_ = new ObjectStats(this);
   }
-
   scavenge_job_ = new ScavengeJob();

   LOG(isolate_, IntPtrTEvent("heap-capacity", Capacity()));
@@ -5435,28 +5581,52 @@ void Heap::PrintAlloctionsHash() {

 void Heap::NotifyDeserializationComplete() {
-  deserialization_complete_ = true;
-#ifdef DEBUG
-  // All pages right after bootstrapping must be marked as never-evacuate.
+  DCHECK_EQ(0, gc_count());
   PagedSpaces spaces(this);
   for (PagedSpace* s = spaces.next(); s != NULL; s = spaces.next()) {
+    if (isolate()->snapshot_available()) s->ShrinkImmortalImmovablePages();
+#ifdef DEBUG
+    // All pages right after bootstrapping must be marked as never-evacuate.
     for (Page* p : *s) {
       CHECK(p->NeverEvacuate());
     }
-  }
#endif  // DEBUG
+  }
+
+  deserialization_complete_ = true;
 }

 void Heap::SetEmbedderHeapTracer(EmbedderHeapTracer* tracer) {
-  mark_compact_collector()->SetEmbedderHeapTracer(tracer);
+  DCHECK_NOT_NULL(tracer);
+  CHECK_NULL(embedder_heap_tracer_);
+  embedder_heap_tracer_ = tracer;
 }

-bool Heap::UsingEmbedderHeapTracer() {
-  return mark_compact_collector()->UsingEmbedderHeapTracer();
+void Heap::RegisterWrappersWithEmbedderHeapTracer() {
+  DCHECK(UsingEmbedderHeapTracer());
+  if (wrappers_to_trace_.empty()) {
+    return;
+  }
+  embedder_heap_tracer()->RegisterV8References(wrappers_to_trace_);
+  wrappers_to_trace_.clear();
 }

 void Heap::TracePossibleWrapper(JSObject* js_object) {
-  mark_compact_collector()->TracePossibleWrapper(js_object);
+  DCHECK(js_object->WasConstructedFromApiFunction());
+  if (js_object->GetInternalFieldCount() >= 2 &&
+      js_object->GetInternalField(0) &&
+      js_object->GetInternalField(0) != undefined_value() &&
+      js_object->GetInternalField(1) != undefined_value()) {
+    DCHECK(reinterpret_cast<intptr_t>(js_object->GetInternalField(0)) % 2 == 0);
+    wrappers_to_trace_.push_back(std::pair<void*, void*>(
+        reinterpret_cast<void*>(js_object->GetInternalField(0)),
+        reinterpret_cast<void*>(js_object->GetInternalField(1))));
+  }
+}
+
+bool Heap::RequiresImmediateWrapperProcessing() {
+  const size_t kTooManyWrappers = 16000;
+  return wrappers_to_trace_.size() > kTooManyWrappers;
 }

 void Heap::RegisterExternallyReferencedObject(Object** object) {
@@ -5480,33 +5650,18 @@ void Heap::TearDown() {

   UpdateMaximumCommitted();

-  if (FLAG_print_cumulative_gc_stat) {
-    PrintF("\n");
-    PrintF("gc_count=%d ", gc_count_);
-    PrintF("mark_sweep_count=%d ", ms_count_);
-    PrintF("max_gc_pause=%.1f ", get_max_gc_pause());
-    PrintF("total_gc_time=%.1f ", total_gc_time_ms_);
-    PrintF("min_in_mutator=%.1f ", get_min_in_mutator());
 
 void Heap::RegisterExternallyReferencedObject(Object** object) {
@@ -5480,33 +5650,18 @@ void Heap::TearDown() {
   UpdateMaximumCommitted();
 
-  if (FLAG_print_cumulative_gc_stat) {
-    PrintF("\n");
-    PrintF("gc_count=%d ", gc_count_);
-    PrintF("mark_sweep_count=%d ", ms_count_);
-    PrintF("max_gc_pause=%.1f ", get_max_gc_pause());
-    PrintF("total_gc_time=%.1f ", total_gc_time_ms_);
-    PrintF("min_in_mutator=%.1f ", get_min_in_mutator());
-    PrintF("max_alive_after_gc=%" V8PRIdPTR " ", get_max_alive_after_gc());
-    PrintF("total_marking_time=%.1f ", tracer()->cumulative_marking_duration());
-    PrintF("total_sweeping_time=%.1f ",
-           tracer()->cumulative_sweeping_duration());
-    PrintF("\n\n");
-  }
-
   if (FLAG_print_max_heap_committed) {
     PrintF("\n");
-    PrintF("maximum_committed_by_heap=%" V8PRIdPTR " ",
-           MaximumCommittedMemory());
-    PrintF("maximum_committed_by_new_space=%" V8PRIdPTR " ",
-           new_space_.MaximumCommittedMemory());
-    PrintF("maximum_committed_by_old_space=%" V8PRIdPTR " ",
+    PrintF("maximum_committed_by_heap=%" PRIuS " ", MaximumCommittedMemory());
+    PrintF("maximum_committed_by_new_space=%" PRIuS " ",
+           new_space_->MaximumCommittedMemory());
+    PrintF("maximum_committed_by_old_space=%" PRIuS " ",
            old_space_->MaximumCommittedMemory());
-    PrintF("maximum_committed_by_code_space=%" V8PRIdPTR " ",
+    PrintF("maximum_committed_by_code_space=%" PRIuS " ",
            code_space_->MaximumCommittedMemory());
-    PrintF("maximum_committed_by_map_space=%" V8PRIdPTR " ",
+    PrintF("maximum_committed_by_map_space=%" PRIuS " ",
           map_space_->MaximumCommittedMemory());
-    PrintF("maximum_committed_by_lo_space=%" V8PRIdPTR " ",
+    PrintF("maximum_committed_by_lo_space=%" PRIuS " ",
           lo_space_->MaximumCommittedMemory());
     PrintF("\n\n");
   }
@@ -5560,7 +5715,9 @@ void Heap::TearDown() {
   delete tracer_;
   tracer_ = nullptr;
 
-  new_space_.TearDown();
+  new_space_->TearDown();
+  delete new_space_;
+  new_space_ = nullptr;
 
   if (old_space_ != NULL) {
     delete old_space_;
@@ -5599,6 +5756,9 @@ void Heap::TearDown() {
 
   delete memory_allocator_;
   memory_allocator_ = nullptr;
+
+  delete embedder_reference_reporter_;
+  embedder_reference_reporter_ = nullptr;
 }
 
 
@@ -5879,14 +6039,10 @@ OldSpace* OldSpaces::next() {
   }
 }
 
-
 SpaceIterator::SpaceIterator(Heap* heap)
-    : heap_(heap), current_space_(FIRST_SPACE), iterator_(NULL) {}
-
+    : heap_(heap), current_space_(FIRST_SPACE - 1) {}
 
 SpaceIterator::~SpaceIterator() {
-  // Delete active iterator if any.
-  delete iterator_;
 }
 
 
@@ -5895,48 +6051,9 @@ bool SpaceIterator::has_next() {
   return current_space_ != LAST_SPACE;
 }
 
-
-ObjectIterator* SpaceIterator::next() {
-  if (iterator_ != NULL) {
-    delete iterator_;
-    iterator_ = NULL;
-    // Move to the next space
-    current_space_++;
-    if (current_space_ > LAST_SPACE) {
-      return NULL;
-    }
-  }
-
-  // Return iterator for the new current space.
-  return CreateIterator();
-}
-
-
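The TearDown printing hunk above swaps V8PRIdPTR for PRIuS to match accessors that now return size_t. A standalone illustration of the format macro follows; V8 supplies its own definition of PRIuS, so the "zu" expansion written here is an assumption for non-MSVC targets.

#include <cstddef>
#include <cstdio>

// Assumed expansion for illustration only; V8 defines PRIuS itself.
#define PRIuS "zu"

int main() {
  std::size_t committed = 700u * 1024 * 1024;  // hypothetical committed bytes
  std::printf("maximum_committed_by_heap=%" PRIuS " ", committed);
  return 0;
}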
-// Create an iterator for the space to iterate.
-ObjectIterator* SpaceIterator::CreateIterator() {
-  DCHECK(iterator_ == NULL);
-
-  switch (current_space_) {
-    case NEW_SPACE:
-      iterator_ = new SemiSpaceIterator(heap_->new_space());
-      break;
-    case OLD_SPACE:
-      iterator_ = new HeapObjectIterator(heap_->old_space());
-      break;
-    case CODE_SPACE:
-      iterator_ = new HeapObjectIterator(heap_->code_space());
-      break;
-    case MAP_SPACE:
-      iterator_ = new HeapObjectIterator(heap_->map_space());
-      break;
-    case LO_SPACE:
-      iterator_ = new LargeObjectIterator(heap_->lo_space());
-      break;
-  }
-
-  // Return the newly allocated iterator;
-  DCHECK(iterator_ != NULL);
-  return iterator_;
+Space* SpaceIterator::next() {
+  DCHECK(has_next());
+  return heap_->space(++current_space_);
 }
 
 
@@ -6021,7 +6138,7 @@ HeapIterator::HeapIterator(Heap* heap,
     default:
       break;
   }
-  object_iterator_ = space_iterator_->next();
+  object_iterator_ = space_iterator_->next()->GetObjectIterator();
 }
 
 
@@ -6034,8 +6151,6 @@ HeapIterator::~HeapIterator() {
   DCHECK(object_iterator_ == nullptr);
 }
 #endif
-  // Make sure the last iterator is deallocated.
-  delete object_iterator_;
   delete space_iterator_;
   delete filter_;
 }
@@ -6052,22 +6167,22 @@ HeapObject* HeapIterator::next() {
 
 HeapObject* HeapIterator::NextObject() {
   // No iterator means we are done.
-  if (object_iterator_ == nullptr) return nullptr;
+  if (object_iterator_.get() == nullptr) return nullptr;
 
-  if (HeapObject* obj = object_iterator_->Next()) {
+  if (HeapObject* obj = object_iterator_.get()->Next()) {
     // If the current iterator has more objects we are fine.
     return obj;
   } else {
     // Go though the spaces looking for one that has objects.
     while (space_iterator_->has_next()) {
-      object_iterator_ = space_iterator_->next();
-      if (HeapObject* obj = object_iterator_->Next()) {
+      object_iterator_ = space_iterator_->next()->GetObjectIterator();
+      if (HeapObject* obj = object_iterator_.get()->Next()) {
        return obj;
      }
    }
  }
  // Done with the last space.
-  object_iterator_ = nullptr;
+  object_iterator_.reset(nullptr);
  return nullptr;
}


@@ -6260,95 +6375,10 @@ void Heap::TracePathToGlobal() {
 }
 #endif
 
-
-void Heap::UpdateCumulativeGCStatistics(double duration,
-                                        double spent_in_mutator,
-                                        double marking_time) {
-  if (FLAG_print_cumulative_gc_stat) {
-    total_gc_time_ms_ += duration;
-    max_gc_pause_ = Max(max_gc_pause_, duration);
-    max_alive_after_gc_ = Max(max_alive_after_gc_, SizeOfObjects());
-    min_in_mutator_ = Min(min_in_mutator_, spent_in_mutator);
-  } else if (FLAG_trace_gc_verbose) {
+void Heap::UpdateTotalGCTime(double duration) {
+  if (FLAG_trace_gc_verbose) {
     total_gc_time_ms_ += duration;
   }
-
-  marking_time_ += marking_time;
-}
-
-
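Taken together, the SpaceIterator and HeapIterator hunks above replace an owned raw ObjectIterator pointer with a two-step protocol: next() now yields the Space itself, and each space vends its object iterator as a std::unique_ptr, so the manual delete bookkeeping disappears. A sketch of the resulting whole-heap walk; it assumes the V8-internal declarations of Heap, Space, SpaceIterator, ObjectIterator, and HeapObject from the matching headers.

#include <memory>

// Mirrors HeapIterator::NextObject in the hunk above: one object iterator
// per space, freed automatically when the unique_ptr is reassigned.
void WalkHeap(Heap* heap) {
  SpaceIterator spaces(heap);
  while (spaces.has_next()) {
    std::unique_ptr<ObjectIterator> objects =
        spaces.next()->GetObjectIterator();
    for (HeapObject* obj = objects->Next(); obj != nullptr;
         obj = objects->Next()) {
      // Visit obj here.
    }
  }
}

Starting current_space_ at FIRST_SPACE - 1 makes the pre-increment in next() visit FIRST_SPACE first, which keeps has_next() a simple inequality against LAST_SPACE.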
-int KeyedLookupCache::Hash(Handle<Map> map, Handle<Name> name) {
-  DisallowHeapAllocation no_gc;
-  // Uses only lower 32 bits if pointers are larger.
-  uintptr_t addr_hash =
-      static_cast<uint32_t>(reinterpret_cast<uintptr_t>(*map)) >> kMapHashShift;
-  return static_cast<uint32_t>((addr_hash ^ name->Hash()) & kCapacityMask);
-}
-
-
-int KeyedLookupCache::Lookup(Handle<Map> map, Handle<Name> name) {
-  DisallowHeapAllocation no_gc;
-  int index = (Hash(map, name) & kHashMask);
-  for (int i = 0; i < kEntriesPerBucket; i++) {
-    Key& key = keys_[index + i];
-    if ((key.map == *map) && key.name->Equals(*name)) {
-      return field_offsets_[index + i];
-    }
-  }
-  return kNotFound;
-}
-
-
-void KeyedLookupCache::Update(Handle<Map> map, Handle<Name> name,
-                              int field_offset) {
-  DisallowHeapAllocation no_gc;
-  if (!name->IsUniqueName()) {
-    if (!StringTable::InternalizeStringIfExists(
-             name->GetIsolate(), Handle<String>::cast(name)).ToHandle(&name)) {
-      return;
-    }
-  }
-  // This cache is cleared only between mark compact passes, so we expect the
-  // cache to only contain old space names.
-  DCHECK(!map->GetIsolate()->heap()->InNewSpace(*name));
-
-  int index = (Hash(map, name) & kHashMask);
-  // After a GC there will be free slots, so we use them in order (this may
-  // help to get the most frequently used one in position 0).
-  for (int i = 0; i < kEntriesPerBucket; i++) {
-    Key& key = keys_[index];
-    Object* free_entry_indicator = NULL;
-    if (key.map == free_entry_indicator) {
-      key.map = *map;
-      key.name = *name;
-      field_offsets_[index + i] = field_offset;
-      return;
-    }
-  }
-  // No free entry found in this bucket, so we move them all down one and
-  // put the new entry at position zero.
-  for (int i = kEntriesPerBucket - 1; i > 0; i--) {
-    Key& key = keys_[index + i];
-    Key& key2 = keys_[index + i - 1];
-    key = key2;
-    field_offsets_[index + i] = field_offsets_[index + i - 1];
-  }
-
-  // Write the new first entry.
-  Key& key = keys_[index];
-  key.map = *map;
-  key.name = *name;
-  field_offsets_[index] = field_offset;
-}
-
-
-void KeyedLookupCache::Clear() {
-  for (int index = 0; index < kLength; index++) keys_[index].map = NULL;
-}
-
-
-void DescriptorLookupCache::Clear() {
-  for (int index = 0; index < kLength; index++) keys_[index].source = NULL;
 }
 
 void Heap::ExternalStringTable::CleanUp() {
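The KeyedLookupCache removed above is a small set-associative cache: Hash selects a bucket, Lookup probes kEntriesPerBucket consecutive slots, and Update either reuses a slot freed by the last clear or shifts the bucket down one and writes the new entry at position zero. A condensed, self-contained rendering of that replacement policy follows; the Key layout and the kEntriesPerBucket value are stand-ins for the originals.

// Shift-down replacement within one bucket, as in the removed
// KeyedLookupCache::Update: reuse a free slot if one exists, otherwise
// insert at slot zero and push the rest down (approximate MRU ordering).
struct Key { const void* map; const void* name; };

static const int kEntriesPerBucket = 4;  // stand-in for the V8 constant

void UpdateBucket(Key* keys, int* field_offsets, int index,
                  const void* map, const void* name, int field_offset) {
  // Prefer a slot left empty by the last cache clear.
  for (int i = 0; i < kEntriesPerBucket; i++) {
    if (keys[index + i].map == nullptr) {
      keys[index + i] = {map, name};
      field_offsets[index + i] = field_offset;
      return;
    }
  }
  // Bucket full: shift entries down one slot and write the new entry first.
  for (int i = kEntriesPerBucket - 1; i > 0; i--) {
    keys[index + i] = keys[index + i - 1];
    field_offsets[index + i] = field_offsets[index + i - 1];
  }
  keys[index] = {map, name};
  field_offsets[index] = field_offset;
}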