author     Myles Borins <mylesborins@google.com>   2018-04-10 21:39:51 -0400
committer  Myles Borins <mylesborins@google.com>   2018-04-11 13:22:42 -0400
commit     12a1b9b8049462e47181a298120243dc83e81c55 (patch)
tree       8605276308c8b4e3597516961266bae1af57557a /deps/v8/src/heap
parent     78cd8263354705b767ef8c6a651740efe4931ba0 (diff)
deps: update V8 to 6.6.346.23
PR-URL: https://github.com/nodejs/node/pull/19201
Reviewed-By: Ali Ijaz Sheikh <ofrobots@google.com>
Reviewed-By: Myles Borins <myles.borins@gmail.com>
Reviewed-By: Matteo Collina <matteo.collina@gmail.com>
Reviewed-By: Colin Ihrig <cjihrig@gmail.com>
Diffstat (limited to 'deps/v8/src/heap')
-rw-r--r--  deps/v8/src/heap/concurrent-marking.cc     | 107
-rw-r--r--  deps/v8/src/heap/concurrent-marking.h      |  77
-rw-r--r--  deps/v8/src/heap/heap-inl.h                |   2
-rw-r--r--  deps/v8/src/heap/heap.cc                   | 323
-rw-r--r--  deps/v8/src/heap/heap.h                    |  78
-rw-r--r--  deps/v8/src/heap/incremental-marking.cc    |  24
-rw-r--r--  deps/v8/src/heap/invalidated-slots-inl.h   |   6
-rw-r--r--  deps/v8/src/heap/invalidated-slots.h       |   6
-rw-r--r--  deps/v8/src/heap/item-parallel-job.cc      | 130
-rw-r--r--  deps/v8/src/heap/item-parallel-job.h       | 134
-rw-r--r--  deps/v8/src/heap/mark-compact.cc           | 160
-rw-r--r--  deps/v8/src/heap/mark-compact.h            |   5
-rw-r--r--  deps/v8/src/heap/marking.h                 |   6
-rw-r--r--  deps/v8/src/heap/memory-reducer.cc         |   1
-rw-r--r--  deps/v8/src/heap/memory-reducer.h          |   6
-rw-r--r--  deps/v8/src/heap/object-stats.cc           | 878
-rw-r--r--  deps/v8/src/heap/object-stats.h            |  88
-rw-r--r--  deps/v8/src/heap/objects-visiting-inl.h    |   6
-rw-r--r--  deps/v8/src/heap/objects-visiting.h        |   7
-rw-r--r--  deps/v8/src/heap/remembered-set.h          |   9
-rw-r--r--  deps/v8/src/heap/scavenge-job.h            |   2
-rw-r--r--  deps/v8/src/heap/scavenger-inl.h           |   9
-rw-r--r--  deps/v8/src/heap/scavenger.cc              |   9
-rw-r--r--  deps/v8/src/heap/scavenger.h               |   5
-rw-r--r--  deps/v8/src/heap/setup-heap-internal.cc    |  32
-rw-r--r--  deps/v8/src/heap/slot-set.h                |   6
-rw-r--r--  deps/v8/src/heap/spaces-inl.h              |  32
-rw-r--r--  deps/v8/src/heap/spaces.cc                 | 190
-rw-r--r--  deps/v8/src/heap/spaces.h                  |  95
-rw-r--r--  deps/v8/src/heap/store-buffer.h            |   6
-rw-r--r--  deps/v8/src/heap/stress-marking-observer.h |   2
-rw-r--r--  deps/v8/src/heap/stress-scavenge-observer.h |  2
-rw-r--r--  deps/v8/src/heap/sweeper.cc                |   8
-rw-r--r--  deps/v8/src/heap/worklist.h                |   6
34 files changed, 1526 insertions(+), 931 deletions(-)
diff --git a/deps/v8/src/heap/concurrent-marking.cc b/deps/v8/src/heap/concurrent-marking.cc
index 44ab099ba8..3aafd191cc 100644
--- a/deps/v8/src/heap/concurrent-marking.cc
+++ b/deps/v8/src/heap/concurrent-marking.cc
@@ -424,18 +424,11 @@ ConcurrentMarking::ConcurrentMarking(Heap* heap, MarkingWorklist* shared,
shared_(shared),
bailout_(bailout),
on_hold_(on_hold),
- weak_objects_(weak_objects),
- total_marked_bytes_(0),
- pending_task_count_(0),
- task_count_(0) {
+ weak_objects_(weak_objects) {
// The runtime flag should be set only if the compile time flag was set.
#ifndef V8_CONCURRENT_MARKING
CHECK(!FLAG_concurrent_marking);
#endif
- for (int i = 0; i <= kMaxTasks; i++) {
- is_pending_[i] = false;
- task_state_[i].marked_bytes = 0;
- }
}
void ConcurrentMarking::Run(int task_id, TaskState* task_state) {
@@ -443,13 +436,8 @@ void ConcurrentMarking::Run(int task_id, TaskState* task_state) {
GCTracer::BackgroundScope::MC_BACKGROUND_MARKING);
size_t kBytesUntilInterruptCheck = 64 * KB;
int kObjectsUntilInterrupCheck = 1000;
- LiveBytesMap* live_bytes = nullptr;
- {
- base::LockGuard<base::Mutex> guard(&task_state->lock);
- live_bytes = &task_state->live_bytes;
- }
- ConcurrentMarkingVisitor visitor(shared_, bailout_, live_bytes, weak_objects_,
- task_id);
+ ConcurrentMarkingVisitor visitor(shared_, bailout_, &task_state->live_bytes,
+ weak_objects_, task_id);
double time_ms;
size_t marked_bytes = 0;
if (FLAG_trace_concurrent_marking) {
@@ -458,9 +446,9 @@ void ConcurrentMarking::Run(int task_id, TaskState* task_state) {
}
{
TimedScope scope(&time_ms);
+
bool done = false;
while (!done) {
- base::LockGuard<base::Mutex> guard(&task_state->lock);
size_t current_marked_bytes = 0;
int objects_processed = 0;
while (current_marked_bytes < kBytesUntilInterruptCheck &&
@@ -484,17 +472,16 @@ void ConcurrentMarking::Run(int task_id, TaskState* task_state) {
marked_bytes += current_marked_bytes;
base::AsAtomicWord::Relaxed_Store<size_t>(&task_state->marked_bytes,
marked_bytes);
- if (task_state->interrupt_request.Value()) {
- task_state->interrupt_condition.Wait(&task_state->lock);
+ if (task_state->preemption_request.Value()) {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
+ "ConcurrentMarking::Run Preempted");
+ break;
}
}
- {
- // Take the lock to synchronize with worklist update after
- // young generation GC.
- base::LockGuard<base::Mutex> guard(&task_state->lock);
- bailout_->FlushToGlobal(task_id);
- on_hold_->FlushToGlobal(task_id);
- }
+ shared_->FlushToGlobal(task_id);
+ bailout_->FlushToGlobal(task_id);
+ on_hold_->FlushToGlobal(task_id);
+
weak_objects_->weak_cells.FlushToGlobal(task_id);
weak_objects_->transition_arrays.FlushToGlobal(task_id);
base::AsAtomicWord::Relaxed_Store<size_t>(&task_state->marked_bytes, 0);
@@ -517,21 +504,21 @@ void ConcurrentMarking::ScheduleTasks() {
DCHECK(heap_->use_tasks());
if (!FLAG_concurrent_marking) return;
base::LockGuard<base::Mutex> guard(&pending_lock_);
+ DCHECK_EQ(0, pending_task_count_);
if (task_count_ == 0) {
- // TODO(ulan): Increase the number of tasks for platforms that benefit
- // from it.
- task_count_ = static_cast<int>(
- V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads() / 2);
- task_count_ = Max(Min(task_count_, kMaxTasks), 1);
+ task_count_ = Max(
+ 1, Min(kMaxTasks,
+ static_cast<int>(V8::GetCurrentPlatform()
+ ->NumberOfAvailableBackgroundThreads())));
}
// Task id 0 is for the main thread.
- for (int i = 1; i <= task_count_ && pending_task_count_ < task_count_; i++) {
+ for (int i = 1; i <= task_count_; i++) {
if (!is_pending_[i]) {
if (FLAG_trace_concurrent_marking) {
heap_->isolate()->PrintWithTimestamp(
"Scheduling concurrent marking task %d\n", i);
}
- task_state_[i].interrupt_request.SetValue(false);
+ task_state_[i].preemption_request.SetValue(false);
is_pending_[i] = true;
++pending_task_count_;
Task* task = new Task(heap_->isolate(), this, &task_state_[i], i);
@@ -540,6 +527,7 @@ void ConcurrentMarking::ScheduleTasks() {
task, v8::Platform::kShortRunningTask);
}
}
+ DCHECK_EQ(task_count_, pending_task_count_);
}
void ConcurrentMarking::RescheduleTasksIfNeeded() {
@@ -553,25 +541,24 @@ void ConcurrentMarking::RescheduleTasksIfNeeded() {
}
}
-void ConcurrentMarking::WaitForTasks() {
- if (!FLAG_concurrent_marking) return;
+bool ConcurrentMarking::Stop(StopRequest stop_request) {
+ if (!FLAG_concurrent_marking) return false;
base::LockGuard<base::Mutex> guard(&pending_lock_);
- while (pending_task_count_ > 0) {
- pending_condition_.Wait(&pending_lock_);
- }
-}
-void ConcurrentMarking::EnsureCompleted() {
- if (!FLAG_concurrent_marking) return;
- base::LockGuard<base::Mutex> guard(&pending_lock_);
- CancelableTaskManager* task_manager =
- heap_->isolate()->cancelable_task_manager();
- for (int i = 1; i <= task_count_; i++) {
- if (is_pending_[i]) {
- if (task_manager->TryAbort(cancelable_id_[i]) ==
- CancelableTaskManager::kTaskAborted) {
- is_pending_[i] = false;
- --pending_task_count_;
+ if (pending_task_count_ == 0) return false;
+
+ if (stop_request != StopRequest::COMPLETE_TASKS_FOR_TESTING) {
+ CancelableTaskManager* task_manager =
+ heap_->isolate()->cancelable_task_manager();
+ for (int i = 1; i <= task_count_; i++) {
+ if (is_pending_[i]) {
+ if (task_manager->TryAbort(cancelable_id_[i]) ==
+ CancelableTaskManager::kTaskAborted) {
+ is_pending_[i] = false;
+ --pending_task_count_;
+ } else if (stop_request == StopRequest::PREEMPT_TASKS) {
+ task_state_[i].preemption_request.SetValue(true);
+ }
}
}
}
@@ -581,6 +568,7 @@ void ConcurrentMarking::EnsureCompleted() {
for (int i = 1; i <= task_count_; i++) {
DCHECK(!is_pending_[i]);
}
+ return true;
}
void ConcurrentMarking::FlushLiveBytes(
@@ -620,25 +608,14 @@ size_t ConcurrentMarking::TotalMarkedBytes() {
}
ConcurrentMarking::PauseScope::PauseScope(ConcurrentMarking* concurrent_marking)
- : concurrent_marking_(concurrent_marking) {
- if (!FLAG_concurrent_marking) return;
- // Request task_state for all tasks.
- for (int i = 1; i <= kMaxTasks; i++) {
- concurrent_marking_->task_state_[i].interrupt_request.SetValue(true);
- }
- // Now take a lock to ensure that the tasks are waiting.
- for (int i = 1; i <= kMaxTasks; i++) {
- concurrent_marking_->task_state_[i].lock.Lock();
- }
+ : concurrent_marking_(concurrent_marking),
+ resume_on_exit_(concurrent_marking_->Stop(
+ ConcurrentMarking::StopRequest::PREEMPT_TASKS)) {
+ DCHECK_IMPLIES(resume_on_exit_, FLAG_concurrent_marking);
}
ConcurrentMarking::PauseScope::~PauseScope() {
- if (!FLAG_concurrent_marking) return;
- for (int i = kMaxTasks; i >= 1; i--) {
- concurrent_marking_->task_state_[i].interrupt_request.SetValue(false);
- concurrent_marking_->task_state_[i].interrupt_condition.NotifyAll();
- concurrent_marking_->task_state_[i].lock.Unlock();
- }
+ if (resume_on_exit_) concurrent_marking_->RescheduleTasksIfNeeded();
}
} // namespace internal
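The reworked Run() loop above replaces the lock/condition-variable handshake with a budgeted check of an atomic preemption flag. As a quick illustration, here is a self-contained sketch of that pattern with simplified stand-in types (FakeObject, a plain deque as the worklist); it is not V8's implementation, only the shape of the loop: the worker drains work until it has marked roughly kBytesUntilInterruptCheck bytes or kObjectsUntilInterruptCheck objects, then checks the flag and bails out instead of blocking.

// --- illustrative sketch (not part of the patch) ---
#include <atomic>
#include <cstddef>
#include <deque>
#include <iostream>

struct FakeObject { size_t size; };

constexpr size_t kBytesUntilInterruptCheck = 64 * 1024;
constexpr int kObjectsUntilInterruptCheck = 1000;

// Drains |worklist|, re-checking |preemption_request| only after a budget of
// bytes/objects has been processed. Returns the number of bytes "marked".
size_t RunMarkingTask(std::deque<FakeObject>* worklist,
                      const std::atomic<bool>& preemption_request) {
  size_t marked_bytes = 0;
  bool done = false;
  while (!done) {
    size_t current_marked_bytes = 0;
    int objects_processed = 0;
    while (current_marked_bytes < kBytesUntilInterruptCheck &&
           objects_processed < kObjectsUntilInterruptCheck) {
      if (worklist->empty()) {
        done = true;
        break;
      }
      FakeObject object = worklist->front();
      worklist->pop_front();
      current_marked_bytes += object.size;
      objects_processed++;
    }
    marked_bytes += current_marked_bytes;
    // Budget exhausted: honor a pending preemption request by bailing out
    // rather than blocking on a condition variable.
    if (preemption_request.load(std::memory_order_relaxed)) break;
  }
  return marked_bytes;
}

int main() {
  std::deque<FakeObject> worklist(5000, FakeObject{128});
  std::atomic<bool> preempt{false};
  std::cout << RunMarkingTask(&worklist, preempt) << " bytes marked\n";
}
// --- end of sketch ---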
diff --git a/deps/v8/src/heap/concurrent-marking.h b/deps/v8/src/heap/concurrent-marking.h
index 0f0c8bf992..c5af406e45 100644
--- a/deps/v8/src/heap/concurrent-marking.h
+++ b/deps/v8/src/heap/concurrent-marking.h
@@ -2,10 +2,14 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_HEAP_CONCURRENT_MARKING_
-#define V8_HEAP_CONCURRENT_MARKING_
+#ifndef V8_HEAP_CONCURRENT_MARKING_H_
+#define V8_HEAP_CONCURRENT_MARKING_H_
+#include "include/v8-platform.h"
#include "src/allocation.h"
+#include "src/base/atomic-utils.h"
+#include "src/base/platform/condition-variable.h"
+#include "src/base/platform/mutex.h"
#include "src/cancelable-task.h"
#include "src/heap/spaces.h"
#include "src/heap/worklist.h"
@@ -26,26 +30,48 @@ using LiveBytesMap =
class ConcurrentMarking {
public:
// When the scope is entered, the concurrent marking tasks
- // are paused and are not looking at the heap objects.
+ // are preempted and are not looking at the heap objects, concurrent marking
+ // is resumed when the scope is exited.
class PauseScope {
public:
explicit PauseScope(ConcurrentMarking* concurrent_marking);
~PauseScope();
private:
- ConcurrentMarking* concurrent_marking_;
+ ConcurrentMarking* const concurrent_marking_;
+ const bool resume_on_exit_;
};
- static const int kMaxTasks = 4;
+ enum class StopRequest {
+ // Preempt ongoing tasks ASAP (and cancel unstarted tasks).
+ PREEMPT_TASKS,
+ // Wait for ongoing tasks to complete (and cancels unstarted tasks).
+ COMPLETE_ONGOING_TASKS,
+ // Wait for all scheduled tasks to complete (only use this in tests that
+ // control the full stack -- otherwise tasks cancelled by the platform can
+ // make this call hang).
+ COMPLETE_TASKS_FOR_TESTING,
+ };
+
+ // TODO(gab): The only thing that prevents this being above 7 is
+ // Worklist::kMaxNumTasks being maxed at 8 (concurrent marking doesn't use
+ // task 0, reserved for the main thread).
+ static constexpr int kMaxTasks = 7;
using MarkingWorklist = Worklist<HeapObject*, 64 /* segment size */>;
ConcurrentMarking(Heap* heap, MarkingWorklist* shared,
MarkingWorklist* bailout, MarkingWorklist* on_hold,
WeakObjects* weak_objects);
+ // Schedules asynchronous tasks to perform concurrent marking. Objects in the
+ // heap should not be moved while these are active (can be stopped safely via
+ // Stop() or PauseScope).
void ScheduleTasks();
- void WaitForTasks();
- void EnsureCompleted();
+
+ // Stops concurrent marking per |stop_request|'s semantics. Returns true
+ // if concurrent marking was in progress, false otherwise.
+ bool Stop(StopRequest stop_request);
+
void RescheduleTasksIfNeeded();
// Flushes the local live bytes into the given marking state.
void FlushLiveBytes(MajorNonAtomicMarkingState* marking_state);
@@ -59,37 +85,32 @@ class ConcurrentMarking {
private:
struct TaskState {
- // When the concurrent marking task has this lock, then objects in the
- // heap are guaranteed to not move.
- base::Mutex lock;
- // The main thread sets this flag to true, when it wants the concurrent
- // maker to give up the lock.
- base::AtomicValue<bool> interrupt_request;
- // The concurrent marker waits on this condition until the request
- // flag is cleared by the main thread.
- base::ConditionVariable interrupt_condition;
+ // The main thread sets this flag to true when it wants the concurrent
+ // marker to give up the worker thread.
+ base::AtomicValue<bool> preemption_request;
+
LiveBytesMap live_bytes;
- size_t marked_bytes;
+ size_t marked_bytes = 0;
char cache_line_padding[64];
};
class Task;
void Run(int task_id, TaskState* task_state);
- Heap* heap_;
- MarkingWorklist* shared_;
- MarkingWorklist* bailout_;
- MarkingWorklist* on_hold_;
- WeakObjects* weak_objects_;
+ Heap* const heap_;
+ MarkingWorklist* const shared_;
+ MarkingWorklist* const bailout_;
+ MarkingWorklist* const on_hold_;
+ WeakObjects* const weak_objects_;
TaskState task_state_[kMaxTasks + 1];
- base::AtomicNumber<size_t> total_marked_bytes_;
+ base::AtomicNumber<size_t> total_marked_bytes_{0};
base::Mutex pending_lock_;
base::ConditionVariable pending_condition_;
- int pending_task_count_;
- bool is_pending_[kMaxTasks + 1];
- CancelableTaskManager::Id cancelable_id_[kMaxTasks + 1];
- int task_count_;
+ int pending_task_count_ = 0;
+ bool is_pending_[kMaxTasks + 1] = {};
+ CancelableTaskManager::Id cancelable_id_[kMaxTasks + 1] = {};
+ int task_count_ = 0;
};
} // namespace internal
} // namespace v8
-#endif // V8_HEAP_PAGE_PARALLEL_JOB_
+#endif // V8_HEAP_CONCURRENT_MARKING_H_
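On the caller side, the new header documents that Stop() reports whether marking was actually in progress and that PauseScope resumes marking on exit only in that case. A minimal self-contained sketch of that contract, using a trivial stand-in class rather than V8's ConcurrentMarking:

// --- illustrative sketch (not part of the patch) ---
#include <iostream>

enum class StopRequest { PREEMPT_TASKS, COMPLETE_ONGOING_TASKS };

class FakeConcurrentMarking {
 public:
  void ScheduleTasks() { running_ = true; }
  // Returns true if marking was in progress, false otherwise.
  bool Stop(StopRequest) {
    bool was_running = running_;
    running_ = false;
    return was_running;
  }
  void RescheduleTasksIfNeeded() { running_ = true; }
  bool running() const { return running_; }

 private:
  bool running_ = false;
};

// While the scope is alive, concurrent marking is stopped; it is resumed on
// destruction only if it had actually been running when the scope was entered.
class PauseScope {
 public:
  explicit PauseScope(FakeConcurrentMarking* marking)
      : marking_(marking),
        resume_on_exit_(marking_->Stop(StopRequest::PREEMPT_TASKS)) {}
  ~PauseScope() {
    if (resume_on_exit_) marking_->RescheduleTasksIfNeeded();
  }

 private:
  FakeConcurrentMarking* const marking_;
  const bool resume_on_exit_;
};

int main() {
  FakeConcurrentMarking marking;
  marking.ScheduleTasks();
  {
    PauseScope pause(&marking);
    // Heap objects may be moved here; no concurrent marker is looking at them.
    std::cout << "paused, running=" << marking.running() << "\n";
  }
  std::cout << "resumed, running=" << marking.running() << "\n";
}
// --- end of sketch ---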
diff --git a/deps/v8/src/heap/heap-inl.h b/deps/v8/src/heap/heap-inl.h
index f4e5c1fe13..41af95fa44 100644
--- a/deps/v8/src/heap/heap-inl.h
+++ b/deps/v8/src/heap/heap-inl.h
@@ -626,6 +626,7 @@ CodeSpaceMemoryModificationScope::CodeSpaceMemoryModificationScope(Heap* heap)
LargePage* page = heap_->lo_space()->first_page();
while (page != nullptr) {
if (page->IsFlagSet(MemoryChunk::IS_EXECUTABLE)) {
+ CHECK(heap_->memory_allocator()->IsMemoryChunkExecutable(page));
page->SetReadAndWritable();
}
page = page->next_page();
@@ -640,6 +641,7 @@ CodeSpaceMemoryModificationScope::~CodeSpaceMemoryModificationScope() {
LargePage* page = heap_->lo_space()->first_page();
while (page != nullptr) {
if (page->IsFlagSet(MemoryChunk::IS_EXECUTABLE)) {
+ CHECK(heap_->memory_allocator()->IsMemoryChunkExecutable(page));
page->SetReadAndExecutable();
}
page = page->next_page();
diff --git a/deps/v8/src/heap/heap.cc b/deps/v8/src/heap/heap.cc
index 7f965602b8..9a83c0d172 100644
--- a/deps/v8/src/heap/heap.cc
+++ b/deps/v8/src/heap/heap.cc
@@ -17,7 +17,6 @@
#include "src/bootstrapper.h"
#include "src/code-stubs.h"
#include "src/compilation-cache.h"
-#include "src/compiler-dispatcher/optimizing-compile-dispatcher.h"
#include "src/conversions.h"
#include "src/debug/debug.h"
#include "src/deoptimizer.h"
@@ -46,6 +45,7 @@
#include "src/heap/stress-marking-observer.h"
#include "src/heap/stress-scavenge-observer.h"
#include "src/heap/sweeper.h"
+#include "src/instruction-stream.h"
#include "src/interpreter/interpreter.h"
#include "src/objects/data-handler.h"
#include "src/objects/shared-function-info.h"
@@ -56,6 +56,7 @@
#include "src/snapshot/snapshot.h"
#include "src/tracing/trace-event.h"
#include "src/trap-handler/trap-handler.h"
+#include "src/unicode-decoder.h"
#include "src/unicode-inl.h"
#include "src/utils-inl.h"
#include "src/utils.h"
@@ -177,6 +178,7 @@ Heap::Heap()
raw_allocations_hash_(0),
stress_marking_observer_(nullptr),
stress_scavenge_observer_(nullptr),
+ allocation_step_in_progress_(false),
max_marking_limit_reached_(0.0),
ms_count_(0),
gc_count_(0),
@@ -461,30 +463,6 @@ bool Heap::IsRetainingPathTarget(HeapObject* object,
return false;
}
-namespace {
-const char* RootToString(Root root) {
- switch (root) {
-#define ROOT_CASE(root_id, ignore, description) \
- case Root::root_id: \
- return description;
- ROOT_ID_LIST(ROOT_CASE)
-#undef ROOT_CASE
- case Root::kCodeFlusher:
- return "(Code flusher)";
- case Root::kPartialSnapshotCache:
- return "(Partial snapshot cache)";
- case Root::kWeakCollections:
- return "(Weak collections)";
- case Root::kWrapperTracing:
- return "(Wrapper tracing)";
- case Root::kUnknown:
- return "(Unknown)";
- }
- UNREACHABLE();
- return nullptr;
-}
-} // namespace
-
void Heap::PrintRetainingPath(HeapObject* target, RetainingPathOption option) {
PrintF("\n\n\n");
PrintF("#################################################\n");
@@ -527,7 +505,7 @@ void Heap::PrintRetainingPath(HeapObject* target, RetainingPathOption option) {
}
PrintF("\n");
PrintF("^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n");
- PrintF("Root: %s\n", RootToString(root));
+ PrintF("Root: %s\n", RootVisitor::RootName(root));
PrintF("-------------------------------------------------\n");
}
@@ -644,7 +622,7 @@ const char* Heap::GetSpaceName(int idx) {
return nullptr;
}
-void Heap::SetRootCodeStubs(NumberDictionary* value) {
+void Heap::SetRootCodeStubs(SimpleNumberDictionary* value) {
roots_[kCodeStubsRootIndex] = value;
}
@@ -1112,6 +1090,66 @@ void Heap::CollectAllGarbage(int flags, GarbageCollectionReason gc_reason,
set_current_gc_flags(kNoGCFlags);
}
+namespace {
+
+intptr_t CompareWords(int size, HeapObject* a, HeapObject* b) {
+ int words = size / kPointerSize;
+ DCHECK_EQ(a->Size(), size);
+ DCHECK_EQ(b->Size(), size);
+ intptr_t* slot_a = reinterpret_cast<intptr_t*>(a->address());
+ intptr_t* slot_b = reinterpret_cast<intptr_t*>(b->address());
+ for (int i = 0; i < words; i++) {
+ if (*slot_a != *slot_b) {
+ return *slot_a - *slot_b;
+ }
+ slot_a++;
+ slot_b++;
+ }
+ return 0;
+}
+
+void ReportDuplicates(int size, std::vector<HeapObject*>& objects) {
+ if (objects.size() == 0) return;
+
+ sort(objects.begin(), objects.end(), [size](HeapObject* a, HeapObject* b) {
+ intptr_t c = CompareWords(size, a, b);
+ if (c != 0) return c < 0;
+ return a < b;
+ });
+
+ std::vector<std::pair<int, HeapObject*>> duplicates;
+ HeapObject* current = objects[0];
+ int count = 1;
+ for (size_t i = 1; i < objects.size(); i++) {
+ if (CompareWords(size, current, objects[i]) == 0) {
+ count++;
+ } else {
+ if (count > 1) {
+ duplicates.push_back(std::make_pair(count - 1, current));
+ }
+ count = 1;
+ current = objects[i];
+ }
+ }
+ if (count > 1) {
+ duplicates.push_back(std::make_pair(count - 1, current));
+ }
+
+ int threshold = FLAG_trace_duplicate_threshold_kb * KB;
+
+ sort(duplicates.begin(), duplicates.end());
+ for (auto it = duplicates.rbegin(); it != duplicates.rend(); ++it) {
+ int duplicate_bytes = it->first * size;
+ if (duplicate_bytes < threshold) break;
+ PrintF("%d duplicates of size %d each (%dKB)\n", it->first, size,
+ duplicate_bytes / KB);
+ PrintF("Sample object: ");
+ it->second->Print();
+ PrintF("============================\n");
+ }
+}
+} // anonymous namespace
+
void Heap::CollectAllAvailableGarbage(GarbageCollectionReason gc_reason) {
// Since we are ignoring the return value, the exact choice of space does
// not matter, so long as we do not specify NEW_SPACE, which would not
@@ -1129,12 +1167,9 @@ void Heap::CollectAllAvailableGarbage(GarbageCollectionReason gc_reason) {
}
RuntimeCallTimerScope runtime_timer(
isolate(), RuntimeCallCounterId::kGC_Custom_AllAvailableGarbage);
- if (isolate()->concurrent_recompilation_enabled()) {
- // The optimizing compiler may be unnecessarily holding on to memory.
- DisallowHeapAllocation no_recursive_gc;
- isolate()->optimizing_compile_dispatcher()->Flush(
- OptimizingCompileDispatcher::BlockingBehavior::kDontBlock);
- }
+
+ // The optimizing compiler may be unnecessarily holding on to memory.
+ isolate()->AbortConcurrentOptimization(BlockingBehavior::kDontBlock);
isolate()->ClearSerializerData();
set_current_gc_flags(kMakeHeapIterableMask | kReduceMemoryFootprintMask);
isolate_->compilation_cache()->Clear();
@@ -1151,6 +1186,28 @@ void Heap::CollectAllAvailableGarbage(GarbageCollectionReason gc_reason) {
set_current_gc_flags(kNoGCFlags);
new_space_->Shrink();
UncommitFromSpace();
+
+ if (FLAG_trace_duplicate_threshold_kb) {
+ std::map<int, std::vector<HeapObject*>> objects_by_size;
+ PagedSpaces spaces(this);
+ for (PagedSpace* space = spaces.next(); space != nullptr;
+ space = spaces.next()) {
+ HeapObjectIterator it(space);
+ for (HeapObject* obj = it.Next(); obj != nullptr; obj = it.Next()) {
+ objects_by_size[obj->Size()].push_back(obj);
+ }
+ }
+ {
+ LargeObjectIterator it(lo_space());
+ for (HeapObject* obj = it.Next(); obj != nullptr; obj = it.Next()) {
+ objects_by_size[obj->Size()].push_back(obj);
+ }
+ }
+ for (auto it = objects_by_size.rbegin(); it != objects_by_size.rend();
+ ++it) {
+ ReportDuplicates(it->first, it->second);
+ }
+ }
}
void Heap::ReportExternalMemoryPressure() {
@@ -1316,11 +1373,8 @@ int Heap::NotifyContextDisposed(bool dependant_context) {
event.time_ms = MonotonicallyIncreasingTimeInMs();
memory_reducer_->NotifyPossibleGarbage(event);
}
- if (isolate()->concurrent_recompilation_enabled()) {
- // Flush the queued recompilation tasks.
- isolate()->optimizing_compile_dispatcher()->Flush(
- OptimizingCompileDispatcher::BlockingBehavior::kDontBlock);
- }
+ isolate()->AbortConcurrentOptimization(BlockingBehavior::kDontBlock);
+
number_of_disposed_maps_ = retained_maps()->Length();
tracer()->AddContextDisposalTime(MonotonicallyIncreasingTimeInMs());
return ++contexts_disposed_;
@@ -1733,12 +1787,12 @@ void Heap::MarkCompact() {
void Heap::MinorMarkCompact() {
DCHECK(FLAG_minor_mc);
+ PauseAllocationObserversScope pause_observers(this);
SetGCState(MINOR_MARK_COMPACT);
LOG(isolate_, ResourceEvent("MinorMarkCompact", "begin"));
TRACE_GC(tracer(), GCTracer::Scope::MINOR_MC);
AlwaysAllocateScope always_allocate(isolate());
- PauseAllocationObserversScope pause_observers(this);
IncrementalMarking::PauseBlackAllocationScope pause_black_allocation(
incremental_marking());
CodeSpaceMemoryModificationScope code_modifcation(this);
@@ -1924,11 +1978,10 @@ int Heap::NumberOfScavengeTasks() {
if (!FLAG_parallel_scavenge) return 1;
const int num_scavenge_tasks =
static_cast<int>(new_space()->TotalCapacity()) / MB;
- return Max(
- 1,
- Min(Min(num_scavenge_tasks, kMaxScavengerTasks),
- static_cast<int>(
- V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads())));
+ static int num_cores =
+ 1 + static_cast<int>(
+ V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads());
+ return Max(1, Min(Min(num_scavenge_tasks, kMaxScavengerTasks), num_cores));
}
void Heap::Scavenge() {
@@ -2015,7 +2068,7 @@ void Heap::Scavenge() {
{
// Parallel phase scavenging all copied and promoted objects.
TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_SCAVENGE_PARALLEL);
- job.Run();
+ job.Run(isolate()->async_counters());
DCHECK(copied_list.IsGlobalEmpty());
DCHECK(promotion_list.IsGlobalEmpty());
}
@@ -2187,7 +2240,8 @@ void Heap::ExternalStringTable::PromoteAllNewSpaceStrings() {
void Heap::ExternalStringTable::IterateNewSpaceStrings(RootVisitor* v) {
if (!new_space_strings_.empty()) {
- v->VisitRootPointers(Root::kExternalStringsTable, new_space_strings_.data(),
+ v->VisitRootPointers(Root::kExternalStringsTable, nullptr,
+ new_space_strings_.data(),
new_space_strings_.data() + new_space_strings_.size());
}
}
@@ -2195,7 +2249,8 @@ void Heap::ExternalStringTable::IterateNewSpaceStrings(RootVisitor* v) {
void Heap::ExternalStringTable::IterateAll(RootVisitor* v) {
IterateNewSpaceStrings(v);
if (!old_space_strings_.empty()) {
- v->VisitRootPointers(Root::kExternalStringsTable, old_space_strings_.data(),
+ v->VisitRootPointers(Root::kExternalStringsTable, nullptr,
+ old_space_strings_.data(),
old_space_strings_.data() + old_space_strings_.size());
}
}
@@ -2301,7 +2356,8 @@ void Heap::VisitExternalResources(v8::ExternalResourceVisitor* visitor) {
explicit ExternalStringTableVisitorAdapter(
v8::ExternalResourceVisitor* visitor)
: visitor_(visitor) {}
- virtual void VisitRootPointers(Root root, Object** start, Object** end) {
+ virtual void VisitRootPointers(Root root, const char* description,
+ Object** start, Object** end) {
for (Object** p = start; p < end; p++) {
DCHECK((*p)->IsExternalString());
visitor_->VisitExternalString(
@@ -2512,12 +2568,12 @@ AllocationResult Heap::AllocateHeapNumber(MutableMode mode,
return result;
}
-AllocationResult Heap::AllocateBigInt(int length) {
+AllocationResult Heap::AllocateBigInt(int length, PretenureFlag pretenure) {
if (length < 0 || length > BigInt::kMaxLength) {
v8::internal::Heap::FatalProcessOutOfMemory("invalid BigInt length", true);
}
int size = BigInt::SizeFor(length);
- AllocationSpace space = SelectSpace(NOT_TENURED);
+ AllocationSpace space = SelectSpace(pretenure);
HeapObject* result = nullptr;
{
AllocationResult allocation = AllocateRaw(size, space);
@@ -2541,6 +2597,20 @@ AllocationResult Heap::AllocateCell(Object* value) {
return result;
}
+AllocationResult Heap::AllocateFeedbackCell(Map* map, HeapObject* value) {
+ int size = FeedbackCell::kSize;
+ STATIC_ASSERT(FeedbackCell::kSize <= kMaxRegularHeapObjectSize);
+
+ HeapObject* result = nullptr;
+ {
+ AllocationResult allocation = AllocateRaw(size, OLD_SPACE);
+ if (!allocation.To(&result)) return allocation;
+ }
+ result->set_map_after_allocation(map, SKIP_WRITE_BARRIER);
+ FeedbackCell::cast(result)->set_value(value);
+ return result;
+}
+
AllocationResult Heap::AllocatePropertyCell(Name* name) {
DCHECK(name->IsUniqueName());
int size = PropertyCell::kSize;
@@ -2849,11 +2919,11 @@ AllocationResult Heap::AllocateBytecodeArray(int length,
instance->set_parameter_count(parameter_count);
instance->set_incoming_new_target_or_generator_register(
interpreter::Register::invalid_value());
- instance->set_interrupt_budget(interpreter::Interpreter::kInterruptBudget);
+ instance->set_interrupt_budget(interpreter::Interpreter::InterruptBudget());
instance->set_osr_loop_nesting_level(0);
instance->set_bytecode_age(BytecodeArray::kNoAgeBytecodeAge);
instance->set_constant_pool(constant_pool);
- instance->set_handler_table(empty_fixed_array());
+ instance->set_handler_table(empty_byte_array());
instance->set_source_position_table(empty_byte_array());
CopyBytes(instance->GetFirstBytecodeAddress(), raw_bytecodes, length);
instance->clear_padding();
@@ -3145,10 +3215,10 @@ AllocationResult Heap::AllocateCode(int object_size, Movability movability) {
AllocationResult Heap::AllocateCode(
const CodeDesc& desc, Code::Kind kind, Handle<Object> self_ref,
int32_t builtin_index, ByteArray* reloc_info,
- CodeDataContainer* data_container, HandlerTable* handler_table,
- ByteArray* source_position_table, DeoptimizationData* deopt_data,
- Movability movability, uint32_t stub_key, bool is_turbofanned,
- int stack_slots, int safepoint_table_offset) {
+ CodeDataContainer* data_container, ByteArray* source_position_table,
+ DeoptimizationData* deopt_data, Movability movability, uint32_t stub_key,
+ bool is_turbofanned, int stack_slots, int safepoint_table_offset,
+ int handler_table_offset) {
bool has_unwinding_info = desc.unwinding_info != nullptr;
DCHECK((has_unwinding_info && desc.unwinding_info_size > 0) ||
(!has_unwinding_info && desc.unwinding_info_size == 0));
@@ -3174,11 +3244,11 @@ AllocationResult Heap::AllocateCode(
code->set_relocation_info(reloc_info);
code->initialize_flags(kind, has_unwinding_info, is_turbofanned, stack_slots);
code->set_safepoint_table_offset(safepoint_table_offset);
+ code->set_handler_table_offset(handler_table_offset);
code->set_code_data_container(data_container);
code->set_has_tagged_params(true);
code->set_deoptimization_data(deopt_data);
code->set_stub_key(stub_key);
- code->set_handler_table(handler_table);
code->set_source_position_table(source_position_table);
code->set_protected_instructions(empty_fixed_array(), SKIP_WRITE_BARRIER);
code->set_constant_pool_offset(desc.instr_size - desc.constant_pool_size);
@@ -3322,6 +3392,21 @@ AllocationResult Heap::Allocate(Map* map, AllocationSpace space,
return result;
}
+AllocationResult Heap::AllocateJSPromise(JSFunction* constructor,
+ PretenureFlag pretenure) {
+ AllocationResult allocation = AllocateJSObject(constructor, pretenure);
+ JSPromise* promise = nullptr;
+ if (!allocation.To(&promise)) return allocation;
+
+ // Setup JSPromise fields
+ promise->set_reactions_or_result(Smi::kZero);
+ promise->set_flags(0);
+ for (int i = 0; i < v8::Promise::kEmbedderFieldCount; i++) {
+ promise->SetEmbedderField(i, Smi::kZero);
+ }
+ return promise;
+}
+
void Heap::InitializeJSObjectFromMap(JSObject* obj, Object* properties,
Map* map) {
obj->set_raw_properties_or_hash(properties);
@@ -3503,28 +3588,17 @@ static inline void WriteOneByteData(Vector<const char> vector, uint8_t* chars,
static inline void WriteTwoByteData(Vector<const char> vector, uint16_t* chars,
int len) {
- const uint8_t* stream = reinterpret_cast<const uint8_t*>(vector.start());
- size_t stream_length = vector.length();
- while (stream_length != 0) {
- size_t consumed = 0;
- uint32_t c = unibrow::Utf8::ValueOf(stream, stream_length, &consumed);
+ unibrow::Utf8Iterator it = unibrow::Utf8Iterator(vector);
+ while (!it.Done()) {
+ DCHECK_GT(len, 0);
+ len -= 1;
+
+ uint16_t c = *it;
+ ++it;
DCHECK_NE(unibrow::Utf8::kBadChar, c);
- DCHECK(consumed <= stream_length);
- stream_length -= consumed;
- stream += consumed;
- if (c > unibrow::Utf16::kMaxNonSurrogateCharCode) {
- len -= 2;
- if (len < 0) break;
- *chars++ = unibrow::Utf16::LeadSurrogate(c);
- *chars++ = unibrow::Utf16::TrailSurrogate(c);
- } else {
- len -= 1;
- if (len < 0) break;
- *chars++ = c;
- }
+ *chars++ = c;
}
- DCHECK_EQ(0, stream_length);
- DCHECK_EQ(0, len);
+ DCHECK_EQ(len, 0);
}
@@ -4447,12 +4521,8 @@ class MemoryPressureInterruptTask : public CancelableTask {
void Heap::CheckMemoryPressure() {
if (HighMemoryPressure()) {
- if (isolate()->concurrent_recompilation_enabled()) {
- // The optimizing compiler may be unnecessarily holding on to memory.
- DisallowHeapAllocation no_recursive_gc;
- isolate()->optimizing_compile_dispatcher()->Flush(
- OptimizingCompileDispatcher::BlockingBehavior::kDontBlock);
- }
+ // The optimizing compiler may be unnecessarily holding on to memory.
+ isolate()->AbortConcurrentOptimization(BlockingBehavior::kDontBlock);
}
if (memory_pressure_level_.Value() == MemoryPressureLevel::kCritical) {
CollectGarbageOnMemoryPressure();
@@ -4877,8 +4947,9 @@ void Heap::IterateWeakRoots(RootVisitor* v, VisitMode mode) {
const bool isMinorGC = mode == VISIT_ALL_IN_SCAVENGE ||
mode == VISIT_ALL_IN_MINOR_MC_MARK ||
mode == VISIT_ALL_IN_MINOR_MC_UPDATE;
- v->VisitRootPointer(Root::kStringTable, reinterpret_cast<Object**>(
- &roots_[kStringTableRootIndex]));
+ v->VisitRootPointer(
+ Root::kStringTable, nullptr,
+ reinterpret_cast<Object**>(&roots_[kStringTableRootIndex]));
v->Synchronize(VisitorSynchronization::kStringTable);
if (!isMinorGC && mode != VISIT_ALL_IN_SWEEP_NEWSPACE &&
mode != VISIT_FOR_SERIALIZATION) {
@@ -4893,13 +4964,13 @@ void Heap::IterateWeakRoots(RootVisitor* v, VisitMode mode) {
void Heap::IterateSmiRoots(RootVisitor* v) {
// Acquire execution access since we are going to read stack limit values.
ExecutionAccess access(isolate());
- v->VisitRootPointers(Root::kSmiRootList, &roots_[kSmiRootsStart],
+ v->VisitRootPointers(Root::kSmiRootList, nullptr, &roots_[kSmiRootsStart],
&roots_[kRootListLength]);
v->Synchronize(VisitorSynchronization::kSmiRootList);
}
void Heap::IterateEncounteredWeakCollections(RootVisitor* visitor) {
- visitor->VisitRootPointer(Root::kWeakCollections,
+ visitor->VisitRootPointer(Root::kWeakCollections, nullptr,
&encountered_weak_collections_);
}
@@ -4913,9 +4984,13 @@ class FixStaleLeftTrimmedHandlesVisitor : public RootVisitor {
USE(heap_);
}
- void VisitRootPointer(Root root, Object** p) override { FixHandle(p); }
+ void VisitRootPointer(Root root, const char* description,
+ Object** p) override {
+ FixHandle(p);
+ }
- void VisitRootPointers(Root root, Object** start, Object** end) override {
+ void VisitRootPointers(Root root, const char* description, Object** start,
+ Object** end) override {
for (Object** p = start; p < end; p++) FixHandle(p);
}
@@ -4951,7 +5026,7 @@ void Heap::IterateStrongRoots(RootVisitor* v, VisitMode mode) {
const bool isMinorGC = mode == VISIT_ALL_IN_SCAVENGE ||
mode == VISIT_ALL_IN_MINOR_MC_MARK ||
mode == VISIT_ALL_IN_MINOR_MC_UPDATE;
- v->VisitRootPointers(Root::kStrongRootList, &roots_[0],
+ v->VisitRootPointers(Root::kStrongRootList, nullptr, &roots_[0],
&roots_[kStrongRootListLength]);
v->Synchronize(VisitorSynchronization::kStrongRootList);
@@ -5026,7 +5101,7 @@ void Heap::IterateStrongRoots(RootVisitor* v, VisitMode mode) {
// Iterate over other strong roots (currently only identity maps).
for (StrongRootsList* list = strong_roots_list_; list; list = list->next) {
- v->VisitRootPointers(Root::kStrongRoots, list->start, list->end);
+ v->VisitRootPointers(Root::kStrongRoots, nullptr, list->start, list->end);
}
v->Synchronize(VisitorSynchronization::kStrongRoots);
@@ -5038,6 +5113,9 @@ void Heap::IterateStrongRoots(RootVisitor* v, VisitMode mode) {
}
}
+void Heap::IterateWeakGlobalHandles(RootVisitor* v) {
+ isolate_->global_handles()->IterateWeakRoots(v);
+}
// TODO(1236194): Since the heap size is configurable on the command line
// and through the API, we should gracefully handle the case that the heap
@@ -5745,7 +5823,8 @@ void Heap::RegisterExternallyReferencedObject(Object** object) {
}
void Heap::TearDown() {
- use_tasks_ = false;
+ SetGCState(TEAR_DOWN);
+ DCHECK(!use_tasks_);
#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
Verify();
@@ -6035,7 +6114,8 @@ void Heap::FatalProcessOutOfMemory(const char* location, bool is_heap_oom) {
class PrintHandleVisitor : public RootVisitor {
public:
- void VisitRootPointers(Root root, Object** start, Object** end) override {
+ void VisitRootPointers(Root root, const char* description, Object** start,
+ Object** end) override {
for (Object** p = start; p < end; p++)
PrintF(" handle %p to %p\n", reinterpret_cast<void*>(p),
reinterpret_cast<void*>(*p));
@@ -6057,7 +6137,8 @@ class CheckHandleCountVisitor : public RootVisitor {
~CheckHandleCountVisitor() override {
CHECK_GT(HandleScope::kCheckHandleThreshold, handle_count_);
}
- void VisitRootPointers(Root root, Object** start, Object** end) override {
+ void VisitRootPointers(Root root, const char* description, Object** start,
+ Object** end) override {
handle_count_ += end - start;
}
@@ -6207,7 +6288,8 @@ class UnreachableObjectsFilter : public HeapObjectsFilter {
MarkPointers(start, end);
}
- void VisitRootPointers(Root root, Object** start, Object** end) override {
+ void VisitRootPointers(Root root, const char* description, Object** start,
+ Object** end) override {
MarkPointers(start, end);
}
@@ -6449,6 +6531,10 @@ void Heap::SetDeserializeLazyHandlerExtraWide(Code* code) {
set_deserialize_lazy_handler_extra_wide(code);
}
+void Heap::SetBuiltinsConstantsTable(FixedArray* cache) {
+ set_builtins_constants_table(cache);
+}
+
size_t Heap::NumberOfTrackedHeapObjectTypes() {
return ObjectStats::OBJECT_STATS_COUNT;
}
@@ -6480,19 +6566,13 @@ bool Heap::GetObjectTypeName(size_t index, const char** object_type,
return true;
INSTANCE_TYPE_LIST(COMPARE_AND_RETURN_NAME)
#undef COMPARE_AND_RETURN_NAME
-#define COMPARE_AND_RETURN_NAME(name) \
- case ObjectStats::FIRST_CODE_KIND_SUB_TYPE + Code::name: \
- *object_type = "CODE_TYPE"; \
- *object_sub_type = "CODE_KIND/" #name; \
- return true;
- CODE_KIND_LIST(COMPARE_AND_RETURN_NAME)
-#undef COMPARE_AND_RETURN_NAME
-#define COMPARE_AND_RETURN_NAME(name) \
- case ObjectStats::FIRST_FIXED_ARRAY_SUB_TYPE + name: \
- *object_type = "FIXED_ARRAY_TYPE"; \
- *object_sub_type = #name; \
+
+#define COMPARE_AND_RETURN_NAME(name) \
+ case ObjectStats::FIRST_VIRTUAL_TYPE + ObjectStats::name: \
+ *object_type = #name; \
+ *object_sub_type = ""; \
return true;
- FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(COMPARE_AND_RETURN_NAME)
+ VIRTUAL_INSTANCE_TYPE_LIST(COMPARE_AND_RETURN_NAME)
#undef COMPARE_AND_RETURN_NAME
}
return false;
@@ -6537,8 +6617,9 @@ void VerifyPointersVisitor::VisitPointers(HeapObject* host, Object** start,
VerifyPointers(start, end);
}
-void VerifyPointersVisitor::VisitRootPointers(Root root, Object** start,
- Object** end) {
+void VerifyPointersVisitor::VisitRootPointers(Root root,
+ const char* description,
+ Object** start, Object** end) {
VerifyPointers(start, end);
}
@@ -6554,8 +6635,8 @@ void VerifyPointersVisitor::VerifyPointers(Object** start, Object** end) {
}
}
-void VerifySmisVisitor::VisitRootPointers(Root root, Object** start,
- Object** end) {
+void VerifySmisVisitor::VisitRootPointers(Root root, const char* description,
+ Object** start, Object** end) {
for (Object** current = start; current < end; current++) {
CHECK((*current)->IsSmi());
}
@@ -6580,12 +6661,11 @@ bool Heap::AllowedToBeMigrated(HeapObject* obj, AllocationSpace dst) {
AllocationSpace src = chunk->owner()->identity();
switch (src) {
case NEW_SPACE:
- return dst == src || dst == OLD_SPACE;
+ return dst == NEW_SPACE || dst == OLD_SPACE;
case OLD_SPACE:
- return dst == src &&
- (dst == OLD_SPACE || obj->IsFiller() || obj->IsExternalString());
+ return dst == OLD_SPACE;
case CODE_SPACE:
- return dst == src && type == CODE_TYPE;
+ return dst == CODE_SPACE && type == CODE_TYPE;
case MAP_SPACE:
case LO_SPACE:
return false;
@@ -6612,6 +6692,7 @@ void AllocationObserver::AllocationStep(int bytes_allocated,
step_size_ = GetNextStepSize();
bytes_to_next_step_ = step_size_;
}
+ DCHECK_GE(bytes_to_next_step_, 0);
}
namespace {
@@ -6638,12 +6719,24 @@ Code* GcSafeCastToCode(Heap* heap, HeapObject* object, Address inner_pointer) {
bool Heap::GcSafeCodeContains(HeapObject* code, Address addr) {
Map* map = GcSafeMapOfCodeSpaceObject(code);
DCHECK(map == code->GetHeap()->code_map());
+#ifdef V8_EMBEDDED_BUILTINS
+ if (FLAG_stress_off_heap_code) {
+ if (InstructionStream::TryLookupCode(isolate(), addr) == code) return true;
+ }
+#endif
Address start = code->address();
Address end = code->address() + code->SizeFromMap(map);
return start <= addr && addr < end;
}
Code* Heap::GcSafeFindCodeForInnerPointer(Address inner_pointer) {
+#ifdef V8_EMBEDDED_BUILTINS
+ if (FLAG_stress_off_heap_code) {
+ Code* code = InstructionStream::TryLookupCode(isolate(), inner_pointer);
+ if (code != nullptr) return code;
+ }
+#endif
+
// Check if the inner pointer points into a large object chunk.
LargePage* large_page = lo_space()->FindPage(inner_pointer);
if (large_page != nullptr) {
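A large share of the heap.cc hunks above simply thread a new const char* description argument through the RootVisitor callbacks (often passed as nullptr). A compact stand-alone sketch of the updated callback shape, with stand-in Root/Object types and a made-up label, for readers who only want the new signature:

// --- illustrative sketch (not part of the patch) ---
#include <cstdio>

enum class Root { kStringTable, kStrongRootList, kStrongRoots };
using Object = void;

class RootVisitorLike {
 public:
  virtual ~RootVisitorLike() = default;
  // The extra description argument carries a human-readable label for the
  // root range being visited, or nullptr when no label is available.
  virtual void VisitRootPointer(Root root, const char* description,
                                Object** p) = 0;
  virtual void VisitRootPointers(Root root, const char* description,
                                 Object** start, Object** end) = 0;
};

class CountingVisitor : public RootVisitorLike {
 public:
  void VisitRootPointer(Root root, const char* description,
                        Object** p) override {
    VisitRootPointers(root, description, p, p + 1);
  }
  void VisitRootPointers(Root root, const char* description, Object** start,
                         Object** end) override {
    count_ += end - start;
    if (description != nullptr) std::printf("visited %s\n", description);
  }
  long count() const { return count_; }

 private:
  long count_ = 0;
};

int main() {
  Object* slots[3] = {nullptr, nullptr, nullptr};
  CountingVisitor v;
  v.VisitRootPointers(Root::kStringTable, "(String table)", slots, slots + 3);
  std::printf("count=%ld\n", v.count());
}
// --- end of sketch ---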
diff --git a/deps/v8/src/heap/heap.h b/deps/v8/src/heap/heap.h
index 7cc65479ca..63bcfb2990 100644
--- a/deps/v8/src/heap/heap.h
+++ b/deps/v8/src/heap/heap.h
@@ -114,6 +114,7 @@ using v8::MemoryPressureLevel;
V(Map, name_dictionary_map, NameDictionaryMap) \
V(Map, global_dictionary_map, GlobalDictionaryMap) \
V(Map, number_dictionary_map, NumberDictionaryMap) \
+ V(Map, simple_number_dictionary_map, SimpleNumberDictionaryMap) \
V(Map, string_table_map, StringTableMap) \
V(Map, weak_hash_table_map, WeakHashTableMap) \
V(Map, sloppy_arguments_elements_map, SloppyArgumentsElementsMap) \
@@ -168,6 +169,8 @@ using v8::MemoryPressureLevel;
V(Map, fixed_float32_array_map, FixedFloat32ArrayMap) \
V(Map, fixed_float64_array_map, FixedFloat64ArrayMap) \
V(Map, fixed_uint8_clamped_array_map, FixedUint8ClampedArrayMap) \
+ V(Map, fixed_biguint64_array_map, FixedBigUint64ArrayMap) \
+ V(Map, fixed_bigint64_array_map, FixedBigInt64ArrayMap) \
/* Oddball maps */ \
V(Map, undefined_map, UndefinedMap) \
V(Map, the_hole_map, TheHoleMap) \
@@ -193,8 +196,11 @@ using v8::MemoryPressureLevel;
V(FixedTypedArrayBase, empty_fixed_float64_array, EmptyFixedFloat64Array) \
V(FixedTypedArrayBase, empty_fixed_uint8_clamped_array, \
EmptyFixedUint8ClampedArray) \
+ V(FixedTypedArrayBase, empty_fixed_biguint64_array, \
+ EmptyFixedBigUint64Array) \
+ V(FixedTypedArrayBase, empty_fixed_bigint64_array, EmptyFixedBigInt64Array) \
V(Script, empty_script, EmptyScript) \
- V(Cell, undefined_cell, UndefinedCell) \
+ V(FeedbackCell, many_closures_cell, ManyClosuresCell) \
V(FixedArray, empty_sloppy_arguments_elements, EmptySloppyArgumentsElements) \
V(NumberDictionary, empty_slow_element_dictionary, \
EmptySlowElementDictionary) \
@@ -213,6 +219,8 @@ using v8::MemoryPressureLevel;
V(PropertyCell, array_iterator_protector, ArrayIteratorProtector) \
V(PropertyCell, array_buffer_neutering_protector, \
ArrayBufferNeuteringProtector) \
+ V(PropertyCell, promise_hook_protector, PromiseHookProtector) \
+ V(PropertyCell, promise_then_protector, PromiseThenProtector) \
/* Special numbers */ \
V(HeapNumber, nan_value, NanValue) \
V(HeapNumber, hole_nan_value, HoleNanValue) \
@@ -230,7 +238,7 @@ using v8::MemoryPressureLevel;
V(NameDictionary, api_symbol_table, ApiSymbolTable) \
V(NameDictionary, api_private_symbol_table, ApiPrivateSymbolTable) \
V(Object, script_list, ScriptList) \
- V(NumberDictionary, code_stubs, CodeStubs) \
+ V(SimpleNumberDictionary, code_stubs, CodeStubs) \
V(FixedArray, materialized_objects, MaterializedObjects) \
V(FixedArray, microtask_queue, MicrotaskQueue) \
V(FixedArray, detached_contexts, DetachedContexts) \
@@ -242,6 +250,8 @@ using v8::MemoryPressureLevel;
/* slots refer to the code with the reference to the weak object. */ \
V(ArrayList, weak_new_space_object_to_code_list, \
WeakNewSpaceObjectToCodeList) \
+ /* Indirection lists for isolate-independent builtins */ \
+ V(FixedArray, builtins_constants_table, BuiltinsConstantsTable) \
/* Feedback vectors that we need for code coverage or type profile */ \
V(Object, feedback_vectors_for_profiling_tools, \
FeedbackVectorsForProfilingTools) \
@@ -340,6 +350,7 @@ using v8::MemoryPressureLevel;
V(JsConstructEntryCode) \
V(JsEntryCode) \
V(JSMessageObjectMap) \
+ V(ManyClosuresCell) \
V(ManyClosuresCellMap) \
V(MetaMap) \
V(MinusInfinityValue) \
@@ -363,6 +374,7 @@ using v8::MemoryPressureLevel;
V(ScopeInfoMap) \
V(ScriptContextMap) \
V(SharedFunctionInfoMap) \
+ V(SimpleNumberDictionaryMap) \
V(SloppyArgumentsElementsMap) \
V(SmallOrderedHashMapMap) \
V(SmallOrderedHashSetMap) \
@@ -377,7 +389,6 @@ using v8::MemoryPressureLevel;
V(TransitionArrayMap) \
V(TrueValue) \
V(TwoPointerFillerMap) \
- V(UndefinedCell) \
V(UndefinedMap) \
V(UndefinedValue) \
V(UninitializedMap) \
@@ -575,7 +586,13 @@ class Heap {
enum FindMementoMode { kForRuntime, kForGC };
- enum HeapState { NOT_IN_GC, SCAVENGE, MARK_COMPACT, MINOR_MARK_COMPACT };
+ enum HeapState {
+ NOT_IN_GC,
+ SCAVENGE,
+ MARK_COMPACT,
+ MINOR_MARK_COMPACT,
+ TEAR_DOWN
+ };
using PretenuringFeedbackMap = std::unordered_map<AllocationSite*, size_t>;
@@ -966,6 +983,8 @@ class Heap {
// Returns whether SetUp has been called.
bool HasBeenSetUp();
+ void stop_using_tasks() { use_tasks_ = false; }
+
bool use_tasks() const { return use_tasks_; }
// ===========================================================================
@@ -1062,7 +1081,7 @@ class Heap {
Object** roots_array_start() { return roots_; }
// Sets the stub_cache_ (only used when expanding the dictionary).
- void SetRootCodeStubs(NumberDictionary* value);
+ void SetRootCodeStubs(SimpleNumberDictionary* value);
void SetRootMaterializedObjects(FixedArray* objects) {
roots_[kMaterializedObjectsRootIndex] = objects;
@@ -1110,6 +1129,8 @@ class Heap {
void SetDeserializeLazyHandlerWide(Code* code);
void SetDeserializeLazyHandlerExtraWide(Code* code);
+ void SetBuiltinsConstantsTable(FixedArray* cache);
+
// ===========================================================================
// Inline allocation. ========================================================
// ===========================================================================
@@ -1161,15 +1182,15 @@ class Heap {
// Iterators. ================================================================
// ===========================================================================
- // Iterates over all roots in the heap.
void IterateRoots(RootVisitor* v, VisitMode mode);
- // Iterates over all strong roots in the heap.
void IterateStrongRoots(RootVisitor* v, VisitMode mode);
// Iterates over entries in the smi roots list. Only interesting to the
// serializer/deserializer, since GC does not care about smis.
void IterateSmiRoots(RootVisitor* v);
- // Iterates over all the other roots in the heap.
+ // Iterates over weak string tables.
void IterateWeakRoots(RootVisitor* v, VisitMode mode);
+ // Iterates over weak global handles.
+ void IterateWeakGlobalHandles(RootVisitor* v);
// ===========================================================================
// Store buffer API. =========================================================
@@ -1571,6 +1592,11 @@ class Heap {
void RemoveAllocationObserversFromAllSpaces(
AllocationObserver* observer, AllocationObserver* new_space_observer);
+ bool allocation_step_in_progress() { return allocation_step_in_progress_; }
+ void set_allocation_step_in_progress(bool val) {
+ allocation_step_in_progress_ = val;
+ }
+
// ===========================================================================
// Retaining path tracking. ==================================================
// ===========================================================================
@@ -2076,7 +2102,8 @@ class Heap {
MUST_USE_RESULT AllocationResult AllocateHeapNumber(
MutableMode mode = IMMUTABLE, PretenureFlag pretenure = NOT_TENURED);
- MUST_USE_RESULT AllocationResult AllocateBigInt(int length);
+ MUST_USE_RESULT AllocationResult
+ AllocateBigInt(int length, PretenureFlag pretenure = NOT_TENURED);
// Allocates a byte array of the specified length
MUST_USE_RESULT AllocationResult
@@ -2265,6 +2292,10 @@ class Heap {
// Allocate a tenured simple cell.
MUST_USE_RESULT AllocationResult AllocateCell(Object* value);
+ // Allocate a tenured simple feedback cell.
+ MUST_USE_RESULT AllocationResult AllocateFeedbackCell(Map* map,
+ HeapObject* value);
+
// Allocate a tenured JS global property cell initialized with the hole.
MUST_USE_RESULT AllocationResult AllocatePropertyCell(Name* name);
@@ -2287,13 +2318,16 @@ class Heap {
// Allocates a new code object (fully initialized). All header fields of the
// returned object are immutable and the code object is write protected.
- MUST_USE_RESULT AllocationResult
- AllocateCode(const CodeDesc& desc, Code::Kind kind, Handle<Object> self_ref,
- int32_t builtin_index, ByteArray* reloc_info,
- CodeDataContainer* data_container, HandlerTable* handler_table,
- ByteArray* source_position_table, DeoptimizationData* deopt_data,
- Movability movability, uint32_t stub_key, bool is_turbofanned,
- int stack_slots, int safepoint_table_offset);
+ MUST_USE_RESULT AllocationResult AllocateCode(
+ const CodeDesc& desc, Code::Kind kind, Handle<Object> self_ref,
+ int32_t builtin_index, ByteArray* reloc_info,
+ CodeDataContainer* data_container, ByteArray* source_position_table,
+ DeoptimizationData* deopt_data, Movability movability, uint32_t stub_key,
+ bool is_turbofanned, int stack_slots, int safepoint_table_offset,
+ int handler_table_offset);
+
+ MUST_USE_RESULT AllocationResult AllocateJSPromise(
+ JSFunction* constructor, PretenureFlag pretenure = NOT_TENURED);
void set_force_oom(bool value) { force_oom_ = value; }
@@ -2400,6 +2434,8 @@ class Heap {
// Observer that can cause early scavenge start.
StressScavengeObserver* stress_scavenge_observer_;
+ bool allocation_step_in_progress_;
+
// The maximum percent of the marking limit reached wihout causing marking.
// This is tracked when specyfing --fuzzer-gc-analysis.
double max_marking_limit_reached_;
@@ -2658,6 +2694,7 @@ class AlwaysAllocateScope {
Heap* heap_;
};
+// The CodeSpaceMemoryModificationScope can only be used by the main thread.
class CodeSpaceMemoryModificationScope {
public:
explicit inline CodeSpaceMemoryModificationScope(Heap* heap);
@@ -2667,6 +2704,9 @@ class CodeSpaceMemoryModificationScope {
Heap* heap_;
};
+// The CodePageMemoryModificationScope does not check if transitions to
+// writeable and back to executable are actually allowed, i.e. the MemoryChunk
+// was registered to be executable. It can be used by concurrent threads.
class CodePageMemoryModificationScope {
public:
explicit inline CodePageMemoryModificationScope(MemoryChunk* chunk);
@@ -2689,7 +2729,8 @@ class CodePageMemoryModificationScope {
class VerifyPointersVisitor : public ObjectVisitor, public RootVisitor {
public:
void VisitPointers(HeapObject* host, Object** start, Object** end) override;
- void VisitRootPointers(Root root, Object** start, Object** end) override;
+ void VisitRootPointers(Root root, const char* description, Object** start,
+ Object** end) override;
private:
void VerifyPointers(Object** start, Object** end);
@@ -2699,7 +2740,8 @@ class VerifyPointersVisitor : public ObjectVisitor, public RootVisitor {
// Verify that all objects are Smis.
class VerifySmisVisitor : public RootVisitor {
public:
- void VisitRootPointers(Root root, Object** start, Object** end) override;
+ void VisitRootPointers(Root root, const char* description, Object** start,
+ Object** end) override;
};
// Space iterator for iterating over all the paged spaces of the heap: Map
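The scope comments added above draw a line between a main-thread-only scope that flips protection for every executable chunk in the code space and a per-chunk scope that individual (possibly concurrent) threads can hold. A self-contained sketch of that RAII split, with stand-in Heap/MemoryChunk types and plain booleans in place of real page-protection calls:

// --- illustrative sketch (not part of the patch) ---
#include <iostream>
#include <vector>

struct FakeChunk {
  bool executable;
  bool writable;
};

struct FakeHeap {
  std::vector<FakeChunk> code_chunks;
};

// Main-thread-only flavor: flips every executable chunk in the heap to
// writable on entry and back to non-writable on exit.
class CodeSpaceModificationScope {
 public:
  explicit CodeSpaceModificationScope(FakeHeap* heap) : heap_(heap) {
    for (FakeChunk& chunk : heap_->code_chunks)
      if (chunk.executable) chunk.writable = true;
  }
  ~CodeSpaceModificationScope() {
    for (FakeChunk& chunk : heap_->code_chunks)
      if (chunk.executable) chunk.writable = false;
  }

 private:
  FakeHeap* const heap_;
};

// Per-chunk flavor: touches a single chunk only, so concurrent threads can
// each hold their own scope without coordinating over the whole code space.
class CodePageModificationScope {
 public:
  explicit CodePageModificationScope(FakeChunk* chunk) : chunk_(chunk) {
    chunk_->writable = true;
  }
  ~CodePageModificationScope() { chunk_->writable = false; }

 private:
  FakeChunk* const chunk_;
};

int main() {
  FakeHeap heap;
  heap.code_chunks = {{true, false}, {false, false}, {true, false}};
  {
    CodeSpaceModificationScope scope(&heap);
    std::cout << "code space writable: " << heap.code_chunks[0].writable << "\n";
  }
  {
    CodePageModificationScope scope(&heap.code_chunks[2]);
    std::cout << "single chunk writable: " << heap.code_chunks[2].writable << "\n";
  }
}
// --- end of sketch ---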
diff --git a/deps/v8/src/heap/incremental-marking.cc b/deps/v8/src/heap/incremental-marking.cc
index 4868adc26e..a7b56e4315 100644
--- a/deps/v8/src/heap/incremental-marking.cc
+++ b/deps/v8/src/heap/incremental-marking.cc
@@ -203,11 +203,13 @@ class IncrementalMarkingRootMarkingVisitor : public RootVisitor {
IncrementalMarking* incremental_marking)
: heap_(incremental_marking->heap()) {}
- void VisitRootPointer(Root root, Object** p) override {
+ void VisitRootPointer(Root root, const char* description,
+ Object** p) override {
MarkObjectByPointer(p);
}
- void VisitRootPointers(Root root, Object** start, Object** end) override {
+ void VisitRootPointers(Root root, const char* description, Object** start,
+ Object** end) override {
for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
}
@@ -653,15 +655,17 @@ bool IncrementalMarking::IsFixedArrayWithProgressBar(HeapObject* obj) {
int IncrementalMarking::VisitObject(Map* map, HeapObject* obj) {
DCHECK(marking_state()->IsGrey(obj) || marking_state()->IsBlack(obj));
- // The object can already be black in two cases:
- // 1. The object is a fixed array with the progress bar.
- // 2. The object is a JSObject that was colored black before
- // unsafe layout change.
- // 3. The object is a string that was colored black before
- // unsafe layout change.
if (!marking_state()->GreyToBlack(obj)) {
- DCHECK(IsFixedArrayWithProgressBar(obj) || obj->IsJSObject() ||
- obj->IsString());
+ // The object can already be black in these cases:
+ // 1. The object is a fixed array with the progress bar.
+ // 2. The object is a JSObject that was colored black before
+ // unsafe layout change.
+ // 3. The object is a string that was colored black before
+ // unsafe layout change.
+ // 4. The object is materialized by the deoptimizer.
+ DCHECK(obj->IsHashTable() || obj->IsPropertyArray() ||
+ obj->IsContextExtension() || obj->IsFixedArray() ||
+ obj->IsJSObject() || obj->IsString());
}
DCHECK(marking_state()->IsBlack(obj));
WhiteToGreyAndPush(map);
diff --git a/deps/v8/src/heap/invalidated-slots-inl.h b/deps/v8/src/heap/invalidated-slots-inl.h
index b62aa93cde..8ca289cf1a 100644
--- a/deps/v8/src/heap/invalidated-slots-inl.h
+++ b/deps/v8/src/heap/invalidated-slots-inl.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_INVALIDATED_SLOTS_INL_H
-#define V8_INVALIDATED_SLOTS_INL_H
+#ifndef V8_HEAP_INVALIDATED_SLOTS_INL_H_
+#define V8_HEAP_INVALIDATED_SLOTS_INL_H_
#include <map>
@@ -67,4 +67,4 @@ bool InvalidatedSlotsFilter::IsValid(Address slot) {
} // namespace internal
} // namespace v8
-#endif // V8_INVALIDATED_SLOTS_INL_H
+#endif // V8_HEAP_INVALIDATED_SLOTS_INL_H_
diff --git a/deps/v8/src/heap/invalidated-slots.h b/deps/v8/src/heap/invalidated-slots.h
index 78ac03bc79..e9410575a3 100644
--- a/deps/v8/src/heap/invalidated-slots.h
+++ b/deps/v8/src/heap/invalidated-slots.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_INVALIDATED_SLOTS_H
-#define V8_INVALIDATED_SLOTS_H
+#ifndef V8_HEAP_INVALIDATED_SLOTS_H_
+#define V8_HEAP_INVALIDATED_SLOTS_H_
#include <map>
#include <stack>
@@ -51,4 +51,4 @@ class InvalidatedSlotsFilter {
} // namespace internal
} // namespace v8
-#endif // V8_INVALIDATED_SLOTS_H
+#endif // V8_HEAP_INVALIDATED_SLOTS_H_
diff --git a/deps/v8/src/heap/item-parallel-job.cc b/deps/v8/src/heap/item-parallel-job.cc
new file mode 100644
index 0000000000..1c8d4c8ac4
--- /dev/null
+++ b/deps/v8/src/heap/item-parallel-job.cc
@@ -0,0 +1,130 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/item-parallel-job.h"
+
+#include "src/base/platform/semaphore.h"
+#include "src/v8.h"
+
+namespace v8 {
+namespace internal {
+
+ItemParallelJob::Task::Task(Isolate* isolate) : CancelableTask(isolate) {}
+
+ItemParallelJob::Task::~Task() {
+ // The histogram is reset in RunInternal(). If it's still around it means
+ // this task was cancelled before being scheduled.
+ if (gc_parallel_task_latency_histogram_)
+ gc_parallel_task_latency_histogram_->RecordAbandon();
+}
+
+void ItemParallelJob::Task::SetupInternal(
+ base::Semaphore* on_finish, std::vector<Item*>* items, size_t start_index,
+ base::Optional<AsyncTimedHistogram> gc_parallel_task_latency_histogram) {
+ on_finish_ = on_finish;
+ items_ = items;
+
+ if (start_index < items->size()) {
+ cur_index_ = start_index;
+ } else {
+ items_considered_ = items_->size();
+ }
+
+ gc_parallel_task_latency_histogram_ =
+ std::move(gc_parallel_task_latency_histogram);
+}
+
+void ItemParallelJob::Task::RunInternal() {
+ if (gc_parallel_task_latency_histogram_) {
+ gc_parallel_task_latency_histogram_->RecordDone();
+ gc_parallel_task_latency_histogram_.reset();
+ }
+
+ RunInParallel();
+ on_finish_->Signal();
+}
+
+ItemParallelJob::ItemParallelJob(CancelableTaskManager* cancelable_task_manager,
+ base::Semaphore* pending_tasks)
+ : cancelable_task_manager_(cancelable_task_manager),
+ pending_tasks_(pending_tasks) {}
+
+ItemParallelJob::~ItemParallelJob() {
+ for (size_t i = 0; i < items_.size(); i++) {
+ Item* item = items_[i];
+ CHECK(item->IsFinished());
+ delete item;
+ }
+}
+
+void ItemParallelJob::Run(std::shared_ptr<Counters> async_counters) {
+ DCHECK_GT(tasks_.size(), 0);
+ const size_t num_items = items_.size();
+ const size_t num_tasks = tasks_.size();
+
+ TRACE_EVENT_INSTANT2(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
+ "ItemParallelJob::Run", TRACE_EVENT_SCOPE_THREAD,
+ "num_tasks", static_cast<int>(num_tasks), "num_items",
+ static_cast<int>(num_items));
+
+ AsyncTimedHistogram gc_parallel_task_latency_histogram(
+ async_counters->gc_parallel_task_latency(), async_counters);
+
+ // Some jobs have more tasks than items (when the items are mere coarse
+ // grain tasks that generate work dynamically for a second phase which all
+ // tasks participate in). Some jobs even have 0 items to preprocess but
+ // still have multiple tasks.
+ // TODO(gab): Figure out a cleaner scheme for this.
+ const size_t num_tasks_processing_items = Min(num_items, tasks_.size());
+
+ // In the event of an uneven workload, distribute an extra item to the first
+ // |items_remainder| tasks.
+ const size_t items_remainder = num_tasks_processing_items > 0
+ ? num_items % num_tasks_processing_items
+ : 0;
+ // Base |items_per_task|, will be bumped by 1 for the first
+ // |items_remainder| tasks.
+ const size_t items_per_task = num_tasks_processing_items > 0
+ ? num_items / num_tasks_processing_items
+ : 0;
+ CancelableTaskManager::Id* task_ids =
+ new CancelableTaskManager::Id[num_tasks];
+ Task* main_task = nullptr;
+ for (size_t i = 0, start_index = 0; i < num_tasks;
+ i++, start_index += items_per_task + (i < items_remainder ? 1 : 0)) {
+ Task* task = tasks_[i];
+
+ // By definition there are fewer |items_remainder| to distribute than
+ // there are tasks processing items so this cannot overflow while we are
+ // assigning work items.
+ DCHECK_IMPLIES(start_index >= num_items, i >= num_tasks_processing_items);
+
+ task->SetupInternal(pending_tasks_, &items_, start_index,
+ i > 0 ? gc_parallel_task_latency_histogram
+ : base::Optional<AsyncTimedHistogram>());
+ task_ids[i] = task->id();
+ if (i > 0) {
+ V8::GetCurrentPlatform()->CallOnBackgroundThread(
+ task, v8::Platform::kShortRunningTask);
+ } else {
+ main_task = task;
+ }
+ }
+
+ // Contribute on main thread.
+ main_task->Run();
+ delete main_task;
+
+ // Wait for background tasks.
+ for (size_t i = 0; i < num_tasks; i++) {
+ if (cancelable_task_manager_->TryAbort(task_ids[i]) !=
+ CancelableTaskManager::kTaskAborted) {
+ pending_tasks_->Wait();
+ }
+ }
+ delete[] task_ids;
+}
+
+} // namespace internal
+} // namespace v8
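To make the distribution arithmetic above concrete, here is a minimal standalone sketch (not part of the patch; ItemParallelJob::Run() fuses the same computation into its loop increment, and the names below are illustrative):

// Sketch only: start index of each item-processing task as described in the
// comments above -- the first |remainder| tasks each get one extra item.
#include <cstddef>
#include <vector>

std::vector<size_t> ComputeStartIndices(size_t num_items, size_t num_tasks) {
  std::vector<size_t> starts;
  const size_t remainder = num_tasks > 0 ? num_items % num_tasks : 0;
  const size_t per_task = num_tasks > 0 ? num_items / num_tasks : 0;
  size_t start = 0;
  for (size_t i = 0; i < num_tasks; i++) {
    starts.push_back(start);
    start += per_task + (i < remainder ? 1 : 0);
  }
  // e.g. 10 items over 4 tasks: per_task = 2, remainder = 2, start indices
  // {0, 3, 6, 8}, i.e. item counts {3, 3, 2, 2}.
  return starts;
}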
diff --git a/deps/v8/src/heap/item-parallel-job.h b/deps/v8/src/heap/item-parallel-job.h
index 23c709f87b..4c21f69ca9 100644
--- a/deps/v8/src/heap/item-parallel-job.h
+++ b/deps/v8/src/heap/item-parallel-job.h
@@ -2,18 +2,29 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_HEAP_ITEM_PARALLEL_JOB_
-#define V8_HEAP_ITEM_PARALLEL_JOB_
+#ifndef V8_HEAP_ITEM_PARALLEL_JOB_H_
+#define V8_HEAP_ITEM_PARALLEL_JOB_H_
+#include <memory>
#include <vector>
-#include "src/base/platform/semaphore.h"
+#include "src/base/atomic-utils.h"
+#include "src/base/logging.h"
+#include "src/base/macros.h"
+#include "src/base/optional.h"
#include "src/cancelable-task.h"
-#include "src/v8.h"
+#include "src/counters.h"
+#include "src/globals.h"
namespace v8 {
+
+namespace base {
+class Semaphore;
+}
+
namespace internal {
+class Counters;
class Isolate;
// This class manages background tasks that process a set of items in parallel.
@@ -25,14 +36,17 @@ class Isolate;
//
// Items need to be marked as finished after processing them. Task and Item
// ownership is transferred to the job.
-class ItemParallelJob {
+//
+// Each parallel (non-main thread) task will report the time between the job
+// being created and it being scheduled to |gc_parallel_task_latency_histogram|.
+class V8_EXPORT_PRIVATE ItemParallelJob {
public:
class Task;
- class Item {
+ class V8_EXPORT_PRIVATE Item {
public:
- Item() : state_(kAvailable) {}
- virtual ~Item() {}
+ Item() = default;
+ virtual ~Item() = default;
// Marks an item as being finished.
void MarkFinished() { CHECK(state_.TrySetValue(kProcessing, kFinished)); }
@@ -45,7 +59,7 @@ class ItemParallelJob {
}
bool IsFinished() { return state_.Value() == kFinished; }
- base::AtomicValue<ProcessingState> state_;
+ base::AtomicValue<ProcessingState> state_{kAvailable};
friend class ItemParallelJob;
friend class ItemParallelJob::Task;
@@ -53,15 +67,10 @@ class ItemParallelJob {
DISALLOW_COPY_AND_ASSIGN(Item);
};
- class Task : public CancelableTask {
+ class V8_EXPORT_PRIVATE Task : public CancelableTask {
public:
- explicit Task(Isolate* isolate)
- : CancelableTask(isolate),
- items_(nullptr),
- cur_index_(0),
- items_considered_(0),
- on_finish_(nullptr) {}
- virtual ~Task() {}
+ explicit Task(Isolate* isolate);
+ virtual ~Task();
virtual void RunInParallel() = 0;
@@ -85,42 +94,36 @@ class ItemParallelJob {
}
private:
- void SetupInternal(base::Semaphore* on_finish, std::vector<Item*>* items,
- size_t start_index) {
- on_finish_ = on_finish;
- items_ = items;
- cur_index_ = start_index;
- }
+ friend class ItemParallelJob;
+ friend class Item;
- // We don't allow overriding this method any further.
- void RunInternal() final {
- RunInParallel();
- on_finish_->Signal();
- }
+  // Sets up the state required before invoking Run(). If
+  // |start_index| >= items_.size(), this task will not process work items
+  // (some jobs have more tasks than work items in order to parallelize
+  // post-processing, e.g. scavenging). If
+  // |gc_parallel_task_latency_histogram| is provided, it is used to report
+  // a histogram of the latency between posting the task and it being
+  // scheduled.
+ void SetupInternal(
+ base::Semaphore* on_finish, std::vector<Item*>* items,
+ size_t start_index,
+ base::Optional<AsyncTimedHistogram> gc_parallel_task_latency_histogram);
- std::vector<Item*>* items_;
- size_t cur_index_;
- size_t items_considered_;
- base::Semaphore* on_finish_;
+ // We don't allow overriding this method any further.
+ void RunInternal() final;
- friend class ItemParallelJob;
- friend class Item;
+ std::vector<Item*>* items_ = nullptr;
+ size_t cur_index_ = 0;
+ size_t items_considered_ = 0;
+ base::Semaphore* on_finish_ = nullptr;
+ base::Optional<AsyncTimedHistogram> gc_parallel_task_latency_histogram_;
DISALLOW_COPY_AND_ASSIGN(Task);
};
ItemParallelJob(CancelableTaskManager* cancelable_task_manager,
- base::Semaphore* pending_tasks)
- : cancelable_task_manager_(cancelable_task_manager),
- pending_tasks_(pending_tasks) {}
-
- ~ItemParallelJob() {
- for (size_t i = 0; i < items_.size(); i++) {
- Item* item = items_[i];
- CHECK(item->IsFinished());
- delete item;
- }
- }
+ base::Semaphore* pending_tasks);
+
+ ~ItemParallelJob();
// Adds a task to the job. Transfers ownership to the job.
void AddTask(Task* task) { tasks_.push_back(task); }
@@ -131,42 +134,9 @@ class ItemParallelJob {
int NumberOfItems() const { return static_cast<int>(items_.size()); }
int NumberOfTasks() const { return static_cast<int>(tasks_.size()); }
- void Run() {
- DCHECK_GE(tasks_.size(), 0);
- const size_t num_tasks = tasks_.size();
- const size_t num_items = items_.size();
- const size_t items_per_task = (num_items + num_tasks - 1) / num_tasks;
- CancelableTaskManager::Id* task_ids =
- new CancelableTaskManager::Id[num_tasks];
- size_t start_index = 0;
- Task* main_task = nullptr;
- Task* task = nullptr;
- for (size_t i = 0; i < num_tasks; i++, start_index += items_per_task) {
- task = tasks_[i];
- if (start_index >= num_items) {
- start_index -= num_items;
- }
- task->SetupInternal(pending_tasks_, &items_, start_index);
- task_ids[i] = task->id();
- if (i > 0) {
- V8::GetCurrentPlatform()->CallOnBackgroundThread(
- task, v8::Platform::kShortRunningTask);
- } else {
- main_task = task;
- }
- }
- // Contribute on main thread.
- main_task->Run();
- delete main_task;
- // Wait for background tasks.
- for (size_t i = 0; i < num_tasks; i++) {
- if (cancelable_task_manager_->TryAbort(task_ids[i]) !=
- CancelableTaskManager::kTaskAborted) {
- pending_tasks_->Wait();
- }
- }
- delete[] task_ids;
- }
+  // Runs this job, reporting metrics in a thread-safe manner to
+  // |async_counters|.
+ void Run(std::shared_ptr<Counters> async_counters);
private:
std::vector<Item*> items_;
@@ -179,4 +149,4 @@ class ItemParallelJob {
} // namespace internal
} // namespace v8
-#endif // V8_HEAP_ITEM_PARALLEL_JOB_
+#endif // V8_HEAP_ITEM_PARALLEL_JOB_H_
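A hedged usage sketch of the refactored interface (illustrative only; SweepTask and RunSweepJob are made-up names, and the helper a task would use to pull items inside RunInParallel() is elided from the hunk above):

class SweepTask : public ItemParallelJob::Task {
 public:
  explicit SweepTask(Isolate* isolate) : ItemParallelJob::Task(isolate) {}
  void RunInParallel() override {
    // Pull work items, process each one, and call MarkFinished() on it.
  }
};

void RunSweepJob(Isolate* isolate, CancelableTaskManager* manager,
                 base::Semaphore* pending_tasks,
                 std::shared_ptr<Counters> counters) {
  ItemParallelJob job(manager, pending_tasks);
  job.AddTask(new SweepTask(isolate));  // Ownership transfers to the job.
  job.Run(counters);  // Contributes on the main thread, then waits; background
                      // tasks report their scheduling latency to the histogram.
}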
diff --git a/deps/v8/src/heap/mark-compact.cc b/deps/v8/src/heap/mark-compact.cc
index 30a7e55d6b..c6c8c29962 100644
--- a/deps/v8/src/heap/mark-compact.cc
+++ b/deps/v8/src/heap/mark-compact.cc
@@ -16,7 +16,6 @@
#include "src/global-handles.h"
#include "src/heap/array-buffer-collector.h"
#include "src/heap/array-buffer-tracker-inl.h"
-#include "src/heap/concurrent-marking.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/incremental-marking.h"
#include "src/heap/invalidated-slots-inl.h"
@@ -72,7 +71,8 @@ class MarkingVerifier : public ObjectVisitor, public RootVisitor {
VerifyPointers(start, end);
}
- void VisitRootPointers(Root root, Object** start, Object** end) override {
+ void VisitRootPointers(Root root, const char* description, Object** start,
+ Object** end) override {
VerifyPointers(start, end);
}
@@ -240,7 +240,8 @@ class EvacuationVerifier : public ObjectVisitor, public RootVisitor {
VerifyPointers(start, end);
}
- void VisitRootPointers(Root root, Object** start, Object** end) override {
+ void VisitRootPointers(Root root, const char* description, Object** start,
+ Object** end) override {
VerifyPointers(start, end);
}
@@ -369,12 +370,14 @@ class RootMarkingVisitorSeedOnly : public RootVisitor {
buffered_objects_.reserve(kBufferSize);
}
- void VisitRootPointer(Root root, Object** p) override {
+ void VisitRootPointer(Root root, const char* description,
+ Object** p) override {
if (!(*p)->IsHeapObject()) return;
AddObject(*p);
}
- void VisitRootPointers(Root root, Object** start, Object** end) override {
+ void VisitRootPointers(Root root, const char* description, Object** start,
+ Object** end) override {
for (Object** p = start; p < end; p++) {
if (!(*p)->IsHeapObject()) continue;
AddObject(*p);
@@ -404,14 +407,22 @@ class RootMarkingVisitorSeedOnly : public RootVisitor {
std::vector<Object*> buffered_objects_;
};
-} // namespace
-
-static int NumberOfAvailableCores() {
- return Max(
- 1, static_cast<int>(
- V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads()));
+int NumberOfAvailableCores() {
+ static int num_cores =
+ static_cast<int>(
+ V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads()) +
+ 1;
+ // This number of cores should be greater than zero and never change.
+ DCHECK_GE(num_cores, 1);
+ DCHECK_EQ(
+ num_cores,
+ 1 + static_cast<int>(
+ V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads()));
+ return num_cores;
}
+} // namespace
+
int MarkCompactCollectorBase::NumberOfParallelCompactionTasks(int pages) {
DCHECK_GT(pages, 0);
return FLAG_parallel_compaction ? Min(NumberOfAvailableCores(), pages) : 1;
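For reference, a worked example of the new core accounting (hypothetical platform with 7 background threads; numbers are illustrative):

// NumberOfAvailableCores()            == 7 + 1 == 8   (computed once, then cached)
// NumberOfParallelCompactionTasks(3)  == Min(8, 3) == 3
// NumberOfParallelCompactionTasks(3)  == 1   (when FLAG_parallel_compaction is off)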
@@ -855,7 +866,7 @@ void MarkCompactCollector::Prepare() {
if (was_marked_incrementally_ && heap_->ShouldAbortIncrementalMarking()) {
heap()->incremental_marking()->Stop();
heap()->incremental_marking()->AbortBlackAllocation();
- FinishConcurrentMarking();
+ FinishConcurrentMarking(ConcurrentMarking::StopRequest::PREEMPT_TASKS);
heap()->incremental_marking()->Deactivate();
ClearMarkbits();
AbortWeakCollections();
@@ -891,9 +902,10 @@ void MarkCompactCollector::Prepare() {
#endif
}
-void MarkCompactCollector::FinishConcurrentMarking() {
+void MarkCompactCollector::FinishConcurrentMarking(
+ ConcurrentMarking::StopRequest stop_request) {
if (FLAG_concurrent_marking) {
- heap()->concurrent_marking()->EnsureCompleted();
+ heap()->concurrent_marking()->Stop(stop_request);
heap()->concurrent_marking()->FlushLiveBytes(non_atomic_marking_state());
}
}
@@ -965,11 +977,12 @@ class MarkCompactCollector::RootMarkingVisitor final : public RootVisitor {
explicit RootMarkingVisitor(MarkCompactCollector* collector)
: collector_(collector) {}
- void VisitRootPointer(Root root, Object** p) final {
+ void VisitRootPointer(Root root, const char* description, Object** p) final {
MarkObjectByPointer(root, p);
}
- void VisitRootPointers(Root root, Object** start, Object** end) final {
+ void VisitRootPointers(Root root, const char* description, Object** start,
+ Object** end) final {
for (Object** p = start; p < end; p++) MarkObjectByPointer(root, p);
}
@@ -1058,7 +1071,8 @@ class ExternalStringTableCleaner : public RootVisitor {
public:
explicit ExternalStringTableCleaner(Heap* heap) : heap_(heap) {}
- void VisitRootPointers(Root root, Object** start, Object** end) override {
+ void VisitRootPointers(Root root, const char* description, Object** start,
+ Object** end) override {
// Visit all HeapObject pointers in [start, end).
MarkCompactCollector::NonAtomicMarkingState* marking_state =
heap_->mark_compact_collector()->non_atomic_marking_state();
@@ -1093,7 +1107,8 @@ class YoungGenerationExternalStringTableCleaner : public RootVisitor {
: heap_(collector->heap()),
marking_state_(collector->non_atomic_marking_state()) {}
- void VisitRootPointers(Root root, Object** start, Object** end) override {
+ void VisitRootPointers(Root root, const char* description, Object** start,
+ Object** end) override {
DCHECK_EQ(static_cast<int>(root),
static_cast<int>(Root::kExternalStringsTable));
// Visit all HeapObject pointers in [start, end).
@@ -1391,7 +1406,8 @@ class EvacuateVisitorBase : public HeapObjectVisitor {
#ifdef VERIFY_HEAP
if (AbortCompactionForTesting(object)) return false;
#endif // VERIFY_HEAP
- AllocationAlignment alignment = object->RequiredAlignment();
+ AllocationAlignment alignment =
+ HeapObject::RequiredAlignment(object->map());
AllocationResult allocation =
local_allocator_->Allocate(target_space, size, alignment);
if (allocation.To(target_object)) {
@@ -1496,7 +1512,8 @@ class EvacuateNewSpaceVisitor final : public EvacuateVisitorBase {
inline AllocationSpace AllocateTargetObject(HeapObject* old_object, int size,
HeapObject** target_object) {
- AllocationAlignment alignment = old_object->RequiredAlignment();
+ AllocationAlignment alignment =
+ HeapObject::RequiredAlignment(old_object->map());
AllocationSpace space_allocated_in = NEW_SPACE;
AllocationResult allocation =
local_allocator_->Allocate(NEW_SPACE, size, alignment);
@@ -1758,11 +1775,13 @@ class MinorMarkCompactCollector::RootMarkingVisitor : public RootVisitor {
: collector_(collector),
marking_state_(collector_->non_atomic_marking_state()) {}
- void VisitRootPointer(Root root, Object** p) override {
+ void VisitRootPointer(Root root, const char* description,
+ Object** p) override {
MarkObjectByPointer(p);
}
- void VisitRootPointers(Root root, Object** start, Object** end) override {
+ void VisitRootPointers(Root root, const char* description, Object** start,
+ Object** end) override {
for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
}
@@ -1883,6 +1902,8 @@ class BatchedRootMarkingItem : public MarkingItem {
virtual ~BatchedRootMarkingItem() {}
void Process(YoungGenerationMarkingTask* task) override {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
+ "BatchedRootMarkingItem::Process");
for (Object* object : objects_) {
task->MarkObject(object);
}
@@ -1900,6 +1921,8 @@ class PageMarkingItem : public MarkingItem {
virtual ~PageMarkingItem() { global_slots_->Increment(slots_); }
void Process(YoungGenerationMarkingTask* task) override {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
+ "PageMarkingItem::Process");
base::LockGuard<base::Mutex> guard(chunk_->mutex());
MarkUntypedPointers(task);
MarkTypedPointers(task);
@@ -1956,6 +1979,8 @@ class GlobalHandlesMarkingItem : public MarkingItem {
virtual ~GlobalHandlesMarkingItem() {}
void Process(YoungGenerationMarkingTask* task) override {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
+ "GlobalHandlesMarkingItem::Process");
GlobalHandlesRootMarkingVisitor visitor(task);
global_handles_
->IterateNewSpaceStrongAndDependentRootsAndIdentifyUnmodified(
@@ -1968,12 +1993,14 @@ class GlobalHandlesMarkingItem : public MarkingItem {
explicit GlobalHandlesRootMarkingVisitor(YoungGenerationMarkingTask* task)
: task_(task) {}
- void VisitRootPointer(Root root, Object** p) override {
+ void VisitRootPointer(Root root, const char* description,
+ Object** p) override {
DCHECK_EQ(Root::kGlobalHandles, root);
task_->MarkObject(*p);
}
- void VisitRootPointers(Root root, Object** start, Object** end) override {
+ void VisitRootPointers(Root root, const char* description, Object** start,
+ Object** end) override {
DCHECK_EQ(Root::kGlobalHandles, root);
for (Object** p = start; p < end; p++) {
task_->MarkObject(*p);
@@ -2061,7 +2088,7 @@ void MinorMarkCompactCollector::MarkRootSetInParallel() {
job.AddTask(
new YoungGenerationMarkingTask(isolate(), this, worklist(), i));
}
- job.Run();
+ job.Run(isolate()->async_counters());
DCHECK(worklist()->IsGlobalEmpty());
}
}
@@ -2336,7 +2363,8 @@ void MarkCompactCollector::MarkLiveObjects() {
}
ProcessMarkingWorklist();
- FinishConcurrentMarking();
+ FinishConcurrentMarking(
+ ConcurrentMarking::StopRequest::COMPLETE_ONGOING_TASKS);
ProcessMarkingWorklist();
}
@@ -2849,11 +2877,13 @@ class PointersUpdatingVisitor : public ObjectVisitor, public RootVisitor {
for (Object** p = start; p < end; p++) UpdateSlotInternal(p);
}
- void VisitRootPointer(Root root, Object** p) override {
+ void VisitRootPointer(Root root, const char* description,
+ Object** p) override {
UpdateSlotInternal(p);
}
- void VisitRootPointers(Root root, Object** start, Object** end) override {
+ void VisitRootPointers(Root root, const char* description, Object** start,
+ Object** end) override {
for (Object** p = start; p < end; p++) UpdateSlotInternal(p);
}
@@ -3009,6 +3039,7 @@ class Evacuator : public Malloced {
};
void Evacuator::EvacuatePage(Page* page) {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"), "Evacuator::EvacuatePage");
DCHECK(page->SweepingDone());
intptr_t saved_live_bytes = 0;
double evacuation_time = 0.0;
@@ -3066,11 +3097,15 @@ class FullEvacuator : public Evacuator {
};
void FullEvacuator::RawEvacuatePage(Page* page, intptr_t* live_bytes) {
+ const EvacuationMode evacuation_mode = ComputeEvacuationMode(page);
+ TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
+ "FullEvacuator::RawEvacuatePage", "evacuation_mode",
+ evacuation_mode);
MarkCompactCollector::NonAtomicMarkingState* marking_state =
collector_->non_atomic_marking_state();
*live_bytes = marking_state->live_bytes(page);
HeapObject* failed_object = nullptr;
- switch (ComputeEvacuationMode(page)) {
+ switch (evacuation_mode) {
case kObjectsNewToOld:
LiveObjectVisitor::VisitBlackObjectsNoFail(
page, marking_state, &new_space_visitor_,
@@ -3127,6 +3162,8 @@ class YoungGenerationEvacuator : public Evacuator {
void YoungGenerationEvacuator::RawEvacuatePage(Page* page,
intptr_t* live_bytes) {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
+ "YoungGenerationEvacuator::RawEvacuatePage");
MinorMarkCompactCollector::NonAtomicMarkingState* marking_state =
collector_->non_atomic_marking_state();
*live_bytes = marking_state->live_bytes(page);
@@ -3241,7 +3278,7 @@ void MarkCompactCollectorBase::CreateAndExecuteEvacuationTasks(
evacuators[i]->AddObserver(migration_observer);
job->AddTask(new PageEvacuationTask(heap()->isolate(), evacuators[i]));
}
- job->Run();
+ job->Run(isolate()->async_counters());
for (int i = 0; i < wanted_num_tasks; i++) {
evacuators[i]->Finalize();
delete evacuators[i];
@@ -3249,15 +3286,16 @@ void MarkCompactCollectorBase::CreateAndExecuteEvacuationTasks(
delete[] evacuators;
if (FLAG_trace_evacuation) {
- PrintIsolate(isolate(),
- "%8.0f ms: evacuation-summary: parallel=%s pages=%d "
- "wanted_tasks=%d tasks=%d cores=%" PRIuS
- " live_bytes=%" V8PRIdPTR " compaction_speed=%.f\n",
- isolate()->time_millis_since_init(),
- FLAG_parallel_compaction ? "yes" : "no", job->NumberOfItems(),
- wanted_num_tasks, job->NumberOfTasks(),
- V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads(),
- live_bytes, compaction_speed);
+ PrintIsolate(
+ isolate(),
+ "%8.0f ms: evacuation-summary: parallel=%s pages=%d "
+ "wanted_tasks=%d tasks=%d cores=%" PRIuS " live_bytes=%" V8PRIdPTR
+ " compaction_speed=%.f\n",
+ isolate()->time_millis_since_init(),
+ FLAG_parallel_compaction ? "yes" : "no", job->NumberOfItems(),
+ wanted_num_tasks, job->NumberOfTasks(),
+ V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads() + 1,
+ live_bytes, compaction_speed);
}
}
@@ -3365,6 +3403,8 @@ bool LiveObjectVisitor::VisitBlackObjects(MemoryChunk* chunk,
Visitor* visitor,
IterationMode iteration_mode,
HeapObject** failed_object) {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
+ "LiveObjectVisitor::VisitBlackObjects");
for (auto object_and_size :
LiveObjectRange<kBlackObjects>(chunk, marking_state->bitmap(chunk))) {
HeapObject* const object = object_and_size.first;
@@ -3389,6 +3429,8 @@ void LiveObjectVisitor::VisitBlackObjectsNoFail(MemoryChunk* chunk,
MarkingState* marking_state,
Visitor* visitor,
IterationMode iteration_mode) {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
+ "LiveObjectVisitor::VisitBlackObjectsNoFail");
for (auto object_and_size :
LiveObjectRange<kBlackObjects>(chunk, marking_state->bitmap(chunk))) {
HeapObject* const object = object_and_size.first;
@@ -3407,6 +3449,8 @@ void LiveObjectVisitor::VisitGreyObjectsNoFail(MemoryChunk* chunk,
MarkingState* marking_state,
Visitor* visitor,
IterationMode iteration_mode) {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
+ "LiveObjectVisitor::VisitGreyObjectsNoFail");
for (auto object_and_size :
LiveObjectRange<kGreyObjects>(chunk, marking_state->bitmap(chunk))) {
HeapObject* const object = object_and_size.first;
@@ -3553,6 +3597,8 @@ class ToSpaceUpdatingItem : public UpdatingItem {
private:
void ProcessVisitAll() {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
+ "ToSpaceUpdatingItem::ProcessVisitAll");
PointersUpdatingVisitor visitor;
for (Address cur = start_; cur < end_;) {
HeapObject* object = HeapObject::FromAddress(cur);
@@ -3564,6 +3610,8 @@ class ToSpaceUpdatingItem : public UpdatingItem {
}
void ProcessVisitLive() {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
+ "ToSpaceUpdatingItem::ProcessVisitLive");
// For young generation evacuations we want to visit grey objects, for
// full MC, we need to visit black objects.
PointersUpdatingVisitor visitor;
@@ -3592,13 +3640,14 @@ class RememberedSetUpdatingItem : public UpdatingItem {
virtual ~RememberedSetUpdatingItem() {}
void Process() override {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
+ "RememberedSetUpdatingItem::Process");
base::LockGuard<base::Mutex> guard(chunk_->mutex());
UpdateUntypedPointers();
UpdateTypedPointers();
}
private:
- template <AccessMode access_mode>
inline SlotCallbackResult CheckAndUpdateOldToNewSlot(Address slot_address) {
Object** slot = reinterpret_cast<Object**>(slot_address);
if (heap_->InFromSpace(*slot)) {
@@ -3606,13 +3655,7 @@ class RememberedSetUpdatingItem : public UpdatingItem {
DCHECK(heap_object->IsHeapObject());
MapWord map_word = heap_object->map_word();
if (map_word.IsForwardingAddress()) {
- if (access_mode == AccessMode::ATOMIC) {
- HeapObject** heap_obj_slot = reinterpret_cast<HeapObject**>(slot);
- base::AsAtomicPointer::Relaxed_Store(heap_obj_slot,
- map_word.ToForwardingAddress());
- } else {
- *slot = map_word.ToForwardingAddress();
- }
+ *slot = map_word.ToForwardingAddress();
}
// If the object was in from space before and is after executing the
// callback in to space, the object is still live.
@@ -3648,12 +3691,10 @@ class RememberedSetUpdatingItem : public UpdatingItem {
void UpdateUntypedPointers() {
if (chunk_->slot_set<OLD_TO_NEW, AccessMode::NON_ATOMIC>() != nullptr) {
- RememberedSet<OLD_TO_NEW>::Iterate(
- chunk_,
- [this](Address slot) {
- return CheckAndUpdateOldToNewSlot<AccessMode::NON_ATOMIC>(slot);
- },
- SlotSet::PREFREE_EMPTY_BUCKETS);
+ RememberedSet<OLD_TO_NEW>::Iterate(
+ chunk_,
+ [this](Address slot) { return CheckAndUpdateOldToNewSlot(slot); },
+ SlotSet::PREFREE_EMPTY_BUCKETS);
}
if ((updating_mode_ == RememberedSetUpdatingMode::ALL) &&
(chunk_->slot_set<OLD_TO_OLD, AccessMode::NON_ATOMIC>() != nullptr)) {
@@ -3692,7 +3733,7 @@ class RememberedSetUpdatingItem : public UpdatingItem {
[isolate, this](SlotType slot_type, Address host_addr, Address slot) {
return UpdateTypedSlotHelper::UpdateTypedSlot(
isolate, slot_type, slot, [this](Object** slot) {
- return CheckAndUpdateOldToNewSlot<AccessMode::NON_ATOMIC>(
+ return CheckAndUpdateOldToNewSlot(
reinterpret_cast<Address>(slot));
});
});
@@ -3748,6 +3789,8 @@ class GlobalHandlesUpdatingItem : public UpdatingItem {
virtual ~GlobalHandlesUpdatingItem() {}
void Process() override {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
+ "GlobalHandlesUpdatingItem::Process");
PointersUpdatingVisitor updating_visitor;
global_handles_->IterateNewSpaceRoots(&updating_visitor, start_, end_);
}
@@ -3772,6 +3815,9 @@ class ArrayBufferTrackerUpdatingItem : public UpdatingItem {
virtual ~ArrayBufferTrackerUpdatingItem() {}
void Process() override {
+ TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
+ "ArrayBufferTrackerUpdatingItem::Process", "EvacuationState",
+ state_);
switch (state_) {
case EvacuationState::kRegular:
ArrayBufferTracker::ProcessBuffers(
@@ -3922,7 +3968,7 @@ void MarkCompactCollector::UpdatePointersAfterEvacuation() {
isolate(),
GCTracer::BackgroundScope::MC_BACKGROUND_EVACUATE_UPDATE_POINTERS));
}
- updating_job.Run();
+ updating_job.Run(isolate()->async_counters());
}
{
@@ -3954,7 +4000,7 @@ void MarkCompactCollector::UpdatePointersAfterEvacuation() {
isolate(),
GCTracer::BackgroundScope::MC_BACKGROUND_EVACUATE_UPDATE_POINTERS));
}
- updating_job.Run();
+ updating_job.Run(isolate()->async_counters());
heap()->array_buffer_collector()->FreeAllocationsOnBackgroundThread();
}
}
@@ -4016,7 +4062,7 @@ void MinorMarkCompactCollector::UpdatePointersAfterEvacuation() {
{
TRACE_GC(heap()->tracer(),
GCTracer::Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS_SLOTS);
- updating_job.Run();
+ updating_job.Run(isolate()->async_counters());
heap()->array_buffer_collector()->FreeAllocationsOnBackgroundThread();
}
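The recurring signature change in this file is the extra const char* description argument on the RootVisitor callbacks. A minimal sketch of a conforming visitor (illustrative, not from the patch):

class CountingRootVisitor : public RootVisitor {
 public:
  void VisitRootPointer(Root root, const char* description,
                        Object** p) override {
    count_++;  // |description| names the root being visited.
  }
  void VisitRootPointers(Root root, const char* description, Object** start,
                         Object** end) override {
    count_ += static_cast<int>(end - start);
  }

 private:
  int count_ = 0;
};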
diff --git a/deps/v8/src/heap/mark-compact.h b/deps/v8/src/heap/mark-compact.h
index 6fda00633c..755f0eb4eb 100644
--- a/deps/v8/src/heap/mark-compact.h
+++ b/deps/v8/src/heap/mark-compact.h
@@ -8,6 +8,7 @@
#include <deque>
#include <vector>
+#include "src/heap/concurrent-marking.h"
#include "src/heap/marking.h"
#include "src/heap/objects-visiting.h"
#include "src/heap/spaces.h"
@@ -649,7 +650,9 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
// choosing spaces to compact.
void Prepare();
- void FinishConcurrentMarking();
+ // Stop concurrent marking (either by preempting it right away or waiting for
+ // it to complete as requested by |stop_request|).
+ void FinishConcurrentMarking(ConcurrentMarking::StopRequest stop_request);
bool StartCompaction();
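For reference, the two stop modes map onto the two call sites changed in mark-compact.cc above (the lines below only restate calls already present in this patch):

// Abort path in Prepare(): do not wait for the concurrent markers.
FinishConcurrentMarking(ConcurrentMarking::StopRequest::PREEMPT_TASKS);
// Finalization path in MarkLiveObjects(): let in-flight tasks drain first.
FinishConcurrentMarking(
    ConcurrentMarking::StopRequest::COMPLETE_ONGOING_TASKS);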
diff --git a/deps/v8/src/heap/marking.h b/deps/v8/src/heap/marking.h
index 9b1fe61236..58630c52f0 100644
--- a/deps/v8/src/heap/marking.h
+++ b/deps/v8/src/heap/marking.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_MARKING_H
-#define V8_MARKING_H
+#ifndef V8_HEAP_MARKING_H_
+#define V8_HEAP_MARKING_H_
#include "src/base/atomic-utils.h"
#include "src/utils.h"
@@ -316,4 +316,4 @@ class Marking : public AllStatic {
} // namespace internal
} // namespace v8
-#endif // V8_MARKING_H_
+#endif // V8_HEAP_MARKING_H_
diff --git a/deps/v8/src/heap/memory-reducer.cc b/deps/v8/src/heap/memory-reducer.cc
index cc1030846a..77317a7b8a 100644
--- a/deps/v8/src/heap/memory-reducer.cc
+++ b/deps/v8/src/heap/memory-reducer.cc
@@ -201,6 +201,7 @@ MemoryReducer::State MemoryReducer::Step(const State& state,
void MemoryReducer::ScheduleTimer(double time_ms, double delay_ms) {
+ if (!heap()->use_tasks()) return;
DCHECK_LT(0, delay_ms);
// Leave some room for precision error in task scheduler.
const double kSlackMs = 100;
diff --git a/deps/v8/src/heap/memory-reducer.h b/deps/v8/src/heap/memory-reducer.h
index 0f0ad6eaa0..ce6564596e 100644
--- a/deps/v8/src/heap/memory-reducer.h
+++ b/deps/v8/src/heap/memory-reducer.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_HEAP_memory_reducer_H
-#define V8_HEAP_memory_reducer_H
+#ifndef V8_HEAP_MEMORY_REDUCER_H_
+#define V8_HEAP_MEMORY_REDUCER_H_
#include "include/v8-platform.h"
#include "src/base/macros.h"
@@ -171,4 +171,4 @@ class V8_EXPORT_PRIVATE MemoryReducer {
} // namespace internal
} // namespace v8
-#endif // V8_HEAP_memory_reducer_H
+#endif // V8_HEAP_MEMORY_REDUCER_H_
diff --git a/deps/v8/src/heap/object-stats.cc b/deps/v8/src/heap/object-stats.cc
index f58a472671..b854dabb2c 100644
--- a/deps/v8/src/heap/object-stats.cc
+++ b/deps/v8/src/heap/object-stats.cc
@@ -1,4 +1,5 @@
// Copyright 2015 the V8 project authors. All rights reserved.
+//
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
@@ -12,6 +13,7 @@
#include "src/counters.h"
#include "src/globals.h"
#include "src/heap/heap-inl.h"
+#include "src/heap/mark-compact.h"
#include "src/isolate.h"
#include "src/objects/compilation-cache-inl.h"
#include "src/utils.h"
@@ -31,7 +33,6 @@ void ObjectStats::ClearObjectStats(bool clear_last_time_stats) {
memset(object_counts_last_time_, 0, sizeof(object_counts_last_time_));
memset(object_sizes_last_time_, 0, sizeof(object_sizes_last_time_));
}
- visited_fixed_array_sub_types_.clear();
}
// Tell the compiler to never inline this: occasionally, the optimizer will
@@ -99,23 +100,14 @@ void ObjectStats::PrintJSON(const char* key) {
#define INSTANCE_TYPE_WRAPPER(name) \
PrintInstanceTypeJSON(key, gc_count, #name, name);
-#define CODE_KIND_WRAPPER(name) \
- PrintInstanceTypeJSON(key, gc_count, "*CODE_" #name, \
- FIRST_CODE_KIND_SUB_TYPE + Code::name);
-#define FIXED_ARRAY_SUB_INSTANCE_TYPE_WRAPPER(name) \
- PrintInstanceTypeJSON(key, gc_count, "*FIXED_ARRAY_" #name, \
- FIRST_FIXED_ARRAY_SUB_TYPE + name);
+
#define VIRTUAL_INSTANCE_TYPE_WRAPPER(name) \
PrintInstanceTypeJSON(key, gc_count, #name, FIRST_VIRTUAL_TYPE + name);
INSTANCE_TYPE_LIST(INSTANCE_TYPE_WRAPPER)
- CODE_KIND_LIST(CODE_KIND_WRAPPER)
- FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(FIXED_ARRAY_SUB_INSTANCE_TYPE_WRAPPER)
VIRTUAL_INSTANCE_TYPE_LIST(VIRTUAL_INSTANCE_TYPE_WRAPPER)
#undef INSTANCE_TYPE_WRAPPER
-#undef CODE_KIND_WRAPPER
-#undef FIXED_ARRAY_SUB_INSTANCE_TYPE_WRAPPER
#undef VIRTUAL_INSTANCE_TYPE_WRAPPER
}
@@ -150,25 +142,15 @@ void ObjectStats::Dump(std::stringstream& stream) {
stream << "\"type_data\":{";
#define INSTANCE_TYPE_WRAPPER(name) DumpInstanceTypeData(stream, #name, name);
-#define CODE_KIND_WRAPPER(name) \
- DumpInstanceTypeData(stream, "*CODE_" #name, \
- FIRST_CODE_KIND_SUB_TYPE + Code::name);
-#define FIXED_ARRAY_SUB_INSTANCE_TYPE_WRAPPER(name) \
- DumpInstanceTypeData(stream, "*FIXED_ARRAY_" #name, \
- FIRST_FIXED_ARRAY_SUB_TYPE + name);
#define VIRTUAL_INSTANCE_TYPE_WRAPPER(name) \
DumpInstanceTypeData(stream, #name, FIRST_VIRTUAL_TYPE + name);
INSTANCE_TYPE_LIST(INSTANCE_TYPE_WRAPPER);
- CODE_KIND_LIST(CODE_KIND_WRAPPER);
- FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(FIXED_ARRAY_SUB_INSTANCE_TYPE_WRAPPER);
VIRTUAL_INSTANCE_TYPE_LIST(VIRTUAL_INSTANCE_TYPE_WRAPPER)
stream << "\"END\":{}}}";
#undef INSTANCE_TYPE_WRAPPER
-#undef CODE_KIND_WRAPPER
-#undef FIXED_ARRAY_SUB_INSTANCE_TYPE_WRAPPER
#undef VIRTUAL_INSTANCE_TYPE_WRAPPER
}
@@ -202,93 +184,88 @@ void ObjectStats::RecordObjectStats(InstanceType type, size_t size) {
}
void ObjectStats::RecordVirtualObjectStats(VirtualInstanceType type,
- size_t size) {
+ size_t size, size_t over_allocated) {
DCHECK_LE(type, LAST_VIRTUAL_TYPE);
object_counts_[FIRST_VIRTUAL_TYPE + type]++;
object_sizes_[FIRST_VIRTUAL_TYPE + type] += size;
size_histogram_[FIRST_VIRTUAL_TYPE + type][HistogramIndexFromSize(size)]++;
-}
-
-void ObjectStats::RecordCodeSubTypeStats(int code_sub_type, size_t size) {
- int code_sub_type_index = FIRST_CODE_KIND_SUB_TYPE + code_sub_type;
- DCHECK_GE(code_sub_type_index, FIRST_CODE_KIND_SUB_TYPE);
- DCHECK_LT(code_sub_type_index, FIRST_FIXED_ARRAY_SUB_TYPE);
- object_counts_[code_sub_type_index]++;
- object_sizes_[code_sub_type_index] += size;
- size_histogram_[code_sub_type_index][HistogramIndexFromSize(size)]++;
-}
-
-bool ObjectStats::RecordFixedArraySubTypeStats(FixedArrayBase* array,
- int array_sub_type, size_t size,
- size_t over_allocated) {
- auto it = visited_fixed_array_sub_types_.insert(array);
- if (!it.second) return false;
- DCHECK_LE(array_sub_type, LAST_FIXED_ARRAY_SUB_TYPE);
- object_counts_[FIRST_FIXED_ARRAY_SUB_TYPE + array_sub_type]++;
- object_sizes_[FIRST_FIXED_ARRAY_SUB_TYPE + array_sub_type] += size;
- size_histogram_[FIRST_FIXED_ARRAY_SUB_TYPE + array_sub_type]
- [HistogramIndexFromSize(size)]++;
- if (over_allocated > 0) {
- InstanceType type =
- array->IsHashTable() ? HASH_TABLE_TYPE : FIXED_ARRAY_TYPE;
- over_allocated_[FIRST_FIXED_ARRAY_SUB_TYPE + array_sub_type] +=
- over_allocated;
- over_allocated_histogram_[FIRST_FIXED_ARRAY_SUB_TYPE + array_sub_type]
- [HistogramIndexFromSize(over_allocated)]++;
- over_allocated_[type] += over_allocated;
- over_allocated_histogram_[type][HistogramIndexFromSize(over_allocated)]++;
- }
- return true;
+ over_allocated_[FIRST_VIRTUAL_TYPE + type] += over_allocated;
+ over_allocated_histogram_[FIRST_VIRTUAL_TYPE + type]
+ [HistogramIndexFromSize(size)]++;
}
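A worked example of the bookkeeping above (illustrative numbers; the bucket chosen by HistogramIndexFromSize() is not shown in this hunk):

// RecordVirtualObjectStats(type, /*size=*/512, /*over_allocated=*/128) does:
//   object_counts_[FIRST_VIRTUAL_TYPE + type]    += 1
//   object_sizes_[FIRST_VIRTUAL_TYPE + type]     += 512
//   over_allocated_[FIRST_VIRTUAL_TYPE + type]   += 128
//   size_histogram_ and over_allocated_histogram_ each bump the bucket
//   selected for size 512.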
Isolate* ObjectStats::isolate() { return heap()->isolate(); }
class ObjectStatsCollectorImpl {
public:
+ enum Phase {
+ kPhase1,
+ kPhase2,
+ };
+ static const int kNumberOfPhases = kPhase2 + 1;
+
ObjectStatsCollectorImpl(Heap* heap, ObjectStats* stats);
void CollectGlobalStatistics();
+ void CollectStatistics(HeapObject* obj, Phase phase);
- // Collects statistics of objects for virtual instance types.
- void CollectVirtualStatistics(HeapObject* obj);
+ private:
+ enum CowMode {
+ kCheckCow,
+ kIgnoreCow,
+ };
- // Collects statistics of objects for regular instance types.
- void CollectStatistics(HeapObject* obj);
+ Isolate* isolate() { return heap_->isolate(); }
- private:
- class CompilationCacheTableVisitor;
+ bool RecordVirtualObjectStats(HeapObject* parent, HeapObject* obj,
+ ObjectStats::VirtualInstanceType type,
+ size_t size, size_t over_allocated,
+ CowMode check_cow_array = kCheckCow);
+  // Gets the size from |obj| and assumes no over-allocation.
+ bool RecordSimpleVirtualObjectStats(HeapObject* parent, HeapObject* obj,
+ ObjectStats::VirtualInstanceType type);
+ // For HashTable it is possible to compute over allocated memory.
+ void RecordHashTableVirtualObjectStats(HeapObject* parent,
+ FixedArray* hash_table,
+ ObjectStats::VirtualInstanceType type);
- void RecordObjectStats(HeapObject* obj, InstanceType type, size_t size);
- void RecordBytecodeArrayDetails(BytecodeArray* obj);
- void RecordCodeDetails(Code* code);
- void RecordFixedArrayDetails(FixedArray* array);
- void RecordJSCollectionDetails(JSObject* obj);
- void RecordJSObjectDetails(JSObject* object);
- void RecordJSWeakCollectionDetails(JSWeakCollection* obj);
- void RecordMapDetails(Map* map);
- void RecordScriptDetails(Script* obj);
- void RecordTemplateInfoDetails(TemplateInfo* obj);
- void RecordSharedFunctionInfoDetails(SharedFunctionInfo* sfi);
-
- bool RecordFixedArrayHelper(HeapObject* parent, FixedArray* array,
- int subtype, size_t overhead);
- void RecursivelyRecordFixedArrayHelper(HeapObject* parent, FixedArray* array,
- int subtype);
- template <class HashTable>
- void RecordHashTableHelper(HeapObject* parent, HashTable* array, int subtype);
bool SameLiveness(HeapObject* obj1, HeapObject* obj2);
+ bool CanRecordFixedArray(FixedArrayBase* array);
+ bool IsCowArray(FixedArrayBase* array);
- void RecordVirtualObjectStats(HeapObject* obj,
- ObjectStats::VirtualInstanceType type,
- size_t size);
+  // Blacklist for objects that should not be recorded using
+  // RecordVirtualObjectStats and RecordSimpleVirtualObjectStats. For
+  // recording those objects, dispatch to the low-level
+  // ObjectStats::RecordObjectStats manually.
+ bool ShouldRecordObject(HeapObject* object, CowMode check_cow_array);
+
+ void RecordObjectStats(HeapObject* obj, InstanceType type, size_t size);
+
+ // Specific recursion into constant pool or embedded code objects. Records
+ // FixedArrays and Tuple2 that look like ConstantElementsPair.
+ void RecordVirtualObjectsForConstantPoolOrEmbeddedObjects(
+ HeapObject* parent, HeapObject* object,
+ ObjectStats::VirtualInstanceType type);
+
+ // Details.
void RecordVirtualAllocationSiteDetails(AllocationSite* site);
+ void RecordVirtualBytecodeArrayDetails(BytecodeArray* bytecode);
+ void RecordVirtualCodeDetails(Code* code);
+ void RecordVirtualContext(Context* context);
+ void RecordVirtualFeedbackVectorDetails(FeedbackVector* vector);
+ void RecordVirtualFixedArrayDetails(FixedArray* array);
+ void RecordVirtualFunctionTemplateInfoDetails(FunctionTemplateInfo* fti);
+ void RecordVirtualJSGlobalObjectDetails(JSGlobalObject* object);
+ void RecordVirtualJSCollectionDetails(JSObject* object);
+ void RecordVirtualJSObjectDetails(JSObject* object);
+ void RecordVirtualMapDetails(Map* map);
+ void RecordVirtualScriptDetails(Script* script);
+ void RecordVirtualSharedFunctionInfoDetails(SharedFunctionInfo* info);
+ void RecordVirtualJSFunctionDetails(JSFunction* function);
Heap* heap_;
ObjectStats* stats_;
MarkCompactCollector::NonAtomicMarkingState* marking_state_;
std::unordered_set<HeapObject*> virtual_objects_;
-
- friend class ObjectStatsCollectorImpl::CompilationCacheTableVisitor;
};
ObjectStatsCollectorImpl::ObjectStatsCollectorImpl(Heap* heap,
@@ -298,18 +275,45 @@ ObjectStatsCollectorImpl::ObjectStatsCollectorImpl(Heap* heap,
marking_state_(
heap->mark_compact_collector()->non_atomic_marking_state()) {}
-// For entries which shared the same instance type (historically FixedArrays)
-// we do a pre-pass and create virtual instance types.
-void ObjectStatsCollectorImpl::CollectVirtualStatistics(HeapObject* obj) {
- if (obj->IsAllocationSite()) {
- RecordVirtualAllocationSiteDetails(AllocationSite::cast(obj));
+bool ObjectStatsCollectorImpl::ShouldRecordObject(HeapObject* obj,
+ CowMode check_cow_array) {
+ if (obj->IsFixedArray()) {
+ FixedArray* fixed_array = FixedArray::cast(obj);
+ bool cow_check = check_cow_array == kIgnoreCow || !IsCowArray(fixed_array);
+ return CanRecordFixedArray(fixed_array) && cow_check;
}
+ if (obj == heap_->empty_property_array()) return false;
+ return true;
+}
+
+void ObjectStatsCollectorImpl::RecordHashTableVirtualObjectStats(
+ HeapObject* parent, FixedArray* hash_table,
+ ObjectStats::VirtualInstanceType type) {
+ CHECK(hash_table->IsHashTable());
+ // TODO(mlippautz): Implement over allocation for hash tables.
+ RecordVirtualObjectStats(parent, hash_table, type, hash_table->Size(),
+ ObjectStats::kNoOverAllocation);
}
-void ObjectStatsCollectorImpl::RecordVirtualObjectStats(
- HeapObject* obj, ObjectStats::VirtualInstanceType type, size_t size) {
- virtual_objects_.insert(obj);
- stats_->RecordVirtualObjectStats(type, size);
+bool ObjectStatsCollectorImpl::RecordSimpleVirtualObjectStats(
+ HeapObject* parent, HeapObject* obj,
+ ObjectStats::VirtualInstanceType type) {
+ return RecordVirtualObjectStats(parent, obj, type, obj->Size(),
+ ObjectStats::kNoOverAllocation, kCheckCow);
+}
+
+bool ObjectStatsCollectorImpl::RecordVirtualObjectStats(
+ HeapObject* parent, HeapObject* obj, ObjectStats::VirtualInstanceType type,
+ size_t size, size_t over_allocated, CowMode check_cow_array) {
+ if (!SameLiveness(parent, obj) || !ShouldRecordObject(obj, check_cow_array))
+ return false;
+
+ if (virtual_objects_.find(obj) == virtual_objects_.end()) {
+ virtual_objects_.insert(obj);
+ stats_->RecordVirtualObjectStats(type, size, over_allocated);
+ return true;
+ }
+ return false;
}
void ObjectStatsCollectorImpl::RecordVirtualAllocationSiteDetails(
@@ -317,141 +321,290 @@ void ObjectStatsCollectorImpl::RecordVirtualAllocationSiteDetails(
if (!site->PointsToLiteral()) return;
JSObject* boilerplate = site->boilerplate();
if (boilerplate->IsJSArray()) {
- RecordVirtualObjectStats(boilerplate,
- ObjectStats::JS_ARRAY_BOILERPLATE_TYPE,
- boilerplate->Size());
+ RecordSimpleVirtualObjectStats(site, boilerplate,
+ ObjectStats::JS_ARRAY_BOILERPLATE_TYPE);
// Array boilerplates cannot have properties.
} else {
- RecordVirtualObjectStats(boilerplate,
- ObjectStats::JS_OBJECT_BOILERPLATE_TYPE,
- boilerplate->Size());
+ RecordVirtualObjectStats(
+ site, boilerplate, ObjectStats::JS_OBJECT_BOILERPLATE_TYPE,
+ boilerplate->Size(), ObjectStats::kNoOverAllocation);
if (boilerplate->HasFastProperties()) {
- // We'll misclassify the empty_proeprty_array here. Given that there is a
- // single instance, this is neglible.
+ // We'll mis-classify the empty_property_array here. Given that there is a
+ // single instance, this is negligible.
PropertyArray* properties = boilerplate->property_array();
- RecordVirtualObjectStats(properties,
- ObjectStats::BOILERPLATE_PROPERTY_ARRAY_TYPE,
- properties->Size());
+ RecordSimpleVirtualObjectStats(
+ site, properties, ObjectStats::BOILERPLATE_PROPERTY_ARRAY_TYPE);
} else {
NameDictionary* properties = boilerplate->property_dictionary();
- RecordVirtualObjectStats(properties,
- ObjectStats::BOILERPLATE_NAME_DICTIONARY_TYPE,
- properties->Size());
+ RecordSimpleVirtualObjectStats(
+ site, properties, ObjectStats::BOILERPLATE_PROPERTY_DICTIONARY_TYPE);
}
}
FixedArrayBase* elements = boilerplate->elements();
- // We skip COW elements since they are shared, and we are sure that if the
- // boilerplate exists there must have been at least one instantiation.
- if (!elements->IsCowArray()) {
- RecordVirtualObjectStats(elements, ObjectStats::BOILERPLATE_ELEMENTS_TYPE,
- elements->Size());
- }
+ RecordSimpleVirtualObjectStats(site, elements,
+ ObjectStats::BOILERPLATE_ELEMENTS_TYPE);
}
-void ObjectStatsCollectorImpl::CollectStatistics(HeapObject* obj) {
- Map* map = obj->map();
-
- // Record for the InstanceType.
- int object_size = obj->Size();
- RecordObjectStats(obj, map->instance_type(), object_size);
-
- // Record specific sub types where possible.
- if (obj->IsMap()) RecordMapDetails(Map::cast(obj));
- if (obj->IsObjectTemplateInfo() || obj->IsFunctionTemplateInfo()) {
- RecordTemplateInfoDetails(TemplateInfo::cast(obj));
+void ObjectStatsCollectorImpl::RecordVirtualFunctionTemplateInfoDetails(
+ FunctionTemplateInfo* fti) {
+ // named_property_handler and indexed_property_handler are recorded as
+ // INTERCEPTOR_INFO_TYPE.
+ if (!fti->call_code()->IsUndefined(isolate())) {
+ RecordSimpleVirtualObjectStats(
+ fti, CallHandlerInfo::cast(fti->call_code()),
+ ObjectStats::FUNCTION_TEMPLATE_INFO_ENTRIES_TYPE);
}
- if (obj->IsBytecodeArray()) {
- RecordBytecodeArrayDetails(BytecodeArray::cast(obj));
+ if (!fti->instance_call_handler()->IsUndefined(isolate())) {
+ RecordSimpleVirtualObjectStats(
+ fti, CallHandlerInfo::cast(fti->instance_call_handler()),
+ ObjectStats::FUNCTION_TEMPLATE_INFO_ENTRIES_TYPE);
}
- if (obj->IsCode()) RecordCodeDetails(Code::cast(obj));
- if (obj->IsSharedFunctionInfo()) {
- RecordSharedFunctionInfoDetails(SharedFunctionInfo::cast(obj));
+}
+
+void ObjectStatsCollectorImpl::RecordVirtualJSGlobalObjectDetails(
+ JSGlobalObject* object) {
+ // Properties.
+ GlobalDictionary* properties = object->global_dictionary();
+ RecordHashTableVirtualObjectStats(object, properties,
+ ObjectStats::GLOBAL_PROPERTIES_TYPE);
+ // Elements.
+ FixedArrayBase* elements = object->elements();
+ RecordSimpleVirtualObjectStats(object, elements,
+ ObjectStats::GLOBAL_ELEMENTS_TYPE);
+}
+
+void ObjectStatsCollectorImpl::RecordVirtualJSCollectionDetails(
+ JSObject* object) {
+ if (object->IsJSMap()) {
+ RecordSimpleVirtualObjectStats(
+ object, FixedArray::cast(JSMap::cast(object)->table()),
+ ObjectStats::JS_COLLETION_TABLE_TYPE);
}
- if (obj->IsFixedArray()) RecordFixedArrayDetails(FixedArray::cast(obj));
- if (obj->IsJSObject()) RecordJSObjectDetails(JSObject::cast(obj));
- if (obj->IsJSWeakCollection()) {
- RecordJSWeakCollectionDetails(JSWeakCollection::cast(obj));
+ if (object->IsJSSet()) {
+ RecordSimpleVirtualObjectStats(
+ object, FixedArray::cast(JSSet::cast(object)->table()),
+ ObjectStats::JS_COLLETION_TABLE_TYPE);
}
- if (obj->IsJSCollection()) {
- RecordJSCollectionDetails(JSObject::cast(obj));
+}
+
+void ObjectStatsCollectorImpl::RecordVirtualJSObjectDetails(JSObject* object) {
+ // JSGlobalObject is recorded separately.
+ if (object->IsJSGlobalObject()) return;
+
+ // Properties.
+ if (object->HasFastProperties()) {
+ PropertyArray* properties = object->property_array();
+ CHECK_EQ(PROPERTY_ARRAY_TYPE, properties->map()->instance_type());
+ } else {
+ NameDictionary* properties = object->property_dictionary();
+ RecordHashTableVirtualObjectStats(
+ object, properties, ObjectStats::OBJECT_PROPERTY_DICTIONARY_TYPE);
}
- if (obj->IsScript()) RecordScriptDetails(Script::cast(obj));
+ // Elements.
+ FixedArrayBase* elements = object->elements();
+ RecordSimpleVirtualObjectStats(object, elements, ObjectStats::ELEMENTS_TYPE);
}
-class ObjectStatsCollectorImpl::CompilationCacheTableVisitor
- : public RootVisitor {
- public:
- explicit CompilationCacheTableVisitor(ObjectStatsCollectorImpl* parent)
- : parent_(parent) {}
-
- void VisitRootPointers(Root root, Object** start, Object** end) override {
- for (Object** current = start; current < end; current++) {
- HeapObject* obj = HeapObject::cast(*current);
- if (obj->IsUndefined(parent_->heap_->isolate())) continue;
- CHECK(obj->IsCompilationCacheTable());
- parent_->RecordHashTableHelper(nullptr, CompilationCacheTable::cast(obj),
- COMPILATION_CACHE_TABLE_SUB_TYPE);
+static ObjectStats::VirtualInstanceType GetFeedbackSlotType(
+ Object* obj, FeedbackSlotKind kind, Isolate* isolate) {
+ switch (kind) {
+ case FeedbackSlotKind::kCall:
+ if (obj == *isolate->factory()->uninitialized_symbol() ||
+ obj == *isolate->factory()->premonomorphic_symbol()) {
+ return ObjectStats::FEEDBACK_VECTOR_SLOT_CALL_UNUSED_TYPE;
+ }
+ return ObjectStats::FEEDBACK_VECTOR_SLOT_CALL_TYPE;
+
+ case FeedbackSlotKind::kLoadProperty:
+ case FeedbackSlotKind::kLoadGlobalInsideTypeof:
+ case FeedbackSlotKind::kLoadGlobalNotInsideTypeof:
+ case FeedbackSlotKind::kLoadKeyed:
+ if (obj == *isolate->factory()->uninitialized_symbol() ||
+ obj == *isolate->factory()->premonomorphic_symbol()) {
+ return ObjectStats::FEEDBACK_VECTOR_SLOT_LOAD_UNUSED_TYPE;
+ }
+ return ObjectStats::FEEDBACK_VECTOR_SLOT_LOAD_TYPE;
+
+ case FeedbackSlotKind::kStoreNamedSloppy:
+ case FeedbackSlotKind::kStoreNamedStrict:
+ case FeedbackSlotKind::kStoreOwnNamed:
+ case FeedbackSlotKind::kStoreGlobalSloppy:
+ case FeedbackSlotKind::kStoreGlobalStrict:
+ case FeedbackSlotKind::kStoreKeyedSloppy:
+ case FeedbackSlotKind::kStoreKeyedStrict:
+ if (obj == *isolate->factory()->uninitialized_symbol() ||
+ obj == *isolate->factory()->premonomorphic_symbol()) {
+ return ObjectStats::FEEDBACK_VECTOR_SLOT_STORE_UNUSED_TYPE;
+ }
+ return ObjectStats::FEEDBACK_VECTOR_SLOT_STORE_TYPE;
+
+ case FeedbackSlotKind::kBinaryOp:
+ case FeedbackSlotKind::kCompareOp:
+ return ObjectStats::FEEDBACK_VECTOR_SLOT_ENUM_TYPE;
+
+ default:
+ return ObjectStats::FEEDBACK_VECTOR_SLOT_OTHER_TYPE;
+ }
+}
+
+void ObjectStatsCollectorImpl::RecordVirtualFeedbackVectorDetails(
+ FeedbackVector* vector) {
+ if (virtual_objects_.find(vector) == virtual_objects_.end()) {
+ // Manually insert the feedback vector into the virtual object list, since
+ // we're logging its component parts separately.
+ virtual_objects_.insert(vector);
+
+ size_t calculated_size = 0;
+
+ // Log the feedback vector's header (fixed fields).
+ size_t header_size =
+ reinterpret_cast<Address>(vector->slots_start()) - vector->address();
+ stats_->RecordVirtualObjectStats(ObjectStats::FEEDBACK_VECTOR_HEADER_TYPE,
+ header_size,
+ ObjectStats::kNoOverAllocation);
+ calculated_size += header_size;
+
+ // Iterate over the feedback slots and log each one.
+ FeedbackMetadataIterator it(vector->metadata());
+ while (it.HasNext()) {
+ FeedbackSlot slot = it.Next();
+ // Log the entry (or entries) taken up by this slot.
+ size_t slot_size = it.entry_size() * kPointerSize;
+ stats_->RecordVirtualObjectStats(
+ GetFeedbackSlotType(vector->Get(slot), it.kind(), heap_->isolate()),
+ slot_size, ObjectStats::kNoOverAllocation);
+ calculated_size += slot_size;
+
+ // Log the monomorphic/polymorphic helper objects that this slot owns.
+ for (int i = 0; i < it.entry_size(); i++) {
+ Object* raw_object = vector->get(slot.ToInt() + i);
+ if (!raw_object->IsHeapObject()) continue;
+ HeapObject* object = HeapObject::cast(raw_object);
+ if (object->IsCell() || object->IsFixedArray()) {
+ RecordSimpleVirtualObjectStats(
+ vector, object, ObjectStats::FEEDBACK_VECTOR_ENTRY_TYPE);
+ }
+ }
}
+
+ CHECK_EQ(calculated_size, vector->Size());
}
+}
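A worked example of the size accounting the CHECK_EQ above enforces (hypothetical vector layout, 64-bit build so kPointerSize == 8):

// Header of 4 words, two slots with entry sizes 1 and 2:
//   FEEDBACK_VECTOR_HEADER_TYPE                 : 4 * 8 = 32 bytes
//   slot 0, bucketed by GetFeedbackSlotType()   : 1 * 8 =  8 bytes
//   slot 1, bucketed by GetFeedbackSlotType()   : 2 * 8 = 16 bytes
//   calculated_size = 32 + 8 + 16 = 56 == vector->Size()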
- private:
- ObjectStatsCollectorImpl* parent_;
-};
+void ObjectStatsCollectorImpl::RecordVirtualFixedArrayDetails(
+ FixedArray* array) {
+ if (IsCowArray(array)) {
+ RecordVirtualObjectStats(nullptr, array, ObjectStats::COW_ARRAY_TYPE,
+ array->Size(), ObjectStats::kNoOverAllocation,
+ kIgnoreCow);
+ }
+}
+
+void ObjectStatsCollectorImpl::CollectStatistics(HeapObject* obj, Phase phase) {
+ Map* map = obj->map();
+ switch (phase) {
+ case kPhase1:
+ if (obj->IsFeedbackVector()) {
+ RecordVirtualFeedbackVectorDetails(FeedbackVector::cast(obj));
+ } else if (obj->IsMap()) {
+ RecordVirtualMapDetails(Map::cast(obj));
+ } else if (obj->IsBytecodeArray()) {
+ RecordVirtualBytecodeArrayDetails(BytecodeArray::cast(obj));
+ } else if (obj->IsCode()) {
+ RecordVirtualCodeDetails(Code::cast(obj));
+ } else if (obj->IsFunctionTemplateInfo()) {
+ RecordVirtualFunctionTemplateInfoDetails(
+ FunctionTemplateInfo::cast(obj));
+ } else if (obj->IsJSFunction()) {
+ RecordVirtualJSFunctionDetails(JSFunction::cast(obj));
+ } else if (obj->IsJSGlobalObject()) {
+ RecordVirtualJSGlobalObjectDetails(JSGlobalObject::cast(obj));
+ } else if (obj->IsJSObject()) {
+ // This phase needs to come after RecordVirtualAllocationSiteDetails
+ // to properly split among boilerplates.
+ RecordVirtualJSObjectDetails(JSObject::cast(obj));
+ } else if (obj->IsJSCollection()) {
+ RecordVirtualJSCollectionDetails(JSObject::cast(obj));
+ } else if (obj->IsSharedFunctionInfo()) {
+ RecordVirtualSharedFunctionInfoDetails(SharedFunctionInfo::cast(obj));
+ } else if (obj->IsContext()) {
+ RecordVirtualContext(Context::cast(obj));
+ } else if (obj->IsScript()) {
+ RecordVirtualScriptDetails(Script::cast(obj));
+ } else if (obj->IsFixedArray()) {
+ // Has to go last as it triggers too eagerly.
+ RecordVirtualFixedArrayDetails(FixedArray::cast(obj));
+ }
+ break;
+ case kPhase2:
+ RecordObjectStats(obj, map->instance_type(), obj->Size());
+ break;
+ }
+}
void ObjectStatsCollectorImpl::CollectGlobalStatistics() {
- // Global FixedArrays.
- RecordFixedArrayHelper(nullptr, heap_->weak_new_space_object_to_code_list(),
- WEAK_NEW_SPACE_OBJECT_TO_CODE_SUB_TYPE, 0);
- RecordFixedArrayHelper(nullptr, heap_->serialized_objects(),
- SERIALIZED_OBJECTS_SUB_TYPE, 0);
- RecordFixedArrayHelper(nullptr, heap_->number_string_cache(),
- NUMBER_STRING_CACHE_SUB_TYPE, 0);
- RecordFixedArrayHelper(nullptr, heap_->single_character_string_cache(),
- SINGLE_CHARACTER_STRING_CACHE_SUB_TYPE, 0);
- RecordFixedArrayHelper(nullptr, heap_->string_split_cache(),
- STRING_SPLIT_CACHE_SUB_TYPE, 0);
- RecordFixedArrayHelper(nullptr, heap_->regexp_multiple_cache(),
- REGEXP_MULTIPLE_CACHE_SUB_TYPE, 0);
- RecordFixedArrayHelper(nullptr, heap_->retained_maps(),
- RETAINED_MAPS_SUB_TYPE, 0);
-
- // Global weak FixedArrays.
- RecordFixedArrayHelper(
+ // Iterate boilerplates first to disambiguate them from regular JS objects.
+ Object* list = heap_->allocation_sites_list();
+ while (list->IsAllocationSite()) {
+ AllocationSite* site = AllocationSite::cast(list);
+ RecordVirtualAllocationSiteDetails(site);
+ list = site->weak_next();
+ }
+
+ // FixedArray.
+ RecordSimpleVirtualObjectStats(
+ nullptr, heap_->weak_new_space_object_to_code_list(),
+ ObjectStats::WEAK_NEW_SPACE_OBJECT_TO_CODE_TYPE);
+ RecordSimpleVirtualObjectStats(nullptr, heap_->serialized_objects(),
+ ObjectStats::SERIALIZED_OBJECTS_TYPE);
+ RecordSimpleVirtualObjectStats(nullptr, heap_->number_string_cache(),
+ ObjectStats::NUMBER_STRING_CACHE_TYPE);
+ RecordSimpleVirtualObjectStats(
+ nullptr, heap_->single_character_string_cache(),
+ ObjectStats::SINGLE_CHARACTER_STRING_CACHE_TYPE);
+ RecordSimpleVirtualObjectStats(nullptr, heap_->string_split_cache(),
+ ObjectStats::STRING_SPLIT_CACHE_TYPE);
+ RecordSimpleVirtualObjectStats(nullptr, heap_->regexp_multiple_cache(),
+ ObjectStats::REGEXP_MULTIPLE_CACHE_TYPE);
+ RecordSimpleVirtualObjectStats(nullptr, heap_->retained_maps(),
+ ObjectStats::RETAINED_MAPS_TYPE);
+
+ // WeakFixedArray.
+ RecordSimpleVirtualObjectStats(
nullptr, WeakFixedArray::cast(heap_->noscript_shared_function_infos()),
- NOSCRIPT_SHARED_FUNCTION_INFOS_SUB_TYPE, 0);
- RecordFixedArrayHelper(nullptr, WeakFixedArray::cast(heap_->script_list()),
- SCRIPT_LIST_SUB_TYPE, 0);
-
- // Global hash tables.
- RecordHashTableHelper(nullptr, heap_->string_table(), STRING_TABLE_SUB_TYPE);
- RecordHashTableHelper(nullptr, heap_->weak_object_to_code_table(),
- OBJECT_TO_CODE_SUB_TYPE);
- RecordHashTableHelper(nullptr, heap_->code_stubs(),
- CODE_STUBS_TABLE_SUB_TYPE);
- RecordHashTableHelper(nullptr, heap_->empty_property_dictionary(),
- EMPTY_PROPERTIES_DICTIONARY_SUB_TYPE);
- CompilationCache* compilation_cache = heap_->isolate()->compilation_cache();
- CompilationCacheTableVisitor v(this);
- compilation_cache->Iterate(&v);
+ ObjectStats::NOSCRIPT_SHARED_FUNCTION_INFOS_TYPE);
+ RecordSimpleVirtualObjectStats(nullptr,
+ WeakFixedArray::cast(heap_->script_list()),
+ ObjectStats::SCRIPT_LIST_TYPE);
+
+ // HashTable.
+ RecordHashTableVirtualObjectStats(nullptr, heap_->string_table(),
+ ObjectStats::STRING_TABLE_TYPE);
+ RecordHashTableVirtualObjectStats(nullptr, heap_->code_stubs(),
+ ObjectStats::CODE_STUBS_TABLE_TYPE);
+
+ // WeakHashTable.
+ RecordHashTableVirtualObjectStats(nullptr, heap_->weak_object_to_code_table(),
+ ObjectStats::OBJECT_TO_CODE_TYPE);
}
void ObjectStatsCollectorImpl::RecordObjectStats(HeapObject* obj,
InstanceType type,
size_t size) {
- if (virtual_objects_.find(obj) == virtual_objects_.end())
+ if (virtual_objects_.find(obj) == virtual_objects_.end()) {
stats_->RecordObjectStats(type, size);
+ }
}
-static bool CanRecordFixedArray(Heap* heap, FixedArrayBase* array) {
- return array->map()->instance_type() == FIXED_ARRAY_TYPE &&
- array != heap->empty_fixed_array() &&
- array != heap->empty_sloppy_arguments_elements() &&
- array != heap->empty_slow_element_dictionary() &&
- array != heap->empty_property_dictionary();
+bool ObjectStatsCollectorImpl::CanRecordFixedArray(FixedArrayBase* array) {
+ return array != heap_->empty_fixed_array() &&
+ array != heap_->empty_sloppy_arguments_elements() &&
+ array != heap_->empty_slow_element_dictionary() &&
+ array != heap_->empty_property_dictionary();
}
-static bool IsCowArray(Heap* heap, FixedArrayBase* array) {
- return array->map() == heap->fixed_cow_array_map();
+bool ObjectStatsCollectorImpl::IsCowArray(FixedArrayBase* array) {
+ return array->map() == heap_->fixed_cow_array_map();
}
bool ObjectStatsCollectorImpl::SameLiveness(HeapObject* obj1,
@@ -460,256 +613,226 @@ bool ObjectStatsCollectorImpl::SameLiveness(HeapObject* obj1,
marking_state_->Color(obj1) == marking_state_->Color(obj2);
}
-bool ObjectStatsCollectorImpl::RecordFixedArrayHelper(HeapObject* parent,
- FixedArray* array,
- int subtype,
- size_t overhead) {
- if (SameLiveness(parent, array) && CanRecordFixedArray(heap_, array) &&
- !IsCowArray(heap_, array)) {
- return stats_->RecordFixedArraySubTypeStats(array, subtype, array->Size(),
- overhead);
+void ObjectStatsCollectorImpl::RecordVirtualMapDetails(Map* map) {
+ // TODO(mlippautz): map->dependent_code(): DEPENDENT_CODE_TYPE.
+
+ DescriptorArray* array = map->instance_descriptors();
+ if (map->owns_descriptors() && array != heap_->empty_descriptor_array()) {
+ // DescriptorArray has its own instance type.
+ EnumCache* enum_cache = array->GetEnumCache();
+ RecordSimpleVirtualObjectStats(array, enum_cache->keys(),
+ ObjectStats::ENUM_CACHE_TYPE);
+ RecordSimpleVirtualObjectStats(array, enum_cache->indices(),
+ ObjectStats::ENUM_INDICES_CACHE_TYPE);
}
- return false;
-}
-void ObjectStatsCollectorImpl::RecursivelyRecordFixedArrayHelper(
- HeapObject* parent, FixedArray* array, int subtype) {
- if (RecordFixedArrayHelper(parent, array, subtype, 0)) {
- for (int i = 0; i < array->length(); i++) {
- if (array->get(i)->IsFixedArray()) {
- RecursivelyRecordFixedArrayHelper(
- parent, FixedArray::cast(array->get(i)), subtype);
+ if (map->is_prototype_map()) {
+ if (map->prototype_info()->IsPrototypeInfo()) {
+ PrototypeInfo* info = PrototypeInfo::cast(map->prototype_info());
+ Object* users = info->prototype_users();
+ if (users->IsWeakFixedArray()) {
+ RecordSimpleVirtualObjectStats(map, WeakFixedArray::cast(users),
+ ObjectStats::PROTOTYPE_USERS_TYPE);
}
}
}
}
-template <class HashTable>
-void ObjectStatsCollectorImpl::RecordHashTableHelper(HeapObject* parent,
- HashTable* array,
- int subtype) {
- int used = array->NumberOfElements() * HashTable::kEntrySize * kPointerSize;
- CHECK_GE(array->Size(), used);
- size_t overhead = array->Size() - used -
- HashTable::kElementsStartIndex * kPointerSize -
- FixedArray::kHeaderSize;
- RecordFixedArrayHelper(parent, array, subtype, overhead);
-}
-
-void ObjectStatsCollectorImpl::RecordJSObjectDetails(JSObject* object) {
- size_t overhead = 0;
- FixedArrayBase* elements = object->elements();
- if (CanRecordFixedArray(heap_, elements) && !IsCowArray(heap_, elements)) {
- if (elements->IsDictionary() && SameLiveness(object, elements)) {
- NumberDictionary* dict = NumberDictionary::cast(elements);
- RecordHashTableHelper(object, dict, DICTIONARY_ELEMENTS_SUB_TYPE);
- } else {
- if (IsHoleyElementsKind(object->GetElementsKind())) {
- int used = object->GetFastElementsUsage() * kPointerSize;
- if (object->GetElementsKind() == HOLEY_DOUBLE_ELEMENTS) used *= 2;
- CHECK_GE(elements->Size(), used);
- overhead = elements->Size() - used - FixedArray::kHeaderSize;
- }
- stats_->RecordFixedArraySubTypeStats(elements, PACKED_ELEMENTS_SUB_TYPE,
- elements->Size(), overhead);
+void ObjectStatsCollectorImpl::RecordVirtualScriptDetails(Script* script) {
+ FixedArray* infos = script->shared_function_infos();
+ RecordSimpleVirtualObjectStats(
+ script, script->shared_function_infos(),
+ ObjectStats::SCRIPT_SHARED_FUNCTION_INFOS_TYPE);
+ // Split off weak cells from the regular weak cell type.
+ for (int i = 0; i < infos->length(); i++) {
+ if (infos->get(i)->IsWeakCell()) {
+ RecordSimpleVirtualObjectStats(
+ infos, WeakCell::cast(infos->get(i)),
+ ObjectStats::SCRIPT_SHARED_FUNCTION_INFOS_TYPE);
}
}
- if (object->IsJSGlobalObject()) {
- GlobalDictionary* properties =
- JSGlobalObject::cast(object)->global_dictionary();
- if (CanRecordFixedArray(heap_, properties) &&
- SameLiveness(object, properties)) {
- RecordHashTableHelper(object, properties, DICTIONARY_PROPERTIES_SUB_TYPE);
- }
- } else if (!object->HasFastProperties()) {
- NameDictionary* properties = object->property_dictionary();
- if (CanRecordFixedArray(heap_, properties) &&
- SameLiveness(object, properties)) {
- RecordHashTableHelper(object, properties, DICTIONARY_PROPERTIES_SUB_TYPE);
- }
+ // Log the size of external source code.
+ Object* source = script->source();
+ if (source->IsExternalString()) {
+ // The contents of external strings aren't on the heap, so we have to record
+ // them manually.
+ ExternalString* external_source_string = ExternalString::cast(source);
+ size_t length_multiplier = external_source_string->IsTwoByteRepresentation()
+ ? kShortSize
+ : kCharSize;
+ size_t off_heap_size = external_source_string->length() * length_multiplier;
+ size_t on_heap_size = external_source_string->Size();
+ RecordVirtualObjectStats(script, external_source_string,
+ ObjectStats::SCRIPT_SOURCE_EXTERNAL_TYPE,
+ on_heap_size + off_heap_size,
+ ObjectStats::kNoOverAllocation);
+ } else if (source->IsHeapObject()) {
+ RecordSimpleVirtualObjectStats(
+ script, HeapObject::cast(source),
+ ObjectStats::SCRIPT_SOURCE_NON_EXTERNAL_TYPE);
}
}
-void ObjectStatsCollectorImpl::RecordJSWeakCollectionDetails(
- JSWeakCollection* obj) {
- if (obj->table()->IsHashTable()) {
- ObjectHashTable* table = ObjectHashTable::cast(obj->table());
- int used = table->NumberOfElements() * ObjectHashTable::kEntrySize;
- size_t overhead = table->Size() - used;
- RecordFixedArrayHelper(obj, table, JS_WEAK_COLLECTION_SUB_TYPE, overhead);
+void ObjectStatsCollectorImpl::RecordVirtualSharedFunctionInfoDetails(
+ SharedFunctionInfo* info) {
+ // Uncompiled SharedFunctionInfo gets its own category.
+ if (!info->is_compiled()) {
+ RecordSimpleVirtualObjectStats(
+ nullptr, info, ObjectStats::UNCOMPILED_SHARED_FUNCTION_INFO_TYPE);
}
+ // SharedFunctionInfo::feedback_metadata() is a COW array.
+ FeedbackMetadata* fm = FeedbackMetadata::cast(info->feedback_metadata());
+ RecordVirtualObjectStats(info, fm, ObjectStats::FEEDBACK_METADATA_TYPE,
+ fm->Size(), ObjectStats::kNoOverAllocation,
+ kIgnoreCow);
}
-void ObjectStatsCollectorImpl::RecordJSCollectionDetails(JSObject* obj) {
- // The JS versions use a different HashTable implementation that cannot use
- // the regular helper. Since overall impact is usually small just record
- // without overhead.
- if (obj->IsJSMap()) {
- RecordFixedArrayHelper(nullptr, FixedArray::cast(JSMap::cast(obj)->table()),
- JS_COLLECTION_SUB_TYPE, 0);
- }
- if (obj->IsJSSet()) {
- RecordFixedArrayHelper(nullptr, FixedArray::cast(JSSet::cast(obj)->table()),
- JS_COLLECTION_SUB_TYPE, 0);
+void ObjectStatsCollectorImpl::RecordVirtualJSFunctionDetails(
+ JSFunction* function) {
+ // Uncompiled JSFunctions get their own category.
+ if (!function->is_compiled()) {
+ RecordSimpleVirtualObjectStats(nullptr, function,
+ ObjectStats::UNCOMPILED_JS_FUNCTION_TYPE);
}
}
-void ObjectStatsCollectorImpl::RecordScriptDetails(Script* obj) {
- FixedArray* infos = FixedArray::cast(obj->shared_function_infos());
- RecordFixedArrayHelper(obj, infos, SHARED_FUNCTION_INFOS_SUB_TYPE, 0);
-}
+namespace {
-void ObjectStatsCollectorImpl::RecordMapDetails(Map* map_obj) {
- DescriptorArray* array = map_obj->instance_descriptors();
- if (map_obj->owns_descriptors() && array != heap_->empty_descriptor_array() &&
- SameLiveness(map_obj, array)) {
- RecordFixedArrayHelper(map_obj, array, DESCRIPTOR_ARRAY_SUB_TYPE, 0);
- EnumCache* enum_cache = array->GetEnumCache();
- RecordFixedArrayHelper(array, enum_cache->keys(), ENUM_CACHE_SUB_TYPE, 0);
- RecordFixedArrayHelper(array, enum_cache->indices(),
- ENUM_INDICES_CACHE_SUB_TYPE, 0);
- }
+bool MatchesConstantElementsPair(Object* object) {
+ if (!object->IsTuple2()) return false;
+ Tuple2* tuple = Tuple2::cast(object);
+ return tuple->value1()->IsSmi() && tuple->value2()->IsFixedArray();
+}
- for (DependentCode* cur_dependent_code = map_obj->dependent_code();
- cur_dependent_code != heap_->empty_fixed_array();
- cur_dependent_code = DependentCode::cast(
- cur_dependent_code->get(DependentCode::kNextLinkIndex))) {
- RecordFixedArrayHelper(map_obj, cur_dependent_code, DEPENDENT_CODE_SUB_TYPE,
- 0);
- }
+} // namespace
- if (map_obj->is_prototype_map()) {
- if (map_obj->prototype_info()->IsPrototypeInfo()) {
- PrototypeInfo* info = PrototypeInfo::cast(map_obj->prototype_info());
- Object* users = info->prototype_users();
- if (users->IsWeakFixedArray()) {
- RecordFixedArrayHelper(map_obj, WeakFixedArray::cast(users),
- PROTOTYPE_USERS_SUB_TYPE, 0);
+void ObjectStatsCollectorImpl::
+ RecordVirtualObjectsForConstantPoolOrEmbeddedObjects(
+ HeapObject* parent, HeapObject* object,
+ ObjectStats::VirtualInstanceType type) {
+ if (RecordSimpleVirtualObjectStats(parent, object, type)) {
+ if (object->IsFixedArray()) {
+ FixedArray* array = FixedArray::cast(object);
+ for (int i = 0; i < array->length(); i++) {
+ Object* entry = array->get(i);
+ if (!entry->IsHeapObject()) continue;
+ RecordVirtualObjectsForConstantPoolOrEmbeddedObjects(
+ array, HeapObject::cast(entry), type);
}
+ } else if (MatchesConstantElementsPair(object)) {
+ Tuple2* tuple = Tuple2::cast(object);
+ RecordVirtualObjectsForConstantPoolOrEmbeddedObjects(
+ tuple, HeapObject::cast(tuple->value2()), type);
}
}
}
-void ObjectStatsCollectorImpl::RecordTemplateInfoDetails(TemplateInfo* obj) {
- if (obj->property_accessors()->IsFixedArray()) {
- RecordFixedArrayHelper(obj, FixedArray::cast(obj->property_accessors()),
- TEMPLATE_INFO_SUB_TYPE, 0);
- }
- if (obj->property_list()->IsFixedArray()) {
- RecordFixedArrayHelper(obj, FixedArray::cast(obj->property_list()),
- TEMPLATE_INFO_SUB_TYPE, 0);
+void ObjectStatsCollectorImpl::RecordVirtualBytecodeArrayDetails(
+ BytecodeArray* bytecode) {
+ RecordSimpleVirtualObjectStats(
+ bytecode, bytecode->constant_pool(),
+ ObjectStats::BYTECODE_ARRAY_CONSTANT_POOL_TYPE);
+ // FixedArrays in the constant pool are used for holding descriptor information.
+ // They are shared with optimized code.
+ FixedArray* constant_pool = FixedArray::cast(bytecode->constant_pool());
+ for (int i = 0; i < constant_pool->length(); i++) {
+ Object* entry = constant_pool->get(i);
+ if (entry->IsFixedArray() || MatchesConstantElementsPair(entry)) {
+ RecordVirtualObjectsForConstantPoolOrEmbeddedObjects(
+ constant_pool, HeapObject::cast(entry),
+ ObjectStats::EMBEDDED_OBJECT_TYPE);
+ }
}
+ RecordSimpleVirtualObjectStats(
+ bytecode, bytecode->handler_table(),
+ ObjectStats::BYTECODE_ARRAY_HANDLER_TABLE_TYPE);
}
-void ObjectStatsCollectorImpl::RecordBytecodeArrayDetails(BytecodeArray* obj) {
- RecordFixedArrayHelper(obj, obj->constant_pool(),
- BYTECODE_ARRAY_CONSTANT_POOL_SUB_TYPE, 0);
- RecordFixedArrayHelper(obj, obj->handler_table(),
- BYTECODE_ARRAY_HANDLER_TABLE_SUB_TYPE, 0);
+namespace {
+
+ObjectStats::VirtualInstanceType CodeKindToVirtualInstanceType(
+ Code::Kind kind) {
+ switch (kind) {
+#define CODE_KIND_CASE(type) \
+ case Code::type: \
+ return ObjectStats::type;
+ CODE_KIND_LIST(CODE_KIND_CASE)
+#undef CODE_KIND_CASE
+ default:
+ UNREACHABLE();
+ }
+ UNREACHABLE();
}
-void ObjectStatsCollectorImpl::RecordCodeDetails(Code* code) {
- stats_->RecordCodeSubTypeStats(code->kind(), code->Size());
- RecordFixedArrayHelper(code, code->deoptimization_data(),
- DEOPTIMIZATION_DATA_SUB_TYPE, 0);
+} // namespace
+
+void ObjectStatsCollectorImpl::RecordVirtualCodeDetails(Code* code) {
+ RecordSimpleVirtualObjectStats(nullptr, code,
+ CodeKindToVirtualInstanceType(code->kind()));
+ RecordSimpleVirtualObjectStats(code, code->deoptimization_data(),
+ ObjectStats::DEOPTIMIZATION_DATA_TYPE);
if (code->kind() == Code::Kind::OPTIMIZED_FUNCTION) {
DeoptimizationData* input_data =
DeoptimizationData::cast(code->deoptimization_data());
if (input_data->length() > 0) {
- RecordFixedArrayHelper(code->deoptimization_data(),
- input_data->LiteralArray(),
- OPTIMIZED_CODE_LITERALS_SUB_TYPE, 0);
+ RecordSimpleVirtualObjectStats(code->deoptimization_data(),
+ input_data->LiteralArray(),
+ ObjectStats::OPTIMIZED_CODE_LITERALS_TYPE);
}
}
- RecordFixedArrayHelper(code, code->handler_table(), HANDLER_TABLE_SUB_TYPE,
- 0);
int const mode_mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
for (RelocIterator it(code, mode_mask); !it.done(); it.next()) {
RelocInfo::Mode mode = it.rinfo()->rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
Object* target = it.rinfo()->target_object();
- if (target->IsFixedArray()) {
- RecursivelyRecordFixedArrayHelper(code, FixedArray::cast(target),
- EMBEDDED_OBJECT_SUB_TYPE);
+ if (target->IsFixedArray() || MatchesConstantElementsPair(target)) {
+ RecordVirtualObjectsForConstantPoolOrEmbeddedObjects(
+ code, HeapObject::cast(target), ObjectStats::EMBEDDED_OBJECT_TYPE);
}
}
}
}
-void ObjectStatsCollectorImpl::RecordSharedFunctionInfoDetails(
- SharedFunctionInfo* sfi) {
- FixedArray* scope_info = sfi->scope_info();
- RecordFixedArrayHelper(sfi, scope_info, SCOPE_INFO_SUB_TYPE, 0);
- FeedbackMetadata* feedback_metadata = sfi->feedback_metadata();
- if (!feedback_metadata->is_empty()) {
- RecordFixedArrayHelper(sfi, feedback_metadata, FEEDBACK_METADATA_SUB_TYPE,
- 0);
- }
-}
-
-void ObjectStatsCollectorImpl::RecordFixedArrayDetails(FixedArray* array) {
- if (array->IsContext()) {
- RecordFixedArrayHelper(nullptr, array, CONTEXT_SUB_TYPE, 0);
- }
- if (IsCowArray(heap_, array) && CanRecordFixedArray(heap_, array)) {
- stats_->RecordFixedArraySubTypeStats(array, COPY_ON_WRITE_SUB_TYPE,
- array->Size(), 0);
- }
- if (array->IsNativeContext()) {
- Context* native_ctx = Context::cast(array);
- RecordHashTableHelper(array,
- native_ctx->slow_template_instantiations_cache(),
- SLOW_TEMPLATE_INSTANTIATIONS_CACHE_SUB_TYPE);
- FixedArray* fast_cache = native_ctx->fast_template_instantiations_cache();
- stats_->RecordFixedArraySubTypeStats(
- fast_cache, FAST_TEMPLATE_INSTANTIATIONS_CACHE_SUB_TYPE,
- fast_cache->Size(), 0);
+void ObjectStatsCollectorImpl::RecordVirtualContext(Context* context) {
+ if (context->IsNativeContext()) {
+ RecordSimpleVirtualObjectStats(nullptr, context,
+ ObjectStats::NATIVE_CONTEXT_TYPE);
+ } else if (context->IsFunctionContext()) {
+ RecordSimpleVirtualObjectStats(nullptr, context,
+ ObjectStats::FUNCTION_CONTEXT_TYPE);
+ } else {
+ RecordSimpleVirtualObjectStats(nullptr, context,
+ ObjectStats::OTHER_CONTEXT_TYPE);
}
}
class ObjectStatsVisitor {
public:
- enum CollectionMode {
- kRegular,
- kVirtual,
- };
-
ObjectStatsVisitor(Heap* heap, ObjectStatsCollectorImpl* live_collector,
ObjectStatsCollectorImpl* dead_collector,
- CollectionMode mode)
+ ObjectStatsCollectorImpl::Phase phase)
: live_collector_(live_collector),
dead_collector_(dead_collector),
marking_state_(
heap->mark_compact_collector()->non_atomic_marking_state()),
- mode_(mode) {}
+ phase_(phase) {}
bool Visit(HeapObject* obj, int size) {
if (marking_state_->IsBlack(obj)) {
- Collect(live_collector_, obj);
+ live_collector_->CollectStatistics(obj, phase_);
} else {
DCHECK(!marking_state_->IsGrey(obj));
- Collect(dead_collector_, obj);
+ dead_collector_->CollectStatistics(obj, phase_);
}
return true;
}
private:
- void Collect(ObjectStatsCollectorImpl* collector, HeapObject* obj) {
- switch (mode_) {
- case kRegular:
- collector->CollectStatistics(obj);
- break;
- case kVirtual:
- collector->CollectVirtualStatistics(obj);
- break;
- }
- }
-
ObjectStatsCollectorImpl* live_collector_;
ObjectStatsCollectorImpl* dead_collector_;
MarkCompactCollector::NonAtomicMarkingState* marking_state_;
- CollectionMode mode_;
+ ObjectStatsCollectorImpl::Phase phase_;
};
namespace {
@@ -731,19 +854,10 @@ void IterateHeap(Heap* heap, ObjectStatsVisitor* visitor) {
void ObjectStatsCollector::Collect() {
ObjectStatsCollectorImpl live_collector(heap_, live_);
ObjectStatsCollectorImpl dead_collector(heap_, dead_);
- // 1. Collect system type otherwise indistinguishable from other types.
- {
- ObjectStatsVisitor visitor(heap_, &live_collector, &dead_collector,
- ObjectStatsVisitor::kVirtual);
- IterateHeap(heap_, &visitor);
- }
-
- // 2. Collect globals; only applies to live objects.
live_collector.CollectGlobalStatistics();
- // 3. Collect rest.
- {
+ for (int i = 0; i < ObjectStatsCollectorImpl::kNumberOfPhases; i++) {
ObjectStatsVisitor visitor(heap_, &live_collector, &dead_collector,
- ObjectStatsVisitor::kRegular);
+ static_cast<ObjectStatsCollectorImpl::Phase>(i));
IterateHeap(heap_, &visitor);
}
}
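The rewritten Collect() above now runs the same heap iteration once per collection phase and lets the visitor route each object to the live or dead collector based on its mark bit. The following standalone sketch illustrates that pattern with hypothetical names (Phase, StatsCollector, Visitor); it is not V8's API.

// Phased statistics collection: one visitor per phase, live/dead dispatch
// decided per object. Illustrative stand-ins only.
#include <cstdio>
#include <vector>

enum class Phase { kPhase1, kPhase2, kNumberOfPhases };

struct Object { bool live; };

struct StatsCollector {
  int visited = 0;
  void CollectStatistics(const Object&, Phase) { ++visited; }
};

// One visitor per phase dispatches each object to the live or dead collector.
struct Visitor {
  StatsCollector* live;
  StatsCollector* dead;
  Phase phase;
  void Visit(const Object& o) {
    (o.live ? live : dead)->CollectStatistics(o, phase);
  }
};

int main() {
  std::vector<Object> heap = {{true}, {false}, {true}};
  StatsCollector live_collector, dead_collector;
  for (int i = 0; i < static_cast<int>(Phase::kNumberOfPhases); i++) {
    Visitor visitor{&live_collector, &dead_collector, static_cast<Phase>(i)};
    for (const Object& o : heap) visitor.Visit(o);  // IterateHeap() analogue.
  }
  std::printf("live visits: %d, dead visits: %d\n", live_collector.visited,
              dead_collector.visited);
}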
diff --git a/deps/v8/src/heap/object-stats.h b/deps/v8/src/heap/object-stats.h
index 500ce36bd9..723ae53fd5 100644
--- a/deps/v8/src/heap/object-stats.h
+++ b/deps/v8/src/heap/object-stats.h
@@ -5,13 +5,8 @@
#ifndef V8_HEAP_OBJECT_STATS_H_
#define V8_HEAP_OBJECT_STATS_H_
-#include <set>
-
-#include "src/base/ieee754.h"
-#include "src/heap/heap.h"
-#include "src/heap/mark-compact.h"
-#include "src/heap/objects-visiting.h"
#include "src/objects.h"
+#include "src/objects/code.h"
// These instance types do not exist for actual use but are merely introduced
// for object stats tracing. In contrast to Code and FixedArray sub types
@@ -19,18 +14,71 @@
// tracing.
//
// Update LAST_VIRTUAL_TYPE below when changing this macro.
-#define VIRTUAL_INSTANCE_TYPE_LIST(V) \
- V(BOILERPLATE_ELEMENTS_TYPE) \
- V(BOILERPLATE_NAME_DICTIONARY_TYPE) \
- V(BOILERPLATE_PROPERTY_ARRAY_TYPE) \
- V(JS_ARRAY_BOILERPLATE_TYPE) \
- V(JS_OBJECT_BOILERPLATE_TYPE)
+#define VIRTUAL_INSTANCE_TYPE_LIST(V) \
+ CODE_KIND_LIST(V) \
+ V(BOILERPLATE_ELEMENTS_TYPE) \
+ V(BOILERPLATE_PROPERTY_ARRAY_TYPE) \
+ V(BOILERPLATE_PROPERTY_DICTIONARY_TYPE) \
+ V(BYTECODE_ARRAY_CONSTANT_POOL_TYPE) \
+ V(BYTECODE_ARRAY_HANDLER_TABLE_TYPE) \
+ V(CODE_STUBS_TABLE_TYPE) \
+ V(COW_ARRAY_TYPE) \
+ V(DEOPTIMIZATION_DATA_TYPE) \
+ V(DEPENDENT_CODE_TYPE) \
+ V(ELEMENTS_TYPE) \
+ V(EMBEDDED_OBJECT_TYPE) \
+ V(ENUM_CACHE_TYPE) \
+ V(ENUM_INDICES_CACHE_TYPE) \
+ V(FEEDBACK_METADATA_TYPE) \
+ V(FEEDBACK_VECTOR_ENTRY_TYPE) \
+ V(FEEDBACK_VECTOR_HEADER_TYPE) \
+ V(FEEDBACK_VECTOR_SLOT_CALL_TYPE) \
+ V(FEEDBACK_VECTOR_SLOT_CALL_UNUSED_TYPE) \
+ V(FEEDBACK_VECTOR_SLOT_ENUM_TYPE) \
+ V(FEEDBACK_VECTOR_SLOT_LOAD_TYPE) \
+ V(FEEDBACK_VECTOR_SLOT_LOAD_UNUSED_TYPE) \
+ V(FEEDBACK_VECTOR_SLOT_OTHER_TYPE) \
+ V(FEEDBACK_VECTOR_SLOT_STORE_TYPE) \
+ V(FEEDBACK_VECTOR_SLOT_STORE_UNUSED_TYPE) \
+ V(FUNCTION_CONTEXT_TYPE) \
+ V(FUNCTION_TEMPLATE_INFO_ENTRIES_TYPE) \
+ V(GLOBAL_ELEMENTS_TYPE) \
+ V(GLOBAL_PROPERTIES_TYPE) \
+ V(JS_ARRAY_BOILERPLATE_TYPE) \
+ V(JS_COLLETION_TABLE_TYPE) \
+ V(JS_OBJECT_BOILERPLATE_TYPE) \
+ V(NATIVE_CONTEXT_TYPE) \
+ V(NOSCRIPT_SHARED_FUNCTION_INFOS_TYPE) \
+ V(NUMBER_STRING_CACHE_TYPE) \
+ V(OBJECT_PROPERTY_DICTIONARY_TYPE) \
+ V(OBJECT_TO_CODE_TYPE) \
+ V(OPTIMIZED_CODE_LITERALS_TYPE) \
+ V(OTHER_CONTEXT_TYPE) \
+ V(PROTOTYPE_USERS_TYPE) \
+ V(REGEXP_MULTIPLE_CACHE_TYPE) \
+ V(RETAINED_MAPS_TYPE) \
+ V(SCRIPT_LIST_TYPE) \
+ V(SCRIPT_SHARED_FUNCTION_INFOS_TYPE) \
+ V(SCRIPT_SOURCE_EXTERNAL_TYPE) \
+ V(SCRIPT_SOURCE_NON_EXTERNAL_TYPE) \
+ V(SERIALIZED_OBJECTS_TYPE) \
+ V(SINGLE_CHARACTER_STRING_CACHE_TYPE) \
+ V(STRING_SPLIT_CACHE_TYPE) \
+ V(STRING_TABLE_TYPE) \
+ V(UNCOMPILED_JS_FUNCTION_TYPE) \
+ V(UNCOMPILED_SHARED_FUNCTION_INFO_TYPE) \
+ V(WEAK_NEW_SPACE_OBJECT_TO_CODE_TYPE)
namespace v8 {
namespace internal {
+class Heap;
+class Isolate;
+
class ObjectStats {
public:
+ static const size_t kNoOverAllocation = 0;
+
explicit ObjectStats(Heap* heap) : heap_(heap) { ClearObjectStats(); }
// See description on VIRTUAL_INSTANCE_TYPE_LIST.
@@ -38,18 +86,14 @@ class ObjectStats {
#define DEFINE_VIRTUAL_INSTANCE_TYPE(type) type,
VIRTUAL_INSTANCE_TYPE_LIST(DEFINE_VIRTUAL_INSTANCE_TYPE)
#undef DEFINE_FIXED_ARRAY_SUB_INSTANCE_TYPE
- LAST_VIRTUAL_TYPE = JS_OBJECT_BOILERPLATE_TYPE,
+ LAST_VIRTUAL_TYPE = WEAK_NEW_SPACE_OBJECT_TO_CODE_TYPE,
};
// ObjectStats are kept in two arrays, counts and sizes. Related stats are
// stored in a contiguous linear buffer. Stats groups are stored one after
// another.
enum {
- FIRST_CODE_KIND_SUB_TYPE = LAST_TYPE + 1,
- FIRST_FIXED_ARRAY_SUB_TYPE =
- FIRST_CODE_KIND_SUB_TYPE + Code::NUMBER_OF_KINDS,
- FIRST_VIRTUAL_TYPE =
- FIRST_FIXED_ARRAY_SUB_TYPE + LAST_FIXED_ARRAY_SUB_TYPE + 1,
+ FIRST_VIRTUAL_TYPE = LAST_TYPE + 1,
OBJECT_STATS_COUNT = FIRST_VIRTUAL_TYPE + LAST_VIRTUAL_TYPE + 1,
};
@@ -60,10 +104,8 @@ class ObjectStats {
void CheckpointObjectStats();
void RecordObjectStats(InstanceType type, size_t size);
- void RecordVirtualObjectStats(VirtualInstanceType type, size_t size);
- void RecordCodeSubTypeStats(int code_sub_type, size_t size);
- bool RecordFixedArraySubTypeStats(FixedArrayBase* array, int array_sub_type,
- size_t size, size_t over_allocated);
+ void RecordVirtualObjectStats(VirtualInstanceType type, size_t size,
+ size_t over_allocated);
size_t object_count_last_gc(size_t index) {
return object_counts_last_time_[index];
@@ -105,8 +147,6 @@ class ObjectStats {
// Detailed histograms by InstanceType.
size_t size_histogram_[OBJECT_STATS_COUNT][kNumberOfBuckets];
size_t over_allocated_histogram_[OBJECT_STATS_COUNT][kNumberOfBuckets];
-
- std::set<FixedArrayBase*> visited_fixed_array_sub_types_;
};
class ObjectStatsCollector {
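The header change above folds code-kind and fixed-array sub types into one macro-generated list of virtual instance types appended after the real instance types, so counts and sizes live in a single linear buffer. A minimal sketch of that X-macro/enum layout, with an invented MY_VIRTUAL_TYPE_LIST standing in for VIRTUAL_INSTANCE_TYPE_LIST:

// X-macro list expanded into enum values; virtual types index the same
// contiguous stats buffer right after the "real" types.
#include <cstdio>

#define MY_VIRTUAL_TYPE_LIST(V) \
  V(ENUM_CACHE_TYPE)            \
  V(PROTOTYPE_USERS_TYPE)

// Pretend the "real" instance types end here.
constexpr int LAST_TYPE = 10;

enum VirtualInstanceType {
#define DEFINE_TYPE(type) type,
  MY_VIRTUAL_TYPE_LIST(DEFINE_TYPE)
#undef DEFINE_TYPE
  LAST_VIRTUAL_TYPE = PROTOTYPE_USERS_TYPE,
};

enum {
  FIRST_VIRTUAL_TYPE = LAST_TYPE + 1,
  OBJECT_STATS_COUNT = FIRST_VIRTUAL_TYPE + LAST_VIRTUAL_TYPE + 1,
};

int main() {
  int counts[OBJECT_STATS_COUNT] = {};
  counts[FIRST_VIRTUAL_TYPE + ENUM_CACHE_TYPE]++;  // record one virtual object
  std::printf("buffer slots: %d\n", OBJECT_STATS_COUNT);
}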
diff --git a/deps/v8/src/heap/objects-visiting-inl.h b/deps/v8/src/heap/objects-visiting-inl.h
index 0a8c866979..8384cead02 100644
--- a/deps/v8/src/heap/objects-visiting-inl.h
+++ b/deps/v8/src/heap/objects-visiting-inl.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_OBJECTS_VISITING_INL_H_
-#define V8_OBJECTS_VISITING_INL_H_
+#ifndef V8_HEAP_OBJECTS_VISITING_INL_H_
+#define V8_HEAP_OBJECTS_VISITING_INL_H_
#include "src/heap/objects-visiting.h"
@@ -189,4 +189,4 @@ int NewSpaceVisitor<ConcreteVisitor>::VisitJSApiObject(Map* map,
} // namespace internal
} // namespace v8
-#endif // V8_OBJECTS_VISITING_INL_H_
+#endif // V8_HEAP_OBJECTS_VISITING_INL_H_
diff --git a/deps/v8/src/heap/objects-visiting.h b/deps/v8/src/heap/objects-visiting.h
index c20434a283..7746c91c71 100644
--- a/deps/v8/src/heap/objects-visiting.h
+++ b/deps/v8/src/heap/objects-visiting.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_OBJECTS_VISITING_H_
-#define V8_OBJECTS_VISITING_H_
+#ifndef V8_HEAP_OBJECTS_VISITING_H_
+#define V8_HEAP_OBJECTS_VISITING_H_
#include "src/allocation.h"
#include "src/layout-descriptor.h"
@@ -31,6 +31,7 @@ class JSWeakCollection;
V(Code) \
V(CodeDataContainer) \
V(ConsString) \
+ V(FeedbackCell) \
V(FeedbackVector) \
V(FixedArray) \
V(FixedDoubleArray) \
@@ -132,4 +133,4 @@ Object* VisitWeakList(Heap* heap, Object* list, WeakObjectRetainer* retainer);
} // namespace internal
} // namespace v8
-#endif // V8_OBJECTS_VISITING_H_
+#endif // V8_HEAP_OBJECTS_VISITING_H_
diff --git a/deps/v8/src/heap/remembered-set.h b/deps/v8/src/heap/remembered-set.h
index cd9c45141d..4e0f259c00 100644
--- a/deps/v8/src/heap/remembered-set.h
+++ b/deps/v8/src/heap/remembered-set.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_REMEMBERED_SET_H
-#define V8_REMEMBERED_SET_H
+#ifndef V8_HEAP_REMEMBERED_SET_H_
+#define V8_HEAP_REMEMBERED_SET_H_
#include "src/assembler.h"
#include "src/heap/heap.h"
@@ -298,8 +298,7 @@ class UpdateTypedSlotHelper {
Object* new_target = old_target;
SlotCallbackResult result = callback(&new_target);
if (new_target != old_target) {
- rinfo->set_target_address(old_target->GetIsolate(),
- Code::cast(new_target)->instruction_start());
+ rinfo->set_target_address(Code::cast(new_target)->instruction_start());
}
return result;
}
@@ -359,4 +358,4 @@ inline SlotType SlotTypeForRelocInfoMode(RelocInfo::Mode rmode) {
} // namespace internal
} // namespace v8
-#endif // V8_REMEMBERED_SET_H
+#endif // V8_HEAP_REMEMBERED_SET_H_
diff --git a/deps/v8/src/heap/scavenge-job.h b/deps/v8/src/heap/scavenge-job.h
index e84659c6d4..34f7bfafc3 100644
--- a/deps/v8/src/heap/scavenge-job.h
+++ b/deps/v8/src/heap/scavenge-job.h
@@ -64,7 +64,7 @@ class V8_EXPORT_PRIVATE ScavengeJob {
static const int kAverageIdleTimeMs = 5;
// The number of bytes to be allocated in new space before the next idle
// task is posted.
- static const size_t kBytesAllocatedBeforeNextIdleTask = 512 * KB;
+ static const size_t kBytesAllocatedBeforeNextIdleTask = 1024 * KB;
// The minimum size of allocated new space objects to trigger a scavenge.
static const size_t kMinAllocationLimit = 512 * KB;
// The allocation limit cannot exceed this fraction of the new space capacity.
diff --git a/deps/v8/src/heap/scavenger-inl.h b/deps/v8/src/heap/scavenger-inl.h
index b61872074e..2971db98cc 100644
--- a/deps/v8/src/heap/scavenger-inl.h
+++ b/deps/v8/src/heap/scavenger-inl.h
@@ -71,7 +71,7 @@ bool Scavenger::MigrateObject(Map* map, HeapObject* source, HeapObject* target,
bool Scavenger::SemiSpaceCopyObject(Map* map, HeapObject** slot,
HeapObject* object, int object_size) {
DCHECK(heap()->AllowedToBeMigrated(object, NEW_SPACE));
- AllocationAlignment alignment = object->RequiredAlignment();
+ AllocationAlignment alignment = HeapObject::RequiredAlignment(map);
AllocationResult allocation =
allocator_.Allocate(NEW_SPACE, object_size, alignment);
@@ -97,7 +97,7 @@ bool Scavenger::SemiSpaceCopyObject(Map* map, HeapObject** slot,
bool Scavenger::PromoteObject(Map* map, HeapObject** slot, HeapObject* object,
int object_size) {
- AllocationAlignment alignment = object->RequiredAlignment();
+ AllocationAlignment alignment = HeapObject::RequiredAlignment(map);
AllocationResult allocation =
allocator_.Allocate(OLD_SPACE, object_size, alignment);
@@ -228,9 +228,8 @@ void Scavenger::ScavengeObject(HeapObject** p, HeapObject* object) {
// If the first word is a forwarding address, the object has already been
// copied.
if (first_word.IsForwardingAddress()) {
- HeapObject* dest = first_word.ToForwardingAddress();
- DCHECK(object->GetIsolate()->heap()->InFromSpace(*p));
- base::AsAtomicPointer::Relaxed_Store(p, dest);
+ DCHECK(heap()->InFromSpace(*p));
+ *p = first_word.ToForwardingAddress();
return;
}
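The ScavengeObject hunk above relies on the usual copying-collector convention: once an object has been evacuated, its first word is a forwarding address, so later visits of other slots only need to follow that pointer. A self-contained sketch of the idea, using invented types (FakeObject, ScavengeSlot) rather than V8's:

// Forwarding-address handling during a copying phase, illustrative only.
#include <cassert>
#include <cstdio>

struct FakeObject {
  FakeObject* forwarding = nullptr;  // set once the object has been copied
  int payload = 0;
};

void ScavengeSlot(FakeObject** slot) {
  FakeObject* object = *slot;
  if (object->forwarding != nullptr) {
    *slot = object->forwarding;  // already copied: just update the slot
    return;
  }
  // A real scavenger would copy the object here and install the forwarding
  // pointer before updating the slot.
}

int main() {
  FakeObject from_space_copy, to_space_copy;
  from_space_copy.forwarding = &to_space_copy;
  FakeObject* slot = &from_space_copy;
  ScavengeSlot(&slot);
  assert(slot == &to_space_copy);
  std::printf("slot updated to forwarded copy\n");
}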
diff --git a/deps/v8/src/heap/scavenger.cc b/deps/v8/src/heap/scavenger.cc
index be5fb87a90..3baba9521b 100644
--- a/deps/v8/src/heap/scavenger.cc
+++ b/deps/v8/src/heap/scavenger.cc
@@ -97,6 +97,7 @@ void Scavenger::AddPageToSweeperIfNecessary(MemoryChunk* page) {
}
void Scavenger::ScavengePage(MemoryChunk* page) {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"), "Scavenger::ScavengePage");
CodePageMemoryModificationScope memory_modification_scope(page);
RememberedSet<OLD_TO_NEW>::Iterate(
page,
@@ -115,6 +116,7 @@ void Scavenger::ScavengePage(MemoryChunk* page) {
}
void Scavenger::Process(OneshotBarrier* barrier) {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"), "Scavenger::Process");
// Threshold when to switch processing the promotion list to avoid
// allocating too much backing store in the worklist.
const int kProcessPromotionListThreshold = kPromotionListSegmentSize / 2;
@@ -160,12 +162,13 @@ void Scavenger::Finalize() {
allocator_.Finalize();
}
-void RootScavengeVisitor::VisitRootPointer(Root root, Object** p) {
+void RootScavengeVisitor::VisitRootPointer(Root root, const char* description,
+ Object** p) {
ScavengePointer(p);
}
-void RootScavengeVisitor::VisitRootPointers(Root root, Object** start,
- Object** end) {
+void RootScavengeVisitor::VisitRootPointers(Root root, const char* description,
+ Object** start, Object** end) {
// Copy all HeapObject pointers in [start, end)
for (Object** p = start; p < end; p++) ScavengePointer(p);
}
diff --git a/deps/v8/src/heap/scavenger.h b/deps/v8/src/heap/scavenger.h
index 27ae2e8ab7..e0008ae694 100644
--- a/deps/v8/src/heap/scavenger.h
+++ b/deps/v8/src/heap/scavenger.h
@@ -117,8 +117,9 @@ class RootScavengeVisitor final : public RootVisitor {
RootScavengeVisitor(Heap* heap, Scavenger* scavenger)
: heap_(heap), scavenger_(scavenger) {}
- void VisitRootPointer(Root root, Object** p) final;
- void VisitRootPointers(Root root, Object** start, Object** end) final;
+ void VisitRootPointer(Root root, const char* description, Object** p) final;
+ void VisitRootPointers(Root root, const char* description, Object** start,
+ Object** end) final;
private:
void ScavengePointer(Object** p);
diff --git a/deps/v8/src/heap/setup-heap-internal.cc b/deps/v8/src/heap/setup-heap-internal.cc
index 9e2d7e6354..8a7aca1694 100644
--- a/deps/v8/src/heap/setup-heap-internal.cc
+++ b/deps/v8/src/heap/setup-heap-internal.cc
@@ -222,7 +222,7 @@ bool Heap::CreateInitialMaps() {
(constructor_function_index)); \
}
- ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, scope_info)
+ ALLOCATE_VARSIZE_MAP(SCOPE_INFO_TYPE, scope_info)
ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, module_info)
ALLOCATE_VARSIZE_MAP(FEEDBACK_VECTOR_TYPE, feedback_vector)
ALLOCATE_PRIMITIVE_MAP(HEAP_NUMBER_TYPE, HeapNumber::kSize, heap_number,
@@ -289,12 +289,17 @@ bool Heap::CreateInitialMaps() {
ALLOCATE_MAP(CELL_TYPE, Cell::kSize, cell)
ALLOCATE_MAP(PROPERTY_CELL_TYPE, PropertyCell::kSize, global_property_cell)
ALLOCATE_MAP(WEAK_CELL_TYPE, WeakCell::kSize, weak_cell)
- ALLOCATE_MAP(CELL_TYPE, Cell::kSize, no_closures_cell)
- ALLOCATE_MAP(CELL_TYPE, Cell::kSize, one_closure_cell)
- ALLOCATE_MAP(CELL_TYPE, Cell::kSize, many_closures_cell)
ALLOCATE_MAP(FILLER_TYPE, kPointerSize, one_pointer_filler)
ALLOCATE_MAP(FILLER_TYPE, 2 * kPointerSize, two_pointer_filler)
+ // The "no closures" and "one closure" FeedbackCell maps need
+ // to be marked unstable because their objects can change maps.
+ ALLOCATE_MAP(FEEDBACK_CELL_TYPE, FeedbackCell::kSize, no_closures_cell)
+ no_closures_cell_map()->mark_unstable();
+ ALLOCATE_MAP(FEEDBACK_CELL_TYPE, FeedbackCell::kSize, one_closure_cell)
+ one_closure_cell_map()->mark_unstable();
+ ALLOCATE_MAP(FEEDBACK_CELL_TYPE, FeedbackCell::kSize, many_closures_cell)
+
ALLOCATE_VARSIZE_MAP(TRANSITION_ARRAY_TYPE, transition_array)
ALLOCATE_VARSIZE_MAP(HASH_TABLE_TYPE, hash_table)
@@ -303,6 +308,7 @@ bool Heap::CreateInitialMaps() {
ALLOCATE_VARSIZE_MAP(HASH_TABLE_TYPE, name_dictionary)
ALLOCATE_VARSIZE_MAP(HASH_TABLE_TYPE, global_dictionary)
ALLOCATE_VARSIZE_MAP(HASH_TABLE_TYPE, number_dictionary)
+ ALLOCATE_VARSIZE_MAP(HASH_TABLE_TYPE, simple_number_dictionary)
ALLOCATE_VARSIZE_MAP(HASH_TABLE_TYPE, string_table)
ALLOCATE_VARSIZE_MAP(HASH_TABLE_TYPE, weak_hash_table)
@@ -475,7 +481,7 @@ void Heap::CreateInitialObjects() {
// Create the code_stubs dictionary. The initial size is set to avoid
// expanding the dictionary during bootstrapping.
- set_code_stubs(*NumberDictionary::New(isolate(), 128));
+ set_code_stubs(*SimpleNumberDictionary::New(isolate(), 128));
{
HandleScope scope(isolate());
@@ -533,7 +539,10 @@ void Heap::CreateInitialObjects() {
set_regexp_multiple_cache(*factory->NewFixedArray(
RegExpResultsCache::kRegExpResultsCacheSize, TENURED));
- set_undefined_cell(*factory->NewCell(factory->undefined_value()));
+ // Allocate FeedbackCell for builtins.
+ Handle<FeedbackCell> many_closures_cell =
+ factory->NewManyClosuresCell(factory->undefined_value());
+ set_many_closures_cell(*many_closures_cell);
// Microtask queue uses the empty fixed array as a sentinel for "empty".
// Number of queued microtasks stored in Isolate::pending_microtask_count().
@@ -638,6 +647,14 @@ void Heap::CreateInitialObjects() {
cell->set_value(Smi::FromInt(Isolate::kProtectorValid));
set_array_buffer_neutering_protector(*cell);
+ cell = factory->NewPropertyCell(factory->empty_string());
+ cell->set_value(Smi::FromInt(Isolate::kProtectorValid));
+ set_promise_hook_protector(*cell);
+
+ cell = factory->NewPropertyCell(factory->empty_string());
+ cell->set_value(Smi::FromInt(Isolate::kProtectorValid));
+ set_promise_then_protector(*cell);
+
set_serialized_objects(empty_fixed_array());
set_serialized_global_proxy_sizes(empty_fixed_array());
@@ -650,6 +667,9 @@ void Heap::CreateInitialObjects() {
set_deserialize_lazy_handler_wide(Smi::kZero);
set_deserialize_lazy_handler_extra_wide(Smi::kZero);
+ // Initialize builtins constants table.
+ set_builtins_constants_table(empty_fixed_array());
+
// Initialize context slot cache.
isolate_->context_slot_cache()->Clear();
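The new promise_hook_protector and promise_then_protector cells above follow the protector-cell pattern: a cell starts out valid, fast paths consult it, and invalidating it once turns the fast path off everywhere. A rough sketch of that pattern under the assumption that kProtectorValid/kProtectorInvalid behave as simple flag values; the types here are illustrative stand-ins.

// Protector-cell style fast-path guard, illustrative only.
#include <cstdio>

constexpr int kProtectorValid = 1;
constexpr int kProtectorInvalid = 0;

struct PropertyCellLike {
  int value = kProtectorValid;
};

bool FastPathAllowed(const PropertyCellLike& protector) {
  return protector.value == kProtectorValid;
}

int main() {
  PropertyCellLike promise_then_protector;
  std::printf("fast path: %d\n", FastPathAllowed(promise_then_protector));
  promise_then_protector.value = kProtectorInvalid;  // e.g. a patched .then()
  std::printf("fast path: %d\n", FastPathAllowed(promise_then_protector));
}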
diff --git a/deps/v8/src/heap/slot-set.h b/deps/v8/src/heap/slot-set.h
index f1edb6f2fb..7423665bcb 100644
--- a/deps/v8/src/heap/slot-set.h
+++ b/deps/v8/src/heap/slot-set.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_SLOT_SET_H
-#define V8_SLOT_SET_H
+#ifndef V8_HEAP_SLOT_SET_H_
+#define V8_HEAP_SLOT_SET_H_
#include <map>
#include <stack>
@@ -641,4 +641,4 @@ class TypedSlotSet {
} // namespace internal
} // namespace v8
-#endif // V8_SLOT_SET_H
+#endif // V8_HEAP_SLOT_SET_H_
diff --git a/deps/v8/src/heap/spaces-inl.h b/deps/v8/src/heap/spaces-inl.h
index 39a62327df..498c34bd54 100644
--- a/deps/v8/src/heap/spaces-inl.h
+++ b/deps/v8/src/heap/spaces-inl.h
@@ -5,6 +5,7 @@
#ifndef V8_HEAP_SPACES_INL_H_
#define V8_HEAP_SPACES_INL_H_
+#include "src/base/v8-fallthrough.h"
#include "src/heap/incremental-marking.h"
#include "src/heap/spaces.h"
#include "src/msan.h"
@@ -137,12 +138,6 @@ bool NewSpace::FromSpaceContainsSlow(Address a) {
bool NewSpace::ToSpaceContains(Object* o) { return to_space_.Contains(o); }
bool NewSpace::FromSpaceContains(Object* o) { return from_space_.Contains(o); }
-void MemoryChunk::InitializeFreeListCategories() {
- for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
- categories_[i].Initialize(static_cast<FreeListCategoryType>(i));
- }
-}
-
bool PagedSpace::Contains(Address addr) {
if (heap_->lo_space()->FindPage(addr)) return false;
return MemoryChunk::FromAnyPointerAddress(heap(), addr)->owner() == this;
@@ -157,6 +152,7 @@ void PagedSpace::UnlinkFreeListCategories(Page* page) {
DCHECK_EQ(this, page->owner());
page->ForAllFreeListCategories([this](FreeListCategory* category) {
DCHECK_EQ(free_list(), category->owner());
+ category->set_free_list(nullptr);
free_list()->RemoveCategory(category);
});
}
@@ -164,7 +160,8 @@ void PagedSpace::UnlinkFreeListCategories(Page* page) {
size_t PagedSpace::RelinkFreeListCategories(Page* page) {
DCHECK_EQ(this, page->owner());
size_t added = 0;
- page->ForAllFreeListCategories([&added](FreeListCategory* category) {
+ page->ForAllFreeListCategories([this, &added](FreeListCategory* category) {
+ category->set_free_list(&free_list_);
added += category->available();
category->Relink();
});
@@ -230,23 +227,23 @@ MemoryChunk* MemoryChunkIterator::next() {
case kOldSpaceState: {
if (old_iterator_ != heap_->old_space()->end()) return *(old_iterator_++);
state_ = kMapState;
- // Fall through.
+ V8_FALLTHROUGH;
}
case kMapState: {
if (map_iterator_ != heap_->map_space()->end()) return *(map_iterator_++);
state_ = kCodeState;
- // Fall through.
+ V8_FALLTHROUGH;
}
case kCodeState: {
if (code_iterator_ != heap_->code_space()->end())
return *(code_iterator_++);
state_ = kLargeObjectState;
- // Fall through.
+ V8_FALLTHROUGH;
}
case kLargeObjectState: {
if (lo_iterator_ != heap_->lo_space()->end()) return *(lo_iterator_++);
state_ = kFinishedState;
- // Fall through;
+ V8_FALLTHROUGH;
}
case kFinishedState:
return nullptr;
@@ -256,23 +253,14 @@ MemoryChunk* MemoryChunkIterator::next() {
UNREACHABLE();
}
-Page* FreeListCategory::page() const {
- return Page::FromAddress(
- reinterpret_cast<Address>(const_cast<FreeListCategory*>(this)));
-}
-
Page* FreeList::GetPageForCategoryType(FreeListCategoryType type) {
return top(type) ? top(type)->page() : nullptr;
}
-FreeList* FreeListCategory::owner() {
- return reinterpret_cast<PagedSpace*>(
- Page::FromAddress(reinterpret_cast<Address>(this))->owner())
- ->free_list();
-}
+FreeList* FreeListCategory::owner() { return free_list_; }
bool FreeListCategory::is_linked() {
- return prev_ != nullptr || next_ != nullptr || owner()->top(type_) == this;
+ return prev_ != nullptr || next_ != nullptr;
}
AllocationResult LocalAllocationBuffer::AllocateRawAligned(
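The MemoryChunkIterator hunk above replaces "// Fall through." comments with V8_FALLTHROUGH, which wraps an explicit fallthrough annotation so -Wimplicit-fallthrough stays quiet while the intent remains documented. A small sketch of the same state-machine idiom using the standard [[fallthrough]] attribute; the function and names are illustrative:

// Intentional switch fallthrough in a cascading state machine.
#include <cstdio>

enum State { kFirst, kSecond, kDone };

int NextNonEmpty(State state, int first_count, int second_count) {
  switch (state) {
    case kFirst:
      if (first_count > 0) return first_count;
      [[fallthrough]];  // first queue exhausted, try the next one
    case kSecond:
      if (second_count > 0) return second_count;
      [[fallthrough]];
    case kDone:
      return 0;
  }
  return 0;
}

int main() {
  std::printf("%d\n", NextNonEmpty(kFirst, 0, 7));  // falls through to kSecond
}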
diff --git a/deps/v8/src/heap/spaces.cc b/deps/v8/src/heap/spaces.cc
index 2dd5e9b24d..d90cac90f2 100644
--- a/deps/v8/src/heap/spaces.cc
+++ b/deps/v8/src/heap/spaces.cc
@@ -71,6 +71,8 @@ bool HeapObjectIterator::AdvanceToNextPage() {
PauseAllocationObserversScope::PauseAllocationObserversScope(Heap* heap)
: heap_(heap) {
+ DCHECK_EQ(heap->gc_state(), Heap::NOT_IN_GC);
+
for (SpaceIterator it(heap_); it.has_next();) {
it.next()->PauseAllocationObservers();
}
@@ -322,7 +324,12 @@ class MemoryAllocator::Unmapper::UnmapFreeMemoryTask : public CancelableTask {
TRACE_BACKGROUND_GC(tracer_,
GCTracer::BackgroundScope::BACKGROUND_UNMAPPER);
unmapper_->PerformFreeMemoryOnQueuedChunks<FreeMode::kUncommitPooled>();
+ unmapper_->active_unmapping_tasks_.Decrement(1);
unmapper_->pending_unmapping_tasks_semaphore_.Signal();
+ if (FLAG_trace_unmapper) {
+ PrintIsolate(unmapper_->heap_->isolate(),
+ "UnmapFreeMemoryTask Done: id=%" PRIu64 "\n", id());
+ }
}
Unmapper* const unmapper_;
@@ -332,13 +339,26 @@ class MemoryAllocator::Unmapper::UnmapFreeMemoryTask : public CancelableTask {
void MemoryAllocator::Unmapper::FreeQueuedChunks() {
if (heap_->use_tasks() && FLAG_concurrent_sweeping) {
- if (concurrent_unmapping_tasks_active_ >= kMaxUnmapperTasks) {
+ if (!MakeRoomForNewTasks()) {
// kMaxUnmapperTasks are already running. Avoid creating any more.
+ if (FLAG_trace_unmapper) {
+ PrintIsolate(heap_->isolate(),
+ "Unmapper::FreeQueuedChunks: reached task limit (%d)\n",
+ kMaxUnmapperTasks);
+ }
return;
}
UnmapFreeMemoryTask* task = new UnmapFreeMemoryTask(heap_->isolate(), this);
- DCHECK_LT(concurrent_unmapping_tasks_active_, kMaxUnmapperTasks);
- task_ids_[concurrent_unmapping_tasks_active_++] = task->id();
+ if (FLAG_trace_unmapper) {
+ PrintIsolate(heap_->isolate(),
+ "Unmapper::FreeQueuedChunks: new task id=%" PRIu64 "\n",
+ task->id());
+ }
+ DCHECK_LT(pending_unmapping_tasks_, kMaxUnmapperTasks);
+ DCHECK_LE(active_unmapping_tasks_.Value(), pending_unmapping_tasks_);
+ DCHECK_GE(active_unmapping_tasks_.Value(), 0);
+ active_unmapping_tasks_.Increment(1);
+ task_ids_[pending_unmapping_tasks_++] = task->id();
V8::GetCurrentPlatform()->CallOnBackgroundThread(
task, v8::Platform::kShortRunningTask);
} else {
@@ -347,18 +367,41 @@ void MemoryAllocator::Unmapper::FreeQueuedChunks() {
}
void MemoryAllocator::Unmapper::WaitUntilCompleted() {
- for (int i = 0; i < concurrent_unmapping_tasks_active_; i++) {
+ for (int i = 0; i < pending_unmapping_tasks_; i++) {
if (heap_->isolate()->cancelable_task_manager()->TryAbort(task_ids_[i]) !=
CancelableTaskManager::kTaskAborted) {
pending_unmapping_tasks_semaphore_.Wait();
}
}
- concurrent_unmapping_tasks_active_ = 0;
+ pending_unmapping_tasks_ = 0;
+ active_unmapping_tasks_.SetValue(0);
+
+ if (FLAG_trace_unmapper) {
+ PrintIsolate(heap_->isolate(),
+ "Unmapper::WaitUntilCompleted: no tasks remaining\n");
+ }
+}
+
+bool MemoryAllocator::Unmapper::MakeRoomForNewTasks() {
+ DCHECK_LE(pending_unmapping_tasks_, kMaxUnmapperTasks);
+
+ if (active_unmapping_tasks_.Value() == 0 && pending_unmapping_tasks_ > 0) {
+ // All previous unmapping tasks have been run to completion.
+ // Finalize those tasks to make room for new ones.
+ WaitUntilCompleted();
+ }
+ return pending_unmapping_tasks_ != kMaxUnmapperTasks;
}
template <MemoryAllocator::Unmapper::FreeMode mode>
void MemoryAllocator::Unmapper::PerformFreeMemoryOnQueuedChunks() {
MemoryChunk* chunk = nullptr;
+ if (FLAG_trace_unmapper) {
+ PrintIsolate(
+ heap_->isolate(),
+ "Unmapper::PerformFreeMemoryOnQueuedChunks: %d queued chunks\n",
+ NumberOfChunks());
+ }
// Regular chunks.
while ((chunk = GetMemoryChunkSafe<kRegular>()) != nullptr) {
bool pooled = chunk->IsFlagSet(MemoryChunk::POOLED);
@@ -380,7 +423,7 @@ void MemoryAllocator::Unmapper::PerformFreeMemoryOnQueuedChunks() {
}
void MemoryAllocator::Unmapper::TearDown() {
- CHECK_EQ(0, concurrent_unmapping_tasks_active_);
+ CHECK_EQ(0, pending_unmapping_tasks_);
PerformFreeMemoryOnQueuedChunks<FreeMode::kReleasePooled>();
for (int i = 0; i < kNumberOfChunkQueues; i++) {
DCHECK(chunks_[i].empty());
@@ -583,7 +626,10 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
chunk->set_next_chunk(nullptr);
chunk->set_prev_chunk(nullptr);
chunk->local_tracker_ = nullptr;
- chunk->InitializeFreeListCategories();
+
+ for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
+ chunk->categories_[i] = nullptr;
+ }
heap->incremental_marking()->non_atomic_marking_state()->ClearLiveness(chunk);
@@ -606,6 +652,7 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
if (reservation != nullptr) {
chunk->reservation_.TakeControl(reservation);
}
+
return chunk;
}
@@ -615,6 +662,8 @@ Page* PagedSpace::InitializePage(MemoryChunk* chunk, Executability executable) {
// Make sure that categories are initialized before freeing the area.
page->ResetAllocatedBytes();
heap()->incremental_marking()->SetOldSpacePageFlags(page);
+ page->AllocateFreeListCategories();
+ page->InitializeFreeListCategories();
page->InitializationMemoryFence();
return page;
}
@@ -662,6 +711,28 @@ LargePage* LargePage::Initialize(Heap* heap, MemoryChunk* chunk,
return page;
}
+void Page::AllocateFreeListCategories() {
+ for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
+ categories_[i] = new FreeListCategory(
+ reinterpret_cast<PagedSpace*>(owner())->free_list(), this);
+ }
+}
+
+void Page::InitializeFreeListCategories() {
+ for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
+ categories_[i]->Initialize(static_cast<FreeListCategoryType>(i));
+ }
+}
+
+void Page::ReleaseFreeListCategories() {
+ for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
+ if (categories_[i] != nullptr) {
+ delete categories_[i];
+ categories_[i] = nullptr;
+ }
+ }
+}
+
Page* Page::ConvertNewToOld(Page* old_page) {
DCHECK(!old_page->is_anchor());
DCHECK(old_page->InNewSpace());
@@ -679,6 +750,10 @@ size_t MemoryChunk::CommittedPhysicalMemory() {
return high_water_mark_.Value();
}
+bool MemoryChunk::IsPagedSpace() const {
+ return owner()->identity() != LO_SPACE;
+}
+
void MemoryChunk::InsertAfter(MemoryChunk* other) {
MemoryChunk* other_next = other->next_chunk();
@@ -710,7 +785,8 @@ MemoryChunk* MemoryAllocator::AllocateChunk(size_t reserve_area_size,
VirtualMemory reservation;
Address area_start = nullptr;
Address area_end = nullptr;
- void* address_hint = heap->GetRandomMmapAddr();
+ void* address_hint =
+ AlignedAddress(heap->GetRandomMmapAddr(), MemoryChunk::kAlignment);
//
// MemoryChunk layout:
@@ -826,8 +902,12 @@ MemoryChunk* MemoryAllocator::AllocateChunk(size_t reserve_area_size,
owner);
}
- return MemoryChunk::Initialize(heap, base, chunk_size, area_start, area_end,
- executable, owner, &reservation);
+ MemoryChunk* chunk =
+ MemoryChunk::Initialize(heap, base, chunk_size, area_start, area_end,
+ executable, owner, &reservation);
+
+ if (chunk->executable()) RegisterExecutableMemoryChunk(chunk);
+ return chunk;
}
void Page::ResetAllocatedBytes() { allocated_bytes_ = area_size(); }
@@ -970,6 +1050,8 @@ void MemoryAllocator::PreFreeMemory(MemoryChunk* chunk) {
}
chunk->SetFlag(MemoryChunk::PRE_FREED);
+
+ if (chunk->executable()) UnregisterExecutableMemoryChunk(chunk);
}
@@ -1005,7 +1087,7 @@ void MemoryAllocator::Free(MemoryChunk* chunk) {
DCHECK_EQ(chunk->size(), static_cast<size_t>(MemoryChunk::kPageSize));
DCHECK_EQ(chunk->executable(), NOT_EXECUTABLE);
chunk->SetFlag(MemoryChunk::POOLED);
- // Fall through to kPreFreeAndQueue.
+ V8_FALLTHROUGH;
case kPreFreeAndQueue:
PreFreeMemory(chunk);
// The chunks added to this queue will be freed by a concurrent thread.
@@ -1198,6 +1280,11 @@ void MemoryChunk::ReleaseAllocatedMemory() {
ReleaseInvalidatedSlots();
if (local_tracker_ != nullptr) ReleaseLocalTracker();
if (young_generation_bitmap_ != nullptr) ReleaseYoungGenerationBitmap();
+
+ if (IsPagedSpace()) {
+ Page* page = static_cast<Page*>(this);
+ page->ReleaseFreeListCategories();
+ }
}
static SlotSet* AllocateAndInitializeSlotSet(size_t size, Address page_start) {
@@ -1345,12 +1432,17 @@ void Space::ResumeAllocationObservers() {
void Space::AllocationStep(int bytes_since_last, Address soon_object,
int size) {
- if (AllocationObserversActive()) {
- heap()->CreateFillerObjectAt(soon_object, size, ClearRecordedSlots::kNo);
- for (AllocationObserver* observer : allocation_observers_) {
- observer->AllocationStep(bytes_since_last, soon_object, size);
- }
+ if (!AllocationObserversActive()) {
+ return;
+ }
+
+ DCHECK(!heap()->allocation_step_in_progress());
+ heap()->set_allocation_step_in_progress(true);
+ heap()->CreateFillerObjectAt(soon_object, size, ClearRecordedSlots::kNo);
+ for (AllocationObserver* observer : allocation_observers_) {
+ observer->AllocationStep(bytes_since_last, soon_object, size);
}
+ heap()->set_allocation_step_in_progress(false);
}
intptr_t Space::GetNextInlineAllocationStepSize() {
@@ -1359,15 +1451,13 @@ intptr_t Space::GetNextInlineAllocationStepSize() {
next_step = next_step ? Min(next_step, observer->bytes_to_next_step())
: observer->bytes_to_next_step();
}
- DCHECK(allocation_observers_.size() == 0 || next_step != 0);
+ DCHECK(allocation_observers_.size() == 0 || next_step > 0);
return next_step;
}
PagedSpace::PagedSpace(Heap* heap, AllocationSpace space,
Executability executable)
- : SpaceWithLinearArea(heap, space, executable),
- anchor_(this),
- free_list_(this) {
+ : SpaceWithLinearArea(heap, space, executable), anchor_(this) {
area_size_ = MemoryAllocator::PageAreaSize(space);
accounting_stats_.Clear();
}
@@ -1570,7 +1660,8 @@ bool PagedSpace::Expand() {
// Pages created during bootstrapping may contain immortal immovable objects.
if (!heap()->deserialization_complete()) page->MarkNeverEvacuate();
AddPage(page);
- Free(page->area_start(), page->area_size());
+ Free(page->area_start(), page->area_size(),
+ SpaceAccountingMode::kSpaceAccounted);
DCHECK(Capacity() <= heap()->MaxOldGenerationSize());
return true;
}
@@ -1606,7 +1697,8 @@ void PagedSpace::DecreaseLimit(Address new_limit) {
DCHECK_GE(old_limit, new_limit);
if (new_limit != old_limit) {
SetTopAndLimit(top(), new_limit);
- Free(new_limit, old_limit - new_limit);
+ Free(new_limit, old_limit - new_limit,
+ SpaceAccountingMode::kSpaceAccounted);
if (heap()->incremental_marking()->black_allocation()) {
Page::FromAllocationAreaAddress(new_limit)->DestroyBlackArea(new_limit,
old_limit);
@@ -1692,7 +1784,8 @@ void PagedSpace::FreeLinearAllocationArea() {
InlineAllocationStep(current_top, nullptr, nullptr, 0);
SetTopAndLimit(nullptr, nullptr);
DCHECK_GE(current_limit, current_top);
- Free(current_top, current_limit - current_top);
+ Free(current_top, current_limit - current_top,
+ SpaceAccountingMode::kSpaceAccounted);
}
void PagedSpace::ReleasePage(Page* page) {
@@ -1722,6 +1815,7 @@ void PagedSpace::ReleasePage(Page* page) {
void PagedSpace::SetReadAndExecutable() {
DCHECK(identity() == CODE_SPACE);
for (Page* page : *this) {
+ CHECK(heap_->memory_allocator()->IsMemoryChunkExecutable(page));
page->SetReadAndExecutable();
}
}
@@ -1729,6 +1823,7 @@ void PagedSpace::SetReadAndExecutable() {
void PagedSpace::SetReadAndWritable() {
DCHECK(identity() == CODE_SPACE);
for (Page* page : *this) {
+ CHECK(heap_->memory_allocator()->IsMemoryChunkExecutable(page));
page->SetReadAndWritable();
}
}
@@ -1786,7 +1881,7 @@ bool PagedSpace::RefillLinearAllocationAreaFromFreeList(size_t size_in_bytes) {
DCHECK_LE(limit, end);
DCHECK_LE(size_in_bytes, limit - start);
if (limit != end) {
- Free(limit, end - limit);
+ Free(limit, end - limit, SpaceAccountingMode::kSpaceAccounted);
}
SetLinearAllocationArea(start, limit);
@@ -2078,22 +2173,21 @@ LocalAllocationBuffer& LocalAllocationBuffer::operator=(
}
void NewSpace::UpdateLinearAllocationArea() {
- Address old_top = top();
- Address new_top = to_space_.page_low();
+ // Make sure there are no unaccounted allocations.
+ DCHECK(!AllocationObserversActive() || top_on_previous_step_ == top());
+ Address new_top = to_space_.page_low();
MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
allocation_info_.Reset(new_top, to_space_.page_high());
original_top_.SetValue(top());
original_limit_.SetValue(limit());
- UpdateInlineAllocationLimit(0);
- // TODO(ofrobots): It would be more correct to do a step before setting the
- // limit on the new allocation area. However, fixing this causes a regression
- // due to the idle scavenger getting pinged too frequently. crbug.com/795323.
- InlineAllocationStep(old_top, new_top, nullptr, 0);
+ StartNextInlineAllocationStep();
DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}
void NewSpace::ResetLinearAllocationArea() {
+ // Do a step to account for memory allocated so far before resetting.
+ InlineAllocationStep(top(), top(), nullptr, 0);
to_space_.Reset();
UpdateLinearAllocationArea();
// Clear all mark-bits in the to-space.
@@ -2121,6 +2215,10 @@ void PagedSpace::UpdateInlineAllocationLimit(size_t min_size) {
bool NewSpace::AddFreshPage() {
Address top = allocation_info_.top();
DCHECK(!Page::IsAtObjectStart(top));
+
+ // Do a step to account for memory allocated on the previous page.
+ InlineAllocationStep(top, top, nullptr, 0);
+
if (!to_space_.AdvancePage()) {
// No more pages left to advance.
return false;
@@ -2176,6 +2274,11 @@ bool NewSpace::EnsureAllocation(int size_in_bytes,
}
void SpaceWithLinearArea::StartNextInlineAllocationStep() {
+ if (heap()->allocation_step_in_progress()) {
+ // If we are mid-way through an existing step, don't start a new one.
+ return;
+ }
+
if (AllocationObserversActive()) {
top_on_previous_step_ = top();
UpdateInlineAllocationLimit(0);
@@ -2217,6 +2320,11 @@ void SpaceWithLinearArea::InlineAllocationStep(Address top,
Address top_for_next_step,
Address soon_object,
size_t size) {
+ if (heap()->allocation_step_in_progress()) {
+ // Avoid starting a new step if we are mid-way through an existing one.
+ return;
+ }
+
if (top_on_previous_step_) {
if (top < top_on_previous_step_) {
// Generated code decreased the top pointer to do folded allocations.
@@ -2608,7 +2716,6 @@ void FreeListCategory::Reset() {
FreeSpace* FreeListCategory::PickNodeFromList(size_t* node_size) {
DCHECK(page()->CanAllocate());
-
FreeSpace* node = top();
if (node == nullptr) return nullptr;
set_top(node->next());
@@ -2620,10 +2727,9 @@ FreeSpace* FreeListCategory::PickNodeFromList(size_t* node_size) {
FreeSpace* FreeListCategory::TryPickNodeFromList(size_t minimum_size,
size_t* node_size) {
DCHECK(page()->CanAllocate());
-
FreeSpace* node = PickNodeFromList(node_size);
if ((node != nullptr) && (*node_size < minimum_size)) {
- Free(node, *node_size, kLinkCategory);
+ Free(node->address(), *node_size, kLinkCategory);
*node_size = 0;
return nullptr;
}
@@ -2633,7 +2739,6 @@ FreeSpace* FreeListCategory::TryPickNodeFromList(size_t minimum_size,
FreeSpace* FreeListCategory::SearchForNodeInList(size_t minimum_size,
size_t* node_size) {
DCHECK(page()->CanAllocate());
-
FreeSpace* prev_non_evac_node = nullptr;
for (FreeSpace* cur_node = top(); cur_node != nullptr;
cur_node = cur_node->next()) {
@@ -2656,9 +2761,10 @@ FreeSpace* FreeListCategory::SearchForNodeInList(size_t minimum_size,
return nullptr;
}
-void FreeListCategory::Free(FreeSpace* free_space, size_t size_in_bytes,
+void FreeListCategory::Free(Address start, size_t size_in_bytes,
FreeMode mode) {
- CHECK(page()->CanAllocate());
+ DCHECK(page()->CanAllocate());
+ FreeSpace* free_space = FreeSpace::cast(HeapObject::FromAddress(start));
free_space->set_next(top());
set_top(free_space);
available_ += size_in_bytes;
@@ -2686,7 +2792,7 @@ void FreeListCategory::Relink() {
owner()->AddCategory(this);
}
-FreeList::FreeList(PagedSpace* owner) : owner_(owner), wasted_bytes_(0) {
+FreeList::FreeList() : wasted_bytes_(0) {
for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
categories_[i] = nullptr;
}
@@ -2704,11 +2810,6 @@ void FreeList::Reset() {
}
size_t FreeList::Free(Address start, size_t size_in_bytes, FreeMode mode) {
- if (size_in_bytes == 0) return 0;
-
- owner()->heap()->CreateFillerObjectAt(start, static_cast<int>(size_in_bytes),
- ClearRecordedSlots::kNo);
-
Page* page = Page::FromAddress(start);
page->DecreaseAllocatedBytes(size_in_bytes);
@@ -2719,11 +2820,10 @@ size_t FreeList::Free(Address start, size_t size_in_bytes, FreeMode mode) {
return size_in_bytes;
}
- FreeSpace* free_space = FreeSpace::cast(HeapObject::FromAddress(start));
// Insert other blocks at the head of a free list of the appropriate
// magnitude.
FreeListCategoryType type = SelectFreeListCategoryType(size_in_bytes);
- page->free_list_category(type)->Free(free_space, size_in_bytes, mode);
+ page->free_list_category(type)->Free(start, size_in_bytes, mode);
DCHECK_EQ(page->AvailableInFreeList(),
page->AvailableInFreeListFromAllocatedBytes());
return 0;
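The Unmapper changes above split the old single task counter into pending_unmapping_tasks_ (slots consumed since the last finalize) and active_unmapping_tasks_ (tasks still running), with MakeRoomForNewTasks() reclaiming slots once nothing is active. A standalone sketch of that bookkeeping pattern, with invented names, not V8's classes:

// Pending vs. active task slots with lazy finalization, illustrative only.
#include <atomic>
#include <cstdio>

constexpr int kMaxTasks = 4;

struct TaskBookkeeping {
  int pending = 0;             // slots consumed since the last finalize
  std::atomic<int> active{0};  // tasks currently running

  bool MakeRoomForNewTasks() {
    if (active.load() == 0 && pending > 0) {
      // Every started task has finished; reclaim all slots.
      pending = 0;
    }
    return pending != kMaxTasks;
  }

  bool TryStartTask() {
    if (!MakeRoomForNewTasks()) return false;  // task limit reached
    ++pending;
    ++active;
    return true;
  }

  void TaskFinished() { --active; }
};

int main() {
  TaskBookkeeping unmapper;
  for (int i = 0; i < kMaxTasks; i++) unmapper.TryStartTask();
  std::printf("room left: %d\n", unmapper.MakeRoomForNewTasks());  // 0
  for (int i = 0; i < kMaxTasks; i++) unmapper.TaskFinished();
  std::printf("room left: %d\n", unmapper.MakeRoomForNewTasks());  // 1
}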
diff --git a/deps/v8/src/heap/spaces.h b/deps/v8/src/heap/spaces.h
index 08fef7d6e3..1c8bad8dc5 100644
--- a/deps/v8/src/heap/spaces.h
+++ b/deps/v8/src/heap/spaces.h
@@ -139,6 +139,8 @@ enum FreeListCategoryType {
enum FreeMode { kLinkCategory, kDoNotLinkCategory };
+enum class SpaceAccountingMode { kSpaceAccounted, kSpaceUnaccounted };
+
enum RememberedSetType {
OLD_TO_NEW,
OLD_TO_OLD,
@@ -148,15 +150,10 @@ enum RememberedSetType {
// A free list category maintains a linked list of free memory blocks.
class FreeListCategory {
public:
- static const int kSize = kIntSize + // FreeListCategoryType type_
- kIntSize + // padding for type_
- kSizetSize + // size_t available_
- kPointerSize + // FreeSpace* top_
- kPointerSize + // FreeListCategory* prev_
- kPointerSize; // FreeListCategory* next_
-
- FreeListCategory()
- : type_(kInvalidCategory),
+ FreeListCategory(FreeList* free_list, Page* page)
+ : free_list_(free_list),
+ page_(page),
+ type_(kInvalidCategory),
available_(0),
top_(nullptr),
prev_(nullptr),
@@ -180,7 +177,7 @@ class FreeListCategory {
// category is currently unlinked.
void Relink();
- void Free(FreeSpace* node, size_t size_in_bytes, FreeMode mode);
+ void Free(Address address, size_t size_in_bytes, FreeMode mode);
// Picks a node from the list and stores its size in |node_size|. Returns
// nullptr if the category is empty.
@@ -196,11 +193,13 @@ class FreeListCategory {
FreeSpace* SearchForNodeInList(size_t minimum_size, size_t* node_size);
inline FreeList* owner();
- inline Page* page() const;
+ inline Page* page() const { return page_; }
inline bool is_linked();
bool is_empty() { return top() == nullptr; }
size_t available() const { return available_; }
+ void set_free_list(FreeList* free_list) { free_list_ = free_list; }
+
#ifdef DEBUG
size_t SumFreeList();
int FreeListLength();
@@ -218,6 +217,12 @@ class FreeListCategory {
FreeListCategory* next() { return next_; }
void set_next(FreeListCategory* next) { next_ = next; }
+ // This FreeListCategory is owned by the given free_list_.
+ FreeList* free_list_;
+
+ // This FreeListCategory holds free list entries of the given page_.
+ Page* const page_;
+
// |type_|: The type of this free list category.
FreeListCategoryType type_;
@@ -233,6 +238,8 @@ class FreeListCategory {
friend class FreeList;
friend class PagedSpace;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(FreeListCategory);
};
// MemoryChunk represents a memory region owned by a specific space.
@@ -370,7 +377,7 @@ class MemoryChunk {
+ kSizetSize // size_t wasted_memory_
+ kPointerSize // AtomicValue next_chunk_
+ kPointerSize // AtomicValue prev_chunk_
- + FreeListCategory::kSize * kNumberOfCategories
+ + kPointerSize * kNumberOfCategories
// FreeListCategory categories_[kNumberOfCategories]
+ kPointerSize // LocalArrayBufferTracker* local_tracker_
+ kIntptrSize // intptr_t young_generation_live_byte_count_
@@ -610,6 +617,8 @@ class MemoryChunk {
void set_owner(Space* space) { owner_.SetValue(space); }
+ bool IsPagedSpace() const;
+
void InsertAfter(MemoryChunk* other);
void Unlink();
@@ -620,8 +629,6 @@ class MemoryChunk {
void SetReadAndExecutable();
void SetReadAndWritable();
- inline void InitializeFreeListCategories();
-
protected:
static MemoryChunk* Initialize(Heap* heap, Address base, size_t size,
Address area_start, Address area_end,
@@ -699,7 +706,7 @@ class MemoryChunk {
// prev_chunk_ holds a pointer of type MemoryChunk
base::AtomicValue<MemoryChunk*> prev_chunk_;
- FreeListCategory categories_[kNumberOfCategories];
+ FreeListCategory* categories_[kNumberOfCategories];
LocalArrayBufferTracker* local_tracker_;
@@ -788,7 +795,7 @@ class Page : public MemoryChunk {
template <typename Callback>
inline void ForAllFreeListCategories(Callback callback) {
for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
- callback(&categories_[i]);
+ callback(categories_[i]);
}
}
@@ -820,7 +827,7 @@ class Page : public MemoryChunk {
}
FreeListCategory* free_list_category(FreeListCategoryType type) {
- return &categories_[type];
+ return categories_[type];
}
bool is_anchor() { return IsFlagSet(Page::ANCHOR); }
@@ -845,6 +852,10 @@ class Page : public MemoryChunk {
V8_EXPORT_PRIVATE void CreateBlackArea(Address start, Address end);
void DestroyBlackArea(Address start, Address end);
+ void InitializeFreeListCategories();
+ void AllocateFreeListCategories();
+ void ReleaseFreeListCategories();
+
#ifdef DEBUG
void Print();
#endif // DEBUG
@@ -1170,14 +1181,14 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
: heap_(heap),
allocator_(allocator),
pending_unmapping_tasks_semaphore_(0),
- concurrent_unmapping_tasks_active_(0) {
+ pending_unmapping_tasks_(0),
+ active_unmapping_tasks_(0) {
chunks_[kRegular].reserve(kReservedQueueingSlots);
chunks_[kPooled].reserve(kReservedQueueingSlots);
}
void AddMemoryChunkSafe(MemoryChunk* chunk) {
- if ((chunk->size() == Page::kPageSize) &&
- (chunk->executable() != EXECUTABLE)) {
+ if (chunk->IsPagedSpace() && chunk->executable() != EXECUTABLE) {
AddMemoryChunkSafe<kRegular>(chunk);
} else {
AddMemoryChunkSafe<kNonRegular>(chunk);
@@ -1238,6 +1249,8 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
return chunk;
}
+ bool MakeRoomForNewTasks();
+
template <FreeMode mode>
void PerformFreeMemoryOnQueuedChunks();
@@ -1247,7 +1260,8 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
std::vector<MemoryChunk*> chunks_[kNumberOfChunkQueues];
CancelableTaskManager::Id task_ids_[kMaxUnmapperTasks];
base::Semaphore pending_unmapping_tasks_semaphore_;
- intptr_t concurrent_unmapping_tasks_active_;
+ intptr_t pending_unmapping_tasks_;
+ base::AtomicNumber<intptr_t> active_unmapping_tasks_;
friend class MemoryAllocator;
};
@@ -1359,6 +1373,12 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
// and false otherwise.
bool CommitBlock(Address start, size_t size, Executability executable);
+ // Checks if an allocated MemoryChunk was intended to be used for executable
+ // memory.
+ bool IsMemoryChunkExecutable(MemoryChunk* chunk) {
+ return executable_memory_.find(chunk) != executable_memory_.end();
+ }
+
// Uncommit a contiguous block of memory [start..(start+size)[.
// start is not nullptr, the size is greater than zero, and the
// block is contained in the initial chunk. Returns true if it succeeded
@@ -1409,6 +1429,17 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
} while ((high > ptr) && !highest_ever_allocated_.TrySetValue(ptr, high));
}
+ void RegisterExecutableMemoryChunk(MemoryChunk* chunk) {
+ DCHECK(chunk->IsFlagSet(MemoryChunk::IS_EXECUTABLE));
+ DCHECK_EQ(executable_memory_.find(chunk), executable_memory_.end());
+ executable_memory_.insert(chunk);
+ }
+
+ void UnregisterExecutableMemoryChunk(MemoryChunk* chunk) {
+ DCHECK_NE(executable_memory_.find(chunk), executable_memory_.end());
+ executable_memory_.erase(chunk);
+ }
+
Isolate* isolate_;
CodeRange* code_range_;
@@ -1431,6 +1462,9 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
VirtualMemory last_chunk_;
Unmapper unmapper_;
+ // Data structure to remember allocated executable memory chunks.
+ std::unordered_set<MemoryChunk*> executable_memory_;
+
friend class heap::TestCodeRangeScope;
DISALLOW_IMPLICIT_CONSTRUCTORS(MemoryAllocator);
@@ -1731,7 +1765,7 @@ class V8_EXPORT_PRIVATE FreeList {
return kHuge;
}
- explicit FreeList(PagedSpace* owner);
+ FreeList();
// Adds a node on the free list. The block of size {size_in_bytes} starting
// at {start} is placed on the free list. The return value is the number of
@@ -1779,7 +1813,6 @@ class V8_EXPORT_PRIVATE FreeList {
size_t EvictFreeListItems(Page* page);
bool ContainsPageFreeListItems(Page* page);
- PagedSpace* owner() { return owner_; }
size_t wasted_bytes() { return wasted_bytes_.Value(); }
template <typename Callback>
@@ -1874,13 +1907,10 @@ class V8_EXPORT_PRIVATE FreeList {
return categories_[type];
}
- PagedSpace* owner_;
base::AtomicNumber<size_t> wasted_bytes_;
FreeListCategory* categories_[kNumberOfCategories];
friend class FreeListCategory;
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(FreeList);
};
// LocalAllocationBuffer represents a linear allocation area that is created
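
FreeList also loses its back pointer to the owning PagedSpace: the constructor no longer takes an owner, owner()/owner_ are removed, and the class becomes default-constructible (the DISALLOW_IMPLICIT_CONSTRUCTORS macro is dropped), with the category lifecycle driven through the {Initialize,Allocate,Release}FreeListCategories hooks added earlier in this diff. The sketch below shows one way such decoupled ownership can be arranged; the Toy* classes, the unique_ptr storage and the category count are assumptions for illustration, not V8's actual layout:

#include <array>
#include <memory>

// Invented Toy* names; only the ownership shape is illustrated.
class ToyFreeListCategory {};

class ToyFreeList {
 public:
  ToyFreeList() = default;  // default-constructible: no owner pointer any more

  // The owner drives the category lifecycle explicitly, in the spirit of the
  // {Allocate,Initialize,Release}FreeListCategories hooks above.
  void AllocateCategories() {
    for (auto& category : categories_)
      category = std::make_unique<ToyFreeListCategory>();
  }
  void ReleaseCategories() {
    for (auto& category : categories_) category.reset();
  }

 private:
  static constexpr int kNumberOfCategories = 6;  // illustrative count
  std::array<std::unique_ptr<ToyFreeListCategory>, kNumberOfCategories>
      categories_;
};

class ToyPagedSpace {
 public:
  ToyPagedSpace() { free_list_.AllocateCategories(); }
  ~ToyPagedSpace() { free_list_.ReleaseCategories(); }

 private:
  ToyFreeList free_list_;  // owned by value, default-constructed
};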
@@ -2086,11 +2116,22 @@ class V8_EXPORT_PRIVATE PagedSpace
MUST_USE_RESULT inline AllocationResult AllocateRaw(
int size_in_bytes, AllocationAlignment alignment);
+ size_t Free(Address start, size_t size_in_bytes, SpaceAccountingMode mode) {
+ if (size_in_bytes == 0) return 0;
+ heap_->CreateFillerObjectAt(start, static_cast<int>(size_in_bytes),
+ ClearRecordedSlots::kNo);
+ if (mode == SpaceAccountingMode::kSpaceAccounted) {
+ return AccountedFree(start, size_in_bytes);
+ } else {
+ return UnaccountedFree(start, size_in_bytes);
+ }
+ }
+
// Give a block of memory to the space's free list. It might be added to
// the free list or accounted as waste.
// If add_to_freelist is false then just accounting stats are updated and
// no attempt to add area to free list is made.
- size_t Free(Address start, size_t size_in_bytes) {
+ size_t AccountedFree(Address start, size_t size_in_bytes) {
size_t wasted = free_list_.Free(start, size_in_bytes, kLinkCategory);
Page* page = Page::FromAddress(start);
accounting_stats_.DecreaseAllocatedBytes(size_in_bytes, page);
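
The PagedSpace hunk above folds the old Free/UnaccountedFree pair into a single entry point whose SpaceAccountingMode argument decides whether the freed range also updates the space's accounting stats: kSpaceAccounted goes through AccountedFree, kSpaceUnaccounted through UnaccountedFree. A minimal sketch of that dispatch shape, assuming a simplified ToySpace with toy bookkeeping in place of V8's free list and accounting stats:

#include <cstddef>
#include <cstdint>

enum class SpaceAccountingMode { kSpaceAccounted, kSpaceUnaccounted };

// Stand-in for PagedSpace; only the free-path bookkeeping is sketched.
class ToySpace {
 public:
  size_t Free(uintptr_t start, size_t size_in_bytes, SpaceAccountingMode mode) {
    if (size_in_bytes == 0) return 0;
    // A real space would first write a filler object over the freed range.
    return mode == SpaceAccountingMode::kSpaceAccounted
               ? AccountedFree(start, size_in_bytes)
               : UnaccountedFree(start, size_in_bytes);
  }

 private:
  size_t AccountedFree(uintptr_t start, size_t size_in_bytes) {
    allocated_bytes_ -= static_cast<int64_t>(size_in_bytes);  // stats updated
    return AddToFreeList(start, size_in_bytes);
  }

  size_t UnaccountedFree(uintptr_t start, size_t size_in_bytes) {
    return AddToFreeList(start, size_in_bytes);  // free list only, no stats
  }

  size_t AddToFreeList(uintptr_t /*start*/, size_t size_in_bytes) {
    free_bytes_ += size_in_bytes;
    return size_in_bytes;  // bytes made available for reuse
  }

  int64_t allocated_bytes_ = 0;
  size_t free_bytes_ = 0;
};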
diff --git a/deps/v8/src/heap/store-buffer.h b/deps/v8/src/heap/store-buffer.h
index a69abcc886..58f47f4834 100644
--- a/deps/v8/src/heap/store-buffer.h
+++ b/deps/v8/src/heap/store-buffer.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_STORE_BUFFER_H_
-#define V8_STORE_BUFFER_H_
+#ifndef V8_HEAP_STORE_BUFFER_H_
+#define V8_HEAP_STORE_BUFFER_H_
#include "src/allocation.h"
#include "src/base/logging.h"
@@ -225,4 +225,4 @@ class StoreBuffer {
} // namespace internal
} // namespace v8
-#endif // V8_STORE_BUFFER_H_
+#endif // V8_HEAP_STORE_BUFFER_H_
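
This and the remaining header-only diffs in this section rename the include guards to the path-derived V8_HEAP_<FILE>_H_ form and spell the guard name out in the closing #endif comment. For a hypothetical header src/heap/example.h, the convention looks like:

// src/heap/example.h (hypothetical file, shown only to illustrate the
// path-derived include-guard naming used in these renames)
#ifndef V8_HEAP_EXAMPLE_H_
#define V8_HEAP_EXAMPLE_H_

// ... declarations ...

#endif  // V8_HEAP_EXAMPLE_H_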
diff --git a/deps/v8/src/heap/stress-marking-observer.h b/deps/v8/src/heap/stress-marking-observer.h
index b97c2b179c..37ebb82197 100644
--- a/deps/v8/src/heap/stress-marking-observer.h
+++ b/deps/v8/src/heap/stress-marking-observer.h
@@ -23,4 +23,4 @@ class StressMarkingObserver : public AllocationObserver {
} // namespace internal
} // namespace v8
-#endif
+#endif // V8_HEAP_STRESS_MARKING_OBSERVER_H_
diff --git a/deps/v8/src/heap/stress-scavenge-observer.h b/deps/v8/src/heap/stress-scavenge-observer.h
index 6f69afe4c5..b39b2eac59 100644
--- a/deps/v8/src/heap/stress-scavenge-observer.h
+++ b/deps/v8/src/heap/stress-scavenge-observer.h
@@ -36,4 +36,4 @@ class StressScavengeObserver : public AllocationObserver {
} // namespace internal
} // namespace v8
-#endif
+#endif // V8_HEAP_STRESS_SCAVENGE_OBSERVER_H_
diff --git a/deps/v8/src/heap/sweeper.cc b/deps/v8/src/heap/sweeper.cc
index 25ba0df8fd..2072e407e9 100644
--- a/deps/v8/src/heap/sweeper.cc
+++ b/deps/v8/src/heap/sweeper.cc
@@ -279,8 +279,8 @@ int Sweeper::RawSweep(Page* p, FreeListRebuildingMode free_list_mode,
memset(free_start, 0xCC, size);
}
if (free_list_mode == REBUILD_FREE_LIST) {
- freed_bytes = reinterpret_cast<PagedSpace*>(space)->UnaccountedFree(
- free_start, size);
+ freed_bytes = reinterpret_cast<PagedSpace*>(space)->Free(
+ free_start, size, SpaceAccountingMode::kSpaceUnaccounted);
max_freed_bytes = Max(freed_bytes, max_freed_bytes);
} else {
p->heap()->CreateFillerObjectAt(free_start, static_cast<int>(size),
@@ -318,8 +318,8 @@ int Sweeper::RawSweep(Page* p, FreeListRebuildingMode free_list_mode,
memset(free_start, 0xCC, size);
}
if (free_list_mode == REBUILD_FREE_LIST) {
- freed_bytes = reinterpret_cast<PagedSpace*>(space)->UnaccountedFree(
- free_start, size);
+ freed_bytes = reinterpret_cast<PagedSpace*>(space)->Free(
+ free_start, size, SpaceAccountingMode::kSpaceUnaccounted);
max_freed_bytes = Max(freed_bytes, max_freed_bytes);
} else {
p->heap()->CreateFillerObjectAt(free_start, static_cast<int>(size),
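
With the unified API, both branches of RawSweep now call Free(free_start, size, SpaceAccountingMode::kSpaceUnaccounted) instead of the removed UnaccountedFree helper. A self-contained, illustrative-only caller-side sketch of that pattern (DeadRange, SweepDeadRanges and FreeFn are invented names; the enum is re-declared so the snippet stands alone):

#include <cstddef>
#include <cstdint>
#include <vector>

// Mirrors the enum introduced in spaces.h.
enum class SpaceAccountingMode { kSpaceAccounted, kSpaceUnaccounted };

struct DeadRange {
  uintptr_t start;
  size_t size;
};

// A sweep-style loop that routes every dead range through a single Free-like
// entry point and tracks the largest freed block, the same shape as
// max_freed_bytes in RawSweep. FreeFn stands in for PagedSpace::Free.
template <typename FreeFn>
size_t SweepDeadRanges(const std::vector<DeadRange>& dead_ranges,
                       FreeFn free_fn) {
  size_t max_freed_bytes = 0;
  for (const DeadRange& range : dead_ranges) {
    size_t freed = free_fn(range.start, range.size,
                           SpaceAccountingMode::kSpaceUnaccounted);
    if (freed > max_freed_bytes) max_freed_bytes = freed;
  }
  return max_freed_bytes;
}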
diff --git a/deps/v8/src/heap/worklist.h b/deps/v8/src/heap/worklist.h
index 3421e16611..bb3eae2228 100644
--- a/deps/v8/src/heap/worklist.h
+++ b/deps/v8/src/heap/worklist.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_HEAP_WORKLIST_
-#define V8_HEAP_WORKLIST_
+#ifndef V8_HEAP_WORKLIST_H_
+#define V8_HEAP_WORKLIST_H_
#include <cstddef>
#include <utility>
@@ -388,4 +388,4 @@ class Worklist {
} // namespace internal
} // namespace v8
-#endif // V8_HEAP_WORKLIST_
+#endif // V8_HEAP_WORKLIST_H_